blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
30baae12e19f189b80c4c6bf2f69ad918fe7a895 | 83dec5389da7a2c3e4759a3b92abcae18d60c77e | /trainlog/_version.py | b0ae2e2d97b9c7231d2d855fbb09cab5ea69c876 | [
"MIT"
] | permissive | DouglasOrr/TrainLog | 0c72b837f38145601c8de154893a95f57c6c58ed | 5478a6f90627817aae6acb665a2c36192b3ec0f2 | refs/heads/master | 2023-02-08T10:45:43.631905 | 2021-01-01T09:42:59 | 2021-01-01T09:42:59 | 323,322,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | """Module version."""
__version__ = "0.3"
| [
"douglas.orr99@gmail.com"
] | douglas.orr99@gmail.com |
fdeecbc693b16c87cc920accd36f9da1ef8b27b7 | 42067b59116507b817bb85e517c1bebe9793e44d | /mysql/MysqlException/__init__.py | d810c89f0326ad8f5a144cd682d89f04a6cedb48 | [] | no_license | Yelphp/Python_study | b05517f24a7d68ede19d1fcd73c1845dcde6eb42 | 74bcec35894e6c7a9a00151ee6c88d7b92a60689 | refs/heads/master | 2022-09-15T18:20:15.701801 | 2020-06-02T07:46:40 | 2020-06-02T07:46:40 | 263,251,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | #!usr/bin/env python
class MysqlException(Exception):
    """Base exception type for the MySQL helper package.

    NOTE: the constructor prints a message ('初始化' = "initializing") as a
    side effect; this is kept for backward compatibility with existing use.
    """

    def __init__(self):
        # Initialize the Exception base so args/str() behave normally
        # (the original skipped this and ended with a stray `pass;`).
        super().__init__()
        print('初始化')
"yels@cc.cc"
] | yels@cc.cc |
54bcf6cbfab570bf169c4098ee5b6793f73a1e8e | d2e3d7ffab17950f49ca7739d164851c2cb20793 | /currency_exchange/rateUpdater/updater.py | bccd72530d60f92e105ed37f74c2372bced69dd3 | [] | no_license | diaa95/Python_Stack | d6211208bf872d8434154186d9abe9fa291e84e1 | 4d37dca254624a8faa52264f2cdea7b389e7224f | refs/heads/master | 2023-03-17T09:29:15.197759 | 2021-03-08T16:51:27 | 2021-03-08T16:51:27 | 323,318,327 | 0 | 3 | null | 2020-12-27T09:18:55 | 2020-12-21T11:37:05 | HTML | UTF-8 | Python | false | false | 270 | py | from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from rateUpdater import rateApi
def start():
    """Start a background scheduler that refreshes currency exchange rates.

    Schedules ``rateApi.update_rate`` to run every 45 minutes and returns
    immediately; the scheduler keeps running on its own background thread.
    """
    scheduler = BackgroundScheduler()
    scheduler.add_job(rateApi.update_rate, 'interval', minutes=45)
    scheduler.start()
    # NOTE(review): the scheduler object is not retained anywhere, so callers
    # cannot shut it down later -- confirm that is acceptable.
| [
"Diaa.abdaldayem@gmail.com"
] | Diaa.abdaldayem@gmail.com |
0bd37e75d236a48f03ea922f9cbda948310d785c | a67b85c3dd12be926cac1b773b034ceeb45f35dd | /contacts.py | 62d831725f77113a3fb88dcb1799fa2e6cf80c50 | [] | no_license | ShujaAbrar/python | 7dd2bb0d1ce5e42e41bd0cd2aa988997f5af0019 | d5b47ff56ba79132b89c573abfe196d71906f1b8 | refs/heads/master | 2022-09-16T11:55:51.178719 | 2020-05-28T15:04:53 | 2020-05-28T15:04:53 | 266,954,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | n=int(input())
l=[]
for i in range(n):
x=list(map(str,input().split()))
if x[0]=="add":
l.append(x[1])
if x[0]=="find":
count=0
for j in range(len(l)):
if l[j].find(x[1])!=-1:
count+=1
l[j]="-1"
print(count)
# ___________________________________________________
# output:
# sample input:
# 4
# add mapit
# add mapittech
# find mapit
# find teh
# sample output:
# 2
# 0
| [
"noreply@github.com"
] | noreply@github.com |
00d7668ae6a3f28c782338272199e6bbe8a40d2f | 97615bc7406963f570585b390997b26459cfbfa4 | /ask/qa/views.py | 619cea46a6981050361ea98b4709d6a5a7fb2dde | [] | no_license | s3b00/stepic_web_project | 5b1dba1b4ca656540426e6e408fb26cf52f37658 | 62de6bead5323a291e8ace3c7b96739d9d354ea8 | refs/heads/master | 2023-01-03T13:56:28.937493 | 2020-11-01T18:26:45 | 2020-11-01T18:26:45 | 301,691,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,327 | py | from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_GET
from django.http import HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage
from django.shortcuts import render, get_object_or_404, Http404
from .forms import AskForm, AnswerForm, RegisterForm, LoginForm
from .models import Question, Answer
@require_GET
def test(request, *args, **kwargs):
    """Health-check endpoint: always returns plain-text 'OK'."""
    return HttpResponse('OK')
@require_GET
def get_new(request, *args, **kwargs):
    """Paginated listing of the newest questions.

    GET params:
        limit -- page size, clamped to the range [1, 10] (default 10)
        page  -- 1-based page number (default 1); out-of-range values
                 fall back to the last page
    """
    try:
        limit = int(request.GET.get('limit', 10))
    except ValueError:
        limit = 10
    # Clamp to [1, 10]: the original only capped the upper bound, so
    # limit=0 or a negative value made Paginator raise.
    limit = min(max(limit, 1), 10)
    try:
        page = int(request.GET.get('page', 1))
    except ValueError:
        page = 1
    paginator = Paginator(Question.objects.new(), limit)
    try:
        page = paginator.page(page)
    except EmptyPage:
        # Requested page past the end: serve the last page instead of 404.
        page = paginator.page(paginator.num_pages)
    return render(request, 'news.html', {
        'paginator': paginator,
        'page': page,
        'questions': page.object_list
    })
@require_GET
def populars(request):
    """Paginated listing of popular questions.

    GET params:
        limit -- page size, clamped to the range [1, 10] (default 10)
        page  -- 1-based page number (default 1); out-of-range values
                 fall back to the last page
    """
    try:
        limit = int(request.GET.get('limit', 10))
    except ValueError:
        limit = 10
    # Clamp to [1, 10]: the original only capped the upper bound, so
    # limit=0 or a negative value made Paginator raise.
    limit = min(max(limit, 1), 10)
    try:
        page = int(request.GET.get('page', 1))
    except ValueError:
        page = 1
    paginator = Paginator(Question.objects.popular(), limit)
    try:
        page = paginator.page(page)
    except EmptyPage:
        # Requested page past the end: serve the last page instead of 404.
        page = paginator.page(paginator.num_pages)
    return render(request, 'populars.html', {
        'paginator': paginator,
        'page': page,
        'questions': page.object_list,
        'current_user_user': request.user,
        'current_user_session': request.session
    })
@login_required()
def get_question(request, pk):
    """Question detail page; on POST, save a new answer from AnswerForm.

    Renders the question (404 if ``pk`` is unknown) together with either a
    fresh answer form (GET) or the bound form from the POST.
    """
    if request.method == "POST":
        form = AnswerForm(request.POST)
        if form.is_valid():
            # NOTE(review): the answer is saved twice (form.save() then again
            # after setting the author), and no redirect follows the POST, so
            # a browser refresh can resubmit -- confirm both are intended.
            answer = form.save()
            answer.author = request.user
            answer.save()
    else:
        form = AnswerForm()
    question = get_object_or_404(Question, pk=pk)
    return render(request, 'question_details.html', {
        'question': question,
        'form': form,
    })
@login_required
def ask(request):
    """Create a new question from AskForm and redirect to its detail URL."""
    if request.method == 'POST':
        form = AskForm(request.POST)
        if form.is_valid():
            # NOTE(review): the question is saved twice (form.save() then
            # again after setting the author) -- confirm intended.
            question = form.save()
            question.author = request.user
            question.save()
            url = question.get_url()
            return HttpResponseRedirect(url)
    else:
        form = AskForm()
    # Reached on GET, or on POST with an invalid (bound) form.
    return render(request, 'add_question.html', {
        'form': form,
    })
def register(request):
    """Sign-up view: create a user from RegisterForm, then redirect home."""
    if request.method == "POST":
        form = RegisterForm(request.POST)
        if form.is_valid():
            user = form.save()
            # NOTE(review): assigning request.user does not create a session;
            # django.contrib.auth.login() is normally required to actually
            # log the new user in -- confirm.
            request.user = user
            return HttpResponseRedirect('/')
    else:
        form = RegisterForm()
    return render(request, 'register.html', {
        'form': form
    })
def login(request):
    """Login view backed by LoginForm; redirects home on success."""
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            # NOTE(review): assigning request.user does not persist a session;
            # django.contrib.auth.login() is normally required here -- confirm.
            request.user = form.clean()
            return HttpResponseRedirect('/')
    else:
        form = LoginForm()
    return render(request, 'login.html', {
        'form': form
    })
| [
"seboo@yandex.by"
] | seboo@yandex.by |
dfda23ba52d06ae59f94778824b2821140346d4b | 924aa2af8991e9ff18821bfd852a27410b8af583 | /app/blog/main.py | 6f8e6b08276dc321cfd4ab59e6f257317f1519a1 | [] | no_license | cmanish049/fastapi | e1b86f6fdb7f172e305cf6925fcfc8fca430a34b | 433af34d765c707bcb834401930773b25e2263dd | refs/heads/main | 2023-06-05T18:00:50.818915 | 2021-06-15T09:40:47 | 2021-06-15T09:40:47 | 377,111,115 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from blog.routers import user
from fastapi import FastAPI
from blog import models
from blog.database import engine
from blog.routers import blog, user, authentication
# Application entry point: create the FastAPI app, ensure the DB schema
# exists, and mount the feature routers.
app = FastAPI()

# Create all ORM-declared tables on startup (no-op if they already exist).
models.Base.metadata.create_all(bind=engine)

app.include_router(authentication.router)
app.include_router(blog.router)
app.include_router(user.router)
| [
"cmanish049@gmail.com"
] | cmanish049@gmail.com |
59835f76410fdd430aaafe095baf7b9c493635fe | f848ebf1adb25cc6d188f43fb02c06dad1b01651 | /api/employee.py | 71e0dada8044141e1a869937b0bb167c0e182676 | [] | no_license | miao88318/day03_apiTestIHRM | 673320c724d9a661fa9ed120a62e0d82118719d9 | 213e4a498055e693993b21ca2bc7942af2a25c74 | refs/heads/master | 2022-07-28T04:39:05.390142 | 2020-05-21T07:06:23 | 2020-05-21T07:06:23 | 265,769,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # 导包
import requests
# 创建员工的api类
# 创建员工的api类
class TestEmployeeApi:
    """Thin requests-based client for the IHRM employee endpoints.

    Each method returns the raw ``requests.Response`` so callers can assert
    on status code and JSON body themselves.
    """

    # Shared host; previously this literal was duplicated per-URL.
    BASE_URL = "http://ihrm-test.itheima.net"

    def __init__(self):
        self.login_url = self.BASE_URL + "/api/sys/login"
        self.emp_url = self.BASE_URL + "/api/sys/user"

    def add_emp(self, headers, username, mobile):
        """POST a new employee record; only name/mobile vary per call."""
        response = requests.post(self.emp_url,
                                 json={
                                     "username": username,
                                     "mobile": mobile,
                                     "timeOfEntry": "2020-05-05",
                                     "formOfEmployment": 1,
                                     "workNumber": "123433",
                                     "departmentName": "测试部",
                                     "departmentId": "1063678149528784896",
                                     "correctionTime": "2020-05-17T16:00:00.000Z"
                                 }, headers=headers)
        return response

    def query_emp(self, emp_id, headers):
        """GET a single employee by id."""
        query_url = self.emp_url + "/" + emp_id
        response = requests.get(query_url, headers=headers)
        return response

    def modify_emp(self, emp_id, headers, username):
        """PUT an updated username for the given employee."""
        modify_url = self.emp_url + "/" + emp_id
        response = requests.put(url=modify_url, json={"username": username},
                                headers=headers)
        return response

    def delete_emp(self, emp_id, headers):
        """DELETE the given employee."""
        delete_url = self.emp_url + "/" + emp_id
        response = requests.delete(url=delete_url, headers=headers)
        return response
"stan@stan.com"
] | stan@stan.com |
738ca2b4d18b5b461b81b8391794ffc365fb64ac | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190422000909.py | 44131ed7c7cc6b39d57b9fe072ecd26c460c9af9 | [] | no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,138 | py | # Jiaxi Zhang
# George McAlear
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import asyncio
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose
#particle filter functionality
class ParticleFilter:
def __init__(self, grid):
self.particles = Particle.create_random(PARTICLE_COUNT, grid)
self.grid = grid
def update(self, odom, r_marker_list):
# ---------- Motion model update ----------
self.particles = motion_update(self.particles, odom)
# ---------- Sensor (markers) model update ----------
self.particles = measurement_update(self.particles, r_marker_list, self.grid)
# ---------- Show current state ----------
# Try to find current best estimate for display
m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
return (m_x, m_y, m_h, m_confident)
class CozmoWarehouseWorker:
    """Drives a Cozmo robot between pick-up and drop-off zones in an arena.

    Maintains two pose frames: the robot's internal odometry frame
    (``current_robot_pose``) and the arena frame estimated by a particle
    filter (``current_arena_pose``, in grid inches). A GUI thread visualizes
    the filter state.
    """

    def __init__(self, robot: cozmo.robot.Robot):
        self.current_arena_pose = None
        self.current_robot_pose = robot.pose
        self.robot = robot

        # start streaming
        robot.camera.image_stream_enabled = True
        robot.camera.color_image_enabled = False
        robot.camera.enable_auto_exposure()

        # Obtain the camera intrinsics matrix
        fx, fy = robot.camera.config.focal_length.x_y
        cx, cy = robot.camera.config.center.x_y
        # NOTE(review): np.float is deprecated/removed in NumPy >= 1.20/1.24;
        # this code targets an older NumPy.
        self.camera_settings = np.array([
            [fx, 0, cx],
            [ 0, fy, cy],
            [ 0, 0, 1]
        ], dtype=np.float)

        # Key arena poses (grid inches) and waypoint routes between them.
        self.pick_up_pose = Pose(x=4.5, y=13.75, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))

        self.drop_off_directions = [Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose]
        self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose]

        self.drive_speed = speed_mmps(50)

        print("Robot initialized!")
        self.grid = CozGrid("map_arena.json")
        self.pf = ParticleFilter(self.grid)
        # NOTE(review): "Robot initialized!" is printed twice -- likely a
        # leftover duplicate.
        print("Robot initialized!")
        threading.Thread(target=self.runGUI).start()

    def runGUI(self):
        """GUI thread body: build the window and block in its main loop."""
        self.gui = GUIWindow(self.grid, show_camera=True)
        self.gui.show_particles(self.pf.particles)
        self.gui.show_mean(0, 0, 0)
        self.gui.start()

    async def drive_to(self, directions):
        """Drive to one Pose, or through a list of waypoint Poses in order."""
        print("-" * 20 + "DRIVING" + "-" * 20)
        if isinstance(directions, (list,)):
            for pose in directions:
                await self.__drive_to_pose(pose)
        else:
            await self.__drive_to_pose(directions)

    async def __drive_to_pose(self, pose):
        """Drive to a single arena-frame pose, then refresh the pose estimate."""
        print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
        translation = (pose - self.current_arena_pose).position
        directions = Pose(x=translation.x, y=translation.y, z=0, angle_z=pose.rotation.angle_z)
        print("We will follow these directions: ", directions, "\n\n")
        await self.__execute_directions(directions)
        print("Directions followed!", "\n\n")
        self.update_current_arena_pose()

    def update_current_arena_pose(self):
        """Fold the robot's odometry delta into the arena-frame pose estimate."""
        print("-" * 20 + "UPDATING POSE" + "-" * 20)
        # Rotation between the odometry frame and the arena frame.
        coordinate_systems_diff = diff_heading_deg(self.current_robot_pose.rotation.angle_z.degrees, self.current_arena_pose.rotation.angle_z.degrees)
        arena_initial_pose_mm = rotate_point(self.current_robot_pose.position.x, self.current_robot_pose.position.y, coordinate_systems_diff)
        arena_final_pose_mm = rotate_point(self.robot.pose.position.x, self.robot.pose.position.y, coordinate_systems_diff)
        d_x = arena_final_pose_mm[0] - arena_initial_pose_mm[0]
        d_y = arena_final_pose_mm[1] - arena_initial_pose_mm[1]
        d_heading = self.robot.pose.rotation.angle_z - self.current_robot_pose.rotation.angle_z
        # NOTE(review): difference_pose is computed but never used, and the
        # expression below subtracts the rotate_point() results directly --
        # confirm rotate_point returns a type supporting '-' as intended.
        difference_pose = Pose(x=d_x, y=d_y, z=0, angle_z=d_heading)
        print("We think we moved ", convertPoseFromMmToInches(arena_final_pose_mm - arena_initial_pose_mm), "\n\n")
        self.current_arena_pose = self.current_arena_pose + convertPoseFromMmToInches(arena_final_pose_mm - arena_initial_pose_mm)
        print("Current pose is now ", self.current_arena_pose, "\n\n")

    async def pick_up_cube(self, tries=5):
        """Locate a light cube and pick it up, asking for help on failure."""
        print("-" * 20 + "GETTING CUBE" + "-" * 20)
        cube = await self.robot.world.wait_for_observed_light_cube(timeout=30)
        print("Found cube: %s" % cube)
        # NOTE(review): pickup_object(...).wait_for_completed() is awaited
        # nowhere here and .obj is read synchronously -- confirm this works
        # with the cozmo SDK's async action API.
        picked_up_cube = await self.robot.pickup_object(cube, num_retries=tries).wait_for_completed().obj
        if (picked_up_cube == None):
            print("Could not get the cube.")
            await self.robot.say_text("Help me!").wait_for_completed()
            # NOTE(review): asyncio.sleep(5) is not awaited, so it does not
            # actually pause -- likely should be `await asyncio.sleep(5)`.
            asyncio.sleep(5)
        else:
            print("Picked up cube!")

    async def set_down_cube(self):
        """Lower the lift (releasing the cube) and reset the head angle."""
        print("-" * 20 + "SETTING DOWN CUBE" + "-" * 20)
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

    async def __execute_directions(self, directions):
        """Execute a relative move as: align to +X, drive X, turn, drive Y, final turn."""
        print("Current arena pose is:", self.current_arena_pose, "\n\n")
        print("Current robot pose is:", self.robot.pose, "\n\n")
        await self.robot.turn_in_place(angle=degrees(-self.current_arena_pose.rotation.angle_z.degrees)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose, "\n\n")
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=self.drive_speed).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose, "\n\n")
        await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
        print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose, "\n\n")
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=self.drive_speed).wait_for_completed()
        print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose, "\n\n")
        print("ROBOT is TURNING ", diff_heading_deg(directions.rotation.angle_z.degrees, 90), "degrees.", "\n\n")
        await self.robot.turn_in_place(angle=degrees(diff_heading_deg(directions.rotation.angle_z.degrees, 90))).wait_for_completed()
        print("ROBOT is at AFTER FINAL TURN", self.robot.pose, "\n\n")

    async def localize(self, turn_angle=20):
        """Spin in place, feeding odometry + marker observations to the
        particle filter until it converges; sets current_arena_pose."""
        print("-" * 20 + "LOCALIZING" + "-" * 20)
        # reset our location estimates
        conf = False
        self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)

        # reset lift and head
        await self.robot.set_lift_height(0.0).wait_for_completed()
        await self.robot.set_head_angle(degrees(3)).wait_for_completed()

        while not conf:
            # move a little
            self.current_robot_pose = self.robot.pose
            await self.robot.turn_in_place(angle=degrees(turn_angle)).wait_for_completed()
            odometry = self.__compute_odometry()
            detected_markers, camera_image = await self.__marker_processing()

            # update, motion, and measurment with the odometry and marker data
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)

            # update gui
            self.gui.show_particles(self.pf.particles)
            self.gui.show_mean(curr_x, curr_y, curr_h)
            self.gui.show_camera_image(camera_image)
            self.gui.updated.set()

        self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=degrees(curr_h))
        print("We localized to arena location ", self.current_arena_pose)

    def __compute_odometry(self, cvt_inch=True):
        '''
        Compute the odometry given the current pose of the robot (use robot.pose)

        Input:
            - curr_pose: a cozmo.robot.Pose representing the robot's current location
            - cvt_inch: converts the odometry into grid units

        Returns:
            - 3-tuple (dx, dy, dh) representing the odometry
        '''
        last_x, last_y, last_h = self.current_robot_pose.position.x, self.current_robot_pose.position.y, \
            self.current_robot_pose.rotation.angle_z.degrees
        curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
            self.robot.pose.rotation.angle_z.degrees

        dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
        if cvt_inch:
            dx, dy = dx / self.grid.scale, dy / self.grid.scale

        return (dx, dy, diff_heading_deg(curr_h, last_h))

    async def __marker_processing(self, show_diagnostic_image=False):
        '''
        Obtain the visible markers from the current frame from Cozmo's camera.
        Since this is an async function, it must be called using await, for example:

            markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)

        Input:
            - robot: cozmo.robot.Robot object
            - camera_settings: 3x3 matrix representing the camera calibration settings
            - show_diagnostic_image: if True, shows what the marker detector sees after processing
        Returns:
            - a list of detected markers, each being a 3-tuple (rx, ry, rh)
              (as expected by the particle filter's measurement update)
            - a PIL Image of what Cozmo's camera sees with marker annotations
        '''
        # Wait for the latest image from Cozmo
        image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)

        # Convert the image to grayscale
        image = np.array(image_event.image)
        image = color.rgb2gray(image)

        # Detect the markers
        markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)

        # Measured marker list for the particle filter, scaled by the grid scale
        marker_list = [marker['xyh'] for marker in markers]
        marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]

        # Annotate the camera image with the markers
        if not show_diagnostic_image:
            annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(annotated_image, markers, scale=2)
        else:
            diag_image = color.gray2rgb(diag['filtered_image'])
            diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
            annotator.annotate_markers(diag_image, markers, scale=2)
            annotated_image = diag_image

        return marker_list, annotated_image
async def run(robot: cozmo.robot.Robot):
    """Main behavior loop: localize once, then shuttle cubes between the
    pick-up and drop-off zones forever."""
    cosimo = CozmoWarehouseWorker(robot)
    await cosimo.localize()
    await cosimo.drive_to(cosimo.pick_up_pose)
    while True:
        await cosimo.pick_up_cube(tries=5)
        await cosimo.drive_to(cosimo.drop_off_directions)
        await cosimo.set_down_cube()
        await cosimo.drive_to(cosimo.pick_up_directions)
class CozmoThread(threading.Thread):
    """Non-daemon thread that owns the cozmo SDK event loop."""

    def __init__(self):
        threading.Thread.__init__(self, daemon=False)

    def run(self):
        cozmo.robot.Robot.drive_off_charger_on_connect = False  # Cozmo can stay on his charger
        cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
    # cozmo thread: runs the SDK loop; the particle-filter GUI runs on a
    # separate thread started inside CozmoWarehouseWorker.
    cozmo_thread = CozmoThread()
    cozmo_thread.start()
| [
"josh@lawn-143-215-110-217.lawn.gatech.edu"
] | josh@lawn-143-215-110-217.lawn.gatech.edu |
8d7f11c56fe6bb5b741355a5dfad0460a1ea89f4 | 10b4db1d4f894897b5ee435780bddfdedd91caf7 | /thrift/compiler/test/fixtures/basic-annotations/gen-py3/module/types.pyi | d60450c59a3809ab28d5574573d39ae4ae414318 | [
"Apache-2.0"
] | permissive | SammyEnigma/fbthrift | 04f4aca77a64c65f3d4537338f7fbf3b8214e06a | 31d7b90e30de5f90891e4a845f6704e4c13748df | refs/heads/master | 2021-11-11T16:59:04.628193 | 2021-10-12T11:19:22 | 2021-10-12T11:20:27 | 211,245,426 | 1 | 0 | Apache-2.0 | 2021-07-15T21:12:07 | 2019-09-27T05:50:42 | C++ | UTF-8 | Python | false | false | 4,129 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
import folly.iobuf as _fbthrift_iobuf
import thrift.py3.types
import thrift.py3.exceptions
from thrift.py3.types import __NotSet, NOTSET
import typing as _typing
from typing_extensions import Final
import sys
import itertools
__property__ = property
# Generated Thrift stub -- do not hand-edit; regenerate from the .thrift IDL.
class MyEnum(thrift.py3.types.Enum):
    MyValue1: MyEnum = ...
    MyValue2: MyEnum = ...
    DOMAIN: MyEnum = ...
# Generated Thrift stub -- do not hand-edit; regenerate from the .thrift IDL.
class MyStructNestedAnnotation(thrift.py3.types.Struct, _typing.Hashable):
    class __fbthrift_IsSet:
        name: bool
        pass

    name: Final[str] = ...

    def __init__(
        self, *,
        name: _typing.Optional[str]=None
    ) -> None: ...

    def __call__(
        self, *,
        name: _typing.Union[str, __NotSet, None]=NOTSET
    ) -> MyStructNestedAnnotation: ...

    def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['MyStructNestedAnnotation'], bytes]]: ...
    def __hash__(self) -> int: ...
    def __str__(self) -> str: ...
    def __repr__(self) -> str: ...
    def __lt__(self, other: 'MyStructNestedAnnotation') -> bool: ...
    def __gt__(self, other: 'MyStructNestedAnnotation') -> bool: ...
    def __le__(self, other: 'MyStructNestedAnnotation') -> bool: ...
    def __ge__(self, other: 'MyStructNestedAnnotation') -> bool: ...
# Generated Thrift stub -- do not hand-edit; regenerate from the .thrift IDL.
class MyStruct(thrift.py3.types.Struct, _typing.Hashable):
    class __fbthrift_IsSet:
        major: bool
        package: bool
        annotation_with_quote: bool
        class_: bool
        annotation_with_trailing_comma: bool
        empty_annotations: bool
        pass

    major: Final[int] = ...
    package: Final[str] = ...
    annotation_with_quote: Final[str] = ...
    class_: Final[str] = ...
    annotation_with_trailing_comma: Final[str] = ...
    empty_annotations: Final[str] = ...

    def __init__(
        self, *,
        major: _typing.Optional[int]=None,
        package: _typing.Optional[str]=None,
        annotation_with_quote: _typing.Optional[str]=None,
        class_: _typing.Optional[str]=None,
        annotation_with_trailing_comma: _typing.Optional[str]=None,
        empty_annotations: _typing.Optional[str]=None
    ) -> None: ...

    def __call__(
        self, *,
        major: _typing.Union[int, __NotSet, None]=NOTSET,
        package: _typing.Union[str, __NotSet, None]=NOTSET,
        annotation_with_quote: _typing.Union[str, __NotSet, None]=NOTSET,
        class_: _typing.Union[str, __NotSet, None]=NOTSET,
        annotation_with_trailing_comma: _typing.Union[str, __NotSet, None]=NOTSET,
        empty_annotations: _typing.Union[str, __NotSet, None]=NOTSET
    ) -> MyStruct: ...

    def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['MyStruct'], bytes]]: ...
    def __hash__(self) -> int: ...
    def __str__(self) -> str: ...
    def __repr__(self) -> str: ...
    def __lt__(self, other: 'MyStruct') -> bool: ...
    def __gt__(self, other: 'MyStruct') -> bool: ...
    def __le__(self, other: 'MyStruct') -> bool: ...
    def __ge__(self, other: 'MyStruct') -> bool: ...
# Generated Thrift stub -- do not hand-edit; regenerate from the .thrift IDL.
class SecretStruct(thrift.py3.types.Struct, _typing.Hashable):
    class __fbthrift_IsSet:
        id: bool
        password: bool
        pass

    id: Final[int] = ...
    password: Final[str] = ...

    def __init__(
        self, *,
        id: _typing.Optional[int]=None,
        password: _typing.Optional[str]=None
    ) -> None: ...

    def __call__(
        self, *,
        id: _typing.Union[int, __NotSet, None]=NOTSET,
        password: _typing.Union[str, __NotSet, None]=NOTSET
    ) -> SecretStruct: ...

    def __reduce__(self) -> _typing.Tuple[_typing.Callable, _typing.Tuple[_typing.Type['SecretStruct'], bytes]]: ...
    def __hash__(self) -> int: ...
    def __str__(self) -> str: ...
    def __repr__(self) -> str: ...
    def __lt__(self, other: 'SecretStruct') -> bool: ...
    def __gt__(self, other: 'SecretStruct') -> bool: ...
    def __le__(self, other: 'SecretStruct') -> bool: ...
    def __ge__(self, other: 'SecretStruct') -> bool: ...
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
9f48bfedb2c4173468cb9cd3c705f3b68fd47718 | 340352a8f482b765c3105f595502299c5e318961 | /Raia2011/model/name2idx/variables.py | 486f0c86f54b8984a157707c9b890cb701bf8703 | [
"MIT"
] | permissive | Jin005/cancer_modeling | 9e7e98f6a76c9aded1dad7f67b9e54b3fb6c9ade | 0db555dfd0e35dfa220207c775311bbba30158fc | refs/heads/master | 2022-11-05T19:57:50.630021 | 2020-06-27T04:54:24 | 2020-06-27T04:54:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | var_names = [\
'IL13stimulation',
'Rec',
'Rec_i',
'IL13_Rec',
'p_IL13_Rec',
'p_IL13_Rec_i',
'JAK2',
'pJAK2',
'SHP1',
'STAT5',
'pSTAT5',
'SOCS3mRNA',
'DecoyR',
'IL13_DecoyR',
'SOCS3',
'CD274mRNA',
#
'len_f_vars'\
]
for idx,name in enumerate(var_names):
exec('%s=%d'%(name,idx)) | [
"himoto@protein.osaka-u.ac.jp"
] | himoto@protein.osaka-u.ac.jp |
f6bdb6fdae81f13cfe121cc6e8b2f81bffc9cc72 | 485cf3c70fcaa68689a2b690b6465f1d6bcf21bd | /Python3_Selenium3/第7章/7.28.py | 8d5897a67392f8b8f40cc6a250867a33283293b2 | [] | no_license | lxz0503/study_20190608 | 5ffe08c4704bb00ad8d1980baf16b8f5e7135ff4 | 47c37798140883b8d6dc21ec5da5bc7a20988ce9 | refs/heads/master | 2022-12-23T17:23:45.039015 | 2021-06-23T14:50:19 | 2021-06-23T14:50:19 | 190,884,812 | 1 | 3 | null | 2022-12-15T23:17:33 | 2019-06-08T12:22:56 | Python | GB18030 | Python | false | false | 455 | py |
###
### The companion video course has been published; for questions while studying, contact the author (QQ: 2574674466) ###
###
#coding=utf-8
# Demo: iterate a dict's items before and after clearing it.
dict_1 = {'Name': 'Jack', 'Age': 18, 'Score': 100}
print("操作字典元素之前,遍历并打印字典元素如下:")
for k, v in dict_1.items():
    print(f"{k}:{v}")
dict_1.clear()
print("操作字典元素之后,遍历并打印字典元素如下:")
print(dict_1)
for k, v in dict_1.items():
    print(f"{k}:{v}")
| [
"lxz_20081025@163.com"
] | lxz_20081025@163.com |
eca833dd5032aa1e6795775cf3cdd34363e3c686 | 4d5f3693e6fc2c7b2c931376324229555cb72af7 | /driving_predicates.py | 4e790ce9c574c6a63b657c1b054c2cf856ba5ee0 | [] | no_license | m-j-mcdonald/driving_sim | a2ba8208f1fcf9aad2bc994d49e65bb618847134 | dc94b172ba5abb20e19039ecd1f83819f2d47d95 | refs/heads/master | 2020-03-20T03:23:49.390665 | 2018-07-10T00:11:21 | 2018-07-10T00:11:21 | 137,144,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,894 | py | import numpy as np
from sco.expr import Expr, AffExpr, EqExpr, LEqExpr
from core.util_classes.common_predicates import ExprPredicate
from internal_state.dynamics import *
# Planner tuning constants (units appear to be grid units -- TODO confirm).
MOVE_FACTOR = 2  # presumably scales per-step movement -- TODO confirm
END_DIST = 4     # goal-proximity threshold -- TODO confirm
COL_DIST = 0.5   # collision clearance threshold -- TODO confirm
def add_to_attr_inds_and_res(t, attr_inds, res, param, attr_name_val_tuples):
    """Record attribute writes for ``param`` at timestep ``t``.

    For each (attr_name, val) pair, writes ``val`` into the free entries of
    the parameter's attribute at timestep ``t`` (symbols always use t=0),
    and accumulates the written values and index tuples into the ``res``
    and ``attr_inds`` maps keyed by ``param``.
    """
    # Symbols carry a single timestep, so always index column 0 for them.
    timestep = 0 if param.is_symbol() else t
    for attr_name, val in attr_name_val_tuples:
        free_rows = np.where(param._free_attrs[attr_name][:, timestep])[0]
        getattr(param, attr_name)[free_rows, timestep] = val[free_rows]
        written = val[free_rows].flatten().tolist()
        entry = (attr_name, free_rows, timestep)
        if param not in attr_inds:
            res[param] = written
            attr_inds[param] = [entry]
        else:
            res[param].extend(written)
            attr_inds[param].append(entry)
class DrivingPredicate(ExprPredicate):
    """Base class for all driving-domain predicates.

    Stores the simulator handle and forwards everything else to
    ExprPredicate.
    """

    def __init__(self, name, e, attr_inds, params, expected_param_types,
                 active_range=(0, 0), sim=None, priority=0):
        # Defaults added: most subclasses (HLPred, At, Near, VelAt) call this
        # initializer without an active_range, which previously raised a
        # TypeError because the parameter had no default value.
        self.sim = sim
        super(DrivingPredicate, self).__init__(name, e, attr_inds, params, expected_param_types, active_range=active_range, sim=sim, priority=priority)
class HLPred(DrivingPredicate):
    """Base for high-level (task-level) predicates.

    Builds a trivially-satisfiable 2x2 zero equality expression; subclasses
    must set ``self.obj`` before calling this initializer, since the
    attr_inds mapping reads it.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        # NOTE(review): np.int was removed in NumPy >= 1.24; this file
        # targets an older NumPy release.
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int))])])
        A = np.zeros((2,2))
        b = np.zeros((2,1))
        val = np.zeros((2, 1))
        aff_e = AffExpr(A, b)
        e = EqExpr(aff_e, val)
        super(HLPred, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
class HLNoCollisions(HLPred):
    """High-level predicate over the collision state of all vehicles."""

    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.obj, = params
        # Bug fix: HLPred.__init__ accepts no `priority` argument, so the
        # original call with priority=-2 raised a TypeError (HLPred already
        # passes priority=-2 itself).
        super(HLNoCollisions, self).__init__(name, params, expected_param_types, sim=sim)
        self.spacial_anchor = False

    def check_if_true(self, sim):
        # NOTE(review): despite the "NoCollisions" name this returns True when
        # any vehicle *is* in collision -- confirm the intended polarity.
        return np.any([sim.check_all_collisions(v) for v in sim.user_vehicles]) or \
               np.any([sim.check_all_collisions(v) for v in sim.external_vehicles])
class HLCrateInTrunk(HLPred):
    """High-level predicate: the crate is inside the vehicle's trunk."""

    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.crate = params
        # Bug fix: HLPred.__init__ accepts no `priority` argument, so the
        # original call with priority=-2 raised a TypeError.
        super(HLCrateInTrunk, self).__init__(name, params, expected_param_types, sim=sim)
        self.spacial_anchor = False

    def check_if_true(self, sim):
        return self.obj.geom.in_trunk(self.crate.geom)
class DynamicPredicate(DrivingPredicate):
    """Base for vehicle-dynamics constraints spanning two timesteps.

    Subclasses must assign ``self.f`` and ``self.grad`` *before* calling
    this initializer, since the expression is built from them here.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.obj, = params

        # State/control attributes stacked over the (0,1) active range.
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int)),
                                             ("vel", np.array([0], dtype=np.int)),
                                             ("phi", np.array([0], dtype=np.int)),
                                             ("u1", np.array([0], dtype=np.int)),
                                             ("u2", np.array([0], dtype=np.int))])])

        # NOTE(review): val is (1, 14) while subclasses' f(x) return a single
        # residual value -- confirm the expected target shape for EqExpr.
        val = np.zeros((1, 14))
        dynamics_expr = Expr(self.f, self.grad)
        e = EqExpr(dynamics_expr, val)
        super(DynamicPredicate, self).__init__(name, e, attr_inds, params, expected_param_types, active_range=(0,1), sim=sim, priority=1)
        self.spacial_anchor = False
class XValid(DynamicPredicate):
    """Dynamics constraint tying the next x-position to the vehicle model."""

    def __init__(self, name, params, expected_param_types, sim=None):
        def f(x):
            # Residual: decision variable for next px minus model prediction.
            # NOTE(review): reads self.sess, presumably set by a base class.
            return np.array([x[7] - next_px_f(x[0], x[2], x[3], x[5], x[6], self.sess)])

        def grad(x):
            # Jacobian w.r.t. the stacked 14-dim state/control vector.
            jac = np.zeros((1, 14))
            jac[0, 7] = 1
            jac[0, 5], jac[0, 6] = next_px_grad(x[0], x[2], x[3], x[5], x[6], self.sess)
            return jac

        self.f = f
        self.grad = grad
        # Bug fix: `super(XValid).__init__(...)` used an *unbound* super
        # object, which raises at runtime; super must be bound to self.
        super(XValid, self).__init__(name, params, expected_param_types, sim)
class YValid(DynamicPredicate):
    """Dynamics constraint tying the next y-position to the vehicle model."""

    def __init__(self, name, params, expected_param_types, sim=None):
        def f(x):
            # Residual: decision variable for next py minus model prediction.
            return np.array([x[8] - next_py_f(x[1], x[2], x[3], x[5], x[6], self.sess)])

        def grad(x):
            jac = np.zeros((1, 14))
            jac[0, 8] = 1
            jac[0, 5], jac[0, 6] = next_py_grad(x[1], x[2], x[3], x[5], x[6], self.sess)
            return jac

        self.f = f
        self.grad = grad
        # Bug fix: bound super() call (original used unbound `super(YValid)`).
        super(YValid, self).__init__(name, params, expected_param_types, sim)
class ThetaValid(DynamicPredicate):
    """Dynamics constraint tying the next heading to the vehicle model."""

    def __init__(self, name, params, expected_param_types, sim=None):
        def f(x):
            return np.array([x[9] - next_theta_f(x[2], x[3], x[4], x[5], x[6], self.sess)])

        def grad(x):
            jac = np.zeros((1, 14))
            jac[0, 9] = 1
            jac[0, 5], jac[0, 6] = next_theta_grad(x[2], x[3], x[4], x[5], x[6], self.sess)
            return jac

        self.f = f
        self.grad = grad
        # Bug fix: bound super() call (original used unbound `super(ThetaValid)`).
        super(ThetaValid, self).__init__(name, params, expected_param_types, sim)
class VelocityValid(DynamicPredicate):
    """Dynamics constraint tying the next velocity to the vehicle model."""

    def __init__(self, name, params, expected_param_types, sim=None):
        def f(x):
            return np.array([x[10] - next_v_f(x[3], x[5], x[6], self.sess)])

        def grad(x):
            jac = np.zeros((1, 14))
            jac[0, 10] = 1
            jac[0, 5], jac[0, 6] = next_v_grad(x[3], x[5], x[6], self.sess)
            return jac

        self.f = f
        self.grad = grad
        # Bug fix: bound super() call (original used unbound `super(VelocityValid)`).
        super(VelocityValid, self).__init__(name, params, expected_param_types, sim)
class PhiValid(DynamicPredicate):
    """Dynamics constraint tying the next steering angle to the vehicle model."""

    def __init__(self, name, params, expected_param_types, sim=None):
        def f(x):
            return np.array([x[11] - next_phi_f(x[4], x[5], x[6], self.sess)])

        def grad(x):
            jac = np.zeros((1, 14))
            jac[0, 11] = 1
            jac[0, 5], jac[0, 6] = next_phi_grad(x[4], x[5], x[6], self.sess)
            return jac

        self.f = f
        self.grad = grad
        # Bug fix: bound super() call (original used unbound `super(PhiValid)`).
        super(PhiValid, self).__init__(name, params, expected_param_types, sim)
class At(DrivingPredicate):
    """Equality predicate: obj's (x, y, theta) equals target's (x, y, theta)."""

    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.target = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))]),
                                 (self.target, [("xy", np.array([0,1], dtype=np.int)),
                                                ("theta", np.array([0], dtype=np.int))])])

        # Affine form: [I | -I] stacked over (x, y, theta) == 0.
        A = np.c_[np.eye(3), -np.eye(3)]
        b, val = np.zeros((3, 1)), np.zeros((3, 1))
        aff_e = AffExpr(A, b)
        e = EqExpr(aff_e, val)
        super(At, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
        self.spacial_anchor = True
class VehicleAt(At):
    """At specialized to vehicles."""
    pass
class CrateAt(At):
    """At specialized to crates."""
    pass
class ObstacleAt(At):
    """At specialized to obstacles."""
    pass
class Near(DrivingPredicate):
    """Linear inequality predicate: obj lies within self.dist of target.

    Encodes |obj_xy - target_xy| <= dist componentwise via four half-plane
    constraints.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.obj, self.target, self.dist = params
        # BUG FIX: removed a duplicated comma after the target entry that made
        # this dictionary literal a syntax error.
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int))]),
                                 (self.target, [("xy", np.array([0,1], dtype=np.int))]),
                                 (self.dist, [("value", np.array([0], dtype=np.int))])])
        # Rows: +/-(obj_xy - target_xy) - dist <= 0.
        A = np.c_[np.r_[np.eye(2), -np.eye(2)], np.r_[-np.eye(2), np.eye(2)], -np.ones((4,1))]
        b, val = np.zeros((4, 1)), np.zeros((4, 1))
        aff_e = AffExpr(A, b)
        e = LEqExpr(aff_e, val)
        # BUG FIX: was super(At, self), which raises TypeError because Near does
        # not inherit from At.
        super(Near, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
        self.spacial_anchor = True
class VehicleAtSign(Near):
    """Near specialized to a vehicle positioned at a traffic sign."""
    pass
class VelAt(DrivingPredicate):
    """Linear equality predicate: obj's velocity equals the target value."""
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.target = params
        attr_inds = OrderedDict([(self.obj, [("vel", np.array([0], dtype=np.int))]),
                                 (self.target, [("value", np.array([0], dtype=np.int))])])
        # Enforce vel - value == 0.
        coeff = np.c_[1, -1]
        expr = EqExpr(AffExpr(coeff, np.zeros((1, 1))), np.zeros((1, 1)))
        super(VelAt, self).__init__(name, expr, attr_inds, params, expected_param_types, sim=sim, priority=-2)
        self.spacial_anchor = True
class VehicleVelAt(VelAt):
    """VelAt specialized to vehicles.

    BUG FIX: the base class was misspelled 'Velt', which raised a NameError at
    import time.
    """
    pass
class ExternalVehicleVelAt(VelAt):
    """VelAt variant for externally controlled vehicles.

    For a user-controlled vehicle the velocity constraint is disabled (the
    coefficient matrix is all zeros, so the equality 0 == 0 holds trivially);
    otherwise it behaves exactly like VelAt.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.target = params
        if self.obj.geom.is_user:
            attr_inds = OrderedDict([(self.obj, [("vel", np.array([0], dtype=np.int))]),
                                     (self.target, [("value", np.array([0], dtype=np.int))])])
            # Zero coefficients: the constraint is always satisfied.
            A = np.c_[0, 0]
            b, val = np.zeros((1, 1)), np.zeros((1, 1))
            aff_e = AffExpr(A, b)
            e = EqExpr(aff_e, val)
            # super(VelAt, self) deliberately skips VelAt.__init__ and calls the
            # DrivingPredicate initializer with the custom (trivial) expression.
            super(VelAt, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
            self.spacial_anchor = True
        else:
            # BUG FIX: sim was silently dropped on this path; propagate it.
            super(ExternalVehicleVelAt, self).__init__(name, params, expected_param_types, sim=sim)
class ExternalVehiclePastRoadEnd(DrivingPredicate):
    """Anchors an external vehicle at a point just past the end of its road.

    If the vehicle has no assigned road the constraint is the trivial 0 == 0.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.obj, = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int))])])
        if not self.obj.geom.road:
            # No road: zero coefficients make the equality vacuously true.
            A = np.zeros((2,2))
            b = np.zeros((2,1))
            val = np.zeros((2, 1))
            aff_e = AffExpr(A, b)
            e = EqExpr(aff_e, val)
        else:
            # BUG FIX: 'direction' was assigned the road object itself rather than
            # its heading (cf. ExternalDriveDownRoad, which uses road.direction),
            # so np.cos/np.sin below would fail.
            direction = self.obj.geom.road.direction
            rot_mat = np.array([[np.cos(direction), -np.sin(direction)],
                                [np.sin(direction), np.cos(direction)]])
            road_len = self.obj.geom.road.length
            # Point END_DIST beyond the far end of the road, in world coordinates.
            self.road_end = np.array([self.obj.geom.road.x, self.obj.geom.road.y]) + rot_mat.dot([road_len + END_DIST, 0])
            A = np.eye(2)
            # BUG FIX: reshape to a column vector to match AffExpr's (2, 1) offset
            # convention used everywhere else in this file.
            b = -self.road_end.reshape((2, 1))
            val = np.zeros((2, 1))
            aff_e = AffExpr(A, b)
            e = EqExpr(aff_e, val)
        super(ExternalVehiclePastRoadEnd, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
        self.spacial_anchor = True
class Stationary(DrivingPredicate):
    """Holds an object's pose (x, y, theta) fixed across two consecutive timesteps."""
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.obj, = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0, 1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))])])
        # Enforce pose(t) - pose(t+1) == 0.
        coeff = np.c_[np.eye(3), -np.eye(3)]
        expr = EqExpr(AffExpr(coeff, np.zeros((3, 1))), np.zeros((3, 1)))
        super(Stationary, self).__init__(name, expr, attr_inds, params, expected_param_types,
                                         active_range=(0, 1), sim=sim, priority=-2)
        self.spacial_anchor = False
class VehicleStationary(Stationary):
    """Stationary specialized to vehicles."""
    pass
class CrateStationary(Stationary):
    """Stationary specialized to crates."""
    pass
class ObstacleStationary(Stationary):
    """Stationary specialized to obstacles."""
    pass
class StationaryLimit(DrivingPredicate):
    """Holds a limit parameter's value fixed across two consecutive timesteps."""
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.limit, = params
        attr_inds = OrderedDict([(self.limit, [("value", np.array([0], dtype=np.int))])])
        # Enforce value(t) - value(t+1) == 0.
        coeff = np.c_[1, -1]
        expr = EqExpr(AffExpr(coeff, np.zeros((1, 1))), np.zeros((1, 1)))
        super(StationaryLimit, self).__init__(name, expr, attr_inds, params, expected_param_types,
                                              active_range=(0, 1), sim=sim, priority=-2)
        self.spacial_anchor = False
class IsMP(DrivingPredicate):
    """Bounds per-timestep motion: |pose(t) - pose(t+1)| <= MOVE_FACTOR componentwise."""
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.obj, = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0, 1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))])])
        # Stacked +/- difference rows: +/-(pose(t) - pose(t+1)) <= MOVE_FACTOR.
        diff = np.r_[np.eye(3), -np.eye(3)]
        coeff = np.c_[diff, -diff]
        bound = MOVE_FACTOR * np.ones((6, 1))
        expr = LEqExpr(AffExpr(coeff, np.zeros((6, 1))), bound)
        super(IsMP, self).__init__(name, expr, attr_inds, params, expected_param_types,
                                   active_range=(0, 1), sim=sim, priority=-2)
        self.spacial_anchor = False
class OnSurface(DrivingPredicate):
    """Nonlinear equality predicate: obj's xy lies on the given surface.

    f delegates to the surface geometry's displacement function; a zero vector
    means the point is on the surface.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.surface = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int))])])
        f = lambda x: self.surface.geom.to(x[0], x[1])
        grad = lambda x: np.eye(2)
        val = np.zeros((1, 2))
        # BUG FIX: referenced self.f/self.grad, which are never set on this class
        # (f and grad are locals), raising AttributeError on construction.
        dynamics_expr = Expr(f, grad)
        e = EqExpr(dynamics_expr, val)
        super(OnSurface, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=2)
        self.spacial_anchor = False
class OnRoad(OnSurface):
    """OnSurface specialized to road surfaces."""
    pass
class OnLot(DrivingPredicate):
    """Placeholder for constraining an object onto a parking lot.

    NOTE(review): unlike OnRoad this subclasses DrivingPredicate directly and has
    no body -- presumably unfinished; confirm intended behavior.
    """
    pass
class InLane(DrivingPredicate):
    """Nonlinear equality predicate: obj occupies the given lane of the road.

    NOTE(review): f passes x[2] to to_lane; with attr_inds ordered (xy, theta,
    lane value), x[2] is theta rather than the lane index -- confirm the
    intended argument of road.geom.to_lane.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.obj, self.road, self.lane_num = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))]),
                                 (self.lane_num, [("value", np.array([0], dtype=np.int))])])
        f = lambda x: self.road.geom.to_lane(x[0], x[1], x[2])
        grad = lambda x: np.eye(3)
        val = np.zeros((2, 1))
        # BUG FIX: referenced self.f/self.grad, which are never set (f and grad
        # are locals), raising AttributeError on construction.
        dynamics_expr = Expr(f, grad)
        e = EqExpr(dynamics_expr, val)
        super(InLane, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=2)
        self.spacial_anchor = False
class ExternalInLane(DrivingPredicate):
    """InLane variant that is disabled when the road is user-controlled.

    NOTE(review): the disabled branch returns a 3-vector while val is (2, 1) --
    confirm the expected output shape of road.geom.to_lane.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.obj, self.road, self.lane_num = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))]),
                                 (self.lane_num, [("value", np.array([0], dtype=np.int))])])
        f = lambda x: self.road.geom.to_lane(x[0], x[1], x[2]) if not self.road.geom.is_user else np.zeros((3,))
        grad = lambda x: np.eye(3)
        val = np.zeros((2, 1))
        # BUG FIX: referenced self.f/self.grad, which are never set (f and grad
        # are locals), raising AttributeError on construction.
        dynamics_expr = Expr(f, grad)
        e = EqExpr(dynamics_expr, val)
        super(ExternalInLane, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=2)
        self.spacial_anchor = False
class LeftOfLane(DrivingPredicate):
    """Nonlinear equality predicate: obj occupies the lane left of the given one.

    Falls back to the current lane when already leftmost (x[2] == 0).
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.obj, self.road, self.lane_num = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))]),
                                 (self.lane_num, [("value", np.array([0], dtype=np.int))])])
        f = lambda x: self.road.geom.to_lane(x[0], x[1], x[2] - 1) if x[2] > 0 else self.road.geom.to_lane(x[0], x[1], x[2])
        grad = lambda x: np.eye(3)
        val = np.zeros((2, 1))
        # BUG FIX: referenced self.f/self.grad, which are never set (f and grad
        # are locals), raising AttributeError on construction.
        dynamics_expr = Expr(f, grad)
        e = EqExpr(dynamics_expr, val)
        super(LeftOfLane, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=2)
        self.spacial_anchor = False
class RightOfLane(DrivingPredicate):
    """Nonlinear equality predicate: obj occupies the lane right of the given one.

    Falls back to the current lane when already rightmost.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.obj, self.road, self.lane_num = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int)),
                                             ("theta", np.array([0], dtype=np.int))]),
                                 (self.lane_num, [("value", np.array([0], dtype=np.int))])])
        num_lanes = self.road.geom.num_lanes
        f = lambda x: self.road.geom.to_lane(x[0], x[1], x[2] + 1) if x[2] < num_lanes - 1 else self.road.geom.to_lane(x[0], x[1], x[2])
        grad = lambda x: np.eye(3)
        val = np.zeros((2, 1))
        # BUG FIX: referenced self.f/self.grad, which are never set (f and grad
        # are locals), raising AttributeError on construction.
        dynamics_expr = Expr(f, grad)
        e = EqExpr(dynamics_expr, val)
        super(RightOfLane, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=2)
        self.spacial_anchor = False
class PoseInLane(InLane):
    """InLane variant intended for pose parameters.

    NOTE(review): __init__ is overridden as a no-op, so no constraint expression
    is ever constructed -- confirm whether these stubs are intentional.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        pass
class PoseLeftOfLane(LeftOfLane):
    """LeftOfLane variant intended for pose parameters (no-op stub)."""
    def __init__(self, name, params, expected_param_types, sim=None):
        pass
class PoseRightOfLane(RightOfLane):
    """RightOfLane variant intended for pose parameters (no-op stub)."""
    def __init__(self, name, params, expected_param_types, sim=None):
        pass
class XY_Limit(DrivingPredicate):
    """Linear inequality predicate bounding obj's xy against xlimit/ylimit.

    NOTE(review): the first two rows encode limit - xy <= 0 (limits act as lower
    bounds) and the last two encode -xy <= 0 -- confirm the intended sign
    convention for the limit parameters.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.obj, self.xlimit, self.ylimit = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0, 1], dtype=np.int))]),
                                 (self.xlimit, [("value", np.array([0], dtype=np.int))]),
                                 (self.ylimit, [("value", np.array([0], dtype=np.int))])])
        A = np.zeros((4,4))
        A[:2,:2] = -np.eye(2)
        A[:2,2:4] = np.eye(2)
        A[2:4,:2] = -np.eye(2)
        b, val = np.zeros((4, 1)), np.zeros((4, 1))
        e = LEqExpr(AffExpr(A, b), val)
        # BUG FIX: was super(Stationary, self), which raises TypeError because
        # XY_Limit does not inherit from Stationary.
        super(XY_Limit, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
        self.spacial_anchor = False
class Limit(DrivingPredicate):
    """Base class for linear velocity/acceleration bound predicates.

    Subclasses must set self.A -- a (1, 3) row over (v, u1, limit value) --
    before delegating to this __init__.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.limit = params
        attr_inds = OrderedDict([(self.obj, [("v", np.array([0], dtype=np.int)),
                                             ("u1", np.array([0], dtype=np.int))]),
                                 (self.limit, [("value", np.array([0], dtype=np.int))])])
        b, val = np.zeros((1, 1)), np.zeros((1, 1))
        e = LEqExpr(AffExpr(self.A, b), val)
        # BUG FIX: was super(Stationary, self), which raises TypeError because
        # Limit does not inherit from Stationary.
        super(Limit, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=-2)
        self.spacial_anchor = False
class VelLowerLimit(Limit):
    """Lower bound on velocity: limit - v <= 0."""
    def __init__(self, name, params, expected_param_types, sim=None):
        # Row over (v, u1, limit): -v + limit.
        self.A = np.array([[-1.0, 0.0, 1.0]])
        super(VelLowerLimit, self).__init__(name, params, expected_param_types, sim)
        self.spacial_anchor = False
class VelUpperLimit(Limit):
    """Upper bound on velocity: v - limit <= 0."""
    def __init__(self, name, params, expected_param_types, sim=None):
        # Row over (v, u1, limit): v - limit.
        self.A = np.array([[1.0, 0.0, -1.0]])
        super(VelUpperLimit, self).__init__(name, params, expected_param_types, sim)
        self.spacial_anchor = False
class AccLowerLimit(Limit):
    """Lower bound on acceleration control u1: limit - u1 <= 0."""
    def __init__(self, name, params, expected_param_types, sim=None):
        # Row over (v, u1, limit): -u1 + limit.
        self.A = np.array([[0.0, -1.0, 1.0]])
        super(AccLowerLimit, self).__init__(name, params, expected_param_types, sim)
        self.spacial_anchor = False
class AccUpperLimit(Limit):
    """Upper bound on acceleration control u1: u1 - limit <= 0."""
    def __init__(self, name, params, expected_param_types, sim=None):
        # Row over (v, u1, limit): u1 - limit.
        self.A = np.array([[0.0, 1.0, -1.0]])
        super(AccUpperLimit, self).__init__(name, params, expected_param_types, sim)
        self.spacial_anchor = False
class CollisionPredicate(DrivingPredicate):
    """Nonlinear predicate penalizing overlap between two parameterized objects.

    f temporarily writes the candidate poses into both geometries, samples their
    boundary points, restores the original poses, and returns the collision
    vector between the two point sets (zero when collision-free).
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj1, self.obj2 = params
        attr_inds = OrderedDict([(self.obj1, [("xy", np.array([0,1], dtype=np.int)),
                                              ("theta", np.array([0], dtype=np.int))]),
                                 (self.obj2, [("xy", np.array([0,1], dtype=np.int)),
                                              ("theta", np.array([0], dtype=np.int))])])
        def f(x):
            # x layout: obj1 (x, y, theta) = x[0:3], obj2 (x, y, theta) = x[3:6].
            old_pose1 = self.obj1.geom.update_xy_theta(x[0], x[1], x[2], 0)
            old_pose2 = self.obj2.geom.update_xy_theta(x[3], x[4], x[5], 0)
            obj1_pts = self.obj1.geom.get_points(0, COL_DIST)
            obj2_pts = self.obj2.geom.get_points(0, COL_DIST)
            # NOTE(review): the restore calls pass the timestep first while the
            # calls above pass it last -- one of the two orders is presumably
            # wrong; confirm against the geometry API.
            self.obj1.geom.update_xy_theta(0, old_pose1[0], old_pose1[1], old_pose1[2])
            self.obj2.geom.update_xy_theta(0, old_pose2[0], old_pose2[1], old_pose2[2])
            return collision_vector(obj1_pts, obj2_pts)
        def grad(x):
            # BUG FIX: the gradient took unused (obj1_body, obj2_body) arguments and
            # fell through without returning; it now takes the flat state vector and
            # returns the (2, 6) Jacobian over the two objects' xy columns.
            grad = np.zeros((2,6))
            grad[:, :2] = -np.eye(2)
            grad[:, 3:5] = np.eye(2)
            return grad
        val = np.zeros((2, 1))
        col_expr = Expr(f, grad)
        e = EqExpr(col_expr, val)
        super(CollisionPredicate, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=3)
        self.spacial_anchor = False
class VehicleVehicleCollision(CollisionPredicate):
    """Collision check between two vehicles."""
    pass
class VehicleObstacleCollision(CollisionPredicate):
    """Collision check between a vehicle and an obstacle."""
    pass
class VehicleCrateCollision(CollisionPredicate):
    """Collision check between a vehicle and a crate."""
    pass
class CrateObstacleCollision(CollisionPredicate):
    """Collision check between a crate and an obstacle."""
    pass
class PathCollisionPredicate(DrivingPredicate):
    """Collision predicate swept over a two-timestep path of both objects.

    Like CollisionPredicate, but combines the boundary points of both objects at
    timesteps t and t+1 before computing the collision vector.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj1, self.obj2 = params
        attr_inds = OrderedDict([(self.obj1, [("xy", np.array([0,1], dtype=np.int)),
                                              ("theta", np.array([0], dtype=np.int))]),
                                 (self.obj2, [("xy", np.array([0,1], dtype=np.int)),
                                              ("theta", np.array([0], dtype=np.int))])])
        def f(x):
            # x layout: obj1@t = x[0:3], obj2@t = x[3:6], obj1@t+1 = x[6:9], obj2@t+1 = x[9:12].
            old_0_pose1 = self.obj1.geom.update_xy_theta(x[0], x[1], x[2], 0)
            old_0_pose2 = self.obj2.geom.update_xy_theta(x[3], x[4], x[5], 0)
            old_1_pose1 = self.obj1.geom.update_xy_theta(x[6], x[7], x[8], 1)
            old_1_pose2 = self.obj2.geom.update_xy_theta(x[9], x[10], x[11], 1)
            obj1_pts = self.obj1.geom.get_points(0, COL_DIST) + self.obj1.geom.get_points(1, COL_DIST)
            obj2_pts = self.obj2.geom.get_points(0, COL_DIST) + self.obj2.geom.get_points(1, COL_DIST)
            # NOTE(review): the restore calls pass the timestep first while the
            # calls above pass it last -- confirm against the geometry API.
            self.obj1.geom.update_xy_theta(0, old_0_pose1[0], old_0_pose1[1], old_0_pose1[2])
            self.obj2.geom.update_xy_theta(0, old_0_pose2[0], old_0_pose2[1], old_0_pose2[2])
            self.obj1.geom.update_xy_theta(1, old_1_pose1[0], old_1_pose1[1], old_1_pose1[2])
            self.obj2.geom.update_xy_theta(1, old_1_pose2[0], old_1_pose2[1], old_1_pose2[2])
            return collision_vector(obj1_pts, obj2_pts)
        def grad(x):
            # BUG FIX: took unused (obj1_body, obj2_body) arguments and never
            # returned; now takes the state vector and returns the (2, 12) Jacobian.
            grad = np.zeros((2,12))
            grad[:, :2] = -np.eye(2)
            grad[:, 3:5] = np.eye(2)
            # BUG FIX: the timestep-1 blocks were assigned to 3-wide slices (5:8 and
            # 8:11), which cannot hold a 2x2 identity; the xy columns at t+1 are
            # 6:8 (obj1) and 9:11 (obj2).
            grad[:, 6:8] = -np.eye(2)
            grad[:, 9:11] = np.eye(2)
            return grad
        val = np.zeros((2, 1))
        col_expr = Expr(f, grad)
        e = EqExpr(col_expr, val)
        # BUG FIX: was super(CollisionPredicate, self), which raises TypeError
        # because PathCollisionPredicate does not inherit from CollisionPredicate.
        super(PathCollisionPredicate, self).__init__(name, e, attr_inds, params, expected_param_types, active_range=(0,1), sim=sim, priority=3)
        self.spacial_anchor = False
class VehicleVehiclePathCollision(PathCollisionPredicate):
    """Path-swept collision check between two vehicles."""
    pass
class VehicleObstaclePathCollision(PathCollisionPredicate):
    """Path-swept collision check between a vehicle and an obstacle."""
    pass
class VehicleCratePathCollision(PathCollisionPredicate):
    """Path-swept collision check between a vehicle and a crate."""
    pass
class CrateObstaclePathCollision(PathCollisionPredicate):
    """Path-swept collision check between a crate and an obstacle.

    BUG FIX: previously subclassed CollisionPredicate, unlike its *PathCollision
    siblings, so it only checked single-timestep collisions.
    """
    pass
class Follow(DrivingPredicate):
    """Keeps vehicle v1 a fixed trailing distance behind v2 along v2's heading."""
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.v1, self.v2, self.dist = params
        attr_inds = OrderedDict([(self.v1, [("xy", np.array([0,1], dtype=np.int)),
                                            ("theta", np.array([0], dtype=np.int))]),
                                 (self.v2, [("xy", np.array([0,1], dtype=np.int)),
                                            ("theta", np.array([0], dtype=np.int))]),
                                 (self.dist, [("value", np.array([0], dtype=np.int))])])
        def f(x):
            # x layout: v1 (x, y, theta) = x[0:3], v2 (x, y, theta) = x[3:6], dist = x[6].
            # NOTE(review): v1's pose is written with v2's heading x[5] rather than
            # its own x[2] -- confirm this is intentional.
            old_v1_pose = self.v1.geom.update_xy_theta(x[0], x[1], x[5], 0)
            front_x, front_y = self.v1.geom.vehicle_front()
            # Target point: self.dist behind v2 along v2's heading.
            target_x = x[3] - np.cos(x[5]) * x[6]
            target_y = x[4] - np.sin(x[5]) * x[6]
            x_delta = target_x - x[0]
            y_delta = target_y - x[1]
            theta_delta = x[5] - x[2]
            # Normalize the heading error into (-pi, pi].
            while theta_delta > np.pi:
                theta_delta -= 2 * np.pi
            # BUG FIX: the lower-bound loop compared against +pi, which undid the
            # normalization above and could spin forever; it must compare to -pi.
            while theta_delta < -np.pi:
                theta_delta += 2 * np.pi
            return np.r_[x_delta, y_delta, theta_delta].reshape((3,1))
        def grad(x):
            # BUG FIX: the zero block was (3, 3), yielding a (3, 6) Jacobian for a
            # 7-dimensional state; pad with 4 zero columns instead.
            return np.c_[np.eye(3), np.zeros((3, 4))]
        val = np.zeros((3, 1))
        e = EqExpr(Expr(f, grad), val)
        # BUG FIX: was super(Stationary, self), which raises TypeError because
        # Follow does not inherit from Stationary.
        super(Follow, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=3)
        self.spacial_anchor = False
class StopAtStopSign(DrivingPredicate):
    """Forces a vehicle to hold position while inside a stop sign's zone.

    Active over two timesteps: x[:2] is the vehicle xy at t and x[2:] at t+1.
    When the vehicle is inside the sign's rectangular region (measured in the
    road-aligned frame), f returns the position change, which the EqExpr drives
    to zero.
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 2
        self.obj, self.sign = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int))])])
        def f(x):
            # Not on the sign's road: constraint is trivially satisfied.
            if not self.sign.geom.road.is_on(x[0], x[1]):
                return np.zeros((2,))
            direction = self.sign.geom.road.direction
            # Rotation into the road-aligned frame.
            rot_mat = np.array([[np.cos(direction), -np.sin(direction)],
                                [np.sin(direction), np.cos(direction)]])
            dist_vec = self.sign.geom.loc - x[:2]
            rot_dist_vec = rot_mat.dot(dist_vec)
            # Inside the stopping box: require xy(t+1) == xy(t).
            if np.abs(rot_dist_vec[0]) < self.sign.geom.length / 2. and np.abs(rot_dist_vec[1]) < self.sign.geom.width / 2.:
                return x[2:] - x[:2]
            return np.zeros((2,))
        def grad(x):
            return np.c_[np.eye(2), -np.eye(2)]
        val = np.zeros((2, 1))
        e = EqExpr(Expr(f, grad), val)
        super(StopAtStopSign, self).__init__(name, e, attr_inds, params, expected_param_types, active_range=(0,1), sim=sim, priority=2)
        self.spacial_anchor = False
class ExternalDriveDownRoad(DrivingPredicate):
    """Constrains an external vehicle's motion direction to its road's heading.

    Active over two timesteps: x[:2] is xy at t and x[2:4] at t+1. f returns the
    difference between the unit displacement vector and the road's unit
    direction vector (zero when driving straight down the road).
    """
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 1
        self.obj, = params
        attr_inds = OrderedDict([(self.obj, [("xy", np.array([0,1], dtype=np.int))])])
        if self.obj.geom.road:
            direction = self.obj.geom.road.direction
            rot_mat = np.array([[np.cos(direction), -np.sin(direction)],
                                [np.sin(direction), np.cos(direction)]])
            # Unit vector pointing along the road.
            self.dir_vec = rot_mat.dot([1,0])
        else:
            self.dir_vec = np.zeros((2,))
        def f(x):
            # No assigned road: constraint is trivially satisfied.
            if not self.obj.geom.road:
                return np.zeros((2,))
            dist_vec = x[2:4] - x[:2]
            # NOTE(review): divides by the displacement norm, which is zero when
            # the vehicle does not move between timesteps -- confirm callers
            # guarantee motion.
            return (dist_vec / np.linalg.norm(dist_vec)) - self.dir_vec
        def grad(x):
            return np.c_[np.eye(2), -np.eye(2)]
        val = np.zeros((2, 1))
        e = EqExpr(Expr(f, grad), val)
        super(ExternalDriveDownRoad, self).__init__(name, e, attr_inds, params, expected_param_types, active_range=(0,1), sim=sim, priority=2)
        self.spacial_anchor = False
class WithinDistance(DrivingPredicate):
    """Nonlinear inequality keeping two targets within self.dist of each other."""
    def __init__(self, name, params, expected_param_types, sim=None):
        assert len(params) == 3
        self.target1, self.target2, self.dist = params
        attr_inds = OrderedDict([(self.target1, [("xy", np.array([0,1], dtype=np.int))]),
                                 (self.target2, [("xy", np.array([0,1], dtype=np.int))]),
                                 (self.dist, [("value", np.array([0], dtype=np.int))])])
        def f(x):
            # Vector of length x[4] (the distance bound) along the separation direction.
            scaled_vec = np.abs((x[2:4] - x[:2]) / np.linalg.norm(x[2:4] - x[:2]) * x[4])
            if np.all(scaled_vec < x[2:4] - x[:2]):
                return -x[2:4] + x[:2] + scaled_vec
            elif np.all(-scaled_vec > x[2:4] - x[:2]):
                return -scaled_vec - x[2:4] + x[:2]
            else:
                # Already within the bound.
                return np.zeros((2,))
        def grad(x):
            # BUG FIX: the zero block was (1, 2), which np.c_ cannot stack against
            # the (2, 2) identity blocks; the Jacobian over (xy1, xy2, dist) is (2, 5).
            return np.c_[-np.eye(2), np.eye(2), np.zeros((2, 1))]
        val = np.zeros((2, 1))
        # BUG FIX: referenced self.f/self.grad, which are never set (f and grad
        # are locals), raising AttributeError on construction.
        dynamics_expr = Expr(f, grad)
        e = LEqExpr(dynamics_expr, val)
        super(WithinDistance, self).__init__(name, e, attr_inds, params, expected_param_types, sim=sim, priority=1)
        self.spacial_anchor = False
class PosesWithDistance(WithinDistance):
    """WithinDistance specialized to pose parameters."""
    pass
| [
"m_j_mcdonald@berkeley.edu"
] | m_j_mcdonald@berkeley.edu |
81409cc992d48f7b31a22d72b721517277678d4f | ce1a7e0f31f8859b3e301d8c6a4c772a52527727 | /CSE 218/LAB 1 & 2/lab1 (2).py | b2d587777f48cb4a3030c3afbc9fa8db549496a5 | [] | no_license | SaemHasan/Level-2-Term-1_CSE_BUET | 2d9ab472b98fb8cbe6caf9c616a3882e8f9893ba | 75d0478f77d438e6bc57bc0dcd52fcde2f6a390a | refs/heads/main | 2023-06-27T05:04:52.420317 | 2021-07-31T09:28:06 | 2021-07-31T09:28:06 | 391,307,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | #import lab2(1)
print("HELLO SAYEM")
x=5
print (x)
#a=func(2,3)
| [
"1705027@ugrad.cse.buet.ac.bd"
] | 1705027@ugrad.cse.buet.ac.bd |
16443e3ed297179dafc58fb10e0a1b55dcc6fec6 | c4726317d2a4387cede3808a8f314b02d0001664 | /soft/FQW/paired_comparison/forms.py | f629043924208522123805d47471d491b0265b41 | [] | no_license | CyclopsV/Final_qualifying_work | 8220eedca7b118ce6eb1978d141ef12dabfa1def | 97f8bb3fabe7dbcad3320daa869afc96dde33bd1 | refs/heads/main | 2023-07-07T22:01:30.962479 | 2021-09-03T13:40:06 | 2021-09-03T13:40:06 | 402,754,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | from django.forms import ModelForm, TextInput, Textarea, BooleanField, ClearableFileInput
from .models import Comparisons, Users
class ComparisonsForm(ModelForm):
index_check = BooleanField(help_text='Является ли первый столбец индексами', required=False, initial='True')
class Meta:
model = Comparisons
fields = ['title', 'description', 'file']
widgets = {
'title': TextInput(attrs={
'placeholder': 'Введите название для попарного сравнения (>200 символов)',
'class': 'input-field form-elem'
}),
'description': Textarea(attrs={
'placeholder': 'Введите описание для попарного сравнения',
'class': 'input-field form-elem'
}),
'file': ClearableFileInput(attrs={
'class': 'form-elem'
})
}
class UsersForm(ModelForm):
class Meta:
model = Users
fields = ['name', 'email']
widgets = {
'name': TextInput(attrs={
'placeholder': 'ФИО (>200 символов)',
'class': 'input-field form-elem'
}),
'email': TextInput(attrs={
'placeholder': 'email',
'class': 'input-field form-elem'
})
}
| [
"vnikita3@gmail.com"
] | vnikita3@gmail.com |
f770862e00c3d4bd907fea69199c536b14816ffa | c8b938ab98e7ea8a97bd9f5ec001dbfe1af08eef | /CNN Model/main_cnn_code/cnn_crisis_train.py | 34a588a4980e1aa3fedd0377a008d7bb702b6fa2 | [] | no_license | Rajratnpranesh/CRISIS_MODEL | ca32f4190bb568d25f13f210430646a47ac97ec7 | dd1fe63f1591e9d8290513c8f750f349301d5f08 | refs/heads/master | 2020-09-11T15:18:15.149632 | 2019-11-16T18:41:40 | 2019-11-16T18:41:40 | 222,108,439 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,793 | py | '''Train LSTM RNNs on the AIDR tweet classification task.
GPU command:
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python lstm_rnns_aidr.py
Output after 4 epochs on CPU: ~0.8146
Time per epoch on CPU (Core i7): ~150s.
'''
from __future__ import print_function
import numpy as np
np.random.seed(437) # for reproducibility
# keras related
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.preprocessing import sequence
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from utilities import aidr
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import np_utils
from keras.models import model_from_json
from keras.models import Sequential
from keras.layers import Dense, Dropout, LSTM, BatchNormalization
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from keras.optimizers import adam
#other utilities
import optparse
import logging
import sys
import csv
import os
csv.field_size_limit(sys.maxsize)
from sklearn import metrics
def build_cnn(maxlen, max_features, emb_size=128, emb_matrix=None, nb_filter=250, filter_length=3,
pool_length=2, nb_classes = 2, hidden_size=128, dropout_ratio=0.5, tune_emb=True):
''' build cnn model '''
print('Building model:', 'convolutional neural network (cnn)')
#create the emb layer
if emb_matrix is not None:
max_features, emb_size = emb_matrix.shape
emb_layer = Embedding(max_features, emb_size, weights=[emb_matrix], input_length=maxlen, trainable=tune_emb)
else:
emb_layer = Embedding(max_features, emb_size, input_length=maxlen, trainable=tune_emb)
model = Sequential()
model.add(emb_layer)
model.add(Dropout(dropout_ratio))
# we add a Convolution1D, which will learn nb_filter (word group) filters of size filter_length:
model.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
border_mode='valid', activation='relu', subsample_length=1))
# we use standard max pooling (halving the output of the previous layer):
model.add(MaxPooling1D(pool_length=pool_length))
model.add(Dropout(dropout_ratio))
# We flatten the output of the conv layer, so that we can add a vanilla dense layer:
model.add(Flatten())
# We add a vanilla hidden layer:
model.add(Dense(hidden_size))
model.add(Activation('relu'))
model.add(Dropout(dropout_ratio))
# We project onto a single unit output layer, and squash it with a sigmoid:
if nb_classes == 2:
print('Doing binary classification...')
model.add(Dense(1))
model.add(Activation('sigmoid'))
elif nb_classes > 2:
print('Doing classification with class #', nb_classes)
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
else:
print("Wrong argument nb_classes: ", nb_classes)
exit(1)
return model
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
# parse user input
parser = optparse.OptionParser("%prog [options]")
#file related options
parser.add_option("-g", "--log-file", dest="log_file", help="log file [default: %default]")
parser.add_option("-d", "--data-dir", dest="data_dir", help="directory containing train, test and dev file [default: %default]")
parser.add_option("-D", "--data-spec", dest="data_spec", help="specification for training data (in, out, in_out) [default: %default]")
parser.add_option("-m", "--model-dir", dest="model_dir", help="directory to save the best models [default: %default]")
# parser.add_option("-r", "--train-file", dest="featFile_train")
# parser.add_option("-s", "--test-file", dest="featFile_test")
# parser.add_option("-v", "--validation-file", dest="featFile_dev")
# network related
parser.add_option("-t", "--max-tweet-length", dest="maxlen", type="int", help="maximul tweet length (for fixed size input) [default: %default]") # input size
parser.add_option("-F", "--nb_filter", dest="nb_filter", type="int", help="nb of filter to be applied in convolution over words [default: %default]") # uni, bi-directional
parser.add_option("-r", "--filter_length", dest="filter_length", type="int", help="length of neighborhood in words [default: %default]") # lstm, gru, simpleRNN
parser.add_option("-p", "--pool_length", dest="pool_length", type="int", help="length for max pooling [default: %default]") # lstm, gru, simpleRNN
parser.add_option("-v", "--vocabulary-size", dest="max_features", type="float", help="vocabulary size in percentage [default: %default]") # emb matrix row size
parser.add_option("-e", "--emb-size", dest="emb_size", type="int", help="dimension of embedding [default: %default]") # emb matrix col size
parser.add_option("-s", "--hidden-size", dest="hidden_size", type="int", help="hidden layer size [default: %default]") # size of the hidden layer
parser.add_option("-o", "--dropout_ratio", dest="dropout_ratio", type="float", help="ratio of cells to drop out [default: %default]")
parser.add_option("-i", "--init-type", dest="init_type", help="random or pretrained [default: %default]")
parser.add_option("-f", "--emb-file", dest="emb_file", help="file containing the word vectors [default: %default]")
parser.add_option("-P", "--tune-emb", dest="tune_emb", action="store_false", help="DON't tune word embeddings [default: %default]")
#learning related
parser.add_option("-a", "--learning-algorithm", dest="learn_alg", help="optimization algorithm (adam, sgd, adagrad, rmsprop, adadelta) [default: %default]")
parser.add_option("-b", "--minibatch-size", dest="minibatch_size", type="int", help="minibatch size [default: %default]")
parser.add_option("-l", "--loss", dest="loss", help="loss type (hinge, squared_hinge, binary_crossentropy) [default: %default]")
parser.add_option("-n", "--epochs", dest="epochs", type="int", help="nb of epochs [default: %default]")
parser.set_defaults(
data_dir = "/home/raj/mntShare/share/deep-learning-for-big-crisis-data-master/data/"
# data_dir = "../data/earthquakes/in/"
,data_spec = "in"
,log_file = "log"
,model_dir = "/home/raj/mntShare/share/deep-learning-for-big-crisis-data-master/saved_models/"
,featFile_train = "/home/raj/mntShare/share/deep-learning-for-big-crisis-data-master/data/last-nepal_prccd_train.csv"
,featFile_test = "/home/raj/mntShare/share/deep-learning-for-big-crisis-data-master/data/last-nepal_prccd_test.csv"
,featFile_dev = "/home/raj/mntShare/share/deep-learning-for-big-crisis-data-master/data/last-nepal_prccd_dev.csv"
,learn_alg = "adadelta" # sgd, adagrad, rmsprop, adadelta, adam (default)
,loss = "binary_crossentropy" # hinge, squared_hinge, binary_crossentropy (default)
,minibatch_size = 32
,dropout_ratio = 0.0
,maxlen = 100
,epochs = 25
,max_features = 80
,emb_size = 128
,hidden_size = 128
,nb_filter = 250
,filter_length = 3
,pool_length = 2
,init_type = 'random'
,emb_file = "/home/raj/mntShare/share/deep-learning-for-big-crisis-data-master/embeddings/crisis_embeddings.txt/"
,tune_emb = True
)
options,args = parser.parse_args(sys.argv)
print('Loading data...')
(X_train, y_train), (X_test, y_test), (X_dev, y_dev), max_features, E, label_id = aidr.load_and_numberize_data(path=options.data_dir,
nb_words=options.max_features, init_type=options.init_type,
embfile=options.emb_file, dev_train_merge=0, map_labels_to_five_class=0)
# print("Padding sequences....")
X_train = sequence.pad_sequences(X_train, maxlen=options.maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=options.maxlen)
X_dev = sequence.pad_sequences(X_dev, maxlen=options.maxlen)
#build model...
nb_classes = np.max(y_train) + 1
print('............................')
print(len(X_train), 'train tweets')
print(len(X_test), 'test tweets')
print(len(X_dev), 'dev tweets')
print(max_features - 3, 'vocabulary size')
print(nb_classes, 'different classes')
print('............................')
if nb_classes == 2: # binary
loss = options.loss
class_mode = "binary"
optimizer = options.learn_alg
elif nb_classes > 2: # multi-class
loss = 'categorical_crossentropy'
class_mode = 'categorical'
optimizer = options.learn_alg
print("** optimizer: " + options.learn_alg)
# convert class vectors to binary class matrices [ 1 of K encoding]
y_train_mod = np_utils.to_categorical(y_train, nb_classes)
y_test_mod = np_utils.to_categorical(y_test, nb_classes)
y_dev_mod = np_utils.to_categorical(y_dev, nb_classes)
model = build_cnn(options.maxlen, max_features, emb_matrix=E, emb_size=options.emb_size, nb_filter=options.nb_filter,
filter_length=options.filter_length, pool_length=options.pool_length, nb_classes = nb_classes,
hidden_size=options.hidden_size, dropout_ratio=options.dropout_ratio, tune_emb=options.tune_emb)
model.compile(optimizer=optimizer, loss=loss, class_mode=class_mode)
model_name = options.model_dir + "cnn" + "-" + optimizer + "-" + str(options.nb_filter) + "-" + str(options.filter_length) + \
"-" + str(options.pool_length) + "-" + str (options.tune_emb) +\
"-" + loss + "-" + str (options.minibatch_size) + "-" + str(options.dropout_ratio) + "-init-" + str (options.init_type) + "-" +\
str (options.max_features) + "-" + str (options.emb_size) + "-" + str (options.hidden_size) + ".model.cl." + str(nb_classes) + ".dom." + str(options.data_spec)
# Train with early stopping on validation loss; the checkpointer persists the
# best weights (lowest val_loss) to model_name as training progresses.
earlystopper = EarlyStopping(monitor='val_loss', patience=3, verbose=1)
checkpointer = ModelCheckpoint(filepath=model_name, monitor='val_loss', verbose=1, save_best_only=True)

if nb_classes == 2: # binary
    print ('Training and validating ....')
    model.fit(X_train, y_train, batch_size=options.minibatch_size, nb_epoch=options.epochs,
              validation_data=(X_dev, y_dev), show_accuracy=True, verbose=2, callbacks=[earlystopper, checkpointer])
    print("Test model ...")
    # Reload the checkpointed (best) weights before evaluating on the test set.
    print ("Loading ...", model_name)
    model.load_weights(model_name)
    score, acc = model.evaluate(X_test, y_test, batch_size=options.minibatch_size, show_accuracy=True)
    print('Test accuracy:', acc)
    y_prob = model.predict_proba(X_test)
    ##added by kamla
    print("Predictions")
    for e in y_prob: print(e)
    ###
    roc = metrics.roc_auc_score(y_test, y_prob)
    print("ROC Prediction (binary classification):", roc)
elif nb_classes > 2: # multi-class
    print ('Training and validating ....')
    #check if there is pre-trained model
    #if os.path.exists(model_name) == False:
    #else:
    #print("Loading pre-trained model...")
    #model = model_from_json(open(model_name + ".json").read())
    #model.load_weights(model_name)
    #model.compile(optimizer=optimizer, loss=loss, class_mode=class_mode)
    model.fit(X_train, y_train_mod, batch_size=options.minibatch_size, nb_epoch=options.epochs,
              validation_data=(X_dev, y_dev_mod), show_accuracy=True, verbose=2,
              callbacks=[earlystopper, checkpointer])
    print ("Loading ...", model_name)
    model.load_weights(model_name)
    print("Test model ...")
    y_pred = model.predict_classes(X_test)
    y_test = np.array(y_test)
    acc2 = metrics.accuracy_score(y_test, y_pred)
    print("Raw Accuracy:", acc2)
    #get label ids in sorted
    class_labels = sorted(label_id, key=label_id.get)
    #print (class_labels)
    print (metrics.classification_report(y_test, y_pred, target_names=class_labels, digits=4) )
    print ("Confusion Matrix:\n", metrics.confusion_matrix(y_test, y_pred, labels=range(0, len(class_labels))))
    # NOTE(review): within this `elif nb_classes > 2` branch the binary case
    # below can never fire; only the micro/macro averages are ever printed.
    if nb_classes == 2:
        _p, _r, _f, sup = metrics.precision_recall_fscore_support(y_test, y_pred, average='binary')
        print (" pre: " + str (_p) + " rec: " + str (_r) + " f-score: " + str (_f))
    else:
        mic_p, mic_r, mic_f, sup = metrics.precision_recall_fscore_support(y_test, y_pred, average='micro')
        mac_p, mac_r, mac_f, sup = metrics.precision_recall_fscore_support(y_test, y_pred, average='macro')
        print (" micro pre: " + str (mic_p) + " rec: " + str (mic_r) + " f-score: " + str (mic_f))
        print (" macro pre: " + str (mac_p) + " rec: " + str (mac_r) + " f-score: " + str (mac_f))
# save the architecture finally in json format
json_string = model.to_json()
open(model_name + ".json", 'w').write(json_string)
| [
"raj.ratn18@gamil.com"
] | raj.ratn18@gamil.com |
def run():
    """Print every even number in [0, 1000)."""
    for contador in range(1000):
        if contador % 2 != 0:
            continue
        print(contador)


if __name__ == "__main__":  # BUG FIX: original used a single '=' (SyntaxError)
    run()
| [
"sebastianmora93@gmail.com"
] | sebastianmora93@gmail.com |
47949966bb64eca06b601565bd6738c719e31d0c | 48430e3541ef93d1a87dfeca04707e1c3f01df01 | /day16/Block.py | 6c8b70a530e1bb688948627f0f8bd58ec3b06ace | [] | no_license | shinhash/MyFirstPythonProject | 859a676380deb745aff8f3461ddf45cca2eadc41 | dd408e05b2e5f0dce363caf2555e2659a7d8a1e4 | refs/heads/master | 2022-12-30T15:39:12.295534 | 2020-10-21T11:24:40 | 2020-10-21T11:24:40 | 297,940,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from random import *
class Block():
    """One game piece: a random shape kind plus default status and spawn position."""

    def __init__(self):
        # Shape kind drawn uniformly from the seven possible pieces.
        self.kind = randint(1, 7)
        self.status = 1
        self.xloc = 4
        self.yloc = 2

    def __str__(self):
        fields = ["kind = " + str(self.kind),
                  "status = " + str(self.status),
                  "x = " + str(self.xloc),
                  "y = " + str(self.yloc)]
        return ", ".join(fields)
| [
"shinhash123@gmail.com"
] | shinhash123@gmail.com |
ef87da3bc9de550ff3d3d28e44b67645e9acf1ff | 177b940035c7b84cf97fb4f34c451b8b10e55f95 | /caso1ferreteria/caso1ferreteria/__init__.py | 0e33cddf2fdee6747d4b2606824383c0c6aa1de2 | [] | no_license | sergio-escalona/Caso1Ferreteria | 2dd3c56a58d9856a177c2ec0c2db4272d936c0d8 | 743301ac75cad571110f1e1cac4edafcadc86f4c | refs/heads/main | 2023-05-06T13:45:39.601338 | 2021-06-03T04:36:38 | 2021-06-03T04:36:38 | 359,029,991 | 0 | 0 | null | 2021-05-07T19:02:57 | 2021-04-18T02:54:03 | Python | UTF-8 | Python | false | false | 37 | py | """
Package for caso1ferreteria.
"""
| [
"cristofopo@me.com"
] | cristofopo@me.com |
f185fb7d2592d7b702fbb0aa041313972b43ce49 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/모듈과 패키지/외장함수_20200711174751.py | 99b2fc1648509015a8491be7758fc5ff48cd8b55 | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # list of python modules라고 검색한다
# Python Module Index라는 페이지를 들어간다.
# # glob : 경로 내의 폴더 / 파일 목록 조회(윈도우 dir)
# import glob
# print(glob.glob("*.py")) # 확장자가 py 인 모든 파일
# os : 운영체제에서 제공하는 기본 기능
import os
print(os,get) | [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
def box(width, height):
    """Print a hollow box of '*' characters, `width` wide and `height` tall.

    BUG FIX: the original used Python-2-only print statements; single-argument
    parenthesised calls behave identically under Python 2 and also run under
    Python 3.
    """
    print(width * "*")
    # Interior rows: a star, (width - 2) spaces, a star.
    for i in range(height - 2):
        print("*" + (width - 2) * " " + "*")
    print(width * "*")
box(10,3) | [
"shahzadmuddabbir22@gmail.com"
] | shahzadmuddabbir22@gmail.com |
c32280151535cb4189a8683788d19498c57003ae | 142db8ada9070a9c1d8b2582c5819f1b915c6598 | /tests/test_cache.py | dfbec7f38bc50ef8c78704c61a37b0fe62b446a1 | [
"ISC"
] | permissive | mixja/boto3-session-cache | 77e883622d8da30d94d52691b98e68f2a38c0452 | ba64d358319ffc1a05d648867f635bcfceca4644 | refs/heads/master | 2020-12-30T13:21:56.011964 | 2018-02-26T11:40:27 | 2018-02-26T11:41:21 | 91,209,115 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | import pytest
import os
import json
from datetime import datetime
from boto3_session_cache import JSONFileCache
# Directory where the AWS CLI keeps its cached credential/JSON blobs.
CACHE_PATH = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache'))
def test_cache_lookup(fs):
    """A JSON file already on disk is visible through item access and `in`."""
    file_cache = JSONFileCache()
    cache_key = 'my-profile--arn_aws_iam__111111111111_role-admin'
    expected = {'test': 'test'}
    os.makedirs(CACHE_PATH)
    with open(CACHE_PATH + '/' + cache_key + '.json', 'w') as handle:
        json.dump(expected, handle)
    assert file_cache[cache_key] == expected
    assert cache_key in file_cache
def test_cache_write(fs):
    """Writing a value containing a datetime serialises it to ISO-8601 JSON on disk."""
    file_cache = JSONFileCache()
    cache_key = 'my-profile--arn_aws_iam__222222222222_role-admin'
    file_cache[cache_key] = {
        'test': 'test',
        'date': datetime(2016, 3, 8, 11, 37, 24),
    }
    expected = {'test': 'test', 'date': '2016-03-08T11:37:24'}
    with open(CACHE_PATH + '/' + cache_key + '.json') as stored:
        assert json.load(stored) == expected
def test_cache_miss(fs):
    """Looking up an unknown key raises KeyError naming that key."""
    file_cache = JSONFileCache()
    missing_key = 'some-random-key'
    with pytest.raises(KeyError) as excinfo:
        file_cache[missing_key]
    assert missing_key in str(excinfo.value)
def test_cache_non_serializable(fs):
    """Storing a value json cannot encode raises a descriptive ValueError."""
    file_cache = JSONFileCache()
    bad_key = 'some-bad-key'
    with pytest.raises(ValueError) as excinfo:
        file_cache[bad_key] = set()
    assert 'Value cannot be cached, must be JSON serializable' in str(excinfo.value)
"justin.menga@gmail.com"
] | justin.menga@gmail.com |
8389ca0978988a6a23f2ee68ab0f2e04e1d8f107 | 3c03073d609ea59bf3086828b6f39ccea9830d43 | /test/test_inline_object21.py | 596ed5573daf20904858755d38631195febc423a | [] | no_license | mkj28/codefresh_client | 7226201ca1958b9ee880cebccef531e43880390d | bd2bfeb43fbc27ac00e98428f1a1382df08f5a95 | refs/heads/master | 2020-04-30T07:22:20.265388 | 2019-03-22T07:49:26 | 2019-03-22T07:49:26 | 176,682,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # coding: utf-8
"""
Codefresh API
Codefresh API openAPI 3.0 specification # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import codefresh_client
from codefresh_client.models.inline_object21 import InlineObject21 # noqa: E501
from codefresh_client.rest import ApiException
class TestInlineObject21(unittest.TestCase):
    """InlineObject21 unit test stubs (auto-generated by openapi-generator)."""

    def setUp(self):
        # No fixtures required for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testInlineObject21(self):
        """Test InlineObject21"""
        # FIXME: construct object with mandatory attributes with example values
        # model = codefresh_client.models.inline_object21.InlineObject21()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| [
"michal@gladly.com"
] | michal@gladly.com |
class Student:
    """A student: tracks courses in progress/finished, received homework
    grades, and can rate lecturers."""

    def __init__(self, name, surname, gender):
        self.name = name
        self.surname = surname
        self.gender = gender
        self.finished_courses = []
        self.courses_in_progress = []
        self.grades = {}  # course name -> list of homework grades

    def add_courses(self, course_name):
        """Record course_name as finished."""
        self.finished_courses.append(course_name)

    def rate_lec(self, lecturer, course, grade):
        """Give `lecturer` a `grade` for `course`; return 'Ошибка' on invalid input."""
        if isinstance(lecturer, Lecturer) and course in lecturer.courses_attached \
                and course in self.courses_in_progress and grade in range(10):
            lecturer.grades.setdefault(course, []).append(grade)
        else:
            return 'Ошибка'

    def average_grade(self):
        """Return [(course, integer average grade), ...] sorted by course name."""
        average_list = {}
        for course, mark in self.grades.items():
            average_list[course] = int(sum(mark) / len(mark))
        return sorted(average_list.items())

    def __str__(self):
        # BUG FIX: the original printed the summary and returned None, so
        # str(student) / print(student) raised TypeError. Build and return
        # the same text instead.
        lines = [
            'Имя: ' + self.name,
            'Фамилия: ' + self.surname,
            str(self.average_grade()),
            'Курсы в процессе изучения: ',
            str(self.courses_in_progress),
            'Завершенные курсы: ',
            str(self.finished_courses),
        ]
        return '\n'.join(lines)

    def __lt__(self, other):
        # BUG FIX: a comparison operator must return a boolean; the original
        # printed a verdict and implicitly returned None (always falsy).
        # The printed diagnostics are preserved.
        mine, theirs = self.average_grade(), other.average_grade()
        if mine > theirs:
            print('больше')
            return False
        elif mine < theirs:
            print('меньше')
            return True
        else:
            print('равно')
            return False
class Mentor:
    """Base class for teaching staff: identity plus attached courses and grades."""

    def __init__(self, name, surname):
        self.name = name
        self.surname = surname
        # Filled in later by the calling code / subclasses.
        self.grades = {}
        self.courses_attached = []
class Lecturer(Mentor):
    """Mentor who lectures; rated by students through Student.rate_lec()."""

    def average_grade(self):
        """Return [(course, integer average lecture grade), ...] sorted by course."""
        average_list = {}
        for course, mark in self.grades.items():
            average_list[course] = int(sum(mark) / len(mark))
        return sorted(average_list.items())

    def __str__(self):
        # BUG FIX: the original concatenated the list returned by
        # average_grade() directly onto a str, so printing a rated lecturer
        # raised TypeError. Convert it explicitly.
        return ('Имя: ' + self.name + '\n' + 'Фамилия: ' + self.surname + '\n' +
                str(self.average_grade()) + '\n')

    def __lt__(self, other):
        # BUG FIX: a comparison operator must return a boolean; the original
        # printed a verdict and implicitly returned None (always falsy).
        mine, theirs = self.average_grade(), other.average_grade()
        if mine > theirs:
            print('больше')
            return False
        elif mine < theirs:
            print('меньше')
            return True
        else:
            print('равно')
            return False
class Reviewer(Mentor):
    """Mentor who grades students' homework."""

    def rate_hw(self, student, course, grade):
        """Record `grade` for `student` in `course`; return 'Ошибка' on invalid input."""
        can_rate = (isinstance(student, Student)
                    and course in self.courses_attached
                    and course in student.courses_in_progress
                    and grade in range(10))
        if not can_rate:
            return 'Ошибка'
        student.grades.setdefault(course, []).append(grade)

    def __str__(self):
        return ('Имя: ' + self.name + '\n' + 'Фамилия: ' + self.surname + '\n')
# --- Demo data -------------------------------------------------------------
pet_iv = Student('Petr', 'Ivanov', 'Male')
# NOTE(review): add_courses() appends its argument as-is, so passing a list
# here nests it: finished_courses == [['HTML', 'C++']].
pet_iv.add_courses(['HTML', 'C++'])
pet_iv.courses_in_progress = ['Pyton', 'CSS']
vas_se = Student('Vasiliy', 'Semenov', 'Male')
vas_se.add_courses(['HTML'])
vas_se.courses_in_progress = ['Pyton']
mark_z_l = Lecturer('Mark', 'Zoo')
# NOTE(review): a plain string, not a list; `'Pyton' in 'Pyton'` happens to be
# True (substring containment), which is why rating below still succeeds.
mark_z_l.courses_attached = 'Pyton'
den_braun_l = Lecturer('Den', 'Braun')
den_braun_l.courses_attached = ['Pyton', 'CSS']
kris_lee_r = Reviewer('Kriss', 'Lee')
kris_lee_r.courses_attached = ['Pyton', 'CSS', 'C++']
# Homework grades for the demo students.
kris_lee_r.rate_hw(pet_iv, 'Pyton', 7)
kris_lee_r.rate_hw(pet_iv, 'Pyton', 6)
si_shen_r = Reviewer('Si', 'Shen')
si_shen_r.courses_attached = ['Pyton', 'CSS']
si_shen_r.rate_hw(vas_se, 'Pyton', 4)
si_shen_r.rate_hw(vas_se, 'Pyton', 2)
# Lecture ratings given by the students.
pet_iv.rate_lec(mark_z_l, 'Pyton', 9)
pet_iv.rate_lec(mark_z_l, 'Pyton', 7)
vas_se.rate_lec(den_braun_l, 'Pyton', 9)
vas_se.rate_lec(den_braun_l, 'Pyton', 5)
def average_homework_grades_by_course(student_lits, course_name):
    """Average homework grade for `course_name` over all students.

    Args:
        student_lits: iterable of Student-like objects exposing a `grades`
            dict that maps course name -> list of numeric grades.
        course_name: course to average over.

    Returns:
        Mean of every grade given for the course, rounded to 2 decimal
        places, or 0 when nobody has a grade for it.
    """
    # Flatten every student's grade list for this course. dict.get with a
    # default replaces the original membership test plus f-string lookup,
    # and the leftover debug print of each grades dict is removed.
    all_grades = []
    for stud in student_lits:
        all_grades.extend(stud.grades.get(course_name, []))
    if not all_grades:
        return 0
    return round(sum(all_grades) / len(all_grades), 2)
# Average homework grade in 'Pyton' across both demo students.
std_lst = [pet_iv, vas_se]
print(average_homework_grades_by_course(std_lst, 'Pyton'))
def average_lect_grades_by_course(lecturer_list, course_name):
    """Average lecture grade for `course_name` over all lecturers.

    Args:
        lecturer_list: iterable of Lecturer-like objects exposing a `grades`
            dict that maps course name -> list of numeric grades.
        course_name: course to average over.

    Returns:
        Mean of every grade received for the course, rounded to 2 decimal
        places, or 0 when no lecturer has a grade for it.
    """
    # Same flattening fix as average_homework_grades_by_course: dict.get
    # replaces the membership test + f-string lookup, the accumulation
    # loops collapse to sum/len, and the debug print is removed.
    all_grades = []
    for lect in lecturer_list:
        all_grades.extend(lect.grades.get(course_name, []))
    if not all_grades:
        return 0
    return round(sum(all_grades) / len(all_grades), 2)
# Average lecture grade in 'Pyton' across both demo lecturers.
lecturer_list = [mark_z_l, den_braun_l]
print(average_lect_grades_by_course(lecturer_list, 'Pyton')) | [
"aveto77@mail.ru"
] | aveto77@mail.ru |
b224e371785741f76713f1e7f35515d2c7ed977a | e786939abb1c4a8a4730879e45b2bb0d755a9b7e | /players_scrape.py | bf56853a3a3f9d22976f586fa337acfcf38a38f1 | [] | no_license | keithxm23/scrapesquawka | 2d18091793d737be2872faa64738c0f71be5f7c7 | 2a9928486643fe38137305af180bc655659e4c18 | refs/heads/master | 2021-01-20T01:57:25.338928 | 2014-07-16T23:02:48 | 2014-07-16T23:02:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | #!/usr/bin/env python
import urllib
import os
from bs4 import BeautifulSoup
# Base URL for player thumbnail images (currently unused by this script).
PHOTO_BASE_URL = "http://www.squawka.com/wp-content/themes/squawka_web/uploaded_icons/players/thumb/"
# Stats endpoint; the numeric team id is appended to this URL.
BASE_URL = "http://www.squawka.com/wp-content/themes/squawka_web/stats_process.php?season_id=119&team_id="
TOTAL_TEAMS = 32

found_teams = 0
team_id = 0
# Probe sequential team ids, keeping responses that look like real squad XML
# (tiny responses are error pages) until all teams have been found.
while(True):
    file_name = "./players_data/%s.xml" % str(team_id)
    urllib.urlretrieve(BASE_URL + str(team_id), file_name)
    if os.path.getsize(file_name) < 1000:
        # Too small to be a squad file - discard it.
        os.remove(file_name)
    else:
        found_teams += 1
        print(file_name)
    team_id += 1
    # BUG FIX: the stop condition hard-coded 32 instead of using the
    # TOTAL_TEAMS constant declared above.
    if found_teams == TOTAL_TEAMS: break
| [
"kmascare@akamai.com"
] | kmascare@akamai.com |
095739ac447185b9c95741b1cc83f810274081cf | b6db7fcdb24b4da8415e54de81b9b86bc6fcc757 | /annotator_supreme/config/development.py | 8fee63a37d67b3d6bda166bf841da44c1f1b9005 | [
"MIT"
] | permissive | jtoss/annotator-supreme | dcc3cb638f58342c3d8ac3033dbd3dfb486e891d | 9c1fe72f968f6facb8fafc65ba49014014049c3c | refs/heads/master | 2021-09-10T06:27:07.074733 | 2018-03-21T14:36:45 | 2018-03-21T14:36:45 | 112,369,673 | 0 | 0 | null | 2017-11-28T17:56:41 | 2017-11-28T17:56:40 | null | UTF-8 | Python | false | false | 192 | py | import logging
# Deployment environment name for this settings module.
ENV = 'development'
# Keep framework debug mode off even in development.
APP_DEBUG = False

# CORS configuration
CORS_HEADERS = 'Content-Type'
CORS_ORIGINS = '*'

# Keyspace name used by the data layer (presumably Cassandra - verify).
KEYSPACE = "annotator_supreme"

# Logging
LOG_LEVEL = logging.INFO
| [
"gustavofuhr87@gmail.com"
] | gustavofuhr87@gmail.com |
259af6fd38f7530b5db9bc123a7453b2252e7725 | fa8a563f71519d0bd188f9218c6e34e18e6833f7 | /CIFAR10/cifar10_input.py | 2033d7a931b65d83554c053dd56150780ec7ac44 | [] | no_license | EricGaoJiaBao/cat_emotion | b9493a4629a7e26ad5d1d724508634137c87c5cd | 7c5b3c7e82ef1e1889515892d7f2e69c1f73b904 | refs/heads/master | 2021-01-23T01:18:45.582704 | 2017-09-11T09:57:07 | 2017-09-11T09:57:07 | 102,433,313 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,999 | py | #By @Kevin Xu
#kevin28520@gmail.com
#Youtube: https://www.youtube.com/channel/UCVCSn4qQXTDAtGWpWAe4Plw
#Chinese weibo: http://bit.ly/2nAmOcO
#The aim of this project is to use TensorFlow to process our own data.
# - cifar10_input.py: read in data and generate batches
# - cifar10.py: build the model architecture, train, evaluate
# I used Ubuntu with Python 3.5, TensorFlow 1.0*, other OS should also be good.
# I didn't use data argumentation, I spent less than 30 mins with 10K steps.
# data: cifar10 binary version
# https://www.cs.toronto.edu/~kriz/cifar.html
# data size: ~184M
# How to run?
# 0. you need to change the data directory
# 1. run cifar10.py
# 2. call train() in the console to train the model
# 3. call evaluate() in the console to test on the test data
# Note:
# it is suggested to restart your kenel to train the model multiple times
# (in order to clear all the variables in the memory)
# Otherwise errors may occur: conv1/weights/biases already exist......
#%%
import tensorflow as tf
import numpy as np
import os
#%% Reading data
def read_cifar10(data_dir, is_train, batch_size, shuffle):
    """Read CIFAR10

    Args:
        data_dir: the directory of CIFAR10 (binary version)
        is_train: boolean; True reads the 5 training batch files, False the test batch
        batch_size: number of examples per returned batch
        shuffle: boolean; True draws batches with tf.train.shuffle_batch

    Returns:
        images: 4D tensor, [batch_size, height, width, 3], tf.float32
        label_batch: one-hot labels, [batch_size, 10], tf.float32
        (the original docstring claimed int32 1-D labels, but the labels are
        one-hot encoded below)
    """
    # CIFAR-10 binary record layout: 1 label byte followed by a 32x32x3 image
    # stored channel-first (D/H/W).
    img_width = 32
    img_height = 32
    img_depth = 3
    label_bytes = 1
    image_bytes = img_width*img_height*img_depth

    with tf.name_scope('input'):

        if is_train:
            filenames = [os.path.join(data_dir, 'data_batch_%d.bin' %ii)
                         for ii in np.arange(1, 6)]
        else:
            filenames = [os.path.join(data_dir, 'test_batch.bin')]

        filename_queue = tf.train.string_input_producer(filenames)

        # Fixed-length reader: every record is exactly label + image bytes.
        reader = tf.FixedLengthRecordReader(label_bytes + image_bytes)

        key, value = reader.read(filename_queue)

        record_bytes = tf.decode_raw(value, tf.uint8)

        label = tf.slice(record_bytes, [0], [label_bytes])
        label = tf.cast(label, tf.int32)

        image_raw = tf.slice(record_bytes, [label_bytes], [image_bytes])
        image_raw = tf.reshape(image_raw, [img_depth, img_height, img_width])
        image = tf.transpose(image_raw, (1,2,0)) # convert from D/H/W to H/W/D
        image = tf.cast(image, tf.float32)

        # data augmentation (disabled)
        # image = tf.random_crop(image, [24, 24, 3])# randomly crop the image size to 24 x 24
        # image = tf.image.random_flip_left_right(image)
        # image = tf.image.random_brightness(image, max_delta=63)
        # image = tf.image.random_contrast(image,lower=0.2,upper=1.8)

        image = tf.image.per_image_standardization(image) #substract off the mean and divide by the variance

        if shuffle:
            images, label_batch = tf.train.shuffle_batch(
                                    [image, label],
                                    batch_size = batch_size,
                                    num_threads= 16,
                                    capacity = 2000,
                                    min_after_dequeue = 1500)
        else:
            images, label_batch = tf.train.batch(
                                    [image, label],
                                    batch_size = batch_size,
                                    num_threads = 16,
                                    capacity= 2000)
        # return images, tf.reshape(label_batch, [batch_size])

        ## ONE-HOT
        n_classes = 10
        label_batch = tf.one_hot(label_batch, depth= n_classes)

        return images, tf.reshape(label_batch, [batch_size, n_classes])
#%% TEST
# To test the generated batches of images
# When training the model, DO comment the following codes
#import matplotlib.pyplot as plt
#
#data_dir = '/home/kevin/tensorflow/CIFAR10/data/cifar-10-batches-bin/'
#BATCH_SIZE = 10
#image_batch, label_batch = read_cifar10(data_dir,
# is_train=True,
# batch_size=BATCH_SIZE,
# shuffle=True)
#
#with tf.Session() as sess:
# i = 0
# coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(coord=coord)
#
# try:
# while not coord.should_stop() and i<1:
#
# img, label = sess.run([image_batch, label_batch])
#
# # just test one batch
# for j in np.arange(BATCH_SIZE):
# print('label: %d' %label[j])
# plt.imshow(img[j,:,:,:])
# plt.show()
# i+=1
#
# except tf.errors.OutOfRangeError:
# print('done!')
# finally:
# coord.request_stop()
# coord.join(threads)
| [
"noreply@github.com"
] | noreply@github.com |
81d721b7a0d2e8728825bf4b491d4817d1e1a862 | eeda5834c6bd0632487407e161f8f19a6328f9ea | /Topological_Sorting.py | 9d72d3a06b81c46407ef5253d5dc612846bff6e8 | [] | no_license | Halal375657/Graph-Theory | 01709f7e9125c635c680bc66557d77254a394fed | 549f0f7a7acee9e3a236044648ac635c6138b657 | refs/heads/master | 2020-11-25T02:25:58.986387 | 2020-03-23T09:50:22 | 2020-03-23T09:50:22 | 228,451,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,483 | py |
#
# Topological Sorting Algorithm.
#
# O(V + E)
#
from collections import defaultdict
# graph : node -> current in-degree count (mutated and consumed by topsort()).
graph = {}
# Graph : node -> list of direct successors (adjacency list).
Graph = defaultdict(list)
# Add edge of directed graph.
def addEdges(u, v):
    """Append the directed edge u -> v to the adjacency list."""
    Graph[u].append(v)
# Adding in-degree.
def addDegree(u, v):
    """Bump v's in-degree for edge u -> v; ensure u exists with degree 0."""
    graph[v] = graph.get(v, 0) + 1
    # Sources that never receive an edge must still appear in the table so
    # topsort() can seed from them.
    graph.setdefault(u, 0)
# Topological Sorting.
def topsort():
    """Kahn's algorithm over the module-level `graph` / `Graph` tables.

    Destructively consumes the in-degree table `graph` and returns the
    nodes in a topological order.
    """
    order = []
    pending = list(graph)
    emitted = {node: False for node in pending}
    while pending:
        u = pending.pop()
        if graph[u] != 0:
            continue
        # A node may be popped twice (seeded initially and re-pushed when
        # its in-degree hits zero); only emit and expand it once.
        if not emitted[u]:
            order.append(u)
            emitted[u] = True
            for v in Graph[u]:
                graph[v] -= 1
                if graph[v] == 0:
                    pending.append(v)
    return order
if __name__=="__main__":
    # Small demo: a daily-routine dependency DAG.
    edges = (
        ("Breakfast", "Office"),
        ("Dress up", "Office"),
        ("Office", "Dinner"),
        ("Office", "Sports"),
        ("Office", "Email"),
        ("Email", "Sports"),
        ("Email", "Dinner"),
        ("Dinner", "Sports")
    )
    # Register in-degrees and adjacency for every edge.
    for (u, v) in edges:
        addDegree(u, v)
        addEdges(u, v)
    res = topsort()
    print(' -> '.join(map(str, res)))
| [
"halaluddin375657@gmail.com"
] | halaluddin375657@gmail.com |
00830b1923d7887a191dd956ab3d9d6ca167d13f | 4855ca7f4b48e170206a20d75446c17ddf77cf10 | /controllers/default.py | 01fc4e0d04bacbcbb9c5117e6ebedacebe9abad4 | [
"LicenseRef-scancode-public-domain"
] | permissive | jorgeagua/init | 148e3c059538a94b8ca65e46c5ca90aef62ac0a0 | 75f9254e3961f302132859d188e2c54e9a624779 | refs/heads/master | 2021-01-17T17:00:26.252948 | 2012-05-12T23:06:21 | 2012-05-12T23:06:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,494 | py | # -*- coding: utf-8 -*-
# IDE-only shim: never executed at runtime (web2py injects these names into
# the controller's environment); it exists solely so editors/autocompletion
# can resolve the gluon API.
if 0:
    from gluon.globals import *
    from gluon.html import *
    from gluon.http import *
    from gluon.tools import Auth
    from gluon.sqlhtml import SQLFORM, SQLTABLE, form_factory
    session = Session()
    request = Request()
    response = Response()
### required - do not delete
def user(): return dict(form=auth())  # login/register/profile pages via Auth
def download(): return response.download(request,db)  # serves uploaded files
def call(): return service()  # web-service (XML-RPC/JSON-RPC) entry point
### end requires
def index():
    """Default landing action: renders views/default/index.html with no data."""
    return dict()
def error():
    """Generic error page action."""
    return dict()
@auth.requires_login()
def cliente():
    """Client admin page: jQuery DataTable of all clients plus a creation wizard."""
    # Client listing (PowerTable plugin / DataTables).
    datasource = db(db.cliente.id>0).select()
    powerTable = plugins.powerTable
    powerTable.datasource = datasource
    powerTable.headers = 'labels'
    powerTable.showkeycolumn = False
    powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)
    powerTable.uitheme = request.vars.get('theme','Smoothness')
    powerTable.dtfeatures["sScrollY"] = "100%"
    powerTable.dtfeatures["sScrollX"] = "100%"
    powerTable.dtfeatures['sPaginationType'] = 'full_numbers'
    powerTable.dtfeatures["iDisplayLength"] = 20
    # New-client form (single-step wizard).
    mystep = [dict(title='NUEVO CLIENTE',Legend='Ingrese los datos del nuevo cliente',
                   fields=['empresa','contacto','telefono','correo'])]
    from plugin_PowerFormWizard import PowerFormWizard
    options = {'description':False,'legend':True,'validate':True}
    form = PowerFormWizard(db.cliente, steps=mystep, options=options)
    if form.accepts(request.vars, session):
        response.flash = "registro aceptado"
    elif form.errors:
        form.step_validation()
        response.flash = "error en los datos"
    return dict( table=powerTable.create(),form=form)
@auth.requires_login()
def categoria():
    """Article-category admin page: DataTable of categories plus a creation wizard."""
    # Category listing (PowerTable plugin / DataTables).
    datasource = db(db.categoria.id>0).select()
    powerTable = plugins.powerTable
    powerTable.datasource = datasource
    powerTable.headers = 'labels'
    powerTable.showkeycolumn = False
    powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)
    powerTable.uitheme = request.vars.get('theme','Smoothness')
    powerTable.dtfeatures["sScrollY"] = "100%"
    powerTable.dtfeatures["sScrollX"] = "100%"
    powerTable.dtfeatures['sPaginationType'] = 'full_numbers'
    powerTable.dtfeatures["iDisplayLength"] = 30
    # New-category form (single-step wizard).
    mystep = [dict(title='NUEVAS CATEGORIAS',Legend='Ingrese una Categoria para Agrupar artículos',fields=['name'])]
    from plugin_PowerFormWizard import PowerFormWizard
    options = {'description':False,'legend':True,'validate':True}
    form = PowerFormWizard(db.categoria, steps=mystep, options=options)
    if form.accepts(request.vars, session):
        response.flash = "registro aceptado"
    elif form.errors:
        form.step_validation()
        response.flash = "error en los datos"
    return dict( table=powerTable.create(),form=form)
@auth.requires_login()
def articulo():
    """Article admin page: DataTable with expandable detail rows plus a creation wizard."""
    # Article listing; `memo` is shown in an expandable details row.
    datasource = db(db.articulo.id>0).select()
    powerTable = plugins.powerTable
    powerTable.datasource = datasource
    powerTable.headers = 'labels'
    powerTable.showkeycolumn = False
    powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)
    powerTable.uitheme = request.vars.get('theme','Smoothness')
    powerTable.dtfeatures["sScrollY"] = "100%"
    powerTable.dtfeatures["sScrollX"] = "100%"
    powerTable.dtfeatures['sPaginationType'] = 'full_numbers'
    powerTable.dtfeatures["iDisplayLength"] = 10
    powerTable.extrajs = dict(autoresize={},
                              tooltip={},
                              details={'detailscolumns':'articulo.memo'}
                              )
    powerTable.keycolumn = 'articulo.id'
    powerTable.columns = ['articulo.descripcion','articulo.link',
                          'articulo.precio','articulo.categoria']
    # New-article form (single-step wizard).
    mystep = [dict(title='NUEVOS ARTICULOS',Legend='Ingrese los datos del articulo',
                   fields=['descripcion','memo','iva','precio','categoria'])]
    from plugin_PowerFormWizard import PowerFormWizard
    options = {'description':False,'legend':True,'validate':True}
    form = PowerFormWizard(db.articulo, steps=mystep, options=options)
    if form.accepts(request.vars, session):
        response.flash = "registro aceptado"
    elif form.errors:
        form.step_validation()
        response.flash = "error en los datos"
    return dict( table=powerTable.create(),form=form)
def features():
    """JSON data callback for the PowerGrid: visible rows depend on the user's role."""
    from plugin_PowerGrid.CallBack import CallBack
    # BUG FIX: DAL queries must be combined with the bitwise operators & / |.
    # The Python keywords `and` / `or` do not overload on Query objects and
    # silently evaluate to a single operand, so the original role filters
    # did not express what they read as.
    if (auth.has_membership(role='Admin')):
        # Admin: every movement that is still open or held by a technician.
        return CallBack((db.movimientos.id>0) &
                        ((db.movimientos.fecha_devuelta==None) |
                         (db.movimientos.estado == "Parte En Poder de Técnico")))
    elif (auth.has_membership(role='control')):
        # Control: closed movements no longer held by a technician.
        return CallBack((db.movimientos.fecha_devuelta!=None) &
                        (db.movimientos.estado != "Parte En Poder de Técnico"))
    elif (auth.has_membership(role='usuario')):
        # Regular user: open movements or parts held by a technician.
        return CallBack((db.movimientos.fecha_devuelta==None) |
                        (db.movimientos.estado == "Parte En Poder de Técnico"))
@auth.requires_login()
def presupuestos():
    """Pending-movements grid (PowerGrid) with a per-row 'cargar comprobante' editor.

    Rows are fetched through features() via a JSON callback; most PowerGrid
    options below are left commented out as a record of what the plugin accepts.
    """
    from plugin_PowerGrid.PowerGrid import PowerGrid
    p = PowerGrid(
        callback=URL('default','features', extension='json'),
        buttons=[
            ##('details',URL('plugin_PowerGrid','data',args=['read','products'])+'/${id}','_blank','Details of Record ${id}','modal positive left button','magnifier',[600,500]),
            ('cargar comprobante',URL('plugin_PowerGrid','data',args=['update','movimientos'])+'/${id}','_blank','Editando Registro ${id}','refreshmodal middle button', 'pen',[600,800]),
            ##('delete',URL('plugin_PowerGrid','data',args=['delete','products'])+'/${id}','_blank','Are you sure you want to delete record ${id}','confirmationmodal right negative button', 'cross'),
        ],
        addurl=URL('plugin_PowerGrid','data',args=['create','movimientos']),
        addLabel='Add New Record',
        addTitle='You are adding a new record',
        headers=[['id','Codigo'],['entregado_x','Entrega: '], ['retirado_x','Retira'],
                 ],
        #headers=[['entregado_x','Entrega: '], ['retirado_x','Retira'], ['id_articulo','Articulo'],['cantidad','Cantidad'],
        #['cliente','Usado en']],
        #hidecontrolbuttons=True,
        #hiderefreshbutton=True,
        hideaddbutton=True,
        #_id="banana",
        #target="melancia",
        #searchBy='equal',
        minH=800,
        options=dict(#colsWidth=[60,60,60],
                     #width=700,
                     #buttonBackTitle='Back',
                     #buttonMax=4,
                     #buttonNextTitle='Next',
                     #success="""js[function(){alert('Executed on success');}]js""",
                     #before="""js[function(){alert('Executed before');}]js""",
                     #error="""js[function(){alert('Executed on load error');}]js""",
                     #buttonOption=False,
                     #buttonsWidth=200
                     #buttonTitle='oi',
                     #clickFx=False,
                     #debug=True,
                     #find='name',search='J',
                     #searchOption=False,
                     searchButtonLabel='Buscar',
                     searchButtonTitle='Clique para buscar',
                     searchFocus=True,
                     #cache=True,
                     #contentType='application/x-www-form-urlencoded; charset=utf-8',
                     #type='get',
                     #dataType='jsonp',
                     #jsonp=True,
                     #jsonpCallback='callback',
                     #findsName=[['name','name']],
                     #hoverFx=False,
                     #loadingOption=True,
                     #loadingText='Carregando...',
                     #messageOption=False,
                     #noResultOption=False,
                     noResultText='no se encontraron datos ',
                     #page=1,
                     #rows=3,
                     #rowsNumber=[3,25],
                     #params='&blablabla=45',
                     #resize=False,
                     #resultOption=False,
                     #resultText= 'Exibindo {from} - {to} de {total} registros',
                     #scroll=True,height=100,
                     #searchText='Busque aqui',
                     #sortName='name',
                     #sortOrder='asc',
                     #template='template',
                     #templateStyle='blabla',
                     ),
        )
    return dict(p=p)
@auth.requires_login()
def presupuestos2():
    """Read-only listing of every movement joined with its client (PowerTable)."""
    class Virtual(object):
        # Virtual column: tooltip describing who took the part and why.
        @virtualsettings(label=T('Informacion: '))
        def virtualtooltip(self):
            return T('se retiro para <strong>%s</strong>, en concepto de <strong>%s</strong><br>' %
                     (self.cliente.empresa,self.movimientos.concepto))
    # Inner join: every movement row with its client record.
    datasource = db(db.movimientos.cliente==db.cliente.id).select()
    powerTable = plugins.powerTable
    powerTable.datasource = datasource
    powerTable.virtualfields = Virtual()
    powerTable.headers = 'labels'
    powerTable.showkeycolumn = True
    powerTable.dtfeatures['bJQueryUI'] = request.vars.get('jqueryui',True)
    powerTable.uitheme = request.vars.get('theme','Smoothness')
    powerTable.dtfeatures['sScrollX'] = '100%'
    powerTable.dtfeatures['sPaginationType'] = request.vars.get('pager','scrolling')
    powerTable.extrajs = dict(autoresize={},
                              tooltip={},
                              )
    powerTable.columns = ['movimientos.id',
                          'movimientos.entregado_x',
                          'movimientos.retirado_x',
                          'movimientos.id_articulo',
                          'movimientos.cantidad',
                          'movimientos.Comprobante',
                          'movimientos.fecha_pedido',
                          'movimientos.fecha_devuelta'
                          ]
    return dict(table=powerTable.create())
@auth.requires(auth.has_membership(role='Admin'))
def movimientos():
    """Admin-only wizard to register a stock movement; mirrors it into memomovi.

    NOTE(review): the audit copy is taken from `rows.last()` after selecting
    every movimientos row - O(n) and racy under concurrent inserts; the id of
    the accepted form's new record would identify the row directly. Confirm
    before changing, as it depends on PowerFormWizard's accepts() behaviour.
    """
    mystep = [dict(title='MOVIMIENTOS',Legend='Ingrese los datos solicitados',
                   fields=['entregado_x','retirado_x','id_articulo','cantidad',
                           'estado','Comprobante',
                           'cliente','concepto'])]
    from plugin_PowerFormWizard import PowerFormWizard
    options = {'description':False,'legend':True,'validate':True}
    form = PowerFormWizard(db.movimientos, steps=mystep, options=options)
    if form.accepts(request.vars, session):
        response.flash = "registro aceptado"
        # Copy the freshly inserted movement into the memomovi history table.
        rows = db(db.movimientos.id>0).select()
        last_row = rows.last()
        a=db.memomovi.insert(referencia=last_row.id,entregado_x=last_row.entregado_x,
                             retirado_x=last_row.retirado_x,id_articulo=last_row.id_articulo,
                             cantidad=last_row.cantidad,fecha_pedido=last_row.fecha_pedido,
                             fecha_devuelta=last_row.fecha_devuelta,estado=last_row.estado,
                             Comprobante=last_row.Comprobante,cliente=last_row.cliente,
                             concepto=last_row.concepto)
        #db.commit()
    elif form.errors:
        form.step_validation()
        response.flash = "error en los datos"
    return dict(form=form)
| [
"jorge.agua@gmail.com"
] | jorge.agua@gmail.com |
a6ad218d7bc11de0390f8dc2972083016003f7da | fedaabb798b5110eadf7e90f461f3b52728e1b44 | /project_cellphone/cellphone.py | 53a9be9e1da9ec74a9f72b3023ee7d64e00e8aa9 | [] | no_license | HikaruG/NCU_scripts | cd776ca66174299b2054f9c2cb6c53381f3734e5 | 2bcb31613d303b7556bb85b1fb70c3112025fa9d | refs/heads/master | 2020-07-11T15:14:56.601002 | 2019-08-27T00:04:36 | 2019-08-27T00:04:36 | 204,580,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,264 | py | import cellphone_phonecallsys
reload(cellphone_phonecallsys)
import cellphone_calendar
reload(cellphone_calendar)
import cellphone_contact
reload(cellphone_contact)
class Cellphone(object):
    """Console cellphone simulator (Python 2) dispatching to the call,
    contact, and calendar sub-systems via numeric menus."""

    def __init__(self, phone_id, phone_num,phone_location):
        self.phone_id = phone_id
        self.phone_num = phone_num
        self.phone_location = phone_location
        # Sub-systems; Phonecallsys is constructed with the phone's location.
        self.calls = cellphone_phonecallsys.Phonecallsys(phone_location)
        self.contacts = cellphone_contact.Contact()
        self.calendar = cellphone_calendar.Calendar()

    def startup(self): #this startup method will generate the dict for the operations
        print 'Starting up the process... '
        #self.contacts =
        #self.calendar =
        # Menu label per top-level operation number.
        self.operations = {1:'You selected the operation : PhoneCall',
                           2:'You selected the operation : Contacts',
                           3:'You selected the operation : Calendar'
                           }
        # Dispatch tables: sub-menu number -> bound method of the sub-system.
        # NOTE(review): entry 2 below is a plain string, so choosing it in
        # processing() calls a str and raises TypeError.
        self.calloperations = {1: self.calls.dial,
                               2: "hello its not done yet... "}
        self.contactoperations = {1: self.contacts.new_contact,
                                  2: self.contacts.display,
                                  3: self.contacts.del_contact}
        self.calendaroperations = {1: self.calendar.new_calendar,
                                   2: self.calendar.display,
                                   3: self.calendar.clear}
        print 'Welcome back %s!' %self.phone_id

    def choose_operation(self):
        """Prompt for a top-level operation (1-3) and remember it in self.sel_op."""
        sel_op = -1 #reset of the variable
        sel_op = int(raw_input("please choose the action you want to operate...\n" \
                               "1. Phonecall \n" \
                               "2. Contacts \n" \
                               "3. Calendar \n"
                               "Your input: "))
        print self.operations[sel_op]
        self.sel_op = sel_op

    def processing(self):
        """Run one sub-menu round for the operation chosen in choose_operation()."""
        if self.sel_op == 1:
            option = int(raw_input('please select how you want to make the phonecall ... \n'
                                   '1. From dial\n'
                                   '2. From contacts\n'
                                   'Your input: '))
            self.calloperations[option]()
        elif self.sel_op == 2:
            option = int(raw_input('please select what you want to do with the contacts ... \n'
                                   '1. Add a new contact\n'
                                   '2. Check your current contacts\n'
                                   '3. delete a contact\n'
                                   'Your input: '))
            self.contactoperations[option]()
        elif self.sel_op == 3:
            option = int(raw_input('please select what you want to do with the calendar ... \n'
                                   '1. Add a new event\n'
                                   '2. Check your current calendar\n'
                                   '3. clear the calendar\n'
                                   'Your input: '))
            self.calendaroperations[option]()
if __name__ == '__main__':
    # Demo run: boot one handset and loop the menu forever (Ctrl-C to stop).
    oppo = Cellphone('OppoR15','0686907924','Taiwan')
    oppo.startup()
    while 1:
        oppo.choose_operation()
        oppo.processing()
        oppo.processing()
        oppo.choose_operation()
        oppo.processing()
        oppo.processing()
| [
"noreply@github.com"
] | noreply@github.com |
bad4e975b3fc893bea41ab614dc569b05124cc83 | 773789b345985925028a6a00c5ad0dd3609fad3d | /week07_assignment/wsgi_simple_server.py | eed3d5c8d799f4725e6dd2476a18434db34c635e | [] | no_license | spedl/uw_python | ea254924b37d2946da4048341d00dc52a2c4c4fa | 7073f060d725921028c44ce59f654c54f492b45f | refs/heads/master | 2020-05-28T02:38:31.837734 | 2012-03-19T04:02:43 | 2012-03-19T04:02:43 | 3,181,852 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | """
Simple WSGI application runner, including its own WSGI server
Usage: python simple_wsgi_server.py <application> <port>
Example: python simple_wsgi_server.py test.wsgi 8080
Default application is wsgi_test, default port is 8000
The application module can be named anything, but the
application callable it provides MUST be named 'application'
"""
import sys
from wsgiref.simple_server import make_server
# Resolve the application module name and port from argv, falling back to
# the defaults documented in the module docstring above.
appname = 'wsgi_test'
port = 8000
nargs = len(sys.argv)
if nargs > 1:
    appname = sys.argv[1]
if nargs > 2:
    port = int(sys.argv[2])
# Import the module by name; it must expose a WSGI callable named
# 'application' (see the usage notes above).
app = __import__(appname)
httpd = make_server('', port, app.application)
print "Running %s on port %s ..." % (appname, port)
# Respond to requests until process is killed
httpd.serve_forever()
| [
"jacob@bigdoor.com"
] | jacob@bigdoor.com |
0bbdd4bf7a5f32254ed7f31f8c35606cae64ef68 | 3e5ecad4d2f681f2f4f749109cc99deea1209ea4 | /tf114/tf11_2_diabetes.py | 25d2c7c7cc73f3820f81ab3f4d6d2093ecf8625e | [] | no_license | SunghoonSeok/Study | f41ede390079037b2090e6df20e5fb38f2e59b8f | 50f02b9c9bac904cd4f6923b41efabe524ff3d8a | refs/heads/master | 2023-06-18T06:47:55.545323 | 2021-07-05T00:47:55 | 2021-07-05T00:47:55 | 324,866,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | from sklearn.datasets import load_diabetes
import tensorflow as tf
tf.compat.v1.set_random_seed(66)  # reproducible weight initialisation

# scikit-learn diabetes regression set: 442 samples x 10 features.
dataset = load_diabetes()
x_data = dataset.data
y_data = dataset.target
y_data = y_data.reshape(-1,1)  # column vector to match the graph output
print(x_data.shape, y_data.shape) # (442, 10) (442,1)

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, train_size=0.8, shuffle=True, random_state=66)

# Scale features to [0, 1]; fit on the training split only.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

# TF1-style graph: placeholders plus one linear layer y = xW + b.
x = tf.compat.v1.placeholder(tf.float32, shape=[None, 10])
y = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
y_true = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
y_pred = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
w = tf.Variable(tf.random.normal([10,1]), name='weight')
b = tf.Variable(tf.random.normal([1]), name='bias')
# hypothesis = x * w + b
hypothesis = tf.matmul(x, w) + b
cost = tf.reduce_mean(tf.square(hypothesis - y)) # loss='mse'
train = tf.train.AdamOptimizer(learning_rate=0.002).minimize(cost) # optimizer + train
from sklearn.metrics import r2_score
# r2 = r2_score(y_true, y_pred)
# A with-statement is used so the session is closed automatically.
import numpy as np
with tf.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for step in range(150001):
        cost_val, w_val, b_val, hy_val, _ = sess.run([cost,w,b,hypothesis,train], feed_dict={x:x_train,y:y_train})
        if step %20 == 0:
            print(step, "loss :",cost_val) # epoch, loss
    # Evaluate on the held-out split and report R^2.
    y_predict = sess.run([hypothesis], feed_dict={x:x_test,y:y_test})
    y_predict = np.array(y_predict)
    y_predict = y_predict.reshape(-1,1)
    print(r2_score(y_test, y_predict))
# 0.5063167888110058
| [
"76455292+SunghoonSeok@users.noreply.github.com"
] | 76455292+SunghoonSeok@users.noreply.github.com |
4e39f2b57ab4e11da0fd51bf75e4cf23aae756bb | 8e1c2f90d2a121dceb2fe6f8f3482c0821b49cc8 | /Girilen iki sayı arasındaki sayıları toplayan programı while döngüs....py | 2f9342fd380870e0af9bd7c1be7e724ef754f97b | [] | no_license | KaanPY/PythonBaslangicOrnekleri | 205f9c36e0289ce636337f76d8ae5fb8ab011fb4 | 88261fcf3c445f84c53651c4c612ceb1fb662004 | refs/heads/main | 2023-07-27T00:03:41.694648 | 2021-09-11T10:43:18 | 2021-09-11T10:43:18 | 405,351,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # Girilen iki sayı arasındaki sayıları toplayan programı while döngüsü ile ekrana yazınız.
# Read the two (inclusive) bounds from the user.  Prompts stay in Turkish:
# they are user-facing runtime strings.
sayi = int(input("Sayı bir giriniz: "))
sayi2 = int(input("Sayı iki giriniz: "))
toplam = 0
# Sum every integer from the first bound up to and including the second.
while(sayi <= sayi2):
    toplam = toplam + sayi
    sayi = sayi + 1
print(toplam)
| [
"noreply@github.com"
] | noreply@github.com |
691dbc4c44ced539c61b9cfa403a6ef79b3a159c | 96d55aa2f0b5ce6fb6c706dde47eeffc06853539 | /flipbookapp/views.py | 7ad7a1e96b5e510194a9c52bc6e57aec1250f1b5 | [] | no_license | laamp/flipbook | 5b63a0d281b8cde2cc0763157090a69c36929622 | c34f63885bfea8027c105d8ab207d331928d2442 | refs/heads/master | 2020-06-16T20:58:32.685578 | 2019-07-09T05:39:00 | 2019-07-09T05:39:00 | 195,702,036 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | from django.shortcuts import render
from django.http import HttpResponse
from .models import Collection, User
def home(req):
    """Landing page for the app: a static plain-text greeting."""
    greeting = "App home. Hello!"
    return HttpResponse(greeting)
def collection_details(req, collection_id):
    """Plain-text detail view for a single Collection.

    Raises Collection.DoesNotExist for an unknown id (no 404 handling yet).
    """
    collection = Collection.objects.get(id=collection_id)
    # NOTE(review): the context dict is keyed by the raw collection_id, and
    # the whole dict (key included) is interpolated into the response text.
    context = {collection_id: {"id": collection_id, "title": collection.title}}
    # return render(req, "flipbookapp/index.html", context)
    return HttpResponse(f"Details for collection #{context}")
# Create your views here.
| [
"lance.matthew.smith@gmail.com"
] | lance.matthew.smith@gmail.com |
1ea0910b6c40f92198824d0746983e6181e9e305 | e3a975a04c262d7e848efb5a72d861b9c51e4dcd | /python_parallelism/part_1_threads/4_locks/rLock_usage.py | a798252b8a5428537d9abb0f92bdce933466bd68 | [] | no_license | victorjabur/presentations | b7ac9b25fc122893f87ebfcc0101a8f7307af151 | cf6dc1ea1f04d7108982dc4e1b063ec28b85e6a9 | refs/heads/master | 2021-01-19T12:42:01.726813 | 2017-08-13T00:09:16 | 2017-08-13T00:09:16 | 82,331,575 | 4 | 0 | null | 2017-02-17T19:36:59 | 2017-02-17T19:36:59 | null | UTF-8 | Python | false | false | 1,195 | py | import threading
import time
class Box(object):
    """A counter guarded by a single class-level re-entrant lock.

    Demonstrates threading.RLock: add()/remove() take the lock and then call
    execute(), which acquires the *same* lock again -- legal only because the
    lock is re-entrant.  Using `with` (instead of bare acquire()/release())
    guarantees the lock is released even if the body raises.
    """

    lock = threading.RLock()

    def __init__(self):
        # Net number of items currently in the box.
        self.total_items = 0

    def execute(self, n):
        """Adjust the item count by n under the lock."""
        with Box.lock:
            self.total_items += n

    def add(self):
        """Add one item (re-acquires the RLock inside execute())."""
        with Box.lock:
            self.execute(1)

    def remove(self):
        """Remove one item (re-acquires the RLock inside execute())."""
        with Box.lock:
            self.execute(-1)
## These two functions run n in separate
## threads and call the Box's methods
def adder(box, items):
    """Add `items` items to `box`, one every five seconds."""
    remaining = items
    while remaining > 0:
        print("adding 1_simple_process item in the box\n")
        box.add()
        time.sleep(5)
        remaining -= 1
def remover(box, items):
    """Remove `items` items from `box`, one every five seconds."""
    remaining = items
    while remaining > 0:
        print("removing 1_simple_process item in the box")
        box.remove()
        time.sleep(5)
        remaining -= 1
## the main program build some
## threads and make sure it works
if __name__ == "__main__":
items = 5
print("putting %s items in the box " % items)
box = Box()
t1 = threading.Thread(target=adder, args=(box, items))
t2 = threading.Thread(target=remover, args=(box, items))
t1.start()
t2.start() | [
"victorjabur@gmail.com"
] | victorjabur@gmail.com |
2fa2ad29b02a2a0a4a2b5c8c83e7221a7053cd6c | 0090c267509e2a0e4a3309f7cf4d0d1c20a437f4 | /pdfstat/rate.py | d55f32f1afe29b0f4cd92a1c6eb92d8178dfbe9a | [] | no_license | krzygorz/pdfstat | 6e8cd06582de8baa2444df8108464394011a4611 | 761a2f8b503ca7ad97af351f4ff586b16e9096f8 | refs/heads/master | 2022-06-17T19:22:59.914878 | 2022-05-15T10:18:03 | 2022-05-15T10:18:03 | 249,234,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from functools import namedtuple
from itertools import tee
def pairwise(iterable):
    """Yield overlapping neighbour pairs: s -> (s0, s1), (s1, s2), ..."""
    left, right = tee(iterable)
    next(right, None)  # advance the second iterator one step
    return zip(left, right)
def weighted(pairs):
    """Weighted mean of (value, weight) pairs.

    Raises ZeroDivisionError when the weights sum to zero (e.g. no pairs).
    """
    value_total = 0
    weight_total = 0
    for value, weight in pairs:
        value_total += value * weight
        weight_total += weight
    return value_total / weight_total
def pages_per_day(hist, factor=.8):
    """Exponentially-weighted reading speed in pages per day.

    hist -- assumed newest-first; entries need .time and .page -- TODO confirm.
    factor -- per-day decay applied to older progress deltas (0 < factor <= 1).
    """
    reference_time = hist[0].time
    def weight(entry):
        # factor**d: full weight for today's entry, decaying d days back.
        return factor**(reference_time-entry.time).days
    return weighted(
        (a.page-b.page, weight(a)) for a,b in pairwise(hist)
) | [
"krzygorz@gmail.com"
] | krzygorz@gmail.com |
86b0f7f50a85078402d3aee998d9312891fefd9f | 41911c73dec55eec3eac36b6d2e4346d949d2d96 | /Word Censor1.py | c23a1a32ecb185367daaa8e506ec3c058ddde7b0 | [] | no_license | mbarbour0/Practice | cff321a0e5e5090da266016f8e31a1902e1c5cb2 | 781cf1e10154f43adec59f736ef1716acba6c98d | refs/heads/master | 2021-09-07T17:02:37.501748 | 2018-02-26T14:02:35 | 2018-02-26T14:02:35 | 115,876,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | def censor(text, word):
words = text.split()
count = 0
result = ''
stars = '*' * len(word)
for i in words:
if i == word:
words[count] = stars
count += 1
result = ' '.join(words)
return result
print censor('hey hey what can i do?', 'hey') | [
"m.barbour0217@me.com"
] | m.barbour0217@me.com |
2558ae8382de1f1f6dc75e1393f2ca78965581fd | cf9f0b657a79cf3de0a0df68b915bdf23277e7c3 | /snake.py | 45129b5f4d87b8cb3a828eb53f0c8a11294fc50a | [] | no_license | imaadfakier/snake | c8043c3cd78fef4fce995eebc25327f84afdd11a | df83ae8aca553633eb4ae727a3683820c8282831 | refs/heads/main | 2023-08-15T12:56:37.689558 | 2021-10-18T08:38:22 | 2021-10-18T08:38:22 | 418,224,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | from turtle import Turtle
# Initial body coordinates (tail to head) and per-step movement distance.
SNAKE_SEGMENTS_POSITIONS = [(-40, 0), (-20, 0), (0, 0)]
SNAKE_MOVE_DISTANCE = 20
# Turtle headings in degrees.
UP, DOWN, LEFT, RIGHT = 90, 270, 180, 0
class Snake:
    """The snake: an ordered list of square turtle segments.

    Segments are kept tail-first; the head is the last element.  A move
    shifts every body segment into its successor's spot and then advances
    the head by SNAKE_MOVE_DISTANCE.
    """

    def __init__(self):
        self.snake_segments = list()
        self.create_snake()
        # The head is the last segment of the freshly built body.
        self.head = self.snake_segments[-1]

    def create_snake(self):
        """Create the initial three-segment body."""
        for position in SNAKE_SEGMENTS_POSITIONS:
            self.add_snake_segment(position)

    def move(self):
        """Shift each body segment to its successor's position, then step the head."""
        for snake_segment in self.snake_segments[:len(self.snake_segments) - 1:]:
            snake_segment.goto(
                self.snake_segments[self.snake_segments.index(snake_segment) + 1].position()
            )
        self.head.forward(SNAKE_MOVE_DISTANCE)

    def up(self):
        # Reversals are ignored: cannot turn up while heading down.
        if self.head.heading() != DOWN:
            self.head.setheading(UP)

    def down(self):
        if self.head.heading() != UP:
            self.head.setheading(DOWN)

    def left(self):
        if self.head.heading() != RIGHT:
            self.head.setheading(LEFT)

    def right(self):
        if self.head.heading() != LEFT:
            self.head.setheading(RIGHT)

    def add_snake_segment(self, position):
        """Create one white square segment at `position` and register it.

        The first three segments (initial body) are appended; later segments
        are inserted at the front, i.e. become the new tail.
        """
        turtle = Turtle(shape='square')
        turtle.penup()
        turtle.goto(position)
        turtle.pensize(width=20)
        turtle.color('white')
        if len(self.snake_segments) < 3:
            self.snake_segments.append(turtle)
            return
        self.snake_segments.insert(0, turtle)

    def extend(self):
        """Grow by one segment placed on the current tail's position."""
        self.add_snake_segment(position=self.snake_segments[0].position())

    def reset_position(self):
        """Park all old segments off-screen and rebuild a fresh body."""
        for snake_segment in self.snake_segments:
            snake_segment.goto(700, 700)
        self.snake_segments.clear()
        self.create_snake()
        self.head = self.snake_segments[-1]
# Guard must compare against '__main__' (not 'main').
if __name__ == '__main__':
    # Only reached when this module is executed directly, not when imported.
    print('Running snake module - not main.py file')
| [
"58392951+imaadfakier@users.noreply.github.com"
] | 58392951+imaadfakier@users.noreply.github.com |
65c54b11e8cdd95bfecd92cf81023fefba87aa9f | 8b0d16a998719783a37b86d3dcae36ecf27029a8 | /demo/mention_pairs_extractor_cpy_12122018/pipeline.py.bak | ebc7ab1d2c7c08b7e75adc3d3b4954800ddbfe8f | [] | no_license | yusrbi/ExQuisiTe | 4e2fbbc466fbf3f0346c27adfd2e76bb0d29fd2d | 3e14df63104022c01a669ea224fb132c0bbac935 | refs/heads/master | 2020-04-18T02:58:57.789571 | 2019-01-23T15:38:40 | 2019-01-23T15:38:40 | 167,182,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,156 | bak | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import findspark
findspark.init()
from pyspark import SparkContext, SparkConf
from loader import Document_Loader
from extractor import FeatureExtractor
import extractor
import codecs
class PipeLine:
    """Spark driver (Python 2): loads documents in batches and writes
    mention-pair features plus document edges as text files, one output
    directory per batch."""

    def __init__(self):
        #TODO remover the setMaster, when running on the cluster
        self.conf = SparkConf().setAppName("TableCRR").setMaster('local')
        self.sc = SparkContext(conf=self.conf)

    def __enter__(self):
        # Context-manager support: `with PipeLine() as p: ...`
        return self

    def start(self, n_negative_instances):
        """Process every batch the document loader yields.

        n_negative_instances -- negative samples per mention pair; also used
        in the output directory names.
        """
        # spark context
        sc = self.sc
        # load documents
        batch_no =1
        #log4jLogger = sc._jvm.org.apache.log4j
        # logging.getLogger("py4j").setLevel(logging.ERROR)
        # logger = logging.getLogger("py4j")
        #log4jLogger.LogManager.getLogger("TableQCRR")
        # Log through the JVM's log4j so messages land in Spark's own logs.
        log4jLogger = sc._jvm.org.apache.log4j
        LOGGER = log4jLogger.LogManager.getLogger(__name__)
        LOGGER.info("Hello logger...")
        LOGGER.info("pyspark script logger initialized")
        doc_loader = Document_Loader(batch_size = 150)
        while doc_loader.load_next_batch():
            documents = doc_loader.get_batch()
            if not documents:
                break
            partition_num = 2
            doc_rdd = sc.parallelize(documents, partition_num)
            #print("Documents were loaded")
            LOGGER.info("Loaded Batch %d"%batch_no)
            #for doc in documents.values():
            #print(doc.doc_id)
            # extract features
            doc_rdd.flatMap(lambda x: x.get_all_edges()).saveAsTextFile('results_%sneg/edges_%s'%(n_negative_instances,batch_no))
            #TODO write document edges
            feature_extractor = FeatureExtractor(n_negative_instances)
            doc_rdd.map(feature_extractor.extract_features_for_document)\
                .flatMap(feature_extractor.extract_mention_pairs_with_features)\
                .flatMap(lambda x: [mp.get_as_csv_line().encode('utf-8') for mp in x])\
                .saveAsTextFile('results_%sneg/output_%s'%(n_negative_instances,batch_no))
            '''
            #print("Feature Extraction Done")
            doc_features_rdd = doc_rdd.map(feature_extractor.extract_features_for_document)
            mention_pairs_features = doc_features_rdd.flatMap(feature_extractor.extract_mention_pairs_with_features)
            #print("mention pairs Created")
            mention_pairs_csv = mention_pairs_features.flatMap(lambda x: [mp.get_as_csv_line().encode('utf-8') for mp in x])
            mention_pairs_csv.saveAsTextFile('output_%s'%batch_no)'''
            batch_no = batch_no +1
        # Dump any log messages the extractor accumulated.
        # NOTE(review): feature_extractor is only bound once a batch has been
        # processed; an empty run would raise NameError here -- confirm.
        for log_msg in feature_extractor.logger:
            print(log_msg)
            LOGGER.info(log_msg)
        #mention_pairs = mention_pairs_csv.collect()
        #print ("mXId\tmTid\tdiff\tdiff_max\tdif_sum\tscale\tprec\tunit\tmod\ttokn\tnps\tsurfaceform\tGT")
        #print mention_pairs
        '''with codecs.open('output.csv','w','utf-8') as out:
        out.write("mXId\tmTid\tdiff\tdiff_max\tdif_sum\tscale\tprec\tunit\tmod\ttokn\tnps\tsurfaceform\tGT\n")
        for mention_list in mention_pairs:
        for lin in mention_list:
        out.write(lin)
        for mention_pair_list in mention_pairs:
        for mention_pair in mention_pair_list:
        #if mention_pair.ground_truth:
        out.write(mention_pair.get_as_csv_line().encode('utf-8')) #the line include a new line character
        '''
        # done
        #for log_msg in feature_extractor.logger:
        # LOGGER.info(log_msg)

    def __exit__(self, exc_type, exc_value, traceback):
        # Always stop the SparkContext, even when start() raised.
        self.sc.stop()
def main():
    """CLI entry point: optional argv[1] = negative samples per mention pair."""
    n_neg = 1
    if len(sys.argv) > 1:
        n_neg = int(sys.argv[1])
    print("Generating mention-pairs with %s negative samples"%n_neg)
    # The with-block guarantees SparkContext shutdown via PipeLine.__exit__.
    with PipeLine() as pipeline:
        pipeline.start(n_neg)
if __name__ == '__main__':
    main()
| [
"yibrahim@mpi-inf.mpg.de"
] | yibrahim@mpi-inf.mpg.de |
900a29135e6327fba64530bbf7efb62664e1e3e0 | d6f9856369de739eb1d48f36704032fc9d6ad279 | /01-spider/practice/seleniumJdGrandandProduct.py | 5ca58c43fb5dff43ab415094a4e3f08a04e7f0fa | [] | no_license | huchangchun/spider | 7fd1cfd1aced71887700d1a1db176b898ca75775 | 7349c91bc5abf4a633752cc7a33fe24756d2ac97 | refs/heads/master | 2020-03-13T06:54:04.316515 | 2019-12-20T02:08:45 | 2019-12-20T02:08:45 | 131,014,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,957 | py | #encoding=utf-8
from selenium.webdriver.support import ui
from selenium.webdriver import Chrome
import time,os
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from lxml import etree
import numpy as np
# Start a Chrome session from whichever of the two known chromedriver
# install locations exists on this machine (runs at import time).
if os.path.exists("C:\\Users\zxy\AppData\Local\Google\Chrome\Application\chromedriver.exe"):
    driver = Chrome("C:\\Users\zxy\AppData\Local\Google\Chrome\Application\chromedriver.exe")
else:
    driver = Chrome("C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe")
"""
环境要求:
1. pip install selenium
2.需要将chromedriver.exe放在driver所指路径,下载时要与本地chrome版本匹配或更新,具体版本查看chrome
下载地址:http://npm.taobao.org/mirrors/chromedriver/
"""
def isNoneOrEmpty(s):
    """Return True when `s` is None or an empty container/string, False when
    it is a non-empty container/string or an int; for any other type fall
    through and implicitly return None (falsy), exactly as before.

    The original had five identical `len(s) == 0` branches, one per
    container type; they are collapsed into a single isinstance check.
    """
    if s is None:
        return True
    if isinstance(s, (list, tuple, str, dict, set)):
        return len(s) == 0
    if isinstance(s, int):
        # ints (including bool) are never "empty".
        return False
    # Other types (e.g. float) return None implicitly, as the original did.
def grabBrands(url):
    """Scrape brand names from a JD search page using the module-level driver.

    Clicks the brand multi-select so the full (fixed) brand list is rendered,
    then collects each anchor's title.  Returns a list of brand names, or
    None on any scraping error.
    """
    goodsname = []
    try:
        driver.get(url)
        # XPaths for the brand-filter controls on the search page.
        mulitipchose = "//*[@id='J_selector']/div[1]/div/div[3]/a[2]"
        more = "//*[@id='J_selector']/div[1]/div/div[3]/a[1]"
        cancelBtn = "//*[@id='J_selector']/div[1]/div/div[2]/div[4]/a[2]"
        element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, mulitipchose)))
        element.click()
        # Wait until the expanded panel (its cancel button) is interactive.
        WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, cancelBtn)))
        page = driver.page_source
        soup = BeautifulSoup(page,'html.parser')
        data1 = soup.find('ul',{"class":"J_valueList v-fixed"})
        datali = data1.find_all('li')
        for i in datali:
            goodsname.append(i.a.attrs['title'])
        print("品牌数量:", len(goodsname))
    except Exception as ex:
        # Close the current tab; quit() could also be used to close the browser.
        return None
    return goodsname
def grabGoodsTypeWithClass(url):
    """Scrape category names and links from a JD search page.

    Opens a fresh Chrome instance, walks every "s-category" selector row and
    collects each entry's title and href.  Returns (names, hrefs), or None
    when the page has no category rows or scraping fails.
    """
    goodsname=[]
    goodshref=[]
    try:
        brower = webdriver.Chrome()
        brower.get(url)
        page = brower.page_source
        soup = BeautifulSoup(page,'html.parser')
        dataAll = soup.find_all(attrs={"class":"J_selectorLine s-category"})
        if isNoneOrEmpty(dataAll):
            return None
        for i in range(len(dataAll)):
            curdata = dataAll[i].find("ul",{"class":"J_valueList"})
            datali = curdata.find_all('li')
            # NOTE(review): this inner loop reuses the name `i`, shadowing
            # the row index above; harmless here but easy to trip over.
            for i in datali:
                goodsname.append(i.a.attrs['title'])
                goodshref.append(i.a.attrs['href'])
        print("当前数量:", len(goodsname))
        print(goodsname[:10])
    except Exception as ex:
        print(ex)
        # Close the current tab; quit() could also be used to close the browser.
        return None
    return goodsname, goodshref
def runGrabBrands(goodClass: list, notallowlist=None):
    """For each top-level class, gather its categories, then every category's
    brands, and save the deduplicated brand list to an Excel file.

    goodClass -- top-level JD search keywords.
    notallowlist -- optional category names to exclude before phase 2.
    """
    url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
    allgoods = []
    allgoodsBrands=[]
    # Phase 1: collect category names, retrying until a fetch succeeds.
    # NOTE(review): grabGoodsTypeWithClass returns None on failure, which
    # would make the tuple unpacking below raise instead of retrying.
    for gcls in goodClass:
        print("大类:", gcls)
        flag = True
        while flag:
            curgoods, _ = grabGoodsTypeWithClass(url.format(gcls))
            if not isNoneOrEmpty(curgoods):
                allgoods.extend(curgoods)
                print("当前总品类数:", len(allgoods))
                flag = False
            else:
                print("{}获取异常,重试".format(gcls))
    print("总品类数:", len(allgoods))
    allgoods = list(set(allgoods))
    allgoods = [g for g in allgoods if len(g) > 1]
    print("去重后总品类数:", len(allgoods))
    if notallowlist is not None:
        allgoods = [g for g in allgoods if g not in notallowlist]
    # Persist the category list for reference.
    with open("{}.txt".format(",".join(goodClass)), mode='w', encoding='utf-8') as f:
        f.write(",".join(allgoods))
    print("前十个品类:", ",".join(allgoods[:10]))
    # Phase 2: fetch each category's brand list; after a failure the category
    # name is split on '/' and each part retried.
    for goodtype in allgoods:
        print("获取品类品牌:{} ".format(goodtype))
        flag = True
        i= 0
        while flag:
            if isinstance(goodtype, list):
                curgoodbrand = []
                for gt in goodtype:
                    curgoodbrand.extend(grabBrands(url.format(gt)))
            else:
                curgoodbrand = grabBrands(url.format(goodtype))
            if not isNoneOrEmpty(curgoodbrand):
                print(curgoodbrand)
                allgoodsBrands.extend(curgoodbrand)
                print("当前总品牌数量:", len(allgoodsBrands))
                flag = False
            else:
                print("{}获取异常,重试".format(goodtype))
                goodtype = goodtype.split("/")
                i += 1
                if i == 3:
                    print("获取异常,重试{}次失败".format(i))
                    # NOTE(review): flag stays True here, so a category that
                    # keeps failing retries forever -- was False intended?
                    flag = True
    print("总品牌数量:", len(allgoodsBrands))
    print("去重后数量:", len(list(set(allgoodsBrands))))
    if isNoneOrEmpty(allgoodsBrands):
        print("数据为空")
        return None
    saveData("{}品牌.xlsx".format(",".join(goodClass)),list(set(allgoodsBrands)))
def getAliseFromBrand(brand):
import re
kuohaopattern = "((.*))"
curalias = re.findall(kuohaopattern, brand)
curbrand = re.sub(kuohaopattern,"", brand)
if len(curalias) > 0:
return curbrand,curalias[0]
else:
return curbrand,curbrand
def saveData(savefile, data):
    """Write brand names and their parenthesised aliases to an Excel file
    with columns Brand / Alias."""
    brands, aliass = [],[]
    import re
    # NOTE(review): getAliseFromBrand is called twice per entry; unpacking a
    # single call would halve the work.
    for b in data:
        aliass.append(getAliseFromBrand(b)[1])
        brands.append(getAliseFromBrand(b)[0])
    assert len(brands) == len(aliass)
    import pandas as pd
    df = pd.DataFrame({"Brand": brands,"Alias": aliass})
    df.to_excel(savefile, encoding='utf-8')
    print("finnish")
def readBrandsFromXlsx(filepath, savefile):
    """Deduplicate a Brand/Alias spreadsheet, keeping the first alias seen
    for each brand, and write the result to `savefile`."""
    import pandas as pd
    df = pd.read_excel(filepath, encoding='utf-8')
    brands = df['Brand'].tolist()
    aliass = df['Alias'].tolist()
    brandsdic ={}
    for brand, alias in zip (brands, aliass):
        # First occurrence wins; later duplicates are skipped.
        if brand in brandsdic:
            continue
        else:
            brandsdic[brand] = alias
    df = pd.DataFrame({"Brand": list(brandsdic.keys()),"Alias": list(brandsdic.values())})
    df.to_excel(savefile, encoding='utf-8')
def testGrabBrand():
    """Manual smoke test: fetch the brand list for one keyword and save it."""
    good = '牙膏'
    url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
    curgoodbrand = grabBrands(url.format(good))
    print(curgoodbrand)
    saveData("{}品牌.xlsx".format(good), curgoodbrand)
def grabGoodTitlesWithGoodType(url):
    """Scrape product titles from a JD search result, paging through up to
    13 result pages.  Returns the collected titles, or None on failure.

    Fix: the pagination loop previously rebuilt `selector` for each new page
    but never re-ran gettitles(), so page 1's titles were appended once per
    page; titles are now re-extracted from every page.
    """
    try:
        brower = webdriver.Chrome()
        brower.get(url)
        page = brower.page_source
        selector = etree.HTML(page)
        # XPaths: title attributes, per-item title/type text, page count,
        # and the "next page" button.
        titlespath = "//*[@id='J_goodsList']/ul/li/div/div[3]/a/@title"
        subtitlespath = "//*[@id='J_goodsList']/ul/li[{}]/div/div[3]/a/em/text()"
        subtypespath = "//*[@id='J_goodsList']/ul/li[{}]/div/div[3]/a/em/font/text()"
        totalpagepath ='//*[@id="J_bottomPage"]/span[2]/em[1]/b/text()'
        nextpagebtnpath = '//*[@id="J_bottomPage"]/span[1]/a[9]'
        totalpageCount = int(selector.xpath(totalpagepath)[0])
        if totalpageCount > 13:
            print("超过13页,截断")
            totalpageCount = 13
        titles = []

        def gettitles(slt):
            """Extract one page's titles from the parsed document `slt`."""
            try:
                curtitles = []
                emselectors = slt.xpath(titlespath)
                for i in range(len(emselectors)):
                    emtypes = slt.xpath(subtypespath.format(i))
                    emtitles = slt.xpath(subtitlespath.format(i))
                    if isinstance(emtypes,list):
                        if len(emtypes) == 0:
                            emtypes =['']
                    # NOTE(review): comparing the list itself to 0 is never
                    # true; len(emtitles) == 0 was probably intended.
                    if (emtitles) == 0:
                        continue
                    curtitle =''
                    emtypes = emtypes[::-1]
                    # Interleave titles and type fragments; note the pops
                    # below drain emtypes before the final join.
                    for i in range(len(emtitles)):
                        curtitle += emtitles[i]
                        if len(emtypes) > 0:
                            curtitle += emtypes.pop()
                    if len(emtypes) > 0:
                        for i in range(len(emtypes)):
                            curtitle += emtypes.pop()
                    curtitle = "".join(list(set(emtypes))) + "".join(emtitles)
                    if len(curtitle) !=0:
                        curtitles.append(curtitle)
                return curtitles
            except Exception as ex:
                return []

        curtitles = gettitles(selector)
        if len(curtitles) != 0:
            titles.extend(curtitles)
        for i in range(totalpageCount - 1):
            try:
                brower.find_elements_by_xpath(nextpagebtnpath)[0].click()
                WebDriverWait(brower, 10)#.until(EC.element_to_be_clickable((By.XPATH, nextpagebtnpath)))
                page = brower.page_source
                selector = etree.HTML(page)
                # Bug fix: re-extract titles from the freshly loaded page.
                curtitles = gettitles(selector)
                if len(curtitles) != 0:
                    titles.extend(curtitles)
            except Exception as ex:
                pass
        print(len(titles))
        print(len(list(set(titles))))
        print(titles)
        brower.quit()
    except Exception as ex:
        print(ex)
        # Close the current tab; quit() could also be used to close the browser.
        return None
    return titles
def runGrabGoodTitlesWithTypes(goodtypes:dict):
    """Scrape product titles for every sub-keyword and label them with their
    parent type, then dump (title, goodtype) rows to an Excel file.

    goodtypes -- mapping of type label -> list of JD search keywords.
    """
    savetitles = []
    savetypes = []
    url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
    for gt, subgtlist in goodtypes.items():
        for subgt in subgtlist:
            curtitles = grabGoodTitlesWithGoodType(url.format(subgt))
            # NOTE(review): grabGoodTitlesWithGoodType may return None, in
            # which case len() here would raise TypeError.
            if len(curtitles) == 0:
                print("类型{} 没有取到数据".format(subgt))
            else:
                savetitles.extend(curtitles)
                savetypes.extend([gt] * len(curtitles))
    import pandas as pd
    df = pd.DataFrame.from_dict({"title":savetitles, "goodtype":savetypes})
    df.to_excel('商品标题分类信息.xlsx')
if __name__=="__main__":
    #url = "https://search.jd.com/Search?keyword={}&enc=utf-8"
    #grabGoodTitlesWithGoodType(url.format('花鸟盆栽'))
    # Current entry point: scrape pet/plant product titles grouped by type.
    runGrabGoodTitlesWithTypes({"花鸟宠物":['宠物衣服','花鸟盆栽','花鸟宠物','宠物零食','宠物生活用品']})
#readBrandsFromXlsx("美妆,洗护,护肤,彩妆,口腔洗护品牌.xlsx","美妆,洗护,护肤,彩妆,口腔洗护品牌new.xlsx")
#testGrabBrand()
#runGrabBrands(['美妆','洗护','护肤','彩妆','口腔洗护'])
#notallowlist =['宗教用品',
#'特殊商品',
#'历史',
#'礼品文具',
#'港台图书',
#'礼品定制',
#'古董文玩',
#'婚庆节庆',
#'创意礼品',
#'配件',
#'工艺礼品',
#'电子礼品',
#'挂件/摆件/把件',
#'婚庆饰品',
#'美妆礼品']
#runGrabBrands(['花鸟盆栽'])
#notallowlist=['避孕套',
#' 充气/仿真娃娃',
#'辅酶Q10',
#'仿真阳具',
#'震动棒',
#'其他',
#'飞机杯',
#' 男用延时',
#'其他情趣用品',
#'倒模']
#runGrabBrands(['保健品','滋补','营养品'])
#runGrabBrands(['日用百货'])
#pass
| [
"hu_changchun@126.com"
] | hu_changchun@126.com |
0fda26b376e0f8dcbd1ac5a37aff9b42d12f11f4 | df79e52a9e9d7230edfe81573c667be514b29b62 | /exam/migrations/0006_myuser.py | 5ddf4197d533184889cd92e5ba073c09a94509a5 | [] | no_license | ragnarok347/ceconlineexam | 682b3c18242ef44601049fb7930d28ab1b3d73ee | 451168f8fbcab21f394d31161a8a5f37c0a66e4f | refs/heads/master | 2020-12-22T06:51:15.123837 | 2020-11-30T16:17:42 | 2020-11-30T16:17:42 | 236,702,041 | 0 | 0 | null | 2020-01-28T09:46:09 | 2020-01-28T09:46:08 | null | UTF-8 | Python | false | false | 908 | py | # Generated by Django 3.0.3 on 2020-02-21 10:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the `Myuser` table (exam app, migration 0006).

    NOTE(review): `password` is a plain CharField -- nothing here implies
    hashing; confirm this model is not meant to replace Django's auth user.
    """

    dependencies = [
        ('exam', '0005_auto_20200217_2025'),
    ]

    operations = [
        migrations.CreateModel(
            name='Myuser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=50)),
                ('last_login', models.DateTimeField()),
                ('username', models.CharField(max_length=50)),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('email', models.CharField(max_length=100)),
                ('role', models.CharField(max_length=50)),
            ],
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
791f8d85d3d886791e02f627e1970db741e70788 | 9d302ca09271e7ef892d0e07aa46181e6a8b07cd | /script.py | 433d5e2bf3e6f0fdf2dda186537df26889426239 | [] | no_license | edify42/fun-key | 0f1c18e27e52ded82c0bf67225ec65a3bf053dca | 0391c1c61f8f6cb8589a6f33d500bdd44cb445ff | refs/heads/master | 2023-08-14T23:38:27.429912 | 2021-10-03T11:04:06 | 2021-10-03T11:04:06 | 412,032,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | from ecdsa import SigningKey, SECP256k1
import ecdsa
import codecs
def max():
    """Print how many values 256 bits can represent, and flag the secp256k1
    group order if it ever exceeded that bound (it does not).

    NOTE: shadows the builtin max() at module scope.
    """
    bits = 256
    limit = 2 ** bits
    print("max number yo: ")
    print(limit)
    # secp256k1 curve order n.
    candidate = 115792089237316195423570985008687907852837564279074904382605163141518161494337
    if candidate > limit:
        print("found ya: " + str(bits))
## g() was a learning block which shows how the merged 64byte number represents
# the (x,y) coordinates of the point G provided (hex).
def g():
    """Show that the 64-byte uncompressed-point hex string is just the
    concatenated (x, y) coordinates of secp256k1's base point G."""
    # removed 04 from start
    G = "79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8".lower()
    Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
    Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
    point_bytes = int(G, 16).to_bytes(64, byteorder='big')  # 32 + 32 bytes
    print("me now")
    x_hex = point_bytes[:32].hex()
    # Prints True: the first 32 bytes are exactly the x coordinate.
    print('0x' + x_hex == hex(Gx))
# --- scratch exploration of raw secp256k1 private/public key bytes ---
smol_key = int(1)
big_key = int(115792089237316195423570985008687907852837564279074904382605163141518161494337-1)
# A throwaway 256-bit private key from the internet (hex string).
rando_internet_key = "108d243c9d1b707a6d2716b5b9456e7abbf837fb01624903e4ab430760a71200"
string_private_key = int(rando_internet_key, 16) # convert to hex
# print(string_private_key)
# print(2**256 - string_private_key)
new_string = string_private_key
byte_string = new_string.to_bytes(32, byteorder='big')
print('byte_string')
print(byte_string.hex())
private_key = str.encode("")
generate = True
if generate:
    # Replaces the scratch key above with a freshly generated key pair.
    private_key = SigningKey.generate(curve=SECP256k1)
    string_private_key = private_key.to_string()
blah = SigningKey.from_string(string_private_key, curve=SECP256k1)
print("new key time: ")
print(blah.to_pem())
private_key_bytes = string_private_key
print("private key: ")
# Get ECDSA public key
pub_key = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key
pub_key_bytes = pub_key.to_string()
pub_key_hex = pub_key_bytes.hex()
print("length of byte array is:")
print(len(pub_key_bytes))
print("full public key: ")
print(pub_key_hex)
print("pub key x:")
pub_x = pub_key_bytes[0:32]
print(type(pub_x[0]))
# NOTE(review): the y-coordinate slice on the following line starts at byte
# 33; the full y half is pub_key_bytes[32:64] -- looks off by one.
pub_y = pub_key_bytes[33:64].hex() | [
"edify42@yahoo.com"
] | edify42@yahoo.com |
84a7d6d29a2a38c23bdf25e634b5e709d1813ab8 | 8ff9e061e34d77d02b130529ed8804b145df1b64 | /app/soloanalysis/view/base/__init__.py | c6ff558ab2190495e249dad3cf13be479e56de61 | [] | no_license | cash2one/solo-frame | e7277f564214304b398cb67f1b83326aee2d07ca | b9b9bd2fd96af0e0278427f3cff33582f15cf779 | refs/heads/master | 2021-01-23T04:39:19.500293 | 2016-10-29T13:59:14 | 2016-10-29T13:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,039 | py | #!/usr/bin/env python
# coding:utf-8
import _env # noqa
import sys
import httplib
import traceback
from os.path import join
import tornado
import tornado.web
from tornado.httpclient import HTTPError
from solo.config import APP, DEBUG
def get_error_html(self, status_code, **kwargs):
    """Replacement error-page renderer, monkey-patched onto tornado's
    RequestHandler below (Python 2).

    404s get the project's templated page; in debug mode a plain-text
    traceback is sent; any other status gets a minimal inline HTML page.
    """
    if status_code == 404:
        from solo.web.render import render as _render
        path = join(APP, 'base/error/404.html')
        html = _render(path)
        return self.write(html)
    if self.settings.get('debug') and ('exc_info' in kwargs or 'exception' in kwargs):
        # in debug mode, try to send a traceback
        self.set_header('Content-Type', 'text/plain')
        for line in traceback.format_exception(*sys.exc_info()):
            self.write(line)
    else:
        message = kwargs.get('message', httplib.responses[status_code])
        html = '<html><title>%(code)s: %(message)s</title><body>%(code)s: %(message)s</body></html>' % {
            'code': status_code,
            'message': message,
        }
        self.write(html)
    # if not self._finished:
    #     self.finish()
# Install the custom error page on every tornado handler.
tornado.web.RequestHandler.get_error_html = get_error_html
# In production, base View on the bugsnag-aware handler so unhandled
# exceptions are reported; in debug, use tornado's plain handler.
if DEBUG:
    RequestHandler = tornado.web.RequestHandler
else:
    from bugsnag.tornado import BugsnagRequestHandler
    RequestHandler = BugsnagRequestHandler
class View(RequestHandler):
    """Project base handler (Python 2): passes arguments through undecoded,
    sends permanent redirects as 301, and calls an optional per-handler
    init() hook before dispatching to the HTTP verb method."""

    def prepare(self):
        super(View, self).prepare()

    def decode_argument(self, value, name=None):
        # Arguments are passed through as raw bytes, not unicode-decoded.
        return value

    def redirect(self, url, permanent=False):
        """Sends a redirect to the given (optionally relative) URL."""
        if self._headers_written:
            raise Exception('Cannot redirect after headers have been written')
        self.set_status(301 if permanent else 302)
        self.set_header('Location', url)
        self.finish()

    def _execute(self, transforms, *args, **kwargs):
        """Executes this request with the given output transforms."""
        self._transforms = transforms
        try:
            if self.request.method not in self.SUPPORTED_METHODS:
                raise HTTPError(405)
            # If XSRF cookies are turned on, reject form submissions without
            # the proper cookie
            # if self.request.method not in ('GET', 'HEAD', 'OPTIONS') and \
            #         self.application.settings.get('xsrf_cookies'):
            #     self.check_xsrf_cookie()
            self.prepare()
            if not self._finished:
                args = [self.decode_argument(arg) for arg in args]
                kwargs = dict((k, self.decode_argument(v, name=k))
                              for (k, v) in kwargs.iteritems())
                # Optional per-handler init() hook runs before the verb method.
                if hasattr(self, 'init'):
                    getattr(self, 'init')(*args, **kwargs)
                getattr(self, self.request.method.lower())(*args, **kwargs)
                if self._auto_finish and not self._finished:
                    self.finish()
        except Exception, e:
            self._handle_request_exception(e)
if __name__ == '__main__':
    # Nothing to run directly; this module only defines the base View class.
    pass
| [
"jay_yuyay@hotmail.com"
] | jay_yuyay@hotmail.com |
8811c483d5db3fb20315160987dfaa8a74ed1799 | 45def2cd621d5ce1b924a7c24379ab6884a71f9d | /server/test.wsgi | fc2faded6dc1fb92d11c753ab7c52d6cae7f03e3 | [] | no_license | mbirtwell/reproduce_mod_wsgi_queue_timeout_bug | 205077d67828e35f6e251a0437afd69df3ff212a | 59e760e72aac93e3f11366d236328aad39fc24a4 | refs/heads/master | 2020-07-05T05:21:12.118127 | 2019-08-15T12:15:58 | 2019-08-15T12:15:58 | 202,535,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | wsgi | from time import sleep
import flask
app = flask.Flask(__name__)
@app.route("/delay/<int:amount>/<path:ignore>")
def delay(amount, ignore):
sleep(amount)
body = """\
Delayed: {}
""".format(amount)
r = flask.make_response(body)
return r
application = app | [
"michael.birtwell@starleaf.com"
] | michael.birtwell@starleaf.com |
af2ced00ab5aa3cbb20c882eba158903939ef5e8 | 32c9f7f497ac6e3d50f8bb9a47187fa1677103eb | /src3/npiAddrExtractor.py | d6c642e8fdda399a6a9d68f916ad064e74b77a49 | [] | no_license | lancerzh/AddrVerify | 10729f29e0ba2dbf95a6eb6cfd92546d108aa78f | 23da619229b8e6cfcf874bfda1ee3cf81bf821f9 | refs/heads/master | 2021-01-01T04:55:55.946304 | 2016-04-27T14:53:15 | 2016-04-27T14:53:15 | 55,976,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,565 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on Mar 28, 2016
@author: lancer
'''
import verify_by_usps;
import USMailAddress;
import csv, codecs, io;
import time;
import socket
import sys
'''
input npi file which can doanload from http://download.cms.gov/nppes/NPI_Files.html
It include two address infomation. one is used for business mail, other one is used for Practice Location.
This python program extract the two address infomation into a csv format file. This module also try to verify the
address by using usps address verify tool.
output file columns like this
va1, va2, vc, vs, vp5, vp4, oa1, oa2, oc, os, op5, op4, on, oda1, oda2, da, da1, da2, dc, dn, dp5, dp4, npiid, npitype, addrtype, verified
addrtype = (M[ail], P[ractice])
verified = (V[erified], N[otverified], F[oreign], E[mpty])
'''
title = "va1, va2, vc, vs, vp5, vp4, oa1, oa2, oc, os, op5, op4, on, oda1, oda2, da, da1, da2, dc, ds, dp5, dp4, npiid, npitype, addrtype, verified"
title = title.split(', ');
workfor = 10000;
'''
157926,160457, 251029
'''
beginline = 750;
#beginline = 1;
endline = beginline + workfor;
addressFile = '/Users/lancer/workspace/npi/npidata_20050523-20160313.csv'
outputFile = '../DevOut.csv';
addr1Index = (21, 22, 23, 24, 25, 26);
addr2Index = (29, 30, 31, 32, 33, 34);
NotFoundMsg = '../NotFoundMsg.txt'
def extractAddr(row, index):
if len(row) >= max(index) :
a1 = row[index[0] - 1];
a2 = row[index[1] - 1];
c = row[index[2] - 1];
s = row[index[3] - 1];
p = row[index[4] - 1];
row = row[index[5] - 1];
return verify_by_usps.Address(a1, a2, c, s, p, row);
else :
return None;
def prepareCsvRow(uspsAddr, addr, distance, npiid, npitype, addrtype, verifiedType):
r = []
if uspsAddr != None :
for x in [uspsAddr.addr1, uspsAddr.addr2, uspsAddr.city, uspsAddr.state, uspsAddr.zip5, uspsAddr.zip4]:
r.append(x)
else :
for x in range(6):
r.append('')
for x in [addr.addr1, addr.addr2, addr.city, addr.state, addr.zip5, addr.zip4, addr.nation, addr.addr1, addr.addr2]:
r.append(x)
for x in distance:
r.append(str(x))
r.append(npiid)
r.append(npitype)
r.append(addrtype)
r.append(verifiedType)
return r
class Reporter:
def __init__(self, spamreader):
self.reader = spamreader;
self.lineBegin = spamreader.line_num;
self.currentLineNum = spamreader.line_num;
self.statCount = {}
self.addrCount = 0;
self.startTime = time.time();
self.dotCount = 0;
print('000000', end=': ',)
pass
def report(self, stat):
self.currentLineNum = spamreader.line_num;
self.addrCount += 1;
if stat in self.statCount:
self.statCount[stat] += 1;
else :
self.statCount[stat] = 1;
self.dotCount += 1;
print(stat[0],end='')
sys.stdout.flush()
if self.dotCount > 50:
self.dotCount = 0
print()
print('%06d' % spamreader.line_num, end=': ',)
def showStat(self):
print();
for item in self.statCount :
print ('total of', item, ': ', self.statCount[item]);
print ('total cost = {0:.2f} sec'.format((time.time() - self.startTime)))
print ('total lines =', str(self.currentLineNum - self.lineBegin))
print ('total addresses =', str(self.addrCount))
def verify(row, addr, addrtype):
if addr == None:
statReport.report('ERROR')
return None;
if addr.isEmpty():
statReport.report('BLANK')
return None
elif addr.isForeign():
statReport.report('Foreign')
r = prepareCsvRow(None, addr, USMailAddress.calcDistance(None, addr), row[0], row[1], addrtype, 'F')
return r;
else:
try:
uspsAddr, msg = verify_by_usps.reqUSPS(addr)
except socket.error :
time.sleep(5)
statReport.report('Timeout')
r = prepareCsvRow(None, addr, USMailAddress.calcDistance(None, addr), row[0], row[1], addrtype, 'T')
return r
if uspsAddr == None:
statReport.report('NotFound')
r = prepareCsvRow(None, addr, USMailAddress.calcDistance(uspsAddr, addr), row[0], row[1], addrtype, msg[0])
#print (msg[0] , msg[1])
nfm.write(msg[0] + " : " + msg[1] + '\row');
if msg[0] != '-2147219401' :
nfm.write(addr.__str__()+"\row")
nfm.write(','.join(row)+'\row')
nfm.flush();
else:
statReport.report('.Verified')
r = prepareCsvRow(uspsAddr, addr, USMailAddress.calcDistance(uspsAddr, addr), row[0], row[1], addrtype, 'V') #r = prepareCsvRow(None, addr, verify_by_usps.calcDistance(None, addr), row[0], row[1], addrtype, 'E');
#continue;
return r
if __name__ == '__main__':
nfm = open(NotFoundMsg, 'w', encoding='utf-8');
outf = open(outputFile, 'w', encoding='utf-8');
writer = csv.writer(outf, delimiter=',', quotechar='"');
#print ( title);
writer.writerow(title);
verifiedType = '';
with open(addressFile, 'r', encoding='utf-8') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
statReport = Reporter(spamreader);
statReport.lineBegin = beginline;
uspsAddr = None
msg = '';
countPoint = 0;
for row in spamreader:
if spamreader.line_num <= beginline:
continue;
if spamreader.line_num > endline :
break;
'''
first address
'''
addr = extractAddr(row, addr1Index);
addr1 = extractAddr(row, addr2Index);
if addr1 == addr :
addrtype = 'MP'
else :
addrtype = 'M'
r = verify(row, addr, addrtype)
#print ','.join(r)
if r != None :
writer.writerow(r);
'''
second address
'''
if addrtype == 'MP' :
continue;
else :
addrtype = 'P'
r = verify(row, addr1, 'P')
#print ','.join(r)
if r != None :
writer.writerow(r);
outf.close();
nfm.close();
statReport.showStat();
pass
| [
"xlancer@yeah.net"
] | xlancer@yeah.net |
f451b1b4faea36c7f6d7ca4ceec46e4a325c2715 | 4e5b20fdcca20f458322f0a8cd11bbdacb6fb3e5 | /test/shop/ShopInfoModifyTest.py | bf78087f7fbec6a3ef4fc0ab81ee164682b8ea35 | [] | no_license | shijingyu/sunningAPI | 241f33b0660dc84635ce39688fed499f5c57a5da | 4a3b2ef7f9bdc4707d1eaff185bc7eb636fe90d5 | refs/heads/master | 2020-04-24T22:15:11.584028 | 2019-02-24T06:41:20 | 2019-02-24T06:41:20 | 172,305,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | #!usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2014-8-22
@author: suning
'''
import sys
sys.path.append("../../../api-sdk-python")
import suning.api as api
a=api.ShopInfoModifyRequest()
a.placard = "心心相印"
a.telphone = "010-11255555"
f = a.getResponse()
print(f)
| [
"945090896@qq.com"
] | 945090896@qq.com |
0d1c6e8e53e7251468c2bf5ef873438916e52d02 | ffeb09f710556ffaee1944af9019be17cfb835d4 | /repositories/album_repository.py | 180fdb57717422333506ee3f10db55fcc05ca240 | [] | no_license | ReneeNGraham/SQL_exercise_codeclan- | fc10ed944209734a9b76f51d10ceaa7ae1656e0b | edb9485b9c0218deac4161df1c1fd70b0c633ecb | refs/heads/main | 2023-02-22T06:42:02.076558 | 2021-01-26T23:25:47 | 2021-01-26T23:25:47 | 333,244,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | from db.run_sql import run_sql
from models.album import Album
from models.artist import Artist
import repositories.artist_repository as artist_repository
def save(album):
sql = f"INSERT INTO albums (title, artist_id, genre) VALUES (%s, %s, %s) RETURNING*"
values = [album.title, album.artist.id, album.genre]
results = run_sql(sql, values)
id = results[0]['id']
album.id = id
return album
def delete_all():
sql = "DELETE FROM albums WHERE id = %s"
values = [id]
run_sql(sql, values)
def select(id):
album = None
sql = "SELECT * FROM tasks WHERE id = %s"
values = [id]
result = run_sql(sql, values)[0]
def select_all():
albums = []
sql = "SELECT * FROM albums"
results = run_sql(sql)
for row in results:
artist = artist_repository.select(row['artist_id'])
album = Album(row['title'], artist ,row['genre'], row['id'])
albums.append(album)
return albums
# Extensions
def delete(id):
sql = "DELETE FROM albums WHERE ID = %s"
values = [id]
run_sql(sql, values)
def update(album):
sql = "UPDATE albums SET (title, artist_id, genre) = (%s, %s, %s) WHERE id = %s"
values = [album.title, album.artist.id, album.genre, album.id]
run_sql(sql, values)
| [
"renee.noluthando@gmail.com"
] | renee.noluthando@gmail.com |
d2a35ea2668ab07a1748e0d2a8759317926dfa88 | 02495eeb56c436d1dbf9f4700c43658d16ffe0ca | /03_P💀Spoopy/pylindrome/docker/app.py | 098301d96aed87ab4fa1b8e3ec47ee7f45351bbd | [] | no_license | ce8so9/csr-2020-tasks | 906a55c14bca0f7a14b228cbce08a38f7d2271eb | cd6ca7f98a40d5e7eb41c61f5b293537188b85c4 | refs/heads/master | 2023-01-12T09:33:02.928645 | 2020-11-10T16:19:30 | 2020-11-10T16:19:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | #!/usr/bin/env python3
import subprocess
def sandbox(toexec):
return subprocess.check_output(["sudo", "-u", "sandbox", "python3", "-c", toexec]).decode().strip()
try:
code = input()[:100]
for bad in ['#', '"""', "'''"]:
code = code.replace(bad, "")
assert code == code[::-1]
exec(sandbox(code))
except:
print(open(__file__,"r").read())
| [
"lukas@schauer.so"
] | lukas@schauer.so |
3417dd1bfb27a6908472c7fab7fd73e1b3b250d3 | 6a32a3ef88f8eff3efb0938d9a8f32ed94a8b11b | /scripts/02_features.py | 5a642f57a0a04cc863a6999b2440cff50ce7800c | [
"MIT"
] | permissive | codeformuenster/openair-cologne | f2e7a111fb26b64df521a2f65979fb159279e52f | 0e7096695443ad23028a35d41597a4221945b280 | refs/heads/master | 2020-04-16T07:39:22.263476 | 2019-01-29T22:05:02 | 2019-01-29T22:05:02 | 165,394,793 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | """Feature engineering."""
import pandas as pd
# %% LOAD
df_lanuv = pd.read_parquet('data/df_lanuv.parquet') \
.groupby(['timestamp', 'station'])['no2'].aggregate('median') \
.reset_index() \
.groupby(['timestamp'])['no2'].aggregate('median') \
.reset_index() \
.rename(columns={'no2': 'no2_cologne'})
df_openair = pd.read_parquet('data/df_openair.parquet') \
.drop(columns=['pm10', 'pm25']) \
.query('r1 != -1 and r2 != -1') \
.query('hum <= 100 and temp < 45') \
.assign(feed=lambda d: d.feed.str.split('-').map(lambda x: x[0]))
# %% JOIN DATA
df_joined = pd.merge(df_lanuv, df_openair, how='inner', on=['timestamp'])
# %% TEMPORAL FEATURES
# TODO
# %% WRITE RESULT
df_joined.to_parquet('data/df_features.parquet')
| [
"jensen.thorben@gmail.com"
] | jensen.thorben@gmail.com |
30b76a9a201dbe84b240185ffef64a666fffd06d | b21020746286ba1c1eac1905c393b1ddb1d208ff | /Day_2/Project.py | 5e5ed717af701183056609e5269bcda22b45b646 | [] | no_license | jfalkowska/pyladies-start | 9bcc40abcb11560e853dc1c1cfc6df84addbb408 | 94687377d699644052c1a6d319cd824820e3f9c5 | refs/heads/master | 2021-07-04T11:49:07.245738 | 2017-09-26T14:07:55 | 2017-09-26T14:07:55 | 104,625,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | print()
dictionary = {
'cd': 'si-di',
'dvd': 'di-wi-di',
}
words_to_process = []
word = input('Podaj wyszukiwane słowo: ').lower()
if word in dictionary:
print('Wymowa Twojego słowa to: ' + dictionary[word])
else:
print('Nie mamy takiego słowa w bazie danych. Wkrótce uzupełnimy słownik.')
dictionary[word] = ''
words_to_process.append(word)
print()
answer = input('Czy chcesz uzupełnić bazę danych? ').lower()
if answer == 'tak':
print('Super!')
print('Lista słów do uzupełnienia: ' + str(words_to_process))
else:
print('Nie to nie.')
| [
"jfalkowska@gmail.com"
] | jfalkowska@gmail.com |
b54f9706f3e6ef7feac1b053edb3cfe821fccb46 | 172162ad255cdf62656a1fcc7b6c712a47d483e8 | /exe3/headquarters/tester.py | 0cff9b087783a889f852916bae51ebe57d4bed3a | [] | no_license | marioskogias/algo | 1a3e64f6e70c8779d88056395af6123fdb6059d8 | 8796b5e1cfec24ef5f596940b77e4c02c3c0536f | refs/heads/master | 2021-01-10T07:14:09.288229 | 2013-03-10T19:34:46 | 2013-03-10T19:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | import os
count = raw_input()
for i in range(int(count)):
print i+1
os.system("./headquarter < headquarters/input"+str(i+1)+".txt")
os.system("cat headquarters/output"+str(i+1)+".txt")
| [
"marioskogias@gmail.com"
] | marioskogias@gmail.com |
42ab39570dc84ac1d7601e300d81ad482e4685d7 | ec68d5efe420dea0d8967de7f58a5b89d89a4a34 | /i18nLib/i18nLib.py | 7787967670d9718d91a7aa80cdce60518e5b868c | [] | no_license | sukutt/i18nLib | e63fbb49bedd387d9983212171e62e8dd6261691 | 880b25cb77772a0abf04210bccfb4857b6376043 | refs/heads/master | 2020-09-01T11:55:32.402856 | 2019-11-01T09:32:45 | 2019-11-01T09:32:45 | 218,953,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,581 | py | import os
import re
import csv
oldCSVDic = {}
newCSVDic = {}
currentFile = ""
def makeNewFile(data):
f = open(currentFile, 'w', encoding="utf-8")
f.write(data)
f.close()
def readCSV(wholePath, dic, isFirstKey=True):
f = open(wholePath, "r", encoding="utf-8")
data = csv.reader(f)
headers = next(data)
for row in data:
if isFirstKey:
dic[row[0]] = row[1]
else:
dic[row[1]] = row[0]
f.close()
def replaceKey(data):
tempData = data;
keyRe = re.compile('{{(.+)}{(.+)}}')
resList = keyRe.findall(tempData)
if len(resList) > 0:
for item in resList:
msg = oldCSVDic.get(item[0])
if msg:
escapedTxt = re.escape('{{'+item[0]+'}{'+item[1]+'}}')
targetRe = re.compile(escapedTxt)
key = newCSVDic.get(msg)
if key:
tempData = targetRe.sub("{{"+key+"}}", tempData)
makeNewFile(tempData)
def getWholeString(wholePath):
f = open(wholePath, "r", encoding="utf-8")
data = f.read()
f.close()
return data
def change(_path, _oldCSV, _newCSV, _ext):
readCSV(_oldCSV, oldCSVDic)
readCSV(_newCSV, newCSVDic, False)
for (path, dir, files) in os.walk(_path):
currentPath = path.replace("\\", "/") + "/"
for filename in files:
ext = os.path.splitext(filename)[-1]
if ext == '.' + _ext:
global currentFile
currentFile = currentPath + filename
replaceKey(getWholeString(currentFile)) | [
"woongahkim@netman.co.kr"
] | woongahkim@netman.co.kr |
d996405520f5fadcbb45bb17b636f2011447af94 | f5b5a6e3f844d849a05ff56c497638e607f940e0 | /capitulo 05/05.02.py | 3c052b435e84ceae4eef942fd6fc518631fd4e89 | [] | no_license | alexrogeriodj/Caixa-Eletronico-em-Python | 9237fa2f7f8fab5f17b7dd008af215fb0aaed29f | 96b5238437c88e89aed7a7b9c34b303e1e7d61e5 | refs/heads/master | 2020-09-06T21:47:36.169855 | 2019-11-09T00:22:14 | 2019-11-09T00:22:14 | 220,563,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2019
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Terceira edição - Janeiro/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem3\capítulo 05\05.02.py
# Descrição:
##############################################################################
x = 1
print(x)
x = 2
print(x)
x = 3
print(x)
| [
"noreply@github.com"
] | noreply@github.com |
62f0245a21f5755d34178160fb978f1a64a7f184 | 7df8b17e55934a51b6ba17ced3f240ea9bc37cf4 | /create_models/bilstm_creators/50_50/bilstm-sentiment-analyzer50_50_4.py | 6e5bc354f81f459d540b765188362b55959db4d5 | [] | no_license | econmang/practice-lstm | e0c48f070d88e0167d15a8e145cc3692db8055a4 | ad0ce96bf5b0292456bf9446e04f676a39b396c2 | refs/heads/master | 2022-05-12T13:09:22.459416 | 2022-05-06T06:38:37 | 2022-05-06T06:38:37 | 124,381,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,336 | py | # Bi-Directinal LSTM
# Imports for keras and sklearn
# keras: API Built on top of Tensorflow
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional
from keras.utils import to_categorical
from keras.datasets import imdb
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
max_words = 20000
max_sequence_length = 250
batch_size = 32
print('Loading IMDB Sentiment Analysis data...\n\n')
# Splitting initial dataset in half into training and testing set
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_words)
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.5, random_state=1)
print("Data loaded...")
#Printing out length of each data sample
print(len(x_train),'training sequences')
print('0 validation sequences')
print(len(x_test), 'testing sequences\n')
# Padding sequences to keep input of constant size
print('Padding sequences so they are of the same length...\n\n')
x_train = sequence.pad_sequences(x_train, maxlen=max_sequence_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_sequence_length)
# Converting Output data to categorical for use with softmax/categorical cross entropy
y_train = to_categorical(y_train, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)
print("Data shape:")
print('Training input shape:', x_train.shape)
print('Training output shape:', y_train.shape,"\n")
print('Testing input shape:', x_test.shape)
print('Testing output shape:', y_test.shape,"\n")
print('Developing the model...\n\n')
""" Developing a sequential model
with an embedding matrix feeding into
128 bi-directional LSTM units (dropout of
neurons is set to 0.2 as well as dropout
for connections to recurrent layers).
Final layer is softmax output
layer to determine sentiment."""
model = Sequential()
model.add(Embedding(max_words, 128, input_length=max_sequence_length))
model.add(Bidirectional(LSTM(128)))
model.add(Dropout(0.2))
model.add(Dense(2, activation='softmax'))
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
print("Model constructed...\n")
print("Training...\n")
# Model will work to fit training data in batch sizes of 32
# 2 Epochs (Training iterations) will be performed
# Validation sets will be used to test validity after each epoch, ending training
# if accuracy is within a small enough value
model.fit(x_train, y_train,batch_size=batch_size,epochs=2)
print("Model finished training...\n\n")
print("Testing model...\n")
metric, accuracy = model.evaluate(x_test,y_test,batch_size=batch_size)
print('Test loss:',metric)
print('Test accuracy:',accuracy)
y_test_pred = model.predict(x_test)
cmtest = confusion_matrix(y_test.argmax(axis=1), y_test_pred.argmax(axis=1))
tp, fp = cmtest[0]
fn, tn = cmtest[1]
accuracy = (tp + tn)/(tp+tn+fn+fp)
sensitivity = tp/(tp+fn)
specificity = tn/(tn+fn)
miss_rate = 1 - sensitivity
fall_out = 1 - specificity
print("\n")
print("Accuracy",accuracy)
print("Sensitivity:",sensitivity)
print("Specificity:",specificity)
print("Miss Rate:", miss_rate)
print("Fall-out:",fall_out)
print("\n\n")
print("Development of model complete.")
print("Saving model...")
model.save("../../../models/bi_lstm_5050epoch4model.h5")
| [
"econmang@gmail.com"
] | econmang@gmail.com |
224e6bf9c72fe236be357b83360e2f4a0ec1d360 | d299883f8bd2458817b2b9e00d96f8bc9ae35e1e | /conftest.py | 7c6a11468e0f91ff4be86b39a71c996b0c3dad02 | [] | no_license | Teshimella/python_tests | 7109782e29481128b472a3e7e5c9d652dbbcb30e | 094b63a74969783e56551da9a6a31390168f14c9 | refs/heads/master | 2023-01-02T18:53:23.226001 | 2020-11-01T19:38:11 | 2020-11-02T07:07:33 | 309,173,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 880 | py | import pytest
from .Gitmetods import GitMetods
import time
import base64
import os
token = os.environ["TOKEN"].encode('utf-8')
owner = 'Teshimella'
git = 'https://api.github.com'
encode_token = base64.b64encode(owner.encode("UTF-8") + b':' + token).decode("UTF-8")
@pytest.fixture
def api(name):
apiagent = GitMetods(git, encode_token, owner)
yield apiagent
apiagent.delete_repository(apiagent.owner, name)
time.sleep(10)
spisok_repo = apiagent.list_repositories()
assert name not in spisok_repo, f'repo {name} not in {spisok_repo}'
@pytest.fixture
def api_s():
apiagent = GitMetods(git, encode_token, owner)
yield apiagent
name = '--test11--'
apiagent.delete_repository(apiagent.owner, name)
time.sleep(10)
spisok_repo = apiagent.list_repositories()
assert name not in spisok_repo, f'repo {name} not in {spisok_repo}'
| [
"Test"
] | Test |
278df4d6ba4b5361013658159c67c8018ba68755 | c0e019be8b01366e0dce31ce8d0f7f740088adfd | /letterguess1.py | a3252cda1ee316cd15008f8fcc1f3b3cb0e68865 | [] | no_license | rmaher/littleedie | 7601df4e5832e7cad8dd87c2b3633d6b5d149707 | c58ff5b2c8d8ad2499eabcd53e100971f86ca746 | refs/heads/master | 2020-05-17T05:37:21.462387 | 2014-09-06T18:48:02 | 2014-09-06T18:48:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | def isVowel(char):
'''
char: a single letter of any case
returns: True if char is a vowel and False otherwise.
'''
if char == 'a' or char == 'e' or char == 'o' or char == 'u':
return True
elif char == 'A' or char == 'E' or char == 'I' or char == 'O' or char == 'U':
return True
else:
return False | [
"rachelelizabethmaher@gmail.com"
] | rachelelizabethmaher@gmail.com |
440e0576d370bb15d1f889a608535bff75c3eea5 | 656a9d82b36c6113bfea9e28819e32877b2a9def | /module/pillow_test.py | 2be01a2fe7f0dc6451e7388412f35a84b5011f2a | [] | no_license | wazsmwazsm/Python_prc | 61b8a363d50772b35ef6763da997e402d8e651f2 | d27f344c1dde88982be693579e66a7ca42f4de11 | refs/heads/master | 2020-12-07T17:07:59.205653 | 2018-11-28T14:58:56 | 2018-12-02T16:00:30 | 95,274,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | #!/usr/bin/env python3
# -*- coding:UTF-8 -*-
from PIL import Image
im = Image.open('basic_41.jpg')
print(im)
im.thumbnail((200,200))
im.save('thumb.png', 'PNG')
| [
"18748406022@163.com"
] | 18748406022@163.com |
40e6e32b3019631f8315891717d114988b825bf1 | 30d85430b0f44e5a57674c1b79e505a6f8e6118b | /registrationapp/models.py | 817439029126dafd22061ed55d1b0dbe14cbbc7d | [] | no_license | Swarnasahu/project | a98c1832731f315b9dcd3385c6a33a1f179a9daf | d79ebfc3d0c6b6c765222eebf1bf3f37b23fe77b | refs/heads/master | 2020-06-02T16:10:27.932375 | 2019-06-10T18:37:46 | 2019-06-10T18:37:46 | 191,222,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | from django.db import models
class RegistrationData(models.Model):
firstname=models.CharField(max_length=30)
lastname=models.CharField(max_length=30)
username=models.CharField(max_length=30)
email = models.EmailField(max_length=50)
number = models.BigIntegerField()
password1=models.CharField(max_length=20)
password2=models.CharField(max_length=20)
| [
"noreply@github.com"
] | noreply@github.com |
178a9a2b75f11fdfb923aefdd0d5dd8a19b43333 | fae5db82f29b87d6ea32ab9eef2183f318a35e05 | /app/training.py | 9891dd87fdf4f97e52addf857b05cd50067c4326 | [
"MIT"
] | permissive | m-triple-m/project_junky | a06a871f43dbff23c03f54ab15535b7240362b96 | e55d3eeae6e97f002ab3087245e9dbeb4ed3eafd | refs/heads/master | 2022-04-15T08:27:36.250289 | 2020-03-26T22:17:16 | 2020-03-26T22:17:16 | 250,379,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,335 | py | import pandas as pd
import numpy as np
import pickle
import sklearn.ensemble as ske
from sklearn import tree, linear_model
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.externals import joblib
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import os
def AI_Trainer(dataset, classifier_file, feature_file):
data = pd.read_csv(dataset, sep='|')
X = data.drop(['Name', 'md5', 'legitimate'], axis=1).values
y = data['legitimate'].values
print('Researching important feature based on %i total features\n' % X.shape[1])
# Feature selection using Trees Classifier
fsel = ske.ExtraTreesClassifier().fit(X, y)
model = SelectFromModel(fsel, prefit=True)
X_new = model.transform(X)
nb_features = X_new.shape[1]
X_new, y = shuffle(X_new, y, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X_new, y ,test_size=0.2)
features = []
print('%i features identified as important:' % nb_features)
indices = np.argsort(fsel.feature_importances_)[::-1][:nb_features]
for f in range(nb_features):
print("%d. feature %s (%f)" % (f + 1, data.columns[2+indices[f]], fsel.feature_importances_[indices[f]]))
# XXX : take care of the feature order
for f in sorted(np.argsort(fsel.feature_importances_)[::-1][:nb_features]):
features.append(data.columns[2+f])
#Algorithm comparison
algorithms = {
#"DecisionTree": tree.DecisionTreeClassifier(max_depth=10),
"RandomForest": ske.RandomForestClassifier(n_estimators=100,criterion='entropy',max_features="auto"),
#"GradientBoosting": ske.GradientBoostingClassifier(n_estimators=100,max_features="log2"),
#"AdaBoost": ske.AdaBoostClassifier(n_estimators=100),
#"GNB": GaussianNB()
}
results = {}
print("\nNow testing algorithms")
for algo in algorithms:
clf = algorithms[algo]
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
print("%s : %f %%" % (algo, score*100))
results[algo] = score
winner = "RandomForest" #max(results, key=results.get)
print('\nWinner algorithm is %s with a %f %% success' % (results[winner], results[winner]*100))
# Save the algorithm and the feature list for later predictions
print('Saving algorithm and feature list in classifier directory...')
joblib.dump(algorithms[winner], classifier_file)
open(feature_file, 'wb').write(pickle.dumps(features))
print('Saved')
# Identify false and true positive rates
clf = algorithms[winner]
res = clf.predict(X_test)
mt = confusion_matrix(y_test, res)
print("False positive rate : %f %%" % ((mt[0][1] / float(sum(mt[0])))*100))
print('False negative rate : %f %%' % ( (mt[1][0] / float(sum(mt[1]))*100)))
def train():
dataset=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dataset.csv')
classifier=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'classifier/classifier.pkl')
features=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'classifier/feature.pkl')
AI_Trainer(dataset, classifier, features)
if __name__ == "__main__":
train() | [
"triplem656@gmail.com"
] | triplem656@gmail.com |
7a9eacaaff1dee09c8f626968b2da5d9c9330251 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/airport-polygon/depositor.py | 901985aae38ae47c1dac4ee3d0ad64212ad37cc1 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | import requests
r = requests.get("https://data.cityofnewyork.us/api/geospatial/xfhz-rhsk?method=export&format=GeoJSON")
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/airport-polygon/data.geojson", "wb") as f:
f.write(r.content)
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/airport-polygon/data.geojson"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
5cfe05bb879388617be66fa26b491d6a9224b810 | 84d5b9e6114f4588386d9342e34d52c3eab1556a | /Part4/packages/regression_model/regression_model/processing/preprocessors.py | c5e1a0a36be6ede1f85412afeb1a90aafbc93cbd | [] | no_license | Tifoaui/cours-A61 | 7980dc2de0bc2a73ac3715982cc987489d280b25 | abd35f0d17a77dd2b661e1a4422fd772c6ff7e0e | refs/heads/main | 2023-02-25T00:53:49.214908 | 2021-01-30T15:59:02 | 2021-01-30T15:59:02 | 330,817,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,650 | py | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class CategoricalImputer(BaseEstimator, TransformerMixin):
"""Categorical data missing value imputer."""
def __init__(self, variables=None) -> None:
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X: pd.DataFrame, y: pd.Series = None) -> "CategoricalImputer":
"""Fit statement to accomodate the sklearn pipeline."""
return self
def transform(self, X: pd.DataFrame) -> pd.DataFrame:
"""Apply the transforms to the dataframe."""
X = X.copy()
for feature in self.variables:
X[feature] = X[feature].fillna("Missing")
return X
class NumericalImputer(BaseEstimator, TransformerMixin):
"""Numerical missing value imputer."""
def __init__(self, variables=None):
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y=None):
# persist mode in a dictionary
self.imputer_dict_ = {}
for feature in self.variables:
self.imputer_dict_[feature] = X[feature].mode()[0]
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature].fillna(self.imputer_dict_[feature], inplace=True)
return X
class TemporalVariableEstimator(BaseEstimator, TransformerMixin):
"""Temporal variable calculator."""
def __init__(self, variables=None, reference_variable=None):
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
self.reference_variables = reference_variable
def fit(self, X, y=None):
# we need this step to fit the sklearn pipeline
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature] = X[self.reference_variables] - X[feature]
return X
class RareLabelCategoricalEncoder(BaseEstimator, TransformerMixin):
"""Rare label categorical encoder"""
def __init__(self, tol=0.05, variables=None):
self.tol = tol
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y=None):
# persist frequent labels in dictionary
self.encoder_dict_ = {}
for var in self.variables:
# the encoder will learn the most frequent categories
t = pd.Series(X[var].value_counts() / np.float(len(X)))
# frequent labels:
self.encoder_dict_[var] = list(t[t >= self.tol].index)
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature] = np.where(
X[feature].isin(self.encoder_dict_[feature]), X[feature], "Rare"
)
return X
class CategoricalEncoder(BaseEstimator, TransformerMixin):
"""String to numbers categorical encoder."""
def __init__(self, variables=None):
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y):
temp = pd.concat([X, y], axis=1)
temp.columns = list(X.columns) + ["target"]
# persist transforming dictionary
self.encoder_dict_ = {}
for var in self.variables:
t = temp.groupby([var])["target"].mean().sort_values(ascending=True).index
self.encoder_dict_[var] = {k: i for i, k in enumerate(t, 0)}
return self
def transform(self, X):
# encode labels
X = X.copy()
for feature in self.variables:
X[feature] = X[feature].map(self.encoder_dict_[feature])
# check if transformer introduces NaN
if X[self.variables].isnull().any().any():
null_counts = X[self.variables].isnull().any()
vars_ = {
key: value for (key, value) in null_counts.items() if value is True
}
raise ValueError(
f"Categorical encoder has introduced NaN when "
f"transforming categorical variables: {vars_.keys()}"
)
return X
class LogTransformer(BaseEstimator, TransformerMixin):
"""Logarithm transformer."""
def __init__(self, variables=None):
if not isinstance(variables, list):
self.variables = [variables]
else:
self.variables = variables
def fit(self, X, y=None):
# to accomodate the pipeline
return self
def transform(self, X):
X = X.copy()
# check that the values are non-negative for log transform
if not (X[self.variables] > 0).all().all():
vars_ = self.variables[(X[self.variables] <= 0).any()]
raise ValueError(
f"Variables contain zero or negative values, "
f"can't apply log for vars: {vars_}"
)
for feature in self.variables:
X[feature] = np.log(X[feature])
return X
class DropUnecessaryFeatures(BaseEstimator, TransformerMixin):
    """Drop columns that should not be fed to the model."""

    def __init__(self, variables_to_drop=None):
        self.variables = variables_to_drop

    def fit(self, X, y=None):
        # Nothing to learn; required by the sklearn pipeline API.
        return self

    def transform(self, X):
        """Return a copy of ``X`` without the configured columns."""
        return X.copy().drop(self.variables, axis=1)
| [
"tifaouisaid@gmail.com"
] | tifaouisaid@gmail.com |
9634eb466219c63cc085cd1895ec57eb62ce0188 | 94ed98b2f4eec63be1510cc1555dad064bcc8f13 | /example/mypackage/gui.py | a7104897b98fd8b34cd2a7ddc4d9a617212b18c5 | [
"MIT"
] | permissive | axju/setuptools_freeze | dae496e66e5c6dc5c3d28876a056c8ddd8b570d9 | c1d16bd714f5aec36ea07202f1a466eb0573d839 | refs/heads/master | 2020-07-24T05:43:06.920994 | 2019-09-11T13:32:18 | 2019-09-11T13:32:18 | 207,817,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | import random
from tkinter import Tk, Label, Button, Entry, StringVar, DISABLED, NORMAL, END, W, E
class ConfiguratorGUI:
    """Tkinter GUI for a 1-100 number guessing game.

    NOTE(review): despite the name "ConfiguratorGUI", this class implements
    a guessing game (window title "Guessing Game").
    """
    def __init__(self, master):
        # master: the Tk root window that hosts all widgets.
        self.master = master
        master.title("Guessing Game")
        self.secret_number = random.randint(1, 100)
        self.guess = None       # last valid guess typed by the user; None = empty field
        self.num_guesses = 0
        self.message = "Guess a number from 1 to 100"
        self.label_text = StringVar()
        self.label_text.set(self.message)
        self.label = Label(master, textvariable=self.label_text)
        vcmd = master.register(self.validate) # we have to wrap the command
        self.entry = Entry(master, validate="key", validatecommand=(vcmd, '%P'))
        self.guess_button = Button(master, text="Guess", command=self.guess_number)
        self.reset_button = Button(master, text="Play again", command=self.reset, state=DISABLED)
        # Layout: message on top, entry below it, the two buttons side by side.
        self.label.grid(row=0, column=0, columnspan=2, sticky=W+E)
        self.entry.grid(row=1, column=0, columnspan=2, sticky=W+E)
        self.guess_button.grid(row=2, column=0)
        self.reset_button.grid(row=2, column=1)
    def validate(self, new_text):
        """Key-validation callback: accept only '' or an integer in [1, 100]."""
        if not new_text: # the field is being cleared
            self.guess = None
            return True
        try:
            guess = int(new_text)
            if 1 <= guess <= 100:
                self.guess = guess
                return True
            else:
                return False
        except ValueError:
            return False
    def guess_number(self):
        """Compare the current guess with the secret and update the message label."""
        self.num_guesses += 1
        if self.guess is None:
            self.message = "Guess a number from 1 to 100"
        elif self.guess == self.secret_number:
            # Pluralise "guess" only when more than one attempt was needed.
            suffix = '' if self.num_guesses == 1 else 'es'
            self.message = "Congratulations! You guessed the number after %d guess%s." % (self.num_guesses, suffix)
            self.guess_button.configure(state=DISABLED)
            self.reset_button.configure(state=NORMAL)
        elif self.guess < self.secret_number:
            self.message = "Too low! Guess again!"
        else:
            self.message = "Too high! Guess again!"
        self.label_text.set(self.message)
    def reset(self):
        """Start a new round: clear the entry and draw a new secret number."""
        self.entry.delete(0, END)
        self.secret_number = random.randint(1, 100)
        # NOTE(review): __init__ uses None for "no guess"; reset uses 0 — confirm
        # whether the asymmetry is intentional (0 never equals the secret).
        self.guess = 0
        self.num_guesses = 0
        self.message = "Guess a number from 1 to 100"
        self.label_text.set(self.message)
        self.guess_button.configure(state=NORMAL)
        self.reset_button.configure(state=DISABLED)
def main():
    """Create the Tk root window, attach the game and enter the event loop."""
    window = Tk()
    ConfiguratorGUI(window)
    window.mainloop()
if __name__ == '__main__':
    # Launch the GUI only when executed as a script, not on import.
    main()
| [
"axel.juraske@short-report.de"
] | axel.juraske@short-report.de |
18fb703b9c1e5c75de8537ea454f8900c32b66af | 7a5243e4c91970773e882a45c05ab68b778b2694 | /treasure_hunt/treasure_hunt/models/object_tip.py | 6289802bb99fee38e5886fc857061282ce31dacd | [] | no_license | brunabxs/info-vis | 8a772bf42d5664de81e8cdea935c7d89b64c5204 | 23a9744628f2d616d09ae59c633e6d8b2bf2f0df | refs/heads/master | 2021-01-19T06:24:56.446753 | 2013-12-08T11:23:52 | 2013-12-08T11:23:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | from django.db import models
class ObjectTip(models.Model):
    """A textual hint that points players toward a hidden Object."""
    text = models.TextField()
    related_object = models.ForeignKey("Object")
    # EASY, MEDIUM or HARD (Each object should have at least one tip of each difficulty level)
    difficulty_level = models.CharField(max_length=255)
    def serialize(self):
        """Return a plain-dict representation (only the tip text) for API output."""
        data = {}
        data['text'] = self.text
        return data
    def __str__(self):
        return u"[text]{0} [related obj]{1}".format(self.text, self.related_object.name)
    class Meta:
        app_label = "treasure_hunt"
| [
"luizpericolo@gmail.com"
] | luizpericolo@gmail.com |
0c490d56b46e3f0a4b6cb6f26b399042af3e6b37 | b7f45072d056b80ed49e6bcde91877d8576e970d | /ImageJ/py/load_blobs.py | 3037ef28495247737fab6bf5bc930be4277273f8 | [] | no_license | jrminter/tips | 128a18ee55655a13085c174d532c77bcea412754 | f48f8b202f8bf9e36cb6d487a23208371c79718e | refs/heads/master | 2022-06-14T08:46:28.972743 | 2022-05-30T19:29:28 | 2022-05-30T19:29:28 | 11,463,325 | 5 | 8 | null | 2019-12-18T16:24:02 | 2013-07-17T00:16:43 | Jupyter Notebook | UTF-8 | Python | false | false | 137 | py | """
load_blobs.py
"""
from ij import IJ
IJ.run("Close All")  # close any images left open from a previous run
imp = IJ.openImage("http://imagej.nih.gov/ij/images/blobs.gif")  # fetch the ImageJ sample image
imp.show()  # display it in an ImageJ window
| [
"jrminter@gmail.com"
] | jrminter@gmail.com |
c3436938995466b144d6540550367d401cad712c | c443a7cd3a4c2ea3be746a5cfd343d59a555cff9 | /CIFAR-10 93.67 CIFAR-100 70.52/cifar100_vgg16.py | ff6463b8b313289a64af4cce2ecc54610dfa0235 | [] | no_license | woshidandan/fnnmOS_ELM | 0dacf1944235924aa5fbe174f8c29a6314989615 | 0eb6a5000854284fe050a6ff7001aa93b03c1381 | refs/heads/master | 2021-08-15T18:15:35.985541 | 2020-05-12T03:10:53 | 2020-05-12T03:10:53 | 178,802,233 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,503 | py | import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
# Seed every randomness source (hash seed, NumPy, Python, TensorFlow) so
# training runs are reproducible.  Uses the TF1-era session API.
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(8)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(80)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(800)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import pandas as pd
import keras
from keras.datasets import cifar100
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import Dropout
from keras.layers import Flatten, Dense, Activation
from keras import optimizers
from keras import regularizers
from keras.callbacks import LearningRateScheduler
from sklearn.model_selection import StratifiedShuffleSplit
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
import math
from scipy.stats import binom
import scipy
class VGG16_CIFAR100:
    """VGG16-style convolutional network for CIFAR-100 (TF1-era Keras API).

    Bundles dataset preparation, model construction and training with
    on-the-fly augmentation; hyper-parameters are fixed in ``__init__``.
    """
    def __init__(self):
        self.num_classes = 100      # CIFAR-100 has 100 classes
        self.weight_decay = 0.0005  # L2 regularisation factor for conv/dense kernels
        self.x_shape = [32,32,3]    # CIFAR image shape (height, width, channels)
        self.batch_size = 128
        self.epoches = 250
        self.learning_rate = 0.1
        self.lr_decay = 1e-6
    # Function to create dataset for training and validation of model
    def create_dataset(self):
        """Load CIFAR-100, normalize the images and one-hot encode the labels."""
        num_classes = self.num_classes
        # Create Train and Test datasets:
        (x_train, y_train), (x_test, y_test) = cifar100.load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        # Normalize the data
        x_train, x_test = self.normalize(x_train, x_test)
        # Create one-hot encodings
        y_train = np_utils.to_categorical(y_train, num_classes)
        y_test = np_utils.to_categorical(y_test, num_classes)
        return x_train, y_train, x_test, y_test
    # Function to normalize train and validation datasets
    def normalize(self,X_train,X_test):
        """Standardize both splits using the training split's mean and std."""
        # Compute Mean
        mean = np.mean(X_train,axis=(0, 1, 2, 3))
        # Compute Standard Deviation
        std = np.std(X_train, axis=(0, 1, 2, 3))
        # Normalize the data (1e-7 guards against division by zero)
        X_train = (X_train-mean)/(std+1e-7)
        X_test = (X_test-mean)/(std+1e-7)
        return X_train, X_test
    # Function to build the model
    def buildmodel(self):
        """Assemble the VGG16-like Sequential model (13 conv + 2 dense layers)."""
        weight_decay = self.weight_decay
        num_classes = self.num_classes
        x_shape = self.x_shape
        model = Sequential()
        # First group of convolutional layer
        model.add(Conv2D(64, (3, 3), padding='same',
                         input_shape = x_shape,kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Conv2D(64, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Second group of convolutional layer
        model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(128, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Third group of convolutional layer
        model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(256, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Fourth group of convolutional layer
        model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Fifth group of convolutional layer
        model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.4))
        model.add(Conv2D(512, (3, 3), padding='same',kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Two Fully connected layer
        model.add(Flatten())
        model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))
        return model
    # Function to train the model
    def model_train(self, model, x_train, y_train, x_test, y_test, weights):
        """Load saved weights if ``weights`` is truthy, else train with augmentation
        and a step learning-rate schedule, then persist the weights."""
        if weights: # If model weights are already avaialble
            model.load_weights('cifar100_vgg16.h5')
        else:
            # Training parameters
            batch_size = self.batch_size
            number_epoches = self.epoches
            # NOTE(review): learning_rate is unused below — the effective LR
            # comes from the step_decay schedule; confirm before removing.
            learning_rate = self.learning_rate
            lr_decay = self.lr_decay
            # Data augmentation
            dataaugmentation = ImageDataGenerator(
                featurewise_center=False,  # set input mean to 0 over the dataset
                samplewise_center=False,  # set each sample mean to 0
                featurewise_std_normalization=False,  # divide inputs by std of the dataset
                samplewise_std_normalization=False,  # divide each input by its std
                zca_whitening=False,  # apply ZCA whitening
                rotation_range=15,  # randomly rotate images in the range (degrees, 0 to 180)
                width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
                height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
                horizontal_flip=True,  # randomly flip images
                vertical_flip=False)  # randomly flip images
            dataaugmentation.fit(x_train)
            # Optimization details (lr=0.0 is a placeholder; the scheduler sets it)
            sgd = optimizers.SGD(lr=0.0, decay=lr_decay, momentum=0.9, nesterov=True)
            model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])
            # Function to reduce learning rate by half after every 25 epochs
            def step_decay(epoch):
                # LearningRate = InitialLearningRate * DropRate^floor(Epoch / EpochDrop)
                initial_lrate = 0.1
                drop = 0.5
                epochs_drop = 25.0
                lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
                return lrate
            # Callback for learning rate schedule
            lrate = LearningRateScheduler(step_decay)
            callbacks_list = [lrate]
            # spe = Steps per epoch
            spe = x_train.shape[0] // batch_size
            # Fit the model
            model.fit_generator(dataaugmentation.flow(x_train, y_train,
                                batch_size = batch_size),
                                steps_per_epoch = spe, callbacks=callbacks_list,
                                epochs = number_epoches,
                                validation_data = (x_test, y_test))
            # Save model weights
            model.save_weights('cifar100_vgg16.h5')
        return model
# Create class object
model_cifar100 = VGG16_CIFAR100()
# Training and validation datasets
x_train, y_train, x_test, y_test = model_cifar100.create_dataset()
# Create model
model = model_cifar100.buildmodel()
# Train the model (weights=True loads the saved .h5 instead of retraining)
model = model_cifar100.model_train(model, x_train, y_train, x_test, y_test, weights = True)
# Prediction on test set
predict_test = model.predict(x_test)
# Get highest probability on test set
predict_test_prob = np.max(predict_test,1)
# 0 for correct prediction and 1 for wrong prediction
residuals = (np.argmax(predict_test,1) != np.argmax(y_test,1))
# Loss computation
# NOTE(review): the first term uses np.log10 while the second uses the
# natural np.log — confirm whether mixing bases is intentional.
loss = (-1)*((residuals*np.log10(predict_test_prob)) + ((1-residuals)*np.log(1-predict_test_prob)))
# Checking validation accuracy is matching with our calculations
Accuracy = ((10000 - sum(residuals))/10000)*100
print("Accuracy is: ", Accuracy)
# Splitting the validation dataset for training and testing SGR algorithm
sss = StratifiedShuffleSplit(n_splits=2, test_size=0.5, random_state=8)
for train_index, test_index in sss.split(x_test, y_test):
    sgr_x_train, sgr_x_test = x_test[train_index], x_test[test_index]
    sgr_y_train, sgr_y_test = y_test[train_index], y_test[test_index]
# Prediction on SGR train set
predict_sgr_train = model.predict(sgr_x_train)
# Get highest probability on SGR train set
predict_sgr_train_prob = np.max(predict_sgr_train,1)
# 0 for correct prediction and 1 for wrong prediction on the SGR train set
residuals_sgr_train = (np.argmax(predict_sgr_train,1)!=np.argmax(sgr_y_train,1))
# Loss computation on SGR train set
loss_sgr_train = (-1)*((residuals_sgr_train*np.log10(predict_sgr_train_prob)) + ((1-residuals_sgr_train)*np.log(1-predict_sgr_train_prob)))
# Prediction on SGR test set
predict_sgr_test = model.predict(sgr_x_test)
# Get highest probability on SGR test set
predict_sgr_test_prob = np.max(predict_sgr_test,1)
# 0 for correct prediction and 1 for wrong prediction on the SGR test set
residuals_sgr_test = (np.argmax(predict_sgr_test,1)!=np.argmax(sgr_y_test,1))
# Loss computation on SGR test set
loss_sgr_test = (-1)*((residuals_sgr_test*np.log10(predict_sgr_test_prob)) + ((1-residuals_sgr_test)*np.log(1-predict_sgr_test_prob)))
def calculate_bound(delta, m, risk):
    """Numerically invert the binomial CDF to get a risk upper bound.

    Bisects over b in [risk, 1] for the error rate where
    Bin(floor(m*risk); m, b) == delta, i.e. the (1 - delta) upper
    confidence bound on the true risk given ``m`` samples with empirical
    error rate ``risk``.

    Parameters
    ----------
    delta : float  confidence parameter (e.g. 0.001)
    m     : int    number of samples
    risk  : float  empirical risk on those samples

    Returns
    -------
    float : the risk bound (a probability in (risk, 1)).
    """
    epsilon = 1e-7        # convergence tolerance on the CDF gap
    lo = risk             # lower end of the bisection bracket
    hi = 1                # upper end of the bisection bracket
    mid = (lo + hi) / 2
    # Signed gap between the binomial CDF at the current guess and delta;
    # the CDF is decreasing in its probability argument, so gap > 0 means
    # the guess is still too small.
    gap = (-1 * delta) + scipy.stats.binom.cdf(int(m * risk), m, mid)
    while abs(gap) > epsilon:
        if gap > 0:
            lo = mid
        else:
            hi = mid
        mid = (lo + hi) / 2
        gap = (-1 * delta) + scipy.stats.binom.cdf(int(m * risk), m, mid)
    return mid
def SGR(targetrisk, delta, predict_sgr_train_prob, predict_sgr_test_prob, residuals_sgr_train, residuals_sgr_test):
    """Selective Guaranteed Risk: binary-search a confidence threshold.

    Searches over the sorted train-set confidence scores for the lowest
    threshold theta whose risk bound (via calculate_bound) still meets
    ``targetrisk``, then reports train/test risk and coverage at theta.

    Returns (targetrisk, trainrisk, traincoverage, testrisk, testcoverage, bound).
    """
    # Number of training samples for SGR algorithm
    m = len(residuals_sgr_train)
    # Sort the probabilities
    probs_idx_sorted = np.argsort(predict_sgr_train_prob)
    zmin = 0
    zmax = m-1
    # Split the confidence budget delta across the ~log2(m) search iterations.
    deltahat = delta/math.ceil(math.log2(m))
    for i in range(math.ceil(math.log2(m) + 1)):
        mid = math.ceil((zmin+zmax)/2)
        # Samples at or above the candidate threshold (the "accepted" set).
        mi = len(residuals_sgr_train[probs_idx_sorted[mid:]])
        theta = predict_sgr_train_prob[probs_idx_sorted[mid]]
        trainrisk = sum(residuals_sgr_train[probs_idx_sorted[mid:]])/mi
        # The +1 in the denominator guards against an empty accepted test set.
        testrisk = (sum(residuals_sgr_test[predict_sgr_test_prob>=theta]))/(len(residuals_sgr_test[predict_sgr_test_prob>=theta])+1)
        testcoverage = (len(residuals_sgr_test[predict_sgr_test_prob>=theta]))/(len(predict_sgr_test_prob))
        bound = calculate_bound(deltahat, mi, trainrisk)
        traincoverage = mi/m
        # Bound too loose -> raise the threshold (accept fewer samples);
        # otherwise try lowering it to gain coverage.
        if bound>targetrisk:
            zmin = mid
        else:
            zmax = mid
    return targetrisk, trainrisk, traincoverage, testrisk, testcoverage, bound
# Define confidence level parameter delta
delta = 0.001
# Accumulators for the results table, one entry per target risk value.
desired_risk = []
train_risk = []
train_coverage = []
test_risk = []
test_coverage = []
risk_bound = []
# Different desired risk values
rstar = [0.02, 0.05, 0.10, 0.15, 0.20, 0.25]
# Testing the SGR algorithm for different desired risk values
for i in range(len(rstar)):
    # Run SGR for the i-th desired risk value.
    desiredrisk, trainrisk, traincov, testrisk, testcov, riskbound = SGR(rstar[i],delta, predict_sgr_train_prob, predict_sgr_test_prob, residuals_sgr_train, residuals_sgr_test)
    # Append the values to the list
    desired_risk.append(desiredrisk)
    train_risk.append(trainrisk)
    train_coverage.append(traincov)
    test_risk.append(testrisk)
    test_coverage.append(testcov)
    risk_bound.append(riskbound)
Result = [('Desired Risk', desired_risk) ,
          ('Train Risk', train_risk),
          ('Train Coverage', train_coverage),
          ('Test Risk', test_risk),
          ('Test Coverage', test_coverage),
          ('Risk bound', risk_bound)]
# FIX: pd.DataFrame.from_items was deprecated in pandas 0.23 and removed in
# 1.0; building from a dict preserves insertion (column) order on Python 3.7+.
Result = pd.DataFrame(dict(Result))
print(Result)
| [
"2281444815@qq.com"
] | 2281444815@qq.com |
7cd502690ea2014302a0150c24ced0dd7e8e6aba | 5e88b026e477a3945e221030b99877fe882ff71b | /autoxd/cnn_boll/load_img.py | fb12490e604fcd0f6b76af5b8cb345215d9dd748 | [] | no_license | wgcgxp/autoxd | c038a623f2f41537930e157ae741d2c66ec50e4a | 6e350bce34ec5b089f9e5cf4a522b07e6016024c | refs/heads/master | 2022-11-28T08:03:24.093447 | 2020-08-17T07:36:00 | 2020-08-17T07:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,857 | py | #coding:utf8
from __future__ import print_function
import os
import cv2 #opencv-python
import matplotlib.pyplot as pl
import matplotlib.ticker as tic
from PIL import Image
import numpy as np
from autoxd.cnn_boll import env
from autoxd.cnn_boll.pearson_clust import g_fname_csv, load_data as main_load_data
import pandas as pd
# the data, split between train and test sets
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
def show_imgs():
    """Preview up to 10 labelled images in a tightly-packed matplotlib grid."""
    img_path = 'img_labels/imgs/'
    files = os.listdir(img_path)
    fig = pl.figure(figsize=(18,10))
    pl.subplots_adjust(wspace =0.01, hspace =0.01, left=0, right=1, bottom=0,top=1)# tighten the spacing between subplots
    #fig, ax = pl.subplots(len(files[:10])/4+1, 4,figsize=(20,10))
    #ax2 = np.array(ax).reshape((1,-1))[0]
    col = 6
    for i,f in enumerate(files[:10]):
        print(f)
        fname = img_path + f
        # NOTE(review): len(...)/col is true division under Python 3, producing
        # a float row count — presumably integer division (//) was intended.
        ax = pl.subplot(len(files[:10])/col+1, col, i+1)
        img = Image.open(fname)
        #ax2[i].imshow(np.array(img))
        pl.imshow(np.array(img))
        #temp = tic.MaxNLocator(3)
        #ax.yaxis.set_major_locator(temp)
        #ax.set_xticklabels(())
        ax.title.set_visible(False)
        #pl.axis('equal')
        pl.axis('off')
        #pl.tight_layout()# adjust overall whitespace
    pl.show()
class Label2Id:
    """Mapping between label descriptions of the form "i,j,m,n" and integer ids.

    The table is persisted as datas/label_desc.csv under the project root;
    a description's id is its row index in the table.
    """
    fname = 'label_desc.csv'
    def __init__(self, reinit=False):
        # reinit=True rebuilds the table and rewrites the CSV from scratch.
        self.fname = os.path.join(env.get_root_path(), 'datas',self.fname)
        if os.path.exists(self.fname) and reinit==False:
            self.df = pd.read_csv(self.fname)
        else:
            result = self._initLabeDescTable()
            self.df = pd.DataFrame(result)
            self.df.to_csv(self.fname)
            print(self.df)
            print('write label_desc_table')
    def _initLabeDescTable(self):
        """Enumerate all 3^4 = 81 descriptions "i,j+3,m+6,n+9" in fixed order."""
        decss=[]
        for i in range(3):
            for j in range(3):
                for m in range(3):
                    for n in range(3):
                        s = ("%d,%d,%d,%d")%(i,j+3,m+6,n+9)
                        decss.append(s)
        return decss
    def label_desc_to_label_id(self,label_desc):
        """Convert a combination description (e.g. '2,3,7,9') to its row-index id.

        Returns np.nan when the description is not present in the table.
        """
        try:
            # Match against the last column, which holds the description text.
            row = self.df[self.df[self.df.columns[-1]] == label_desc]
            id = row.index[0]
        except:
            return np.nan
        return id
    def get_desc(self, id):
        """Return the description string stored at row ``id``."""
        row = self.df.iloc[id]
        return row[self.df.columns[-1]]
def BollsToImg(bolls):
    """Normalise Bollinger-band data around the mean of the middle band.

    The average of ``mid`` is taken as the reference point; the bands are
    returned as relative offsets from it, scaled by 10000.
    """
    _, mid, _ = bolls
    reference = np.average(mid)
    return (bolls - reference) / reference * 10000
def load_data(num=-1, method='img'):
    """Load labelled boll images (or raw boll data) for CNN training.

    num: number of image files to use.  NOTE(review): with the default -1
         the slice ``files[:num]`` DROPS the last file instead of keeping
         all of them — confirm whether that is intended.
    method: 'img' loads resized grayscale images via cv2;
            'data' rebuilds normalised boll arrays from the raw datasets.
    return: (x_train, y_train), (x_test, y_test) split 80/20;
            x_* is np.ndarray (num, row, col), y_* are uint8 label ids.
    """
    img_path = os.path.join(env.get_root_path(),'img_labels/imgs/')
    files = os.listdir(img_path)
    files = files[:num]
    datas = None
    pre_code = ''
    imgs = []
    labels = []
    n = 28
    label_converter = Label2Id()
    for f in files:
        fname = img_path + f
        #label
        # Unlike the loading in pearson_clust, here we iterate over the files.
        # File names look like "<code>_<datas_index>.<ext>".
        f = f.split('.')[0]
        code, datas_index = str(f).split('_')
        print(code)
        datas_index = int(datas_index)
        label_path = os.path.join(env.get_root_path() ,('datas/%s/%s')%(code, g_fname_csv))
        # Waits on the manual-labelling results; manual labels are grouped later.
        table_colmns = "id,datas_index,code,dt,tick_period,clust_id,label_id,label_desc".split(',')
        df = pd.read_csv(label_path)
        label = df[df[table_colmns[1]] == int(datas_index)]
        label_id = np.nan
        if len(label)>0:
            label = label[table_colmns[-1]].values[0]
            # Strip a trailing comma left over from manual label entry.
            if isinstance(label, str) and label[-1] == ',':
                label = label[:-1]
            label_id = label_converter.label_desc_to_label_id(label)
        labels.append(label_id)
        #img
        if method == 'img':
            img = cv2.imread(fname, cv2.IMREAD_GRAYSCALE)
            img = cv2.resize(img, (64*3, 48*3))
            img = np.array(img)
            # Blank out pure-white pixels (background) to zero.
            img[img==255] = 0
        if method == 'data':
            # Reload the raw dataset only when the stock code changes.
            if pre_code != code:
                datas = main_load_data(code)
            # normalise
            bolls = datas[datas_index]
            img = BollsToImg(bolls)
            img = img.astype(np.uint8)
            #print(img)
        imgs.append(img)
    #for i in range(5):
        #imgs += imgs
        #labels += labels
    data = np.array(imgs)
    labels = np.array(labels).astype(np.uint8)
    len_data = len(data)
    len_labels = len(labels)
    assert(len_data == len_labels)
    # 80/20 train/test split over the file order.
    split_len_pos = int(len_data*0.8)
    return (data[:split_len_pos], labels[:split_len_pos]),(data[split_len_pos:], labels[split_len_pos:])
if __name__ == "__main__":
    #test
    obj = Label2Id(reinit=True)
    id = obj.label_desc_to_label_id('2,3,7,9')
    print(id, obj.get_desc(id))
    print(obj.get_desc(0))
    exit(0)
    # NOTE: everything below is unreachable because of the exit(0) above.
    #from keras.datasets import mnist
    #datas = mnist.load_data()
    #(x_train, y_train), (x_test, y_test) = datas
    ##x_train dtype=uint8
    #print(x_train.shape)
    #exit(0)
    (x_train, y_train), (x_test, y_test) = load_data()
    print(x_train.shape)
    print(y_train.shape)
    #print(data[0])
    from autoxd import agl
    agl.print_ary(x_train[0])
| [
"wang--kang@sohu.com"
] | wang--kang@sohu.com |
66b04227002a1607067f96719538442273eec5ec | 1a27fa6447b2ef29f71833658a27564e48b08be6 | /authentication/urls.py | 4a2b0393902a71eb3223082d85f7841ffeee50f6 | [
"MIT"
] | permissive | suryadana/django_rest | 2af80e6df3c6081c4b18c05b997cf45da3e21a88 | ffd3c3988e9f4ba973ef0b3d2b49232885f1b457 | refs/heads/master | 2020-03-26T09:28:12.358593 | 2018-08-14T18:00:29 | 2018-08-14T18:00:29 | 144,750,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.urls import path
from authentication.views import (
UserCreateAPIView, UserLoginAPIView
)
urlpatterns = [
path('auth/', UserLoginAPIView.as_view(), name="login"),
path('auth/register/', UserCreateAPIView.as_view(), name='register')
] | [
"suryadana80@gmail.com"
] | suryadana80@gmail.com |
dd4330f60b86977ec40540239079d1a8fef5d657 | 5e84bfc75213f94190131ae86045903304e87242 | /whizdom/wsgi.py | beb18d78cc23476dc7f001d3cb86283117706080 | [] | no_license | waqashamid/whizdom | ac30974ed62a1808ef34a4dae2d0bea014ac4ccb | b0eba30d3a1d0f51b384abb2325a76fdca3cc7be | refs/heads/master | 2022-12-11T15:15:21.595126 | 2018-12-10T11:14:14 | 2018-12-10T11:14:14 | 140,866,190 | 1 | 0 | null | 2022-11-22T02:54:27 | 2018-07-13T15:51:05 | JavaScript | UTF-8 | Python | false | false | 391 | py | """
WSGI config for whizdom project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "whizdom.settings")
application = get_wsgi_application()  # the WSGI callable the server imports
| [
"waqashamid722@gmail.com"
] | waqashamid722@gmail.com |
1b34b90e2f67217372f0c6145c4d7165d41135b2 | 6927c9e894a813749bc20d4f6d845b49cfd3fe1f | /scanner.py | f72abe6bcc33d2a776739d97b5ffe5e73d29dc40 | [] | no_license | tempCross/BHP_scripts | 4e7492cea85dc18d33252363999faed050656680 | 7eb3096c4c38ccae3d5924826bd835a4a2d3fc59 | refs/heads/master | 2021-01-22T01:14:17.469843 | 2018-07-26T16:25:26 | 2018-07-26T16:25:26 | 102,213,007 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,617 | py | #!/usr/bin/python
import socket
import threading
import time
from netaddr import IPNetwork,IPAddress
import os
import struct
from ctypes import *
from struct import *
# host to listen on (local interface address the raw-socket sniffer binds to)
host = "192.168.1.187"
# subnet to target
subnet = "192.168.1.0/24"
# magic string we'll check ICMP responses for
magic_message = "PYTHONRULES"
# this sprays out the UDP datagrams
def udp_sender(subnet, magic_message):
    """Spray the magic UDP datagram at every address in ``subnet``.

    Closed UDP ports answer with ICMP "port unreachable"; the sniffer in
    the main loop uses those replies (carrying the magic payload) to infer
    that a host is up.
    """
    # Give the sniffer a moment to start before we begin sending.
    time.sleep(5)
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for ip in IPNetwork(subnet):
        try:
            sender.sendto(magic_message, ("%s" % ip, 65212))
        except socket.error:
            # Best-effort spray: ignore addresses we cannot send to, but —
            # unlike the original bare except — no longer swallow
            # KeyboardInterrupt/SystemExit.
            pass
# our IP header
class IP(Structure):
    """ctypes overlay of an IPv4 header parsed straight from a raw buffer."""
    _fields_= [
        ("ihl", c_ubyte, 4),
        ("version", c_ubyte, 4),
        ("tos", c_ubyte),
        ("len", c_ushort),
        ("id", c_ushort),
        ("offset", c_ushort),
        ("ttl", c_ubyte),
        ("protocol_num", c_ubyte),
        ("sum", c_ushort),
        ("src", c_uint32),
        ("dst", c_uint32)
    ]
    def __new__(self, socket_buffer=None):
        # Map the raw packet bytes directly onto the structure fields.
        return self.from_buffer_copy(socket_buffer)
    def __init__(self, socket_buffer=None):
        # map protocol constants to their names
        self.protocol_map = {1:"ICMP", 6:"TCP", 17:"UDP"}
        # human readable IP addresses
        self.src_address = socket.inet_ntoa(struct.pack("@I",self.src))
        self.dst_address = socket.inet_ntoa(struct.pack("@I",self.dst))
        # human readable protocol
        try:
            # NOTE(review): this CALLS the dict (parentheses instead of
            # square brackets), so it always raises and control falls into
            # the except branch.  The sniffer loop below depends on that
            # fallback — it compares ip_header.protocol against the numeric
            # strings "1"/"6"/"17" — so fixing the lookup to
            # protocol_map[...] would also require changing those
            # comparisons.
            self.protocol = self.protocol_map(self.protocol_num)
        except:
            self.protocol = str(self.protocol_num)
class ICMP(Structure):
    """ctypes overlay of an ICMP header (destination-unreachable layout)."""
    _fields_ = [
        ("type", c_ubyte),
        ("code", c_ubyte),
        ("checksum", c_ushort),
        ("unused", c_ushort),
        ("next_hop_mtu", c_ushort)
    ]
    def __new__(self, socket_buffer):
        # Map the raw bytes directly onto the structure fields.
        return self.from_buffer_copy(socket_buffer)
    def __init__(self, socket_buffer):
        # All parsing happens in __new__; nothing further to derive here.
        pass
# this should look familiar from previous example
if os.name == "nt":
    # Windows lets us sniff all IP; elsewhere we are restricted to ICMP.
    socket_protocol = socket.IPPROTO_IP
else:
    socket_protocol = socket.IPPROTO_ICMP
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)
sniffer.bind((host, 0))
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
if os.name == "nt":
    # Enable promiscuous mode on Windows.
    sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
# start sending packets
t = threading.Thread(target=udp_sender,args=(subnet,magic_message))
t.start()
try:
    while True:
        # read in a packet
        raw_buffer = sniffer.recvfrom(65565)[0]
        #create an IP header from the first 20 bytes of the buffer
        # NOTE(review): the slice actually takes 32 bytes, not 20.
        ip_header = IP(raw_buffer[0:32])
        #map protocols
        # NOTE(review): IP.__init__ always falls back to the numeric string,
        # which is why the comparisons below use "1"/"6"/"17".  If the very
        # first packet is none of these, `proto` is unbound and the check
        # further down raises NameError.
        if ip_header.protocol == "1":
            proto = "ICMP"
        if ip_header.protocol == "6":
            proto = "TCP"
        if ip_header.protocol == "17":
            proto = "UDP"
        # print out the protocol that was detected and the hosts
        # print "Protocol: %s %s -> %s" % (proto, ip_header.src_address, ip_header.dst_address)
        # if it's ICMP, we want it
        if proto == "ICMP":
            # calculate where our ICMP packet starts
            offset = ip_header.ihl * 4
            buf = raw_buffer[offset:offset + sizeof(ICMP)]
            # create our ICMP structure
            icmp_header = ICMP(buf)
            #print "ICMP -> Type: %d Code: %d" % (icmp_header.type, icmp_header.code)
            # now check for the TYPE 3 and CODE 3 (port unreachable)
            if icmp_header.code == 3 and icmp_header.type == 3:
                #make sure host is in our target subnet
                if IPAddress(ip_header.src_address) in IPNetwork(subnet):
                    # make sure it has magic message
                    if raw_buffer[len(raw_buffer)-len(magic_message):] == magic_message:
                        print "Host Up: %s" % ip_header.src_address
# handle CTRL-C
except KeyboardInterrupt:
    # if we're using Windows, turn off promiscuous
    if os.name == "nt":
        sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
| [
"noreply@github.com"
] | noreply@github.com |
b2bc6d700ba40275585cfc1a818d9a36ad0f782c | 0d0c13d80924b6e5cfc74a623eb250a5fd2e2cca | /Math/Compute nCr % m.py | 7a9a4904bffa1b09667071e3a52d2ff5587639ea | [
"Apache-2.0"
] | permissive | Akashdeep-Patra/problemSolving | 54e2fc3c3a9587b8c976921f6fc45364af1dfcac | c278e5d090af7370e56789e68b7bb73dc37165f8 | refs/heads/master | 2022-11-15T19:20:54.585886 | 2020-06-29T10:47:39 | 2020-06-29T10:47:39 | 258,956,787 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | """
Compute nCr % m
Problem Description
Given three integers A, B and C, where A represents n, B represents r and C represents m, find and return the value of nCr % m where nCr % m = (n!/((n-r)!*r!))% m. x! means factorial of x i.e. x! = 1 * 2 * 3... * x.
Problem Constraints
1 <= A * B <= 106 1 <= B <= A 1 <= C <= 106
Input Format
The first argument given is integer A ( = n).
The second argument given is integer B ( = r).
The third argument given is integer C ( = m).
Output Format
Return the value of nCr % m.
Example Input
Input 1:
A = 5
B = 2
C = 13
Input 2:
A = 6
B = 2
C = 13
Example Output
Output 1:
10
Output 2:
2
Example Explanation
Explanation 1:
The value of 5C2 % 13 is 10.
Explanation 2:
The value of 6C2 % 13 is 2.
"""
class Solution:
    # @param A : integer  (n)
    # @param B : integer  (r)
    # @param C : integer  (m)
    # @return an integer
    def solve(self, n, r, m):
        """Return nCr % m via Pascal's triangle with a single rolling row.

        Only the first r+1 entries of each row are needed, and every
        addition is reduced modulo m so intermediate values stay bounded
        (the original accumulated full binomials, which grow into huge
        Python integers for large n).  (a + b) % m commutes with
        addition, so reducing each step leaves the answer unchanged.

        Time O(n*r), space O(r).
        """
        dp = [0] * (r + 2)
        dp[0] = 1
        dp[1] = 1
        pos = 2  # one past the last meaningful entry, capped at r
        for i in range(1, n):
            # Update in place right-to-left so dp[j-1] is still row i's value.
            for j in range(pos, 0, -1):
                dp[j] = (dp[j] + dp[j - 1]) % m
            if pos < r:
                pos += 1
        return dp[r] % m
| [
"adeep8961@gmail.com"
] | adeep8961@gmail.com |
4c546506530b955241f0ab629a3c62ba306ed6e9 | eeb351838cfb6af781586c0d67e2eb5efade738e | /dynamicProgrammin/climbStairs.py | 427026889857c7b5a1e1ddc4a1c7672ee9acd607 | [] | no_license | iCodeIN/Data-Structures-and-Algoritms | 117a1191e3d4ac8a8c950fcec658fdc475360178 | 32f7cd0961abb293ec2c1d08c5686e4383a9f720 | refs/heads/main | 2023-02-14T19:27:47.209714 | 2021-01-13T02:39:56 | 2021-01-13T02:39:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | def climbStairs(n):
def climbStairs(n):
    """Number of distinct ways to climb ``n`` stairs taking steps of 1 or 2.

    This is the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2).
    Improvements over the original: O(1) space instead of an O(n) table,
    and non-positive ``n`` returns 0 instead of raising IndexError.
    """
    if n <= 0:
        return 0
    if n <= 2:
        return n  # one way for a single stair, two ways for two
    prev, curr = 1, 2
    for _ in range(3, n + 1):
        prev, curr = curr, prev + curr
    return curr
| [
"rucha.d1690@gmail.com"
] | rucha.d1690@gmail.com |
37acdcb18c346f0b966a42f60f2355c4666e132f | f8ecf2d7a4f3c74286ae2ea7ef592548c7a3b267 | /647_string_Palindromic.py | 1298f49b6fad57de3254a72e38d750de68cffcd5 | [] | no_license | fatterZhang/leetcodeComments | beebec976465986f15ddf119f58fbe7702e55839 | 1535a92c2e141f86ce6c5a784d7d55ad05ed044f | refs/heads/master | 2021-03-25T13:11:09.816365 | 2020-04-30T02:39:19 | 2020-04-30T02:39:19 | 247,621,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,192 | py | # -*- coding: utf-8 -*-#
# Name: 647_string_Palindromic
# Author: ARCHI
# Date: 2020/3/20
# Description: 字符串相关的回文串
# -------------------------------------------------------------------------------
# Manacher's Algorithm 马拉车算法 求回文串
# 参考链接: https://www.cnblogs.com/grandyang/p/4475985.html https://www.jianshu.com/p/392172762e55
from typing import List
def manacher_detail(s: str):
    """Verbose, step-by-step walk-through of Manacher's algorithm.

    Prints (in Chinese) every decision taken while computing the
    palindrome-radius array ``p`` over the '#'-interleaved string; used
    purely as a teaching/debugging aid.

    NOTE(review): resLen/resCenter are computed but never returned, and
    the trailing ``i += 1`` is a no-op since ``i`` is rebound by the
    ``for`` loop on the next iteration.
    """
    if not s:
        return None
    # Interleave '#' separators (with a '$' sentinel) so even- and
    # odd-length palindromes are handled uniformly.
    s = "$#" + '#'.join(list(s)) + "#"
    p = [0] * len(s)
    print(s)
    mx, idx, resLen, resCenter = 0, 0, 0, 0
    for i in range(1, len(s)):
        print(idx, mx, i)
        print(s)
        print(p)
        if i >= mx:
            print(' 以s[%d]=%s为中心的回文串,超出了MX=%d'%(i, s[i], mx))
            p[i] = 1
        else:
            print(' 以s[%d]=%s为中心的回文串, 中心在 MX=%d内' % (i, s[i], mx))
            j = 2*idx - i
            print("  以idx=%d为中心, i=%d 的对称位置为j=%d"%(idx, i, j))
            if p[j] < mx - i:
                print("   p[j] = %d, 小于当前中心i=%d到MX=%d的长度"%(p[j], i, mx))
                p[i] = p[j]
            else:
                print("   以s[%d]=%s为中心的回文串长度%d, 不小于mx-i=%d"%(j, s[j], p[j], mx-i))
                p[i] = mx - i
                while i+p[i] < len(s) and s[i+p[i]] == s[i-p[i]]:
                    p[i] += 1
                    print("   边界向两侧延申。。。。%d"%(i + p[i]))
        print("   最终确定,以s[%d]=%s为中心的回文串半径为%d"%(i, s[i], p[i]))
        # Extend the furthest-right boundary (and its center) if we passed it.
        if mx < i + p[i]:
            print("   调整当前可达的最右边界 %d ---> %d, 和其中心 %d ---> %d"%(mx, i+p[i], idx, i))
            mx, idx = i + p[i], i
        # Track the center and radius of the longest palindrome seen so far.
        if resLen < p[i]:
            resLen, resCenter = p[i], i
        i += 1
def manacher(s: str) -> List[int]:
    """Manacher's algorithm: radius array for the '#'-interleaved string.

    Returns the list ``p`` where ``p[i]`` is the palindrome radius centered at
    position ``i`` of ``"@#" + '#'.join(s) + "#"``; returns None for empty
    input (kept for backward compatibility with the original).

    Change vs. original: the stray debug ``print`` calls were removed.
    """
    if not s:
        return None
    t = "@#" + '#'.join(s) + "#"
    p = [0] * len(t)
    # mx: right boundary (exclusive) of the rightmost palindrome; idx: its center
    mx, idx = 0, 0
    for i in range(1, len(t)):
        # Inside the known palindrome, start from the mirror's radius.
        p[i] = min(p[2 * idx - i], mx - i) if i < mx else 1
        # Expand; the '@' sentinel stops the left side, the right side is bounds-checked.
        while i + p[i] < len(t) and i - p[i] >= 0 and t[i + p[i]] == t[i - p[i]]:
            p[i] += 1
        if i + p[i] > mx:
            mx, idx = i + p[i], i
    return p
def longestPalindrome(s: str) -> str:
    """Return the longest palindromic substring of *s* using Manacher's
    algorithm; the empty string yields "".

    Change vs. original: the stray debug ``print`` calls were removed.
    """
    if not s:
        return ""
    t = "@#" + "#".join(s) + "#"
    p = [0] * len(t)
    # mx: right boundary (exclusive) of rightmost palindrome, idx its center,
    # best: center of the longest palindrome found so far.
    mx, idx, best = 0, 0, 0
    for i in range(len(t)):
        p[i] = min(p[2 * idx - i], mx - i) if i < mx else 1
        while i + p[i] < len(t) and i - p[i] >= 0 and t[i + p[i]] == t[i - p[i]]:
            p[i] += 1
        if i + p[i] > mx:
            mx, idx = i + p[i], i
        if p[i] > p[best]:
            best = i
    # The radius-wide slice around `best` contains the answer with '#' padding.
    return t[best - p[best] + 1: best + p[best]].replace("#", '')
if __name__ == "__main__":
    # Manual check: the longest palindrome in "bananas" is "anana".
    s = "bananas"
    print(longestPalindrome(s))
# print(manacher_detail(s)) | [
"1210188542@qq.com"
] | 1210188542@qq.com |
4e2e4808b312d1b05fb71850e9bbf88c06f48c2b | cb42eef9aba4fc1a0dc808aae00e73cd5b44d7c0 | /webapp/test_semantic.py | 8f0ce747ca229deca3562a9f33c19139d7b21ae7 | [] | no_license | xintao222/talkcode | 9ad997533699698933a3339e2c644be3d827942f | 28533b00036b6c0931f44aa63988679f601db502 | refs/heads/master | 2021-01-22T00:19:32.823257 | 2011-03-23T22:59:08 | 2011-03-23T22:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def word_feats(words):
    """Build an NLTK feature dict: each stemmed token maps to True."""
    return {stemmer.stem(token): True for token in words}
def treat_parts(parts):
    """Return, for each 'key=value' field, the token after the first '='."""
    values = []
    for field in parts:
        values.append(field.split("=")[1])
    return values
def read_corpus(path):
    """Parse an MPQA-style .tff subjectivity lexicon.

    Each line holds space-separated ``key=value`` fields; fields 2, 0 and 5
    are taken as (word, subjectivity, polarity).  The trailing newline is
    stripped from the last field via ``[:-1]``, as in the original.

    Fix vs. original: the file handle was never closed (the function returned
    from inside the read loop); a ``with`` block now guarantees closing.
    """
    words = []
    with open(path) as f:
        for line in f:
            parts = treat_parts(line.split(" "))
            # format: word, subjectivity, polarity
            words.append((parts[2], parts[0], parts[5][:-1]))
    return words
# Build the training set: NLTK movie_reviews documents plus single-word
# examples drawn from the MPQA lexicon ("corpus.tff").  NOTE: Python 2 code
# (see the print statement below).
our_corpus = read_corpus("corpus.tff")
negids = movie_reviews.fileids('neg')
posids = movie_reviews.fileids('pos')
# Document-level bag-of-stems features from the movie reviews ...
negfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'neg') for f in negids]
# ... plus one-word examples from lexicon entries; f = (word, subjectivity, polarity)
negfeats.extend([(word_feats([f[0]]), 'neg') for f in our_corpus if f[2] == 'negative'])
posfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'pos') for f in posids]
posfeats.extend([(word_feats([f[0]]), 'pos') for f in our_corpus if f[2] == 'positive'])
# 10000 empty-feature examples give the classifier a 'neutral' prior.
neutralfeats = [(word_feats(['']), 'neutral') for w in range(10000)]
trainfeats = posfeats + negfeats + neutralfeats
print len(posfeats), len(negfeats), len(neutralfeats)
polar_classifier = NaiveBayesClassifier.train(trainfeats)
def classify_words(words):
    """Classify each word with the module-level Naive Bayes classifier.

    Returns a dict with 'pos', 'neg' and 'neutral' counts.  The per-word
    print is debug output; `global` is unnecessary for read-only access but
    kept byte-for-byte.
    """
    global polar_classifier
    stats = {}
    stats['pos'] = 0
    stats['neg'] = 0
    stats['neutral'] = 0
    for word in words:
        classification = polar_classifier.classify(word_feats([word]))
        print (word, classification)
        stats[classification] += 1
    return stats
if (__name__ == "__main__"):
    # Smoke test: classify a hand-picked word list and print polarity counts.
    stats = classify_words(['nailed','shit','drop','little', 'ability', 'death', 'fear', \
    'courage', 'cool', 'unsuccessful', 'congratulations'])
    print stats['pos']
    print stats['neg']
| [
"disbeat@gmail.com"
] | disbeat@gmail.com |
852dfc6a9ea5e8763d80ec53520b8a7e3695a9a9 | 7e104ce38b6b4270a8c3b54c3726ce2c0024ea66 | /betsy/views/order_item.py | f0d781c0b77ac3af363caef091704f4f477fa56b | [] | no_license | anselrognlie/betsy-flask | d8ffa564066496e3e6b726c1b7133c58f876b710 | 0adda0e84320795a701a3f57bb9a785e405bd01a | refs/heads/main | 2023-02-06T11:56:38.491336 | 2020-12-20T18:32:46 | 2020-12-20T18:32:46 | 319,725,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,435 | py | # pylint: disable=missing-module-docstring
from flask import Blueprint
from flask import redirect
from flask import url_for
from flask import flash
from ..logging.logger import logger
from ..models.order_item import OrderItem
from .helper.auth_helper import get_current_user, require_login
bp = Blueprint("order_item", __name__, url_prefix="/order_items")
@bp.route('/<id>/delete', methods=('POST',))
def delete(id):  # pylint: disable=invalid-name, redefined-builtin
    """Delete a single order item, flashing an error message on failure."""
    error = None
    item = OrderItem.find_by_id(id)
    if item is None:
        error = "Could not update order"
    else:
        try:
            item.delete()
        except Exception:  # pylint: disable=broad-except
            error = 'Failed to delete order item'
            logger.exception(error)
    if error:
        flash(error, 'error')
    return redirect(url_for('order.cart'))
@bp.route('/<id>/ship', methods=('POST',))
@require_login
def ship(id):  # pylint: disable=invalid-name, redefined-builtin
    """Mark an order item shipped by the logged-in merchant; flash on failure."""
    error = None
    item = OrderItem.find_by_id(id)
    if item is None:
        error = "Could not ship order"
    else:
        try:
            item.ship(get_current_user())
        except Exception:  # pylint: disable=broad-except
            error = 'Failed to ship order item'
            logger.exception(error)
    if error:
        flash(error, 'error')
    return redirect(url_for('merchant.orders'))
| [
"ansel.rognlie@gmail.com"
] | ansel.rognlie@gmail.com |
5a5e52126f3d65f4e181b73cf8ef52d1509c7bbe | 49800e971c605d74d0841a9bb07a618ad1fc6e49 | /web/apps/nosari/urls.py | d7af7bec90c2311f11dcb3cfe7f3bacf8d6b4a99 | [] | no_license | cerfelix/AntiSARI | ab0c9bd96c8044cd806d26db7b6eea67cf008f70 | 8a217390c367d2af65fd373cbf5794eaa841efea | refs/heads/master | 2020-12-22T10:12:25.454134 | 2020-01-29T09:50:13 | 2020-01-29T09:50:13 | 236,748,324 | 0 | 0 | null | 2020-01-28T14:02:09 | 2020-01-28T14:02:08 | null | UTF-8 | Python | false | false | 235 | py | # _*_coding:utf-8_*_
"""
@ProjectName: AntiSARI
@Author: Javen Yan
@File: urls.py
@Software: PyCharm
@Time : 2020/1/28 下午1:58
"""
from web.apps.nosari.controller import NoSariHandler
# Route table: the empty pattern maps the handler at the app root.
urlpatterns = [
    (r'', NoSariHandler)
]
| [
"2023335616@qq.com"
] | 2023335616@qq.com |
0b25817eb5e11c92de72378394c3f884aae9f3f1 | 4b2719da49b1beba62c043c7f099d3744b86e142 | /d010.py | c3e32153f7c45e54f3109c4fdda9d70af96178d0 | [] | no_license | fcvlh/python-cursoemvideo | 68785b5fb09a6c9118cf0cae92b3bc3a6d0f5254 | 26d387716bb998c4882982f586bd375e7731e27b | refs/heads/master | 2020-04-30T20:22:59.034694 | 2019-03-25T01:47:44 | 2019-03-25T01:47:44 | 177,064,646 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | v = float(input('Qual é o valor que você tem disponível? '))
d = float(3.27)  # fixed exchange rate: reais per dollar (float() call is redundant but kept)
c = float(v/d)   # dollars purchasable with the v reais read above
print('Com R$ {:.2f} você pode comprar {:.2f} dólares.'.format(v, c))
| [
"felipecarvalho4@hotmail.com"
] | felipecarvalho4@hotmail.com |
096008b72b4bbc1a8ce44f32e2520421c06122cb | 448087b1e3335cd4f43172f0e0666f3bb8f351a0 | /awd-lstm-单分类/visualizationTheVector.py | 6b23e7f4327530d09233ebcaf9aa36e98e0efe1c | [] | no_license | NightmareVoid/LSTM_for_EEG | b87833dc6cee97155a894af1357371764b6cc2bf | bd55bea958c6460f9bd518bfe953651fce4f7168 | refs/heads/master | 2020-04-13T08:32:33.504765 | 2019-12-26T11:26:49 | 2019-12-26T11:26:49 | 163,083,858 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,792 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 21:17:16 2019
@author: night
"""
# .::::.
# .::::::::.
# :::::::::::
# ..:::::::::::'
# '::::::::::::'
# .::::::::::
# '::::::::::::::..
# ..::::::::::::.
# ``::::::::::::::::
# ::::``:::::::::' .:::.
# ::::' ':::::' .::::::::.
# .::::' :::: .:::::::'::::.
# .:::' ::::: .:::::::::' ':::::.
# .::' :::::.:::::::::' ':::::.
# .::' ::::::::::::::' ``::::.
# ...::: ::::::::::::' ``::.
# ```` ':. ':::::::::' ::::..
# '.:::::' ':'````..
# 美女保佑 永无BUG
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.image
import matplotlib as mpl
#import os
import scipy.io
#import imageio
from PIL import Image
import sklearn.preprocessing as skp
from sklearn.metrics import confusion_matrix
mpl.rcParams['font.sans-serif'] = ['KaiTi']
mpl.rcParams['font.serif'] = ['KaiTi']
#取数据
#raw=scipy.io.loadmat('D:/OneDrive/EEG-python/awd-lstm-单分类/15629427777810395_visualization_LZ_VCEEGall_50-250ms_all_torch_9_part1.mat')
raw=scipy.io.loadmat('D:/EEG/model/AWD-LSTM/lz/50-250_9_vis/15631020158609197_ACC=0.88_loss=0.49_epoch=152_vis.mat')
#test='train'
test='val'
if test == 'train':
output=raw['output'].astype('int32') #预测值 1 x ANY
targets=raw['targets'].astype('int32') #.....真值 1 x ANY
vector=raw['vector'].astype('float32') #特征向量
else:
output=raw['val_output'].astype('int32')
targets=raw['val_targets'].astype('int32')
vector=raw['val_vector'].astype('float32')
del(raw)
# Per-class mean feature vectors and a confusion-count matrix
# (translated from: "compute averaged feature vectors and confusion matrix").
chaos=np.zeros([len(np.unique(targets)),len(np.unique(targets))])
# One bucket of feature vectors per class; assumes exactly 8 classes -- TODO confirm
vec=[[],[],[],[],[],[],[],[]]
for i in range(targets.size):
    if targets[0,i]==output[0,i]:
#        if chaos[targets[0,i],targets[0,i]]<
        # Correct prediction: count on the diagonal and keep its feature vector.
        chaos[targets[0,i],targets[0,i]]+=1
        vec[targets[0,i]].append(vector[i,:])
    else:
        # Misclassification: off-diagonal count only.
        chaos[targets[0,i],output[0,i]]+=1
vec=[np.array(j) for j in vec]
# Column vector (64x1) of the mean feature per class; assumes 64-dim features -- TODO confirm
vec_mean=[q.mean(0).reshape(64,1) for q in vec]
#for i,j in enumerate(vec_mean):
#    plt.bar(np.arange(0,64),j.reshape(64,),1)
#    plt.axis('off')
#    plt.xticks([])
#    plt.yticks([])
#    plt.savefig('C:/Users/night/Desktop/{}_out_bar{}.png'.format(test,i))
#    plt.close()
# Randomly subsample roughly 220-250 examples per class for the plotted matrix.
y_true=[]
y_pred=[]
# Per-class counters, starting from 1.
duoshaoge=[1,1,1,1,1,1,1,1]
for i in range(targets.size):
    for j in range(8):
        # NOTE(review): the random cap is re-drawn on every iteration, so the
        # per-class sample size is only approximately in [220, 250).
        if targets[0,i] == j and duoshaoge[j] < np.random.randint(220,high=250):
            duoshaoge[j] +=1
            y_true.append(targets[0,i])
            y_pred.append(output[0,i])
#lz 画特征向量
#vec_mean=[np.tile(skp.minmax_scale(q.mean(0).reshape(64,1))*255,(1,20)) for q in vec]
#for i,j in enumerate(vec_mean):
# im = Image.fromarray(vec_mean[i]).convert('L')
# im=im.resize((20*10,64*10),Image.ANTIALIAS)
# im.save('C:/Users/night/Desktop/out{}.png'.format(i))
#
##lz 画混淆矩阵
#chaos_percent=chaos/(chaos.sum(1).reshape(4,1))
#chaos_percent=np.tile(chaos_percent.reshape((4,4,1)),(1,1,3))
#chaos_percent[:,:,0]=(1-chaos_percent[:,:,0])*160 + 80
#chaos_percent[:,:,1]=(1-chaos_percent[:,:,1])*60 + 180
#chaos_percent[:,:,2]=255
#chaos_percent=chaos_percent.astype('uint8')
#im = Image.fromarray(chaos_percent).convert('RGB')
#im=im.resize((4*80,4*80))
#im.save('C:/Users/night/Desktop/混淆矩阵.png')
#sklearn求混淆矩阵
def plot_confusion_matrix(y_true, y_pred,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):#classes,
    """
    Plot a confusion matrix with raw counts (upper text) and row-normalised
    ratios (lower text) drawn inside every cell; returns the Axes.

    NOTE(review): `normalize` currently only changes the default title -- the
    actual normalisation branch below is commented out, and the row-ratio
    annotations are always drawn regardless.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
#    classes = classes[unique_labels(y_true, y_pred)]
#    if normalize:
#        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#        print("Normalized confusion matrix")
#    else:
#        print('Confusion matrix, without normalization')
#
#    print(cm)
    # Row-normalised copy: each row divided by its total (per-class recall).
    cmm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # Axis labels are placed manually; coordinates assume an 8x8 matrix -- TODO confirm
    ax.text(x=-2,y=2.5,s='True label',fontsize=15,rotation=90)
    ax.text(x=1.8,y=9,s='Predicted label',fontsize=15)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           title=title)
#           xlabel='Predicted label')#           ylabel='True label',           xticklabels=classes, yticklabels=classes,
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Annotate cells: integer count above the center, ratio below it.
    fmt = 'd'
    fmtt = '.2f'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="bottom",
                    color="white" if cm[i, j] > thresh else "black")  # font color
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cmm[i, j], fmtt),
                    ha="center", va="top",
                    color="white" if cm[i, j] > thresh else "black")  # font color
    fig.tight_layout()
    return ax
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
#plot_confusion_matrix(targets.reshape(targets.shape[1],1), output.reshape(output.shape[1],1), classes=['centre','around','cross','rightslant','corner','WASD','leftslant','space'],
#                      title='Confusion matrix ')
plot_confusion_matrix(y_true, y_pred,
                      title='混淆矩阵 ')#  classes=['centre','around','cross','rightslant','corner','WASD','leftslant','space'],
# Save to the desktop ("混淆矩阵" = "confusion matrix"); hard-coded Windows path.
plt.savefig('C:/Users/night/Desktop/混淆矩阵.png',dpi=500)
| [
"noreply@github.com"
] | noreply@github.com |
bf01b79a3700c352727aef8eaafe54f6f54e41dc | 54f716fb857503f1afbde763302e4afd2237fbd0 | /personalhealth/user/models.py | e35c2cae6468d5df984b0f72e06577d282f7dfc3 | [] | no_license | VARSHINI2000V/PersonalHealthRecordManagement | 26fa65b28e1749e13c5194359cb113fa6436e873 | c065eced510ff24248b8d54f5edfdb44fdd7c90d | refs/heads/master | 2023-05-19T21:15:09.515062 | 2021-06-11T07:13:31 | 2021-06-11T07:13:31 | 375,936,877 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | from django.db import models
# Create your models here.
class Registeration(models.Model):
    """User registration record keyed by email.

    NOTE(review): class name misspells "Registration"; renaming would need a
    migration plus caller updates.  Password is stored as plain text with
    max_length=8 -- it should be hashed.
    """
    name=models.CharField(max_length=50)
    email=models.CharField(max_length=50,primary_key=True)  # natural primary key
    password=models.CharField(max_length=8)
# Create your models here
class hadmin(models.Model):
    """Hospital-admin account; same shape (and plaintext-password concern)
    as Registeration."""
    name=models.CharField(max_length=50)
    email=models.CharField(max_length=50,primary_key=True)
    password=models.CharField(max_length=8)
class userprofile(models.Model):
    """Extended user profile, linked informally to a user via `email`."""
    firstname=models.CharField(max_length=30)
    lastname=models.CharField(max_length=30)
    dob=models.DateField()
    healthissue=models.CharField(max_length=30,default="no")  # "no" doubles as "none given"
    interest=models.CharField(max_length=30,default="no")
    hobby=models.CharField(max_length=30,default="no")
    phone=models.CharField(max_length=13,default="no")
    economic_pref=models.CharField(max_length=30,default="moderate")
    # NOTE(review): free-text link to the user; a ForeignKey would enforce integrity.
    email=models.CharField(max_length=30)
class myrecord(models.Model):
    """One uploaded medical record (report + prescription scan) per visit."""
    hospitalname=models.CharField(max_length=50)
    doctorname=models.CharField(max_length=50)
    hospitallocation=models.CharField(max_length=250)
    diseasename=models.CharField(max_length=50)
    date=models.DateField()
    documentname=models.CharField(max_length=50)
    file=models.FileField()          # uploaded report document
    prescription=models.FileField()  # uploaded prescription document
    description=models.CharField(max_length=50)
    email=models.CharField(max_length=30)  # owning user's email (no FK)
class prescription(models.Model):
    """A medicine reminder: what to take, when, and relative to meals."""
    prescrip_name=models.CharField(max_length=50)
    description=models.CharField(max_length=50)
    diseasename=models.CharField(max_length=50)
    # NOTE(review): a FileField for a medicine *name* looks unintentional -- confirm.
    medicinename=models.FileField()
    meal=models.CharField(max_length=10)
    time=models.TimeField(auto_now=False, auto_now_add=False)
    email=models.CharField(max_length=30)  # owning user's email (no FK)
class emergency(models.Model):
hospitalname=models.CharField(max_length=50)
location=models.CharField(max_length=100)
phone=models.CharField(max_length=14) | [
"20mx224@psgtech.ac.in"
] | 20mx224@psgtech.ac.in |
94c75af050c8c909d60ab4ed56c2670b2b4bd071 | d5a7981631d1ad2121fb5a252aee4b81260d376a | /posts/migrations/0002_auto_20190526_1626.py | f81d13a52d85166c78a095039650a983c5c67402 | [] | no_license | alldroid/news-aggregator | f43bc869541bb6316df6d48709380fc122dad958 | 7394f6baad718ee88f0cf06ee9af5a5fa4b998e1 | refs/heads/master | 2023-05-04T18:52:55.340932 | 2019-11-19T20:12:11 | 2019-11-19T20:12:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | # Generated by Django 2.2 on 2019-05-26 16:26
from django.db import migrations
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2 makemigrations: alters Comment.parent to a
    # non-editable, nullable MPTT TreeForeignKey onto itself.  Applied
    # migrations should not be hand-edited.
    dependencies = [
        ('posts', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='parent',
            field=mptt.fields.TreeForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='posts.Comment'),
        ),
    ]
| [
"viceroy627@news.4txip4n1dtve5mhis1bhurobdd.dx.internal.cloudapp.net"
] | viceroy627@news.4txip4n1dtve5mhis1bhurobdd.dx.internal.cloudapp.net |
9ae6888a4830dd4d9410c8388b80e621f27c0173 | f91823b5b660cc9057a713885581e5d3fd678c6d | /Python/419_BattleshipsInABoard.py | 23b3188899e6bf96bba04cf5bbb861e67dc6ff22 | [
"MIT"
class Solution:
    def countBattleships(self, board: List[List[str]]) -> int:
        """Count battleships on the grid without modifying it.

        Ships are straight horizontal or vertical runs of 'X' that never
        touch each other.  Each ship is counted exactly once at its
        top-left cell: an 'X' whose upper and left neighbours are not 'X'.

        Fix vs. original: the original destructively overwrote visited
        cells with '.', mutating the caller's board; this version is
        read-only and a single O(rows*cols) pass.
        """
        count = 0
        for r in range(len(board)):
            for c in range(len(board[r])):
                if (board[r][c] == 'X'
                        and (r == 0 or board[r - 1][c] != 'X')
                        and (c == 0 or board[r][c - 1] != 'X')):
                    count += 1
        return count
return boats
| [
"lucas.martins.s@gmail.com"
] | lucas.martins.s@gmail.com |
ba837d55e3dd6107d2a049fbd6ffd82ff426d678 | aa9c055001fc48f1e98e3e52e4d23a9dffa11572 | /NumpyCookbook/src/test/commonly_used_functions/test_fermat_factor.py | 1478cd032325f2aae3cb5204e23cf6e6116ad771 | [] | no_license | sts-sadr/NumpyCookbook | b326697393ad804a1b74c310808d68dc242f85a1 | 8ccc270a7793919e7511d09b94d2b1a0f5bad022 | refs/heads/master | 2021-01-09T17:46:16.807042 | 2014-07-18T00:01:47 | 2014-07-18T00:01:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | import unittest
import numpy
def factor(n, limit, prime_factors):
#1. Create array of trial values
a = numpy.ceil(numpy.sqrt(n))
limit = min(n, limit)
a = numpy.arange(a, a+limit)
b2 = a**2 - n
#2. Check whether b is a square
fractions = numpy.modf(numpy.sqrt(b2))[0]
#3. Find 0 fractions
indices = numpy.where(fractions == 0)
#4. find the first occurrence of a 0 fractions
a = numpy.ravel(numpy.take(a, indices))
if len(a) == 0:
return prime_factors
a = int(a[0])
b = int(numpy.sqrt(a ** 2 - n))
c = a + b
d = a - b
if c == 1 or d == 1:
prime_factors.append(1,1)
return [1,1]
prime_factors += factor(c, limit, prime_factors)
prime_factors += factor(d, limit, prime_factors)
return [c,d]
class FactorTest(unittest.TestCase):
    # Pins the aliasing-heavy accumulation behaviour of factor(): the shared
    # list collects inner splits first, then the top-level [c, d] is appended.
    def test_factor(self):
        prime_factors = []
        prime_factors += factor(1000, 1000, prime_factors)
        self.assertEqual(prime_factors,[10, 2, 50, 20])
self.assertEqual(prime_factors,[10, 2, 50, 20])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
suite = unittest.TestSuite()
suite.addTests([FactorTest])
unittest.TextTestRunner(verbosity=2).run(suite) | [
"wright1michael@gmail.com"
] | wright1michael@gmail.com |
0c18fac95ddcd9bb781a3fec7a8ca8750a4c3d8b | 01219eaf85e80f9bdebcb722278c12b10e46ac77 | /write_surfrend_script.py | adb61cbcef046de72e25a95240665c2cd268277a | [] | no_license | KuperbergLab/MRI_scripts | 0850613dfba41c9696251402a7c4b3a6311600f2 | 8b4fea4f2e9eeaaf36e8d4f52747bf78455e60a0 | refs/heads/master | 2021-01-23T08:56:58.728335 | 2013-11-05T14:43:14 | 2013-11-05T14:43:14 | 1,350,777 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | import sys
import os
import os.path
import readInput
from os.path import join as pj
def write_surfrend_script(conname, connum, swd, thresh, exthresh):
    """Generate the .m driver script that calls surfrend_canonical in MATLAB."""
    script_path = pj(swd, conname + ".m")
    call = "%s('%s', %s, '%s', %s, %s)" % (
        "surfrend_canonical", conname, connum, swd, thresh, exthresh)
    lines = ["warning off all", call, 'exit;']
    write_file_with_list(script_path, lines, conname, swd)
def write_file_with_list(path, lines, conname, swd, quiet=False):
    """Write *lines* to *path*, one per line, with a trailing newline.

    Prints a confirmation naming the generated script unless *quiet* is true.
    IOErrors propagate to the caller.

    Fix vs. original: the ``try/except IOError: raise`` wrapper was a no-op
    (an uncaught exception propagates anyway) and has been removed.
    """
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')
    # make_lingua(path)
    if not quiet:
        print("Hi! Wrote %s/%s.m" % (swd, conname))
if __name__ == "__main__":
write_surfrend_script(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| [
"candida@nmr.mgh.harvard.edu"
] | candida@nmr.mgh.harvard.edu |
887f6ea2c7e8bbca6135e136376ad8a3086747d8 | 2bd978979c0d5b301f7e677980da5f27dbeba5a7 | /catkin_ws/devel/_setup_util.py | 754f12afef2bd4a4fd682fa90597656f39850f56 | [
"BSD-3-Clause"
] | permissive | Jenny0ly/MultipleAgents | ad9c9af454683f5f8d62483a1221c192cbe17951 | 01ee412b067800c6d45367d398e74df0a5880d54 | refs/heads/master | 2021-05-16T22:34:27.491519 | 2020-05-05T05:31:51 | 2020-05-05T05:31:51 | 250,494,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,360 | py | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.
    '''
    lines = []
    # Work on a copy so the per-variable lookups below are unaffected by the
    # assignments written back into `environ`.
    unmodified_environ = copy.copy(environ)
    for key in sorted(env_var_subfolders.keys()):
        subfolders = env_var_subfolders[key]
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        value = _rollback_env_variable(unmodified_environ, key, subfolders)
        if value is not None:  # None means "nothing to roll back" for this variable
            environ[key] = value
            lines.append(assignment(key, value))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolders):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.

    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable, or None when
        nothing matched (i.e. no rollback needed).
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        if subfolder:
            # Normalise the subfolder: strip a leading/trailing path separator
            # (either os.sep or os.altsep) so os.path.join behaves.
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # Tolerate a trailing separator on entries already in the env.
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.

    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    '''
    # split CMAKE_PREFIX_PATH into its non-empty entries
    candidates = [p for p in environ.get('CMAKE_PREFIX_PATH', '').split(os.pathsep) if p]

    def _is_workspace(p):
        # a real workspace carries the catkin marker file
        if os.path.isfile(os.path.join(p, CATKIN_MARKER_FILE)):
            return True
        if include_fuerte and p.startswith('/opt/ros/fuerte'):
            return True
        return include_non_existing and not os.path.exists(p)

    return [p for p in candidates if _is_workspace(p)]
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.
    '''
    lines = []
    lines.append(comment('prepend folders of workspaces to environment variables'))
    # CMAKE_PREFIX_PATH takes the workspace roots themselves (empty subfolder).
    paths = [path for path in workspaces.split(os.pathsep) if path]
    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
    # Every other variable gets workspace_root/<subfolder(s)> prepended.
    for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
        subfolder = env_var_subfolders[key]
        prefix = _prefix_env_variable(environ, key, paths, subfolder)
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
    """Render a platform-appropriate shell assignment of *value* to *key*."""
    template = 'set %s=%s' if IS_WINDOWS else 'export %s="%s"'
    return template % (key, value)
def comment(msg):
    """Render *msg* as a shell comment for the target platform."""
    prefix = 'REM' if IS_WINDOWS else '#'
    return '%s %s' % (prefix, msg)
def prepend(environ, key, prefix):
    """Emit shell code prepending *prefix* to *key*; plain assignment if unset/empty."""
    if not environ.get(key):
        return assignment(key, prefix)
    template = 'set %s=%s%%%s%%' if IS_WINDOWS else 'export %s="%s$%s"'
    return template % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
    """Parse --extend/--local; unknown arguments are ignored (parse_known_args)."""
    parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
    parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
    parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
    return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = '/home/jenny/catkin_ws/devel;/opt/ros/melodic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| [
"jennyoly.maya@gmail.com"
] | jennyoly.maya@gmail.com |
bf24ed5892fb160f649ce475c426d251ed8c2127 | 85988e23c7e94d3c4c6b8156c7e79040d0064320 | /drf_demo/model_less/views.py | 655e996c67d645cce946fbb1a99746faa5a561dc | [] | no_license | Thiyageshv/PizzaReminder | dcb98731b3ff3c461baa123342d28fc32ec3a97e | ebb10055249721474ea98c9ff2b94976bb8a1cd0 | refs/heads/master | 2020-12-24T18:55:11.824200 | 2016-05-13T00:29:47 | 2016-05-13T00:29:47 | 58,516,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,947 | py | import django_filters
import json
from rest_framework import filters
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import viewsets, status
from auth import getresponse
from . import serializers
from . import yelp
from .serializers import locserializer, groupserializer
from .models import userloc, groupusers
# Global variable used for the sake of simplicity.
# In real life, you'll be using your own interface to a data store
# of some sort, being caching, NoSQL, LDAP, external API or anything else
# NOTE(review): a module-level `global` statement is a no-op; `tasks` is
# simply a module attribute that caches the latest lookup results.
global tasks
tasks = {}
# Rebuild the module-level `tasks` cache with one `yelp` entry per group
# member other than the requesting user, holding that member's results for
# the search term near their stored Lat/Long.
# NOTE(review): this module uses Python 2 print statements.
def findobjects(term,userlist,userid):
    global tasks
    tasks = {}
    for i,u in enumerate(userlist):
        print u,userid
        # Skip the user who issued the request.
        if int(u)==int(userid):
            continue
        userlocob = userloc.objects.get(userid=u)
        x = userlocob.Lat
        y = userlocob.Long
        # External lookup of `term` around this member's coordinates.
        items = getresponse(term,x,y)
        un = userlocob.username
        #print items
        if items is None:
            items = []
        resitem = json.dumps(items)
        #print resitem
        tasks[i] = yelp(name=un,res=resitem)
    print tasks
def get_next_task_id():
    """Return the next free id for the module-level ``tasks`` mapping.

    Returns 1 when no tasks exist yet; previously this raised
    ``ValueError`` ("max() arg is an empty sequence") for the very first
    task created via the API.
    """
    return max(tasks) + 1 if tasks else 1
class groupviewset(viewsets.ModelViewSet):
    # CRUD endpoints over the groupusers model (group membership lists).
    serializer_class = groupserializer
    queryset = groupusers.objects.all()
    #user = get_object_or_404(queryset, pk=pk)
    def list(self, request):
        # Despite the name, returns the single group selected by the
        # ?groupid= query parameter rather than listing all groups.
        queryset = groupusers.objects.all()
        groupid = int(request.GET.get('groupid',False))
        groupob = groupusers.objects.get(groupid=groupid)
        serializer = serializers.groupserializer(groupob,partial=True)
        return Response(serializer.data)
    def create(self, request):
        # Create a group; the member list is stored JSON-encoded in `users`.
        groupid = request.GET.get('groupid')
        request.data['users']=json.dumps(request.data['users'])
        serializer = serializers.groupserializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def put(self,request):
        # Partial update of an existing group's member list (JSON-encoded).
        groupid = int(request.GET.get('groupid'))
        request.data['users']=json.dumps(request.data['users'])
        updateob = groupusers.objects.get(groupid=groupid)
        serializer=groupserializer(updateob,data=request.data,partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class userlocviewset(viewsets.ModelViewSet):
    # CRUD endpoints over the userloc model (per-user Lat/Long records).
    serializer_class = locserializer
    queryset = userloc.objects.all()
    #user = get_object_or_404(queryset, pk=pk)
    def list(self, request):
        # List every stored user location (the ?userid= parameter is read
        # but not actually used to filter).
        queryset = userloc.objects.all()
        userid = request.GET.get('userid')
        serializer = serializers.locserializer(queryset, many=True)
        return Response(serializer.data)
    def create(self, request):
        # Create a location record from the request body.
        # NOTE(review): userid/Lat/Long are read from the query string but
        # never used — the serializer consumes request.data only.
        userid = request.GET.get('userid')
        Lat = request.GET.get('Lat')
        Long = request.GET.get('Long')
        serializer = serializers.locserializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def put(self,request):
        # Partial update of the location identified by request.data['userid'].
        ruserid = str(request.GET.get('userid'))
        updateob = userloc.objects.get(userid=request.data['userid'])
        #user = get_object_or_404(updateob, pk=pk)
        serializer=locserializer(updateob,data=request.data,partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class TaskViewSet(viewsets.ViewSet):
    # In-memory task API backed by the module-level `tasks` dict (no DB).
    #Required for the Browsable API renderer to have a nice form.
    serializer_class = serializers.TaskSerializer
    def list(self, request):
        # Rebuild and return the per-group lookup results for ?term=,
        # rejecting callers whose ?userid= is not a member of ?groupid=.
        global tasks
        term = request.GET.get('term')
        groupid = request.GET.get('groupid',False)
        userid = request.GET.get('userid')
        print term,groupid,userid
        userob = groupusers.objects.get(groupid=groupid)
        userlist = json.loads(userob.users)
        print userob.users
        # Membership check is a substring test on the raw JSON text.
        if userid not in userob.users:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        findobjects(term,userlist,userid)
        serializer = serializers.TaskSerializer(instance=tasks.values(), many=True)
        return Response(serializer.data)
    def create(self, request):
        # Validate and store a new task under the next free id.
        global tasks
        serializer = serializers.TaskSerializer(data=request.data)
        if serializer.is_valid():
            task = serializer.save()
            task.id = get_next_task_id()
            tasks[task.id] = task
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def retrieve(self, request, pk=None):
        # 404 for an unknown id, 400 for a non-integer id.
        global tasks
        try:
            task = tasks[int(pk)]
        except KeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except ValueError:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.TaskSerializer(instance=task)
        return Response(serializer.data)
    def update(self, request, pk=None):
        # Full replacement of an existing task.
        global tasks
        try:
            task = tasks[int(pk)]
        except KeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except ValueError:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.TaskSerializer(data=request.data, instance=task)
        if serializer.is_valid():
            task = serializer.save()
            tasks[task.id] = task
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def partial_update(self, request, pk=None):
        # Partial update of an existing task.
        global tasks
        try:
            task = tasks[int(pk)]
        except KeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except ValueError:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        serializer = serializers.TaskSerializer(data=request.data,instance=task,partial=True)
        if serializer.is_valid():
            task = serializer.save()
            tasks[task.id] = task
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def destroy(self, request, pk=None):
        # Remove a task; same 404/400 mapping as retrieve().
        global tasks
        try:
            task = tasks[int(pk)]
        except KeyError:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except ValueError:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        del tasks[task.id]
        return Response(status=status.HTTP_204_NO_CONTENT)
| [
"slogsudan001@gmail.com"
] | slogsudan001@gmail.com |
5c8a3d633cdc1f30c6039e31239a9dc7458adc8c | 7604468bc21e5f7b11b7b48b884d8b8fd1b5235b | /venv/Scripts/pip3-script.py | 8ce69a0d40c95b828f84e195a0a0f1b138530b64 | [] | no_license | baallnight/syyoo3 | 5be17b7e107ed957880238d54cced70091a287ec | de333cedc00825e1516590aa3a99af988b396bfb | refs/heads/master | 2020-05-05T07:05:36.660787 | 2019-04-13T09:20:01 | 2019-04-13T09:20:01 | 179,813,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\ezen\PycharmProjects\syyoo-3\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
# Setuptools-generated launcher: strip the "-script.py"/".exe" suffix that
# setuptools appends to sys.argv[0], then exit with the return value of pip's
# registered 'pip3' console-script entry point.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"gkdldkfk@naver.com"
] | gkdldkfk@naver.com |
5eb8ffeec94c2991e9e2530bf7cdee6e3819a92b | ea463d9a1ff7343a553293c1402ee13151624594 | /Sprint3/qaCompiler/outputs/CompiledCode/outputs8_Code.py | d12bb015b4a06dd0ad51ea6cdc5ac73ac10a2f65 | [] | no_license | benbennza1/QACompiler | 88d09ae58b94454a9b99ec20f4fd010910ff855d | f4059a19000ddb6f9cf1b2f7276cb6da0d2bb47e | refs/heads/master | 2021-01-21T11:27:34.216347 | 2017-05-18T22:29:22 | 2017-05-18T22:29:22 | 91,740,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | from splinter import Browser
from time import sleep
# Constant emitted by the QA compiler (unused in this generated test case).
SLASH = "/"
# Entry point of the generated step chain: each stepN performs one scripted
# browser action, returning its step number as a string on failure or
# delegating to the next step on success.
def step (browser, loop = False):
    return step1(browser, loop)
def step1(browser, loop = False):
    # Step 1: performs no browser action of its own; the compiled chain
    # jumps straight to step 3.  The original body carried unreachable
    # statements after this return (an `if loop` guard and a call to
    # step2); that dead code has been removed.
    return step3(browser,loop)
def step2(browser, loop = False):
    # Step 2: type "test" into the Search field and verify the value stuck;
    # returns "2" on failure, otherwise continues to step 3 (or stops when
    # invoked in single-step "loop" mode).
    browser.find_by_id("Search").first.type("test")
    if browser.find_by_id("Search").first.value != 'test':
        return "2"
    if loop:
        return
    return step3(browser, loop)
def step3(browser, loop = False):
    # Step 3: click the "Google" element, wait for navigation to finish,
    # then verify the browser landed on google.ca; returns "3" on failure.
    oldURL=browser.url
    browser.find_by_id("Google").click()
    sleep(1)
    d = 0
    newURL = browser.url
    # NOTE(review): newURL is captured once and never refreshed inside the
    # loop, so the first clause keeps comparing against the URL seen right
    # after the click; the readyState clause does the real waiting.  The
    # counter bails out after ~1000s of polling.
    while oldURL == newURL or browser.evaluate_script("document.readyState")!="complete":
        sleep(0.1)
        d+=1
        if d>10000:
            return "3"
    if browser.url!= 'http://www.google.ca/':
        return "3"
    if loop:
        return
    return
def checkSteps():
    """Run the generated step chain in a fresh Chrome session.

    Returns a human-readable message: either the number of the first failed
    step, or a pass notice.
    """
    browser = Browser('chrome')
    try:
        # step() returns None/"" on success, or the failing step's number.
        err = step(browser)
    finally:
        # Always release the browser, even when a step raises unexpectedly
        # (the original leaked the Chrome process on an exception).
        browser.quit()
    # Any non-empty, non-whitespace return value names the failed step
    # (equivalent to the original None / isspace / len checks).
    if err and not err.isspace():
        return "Program failed on step: " + err + "\n"
    return "The testcase passed"


if __name__ == '__main__':
    print(checkSteps())
| [
"nizhangan123@hotmail.com"
] | nizhangan123@hotmail.com |
3d583524944b4d54d5c4a94528ae094f9a236bf1 | b2092667277ecd2c5ef01d6070478621a2c73222 | /Bandit/Scripts/ROT_Rotation.py | 6d266e66464c0870b4117358363dbf1a972a1d1e | [] | no_license | WillGreen98/CTF-OverTheWire | 47b181cda18ed4a1f745c1baa4c8d56a4d94120f | c4f11e971aa222306020eeeaa9bca1cc2b228d47 | refs/heads/master | 2020-04-17T11:37:11.305509 | 2019-01-31T00:15:42 | 2019-01-31T00:15:42 | 166,547,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | #!/usr/local/bin/env python3
from string import ascii_lowercase as lc, ascii_uppercase as uc
def rot_alpha(n):
    """Return a function applying a Caesar rotation of ``n`` to a string.

    Only ASCII letters are rotated (case preserved); every other character
    passes through unchanged.
    """
    rotated = lc[n:] + lc[:n] + uc[n:] + uc[:n]
    table = str.maketrans(lc + uc, rotated)

    def rotate(text):
        return text.translate(table)

    return rotate
"will.green98@hotmail.com"
] | will.green98@hotmail.com |
b6bd75945987e1414a96002a48821094fbf1dd28 | 9661a19548c9886beb4965f8b404fc61f0f6831e | /Tools/virtual_server_remove_cores.py | 4e9cb58e9c3f0c823cb8ea5f0573f86ab036705d | [] | no_license | dirkhpe/bv | d3ee2f43ac1cc0b14b38b40417adbd96335db818 | 7725ebc01b3b981897f018a5e81bfd8a62dea11d | refs/heads/master | 2022-01-31T00:30:46.056907 | 2019-06-05T08:30:05 | 2019-06-05T08:30:05 | 107,697,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | """
This script will find all virtual servers where Core is defined. The core property is removed.
For virtual servers only CPU count is listed. Core is not relevant for virtual servers.
The CPU field in MURCS for Virtual Servers is the 'Number of logical CPUs' in ESL (see ESL documentation).
A server with 4 CPU, 4 cores per CPU and Hyperthreading enabled has 4x4x2(hyperthreading) = 32 vCPUs.
"""
from lib import my_env
from lib import murcsrest, murcsstore
# Server fields never copied into the update payload: ids/audit metadata,
# plus siteId/parentServer (re-added below as nested objects).  coreCount is
# listed deliberately so it gets dropped — per the module docstring, cores
# are not relevant for virtual servers.
ignore = ["id", "changedAt", "changedBy", "createdAt", "createdBy", "clientId", "siteId", "coreCount", "parentServer"]
if __name__ == "__main__":
    cfg = my_env.init_env("bellavista", __file__)
    mdb = murcsstore.Murcs(cfg)
    r = murcsrest.MurcsRest(cfg)
    # Get server records - procedure copy from extract_murcs\server.py
    # First get all field names for the table
    table = "server"
    field_array = mdb.get_fields(table)
    # Drop fields from array
    field_array.remove("parentServerId")
    field_array.remove("siteId")
    # Then add table identifier on field name
    fields_str = ", ".join(["t1.{f}".format(f=f) for f in field_array])
    # Add (lookup) fields
    add_fields_array = ["s2.hostName as parentServer", "site.siteId"]
    all_fields = ", ".join(add_fields_array)
    fields = fields_str + ", " + all_fields
    # Select only VIRTUAL servers of this client that still carry a coreCount.
    query = """
        SELECT {fields}
        FROM {table} t1
        LEFT JOIN server s2 ON s2.id = t1.parentServerId
        LEFT JOIN site ON site.id = t1.siteId
        INNER JOIN client ON client.id=t1.clientId
        WHERE client.clientId="{clientId}"
        AND t1.serverType = "VIRTUAL"
        AND t1.coreCount is not NULL;
        """.format(clientId=cfg["Murcs"]["clientId"], fields=fields, table=table)
    res = mdb.get_query(query)
    loop_info = my_env.LoopInfo("CoreCount", 20)
    for rec in res:
        serverId = rec["serverId"]
        payload = {}
        # Re-attach site and parent server as nested reference objects.
        if rec["siteId"] is not None:
            payload["site"] = dict(siteId=rec["siteId"])
        if rec["parentServer"] is not None:
            payload["parentServer"] = dict(serverId=rec["parentServer"])
        for k in rec:
            # coreCount has been added to the ignore list so that it gets dropped.
            if (k not in ignore) and (rec[k] is not None):
                if k == "primaryIP":
                    payload["primaryIPAddress"] = rec[k]
                else:
                    payload[k] = rec[k]
        # Re-posting the server without coreCount removes the property.
        r.add_server(serverId=serverId, payload=payload)
        cnt = loop_info.info_loop()
        # if cnt > 3:
        #     break
    loop_info.end_loop()
    mdb.close()
| [
"dirk.vermeylen@skynet.be"
] | dirk.vermeylen@skynet.be |
36fffbf08add01a7aa0c0a0417081547bc20bd88 | a7d7c4c04b3bfc0d5705de81ec5ddb52cd8bedf8 | /db/tests/test_system_data.py | a4f3726782ea519628f052dc9df046a8626812dc | [
"MIT"
] | permissive | alorenzo175/ESPRR | f065db03598c2eaa278b095df7ed4c0434f3105a | de896a13c92fc4eaf3f7487e2ca8fb3ef3d04ebb | refs/heads/main | 2023-06-22T09:21:41.341366 | 2021-05-01T03:25:13 | 2021-05-01T03:25:13 | 363,449,089 | 0 | 0 | MIT | 2021-05-01T15:54:44 | 2021-05-01T15:54:43 | null | UTF-8 | Python | false | false | 8,865 | py | from uuid import uuid1
from pymysql.err import OperationalError
import pytest
def test_system_foreign_key(cursor, system_id):
    # Deleting the parent systems row must cascade-delete its system_data rows.
    cursor.execute(
        "select 1 from system_data where system_id = uuid_to_bin(%s, 1)", system_id
    )
    assert cursor.fetchone()[0]
    cursor.execute("delete from systems where id = uuid_to_bin(%s, 1)", system_id)
    cursor.execute(
        "select 1 from system_data where system_id = uuid_to_bin(%s, 1)", system_id
    )
    assert len(cursor.fetchall()) == 0
def test_create_system_data(cursor, auth0_id, system_id):
    # create_system_data inserts a fresh (system, dataset) row.
    cursor.execute(
        "select 1 from system_data where system_id = uuid_to_bin(%s, 1)"
        ' and dataset = "a"',
        system_id,
    )
    assert len(cursor.fetchall()) == 0
    cursor.execute(f'call create_system_data("{auth0_id}", "{system_id}", "a")')
    cursor.execute(
        "select 1 from system_data where system_id = uuid_to_bin(%s, 1)"
        ' and dataset = "a"',
        system_id,
    )
    assert cursor.fetchone()[0]
def test_create_system_data_duplicate(cursor, auth0_id, system_id):
    # Re-creating an existing (system, dataset) row resets its payload columns.
    cursor.execute(
        "insert into system_data (system_id, dataset, timeseries, statistics, error)"
        " values (uuid_to_bin(%s, 1), %s, %s, %s, %s)",
        (system_id, "a", "times", "stats", '{"error": "message"}'),
    )
    cursor.execute(
        "select timeseries, statistics, error from system_data where system_id = uuid_to_bin(%s, 1)"
        ' and dataset = "a"',
        system_id,
    )
    out = cursor.fetchall()
    assert len(out) == 1
    assert out[0] == (b"times", b"stats", '{"error": "message"}')
    cursor.execute(f'call create_system_data("{auth0_id}", "{system_id}", "a")')
    cursor.execute(
        "select timeseries, statistics, error from system_data where system_id = uuid_to_bin(%s, 1)"
        ' and dataset = "a"',
        system_id,
    )
    out = cursor.fetchall()
    assert len(out) == 1
    assert out[0] == (None, None, "[]")
def test_create_system_data_bad_id(cursor, auth0_id):
    # Unknown system id: the procedure denies access (MySQL error 1142,
    # presumably ER_TABLEACCESS_DENIED_ERROR — same code asserted throughout).
    with pytest.raises(OperationalError) as err:
        cursor.execute(f'call create_system_data("{auth0_id}", "{str(uuid1())}", "a")')
    assert err.value.args[0] == 1142
def test_create_system_data_bad_user(cursor, bad_user, system_id):
    # A user without access to the system is denied the same way.
    with pytest.raises(OperationalError) as err:
        cursor.execute(f'call create_system_data("{bad_user}", "{system_id}", "a")')
    assert err.value.args[0] == 1142
@pytest.mark.parametrize("err", ["[]", '{"message": "fail"}'])
def test_update_system_data(auth0_id, dictcursor, system_id, err):
    # update_system_data fills the payload columns and bumps modified_at
    # while leaving created_at untouched; works for empty and non-empty errors.
    dictcursor.execute(
        "select * from system_data where system_id = uuid_to_bin(%s, 1) and dataset = %s",
        (system_id, "prepared"),
    )
    before = dictcursor.fetchone()
    for k in ("timeseries", "statistics", "version", "system_hash"):
        assert before[k] is None
    new = {
        "timeseries": b"all the dataz",
        "statistics": b"mean",
        "error": err,
        "version": "v1.0",
        "un_system_hash": "A" * 32,
    }
    dictcursor.execute(
        "call update_system_data(%s, %s, %s, %s, %s, %s, %s, %s)",
        (
            auth0_id,
            system_id,
            "prepared",
            *new.values(),
        ),
    )
    # hex() the binary hash column so it compares against the hex input.
    dictcursor.execute(
        "select *, hex(system_hash) as un_system_hash from system_data where system_id = uuid_to_bin(%s, 1) and dataset = %s",
        (system_id, "prepared"),
    )
    after = dictcursor.fetchone()
    for k, v in new.items():
        assert after[k] == v
    assert after["modified_at"] > before["modified_at"]
    assert after["created_at"] == before["created_at"]
def test_update_system_data_bad_dataset(cursor, auth0_id, system_id):
    # Updating a dataset row that was never created is denied (error 1142).
    new = {
        "timeseries": b"all the dataz",
        "statistics": b"mean",
        "error": "[]",
        "version": "v1.0",
        "un_system_hash": "A" * 32,
    }
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call update_system_data("{auth0_id}", "{system_id}"'
            ', "a", %s, %s, %s, %s, %s)',
            list(new.values()),
        )
    assert err.value.args[0] == 1142
def test_update_system_data_bad_id(cursor, auth0_id):
    # Unknown system id is denied.
    new = {
        "timeseries": b"all the dataz",
        "statistics": b"mean",
        "error": "[]",
        "version": "v1.0",
        "un_system_hash": "A" * 32,
    }
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call update_system_data("{auth0_id}", "{str(uuid1())}"'
            ', "a", %s, %s, %s, %s, %s)',
            list(new.values()),
        )
    assert err.value.args[0] == 1142
def test_update_system_data_bad_user(cursor, bad_user, system_id):
    # A user without access to the system is denied.
    new = {
        "timeseries": b"all the dataz",
        "statistics": b"mean",
        "error": "[]",
        "version": "v1.0",
        "un_system_hash": "A" * 32,
    }
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call update_system_data("{bad_user}", "{system_id}",'
            '"a", %s, %s, %s, %s, %s)',
            list(new.values()),
        )
    assert err.value.args[0] == 1142
# get_system_timeseries returns exactly one row whose timeseries column
# depends on the fixture dataset; an unknown dataset yields zero rows, which
# the xfail'd "what" case documents.
@pytest.mark.parametrize(
    "dataset,res",
    [
        ("prepared", None),
        ("complete", b"timeseries"),
        ("statistics missing", b"timeseries"),
        ("timeseries missing", None),
        pytest.param(
            "what", None, marks=pytest.mark.xfail(strict=True, raises=AssertionError)
        ),
    ],
)
def test_get_system_timeseries(system_id, dictcursor, dataset, auth0_id, res):
    dictcursor.execute(
        f'call get_system_timeseries("{auth0_id}", "{system_id}", "{dataset}")'
    )
    result = dictcursor.fetchall()
    assert len(result) == 1
    assert result[0]["timeseries"] == res
def test_get_system_timeseries_bad_id(
    otherid,
    cursor,
    auth0_id,
):
    # Reading another user's system id is denied (error 1142).
    with pytest.raises(OperationalError) as err:
        cursor.execute(f'call get_system_timeseries("{auth0_id}", "{otherid}", "a")')
    assert err.value.args[0] == 1142
def test_get_system_timeseries_bad_user(
    system_id,
    bad_user,
    cursor,
):
    # An unauthorized user is denied.
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call get_system_timeseries("{bad_user}", "{system_id}", "prepared")'
        )
    assert err.value.args[0] == 1142
# Same matrix for the statistics column.
@pytest.mark.parametrize(
    "dataset,res",
    [
        ("prepared", None),
        ("complete", b"stats"),
        ("statistics missing", None),
        ("timeseries missing", b"stats"),
        pytest.param(
            "what", None, marks=pytest.mark.xfail(strict=True, raises=AssertionError)
        ),
    ],
)
def test_get_system_statistics(system_id, dictcursor, dataset, auth0_id, res):
    dictcursor.execute(
        f'call get_system_statistics("{auth0_id}", "{system_id}", "{dataset}")'
    )
    result = dictcursor.fetchall()
    assert len(result) == 1
    assert result[0]["statistics"] == res
def test_get_system_statistics_bad_id(
    otherid,
    cursor,
    auth0_id,
):
    # Reading another user's system id is denied.
    with pytest.raises(OperationalError) as err:
        cursor.execute(f'call get_system_statistics("{auth0_id}", "{otherid}", "a")')
    assert err.value.args[0] == 1142
def test_get_system_statistics_bad_user(
    system_id,
    bad_user,
    cursor,
):
    # An unauthorized user is denied.
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call get_system_statistics("{bad_user}", "{system_id}", "prepared")'
        )
    assert err.value.args[0] == 1142
# get_system_data_meta reports row metadata for every known dataset status,
# without exposing the payload columns.
@pytest.mark.parametrize(
    "dataset",
    ["prepared", "complete", "statistics missing", "timeseries missing", "error"],
)
def test_get_system_meta(system_id, dictcursor, dataset, auth0_id):
    dictcursor.execute(
        f'call get_system_data_meta("{auth0_id}", "{system_id}", "{dataset}")'
    )
    res = dictcursor.fetchone()
    assert res["status"] == dataset
    assert res["created_at"] <= res["modified_at"]
    assert res["system_id"] == system_id
    assert res["dataset"] == dataset
    assert res["version"] is None
    assert res["system_hash"] is None
def test_get_system_meta_missing(dictcursor, auth0_id, system_id):
    # An unknown dataset name is denied (error 1142).
    with pytest.raises(OperationalError) as err:
        dictcursor.execute(
            f'call get_system_data_meta("{auth0_id}", "{system_id}", "nope")'
        )
    assert err.value.args[0] == 1142
def test_get_system_meta_bad_user(cursor, system_id, bad_user):
    # An unauthorized user is denied.
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call get_system_data_meta("{bad_user}", "{system_id}", "prepared")'
        )
    assert err.value.args[0] == 1142
def test_get_system_meta_bad_id(cursor, system_id, auth0_id, otherid):
    # Another user's system id is denied.
    with pytest.raises(OperationalError) as err:
        cursor.execute(
            f'call get_system_data_meta("{auth0_id}", "{otherid}", "prepared")'
        )
    assert err.value.args[0] == 1142
| [
"noreply@github.com"
] | noreply@github.com |
17c36a85b75b51e756e29a8ead5b24a5fa4896ea | 89e6c3548fbdd06178aae712de1ff19004bc2faa | /my_dulwich/contrib/test_swift_smoke.py | 222a609b4aa4864ef99696425775c51d3982d551 | [] | no_license | bhgv/ublog_git.hg.repo-django.python-engine | a3f3cdcbacc95ec98f022f9719d3b300dd6541d4 | 74cdae100bff5e8ab8fb9c3e8ba95623333c2d43 | refs/heads/master | 2020-03-23T01:04:07.431749 | 2018-07-25T12:59:21 | 2018-07-25T12:59:21 | 140,899,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,075 | py | # test_smoke.py -- Functional tests for the Swift backend.
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Fabien Boucher <fabien.boucher@enovance.com>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Start functional tests
A Swift installation must be available before
starting those tests. The account and authentication method used
during this functional tests must be changed in the configuration file
passed as environment variable.
The container used to create a fake repository is defined
in cls.fakerepo and will be deleted after the tests.
DULWICH_SWIFT_CFG=/tmp/conf.cfg PYTHONPATH=. python -m unittest \
dulwich.tests_swift.test_smoke
"""
import os
import unittest
import tempfile
import shutil
import gevent
from gevent import monkey
monkey.patch_all()
from my_dulwich import ( # noqa:E402
server,
repo,
index,
client,
objects,
)
from my_dulwich.contrib import swift # noqa:E402
class DulwichServer():
    """Start the TCPGitServer with Swift backend
    """
    def __init__(self, backend, port):
        self.port = port
        self.backend = backend
    def run(self):
        # Serve in a background greenlet so the tests run in the foreground.
        self.server = server.TCPGitServer(self.backend,
                                          'localhost',
                                          port=self.port)
        self.job = gevent.spawn(self.server.serve_forever)
    def stop(self):
        # Shut the server down and wait for the serving greenlet to exit.
        self.server.shutdown()
        gevent.joinall((self.job,))
class SwiftSystemBackend(server.Backend):
    # Git server backend resolving repository paths to Swift-backed repos.
    def open_repository(self, path):
        return swift.SwiftRepo(path, conf=swift.load_conf())
class SwiftRepoSmokeTest(unittest.TestCase):
    """End-to-end smoke tests pushing/fetching between a local repository
    and a Swift-backed repository served over the git TCP protocol."""
    @classmethod
    def setUpClass(cls):
        # One shared TCP git server (Swift backend) for the whole test class.
        cls.backend = SwiftSystemBackend()
        cls.port = 9148
        cls.server_address = 'localhost'
        cls.fakerepo = 'fakerepo'
        cls.th_server = DulwichServer(cls.backend, cls.port)
        cls.th_server.run()
        cls.conf = swift.load_conf()
    @classmethod
    def tearDownClass(cls):
        cls.th_server.stop()
    def setUp(self):
        # Start each test from a clean Swift container and an absent temp
        # dir (mkdtemp reserves a unique path, then it is removed so that
        # Repo.init(..., mkdir=True) can create it itself).
        self.scon = swift.SwiftConnector(self.fakerepo, self.conf)
        if self.scon.test_root_exists():
            try:
                self.scon.del_root()
            except swift.SwiftException:
                pass
        self.temp_d = tempfile.mkdtemp()
        if os.path.isdir(self.temp_d):
            shutil.rmtree(self.temp_d)
    def tearDown(self):
        # Best-effort cleanup of the Swift container and the local checkout.
        if self.scon.test_root_exists():
            try:
                self.scon.del_root()
            except swift.SwiftException:
                pass
        if os.path.isdir(self.temp_d):
            shutil.rmtree(self.temp_d)
    def test_init_bare(self):
        # Initializing a bare repo must create info/refs and objects/pack.
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        self.assertTrue(self.scon.test_root_exists())
        obj = self.scon.get_container_objects()
        filtered = [o for o in obj if o['name'] == 'info/refs'
                    or o['name'] == 'objects/pack']
        self.assertEqual(len(filtered), 2)
    def test_clone_bare(self):
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        remote_refs = tcp_client.fetch(self.fakerepo, local_repo)
        # The remote repo is empty (no refs retreived)
        self.assertEqual(remote_refs, None)
    def test_push_commit(self):
        # Pushing master must leave the same sha on the remote side.
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"]}
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        local_repo.do_commit('Test commit', 'fbo@localhost')
        sha = local_repo.refs.read_loose_ref('refs/heads/master')
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        remote_sha = swift_repo.refs.read_loose_ref('refs/heads/master')
        self.assertEqual(sha, remote_sha)
    def test_push_branch(self):
        # Pushing a non-master branch by explicit ref name.
        def determine_wants(*args):
            return {"refs/heads/mybranch":
                    local_repo.refs["refs/heads/mybranch"]}
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        local_repo.do_commit('Test commit', 'fbo@localhost',
                             ref='refs/heads/mybranch')
        sha = local_repo.refs.read_loose_ref('refs/heads/mybranch')
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack("/fakerepo",
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
        swift_repo = swift.SwiftRepo(self.fakerepo, self.conf)
        remote_sha = swift_repo.refs.read_loose_ref('refs/heads/mybranch')
        self.assertEqual(sha, remote_sha)
    def test_push_multiple_branch(self):
        # Pushing three branches in one pack; remote shas must all match.
        def determine_wants(*args):
            return {"refs/heads/mybranch":
                    local_repo.refs["refs/heads/mybranch"],
                    "refs/heads/master":
                    local_repo.refs["refs/heads/master"],
                    "refs/heads/pullr-108":
                    local_repo.refs["refs/heads/pullr-108"]}
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        local_shas = {}
        remote_shas = {}
        for branch in ('master', 'mybranch', 'pullr-108'):
            local_shas[branch] = local_repo.do_commit(
                'Test commit %s' % branch, 'fbo@localhost',
                ref='refs/heads/%s' % branch)
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        for branch in ('master', 'mybranch', 'pullr-108'):
            remote_shas[branch] = swift_repo.refs.read_loose_ref(
                'refs/heads/%s' % branch)
        self.assertDictEqual(local_shas, remote_shas)
    def test_push_data_branch(self):
        # Pushing real file content and reading the blobs/tree back from Swift.
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"]}
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        os.mkdir(os.path.join(self.temp_d, "dir"))
        files = ('testfile', 'testfile2', 'dir/testfile3')
        i = 0
        for f in files:
            open(os.path.join(self.temp_d, f), 'w').write("DATA %s" % i)
            i += 1
        local_repo.stage(files)
        local_repo.do_commit('Test commit', 'fbo@localhost',
                             ref='refs/heads/master')
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        commit_sha = swift_repo.refs.read_loose_ref('refs/heads/master')
        otype, data = swift_repo.object_store.get_raw(commit_sha)
        commit = objects.ShaFile.from_raw_string(otype, data)
        otype, data = swift_repo.object_store.get_raw(commit._tree)
        tree = objects.ShaFile.from_raw_string(otype, data)
        objs = tree.items()
        objs_ = []
        for tree_entry in objs:
            objs_.append(swift_repo.object_store.get_raw(tree_entry.sha))
        # Blob
        self.assertEqual(objs_[1][1], 'DATA 0')
        self.assertEqual(objs_[2][1], 'DATA 1')
        # Tree
        self.assertEqual(objs_[0][0], 2)
    def test_clone_then_push_data(self):
        # Round trip: reuse the pushed repo, clone it, then push new content.
        self.test_push_data_branch()
        shutil.rmtree(self.temp_d)
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        remote_refs = tcp_client.fetch(self.fakerepo, local_repo)
        files = (os.path.join(self.temp_d, 'testfile'),
                 os.path.join(self.temp_d, 'testfile2'))
        local_repo["HEAD"] = remote_refs["refs/heads/master"]
        indexfile = local_repo.index_path()
        tree = local_repo["HEAD"].tree
        index.build_index_from_tree(local_repo.path, indexfile,
                                    local_repo.object_store, tree)
        # The checked-out worktree must contain the previously pushed files.
        for f in files:
            self.assertEqual(os.path.isfile(f), True)
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"]}
        os.mkdir(os.path.join(self.temp_d, "test"))
        files = ('testfile11', 'testfile22', 'test/testfile33')
        i = 0
        for f in files:
            open(os.path.join(self.temp_d, f), 'w').write("DATA %s" % i)
            i += 1
        local_repo.stage(files)
        local_repo.do_commit('Test commit', 'fbo@localhost',
                             ref='refs/heads/master')
        tcp_client.send_pack("/fakerepo",
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
    def test_push_remove_branch(self):
        # Pushing ZERO_SHA for a ref deletes that branch on the remote.
        def determine_wants(*args):
            return {"refs/heads/pullr-108": objects.ZERO_SHA,
                    "refs/heads/master":
                    local_repo.refs['refs/heads/master'],
                    "refs/heads/mybranch":
                    local_repo.refs['refs/heads/mybranch'],
                    }
        self.test_push_multiple_branch()
        local_repo = repo.Repo(self.temp_d)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
        swift_repo = swift.SwiftRepo("fakerepo", self.conf)
        self.assertNotIn('refs/heads/pullr-108', swift_repo.refs.allkeys())
    def test_push_annotated_tag(self):
        # Pushing an annotated tag object alongside master.
        def determine_wants(*args):
            return {"refs/heads/master": local_repo.refs["HEAD"],
                    "refs/tags/v1.0": local_repo.refs["refs/tags/v1.0"]}
        local_repo = repo.Repo.init(self.temp_d, mkdir=True)
        # Nothing in the staging area
        sha = local_repo.do_commit('Test commit', 'fbo@localhost')
        otype, data = local_repo.object_store.get_raw(sha)
        commit = objects.ShaFile.from_raw_string(otype, data)
        tag = objects.Tag()
        tag.tagger = "fbo@localhost"
        tag.message = "Annotated tag"
        tag.tag_timezone = objects.parse_timezone('-0200')[0]
        tag.tag_time = commit.author_time
        tag.object = (objects.Commit, commit.id)
        tag.name = "v0.1"
        local_repo.object_store.add_object(tag)
        local_repo.refs['refs/tags/v1.0'] = tag.id
        swift.SwiftRepo.init_bare(self.scon, self.conf)
        tcp_client = client.TCPGitClient(self.server_address,
                                         port=self.port)
        tcp_client.send_pack(self.fakerepo,
                             determine_wants,
                             local_repo.object_store.generate_pack_data)
        swift_repo = swift.SwiftRepo(self.fakerepo, self.conf)
        tag_sha = swift_repo.refs.read_loose_ref('refs/tags/v1.0')
        otype, data = swift_repo.object_store.get_raw(tag_sha)
        rtag = objects.ShaFile.from_raw_string(otype, data)
        # The fetched tag must point at the commit and keep its identity.
        self.assertEqual(rtag.object[1], commit.id)
        self.assertEqual(rtag.id, tag.id)
# Allow running this module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"bhgv.empire@gmail.com"
] | bhgv.empire@gmail.com |
ac26026eb814ca0db15731de59acc6421ff8784d | f1291ebc19962fac85512c807cef74837e4d9a13 | /stss/__init__.py | 51d81fa53ed8741bec7f3cdf7de2dcb615e63026 | [
"MIT"
] | permissive | wuttem/stss | ab11e04079ce326f63212452cfcb90bc90bc8e65 | 518fe422ba090e77a0b4ef998955e097ffe7cae5 | refs/heads/master | 2020-06-10T01:03:01.876263 | 2017-02-19T11:36:50 | 2017-02-19T11:36:50 | 76,118,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | #!/usr/bin/python
# coding: utf8
from __future__ import unicode_literals
# Package version string (kept in sync with the distribution metadata).
__version__ = '0.1.0'
| [
"matthias.wutte@gmail.com"
] | matthias.wutte@gmail.com |
81134a6168f4a84855cdf9a45482da4a311f7d74 | c1e7e9012ffeba35473912af3ea432da22e2e085 | /base/67_demo_AIO_Coroutine.py | 392ae9d14f0b8ee2f348413920e72e1098787bd0 | [] | no_license | skwangdl/python_demo | 24e22100b232a7531047a9c313d24aee68c702b7 | 92b0404b3783c22b6109caca18529eb4d30a9dbe | refs/heads/master | 2020-04-30T17:19:04.472884 | 2019-05-30T10:15:04 | 2019-05-30T10:15:04 | 176,975,754 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py |
# First, call c.send(None) to start the generator.
# Then, once something is produced, c.send(n) switches execution to the consumer.
# The consumer receives the message via yield, processes it, and passes the
# result back through yield; the producer takes that result and produces the
# next message. When the producer decides to stop, it closes the consumer with
# c.close() and the whole exchange ends.
def consumer():
    """Generator-based consumer.

    Primed with send(None), it yields its current reply string, receives
    the next item via ``yield``, and acknowledges each truthy item with
    '200 OK'. A falsy item terminates the generator.
    """
    reply = ''
    while True:
        item = yield reply
        if not item:
            return
        print('[CONSUMER] Consuming %s...' % item)
        reply = '200 OK'
def produce(c):
    """Drive coroutine *c*: prime it, feed it the integers 1..5, print
    each acknowledgement it returns, then close it."""
    c.send(None)  # prime the generator up to its first yield
    for seq in range(1, 6):
        print('[PRODUCER] Producing %s...' % seq)
        ack = c.send(seq)
        print('[PRODUCER] Consumer return: %s' % ack)
    c.close()
# Run the producer/consumer demo when executed as a script.
if __name__ == '__main__':
    c = consumer()
    produce(c)
"wangck4@lenovo.com"
] | wangck4@lenovo.com |
70a6b84238efa4e023179a2ad24b371742532fce | fbb141c9b99c4c08ce2c0acfe13630d694d98744 | /7-stack/4.10-shu-zu-zhong-de-ni-xu-dui-lcof.py | f04c9947f38d38bf2a749719998cd041df3b5b3b | [] | no_license | huixian3/algorithm017 | 1534bc8a0364595b056e0f346cfe9fa8b8fee3bd | f43c99dc7810de863f8cd79115e272ac65ce9257 | refs/heads/master | 2023-04-02T07:10:03.670003 | 2021-04-13T14:38:36 | 2021-04-13T14:38:36 | 297,989,771 | 0 | 0 | null | 2020-09-23T14:05:41 | 2020-09-23T14:05:40 | null | UTF-8 | Python | false | false | 1,473 | py | '''
在数组中的两个数字,如果前面一个数字大于后面的数字,则这两个数字组成一个逆序对。
输入一个数组,求出这个数组中的逆序对的总数。
'''
# 归并排序 同 逆序对,分治
# 在megrge环节中计数即可,计数方法是,左边数据大于右边数据元素的pair数量
class Solution(object):
    def reversePairs(self, nums):
        """Count inversions (pairs i < j with nums[i] > nums[j]) via merge sort.

        Side effect: *nums* ends up sorted in ascending order, exactly as
        with the original merge-sort implementation.

        :type nums: List[int]
        :rtype: int
        """
        def sort_count(lo, hi):
            # Sort nums[lo:hi+1] in place and return its inversion count.
            if lo >= hi:
                return 0
            mid = (lo + hi) // 2
            count = sort_count(lo, mid) + sort_count(mid + 1, hi)
            merged = []
            i, j = lo, mid + 1
            while i <= mid and j <= hi:
                if nums[i] <= nums[j]:
                    merged.append(nums[i])
                    i += 1
                else:
                    # nums[i..mid] are all greater than nums[j]: each forms
                    # an inversion with it.
                    count += mid - i + 1
                    merged.append(nums[j])
                    j += 1
            merged.extend(nums[i:mid + 1])
            merged.extend(nums[j:hi + 1])
            nums[lo:hi + 1] = merged
            return count

        return sort_count(0, len(nums) - 1)
| [
"zhanhuixian@meituan.com"
] | zhanhuixian@meituan.com |
8a0079dd597dba447df0d9aed6437df677f2accb | 710026f64d3a23913ae71d2300147b371f5cb75b | /gammapy/data/tests/test_all.py | 138034335de153523316b69996357d13979c5972 | [] | no_license | Cadair/gammapy | 557c01e33d93fe6cc2daaac35b53590d33e31fbc | 19f4fdd299b8c3495c732fc412f5d18cb9df3590 | refs/heads/master | 2020-12-13T21:52:37.790005 | 2014-02-20T15:15:10 | 2014-02-20T15:15:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division
from numpy.testing import assert_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from .. import poisson_stats_image
def test_poisson_stats_image():
    """Get the data file via the gammapy.data.poisson_stats_image function"""
    counts = poisson_stats_image()
    assert counts.sum() == 40896
def test_poisson_stats_image_direct():
    """Get the data file directly via get_pkg_data_filename"""
    path = get_pkg_data_filename('../poisson_stats_image/counts.fits.gz')
    image = fits.getdata(path)
    assert image.sum() == 40896
def test_poisson_stats_extra_info():
    """Check the per-component sums returned with extra_info=True."""
    images = poisson_stats_image(extra_info=True)
    expected = dict(counts=40896, model=41000, source=1000, background=40000)
    for key, total in expected.items():
        assert_allclose(images[key].sum(), total)
"Deil.Christoph@gmail.com"
] | Deil.Christoph@gmail.com |
6c8b767bfd8451494c732f1f3a4f097e759995a8 | 314719658bf9c6a393bf73e2caa846b682648d7f | /no-save.py | 1c8afd01c10cfbaa1e1c250395656b58bb8d03ec | [] | no_license | vilajp/irenic | f07714b14856d707fd5dca8cefbae1ab20e4b08d | 054c548bde33bfbb9649de91bff1d8e73812a611 | refs/heads/master | 2023-02-22T05:58:24.997871 | 2021-01-26T16:11:18 | 2021-01-26T16:11:18 | 288,257,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,685 | py | import os
os.environ['KIVY_GL_BACKEND'] = 'angle_sdl2'
import webbrowser
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
class MyTextInput(TextInput):
    """Single-line, float-only TextInput that configures itself and grabs
    keyboard focus as soon as it is attached to a parent widget."""

    def on_parent(self, widget, parent):
        # Configure once the widget is added to the widget tree.
        self.focus = True
        self.multiline = False
        self.readonly = False
        self.halign = "left"
        self.font_size = 55
        self.size_hint = (.7, .5)
        self.input_filter = "float"   # only digits / decimal point accepted
        # BUG FIX: these were the *strings* "False"/"True". Any non-empty
        # string is truthy, so write_tab effectively stayed enabled; kivy's
        # BooleanProperty expects real booleans.
        self.write_tab = False
        self.use_handles = True
class NoSaveApp(App):
    """Kivy app that opens a WhatsApp chat for a typed phone number without
    saving it as a contact, via the api.whatsapp.com "click to chat" URL.
    """
    # Application icon shown by the OS / window manager.
    icon = 'no-save-512x512.png'
    def build(self):
        """Build the widget tree: number-entry row, main button, last-number row."""
        # Top row: prompt label + phone-number input.
        hlayout = GridLayout(
            cols = 2,
            row_force_default=True,
            row_default_height=70,
        )
        main_layout = BoxLayout(
            orientation="vertical",
            padding = 10,
            #spacing = 10,
        )
        self.quehago= Label(
            text ="Enter Phone number:",
            size_hint = (.3,.5),
            font_size = "24sp",
        )
        hlayout.add_widget(self.quehago)
        # Focused, float-filtered input for the phone number.
        self.numtel = MyTextInput()
        hlayout.add_widget(self.numtel)
        main_layout.add_widget(hlayout)
        # Main action button: open WhatsApp for the entered number.
        button = Button(
            #text= "[size=55sp]NO SAVE![/size]", markup = True,
            text= "NO SAVE!",
            font_size = "55sp",
            pos_hint={"center_x": 0.5, "center_y": 0.5},
            size_hint=(.5,.5),
        )
        main_layout.add_widget(button)
        # Bottom row: label remembering the last number + a "Re-use" button.
        hlayoutabajo = GridLayout(
            cols = 2,
            row_force_default=True,
            row_default_height=60,
        )
        self.rotulo = Label (
            text = "Last Number",
            font_size = "55sp",
            size_hint=(1,.5),
        )
        hlayoutabajo.add_widget(self.rotulo)
        button2 = Button(
            #text= "[size=55sp]NO SAVE![/size]", markup = True,
            text= "Re-use",
            font_size = "30sp",
            pos_hint={"center_x": 0.5, "center_y": 0.5},
            size_hint=(.2,.5),
        )
        hlayoutabajo.add_widget(button2)
        main_layout.add_widget(hlayoutabajo)
        button2.bind(on_press=self.on_button2_press)
        button.bind(on_press=self.on_button_press)
        return main_layout
    def on_button_press(self, instance):
        """Open a WhatsApp chat for the entered number, clear the input,
        and remember the number in the bottom label."""
        numtel = self.numtel.text
        # str.join places the number between the URL prefix and suffix:
        # https://api.whatsapp.com/send?phone=<number>&text=...
        urlwhats = numtel.join(
            ["https://api.whatsapp.com/send?phone=",
            "&text=&source=&data=&app_absent="]
        )
        webbrowser.open(urlwhats)
        self.numtel.text = ""
        self.rotulo.text = numtel
    def on_button2_press(self, instance):
        """Copy the last-used number back into the input (if one exists)."""
        if self.rotulo.text != "Last Number":
            self.numtel.text = self.rotulo.text
# Launch the Kivy application when run as a script.
if __name__ == "__main__":
    app = NoSaveApp()
    app.run()
"vilajp@gmail.com"
] | vilajp@gmail.com |
6c6d5f913ad89423170d7e4e728f2d9b67184ad4 | 5bb8b4c7faeebd16da16ecbcd4a98aabaf688e8f | /data_tools/walker/src-cikm/build_graph2/citations.py | 2438338b186b181a26af7fd8e16ccbc3d15dfd74 | [] | no_license | xiaoqinzhe/vrdetection | 014fc2b61c9b30dd2699fdba41089b18b7f060be | 604a812a21a98d72ba8e23a716eb72153bdaa7c4 | refs/heads/master | 2023-07-04T07:44:12.141404 | 2021-08-01T06:21:17 | 2021-08-01T06:21:17 | 150,063,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | #coding:utf-8
import json
# Input: one JSON paper record per line (DBLP dump).
file_path = '/mnt/hdd2/dblp/dblp_ref.json'
# Output: one line per paper: "<paper_id> <ref_id> <ref_id> ...".
citation_file_path = '/mnt/hdd2/cikm/citation.txt'
with open(file_path) as ifile, open(citation_file_path, 'w') as ofile:
    for line in ifile:
        paper = json.loads(line)
        # Skip papers that have no outgoing citations.
        if 'references' not in paper:
            continue
        output_papers = [paper['_id']]
        output_papers += paper['references']
        ofile.write('{}\n'.format(' '.join(output_papers)))
| [
"xiaoqinzhe@qq.com"
] | xiaoqinzhe@qq.com |
67f4c5e5b9180fbdfcfe9e9ce38ea7ff37093739 | 6142a961585cc01f39ae24de8540d7f9399c2ea0 | /tutorial/quickstart/views.py | 211430b3f1d3d7a4dad560199280e399c053251f | [] | no_license | sremy/django-demo | d5954410bfb4c18c79632b06376898880ee58606 | c74f4ee0e1639e02cea0a40c28517c13f9158f85 | refs/heads/master | 2020-07-29T04:05:35.225899 | 2019-09-30T23:52:26 | 2019-09-30T23:52:26 | 209,662,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.request import Request
from tutorial.quickstart.serializers import UserSerializer, GroupSerializer, GenericSerializer
from rest_framework.decorators import api_view
# Create your views here.
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest users first; ModelViewSet supplies list/retrieve/create/update/destroy.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    # Default ordering; ModelViewSet supplies the full CRUD action set.
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class ManualView(APIView):
    """Hand-rolled APIView that returns a fixed list on GET."""
    def get(self, request: Request, format=None):
        # NOTE(review): debug prints -- switch to logging before production use.
        print("request.query_params: " + str(request.query_params))
        print("request.POST: " + str(request.POST))
        return Response([1, 3])
class GenericView(generics.GenericAPIView):
    """GenericAPIView demo: each handler reports which HTTP method ran."""
    serializer_class = GenericSerializer
    def get(self, request: Request, format=None):
        return Response({'key': 'GET'})
    #@api_view(['POST'])
    def post(self, request: Request, format=None):
        #name = request.POST.get('name')
        return Response({'key': 'POST'})
class ReportView(APIView):
    """Debug endpoint that echoes details about the incoming request."""

    def get(self, request: Request, format=None):
        # NOTE(review): debug prints -- switch to logging beyond local dev.
        print("request: " + str(request._request))
        print("request path: " + str(request._request.path))
        print("request.query_params: " + str(dir(request.query_params)))
        # BUG FIX: previously returned `request._data`, a private attribute
        # holding an unparsed sentinel until `.data` is first accessed; the
        # public `request.data` property parses and returns the body.
        return Response({"query": str(request._request), "path": request._request.path, "params": str(request.query_params), "data": request.data})
| [
"seb.remy@gmail.com"
] | seb.remy@gmail.com |
d90f4c250ad6540185c4685ac49cf4e5df824ab7 | b4f661f1153637d9cfec18e4cf56b64582c31385 | /src/Python/304.二维区域和检索-矩阵不可变.py | fd58f373a91c848ff44f0bd8495b1cc29de69c8a | [] | no_license | Icedomain/LeetCode | 12dd24bbe2d7aba1f6ebe61bffe4c5e6284fbd06 | 4bc8e41499b9c884d64b5a44fe783fdb7030676e | refs/heads/master | 2021-02-15T15:12:15.009790 | 2020-09-22T11:37:59 | 2020-09-22T11:37:59 | 244,909,740 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | #
# @lc app=leetcode.cn id=304 lang=python3
#
# [304] 二维区域和检索 - 矩阵不可变
#
class NumMatrix:
    """2-D prefix-sum table supporting O(1) rectangle-sum queries."""

    def __init__(self, matrix: List[List[int]]):
        # Empty input: leave the table unbuilt (matches original behaviour).
        if not matrix:
            return
        rows, cols = len(matrix), len(matrix[0])
        # prefix[r][c] == sum of the sub-matrix matrix[0:r][0:c]
        prefix = [[0] * (cols + 1) for _ in range(rows + 1)]
        for r, row in enumerate(matrix, start=1):
            running = 0
            for c, val in enumerate(row, start=1):
                running += val  # row-prefix sum up to column c
                prefix[r][c] = prefix[r - 1][c] + running
        self.sums = prefix

    def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        # Inclusion-exclusion on the prefix table.
        p = self.sums
        return (p[row2 + 1][col2 + 1] - p[row2 + 1][col1]
                - p[row1][col2 + 1] + p[row1][col1])
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
| [
"1271029566@qq.com"
] | 1271029566@qq.com |
75b0742f4d4d9ac93c8bda2c913177cade4c76c6 | e4217e4ab2f6ef04f7f0e7c95caa5d5c0f750742 | /Cap2/Ejercicios/Ej04.py | 5339273cebee823c00e3c79776f69600e065f8d5 | [] | no_license | ShannonNCM/Sistemas-Dinamicos | 0cc8b88b2c702134c4b10aa977927a504b099187 | 1aea45b0a5657c33d3ecae0a35492f64d60fd55e | refs/heads/master | 2022-12-29T21:16:27.298901 | 2020-10-21T01:22:07 | 2020-10-21T01:22:07 | 281,296,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | '''un circuito en serie capacitor-resitor esta dado por: RdQ/dt + Q/C = E donde Q es la
carga del capacitor. R=1/(5+t) y capacitancia C=0.5, E=100. Hallar carga del capactiro
en Q(0)=0'''
#importando las librerias
from sympy import *
from sympy.abc import t
# Solve the series RC-circuit ODE: R*dQ/dt + Q/C = E, with R = 1/(5+t),
# C = 0.5 and E = 100, for the charge Q(t) with initial condition Q(0) = 0.
# (dsolve treats the expression below as "== 0".)
Q=Function('Q')
eqd1=(1/(5+t))*Q(t).diff(t)+Q(t)/0.5-100
pprint(dsolve(eqd1 , ics={Q(0):0}))
print()
| [
"noreply@github.com"
] | noreply@github.com |
e17ec42260d63dbec8d4f3a7362681eb8117e543 | 7e5a63cc13397cb31ea8d4f22320d05eb6d65d98 | /Python/django/dcourses/dcourses/wsgi.py | f2a120b5da59cd8a1dc970b197a936a7e48bcebf | [] | no_license | loyti/GitHubRepoAssingment | a9e876cf63cccf4f617bdcba374010237a1902f0 | ee9ee4478fc4d89635cb245d324848587f8cd129 | refs/heads/master | 2023-01-22T11:48:44.131695 | 2019-01-14T21:07:05 | 2019-01-14T21:07:05 | 94,840,928 | 0 | 2 | null | 2023-01-12T09:46:12 | 2017-06-20T02:25:47 | Python | UTF-8 | Python | false | false | 394 | py | """
WSGI config for dcourses project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before creating the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dcourses.settings")
# WSGI callable used by application servers (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"brice@loyti.com"
] | brice@loyti.com |
724ace94a1e0870eab100583a8d238c8ae4de8ab | 47a60670de631b53a5299960a56f49c69440f243 | /SentimentAnalysis.py | 9ce675ca812ddc9d52567c408c36b964435aba79 | [] | no_license | findingnino/SentimentAnalyzer | 2f72764220f4da93a596232f0b73066038f8809a | 1680fb0bb14b492a9409a11ba22b7bf95f19d014 | refs/heads/master | 2020-12-30T09:37:57.659112 | 2015-07-18T17:03:59 | 2015-07-18T17:03:59 | 39,018,681 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | from textblob import TextBlob
from textblob.classifiers import NaiveBayesClassifier
from os import listdir
import time, winsound
def dir_list(dir):
    '''Return the list of all files in *dir*, or None if listing fails.

    Catches OSError rather than the Windows-only WindowsError builtin:
    WindowsError is undefined on other platforms (evaluating the handler
    there raised NameError), while on Windows it is a subclass of
    OSError, so the original behaviour is preserved.
    '''
    try:
        return listdir(dir)
    except OSError as err:
        print("Directory error: " + str(err))
def main():
    """Interactively train a NaiveBayesClassifier and classify a folder.

    Prompts for a JSON training-set path and a test folder, then prints
    the classifier's label for each directory entry.  (Python 2 script:
    uses raw_input and print statements.)
    """
    # NOTE: local name 'json' shadows the stdlib module of the same name.
    json = raw_input("Where is the json training set?")
    print "Program start", time.ctime() #debug
    with open(json, 'r') as file:
        classifier = NaiveBayesClassifier(file, format='json')
    print "Classifier done!", time.ctime() #debug
    test = raw_input("Where is the test eml_folder?")
    print "Testing...", time.ctime()
    # NOTE(review): classify() is called on the file *names* returned by
    # dir_list, not the file contents -- presumably the emails themselves
    # should be read and classified; confirm intent.
    for emails in dir_list(test):
        print classifier.classify(emails)
    print "Testing done", time.ctime()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"ngancitano@gmail.com"
] | ngancitano@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.