blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c56346d5e02e189e90e5374d286151645a824c52
|
87ff30e00840128a191a77ff16f4ce98d812d91b
|
/image processing/recognize/models.py
|
a88061b54ff71ab57ee700f6cd63e8ded8421991
|
[] |
no_license
|
joshtechnologygroup/roomvacancydetector
|
22b1d7b6c93ae94d7e0bc842f2417a4950ea8bcf
|
4618c6e6306a5ae0216e5c3d91360c64a2724a6c
|
refs/heads/master
| 2021-01-20T07:18:48.457305
| 2017-08-27T06:17:52
| 2017-08-27T06:17:52
| 101,531,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.utils.timezone import now
import uuid
class Document(models.Model):
    """Uploaded media: an optional video and/or image file."""
    video = models.FileField(upload_to = 'videos/' , blank=True, null=True)
    image = models.FileField(upload_to = 'images/', blank=True, null=True)
class AttendanceLog(models.Model):
    """One enter/exit event for a named person."""

    status_choices = (
        ('Enter', 'Enter'),
        ('Exit', 'Exit'),
    )
    # Pass the callable itself, not its result: `default=now()` evaluated
    # the timestamp once at import time, so every row would have been
    # stamped with the server start time instead of the insert time.
    time = models.DateTimeField(default=now)
    person_name = models.CharField(max_length=30)
    status = models.CharField(max_length=5, choices=status_choices)
class Profile(models.Model):
    """Lightweight profile record keyed by a free-text username."""
    username = models.CharField(max_length=30)
    # NOTE(review): max_length is ignored by UUIDField (it always stores a
    # 32-character hex value); the argument is harmless but misleading.
    uuid = models.UUIDField(default=uuid.uuid4, max_length=30)
    active = models.BooleanField(default=True)
|
[
"sakskam.kakkar@joshtechnologygroup.com"
] |
sakskam.kakkar@joshtechnologygroup.com
|
ab0328589096ab7be9b25479f128feea2fa40df5
|
0ed714e120347eda4fdd763c08870ee5ac58c097
|
/projet-back1/Tickets/models.py
|
be7746b32b1f1ad443f6b88299e8c71754d2a0c1
|
[] |
no_license
|
eunice-manuela/projet-django
|
4ac09ddee3c45e7787e56aaba3e1732d563b5b98
|
1c67fd2a078b099139c18b960ce13d3f3187967d
|
refs/heads/master
| 2022-11-12T23:07:23.429407
| 2020-07-14T14:52:30
| 2020-07-14T14:52:30
| 257,627,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,287
|
py
|
from django.db import models
# Create your models here.
class Services(models.Model):
    """A department/service that tickets can be filed against."""
    name = models.CharField(max_length=20)
    addresse = models.CharField(max_length=20)
class User(models.Model):
    """Local user record; each user belongs to one Service."""
    name = models.CharField(max_length=200)
    service = models.ForeignKey(Services, on_delete=models.CASCADE)
class responses(models.Model):
    """A reply authored by a User (class name breaks PascalCase convention)."""
    date = models.DateTimeField(auto_now_add=True)
    Author = models.ForeignKey(User, on_delete=models.CASCADE)
    Description = models.CharField(max_length=200)
class Tickets(models.Model):
    """A support ticket: title/state/priority plus author and target service."""
    # Explicit integer primary key: callers must supply the id themselves
    # (no auto-increment).
    id = models.IntegerField(primary_key=True)
    title = models.CharField(max_length=100)
    state = models.CharField(max_length=10)
    auteur = models.ForeignKey(User, on_delete=models.CASCADE)
    # Non-ASCII identifier — valid in Python 3 but awkward to reference.
    date_création = models.DateTimeField(auto_now_add=True)
    details = models.CharField(max_length=250)
    priority = models.CharField(max_length=5)
    service = models.ForeignKey(Services, on_delete=models.CASCADE)

    def setManager(self, manager):
        # NOTE(review): `manager` is not a model field — this sets only an
        # in-memory attribute and is never persisted; confirm intent
        # (the Manager model below seems to carry the relation instead).
        self.manager = manager

    def setPriority(self, prior):
        # Java-style setter; in-memory only until save() is called.
        self.priority = prior

    def setState(self, state):
        self.state = state
class Manager(models.Model):
    """Association between a managing User and a Ticket."""
    manager = models.ForeignKey(User, on_delete=models.CASCADE)
    ticket = models.ForeignKey(Tickets, on_delete=models.CASCADE)
|
[
"lemotieuarold@gmail.com"
] |
lemotieuarold@gmail.com
|
7665dceb01aafd365f05ce088641e70e85a0288a
|
af7c03e8dafcbc22dfc814734f237c5b92c25a2b
|
/db/manage.py
|
ed0ac32a380536b3a7c625fc6a0a589dbec33ac2
|
[] |
no_license
|
rafilurie/ReadMate
|
4832a36ce32a2f69478d1ff4d3ef7f2f1aee4acd
|
6c78e119b7df88fdaa40176f516493b54c691b9e
|
refs/heads/master
| 2021-01-22T02:28:13.953086
| 2015-07-25T02:41:16
| 2015-07-25T02:41:16
| 39,038,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
#!/usr/bin/env python
"""Database schema-versioning entry point (sqlalchemy-migrate shell)."""
from migrate.versioning.shell import main

if __name__ == '__main__':
    # The original call passed a pasted module repr as a keyword argument
    # (`six='<module 'six' from ...>'`), which is not even valid Python
    # syntax; migrate's shell needs no arguments here.
    main()
|
[
"afrancis@betterworks.com"
] |
afrancis@betterworks.com
|
a5c8b0844f3e3f0990d4f845b8d75cdee5f9893d
|
3a1f697938a743c7fdc1ec0dd956dc18afa96bc2
|
/dealextream/dealextream/items.py
|
312ff44184ac60dc7796b836a6b1a427ecf3b2b8
|
[] |
no_license
|
Ashish-singha08/scrapers
|
ee3828dd139f745f49ad1ee6797bce0300313f7b
|
45ebc27daba4cd4eaa7691d7ef9e91820ddef055
|
refs/heads/master
| 2022-12-24T10:23:23.085775
| 2013-04-23T22:40:01
| 2013-04-23T22:40:01
| 300,263,471
| 0
| 0
| null | 2020-10-01T12:00:22
| 2020-10-01T12:00:21
| null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
class DealextreamItem(Item):
    """Scraped product record from DealExtreme: title, price, image, text."""
    # define the fields for your item here like:
    title = Field()
    price = Field()
    img_url = Field()
    description = Field()
|
[
"shluvme@gmail.com"
] |
shluvme@gmail.com
|
182b07851c952743938682ef9603220e5b4d1cbc
|
03a37d9f3fa8ed6e9d72f1ba4f1263ef11ab223b
|
/plugins/cisco.py
|
1f9f15ef1cb94e9288b7c36a58ac8080ed991ca2
|
[
"Apache-2.0"
] |
permissive
|
mjethanandani/pyang-cisco-plugin
|
4fc989de37d8a04a8a7a3060957308f6147c68df
|
922c0849c47d5ec9f2f3a4a609f8db0d6eb87325
|
refs/heads/master
| 2020-05-26T14:11:05.089245
| 2017-06-13T16:46:25
| 2017-06-13T16:46:25
| 85,004,446
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,095
|
py
|
"""Cisco usage guidelines plugin
"""
import optparse
import sys
from pyang import plugin
from pyang import statements
from pyang import error
from pyang.error import err_add
from pyang.plugins import lint
def pyang_plugin_init():
    """Entry point called by pyang's plugin loader to register this plugin."""
    plugin.register_plugin(CiscoPlugin())
class CiscoPlugin(lint.LintPlugin):
    """Lint plugin enforcing Cisco YANG module guidelines."""

    def __init__(self):
        lint.LintPlugin.__init__(self)
        # Namespace / module-name prefixes the inherited lint checks accept.
        self.namespace_prefixes = ['http://cisco.com/ns/yang/']
        self.modulename_prefixes = ['Cisco-IOS-XR', 'Cisco-IOS-XE',
                                    'Cisco-IOS-NX-OS', 'cisco']

    def add_opts(self, optparser):
        """Register the --cisco command-line flag."""
        optlist = [
            optparse.make_option("--cisco",
                                 dest="cisco",
                                 action="store_true",
                                 help="Validate the module(s) according " \
                                      "to Cisco rules."),
        ]
        optparser.add_options(optlist)

    def setup_ctx(self, ctx):
        """Enable the lint machinery only when --cisco was given."""
        if not ctx.opts.cisco:
            return
        # Cisco guideline: source lines at most 70 characters.
        ctx.max_line_len = 70
        self._setup_ctx(ctx)
|
[
"mjethanandani@gmail.com"
] |
mjethanandani@gmail.com
|
369cce71e52ec528a93ffd23afb6bc58a3eb8d11
|
85e4024fa2f295328b759f05c50cbb84ae3e775b
|
/flask-simple-image-gallery/gallery/views.py
|
6642173fa0aba557fdaaf43389de807320280e7b
|
[] |
no_license
|
chard4/GPhoto2FlaskStuff
|
105fd54c2df9859827f24f183a668ee7a63a4228
|
c8555c191ad3bc54f4530cbd81630bb3e96b788e
|
refs/heads/master
| 2021-01-20T05:27:10.258589
| 2017-08-25T22:39:42
| 2017-08-25T22:39:42
| 101,445,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,689
|
py
|
from flask import Blueprint, render_template, request, current_app
import simplejson
from .models import Image
# Static files only work for blueprints registered with url_prefix
# https://github.com/mitsuhiko/flask/issues/348
gallery = Blueprint('gallery', __name__, template_folder='templates', static_folder='static')
@gallery.route('/', methods=['GET', 'POST',])
def show_gallery():
    """Render the gallery index with every image under GALLERY_ROOT_DIR."""
    # NOTE(review): POST is accepted by the route but the body never reads
    # request data — confirm whether POST support is intentional.
    images = Image.all(current_app.config['GALLERY_ROOT_DIR'])
    return render_template('index.html', images=images)
@gallery.route('/json')
def json():
    """Return a JSON array of image filenames under GALLERY_ROOT_DIR.

    Optional integer query parameters ``start`` and ``stop`` slice the
    list; a non-numeric value yields a 400 response.
    """
    images = Image.all(current_app.config['GALLERY_ROOT_DIR'])
    start = 0
    stop = len(images)
    try:
        if request.method == 'GET' and 'start' in request.args:
            start = int(request.args.get('start'))
        if request.method == 'GET' and 'stop' in request.args:
            stop = int(request.args.get('stop'))
    except ValueError:
        current_app.logger.debug(request)
        return ("start/stop parameters must be numeric", 400)
    # Materialize a real list: under Python 3 map() returns a lazy iterator,
    # which simplejson cannot serialize ("not JSON serializable").
    image_filenames = [image.filename for image in images[start:stop]]
    return simplejson.dumps(image_filenames)
@gallery.route('/upload', methods=['POST',])
def upload():
    """Accept a multipart 'image' upload; 201 on success, 400 otherwise."""
    if request.method == 'POST' and 'image' in request.files:
        image = request.files['image']
        # Constructing the Image presumably saves the upload as a side
        # effect (the object is deliberately discarded) — confirm against
        # Image.__init__ in models.py.
        Image('', post=image, root=current_app.config['GALLERY_ROOT_DIR'])
        return ("ok", 201,)
    return (simplejson.dumps({'error': 'you need to pass an image'}), 400)
# FIXME: make more modular to avoid the import below
# this import is here to avoid circular hell import
import app
|
[
"captaincardo@gmail.com"
] |
captaincardo@gmail.com
|
5fd268ba2b6a8383a11a014c24b3443640f8143d
|
d647dc6b7e5594d7a52a6e4f599e6b55d20d8d85
|
/clustR.py
|
f50583b84da44e44658f8a35a21b0fa13ed19021
|
[] |
no_license
|
itclunie/Python_wiki-VIIRS
|
2147f8eb984ceaaf799d1c123eea3fc5cf82d7b5
|
71a1a1693d66c91ccab203bfae33c05f15b47df5
|
refs/heads/master
| 2021-04-30T04:48:33.111434
| 2018-02-14T18:04:53
| 2018-02-14T18:04:53
| 121,543,113
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,582
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
http://nbviewer.jupyter.org/github/lmcinnes/hdbscan/blob/master/notebooks/How%20HDBSCAN%20Works.ipynb
break into months
hexify, then cluster within hexes? # of groups within hexes?
"""
import hdbscan, sys, csv
import numpy as np
def append2csv(inLst, csvPathName):
    """Append each row of *inLst* to the CSV file at *csvPathName*, then
    empty every row list in place so the caller's buffers are reset.

    inLst -- list of row lists (mutated: each row is cleared).
    csvPathName -- path of the CSV file to append to.
    """
    with open(csvPathName, 'a') as output:
        writer = csv.writer(output, lineterminator='\n')
        # One writerows() call replaces the original list comprehension
        # that was used only for its side effect (writerows([i]) per row).
        writer.writerows(inLst)
    # Clear each row in place — callers hold references to these lists.
    for row in inLst:
        row[:] = []
# --- Load VIIRS fire detections and cluster them per month-year ---
with open("VIIRSfiresCLPD.csv", "r") as csvIn:
    reader = csv.reader(csvIn, lineterminator = '\n')
    VIIRS = list(reader)
# Start the output file fresh with a header row.
with open("VIIRSmonthClusters.csv", "w") as csvOut:
    writer = csv.writer(csvOut, lineterminator = '\n')
    writer.writerows([['group','date','x','y']])
# Bucket detections by month-year (column 2); columns 0/1 are lat/lon.
VIIRSdict = {}
for i in VIIRS[1:]:
    VIIRSdict[i[2]] = [] #monthYear
for i in VIIRS[1:]:
    lon = float(i[1])
    lat = float(i[0])
    VIIRSdict[i[2]].append([lon,lat]) #key monthYear, value x,y
cont = 0
for key in VIIRSdict:
    print(key, cont)
    mnthArray = np.array(VIIRSdict[key])
    # The haversine metric expects coordinates in radians.
    rads = np.radians(mnthArray)
    clusterer = hdbscan.HDBSCAN(min_cluster_size=5,
                                metric='haversine',
                                algorithm='prims_balltree').fit(rads)
    labels = clusterer.labels_
    outPut = []
    # Comprehension used purely for its append side effect.
    [outPut.append( [ labels[i], key, mnthArray[i][0], mnthArray[i][1] ] ) for i in range(len(labels))]
    # append2csv also clears outPut in place after writing.
    append2csv(outPut,"VIIRSmonthClusters.csv")
    cont += 1
#    if cont == 4:
#        break
|
[
"noreply@github.com"
] |
itclunie.noreply@github.com
|
09944950b10af988e22dc6cc5424f94d952c0407
|
8d72c34a87195091fcaa01266cb5bdfc66f44066
|
/almoxarifado/users/forms.py
|
2e5d14d5c1ff7b573bbc2a246f83bcf3f1c11108
|
[
"MIT"
] |
permissive
|
GustavoCruz12/ALmoxarifadoLimpo
|
cef0a13f640b2fc0a37bc75fcd390af40c7d63dc
|
13ab2113cc9e41e9268ede85562da712f86c7139
|
refs/heads/master
| 2020-03-31T18:21:19.557793
| 2018-10-10T18:16:00
| 2018-10-10T18:16:00
| 152,294,957
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
from django.contrib.auth import get_user_model, forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
User = get_user_model()
class UserChangeForm(forms.UserChangeForm):
    """Admin change form bound to the project's custom user model."""

    class Meta(forms.UserChangeForm.Meta):
        model = User
class UserCreationForm(forms.UserCreationForm):
    """Sign-up form that rejects usernames already present in the database."""

    # Merge the parent's messages with our additions instead of mutating
    # the parent dict in place: the original
    # `error_messages.update({...})` returned None, and the subsequent
    # class-level `error_messages = {...}` dropped the
    # "duplicate_username" key, so clean_username raised KeyError instead
    # of the intended ValidationError.
    error_messages = dict(
        forms.UserCreationForm.error_messages,
        duplicate_username=_("This username has already been taken."),
        password_mismatch=_("The two password fields didn't match."),
    )

    class Meta(forms.UserCreationForm.Meta):
        model = User
        fields = ('username',
                  'name',
                  'email',
                  'almoxarifado_user',
                  'secretaria_user',
                  'departamento_user',
                  'user_permissions',
                  )

    def clean_username(self):
        """Raise a validation error if the username is already taken."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise ValidationError(self.error_messages["duplicate_username"])
|
[
"gustavocruz201419@gmail.com"
] |
gustavocruz201419@gmail.com
|
71ef274c3bc61c3ed6659ee4c1974e6c92f063a8
|
45b013eddc741fc8525676a90df3f076171f4424
|
/apbot_nav/scripts/countertop_spray.py
|
0b0e33b8ae4b7d62664110c09854705892cfb80b
|
[] |
no_license
|
NickNair/GigaRoboticsArtpark
|
e9f0181c0121e140c628fa082f82fdc8fc6c2650
|
5ae1da24ff7f447a2777d7e1dc7b9d53fdf699b7
|
refs/heads/main
| 2023-07-27T22:54:48.886149
| 2021-09-06T17:52:40
| 2021-09-06T17:52:40
| 403,372,805
| 1
| 2
| null | 2021-09-06T17:52:41
| 2021-09-05T17:35:03
|
Python
|
UTF-8
|
Python
| false
| false
| 19,243
|
py
|
#!/usr/bin/env python3
import rospy
import actionlib
import subprocess
import math
import sys
import copy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import actionlib
import tf
import sensor_msgs.point_cloud2 as pc2
from std_msgs.msg import String
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from moveit_msgs.msg import JointConstraint, Constraints
from nav_msgs.msg import Odometry
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import PointCloud2, PointField
from water_rviz_marker import MarkerSpawner
class Ur5Moveit:
    """Thin wrapper around the MoveIt commander API for one planning group.

    Owns the ROS node, the MoveGroupCommander, and an execute-trajectory
    action client; exposes pose/joint/cartesian motion helpers.
    """

    # Constructor
    def __init__(self, x):
        # x: name of the MoveIt planning group to drive (e.g. "arm").
        # Initialize Node
        rospy.init_node('pickndplace', anonymous=True)
        rospy.sleep(1.5)
        # Instantiate the related MoveIt objects
        self._planning_group = x
        self._commander = moveit_commander.roscpp_initialize(sys.argv)
        self._robot = moveit_commander.RobotCommander()
        self._scene = moveit_commander.PlanningSceneInterface()
        self._group = moveit_commander.MoveGroupCommander(self._planning_group)
        self._display_trajectory_publisher = rospy.Publisher(
            '/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)
        self._exectute_trajectory_client = actionlib.SimpleActionClient(
            'execute_trajectory', moveit_msgs.msg.ExecuteTrajectoryAction)
        # Block until the trajectory-execution action server is up.
        self._exectute_trajectory_client.wait_for_server()
        self._planning_frame = self._group.get_planning_frame()
        self._eef_link = self._group.get_end_effector_link()
        self._group_names = self._robot.get_group_names()
        # Initializing Tf listener object
        self.t = tf.TransformListener()
        # Current State of the Robot is needed to add box to planning scene
        # self._curr_state = self._robot.get_current_state()
        # rospy.loginfo(
        #     '\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
        # rospy.loginfo(
        #     '\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
        # rospy.loginfo(
        #     '\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
        # rospy.loginfo('\033[94m' + " >>> Ur5Moveit init done." + '\033[0m')

    # Function to go to specified position
    def go_to_pose(self, arg_pose):
        """Plan and move the end effector to *arg_pose*; return the plan flag."""
        pose_values = self._group.get_current_pose().pose
        # rospy.loginfo('\033[94m' + ">>> Current Pose:" + '\033[0m')
        # rospy.loginfo(pose_values)
        self._group.set_pose_target(arg_pose)
        flag_plan = self._group.go(wait=True)  # wait=False for Async Move
        pose_values = self._group.get_current_pose().pose
        # rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
        # rospy.loginfo(pose_values)
        list_joint_values = self._group.get_current_joint_values()
        # rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
        # rospy.loginfo(list_joint_values)
        # if (flag_plan == True):
        #     rospy.loginfo(
        #         '\033[94m' + ">>> go_to_pose() Success" + '\033[0m')
        # else:
        #     rospy.loginfo(
        #         '\033[94m' + ">>> go_to_pose() Failed. Solution for Pose not Found." + '\033[0m')
        return flag_plan

    # Function to set joint angles
    def set_joint_angles(self, arg_list_joint_angles):
        """Plan and move to the given joint configuration; return the plan flag."""
        list_joint_values = self._group.get_current_joint_values()
        # rospy.loginfo('\033[94m' + ">>> Current Joint Values:" + '\033[0m')
        # rospy.loginfo(list_joint_values)
        self._group.set_joint_value_target(arg_list_joint_angles)
        flag_plan = self._group.go(wait=True)
        list_joint_values = self._group.get_current_joint_values()
        # rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
        # rospy.loginfo(list_joint_values)
        pose_values = self._group.get_current_pose().pose
        # rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
        # rospy.loginfo(pose_values)
        # if (flag_plan == True):
        #     rospy.loginfo(
        #         '\033[94m' + ">>> set_joint_angles() Success" + '\033[0m')
        # else:
        #     rospy.logerr(
        #         '\033[94m' + ">>> set_joint_angles() Failed." + '\033[0m')
        return flag_plan

    # Function to go to pre defined position
    def go_to_predefined_pose(self, arg_pose_name):
        """Move to a pose named in the SRDF; failures are silently ignored."""
        # rospy.loginfo('\033[94m' + "Going to Pose: {}".format(arg_pose_name) + '\033[0m')
        try:
            self._group.set_named_target(arg_pose_name)
            plan = self._group.go()
        except:
            # NOTE(review): bare except swallows every error, including
            # unknown pose names — confirm this best-effort behavior is wanted.
            pass

    def cartesian_path(self, waypoints):
        """Follow *waypoints* with a fine (0.5 mm) interpolated cartesian path."""
        (plan, fraction) = self._group.compute_cartesian_path(
            waypoints,  # waypoints to follow
            0.0005,     # eef_step
            0.0)        # jump_threshold
        self._group.execute(plan, wait=True)

    def cartesian_path2(self, waypoints):
        """Same as cartesian_path but with a coarser 1 mm eef step."""
        (plan, fraction) = self._group.compute_cartesian_path(
            waypoints,  # waypoints to follow
            0.001,      # eef_step
            0.0)        # jump_threshold
        self._group.execute(plan, wait=True)

    def init_stay_up_constraints(self):
        """Constrain apbot_joint near 0.7 rad so the arm stays raised."""
        self.up_constraints = Constraints()
        joint_constraint = JointConstraint()
        self.up_constraints.name = "stay_up"
        joint_constraint.position = 0.7
        joint_constraint.tolerance_above = .1
        joint_constraint.tolerance_below = .1
        joint_constraint.weight = 1
        joint_constraint.joint_name = "apbot_joint"
        self.up_constraints.joint_constraints.append(joint_constraint)
        self._group.set_path_constraints(self.up_constraints)

    def init_spray_constraints(self):
        """Identical to init_stay_up_constraints.

        NOTE(review): body is a byte-for-byte duplicate of
        init_stay_up_constraints — possibly a placeholder; confirm whether
        spray-specific constraints were intended.
        """
        self.up_constraints = Constraints()
        joint_constraint = JointConstraint()
        self.up_constraints.name = "stay_up"
        joint_constraint.position = 0.7
        joint_constraint.tolerance_above = .1
        joint_constraint.tolerance_below = .1
        joint_constraint.weight = 1
        joint_constraint.joint_name = "apbot_joint"
        self.up_constraints.joint_constraints.append(joint_constraint)
        self._group.set_path_constraints(self.up_constraints)

    # Destructor
    def __del__(self):
        moveit_commander.roscpp_shutdown()
        # rospy.loginfo(
        #     '\033[94m' + "Object of class Ur5Moveit Deleted." + '\033[0m')
def movebase_client(goal_x, goal_y, quat):
    """Send a move_base goal at (goal_x, goal_y) in the map frame and wait.

    quat is a full quaternion but only its z/w components are applied
    (planar rotation). Returns the action result, or None after shutting
    the node down if the server never answers.
    """
    # Create an action client called "move_base" with action definition file "MoveBaseAction"
    client = actionlib.SimpleActionClient('move_base', MoveBaseAction)
    # Waits until the action server has started up and started listening for goals.
    client.wait_for_server()
    # Creates a new goal with the MoveBaseGoal constructor
    goal = MoveBaseGoal()
    # Set frame id
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    # Set goal position
    goal.target_pose.pose.position.x = goal_x
    goal.target_pose.pose.position.y = goal_y
    # Set goal orientation (yaw only: x/y components stay zero)
    goal.target_pose.pose.orientation.z = quat[2]
    goal.target_pose.pose.orientation.w = quat[3]
    # Sends the goal to the action server.
    client.send_goal(goal)
    # Waits for the server to finish performing the action.
    wait = client.wait_for_result()
    # If the result doesn't arrive, assume the Server is not available
    if not wait:
        rospy.logerr("Action server not available!")
        rospy.signal_shutdown("Action server not available!")
    else:
        # Result of executing the action
        return client.get_result()
class Countertop():
    """Countertop and sink cleaning routines driven through a Ur5Moveit
    planner (pg) plus an RViz spray-plume visualizer."""

    def __init__(self, pg):
        # pg: Ur5Moveit wrapper used for all motion commands.
        self.pg = pg
        self.plume = MarkerSpawner()

    def sink_clean(self, sink_xy, sink_height, top_dims, bottom_dims):
        """Sweep the sink basin in a back-and-forth (boustrophedon) pattern.

        sink_xy     -- (x, y) of the sink center in the planning frame
        sink_height -- z of the basin surface
        top_dims / bottom_dims -- (width, depth) of the sink rims
        """
        self.plume.isDelete = True
        sink_x = sink_xy[0]
        sink_y = sink_xy[1]
        waypoints = []
        counter_pose = geometry_msgs.msg.Pose()
        counter_pose.position.x = sink_x - bottom_dims[1]/2 + .02
        counter_pose.position.y = sink_y - bottom_dims[0]/2
        counter_pose.position.z = sink_height + .15
        # Tool pointing down: roll -90 deg, yaw 180 deg.
        qaut_angle = quaternion_from_euler(-1.57, 0, 3.14)
        counter_pose.orientation.x = qaut_angle[0]
        counter_pose.orientation.y = qaut_angle[1]
        counter_pose.orientation.z = qaut_angle[2]
        counter_pose.orientation.w = qaut_angle[3]
        # NOTE(review): init_wipe_constraints is not defined on Ur5Moveit in
        # this file — presumably added elsewhere; confirm it exists at runtime.
        self.pg.init_wipe_constraints()
        self.pg.go_to_predefined_pose("before_wipe_pose")
        self.pg.go_to_pose(counter_pose)
        i = 0
        # One stripe every 12 cm across the basin width.
        while i < bottom_dims[1]//.12:
            if i%2!=0:
                counter_pose.position.y = sink_y - bottom_dims[0]/2
                # counter_pose.position.x += .45*(1 - 2*(i%2))
                # elif abs(sink_y - counter_pose.position.y) < .1:
                #     counter_pose.position.x += .15
            else:
                counter_pose.position.y += bottom_dims[0]
            qaut_angle = quaternion_from_euler(-1.57, 0, 3.14)
            counter_pose.orientation.x = qaut_angle[0]
            counter_pose.orientation.y = qaut_angle[1]
            counter_pose.orientation.z = qaut_angle[2]
            counter_pose.orientation.w = qaut_angle[3]
            waypoints.append(copy.deepcopy(counter_pose))
            # if i%2!=0 and abs(sink_y - counter_pose.position.y) < .1:
            #     counter_pose.position.x -= .2
            #     waypoints.append(copy.deepcopy(counter_pose))
            if i!=bottom_dims[1]//.12 - 1:
                counter_pose.position.x += .12
                waypoints.append(copy.deepcopy(counter_pose))
            else:
                # Final pass: short step then a last sweep across.
                counter_pose.position.x += .05
                waypoints.append(copy.deepcopy(counter_pose))
                counter_pose.position.y += bottom_dims[0]
                waypoints.append(copy.deepcopy(counter_pose))
            i += 1
        self.pg.cartesian_path(waypoints)
        self.pg._group.clear_path_constraints()
        self.pg.go_to_predefined_pose("up")

    def sink_side_clean_right(self, sink_xy, dimensions, top_dims):
        """Clean the countertop strip to the right of the sink."""
        self.plume.isDelete = True
        sink_x = sink_xy[0]
        sink_y = sink_xy[1]
        waypoints = []
        counter_pose = geometry_msgs.msg.Pose()
        counter_pose.position.x = .3
        counter_pose.position.y = -(sink_y + top_dims[0]/2 + .05)
        counter_pose.position.z = dimensions[2] + .15
        qaut_angle = quaternion_from_euler(-1.57, 0, 1.57)
        counter_pose.orientation.x = qaut_angle[0]
        counter_pose.orientation.y = qaut_angle[1]
        counter_pose.orientation.z = qaut_angle[2]
        counter_pose.orientation.w = qaut_angle[3]
        # self.pg.init_stay_up_constraints()
        self.pg.init_wipe_constraints()
        self.pg.go_to_pose(counter_pose)
        rospy.sleep(.2)
        i = 0
        # Single pass (loop kept for symmetry with the other sweeps).
        while i < 1:
            if i%2!=0:
                counter_pose.position.x = .35
                # counter_pose.position.x += .45*(1 - 2*(i%2))
                # elif abs(sink_y - counter_pose.position.y) < .1:
                #     counter_pose.position.x += .15
            else:
                counter_pose.position.x += dimensions[1] - .1
            qaut_angle = quaternion_from_euler(-1.57, 0, 1.57)
            counter_pose.orientation.x = qaut_angle[0]
            counter_pose.orientation.y = qaut_angle[1]
            counter_pose.orientation.z = qaut_angle[2]
            counter_pose.orientation.w = qaut_angle[3]
            waypoints.append(copy.deepcopy(counter_pose))
            # Rotate the tool 90 deg at the end of the stripe.
            qaut_angle = quaternion_from_euler(-1.57, 0, 3.14)
            counter_pose.orientation.x = qaut_angle[0]
            counter_pose.orientation.y = qaut_angle[1]
            counter_pose.orientation.z = qaut_angle[2]
            counter_pose.orientation.w = qaut_angle[3]
            waypoints.append(copy.deepcopy(counter_pose))
            # if i%2!=0 and abs(sink_y - counter_pose.position.y) < .1:
            #     counter_pose.position.x -= .2
            #     waypoints.append(copy.deepcopy(counter_pose))
            if i!=2:
                counter_pose.position.y = sink_y - .1
                waypoints.append(copy.deepcopy(counter_pose))
            i += 1
        self.pg.cartesian_path(waypoints)
        self.pg._group.clear_path_constraints()
        self.pg.go_to_predefined_pose("before_wipe_pose")

    def spray_left(self, dimensions, sink_xy=[0,0]):
        """Spray the left half of the countertop in three stripes.

        NOTE(review): mutable default argument sink_xy=[0,0] — harmless
        here (never mutated) but worth replacing with None.
        """
        self.plume.isDelete = True
        sink_x = sink_xy[0]
        sink_y = sink_xy[1]
        waypoints = []
        counter_pose = geometry_msgs.msg.Pose()
        counter_pose.position.x = .3
        counter_pose.position.y = 0
        counter_pose.position.z = dimensions[2]+.15
        qaut_angle = quaternion_from_euler(-1.57, 0, 1.57)
        counter_pose.orientation.x = qaut_angle[0]
        counter_pose.orientation.y = qaut_angle[1]
        counter_pose.orientation.z = qaut_angle[2]
        counter_pose.orientation.w = qaut_angle[3]
        # self.pg.init_stay_up_constraints()
        self.pg.init_wipe_constraints()
        self.pg.go_to_pose(counter_pose)
        rospy.sleep(.2)
        i = 0
        while i < 3:
            if i%2!=0:
                counter_pose.position.x = .3
                # counter_pose.position.x += .45*(1 - 2*(i%2))
                # elif abs(sink_y - counter_pose.position.y) < .1:
                #     counter_pose.position.x += .3
            else:
                counter_pose.position.x += dimensions[1] - .10
            qaut_angle = quaternion_from_euler(-1.57, 0, 1.57)
            counter_pose.orientation.x = qaut_angle[0]
            counter_pose.orientation.y = qaut_angle[1]
            counter_pose.orientation.z = qaut_angle[2]
            counter_pose.orientation.w = qaut_angle[3]
            waypoints.append(copy.deepcopy(counter_pose))
            # if i%2!=0 and abs(sink_y - counter_pose.position.y) < .1:
            #     counter_pose.position.x -= .2
            #     waypoints.append(copy.deepcopy(counter_pose))
            if i!=2:
                counter_pose.position.y += .13
                waypoints.append(copy.deepcopy(counter_pose))
            i += 1
        self.pg.cartesian_path(waypoints)
        self.pg._group.clear_path_constraints()
        self.pg.go_to_predefined_pose("up")
        # status_pub.publish("Spraying Done")

    def spray_right(self, dimensions, sink_xy=[0,0]):
        """Spray the right half of the countertop in two stripes."""
        self.plume.isDelete = True
        sink_x = sink_xy[0]
        sink_y = sink_xy[1]
        waypoints = []
        counter_pose = geometry_msgs.msg.Pose()
        counter_pose.position.x = .3
        counter_pose.position.y = -0.2
        counter_pose.position.z = dimensions[2] + .15
        qaut_angle = quaternion_from_euler(-1.57, 0, 1.57)
        counter_pose.orientation.x = qaut_angle[0]
        counter_pose.orientation.y = qaut_angle[1]
        counter_pose.orientation.z = qaut_angle[2]
        counter_pose.orientation.w = qaut_angle[3]
        # self.pg.init_stay_up_constraints()
        self.pg.init_wipe_constraints()
        self.pg.go_to_predefined_pose("before_wipe_pose")
        self.pg.go_to_pose(counter_pose)
        rospy.sleep(.2)
        i = 0
        while i < 2:
            if i%2!=0:
                counter_pose.position.x = .3
                # counter_pose.position.x += .45*(1 - 2*(i%2))
                # elif abs(sink_y - counter_pose.position.y) < .1:
                #     counter_pose.position.x += .3
            else:
                counter_pose.position.x += dimensions[1] - .10
            qaut_angle = quaternion_from_euler(-1.57, 0, 1.57)
            counter_pose.orientation.x = qaut_angle[0]
            counter_pose.orientation.y = qaut_angle[1]
            counter_pose.orientation.z = qaut_angle[2]
            counter_pose.orientation.w = qaut_angle[3]
            waypoints.append(copy.deepcopy(counter_pose))
            # if i%2!=0 and abs(sink_y - counter_pose.position.y) < .1:
            #     counter_pose.position.x -= .2
            #     waypoints.append(copy.deepcopy(counter_pose))
            if i!=1:
                counter_pose.position.y += .13
                waypoints.append(copy.deepcopy(counter_pose))
            i += 1
        self.pg.cartesian_path(waypoints)
        self.pg._group.clear_path_constraints()
        # self.pg.go_to_predefined_pose("up")
        # status_pub.publish("Spraying Done")

    def wipe_left(self, sink_xy):
        """Placeholder — left-side wipe not implemented yet."""
        pass

    def wipe_right(self, dimensions):
        """Wipe the right countertop while spawning a spray plume
        (launches the uuv_plume_simulator in a subprocess)."""
        waypoints = []
        counter_pose = geometry_msgs.msg.Pose()
        counter_pose.position.x = .35 + dimensions[1]/2
        counter_pose.position.y = -dimensions[0]/4
        counter_pose.position.z = dimensions[2] + .4
        qaut_angle = quaternion_from_euler(-1.57, .5, 3.14)
        counter_pose.orientation.x = qaut_angle[0]
        counter_pose.orientation.y = qaut_angle[1]
        counter_pose.orientation.z = qaut_angle[2]
        counter_pose.orientation.w = qaut_angle[3]
        self.pg.go_to_predefined_pose("sink_start")
        rospy.sleep(.2)
        self.pg.init_spray_constraints()
        # Launch the plume visual simulator alongside the motion.
        cmd = ["roslaunch","uuv_plume_simulator","start_plume_example.launch"]
        self.proc = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        rospy.sleep(3)
        self.plume.register_plume()
        i = 0
        while i < 6:
            # Dip the tool (pitch -0.1) and pull back 10 cm ...
            qaut_angle = quaternion_from_euler(-1.57, -.1, 3.14)
            counter_pose.orientation.x = qaut_angle[0]
            counter_pose.orientation.y = qaut_angle[1]
            counter_pose.orientation.z = qaut_angle[2]
            counter_pose.orientation.w = qaut_angle[3]
            counter_pose.position.x -= .1
            waypoints.append(copy.deepcopy(counter_pose))
            if i != 5:
                # ... then lift (pitch 1), return, and shift sideways.
                qaut_angle = quaternion_from_euler(-1.57, 1, 3.14)
                counter_pose.orientation.x = qaut_angle[0]
                counter_pose.orientation.y = qaut_angle[1]
                counter_pose.orientation.z = qaut_angle[2]
                counter_pose.orientation.w = qaut_angle[3]
                counter_pose.position.x += .1
                counter_pose.position.y += .105
                waypoints.append(copy.deepcopy(counter_pose))
            i += 1
        self.plume.start_spray()
        rospy.sleep(1)
        self.pg.cartesian_path2(waypoints)
        self.plume.stop_spray()
        self.proc.terminate()
        rospy.sleep(1)
        self.plume.unregister_plume()
        self.pg._group.clear_path_constraints()
        self.pg.go_to_predefined_pose("up")
if __name__=="__main__":
    # Bring the arm up, then run the left-side spray routine.
    mani = Ur5Moveit("arm")
    status_pub = rospy.Publisher('/spray_status', String, queue_size=10)
    # NOTE(review): Countertop.__init__ requires a planner argument (pg);
    # calling it with no args raises TypeError — the intended call is
    # presumably Countertop(mani); confirm.
    cs = Countertop()
    mani.go_to_predefined_pose("start")
    rospy.sleep(.2)
    mani.go_to_predefined_pose("up")
    rospy.sleep(.2)
    # NOTE(review): spray_left's first positional parameter `dimensions`
    # is not supplied here — as written this call raises TypeError; confirm.
    cs.spray_left(sink_xy=[.5,-.02])
    rospy.spin()
|
[
"noreply@github.com"
] |
NickNair.noreply@github.com
|
8e7c21c5c368200821fd72558b7f330492b6a1fe
|
5f01ee9fd052e5f148d1d8c6bbdd76d632420e8c
|
/5.Strategy/stage4CompositeIteratorStrategy_v2.py
|
5b95c3b17cf8f4f1b276828f287f6aa4ad13c8d3
|
[] |
no_license
|
ShravyaKadur/articles-querying-using-dp
|
3122182f3727cd7d545a51be60b468c4f65f18fe
|
40a6fdf109a2bf5c083b0a7782724ad35550b8a6
|
refs/heads/master
| 2020-12-31T22:57:20.150829
| 2020-02-08T03:34:58
| 2020-02-08T03:34:58
| 239,063,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,760
|
py
|
import os
class Node:
    """Base class for composite-tree nodes.

    Carries only a parent link; subclasses override display().
    """

    # Nodes start out detached from any tree.
    parent = None

    def setParent(self, parent):
        """Record *parent* as this node's owner in the tree."""
        self.parent = parent

    def display(self):
        """Render nothing by default; subclasses provide real output."""
        pass
class SetN(Node):
    """Composite node: a labelled category set holding child sets and
    article leaves."""

    def __init__(self, val, cat):
        self.subsets = []   # child nodes: SetN branches or article leaves
        self.setVal = val   # label value (e.g. "cricket")
        self.setCat = cat   # label category (e.g. "sport", "location")

    def display(self):
        """Recursively display every child."""
        for se in self.subsets:
            se.display()

    def addSubset(self, Subset):
        """Append a child node."""
        self.subsets.append(Subset)

    def getChildren(self):
        """Return a shallow copy of the children list."""
        return self.subsets[:]

    def getArticles(self):
        """Return the de-duplicated leaf articles below this set.

        Order is unspecified (a set is used for de-duplication).
        """
        li = []
        for se in self.subsets:
            # isinstance replaces the fragile string comparison on
            # type(se).__name__ and also accepts SetN subclasses.
            if isinstance(se, SetN):
                li.extend(se.getArticles())
            else:
                li.append(se)
        return list(set(li))
class ArtN(Node):
    """Leaf node: one article, identified by its 1-based data-row number
    in ArticlesFinal.csv."""

    def __init__(self, no):
        # no: 1-based index of the article's row (after the header).
        self.artNo = no

    def display(self):
        """Print the article's title/date/body, wait for Enter, then
        clear the screen.

        Re-reads ArticlesFinal.csv on every call; rsplit with colNos-1
        keeps commas inside the first (body) column intact.
        NOTE(review): the file handle is never closed, and 'clear' is
        POSIX-only — fails silently on Windows.
        """
        i = 1
        f = open("ArticlesFinal.csv","r")
        line = f.readline()
        colNos = len(line.split(','))
        while line:
            line = f.readline().rsplit(',',colNos-1)
            if i==self.artNo:
                print(line[2],'\n',line[1])
                print(line[0])
                print('<press any key to continue>')
                input()
                os.system('clear')
                break
            i += 1
def build_tree(root, tree, emptyLabel):
    """Convert the nested dict *tree* into a composite of SetN nodes
    hanging off *root*.

    Keys are "value;category" strings; a key equal to *emptyLabel* means
    its list of article nodes attaches directly to *root* with no
    intermediate set.
    """
    for key, value in tree.items():
        kind = type(value).__name__
        if kind == 'dict':
            # Inner dict: create a branch and recurse into it.
            label, category = key.split(';')
            branch = SetN(label, category)
            build_tree(branch, value, emptyLabel)
            root.addSubset(branch)
            branch.setParent(root)
        elif kind == 'list':
            if key == emptyLabel:
                # No label here: hang the articles straight off *root*.
                for article in value:
                    root.addSubset(article)
                    article.setParent(root)
            else:
                # Labelled leaf list: one branch holding the articles.
                label, category = key.split(';')
                branch = SetN(label, category)
                root.addSubset(branch)
                branch.setParent(root)
                for article in value:
                    branch.addSubset(article)
                    article.setParent(branch)
def allKeys(di):
    """Return every key of *di*, followed by the keys of any nested dict
    values (depth-first), duplicates included."""
    collected = list(di)
    for value in di.values():
        # Same exact-type check as elsewhere in this file (no subclasses).
        if type(value).__name__ == 'dict':
            collected.extend(allKeys(value))
    return collected
def create_initial_tree():
    """Read ArticlesFinal.csv and build the article category tree.

    Each data row is rsplit into `colNos` fields; fields 3-7 are (in this
    order) sport, location, two person-name fields and an organisation
    field.  Rows are folded into a nested dict keyed by "value;category"
    strings with ArtN leaves in the innermost lists, nested in the fixed
    order sport > location > person > organisation, skipping levels whose
    field is empty (emptyLabel marks an absent innermost level).  The
    dict is then converted into a SetN/ArtN composite via build_tree.

    Returns (root, lookup): the root SetN and a set of every
    "value;category" key, used by QueryStrategy to classify query terms.
    """
    f = open("ArticlesFinal.csv","r")
    line = f.readline()
    # Header row fixes the column count used for rsplit below.
    colNos = len(line.split(','))
    i = 1
    emptyLabel = '<EMPTY FIELD IN DATA>'
    tree = {}
    while line:
        line = f.readline()
        if line!='':
            # rsplit keeps commas inside the first (text) field intact.
            line = line.rsplit(',',colNos-1)
            art = ArtN(i)
            # --- sport present: nest under tree[sport] ---
            if line[3]!='':
                line[3] = line[3].lower().strip()+";sport"
                if not line[3] in tree.keys():
                    tree[line[3]] = {}
                # sport + location
                if line[4]!='':
                    line[4] = line[4].lower().strip()+";location"
                    if not line[4] in tree[line[3]].keys():
                        tree[line[3]][line[4]] = {}
                    if line[5]!='' or line[6]!='':
                        # Merge both name fields into one ';'-separated list.
                        line[5] = line[5].lower()
                        line[6] = line[6].lower()
                        names = ''
                        if line[5]=='':
                            names = line[6]
                        elif line[6]=='':
                            names = line[5]
                        else:
                            names = line[5]+';'+line[6]
                        names = names.split(';')
                        orgs = False
                        if line[7]!='' and line[7]!='\n':
                            line[7] = line[7].lower()
                            orgs = line[7].split(';')
                        for name in names:
                            name = name.strip()+";person"
                            if not name in tree[line[3]][line[4]].keys():
                                tree[line[3]][line[4]][name] = {}
                            if orgs:
                                for org in orgs:
                                    org = org.strip()+";organisation"
                                    if not org in tree[line[3]][line[4]][name].keys():
                                        tree[line[3]][line[4]][name][org] = []
                                    tree[line[3]][line[4]][name][org].append(art)
                            else:
                                if not emptyLabel in tree[line[3]][line[4]][name].keys():
                                    tree[line[3]][line[4]][name][emptyLabel] = []
                                tree[line[3]][line[4]][name][emptyLabel].append(art)
                    elif line[7]!='' and line[7]!='\n':
                        # sport + location + organisation (no person)
                        line[7] = line[7].lower()
                        orgs = line[7].split(';')
                        for org in orgs:
                            org = org.strip()+";organisation"
                            if not org in tree[line[3]][line[4]].keys():
                                tree[line[3]][line[4]][org] = []
                            tree[line[3]][line[4]][org].append(art)
                    else:
                        # sport + location only
                        if not emptyLabel in tree[line[3]][line[4]].keys():
                            tree[line[3]][line[4]][emptyLabel] = []
                        tree[line[3]][line[4]][emptyLabel].append(art)
                elif line[5]!='' or line[6]!='':
                    # sport + person (no location)
                    line[5] = line[5].lower()
                    line[6] = line[6].lower()
                    names = ''
                    if line[5]=='':
                        names = line[6]
                    elif line[6]=='':
                        names = line[5]
                    else:
                        names = line[5]+';'+line[6]
                    names = names.split(';')
                    orgs = False
                    if line[7]!='' and line[7]!='\n':
                        line[7] = line[7].lower()
                        orgs = line[7].split(';')
                    for name in names:
                        name = name.strip()+";person"
                        if not name in tree[line[3]].keys():
                            tree[line[3]][name] = {}
                        if orgs:
                            for org in orgs:
                                org = org.strip()+";organisation"
                                if not org in tree[line[3]][name].keys():
                                    tree[line[3]][name][org] = []
                                tree[line[3]][name][org].append(art)
                        else:
                            if not emptyLabel in tree[line[3]][name].keys():
                                tree[line[3]][name][emptyLabel] = []
                            tree[line[3]][name][emptyLabel].append(art)
                elif line[7]!='' and line[7]!='\n':
                    # sport + organisation only
                    line[7] = line[7].lower()
                    orgs = line[7].split(';')
                    for org in orgs:
                        org = org.strip()+";organisation"
                        if not org in tree[line[3]].keys():
                            tree[line[3]][org] = []
                        tree[line[3]][org].append(art)
                else:
                    # sport only
                    if not emptyLabel in tree[line[3]].keys():
                        tree[line[3]][emptyLabel] = []
                    tree[line[3]][emptyLabel].append(art)
            # --- no sport, location present: nest under tree[location] ---
            elif line[4]!='':
                line[4] = line[4].lower().strip()+";location"
                if not line[4] in tree.keys():
                    tree[line[4]] = {}
                if line[5]!='' or line[6]!='':
                    line[5] = line[5].lower()
                    line[6] = line[6].lower()
                    names = ''
                    if line[5]=='':
                        names = line[6]
                    elif line[6]=='':
                        names = line[5]
                    else:
                        names = line[5]+';'+line[6]
                    names = names.split(';')
                    orgs = False
                    if line[7]!='' and line[7]!='\n':
                        line[7] = line[7].lower()
                        orgs = line[7].split(';')
                    for name in names:
                        name = name.strip()+";person"
                        if not name in tree[line[4]].keys():
                            tree[line[4]][name] = {}
                        if orgs:
                            for org in orgs:
                                org = org.strip()+";organisation"
                                if not org in tree[line[4]][name].keys():
                                    tree[line[4]][name][org] = []
                                tree[line[4]][name][org].append(art)
                        else:
                            if not emptyLabel in tree[line[4]][name].keys():
                                tree[line[4]][name][emptyLabel] = []
                            tree[line[4]][name][emptyLabel].append(art)
                elif line[7]!='' and line[7]!='\n':
                    line[7] = line[7].lower()
                    orgs = line[7].split(';')
                    for org in orgs:
                        org = org.strip()+";organisation"
                        if not org in tree[line[4]].keys():
                            tree[line[4]][org] = []
                        tree[line[4]][org].append(art)
                else:
                    if not emptyLabel in tree[line[4]].keys():
                        tree[line[4]][emptyLabel] = []
                    tree[line[4]][emptyLabel].append(art)
            # --- no sport/location, person present: nest under tree[person] ---
            elif line[5]!='' or line[6]!='':
                line[5] = line[5].lower()
                line[6] = line[6].lower()
                names = ''
                if line[5]=='':
                    names = line[6]
                elif line[6]=='':
                    names = line[5]
                else:
                    names = line[5]+';'+line[6]
                names = names.split(';')
                orgs = False
                if line[7]!='' and line[7]!='\n':
                    line[7] = line[7].lower()
                    orgs = line[7].split(';')
                for name in names:
                    name = name.strip()+";person"
                    if not name in tree.keys():
                        tree[name] = {}
                    if orgs:
                        for org in orgs:
                            org = org.strip()+";organisation"
                            if not org in tree[name].keys():
                                tree[name][org] = []
                            tree[name][org].append(art)
                    else:
                        if not emptyLabel in tree[name].keys():
                            tree[name][emptyLabel] = []
                        tree[name][emptyLabel].append(art)
            # --- only organisation present ---
            elif line[7]!='' and line[7]!='\n':
                line[7] = line[7].lower()
                orgs = line[7].split(';')
                for org in orgs:
                    org = org.strip()+";organisation"
                    if not org in tree.keys():
                        tree[org] = []
                    tree[org].append(art)
            # --- no category fields at all ---
            else:
                if not emptyLabel in tree.keys():
                    tree[emptyLabel] = []
                tree[emptyLabel].append(art)
        i += 1
    f.close()
    # print(tree['squash;sport']['paris;location'])
    # lookup lets QueryStrategy map a bare term to its category.
    lookup = allKeys(tree)
    lookup = set(lookup)
    root = SetN('root','root')
    build_tree(root,tree,emptyLabel)
    return root,lookup
class Iterator:
    """Walks an existing SetN tree looking for nodes matching (val, cat)
    and collects their children under a fresh SetN in self.newRoot.

    NOTE(review): __next__ takes an explicit *node* argument, so this
    class does not actually satisfy the iterator protocol despite
    defining __iter__.  The in-source comments below mark latent
    AttributeErrors ('ArtN' lacks getChildren/subsets) -- confirm with
    the original author before relying on this traversal.
    """

    # Class-level fallback root; normally shadowed by the instance root.
    root = SetN('root', 'root')

    def __init__(self, root, val, cat):
        self.root = root
        # find() populates self.newRoot as a side effect.
        self.find(val, cat)

    def __iter__(self):
        return self

    def __next__(self, node):
        """Return the node following *node* in a sibling-first walk, or
        None when the traversal is exhausted."""
        if node==None:
            return node
        parent = node.parent
        if parent==None:
            return parent
        curr = parent.subsets.index(node)
        if curr<len(parent.subsets)-1:#(parent.subsets[curr+1]):(changed)
            # There is a next sibling: step sideways.
            return parent.subsets[curr+1]
        else:
            # Last sibling: advance the parent and descend into its
            # first child.  NOTE(review): __next__(parent) is evaluated
            # up to three times here; hoisting it once would be safer.
            if(self.__next__(parent)):
                if type(self.__next__(parent)).__name__=='SetN':
                    if(self.__next__(parent).getChildren): #'ArtN' object has no attribute 'getChildren'
                        return self.__next__(parent).getChildren()[0]
                    else:
                        return None
                else:
                    return None
            else:
                return None

    def find(self, val, cat):
        """Scan the tree for SetN nodes whose (setVal, setCat) matches and
        gather their subtrees under self.newRoot.

        NOTE(review): addSubset(curr.subsets) appends the child *list* as
        a single element rather than the individual children, and the
        while condition re-tests `initial`, which only changes on the
        mismatch branch -- both look suspect; verify intent.
        """
        self.newRoot = SetN(val, cat)
        initial = self.root.subsets[0]
        isInitSet = type(initial).__name__=='SetN'
        curr = initial
        while(initial):
            if type(curr).__name__=='SetN':
                if(isInitSet==False):
                    initial = curr
                    isInitSet = True
                if curr.setCat==cat:
                    if curr.setVal==val:
                        self.newRoot.addSubset(curr.subsets)
                    else:
                        initial = initial.subsets[0] #'ArtN' object has no attribute 'subsets'
                        curr = initial
            curr = self.__next__(curr)
        return self.newRoot
class QueryStrategy:
    """Strategy selector: maps a query string to the function that can
    evaluate it (simple term, '&'-conjunction, '|'-disjunction).

    The inner query functions returned by getQueryFn are later invoked by
    Tree.runQuery as self.query(self, queryStr), i.e. their *self*
    parameter is bound to the calling Tree instance, not to this object.
    """

    def __init__(self, lookup):
        # Set of every "value;category" key known to the tree.
        self.lookup = lookup

    def checkLookup(self, key): #returns which category a simple query belongs to
        cats = ['sport','location','organisation','person']
        for c in cats:
            if key+';'+c in self.lookup:
                return c
        return 'invalid' #should not reach here

    def getQueryFn(self, queryStr):
        """Return the query function matching the operators in *queryStr*.

        NOTE(review): queryAnd/queryOr call self.checkLookup, but the
        *self* they receive at call time is a Tree, which has no
        checkLookup attribute -- this would raise AttributeError; only
        querySimple goes through self.querSys.  Confirm against callers.
        """
        def queryAnd(self, queryStr):
            # Intersect terms by chaining Iterators, narrowing the tree
            # after each term; terms are sorted by category nesting order.
            strs = queryStr.split('&') #strip
            result = SetN('reroot', 'reroot')
            otree = self.root
            catdict = {'sport':0,'location':1,'organisation':2,'person':3}
            strs.sort(key=lambda str: catdict[self.checkLookup(str)])
            for str in strs:
                cat = self.checkLookup(str)
                iter = Iterator(otree, str, cat)
                result.addSubset(iter.newRoot)
                otree = iter.newRoot
            return result
        def queryOr(self, queryStr):
            # Union: each term is matched independently against the full tree.
            strs = queryStr.split('|')
            result = SetN('reroot', 'reroot')
            for str in strs:
                cat = self.checkLookup(str)
                iter = Iterator(self.root, str, cat)
                result.addSubset(iter.newRoot)
            return result
        def queryAndOr(self, queryStr):
            # Mixed '&'/'|' queries are not implemented yet.
            pass
        def querySimple(self, queryStr):
            # Single-term query; *self* here is the Tree instance.
            print(type(self))
            cat = self.querSys.checkLookup(queryStr)
            result = SetN('reroot', 'reroot')
            iter = Iterator(self.root, queryStr, cat)
            result.addSubset(iter.newRoot)
            return result
        # Dispatch purely on which operators appear in the query string.
        if '|' in queryStr:
            if '&' in queryStr:
                return queryAndOr
            else:
                return queryOr
        elif '&' in queryStr:
            return queryAnd
        else:
            return querySimple
class Tree():
    """Facade: owns the article tree and runs the interactive query loop."""

    def __init__(self):
        root,lookup=create_initial_tree()
        self.root = root
        self.querSys = QueryStrategy(lookup)

    def query(self, queryStr):
        # Placeholder; rebound per-query inside runQuery.
        pass

    def runQuery(self):
        """Prompt for queries until the user answers 'N' to continue.

        NOTE(review): the chosen query function is called as
        self.query(self, queryStr), so its *self* is this Tree -- see the
        compatibility note on QueryStrategy.getQueryFn.
        """
        cont = True
        while cont:
            print('Please enter a query with any of the following separated by & and/or |: sportnames, sportpersons, locations, ')
            queryStr = input()
            self.query = self.querSys.getQueryFn(queryStr)
            #run query, get tree, do something :P
            finalTree = self.query(self, queryStr)
            #Print tree code goes here
            os.system('clear')
            print("Run another query? (Y/N)")
            a = input()
            os.system('clear')
            if a=='N':
                cont = False
# Script entry point: build the tree from ArticlesFinal.csv and start the
# interactive query loop.
tree = Tree()
tree.runQuery()
|
[
"noreply@github.com"
] |
ShravyaKadur.noreply@github.com
|
609f143ff981921a46a56f69577e2d76754c0514
|
e370cc68a85e075c2f2697580e321ecc73878868
|
/Algorithms/DynamicProgramming/more/zig_zag_sequence.py
|
8522c7f899502260fc575c2bbfbddd0da7ab0b24
|
[] |
no_license
|
mayksi/HackerRank
|
6a3154241be2ca105424cc5e1a17d045e7405ec0
|
702848bb671d601305fd319198fd5f272c4c1094
|
refs/heads/master
| 2021-01-10T07:28:37.398198
| 2016-01-11T17:26:09
| 2016-01-11T17:26:09
| 48,819,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
'''
A sequence of numbers is called a zig-zag sequence if the differences between successive numbers strictly alternate between positive and negative.
The first difference (if one exists) may be either positive or negative. A sequence with fewer than two elements is trivially a zig-zag sequence.
Detailed problem statement: https://community.topcoder.com/stat?c=problem_statement&pm=1259&rd=4493
Key Idea: is to divide the integer list/array into sub-array of increasing or decreasing integers and count up all the sub-arrays (for any list of size > 2)
'''
import math
def getSequenceLength(inList):
    """Return the length of the longest zig-zag subsequence of *inList*.

    A zig-zag sequence strictly alternates between rises and falls; equal
    neighbours contribute nothing.  The greedy scan counts one element per
    change of direction: keeping only the endpoint of a monotone run never
    makes a later alternation impossible.

    BUG FIX vs. the previous version: a zero difference was given sign +1
    (math.copysign(1, 0) == 1.0) and lists of length <= 2 were returned
    verbatim, so [5, 5] reported 2 and [5, 5, 3] reported 3.  Direction is
    now only established by a strictly non-zero difference, giving the
    correct 1 and 2 while leaving all previously passing cases unchanged.
    """
    if not inList:
        return 0
    # Any non-empty list has a trivial zig-zag subsequence of length 1.
    length = 1
    lastSign = 0  # 0 = no direction yet; +1 = last move rose, -1 = fell
    for idx in range(1, len(inList)):
        diff = inList[idx] - inList[idx - 1]
        if diff > 0 and lastSign != 1:
            # Direction flipped to (or started) rising.
            length += 1
            lastSign = 1
        elif diff < 0 and lastSign != -1:
            # Direction flipped to (or started) falling.
            length += 1
            lastSign = -1
    return length
if __name__ == "__main__":
    # Smoke tests: each case pins the expected longest zig-zag length.
    testIp1 = [1, 7, 4, 9, 2, 5]
    assert 6 == getSequenceLength(testIp1), "Longest zig-zag sequence count should be %s" % 6
    testIp2 = [1, 17, 5, 10, 13, 15, 10, 5, 16, 8]
    assert 7 == getSequenceLength(testIp2), "Longest zig-zag sequence count should be %s" % 7
    testIp3 = [44]
    assert 1 == getSequenceLength(testIp3), "Longest zig-zag sequence count should be %s" % 1
    testIp4 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # BUG FIX: this failure message previously interpolated 7 although the
    # asserted expected value is 2.
    assert 2 == getSequenceLength(testIp4), "Longest zig-zag sequence count should be %s" % 2
    testIp5 = [70, 55, 13, 2, 99, 2, 80, 80, 80, 80, 100, 19, 7, 5, 5, 5, 1000, 32, 32]
    assert 8 == getSequenceLength(testIp5), "Longest zig-zag sequence count should be %s" % 8
    testIp6 = [374, 40, 854, 203, 203, 156, 362, 279, 812, 955, \
               600, 947, 978, 46, 100, 953, 670, 862, 568, 188, \
               67, 669, 810, 704, 52, 861, 49, 640, 370, 908, \
               477, 245, 413, 109, 659, 401, 483, 308, 609, 120, \
               249, 22, 176, 279, 23, 22, 617, 462, 459, 244]
    assert 36 == getSequenceLength(testIp6), "Longest zig-zag sequence count should be %s" % 36
|
[
"mayksi@users.noreply.github.com"
] |
mayksi@users.noreply.github.com
|
d82ebe724a02cf28cbe8e99a32c39dcf8103c759
|
e79ec78ff3a1e7d28a933270ce873365090d92e8
|
/corr_engagement.py
|
8e15ee49a577997786b67fc88e750f7fb8fb8600
|
[] |
no_license
|
mrksbrg/stats_utils
|
fecd5777b5939d5bd0b2dbf36b99f9590e8b10ed
|
3222cd8df42a5c337cdb88dbd2f8ba7d8fd62466
|
refs/heads/main
| 2023-05-27T23:33:54.691936
| 2021-06-20T19:00:35
| 2021-06-20T19:00:35
| 378,486,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,497
|
py
|
# calculate the Pearson's correlation between two variables
import numpy as np
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
# 24 sprint timestamps; the "_cleaned" variants drop sprints 10-14
# (presumably the summer break -- see the "minus summer" labels below).
timestamps = np.arange(1, 25, 1)
timestamps_cleaned = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24])
# Per-sprint engagement and communication measurements (units not stated here).
engagement = np.array([70, 70, 78, 78, 76, 78, 79, 76, 72, 77, 82, 82, 80, 73, 68, 72, 77, 80, 81, 79, 78, 79, 78, 71])
engagement_cleaned = np.array([70, 70, 78, 78, 76, 78, 79, 76, 72, 68, 72, 77, 80, 81, 79, 78, 79, 78, 71])
communication = np.array([20, 45, 38, 18, 24, 20, 11, 20, 20, 16, 4, 1, 8, 41, 20, 32, 19, 20, 20, 19, 20, 21, 2, 18])
communication_cleaned = np.array([20, 45, 38, 18, 24, 20, 11, 20, 20, 20, 32, 19, 20, 20, 19, 20, 21, 2, 18])
# Sanity check: the cleaned series must stay aligned (both 19 long).
print(len(engagement_cleaned))
print(len(timestamps_cleaned))
# All data
avg_engagement = np.average(engagement)
print("Average engagement: " + str(avg_engagement))
avg_communication = np.average(communication)
print("Average communication: " + str(avg_communication))
# Residuals around each mean (inspected via histograms below).
residuals_engagement = engagement - avg_engagement
print(residuals_engagement)
residuals_communication = communication - avg_communication
print(residuals_communication)
plt.scatter(timestamps, engagement, color ='blue')
plt.show()
# Minus summer sprints
avg_engagement_cleaned = np.average(engagement_cleaned)
print("Average engagement minus summer: " + str(avg_engagement_cleaned))
avg_communication_cleaned = np.average(communication_cleaned)
print("Average communication minus summer: " + str(avg_communication_cleaned))
residuals_engagement_cleaned = engagement_cleaned - avg_engagement_cleaned
print(residuals_engagement_cleaned)
residuals_communication_cleaned = communication_cleaned - avg_communication_cleaned
print(residuals_communication_cleaned)
plt.scatter(timestamps_cleaned, engagement_cleaned, color ='red')
plt.show()
# Residual distributions: blue = full data, red = summer removed.
plt.hist(residuals_engagement, color = 'blue')
plt.show()
plt.hist(residuals_communication, color = 'blue')
plt.show()
plt.hist(residuals_engagement_cleaned, color = 'red')
plt.show()
plt.hist(residuals_communication_cleaned, color = 'red')
plt.show()
# calculate Pearson's correlation
plt.scatter(communication, engagement, color ='blue')
plt.show()
plt.scatter(communication_cleaned, engagement_cleaned, color ='red')
plt.show()
corr, _ = pearsonr(engagement, communication)
print('Pearsons correlation: %.3f' % corr)
corr2, _ = pearsonr(engagement_cleaned, communication_cleaned)
print('Pearsons correlation minus five summer sprints: %.3f' % corr2)
|
[
"markus.borg@ri.se"
] |
markus.borg@ri.se
|
f09a6697e42c5c90a16869f745cfbe0b46dd7f73
|
b39b0625795b0640a6a68151f2012ce139f423b8
|
/iaas/swagger_client/models/update_cloud_account_vsphere_specification.py
|
fd4577902eca7039a339f64a80d244677ff174aa
|
[] |
no_license
|
darrylcauldwell/casCodegen
|
8e82b1f08e8260482996aec3d8be10934a65dd03
|
1f1ff9ab8a33102bcfcb8be276d51992d96bcb61
|
refs/heads/master
| 2020-07-27T14:42:28.550855
| 2019-09-17T18:30:28
| 2019-09-17T18:30:28
| 209,127,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,490
|
py
|
# coding: utf-8
"""
VMware Cloud Assembly IaaS API
A multi-cloud IaaS API for Cloud Automation Services # noqa: E501
OpenAPI spec version: 2019-01-15
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.tag import Tag # noqa: F401,E501
# Auto-generated swagger model -- regenerate from the API spec rather than
# editing by hand; only comments have been added here.
class UpdateCloudAccountVsphereSpecification(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'description': 'str',
        'tags': 'list[Tag]',
        'region_ids': 'list[str]',
        'create_default_zones': 'bool'
    }
    attribute_map = {
        'description': 'description',
        'tags': 'tags',
        'region_ids': 'regionIds',
        'create_default_zones': 'createDefaultZones'
    }
    def __init__(self, description=None, tags=None, region_ids=None, create_default_zones=None):  # noqa: E501
        """UpdateCloudAccountVsphereSpecification - a model defined in Swagger"""  # noqa: E501
        self._description = None
        self._tags = None
        self._region_ids = None
        self._create_default_zones = None
        self.discriminator = None
        # Only overwrite the None defaults for fields the caller supplied.
        if description is not None:
            self.description = description
        if tags is not None:
            self.tags = tags
        if region_ids is not None:
            self.region_ids = region_ids
        if create_default_zones is not None:
            self.create_default_zones = create_default_zones
    @property
    def description(self):
        """Gets the description of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        A human-friendly description.  # noqa: E501
        :return: The description of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this UpdateCloudAccountVsphereSpecification.
        A human-friendly description.  # noqa: E501
        :param description: The description of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :type: str
        """
        self._description = description
    @property
    def tags(self):
        """Gets the tags of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        A set of tag keys and optional values to set on the Cloud Account  # noqa: E501
        :return: The tags of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :rtype: list[Tag]
        """
        return self._tags
    @tags.setter
    def tags(self, tags):
        """Sets the tags of this UpdateCloudAccountVsphereSpecification.
        A set of tag keys and optional values to set on the Cloud Account  # noqa: E501
        :param tags: The tags of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :type: list[Tag]
        """
        self._tags = tags
    @property
    def region_ids(self):
        """Gets the region_ids of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        A set of Region names to enable provisioning on.  # noqa: E501
        :return: The region_ids of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :rtype: list[str]
        """
        return self._region_ids
    @region_ids.setter
    def region_ids(self, region_ids):
        """Sets the region_ids of this UpdateCloudAccountVsphereSpecification.
        A set of Region names to enable provisioning on.  # noqa: E501
        :param region_ids: The region_ids of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :type: list[str]
        """
        self._region_ids = region_ids
    @property
    def create_default_zones(self):
        """Gets the create_default_zones of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        Create default cloud zones for the enabled regions.  # noqa: E501
        :return: The create_default_zones of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :rtype: bool
        """
        return self._create_default_zones
    @create_default_zones.setter
    def create_default_zones(self, create_default_zones):
        """Sets the create_default_zones of this UpdateCloudAccountVsphereSpecification.
        Create default cloud zones for the enabled regions.  # noqa: E501
        :param create_default_zones: The create_default_zones of this UpdateCloudAccountVsphereSpecification.  # noqa: E501
        :type: bool
        """
        self._create_default_zones = create_default_zones
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recursively serialise nested models, lists and dicts.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(UpdateCloudAccountVsphereSpecification, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateCloudAccountVsphereSpecification):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"dcauldwell@dcauldwell-a01.vmware.com"
] |
dcauldwell@dcauldwell-a01.vmware.com
|
905c27335a29c06c69925c75220d18b736ff3e98
|
89c7232a954357302926b0985b0b6a58f01d6e38
|
/sensitive_attribute.py
|
3ff2ccd1ee4c36a4aecab363c106a2f6d2533d33
|
[] |
no_license
|
BJTFDGR/Embedding-Processing
|
516f4b11a5ab1a240ffd69c6d5a82422c7e95bed
|
038c6b86bb558fa5aa0193295eecad5690596b8e
|
refs/heads/master
| 2023-07-18T20:36:36.241202
| 2021-09-27T05:25:32
| 2021-09-27T05:25:32
| 363,048,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,713
|
py
|
import sys
import os

# BUG FIX: numpy and matplotlib were originally imported near the *bottom*
# of this script (after the plotting section started), yet `np` is used in
# AccuarcyCompute and the index-shuffling code and `plt` in the first
# plotting calls -- both raised NameError at runtime.  Hoisted here.
import numpy as np
import matplotlib.pyplot as plt
import torch

# Directory holding the per-author embedding tensors (x_bert<i>.pt files).
new_folder = '/home/chenboc1/githubfolder/bookcorpus/books1/pt/'
sys.path.append(new_folder)
# Pin all CUDA work to physical GPU 2 on this machine.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# If there's a GPU available...
if torch.cuda.is_available():
    # Tell PyTorch to use the GPU.
    device = torch.device("cuda")
    n_gpu = torch.cuda.device_count()
    print('There are %d GPU(s) available.' % n_gpu)
    print('We will use the GPU:', [torch.cuda.get_device_name(i) for i in range(n_gpu)])
# If not...
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")
# One x_bert<i>.pt file per author is expected in new_folder.
authornumber=len(os.listdir(new_folder))
# Load data here
# In white box setting, first 50 sentences are used for private training
import random
# Fixed seed so data_split's shuffling is reproducible across runs.
random.seed(10)
def data_split(full_list, ratio, shuffle=False):
    """Split *full_list* into two sublists at *ratio*.

    The first sublist receives ``int(len(full_list) * ratio)`` items and
    the second the remainder.  With ``shuffle=True`` the input list is
    shuffled **in place** first.  If the list is empty or the ratio
    yields fewer than one item, returns ``([], full_list)`` unchanged.

    :param full_list: list to divide
    :param ratio: fraction (0..1) assigned to the first sublist
    :param shuffle: shuffle *full_list* in place before splitting
    :return: (first_part, second_part)
    """
    cut = int(len(full_list) * ratio)
    if not full_list or cut < 1:
        return [], full_list
    if shuffle:
        random.shuffle(full_list)
    return full_list[:cut], full_list[cut:]
def AccuarcyComputeT3(pred, label):
    """Top-3 accuracy: fraction of rows whose true label is among the three
    highest-scoring classes of *pred*.

    :param pred: (batch, num_classes) score tensor; needs >= 3 columns
    :param label: (batch,) integer class-id tensor
    :return: float in [0, 1]

    The previous version moved both tensors to CPU numpy and then wrapped
    `pred` back into a fresh torch.tensor just to call topk; computing
    directly on the incoming tensor gives the same result without the
    device/array round trip.
    """
    with torch.no_grad():
        _, top3 = pred.topk(3, dim=1, largest=True, sorted=True)
        # A row is a hit when its label equals any of its top-3 indices.
        hits = (top3 == label.view(-1, 1).to(top3.device)).any(dim=1)
        return hits.sum().item() / len(label)
# accuarcy
def AccuarcyCompute(pred, label):
    """Top-1 accuracy: mean agreement between argmax(pred, axis=1) and *label*.

    :param pred: (batch, num_classes) score tensor (any device)
    :param label: (batch,) integer class-id tensor
    :return: numpy float32 in [0, 1]
    """
    with torch.no_grad():
        scores = pred.cpu().data.numpy()
        truth = label.cpu().data.numpy()
        correct = np.float32(np.argmax(scores, 1) == truth)
        return np.mean(correct)
# aux_data,aux_label=[],[]
# text_data,text_label=[],[]
# for i in range(300):
# sub_data1, sub_data2 = data_split(torch.load(new_folder+'x_bert'+str(i)+'.pt', map_location=torch.device("cpu")), ratio=0.1, shuffle=True)
# aux_data+=sub_data1
# aux_label+=[i for j in range(int(300*0.1))]
# text_data+=sub_data2
# text_label+=[i for j in range(300- int(300*0.1))]
# Load up to 300 embedding tensors per author for the first 400 authors;
# each sample's label is the author index i.
data,label=[],[]
for i in range(400):
    tmp_list=torch.load(new_folder+'x_bert'+str(i)+'.pt', map_location=torch.device("cpu"))[:300]
    data += tmp_list
    label+=[i for j in range(len(tmp_list))]
n_epochs = 30
batch_size =128
new_data=torch.cat(data)
new_label=torch.tensor(label, dtype=torch.long)
dataset = torch.utils.data.TensorDataset(new_data,new_label)
# 10% of samples are held out for validation.
validation_split = .1
shuffle_dataset = True
random_seed= 42
# Creating data indices for training and validation splits:
# NOTE(review): `np` is used here but numpy was originally imported much
# further down this file -- ensure the import precedes this point.
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
if shuffle_dataset :
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
train_dataset = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
test_dataset = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=valid_sampler)
# class MLP(torch.nn.Module):
# def __init__(self):
# super(MLP,self).__init__()
# self.fc1 = torch.nn.Linear(768,200)
# self.fc2 = torch.nn.Linear(200,2)
# def forward(self,din):
# din = din.view(-1,768)
# dout = torch.nn.functional.sigmoid(self.fc1(din))
# dout = torch.nn.functional.sigmoid(self.fc2(dout))
# # return pt.nn.functional.sigmoid(dout,dim=1)
# return dout
class MLP(torch.nn.Module):
    """Single linear layer (768 -> 30) followed by an element-wise sigmoid.

    Maps a 768-dim embedding (presumably a BERT sentence vector -- see the
    x_bert*.pt inputs) to 30 per-class activations; any leading shape is
    flattened to (-1, 768).
    """

    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = torch.nn.Linear(768, 30)

    def forward(self, din):
        din = din.view(-1, 768)
        # torch.sigmoid replaces the deprecated torch.nn.functional.sigmoid
        # (identical math, no deprecation warning).
        return torch.sigmoid(self.fc1(din))
# Instantiate the classifier on the GPU selected above.
model = MLP().cuda()
print(model)
# loss func and optim
#optimizer = torch.optim.Adam(model.parameters(), lr=0.0101, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
optimizer = torch.optim.SGD(model.parameters(),lr=0.001)
lossfunc = torch.nn.CrossEntropyLoss().cuda()
#lossfunc = pt.nn.BCEWithLogitsLoss().cuda()
# test accuarcy
# print(AccuarcyCompute(
#     np.array([[1,10,6],[0,2,5]],dtype=np.float32),
#     np.array([[1,2,8],[1,2,5]],dtype=np.float32)))
# Per-batch top-3 accuracies, separated into train/test populations.
training_data_list_sample,test_data_list_sample=[],[]
# Per-epoch mean top-3 accuracies for train/test.
training_data_list_epoch,test_data_list_epoch=[],[]
def train():
    """Run n_epochs of SGD over train_dataset, recording per-batch and
    per-epoch top-3 accuracy for both the training and validation loaders.

    Uses the module-level model/optimizer/lossfunc/loaders and appends to
    the module-level accuracy lists and acc_lst.
    NOTE(review): the files opened with open(..., 'a+') below are never
    written to -- the print() calls go to stdout, not to `f`; confirm
    whether `print(..., file=f)` was intended.
    """
    for epoch in range(n_epochs):
        acc=0
        accuarcy_list = []
        for i,data in enumerate(train_dataset):
            optimizer.zero_grad()
            (inputs,labels) = data
            inputs = torch.autograd.Variable(inputs.to(device)).cuda()
            labels = torch.autograd.Variable(labels.to(device)).cuda()
            outputs = model(inputs)
            loss = lossfunc(outputs,labels)
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                # Log the last batch of each epoch, plus any batch whose
                # top-3 accuracy crosses the thresholds below.
                if (i+1) == len(train_dataset):
                    acc=AccuarcyComputeT3(outputs,labels)
                    print(epoch,":",acc)
                    if acc>0.75:
                        print(inputs,labels,loss,np.argmax(outputs.cpu().data.numpy(),1))
                mid=AccuarcyComputeT3(outputs,labels)
                if mid>0.5:
                    with open('5_layer_top_3_training_data.txt','a+') as f:
                        print("MID>0.5",mid,np.argmax(outputs.cpu().data.numpy(),1),labels)
                accuarcy_list.append(mid)
                training_data_list_sample.append(mid)
        with torch.no_grad():
            # Mean training top-3 accuracy for this epoch.
            acc=sum(accuarcy_list) / len(accuarcy_list)
            training_data_list_epoch.append(acc)
            accuarcy_list = []
        with torch.no_grad():
            # Validation pass over the held-out loader.
            for i,(inputs,labels) in enumerate(test_dataset):
                inputs = torch.autograd.Variable(inputs).cuda()
                labels = torch.autograd.Variable(labels).cuda()
                outputs = model(inputs)
                mid=AccuarcyComputeT3(outputs,labels)
                if mid>0.5:
                    with open('5_layer_top_3_test_data.txt','a+') as f:
                        print("MID>0.5",mid,np.argmax(outputs.cpu().data.numpy(),1),labels)
                accuarcy_list.append(mid)
                test_data_list_sample.append(mid)
            acc=sum(accuarcy_list) / len(accuarcy_list)
            acc_lst.append(acc)
            test_data_list_epoch.append(acc)
            if epoch%20==0:
                print('***** Test Result: {:.4f}, Step: {}'.format(acc, epoch))
                print(inputs,labels,np.argmax(outputs.cpu().data.numpy(),1))
def test():
    """Evaluate top-1 accuracy of the module-level `model` over
    `test_dataset`, printing the mean and the final batch's predictions.

    BUG FIX: the original body referenced `pt.autograd`, but this module
    imports the library as `torch` and `pt` is never defined -- calling
    test() raised NameError.  The alias is corrected; behaviour is
    otherwise unchanged.
    """
    accuarcy_list = []
    for i,(inputs,labels) in enumerate(test_dataset):
        inputs = torch.autograd.Variable(inputs).cuda()
        labels = torch.autograd.Variable(labels).cuda()
        outputs = model(inputs)
        accuarcy_list.append(AccuarcyCompute(outputs,labels))
    print(sum(accuarcy_list) / len(accuarcy_list))
    print(inputs,labels,np.argmax(outputs.cpu().data.numpy(),1))
# Train a fresh model, evaluate it, then plot accuracy curves and
# accuracy-distribution histograms.
# NOTE(review): `model` is re-created here, discarding the instance built
# above, and `plt`/`np` are used before the matplotlib/numpy imports that
# originally appeared a few lines further down -- hoist those imports.
acc_lst=[]
model = MLP().cuda()
train()
test()
# Accuracy
plt.plot(range(len(acc_lst)), acc_lst)
plt.savefig('error.png')
plt.plot(range(len(test_data_list_epoch)), test_data_list_epoch)
plt.savefig('5_layer_top_3_test_data_list_epoch.png')
plt.close()
plt.plot(range(len(training_data_list_epoch)), training_data_list_epoch)
plt.savefig('5_layer_top_3_training_data_list_epoch.png')
plt.close()
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
# Histograms of per-batch top-3 accuracy for test and training batches.
plt.hist(test_data_list_sample, range=(0,1))
plt.xlabel("label")
plt.ylabel("freq")
plt.xlim(0, 1)
plt.title("5_layer_top_3_test_data_list_sample")
plt.savefig('5_layer_top_3_test_data_list_sample.png')
plt.close()
plt.hist(training_data_list_sample, range=(0,1))
plt.xlabel("label")
plt.ylabel("freq")
plt.xlim(0, 1)
plt.title("5_layer_top_3_training_data_list_sample")
plt.savefig('5_layer_top_3_training_data_list_sample.png')
plt.close()
print(len(training_data_list_sample))
print(len(test_data_list_sample))
print(sum(i >0.4 for i in training_data_list_sample))
print(sum(i >0.4 for i in test_data_list_sample)) #13750 32000 780 2144
# Split the per-batch accuracies at 0.3 and plot each side separately.
k1, k2 = [], []
print(sum(i > 0.7 for i in test_data_list_sample))
for i in training_data_list_sample:
    if i > 0.3:
        k1.append(i)
    else:
        k2.append(i)
plt.hist(k2)
plt.xlabel("label")
plt.ylabel("freq")
plt.xlim(0, 0.3)
plt.title("Binary_5_layer_top_1_test_data_list_sample")
plt.savefig('F02_5_layer_top_3_training_data_list_sample.png')
plt.close()
plt.hist(k1)
plt.xlabel("label")
plt.ylabel("freq")
plt.xlim(0.3, 0.9)
plt.title("Binary_5_layer_top_1_test_data_list_sample")
plt.savefig('F28_5_layer_top_3_training_data_list_sample.png')
plt.close()
# Same split for the test-batch accuracies.
k1, k2 = [], []
for i in test_data_list_sample:
    if i > 0.3:
        k1.append(i)
    else:
        k2.append(i)
plt.hist(k2)
plt.xlabel("label")
plt.ylabel("freq")
plt.xlim(0, 0.3)
plt.title("Binary_5_layer_top_1_test_data_list_sample")
plt.savefig('F02_5_layer_top_3_test_data_list_sample.png')
plt.close()
plt.hist(k1)
plt.xlabel("label")
plt.ylabel("freq")
# NOTE(review): the source appears truncated here -- the final histogram's
# title/savefig/close calls are missing.
plt.xlim(0.3, 0.9)
|
[
"chenbc@sjtu.edu.cn"
] |
chenbc@sjtu.edu.cn
|
ba0a0979ec5aa6c21e36ac124a45997ac4ff3a15
|
53e04629451d4e76ccf7304cebccc0d97170f98a
|
/node_modules/pineapple/node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi
|
e8b54c7809908945ae0cb157b30a13ad47842981
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
moovatom/moovatom-js
|
a33a5548535c862f20f126e6c90e623c1bf947d6
|
0a3691bc0b9934589f0cb7c3316c48f6029bdcfb
|
refs/heads/master
| 2021-01-13T02:16:10.015973
| 2012-12-29T19:11:19
| 2012-12-29T19:11:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,831
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 42,
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_prefix": "/usr/local/Cellar/node/0.8.14",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/Users/werle/.node-gyp/0.8.14",
"copy_dev_lib": "true",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/share/npm/etc/npmignore",
"shell": "/bin/bash",
"init_author_url": "",
"parseable": "",
"email": "joseph.werle@gmail.com",
"userignorefile": "/Users/werle/.npmignore",
"init_author_email": "",
"sign_git_tag": "",
"cache_max": "null",
"long": "",
"ignore": "",
"npat": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/share/npm/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"coverage": "",
"pre": "",
"engine_strict": "",
"description": "true",
"https_proxy": "",
"userconfig": "/Users/werle/.npmrc",
"init_module": "/Users/werle/.npm-init.js",
"npaturl": "http://npat.npmjs.org/",
"user": "504",
"node_version": "v0.8.14",
"save": "",
"editor": "subl",
"tag": "latest",
"global": "",
"username": "werle",
"optional": "true",
"force": "",
"searchopts": "",
"depth": "null",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"group": "20",
"fetch_retry_factor": "10",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "",
"cache": "/Users/werle/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.8.14",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"init_version": "0.0.0",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/v_/g4f3bd6n1vx99bmf4f0_291h0000gr/T/",
"unsafe_perm": "true",
"link": "",
"prefix": "/usr/local/share/npm"
}
}
|
[
"joseph.werle@gmail.com"
] |
joseph.werle@gmail.com
|
ba860627bff98abd7e2bed65878863b6d4dbcf5e
|
fdc49067bfcb67c038d2db4464bcfb5cca30ebc8
|
/src/safetywrap/__init__.py
|
24b0a5329378b7cbb0fcae9bf8f5206c3c19e950
|
[
"Apache-2.0"
] |
permissive
|
MustardForBreakfast/safetywrap
|
96d726af7bcfd9f6de79263c7c9e81e6e050ecf5
|
170f836e12df455aed9b6dce5e7c634f6b9e8f87
|
refs/heads/master
| 2020-12-07T07:29:13.249294
| 2020-01-08T15:37:06
| 2020-01-08T15:37:06
| 232,672,517
| 0
| 0
|
Apache-2.0
| 2020-01-08T22:27:40
| 2020-01-08T22:27:39
| null |
UTF-8
|
Python
| false
| false
| 267
|
py
|
"""Typesafe python versions of Rust-inspired result types."""
# Public API re-exported at package level.
__all__ = ("Option", "Result", "Ok", "Err", "Some", "Nothing")
__version__ = "1.1.0"
# Numeric form of the version string, e.g. "1.1.0" -> (1, 1, 0).
__version_info__ = tuple(map(int, __version__.split(".")))
# The concrete types live in the private _impl module.
from ._impl import Option, Result, Ok, Err, Some, Nothing
|
[
"msplanchard@gmail.com"
] |
msplanchard@gmail.com
|
42cc7b83af5192affc936746a49636525817a5b7
|
428b23f6c702acdbd4c4a30896d3aa53aaba9246
|
/plugins/web/active/W3AF_Unauthenticated@OWTF-WVS-004.py
|
bf86d82d2e95270f27fe36e7fa7ba0514412ab23
|
[] |
no_license
|
assem-ch/owtf
|
6a8f9663e16a1997ade7e290c024aca3c10c2d32
|
4d90bdc260edd226385e736831abcd450b9f107b
|
refs/heads/master
| 2021-01-16T21:47:22.592695
| 2014-01-06T20:44:15
| 2014-01-06T20:44:15
| 9,756,924
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
"""
owtf is an OWASP+PTES-focused try to unite great tools and facilitate pen testing
Copyright (c) 2011, Abraham Aranguren <name.surname@gmail.com> Twitter: @7a_ http://7-a.org
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright owner nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ACTIVE Plugin for Generic Unauthenticated Web App Fuzzing via w3af
This will perform a "low-hanging-fruit" pass on the web app for easy to find (tool-findable) vulns
"""
DESCRIPTION = "Active Vulnerability Scanning without credentials via w3af"
def run(Core, PluginInfo):
    """Plugin entry point: execute the configured W3AF_Unauth resource
    commands and render their output as a command-dump report."""
    #Core.Config.Show()
    return Core.PluginHelper.DrawCommandDump('Test Command', 'Output', Core.Config.GetResources('W3AF_Unauth'), PluginInfo, "")
|
[
"abraham.aranguren@gmail.com"
] |
abraham.aranguren@gmail.com
|
a3a8dc82b9694d926ab2a13730c19b066db74876
|
b44f2bfdedfc3ac0dd60df426f01a8205173b389
|
/SECURE ERP/model/hr/hr.py
|
13994dcb91ee149a5ff3fab8d790a6bfbfb5de43
|
[] |
no_license
|
imarcins/ERP
|
1baab4ae9c0556828cf8173b4676e677c6f31e3f
|
fa96a9110201307aff81dd8311085b42036be47c
|
refs/heads/main
| 2023-02-10T13:02:36.592101
| 2020-12-19T14:19:40
| 2020-12-19T14:19:40
| 322,863,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,702
|
py
|
import operator
from model import data_manager, util
from typing import List
# from datetime import date
# from datetime import datetime
DATAFILE = "model/hr/hr.csv"
headers = ["Id", "Name", "Date of birth", "Department", "Clearance"]
YEARS_OLD = 0
NAME_INDEX=1
DOB_INDEX=2
DEPARTMENT_INDEX=3
CLEARANCE_INDEX=4
CURRENT_YEAR=2020
def get_employee():
    """Return all rows of the HR table (header row included) as a list.

    Consistency fix: use the module-level DATAFILE constant instead of
    repeating the "model/hr/hr.csv" path literal.
    """
    read_table = data_manager.read_table_from_file(DATAFILE)
    return list(read_table)
def add_employee(arguments):
    """Append a new employee row (with a freshly generated id) and persist it.

    arguments: a row list whose first slot is overwritten with the new id.
    """
    employees = get_employee()
    arguments[0] = util.generate_id()
    employees.append(arguments)
    new_list = data_manager.write_table_to_file(DATAFILE, employees)
    return new_list
def update_element(index: int, index_options: int, new_info: str):
    """Overwrite one field of one employee row and persist the table.

    index is relative to the data rows (header excluded).
    """
    rows = get_employee()[1:]  # read without the header row
    rows[index][index_options] = new_info
    data_manager.write_table_to_file(DATAFILE, rows)
def show_employee(date_id: str):
    """Return (index, row) for the first data row containing date_id.

    Falls through (returning None implicitly) when no row matches.
    """
    rows = get_employee()[1:]  # skip the header row
    for position, row in enumerate(rows):
        if date_id in row:
            return position, rows[position]
def delete_employee(index: int):
    """Remove the data row at `index` (header excluded) and persist."""
    rows = get_employee()[1:]
    del rows[index]
    data_manager.write_table_to_file(DATAFILE, rows)
def convert_date(a):
    """Currently the identity function; the real parser is left disabled."""
    # return list(map(int, a.split("-")))  # disabled parser kept for reference
    return a
def date_1(list_of_employee):
    """Return every row's date-of-birth column split on '-'.

    NOTE(review): the parameter is unused (the table is re-read) and the
    header row is included in the result -- confirm that is intended.
    """
    return [row[DOB_INDEX].split("-") for row in get_employee()]
|
[
"noreply@github.com"
] |
imarcins.noreply@github.com
|
0d2e54a53abfb81d992c291c78a83d1897d2f5d2
|
dfa02970133ee08cae66061ab28f1b8dec210745
|
/calc.py
|
38c4db7d3d2427e8c537204a9c713c2272901d4a
|
[] |
no_license
|
KGT777/git-test
|
ab486e395cc01594926555a721abadb1447da9aa
|
67c12f53f3c41cbd727b2983123ba4c2659ee954
|
refs/heads/master
| 2022-12-19T13:51:47.540892
| 2020-09-20T14:32:18
| 2020-09-20T14:32:18
| 297,090,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# add program
def sum_function(n1, n2):
    """Return the sum of the two given integers."""
    return n1 + n2
# Read two integers and print their sum.
num1 = int(input("input number 1"))
num2 = int(input("input number 2"))
# Renamed from 'sum' to avoid shadowing the built-in sum().
total = sum_function(num1, num2)
print(num1, "+", num2, "=", total)
|
[
"eric0523@naver.com"
] |
eric0523@naver.com
|
aec47ac264535f56213ffb03c3bbbc0190acf1c7
|
c5075dba60f127261baabe9d32bc17babdd6fa98
|
/Django/myvenv/bin/markdown_py
|
50d69005e1e77ad2a370622171a9243e2295e92d
|
[] |
no_license
|
peterretief/drf_vue_boilerplate
|
d2cd3d590d0b0f61b2968a314ab6679091d9b7f7
|
f1665366efc5687d7286012c83839b384c71c9a2
|
refs/heads/master
| 2021-08-15T00:02:33.071615
| 2017-11-17T01:58:11
| 2017-11-17T01:58:11
| 111,043,480
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,043
|
#!/home/peter/SCRATCH/Django/myvenv/bin/python3
"""
Python Markdown, the Command Line Script
========================================
This is the command line script for Python Markdown.
Basic use from the command line:
markdown source.txt > destination.html
Run "markdown --help" to see more options.
See markdown/__init__.py for information on using Python Markdown as a module.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see docs/LICENSE for details).
"""
if __name__ == '__main__':
    # Delegate straight to the markdown package's CLI entry point.
    from markdown.__main__ import run
    run()
|
[
"piet@nix64bit.com"
] |
piet@nix64bit.com
|
|
f1508f07fed37ca315f9fa194b758e983d5eb175
|
716369db2f47b300018112951abbdbebe8335310
|
/30/main.py
|
0e279d5bc0c2bc9bed8cf3a2cbafe8de750af4cb
|
[] |
no_license
|
Pedrcavalc/Exercicios-em-Python-openCV
|
069dbc4e1923041d105bcf9bea6b79faf21c2dc3
|
8a80f12048711162856dad48ee71d4f3469f349d
|
refs/heads/main
| 2023-07-16T10:03:33.562022
| 2021-09-02T21:40:19
| 2021-09-02T21:40:19
| 383,163,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
import cv2
import numpy as np
# Load the input image and convert it to grayscale for edge detection.
# NOTE(review): cv2.imread returns BGR, but COLOR_RGB2GRAY is used -- the
# channel weighting is swapped; confirm whether COLOR_BGR2GRAY was intended.
image = cv2.imread('image.png')
grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Canny edge detection with hysteresis thresholds 80 (low) / 200 (high).
canny = cv2.Canny(grayscale, 80, 200)
contornos, heranca = cv2.findContours(canny,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Draw all contours (index -1) in red (BGR 0,0,255), 3 px thick, on a copy.
imagem_contorno = np.copy(image)
cv2.drawContours(imagem_contorno, contornos, -1, (0, 0, 255), 3)
cv2.imshow('Resultado',imagem_contorno)
# Block until a key press so the result window stays open.
cv2.waitKey(0)
|
[
"77468171+pedrokkl@users.noreply.github.com"
] |
77468171+pedrokkl@users.noreply.github.com
|
e44158ee7da6945ff660a11c2a54e14194737054
|
90c1f648df5fb188c10abadec76387f1c85cfb9c
|
/nbsplit
|
fa9a16fecf62368ca3d05674400ed1ce7faee49f
|
[] |
no_license
|
minrk/script-dump
|
914a323e103d1dccc864b3405920e08972728fb6
|
3b1152fbeaf8bd5e39236e0126a6a02cf4fa76a8
|
refs/heads/master
| 2023-08-27T23:52:53.627600
| 2015-07-13T02:43:59
| 2015-07-13T02:43:59
| 38,987,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,409
|
#!/usr/bin/env python
"""
Usage: nbsplit notebook.ipynb [other_notebooks.ipynb]
Script for splitting IPython notebooks based on heading level 1 cells.
Just add heading 1 wherever you want your notebook to be split.
Author: Min RK (@minrk)
License: Public Domain
"""
import io
import os
import sys
from IPython.nbformat import current
def split_notebook(notebook_name):
    """Split one .ipynb into several notebooks, cutting at each level-1 heading."""
    print("reading %s" % notebook_name)
    with io.open(notebook_name, encoding='utf-8') as f:
        nb = current.read(f, 'json')
    # Legacy (v3) notebook format: all cells live in the first worksheet.
    ws = nb['worksheets'][0]
    original_cells = ws['cells']
    # Collect the index of every level-1 heading cell; these are cut points.
    h1 = []
    for idx, cell in enumerate(original_cells):
        if cell['cell_type'] == 'heading' and cell['level'] == 1:
            h1.append(idx)
    # Ensure the first segment starts at cell 0 even without a leading heading.
    if h1 and h1[0] != 0:
        h1.insert(0, 0)
    h1.append(len(original_cells))
    base_name, ext = os.path.splitext(notebook_name)
    print("splitting %s into %s notebooks" % (notebook_name, len(h1)-1))
    # Each consecutive pair of cut points [lower, upper) is one output file.
    for count, lower, upper in zip(range(1, len(h1)), h1[:-1], h1[1:]):
        ws['cells'] = original_cells[lower:upper]
        fname = "%s_%i%s" % (base_name, count, ext)
        print("writing cells [%i,%i) to %s" % (lower, upper, fname))
        with io.open(fname, 'w', encoding='utf-8') as f:
            current.write(nb, f, 'json')
if __name__ == '__main__':
    # Split every notebook named on the command line.
    for notebook_name in sys.argv[1:]:
        split_notebook(notebook_name)
|
[
"benjaminrk@gmail.com"
] |
benjaminrk@gmail.com
|
|
4bbf268dc80e532dae794338425822960e9e5af6
|
c90d8cca7790387061bc132c1f76363a97f79f14
|
/mvp_texting_app/schedules/forms.py
|
595d396b0ebc71fb489cfa4345a3460672ed5e3d
|
[] |
no_license
|
scottleith/mvp_for_kevin
|
505273618bff6020f791cf84bea7e0c934f6246e
|
c018bb043d061bd959e52d7d798c7bbccaa63b00
|
refs/heads/master
| 2022-12-01T05:18:54.736586
| 2020-08-23T15:23:46
| 2020-08-23T15:23:46
| 289,253,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
from django import forms
from mvp_texting_app.schedules.models import TextBooking, GoalSchedule
class TextBookingCreationForm(forms.ModelForm):
    """Form for creating a TextBooking."""
    class Meta:
        model = TextBooking
        fields = ['user_id', 'text_id', 'start_send_period', 'end_send_period']
class TextBookingChangeForm(forms.ModelForm):
    """Form for editing an existing TextBooking (same fields as creation)."""
    class Meta:
        model = TextBooking
        fields = ['user_id', 'text_id', 'start_send_period', 'end_send_period']
class GoalScheduleCreationForm(forms.ModelForm):
    """Form for creating a GoalSchedule."""
    class Meta:
        model = GoalSchedule
        fields = [
            'user_id',
            'goal_name', 'goal_description',
            'start', 'end'
        ]
class GoalScheduleChangeForm(forms.ModelForm):
    """Form for editing an existing GoalSchedule (same fields as creation)."""
    class Meta:
        model = GoalSchedule
        fields = [
            'user_id',
            'goal_name', 'goal_description',
            'start', 'end'
        ]
|
[
"scottford.leith@gmail.com"
] |
scottford.leith@gmail.com
|
b69979cd90644572d710f129a9762b6b1f90398b
|
0d987f2217e4fb25844204f785aeaef90dd17e7e
|
/bot.py
|
15032dfc7e846e7bf1d5e418cb9f9216811e02a6
|
[] |
no_license
|
theamankumarsingh/CoviReq
|
9db43402f1c8c07514526dd96cc4ae4fe5a3c8e5
|
3adec6ab74d647db2f353f85a4f1c765affc98a6
|
refs/heads/main
| 2023-04-30T14:22:20.093266
| 2021-05-24T14:53:26
| 2021-05-24T14:53:26
| 367,649,338
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,067
|
py
|
import logging
import os
import tweepy
from tweepy import OAuthHandler
from os import environ
import telegram
from datetime import date, timedelta
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, CallbackContext
from datetime import datetime
from pytz import timezone
# strftime pattern used for tweet timestamps. NOTE: shadows the builtin format().
format = "%d-%m-%Y %H:%M:%S %Z%z"
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Twitter and Telegram credentials come from environment variables.
access_token = environ['access_token']
access_token_secret = environ['access_token_secret']
consumer_key = environ['consumer_key']
consumer_secret = environ['consumer_secret']
http_api = environ['http_api']
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token,access_token_secret)
# wait_on_rate_limit makes tweepy sleep through Twitter API rate limits.
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
tweets = []
dt = date.today() - timedelta(1)  # yesterday; appears unused below
def menu(update: Update, _: CallbackContext) -> None:
    """/menu handler: show the inline keyboard of COVID resource categories.

    Each button's callback_data is the search keyword passed to button().
    """
    keyboard = [
        [
            InlineKeyboardButton("Beds", callback_data='Beds'),
            InlineKeyboardButton("ICU", callback_data='ICU'),
        ],
        [
            InlineKeyboardButton("Oxygen Cylinders", callback_data='Oxygen%20Cylinders'),
            InlineKeyboardButton("Plasma", callback_data='Plasma')
        ],
        [
            InlineKeyboardButton("Food", callback_data='Food'),
            InlineKeyboardButton("Ambulance", callback_data='Ambulance'),
        ],
        [
            InlineKeyboardButton("Blood", callback_data='Blood'),
            InlineKeyboardButton("Amphotericin", callback_data='Amphotericin'),
        ],
        [
            InlineKeyboardButton("Remdesivir", callback_data='Remdesivir'),
            InlineKeyboardButton("Favipiravir", callback_data='Favipiravir'),
        ],
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    update.message.reply_text('Please choose one of the following :', reply_markup=reply_markup)
def city(update, context, *args):
    """/city handler: remember the user's chosen city (persisted in city.txt)."""
    try:
        city = context.args[0]
    except Exception:
        update.message.reply_text("Hey, User I also need the name of a city after /city. Let me give you an example: /city mumbai")
        # Bug fix: without this return, 'city' was unbound below and the
        # handler crashed with UnboundLocalError after showing the help text.
        return
    update.message.reply_text("The city has been set as:"+city+"\nEnter /menu for the options")
    # Persist the choice so button() can read it later.
    with open("city.txt", "w") as f:
        f.write(city)
def time_converter(time_input):
    """Convert a 'dd-mm-yyyy HH:MM:SS ...' UTC stamp to an IST display string.

    Applies the +05:30 offset by hand. The date portion is NOT rolled over
    when the shift crosses midnight (preserved limitation of the original).
    """
    day_part = time_input[0:2] + "/" + time_input[3:5] + "/" + time_input[6:11]
    hour = int(time_input[11:13])
    minute = int(time_input[14:16])
    second = time_input[17:19]  # extracted but unused, as before
    carry = 0
    minute += 30
    if minute >= 60:
        minute -= 60
        carry = 1
    hour += 5 + carry
    if hour >= 24:
        hour -= 24
    # Zero-pad so strptime's %H:%M always sees two digits.
    if hour < 10:
        hour = "0" + str(hour)
    if minute < 10:
        minute = "0" + str(minute)
    parsed = datetime.strptime(str(hour) + ":" + str(minute), "%H:%M")
    return " DATE:" + day_part + " TIME:" + parsed.strftime("%I:%M %p")
def scrapetweets(city, option):
    """Search recent unverified 'urgent/required' tweets for city + option.

    Returns a list of tweet permalinks, each suffixed with its IST timestamp.
    """
    new_search = city +" "+ option + " -filter:retweets -verified -unverified -available" + " urgent AND required" # " required OR patients OR needed OR attendants OR #required"
    link=[]
    # Inspect at most 5 matching tweets (Cursor pages 100 at a time).
    for tweet in tweepy.Cursor(api.search, q=new_search, lang="en",count=100).items(5):
        try:
            data = [tweet.id]
            status = api.get_status(tweet.id)
            created_at = status.created_at
            temp_time = created_at.strftime(format)
            final_time = time_converter(str(temp_time))
            link.append(f"https://twitter.com/anyuser/status/"+str(data[0]) + " " + str(final_time))
        except tweepy.TweepError as e:
            # Skip tweets that fail to resolve (deleted, protected, ...).
            print(e.reason)
            continue
        except StopIteration:
            break
    return link
def button(update: Update, _: CallbackContext) -> None:
    """Inline-keyboard callback: fetch and send tweets for the chosen option."""
    query = update.callback_query
    f = open("city.txt", "r")
    city=f.read()
    f.close()
    bot = telegram.Bot(token=http_api)
    query.answer()
    # No city chosen yet -> search country-wide.
    if(city=='%20'or city==''):
        city='India'
    link=scrapetweets(city,str(query.data))
    if (len(link)>0):
        bot.sendMessage(update.effective_user.id,text=f"{len(link)} 𝐫𝐞𝐜𝐞𝐧𝐭 𝐭𝐰𝐞𝐞𝐭𝐬 𝐚𝐫𝐞:\n")
    else:
        bot.sendMessage(update.effective_user.id,text=f"𝐒𝐨𝐫𝐫𝐲, 𝐍𝐨 𝐫𝐞𝐜𝐞𝐧𝐭 𝐭𝐰𝐞𝐞𝐭𝐬 𝐰𝐞𝐫𝐞 𝐟𝐨𝐮𝐧𝐝\n")
    for i in link:
        bot.sendMessage(update.effective_user.id,text=i)
    # Also hand the user a live Twitter search URL for the same query.
    search=f"https://twitter.com/search?q=verified%20"+city+"%20"+str(query.data)+"%20-'not%20verified'%20-'un%20verified'+'urgent'-filter:retweets&f=live"
    bot.sendMessage(update.effective_user.id,text="𝐓𝐨 𝐯𝐢𝐞𝐰 𝐚𝐥𝐥 𝐭𝐡𝐞 𝐫𝐞𝐬𝐮𝐥𝐭𝐬 𝐜𝐥𝐢𝐜𝐤 𝐭𝐡𝐢𝐬 𝐥𝐢𝐧𝐤:\n")
    bot.sendMessage(update.effective_user.id,text=search)
def help_command(update: Update, _: CallbackContext) -> None:
    """/help handler: short usage instructions."""
    update.message.reply_text("Use /city CITY NAME to enter the city name.\nUse /menu to start using the covid resource bot")
def bot_intro(update: Update, _: CallbackContext) -> None:
    """/start handler: greet the user and explain the workflow."""
    update.message.reply_text("HI, User I am CoviReq 'Always Ready to help'. To use me just type /city <CITY NAME> and then type /menu and choose your requirement option from the options available ")
def main() -> None:
    """Wire up all handlers and run the bot with long polling."""
    updater = Updater(http_api)
    updater.dispatcher.add_handler(CommandHandler('start', bot_intro))
    updater.dispatcher.add_handler(CommandHandler('city', city))
    updater.dispatcher.add_handler(CommandHandler('menu', menu))
    updater.dispatcher.add_handler(CallbackQueryHandler(button))
    updater.dispatcher.add_handler(CommandHandler('help', help_command))
    updater.start_polling()
    # Block until the process receives SIGINT/SIGTERM.
    updater.idle()
if __name__ == '__main__':
    # Reset the persisted city to a blank value so button() falls back to
    # "India" until the user runs /city.
    with open("city.txt", "w") as f:
        f.write(' ')
    main()
|
[
"zapaktayat@gmail.com"
] |
zapaktayat@gmail.com
|
ebeec7044c71a01d8da72d24a0b68c61d1725491
|
889dcbcdae1be8be516d89bafc21ce32dbe63c66
|
/run.py
|
8e66b3ddf737767ad46fba30638cda56083b2e9c
|
[] |
no_license
|
fujitako03/sponavi_data
|
18ec5d1e6ecab8f9f54b8df8db536442145d7a65
|
eb3601bed18f976c1fae5d7633e14c3930b16313
|
refs/heads/main
| 2023-07-31T17:03:43.691357
| 2021-04-25T14:52:03
| 2021-04-25T14:52:03
| 350,350,203
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# import argparse
import datetime
import os
from omegaconf import OmegaConf
from src.scraping import ScrapingSponavi
now_datetime = datetime.datetime.now()
# Configuration: merge CLI overrides with the yaml files under config/.
conf_dir = "config"
conf_cli = OmegaConf.from_cli()
conf_exec = OmegaConf.load(os.path.join(conf_dir, "config_exec.yaml"))
conf_path = OmegaConf.load(os.path.join(conf_dir, "config_path.yaml"))
conf_url = OmegaConf.load(os.path.join(conf_dir, "config_url.yaml"))
conf_team = OmegaConf.load(os.path.join(conf_dir, "config_team.yaml"))
conf_schedule = OmegaConf.load(os.path.join(conf_dir, "config_schedule.yaml"))
conf_table = OmegaConf.load(os.path.join(conf_dir, "config_table.yaml"))
# NOTE: later arguments win on key conflicts, so CLI values given first are
# overridden by file values -- confirm that precedence is intended.
conf_merge = OmegaConf.merge(conf_cli, conf_exec, conf_path, conf_url, conf_team, conf_schedule, conf_table)
# Build the scraper for the configured date range.
ss = ScrapingSponavi(
    start_date=conf_merge.start_date,
    end_date=conf_merge.end_date,
    config=conf_merge
)
# Scrape game/score data when enabled.
if conf_merge.exec_run_score:
    ss.exec_score_scraping()
# Scrape player information when enabled.
if conf_merge.exec_run_player:
    ss.exec_player_scraping()
|
[
"thisis.snsd03@gmail.com"
] |
thisis.snsd03@gmail.com
|
cb63ce23f06d65570b27a21342404337db38f8ce
|
3fdddc28f7dbd7bd4d2a4ad9e44c433166dc5b4b
|
/userlogs/mixins.py
|
d5fd004bf63f1077fee79a5ed2469763559c9dd5
|
[] |
no_license
|
Swiftkind/swiftlearn
|
1206d858fb1acaf7077ea7250aec1182b3d6b5b0
|
b66d2479ae2e42fb50c4fc39090fb66258a19428
|
refs/heads/master
| 2020-05-21T08:51:24.163184
| 2016-11-16T06:22:48
| 2016-11-16T06:22:48
| 69,433,900
| 0
| 4
| null | 2016-11-18T08:36:08
| 2016-09-28T06:41:41
|
CSS
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
from .models import RecentActivity
class RecentActivityMixin(object):
    """Mixin that records user actions as RecentActivity rows.

    The redundant pass-through __init__ (which only delegated to super with
    unchanged arguments) has been removed; behavior is identical.
    """
    # Model used to persist activity entries; override in subclasses if needed.
    ra_model = RecentActivity

    def log_activity(self, user, action, action_type, obj=None):
        """Create an activity row.

        Links to obj's event URL when obj is given, otherwise to the
        acting user's own profile URL.
        """
        link = obj.get_event_url() if obj else user.get_profile_url()
        return self.ra_model.objects.create(user=user,
            action=action, action_type=action_type, link=link)
|
[
"earvin.gemenez@gmail.com"
] |
earvin.gemenez@gmail.com
|
4a6d90fb63723a618f882fb752f6dbb883a38bd6
|
12dcf02fd184c7c59df1ae526ef1f071c0dd4744
|
/RegexProblems/regexHW3.py
|
e761867cc77b7f811018fbc3d101dafeeed6fd4a
|
[] |
no_license
|
rk9md/TJArtificialIntelligence
|
c1d909afb38bb9070cc10de015ea37aa98ec4d5c
|
98bcbd43a1ae6186f4f5c2cc03560f742d422abb
|
refs/heads/main
| 2023-01-04T02:03:12.939251
| 2020-10-29T21:52:28
| 2020-10-29T21:52:28
| 307,576,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,249
|
py
|
import sys, re
# Print the stored regex answer for homework problem N (31-55), where N is
# given as the first command-line argument.
probNum = int(sys.argv[1])
# Answers in problem order; solutions[i] is problem 31 + i.
solutions = []
solutions.append("/^0$|^100$|^101$/") #31
solutions.append("/^[01]+$/") #32
solutions.append("/0$/") #33
solutions.append("/\w*[aeiou]\w*[aeiou]\w*/i") #34
solutions.append("/^0$|^1[01]*0$/") #35
solutions.append("/^[01]*110[01]*$/") #36
solutions.append("/^.{2,4}$/") #37
solutions.append("/^[0-9]{3} *-? *[0-9]{2} *-? *[0-9]{4}$/") #38
solutions.append("/^.*?d/im") #39
solutions.append("/^11*0[10]*1$|^00*1[10]*0$/") #40
solutions.append(r"/\b[pck]\w*/i") #41
solutions.append("/^.(..)*$/s") #42
solutions.append("/^(0([01][01])*|1[01]([01][01])*)$/") #43
solutions.append("/^0*(10+)*$/") #44
solutions.append("/^[.XO]{64}$/i") #45
solutions.append("/^[XO]*[.][XO]*$/i") #46
solutions.append("/(^XX*O+[.]|[.]O+X*X$)/i|^[.]|[.]$/") #47
solutions.append("/^([bc]+a?[bc]*|[bc]*a[bc]*)$/") #48
solutions.append("/^([bc]+(a[bc]*a)*[bc]*|[bc]*(a[bc]*a)+[bc]*)$/") #49
solutions.append("/[02]*(1[02]*1)*[02]*/") #50
solutions.append(r"/(.)\1{9}/s") #51
solutions.append(r"/(\w)\w*\1/i") #52
solutions.append(r"/(\w)+\1\w*/") #53
solutions.append(r"/(\w)+\w*\1\w*/") #54
solutions.append(r"/^(0|1)[10]*\1$/") #55
# Map problem number -> answer string, then emit the requested one.
problems = {x+31:sol for x, sol in enumerate(solutions)}
print(problems[probNum])
|
[
"rk9md@virgina.edu"
] |
rk9md@virgina.edu
|
123138f24b34e5b829bc93732d8a32c02eced224
|
3d0244bcf6be6c174714b75076667c27aff93c5c
|
/Module-2/CP-ELECTIVE-07-isfactor-Python/07-isfactor-Python/isfactor.py
|
95f38b2f8fb9bdb1408d040e0c9572fb89cb0aab
|
[] |
no_license
|
BommakantiHarshitha-1/O11_Cp-Python
|
e1d3be1b7ac4ad8acfc9e547be507d30f3a319cf
|
023b77df1a0c8e36200f7aa2d18a1cd6e996a029
|
refs/heads/main
| 2023-07-08T01:22:29.479674
| 2021-09-02T09:52:37
| 2021-09-02T09:52:37
| 402,368,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 341
|
py
|
# Write the function isFactor(f, n) that takes
# two int values f and n, and returns True
# if f is a factor of n, and False otherwise.
# Note that every integer is a factor of 0.
def fun_isfactor(f, n):
    """Return True when f is a factor of n.

    Every integer is a factor of 0; 0 itself divides nothing except 0.
    """
    if f == 0:
        return n == 0
    return n % f == 0
|
[
"bommakantiharshitha@msitprogram.net"
] |
bommakantiharshitha@msitprogram.net
|
30586dd7481107af8d7542ad9fe3809322ad3987
|
edda7e3af0b5078bb06ab5fd8448d15428b00f77
|
/public/sw2_off.py
|
160ad736b1f6c530638f703ace3417d247bad0c6
|
[] |
no_license
|
quique123/aismarthome
|
8f141ea07a8785f899d6fd0ac4e4cc3eab2c5165
|
3c01863a2a4538f57467b772c046cc2865f5eb9d
|
refs/heads/master
| 2021-01-19T13:49:35.690907
| 2017-05-14T00:10:31
| 2017-05-14T00:10:31
| 88,111,304
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
# Edit line 6 to match your chosen GPIO pin-off
# NOTE: Python 2 source (print statements). The GPIO code is commented out;
# the script instead sends 'p' over the serial port to switch the device off.
import logging
#import RPi.GPIO as GPIO
logging.warning('2gpio pin toggle OFF!') # will print a message to the console
#GPIO.setwarnings(False)
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(23, GPIO.IN)
#GPIO.cleanup()
#!/usr/bin/env python
import serial
# 9600 8N1 on the Pi's primary UART, 1 s read timeout.
ser = serial.Serial(
    port='/dev/serial0',
    baudrate=9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1
)
print "Serial is open: " + str(ser.isOpen())
print "Now Writing"
# 'p' is the off command understood by the attached device -- TODO confirm.
ser.write("p")
#ser.write("AT+CONNL")
print "Did write, now read"
x = ser.readline()
print "got '" + x + "'"
ser.close()
|
[
"noreply@github.com"
] |
quique123.noreply@github.com
|
735daf1cd92ff2fedd3e8631b5e61e3a43f54fd5
|
44e0a68d3d7403a569d2a776d54369aec1253512
|
/experiments/day06/epm062.py
|
d2686e01a9d24661ef1ef0dd4fde2c5bd420328d
|
[] |
no_license
|
infrub/TaNuKi
|
c1d016f987b27a90ebe48b0d6a977bc01387dbce
|
1623c258054eb0bc7163606d61a901c2b067a7ac
|
refs/heads/master
| 2020-06-01T06:09:05.063582
| 2019-11-09T02:45:27
| 2019-11-09T02:45:27
| 190,666,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,666
|
py
|
import sys,os
sys.path.append('../../')
from tanuki import *
import numpy as np
import scipy as sp
import scipy.optimize as spo
import random
from colorama import Fore, Back, Style
import math
import matplotlib as mpl
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import pandas as pd
from datetime import datetime
import textwrap
from timeout_decorator import timeout, TimeoutError
from math import *
import itertools
pd.options.display.max_columns = 30
pd.options.display.width = 160
np.set_printoptions(linewidth=float("inf"))
tnc.display_max_size = float("inf")
def partition_function(beta, Jx, Jy, Lx, Ly):
    """Exact partition function of the finite 2D Ising model at inverse
    temperature beta with couplings Jx, Jy on an Lx x Ly lattice.

    Appears to be the Kaufman/Onsager closed-form (product over transfer-
    matrix eigenvalues gamma_k) -- TODO confirm against the reference.
    ef_cosh/ef_sinh are presumably extended-range float helpers star-imported
    from tanuki (values overflow ordinary floats for large lattices); verify.
    """
    a = beta * Jx
    b = beta * Jy
    gamma = [None for _ in range(2*Lx)]
    for k in range(2*Lx):
        cosh_g = ( ef_cosh(2*a) * ef_cosh(2*b) - cos(pi*k/Lx) * ef_sinh(2*b) ) / ef_sinh(2*a)
        gamma[k] = (cosh_g + (cosh_g * cosh_g - 1).sqrt()).log
    # Sign choice for the k=0 branch when sinh(2a)sinh(2b) > 1.
    if ef_sinh(2*a) * ef_sinh(2*b) > 1: gamma[0] = -gamma[0]
    # Four partial products over odd/even eigenvalue indices.
    p0,p1,p2,p3 = 1.0,1.0,1.0,1.0
    for k in range(1,Lx+1):
        p0 *= 2 * ef_cosh(Ly * gamma[2*k-1] / 2)
        p1 *= 2 * ef_sinh(Ly * gamma[2*k-1] / 2)
        p2 *= 2 * ef_cosh(Ly * gamma[2*k-2] / 2)
        p3 *= 2 * ef_sinh(Ly * gamma[2*k-2] / 2)
    z = 0.5 * ( (2 * ef_sinh(2*a)) ** (Lx*Ly/2) ) * (p0 + p1 + p2 - p3);
    return z
# Experiment parameters: isotropic coupling J, 2^width_scale x 2^height_scale
# lattice, bond dimension chi used for truncation.
beta = 1.0
J = 0.9
width_scale = 5
height_scale = 5
chi = 10
print(f"beta:{beta}, width_scale:{width_scale}, height_scale:{height_scale}, chi:{chi}\n\n")
def make_Z_TPK():
    """Build the checkerboard tensor-network contraction task for Z.

    Constructs the two-site Ising Boltzmann-weight gate, splits it four
    times into site tensors and bond tensors, and assembles a
    Ptn2DCheckerBTPK whose contraction yields the partition function.
    """
    # Boltzmann weights: exp(+beta*J) for aligned spins, exp(-beta*J) otherwise.
    gate = zeros_tensor((2,2,2,2), ["ain","aout","bin","bout"])
    gate.data[1,1,1,1] = np.exp(beta*J)
    gate.data[0,0,0,0] = np.exp(beta*J)
    gate.data[0,0,1,1] = np.exp(-beta*J)
    gate.data[1,1,0,0] = np.exp(-beta*J)
    gate = onedim.Opn1DTMO(gate, [["aout"],["bout"]], [["ain"],["bin"]])
    A = identity_tensor((2,), labels=["ain","aout"])
    B = identity_tensor((2,), labels=["bin","bout"])
    # Absorb four decompositions of the gate (one per lattice direction).
    Ss = []
    for _ in range(4):
        funi = gate.to_BTPO()
        a,S,b = funi.tensors[0], funi.bdts[1], funi.tensors[1]
        A = A["aout"]*a["ain"]
        Ss.append(S)
        B = B["bout"]*b["bin"]
    L,R,U,D = tuple(Ss)
    A = A.trace("aout","ain")
    B = B.trace("bout","bin")
    return twodim.Ptn2DCheckerBTPK(A,B,L,R,U,D, width_scale=width_scale, height_scale=height_scale)
def epm0620_core(symbol):
    """Compute the free energy F = -log(Z)/beta for one algorithm variant.

    symbol is either "othn" (exact reference via partition_function) or a
    4-letter code selecting truncation/env/contract/parity options.
    """
    Z_TPK = make_Z_TPK()
    def calc_Z(symbol):
        if symbol == "othn":
            # Exact reference value for the same lattice size.
            return partition_function(beta,J,J,2**(width_scale),2**(height_scale))
        # Decode the 4-letter variant code into keyword arguments.
        a,b,c,d = symbol[0],symbol[1],symbol[2],symbol[3]
        kwargs = {}
        kwargs["loop_truncation_algname"] = {"N":"naive","C":"canonize","I":"iterative"}[a]
        kwargs["env_choice"] = {"N":"no","H":"half"}[b]
        kwargs["contract_before_truncate"] = {"A":False,"B":True}[c]
        kwargs["drill_parity"] = {"E":0,"O":1}[d]
        return Z_TPK.calculate(chi=chi, **kwargs)
    #@timeout(120)
    def calc_F_value(symbol):
        Z = calc_Z(symbol)
        return -1.0 / beta * Z.log
    return calc_F_value(symbol)
def epm0620():
    """Run epm0620_core for each variant and print results sorted by F."""
    #symbols = ["othn"] + [a+b+c+d for a in "NCI" for b in "HN" for c in "AB" for d in "EO"]
    symbols = ["othn"] + [a+bc+d for a in "NC" for bc in ["HA","NA"] for d in "EO"]
    results = []
    for symbol in symbols:
        #if kwargs!="othn" and kwargs["loop_truncation_algname"] == "canonize": continue
        print()
        print(symbol)
        try:
            F_value = epm0620_core(symbol)
            print(symbol, F_value)
            results.append((symbol,F_value))
        except Exception as e:
            # Record a sentinel value, but still abort: the re-raise below
            # means only the first failure is ever reported.
            print(symbol, e)
            results.append((symbol,9999))
            raise e
    print("\n\n")
    # Summary, best (lowest) free energy first.
    results.sort(key=lambda a: a[1])
    for symbol, F_value in results:
        print(symbol, F_value)
def epm0621():
    """Compare 3-step renormalization sequences of variant codes against
    the exact value, accumulating the per-step weights into Z."""
    print("othn", epm0620_core("othn"))
    #symbol_seqs = [["CNAE","CNAE","CNAE"],["INAE","CNAE","CNAE"]]
    symbol_seqs = itertools.product(["CHBE","CNAE"],repeat=3)
    for symbol_seq in symbol_seqs:
        Z_TPK = make_Z_TPK()
        Z = 1.0
        for symbol in symbol_seq:
            # Same 4-letter decoding as in epm0620_core.
            a,b,c,d = symbol[0],symbol[1],symbol[2],symbol[3]
            kwargs = {}
            kwargs["loop_truncation_algname"] = {"N":"naive","C":"canonize","I":"iterative"}[a]
            kwargs["env_choice"] = {"N":"no","H":"half"}[b]
            kwargs["contract_before_truncate"] = {"A":False,"B":True}[c]
            kwargs["drill_parity"] = {"E":0,"O":1}[d]
            Z_TPK,w = Z_TPK.renormalize(chi=chi, **kwargs)
            Z *= w
        Z *= Z_TPK.calculate()
        F_value = -1.0 / beta * Z.log
        print(symbol_seq, Z, F_value)
#print(epm0620_core("CNAE"))
epm0620()
|
[
"infrub@gmail.com"
] |
infrub@gmail.com
|
0e1b1f5898264a45fc866939cc4df962912238cc
|
e50b25ba63e854f5d2c5114c8ebb6ee410221321
|
/proyecto/camera.py
|
bc4463caa4f3a8b6d36a776068e1313ba3747d6c
|
[] |
no_license
|
PatrickC96/proyectoFlask
|
6d95ccf4820f02867f7d7cb7af8049c302a2150b
|
8768417068d3d56b186fead7fdd9eb0673077562
|
refs/heads/master
| 2022-12-03T08:11:20.535816
| 2020-08-10T22:43:46
| 2020-08-10T22:43:46
| 286,539,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,163
|
py
|
import cv2
import threading
class RecordingThread (threading.Thread):
    """Background thread that writes frames from a shared camera to
    ./static/video.avi until stop() is called."""
    def __init__(self, name, camera):
        threading.Thread.__init__(self)
        self.name = name
        self.isRunning = True   # cleared by stop() to end the run() loop
        self.cap = camera       # shared cv2.VideoCapture owned by the caller
        # MJPG writer at 20 fps, 640x480 frame size.
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        self.out = cv2.VideoWriter('./static/video.avi',fourcc, 20.0, (640,480))
    def run(self):
        # Pump frames until stop() flips the flag, then close the writer.
        while self.isRunning:
            ret, frame = self.cap.read()
            if ret:
                self.out.write(frame)
        self.out.release()
    def stop(self):
        # Signal run() to exit; safe to call from another thread.
        self.isRunning = False
    def __del__(self):
        # NOTE(review): run() already released self.out; this releases it a
        # second time (cv2 tolerates it, but it is redundant).
        self.out.release()
class VideoCamera(object):
    """Wraps a cv2.VideoCapture and serves JPEG-encoded frames.

    Bug fix: the class previously defined __del__ twice, so only the last
    definition took effect; they are merged into a single destructor. The
    large blocks of commented-out recording code were removed (the recording
    attributes are kept so the interface is unchanged).
    """

    def __init__(self):
        # Open the default camera (device 0).
        self.cap = cv2.VideoCapture(0)
        # Video-recording state; actual recording is currently disabled.
        self.is_record = False
        self.out = None
        # Thread for recording
        self.recordingThread = None

    def __del__(self):
        # Release the camera device when the wrapper is garbage-collected.
        self.cap.release()

    def get_frame(self):
        """Grab one frame and return it as JPEG bytes, or None on failure."""
        ret, frame = self.cap.read()
        if ret:
            ret, jpeg = cv2.imencode('.jpg', frame)
            return jpeg.tobytes()
        else:
            return None
|
[
"patrick.cabezas@epn.edu.ec"
] |
patrick.cabezas@epn.edu.ec
|
00d9682b2a409b8ecb4448ac47f96be6a14242fd
|
2fdf33eff3f22a4f2e0337f065646de8fe6cc01f
|
/mq/tests/rabbitmq_tests.py
|
61f7ce3d503fd6c6e4a134b6dc043a8599b79028
|
[
"MIT"
] |
permissive
|
apnarm/python-mq
|
007d978fe6a23b0d65555909ad34f2a21df5c5d5
|
14037cf86abc2393c4f8d791fd76bcca7a781607
|
refs/heads/master
| 2020-04-15T10:30:05.739270
| 2014-06-09T20:58:47
| 2014-06-09T20:58:47
| 20,659,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
import unittest
from mq.backends import rabbitmq_backend
from mq.tests import AMQPTestCase, ThreadingTestCase
class RabbitTests(AMQPTestCase, ThreadingTestCase, unittest.TestCase):
    """RabbitMQ backend integration tests (Python 2: xrange, assertEquals)."""
    backend = rabbitmq_backend.create_backend()
    def test_put(self):
        """Ensure that the put method works, even with connection failures."""
        with self.backend.open(self.test_queue) as queue:
            # Send some messages while messing with the connection.
            queue.put('hello1')
            queue.connection.channel.close()
            queue.put('hello2')
            queue.connection.close()
            queue.put('hello3')
            # Confirm that the messages went through.
            received = []
            for message, ack in queue:
                received.append(message)
            self.assertEquals(received, ['hello1', 'hello2', 'hello3'])
    def test_put_mandatory(self):
        """
        When putting a message onto a non-existent queue, it should raise an
        exception every few attempts. It seems to almost always happen on the
        3rd attempt but I think I saw it on the 4th attempt this one time.
        """
        with self.backend.open(self.test_queue) as queue:
            # Sending 10 messages to a declared queue should work.
            for x in xrange(10):
                queue.put('hello')
            # But it will break when sending messages to a non-existent queue.
            queue.delete()
            self.__class__.test_queue_declared = False
            for x in xrange(5):
                try:
                    queue.put('hello')
                except self.backend.connection_errors:
                    # Expected failure: the broker rejected the publish.
                    break
            else:
                self.fail('It was meant to complain about the missing queue.')
if __name__ == '__main__':
    unittest.main()
|
[
"randomy@gmail.com"
] |
randomy@gmail.com
|
e6cfc63f9f227868bc459f191652431fa0477e4e
|
5ad0b1987054e431737d1348c3f3eaaf08c79d9d
|
/avx2-hps2048677/bitpermutations/bitpermutations/printing.py
|
777f9223402664cc0e03b2d4e4285cf3f605db08
|
[
"CC0-1.0"
] |
permissive
|
jschanck/ntru
|
050c5597683faa0754d26c7bda9cb2ab86825e2a
|
6d96ed3746000b6497392a425712fe483988fc80
|
refs/heads/master
| 2022-12-27T10:37:56.485583
| 2021-11-11T19:24:53
| 2021-11-11T19:24:53
| 170,588,295
| 27
| 12
|
CC0-1.0
| 2021-06-18T12:44:35
| 2019-02-13T22:19:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,331
|
py
|
from .data import MemoryFragment, ZERO
import bitpermutations.instructions as instructions
import bitpermutations.data as data
import bitpermutations.utils as utils
from .utils import reg_to_memfunc
def print_memfunc(f, in_size, out_size, per_reg=256, initialize=False):
    """Wraps a function that operates on registers in .data and .text sections,
    and makes it operate on memory fragments instead.

    Args:
        f: function taking (out_data, in_data) lists of MemoryFragment.
        in_size: number of input fragments, addressed relative to %rsi.
        out_size: number of output fragments, addressed relative to %rdi.
        per_reg: bits held per fragment (one register's worth).
        initialize: when True, pre-fill the input with the sequence 0..676
            padded with ZERO (useful when testing permutations).
    """
    in_data = [MemoryFragment(per_reg, '{}(%rsi)'.format(per_reg*i // 8))
               for i in range(in_size)]
    # BUG FIX: size the output buffer with out_size; it previously reused
    # in_size, which only worked when the two happened to be equal.
    out_data = [MemoryFragment(per_reg, '{}(%rdi)'.format(per_reg*i // 8))
                for i in range(out_size)]
    if initialize:
        utils.sequence_to_values(in_data, range(0, 677), padding=ZERO)
    # Start from a clean slate so repeated calls do not accumulate
    # instructions or data-section entries from a previous trace.
    instructions.reset()
    data.reset()
    f(out_data, in_data)
    # Emit the .data section (masks gathered while tracing f).
    print(".data")
    print(".p2align 5")
    for mask in data.DATASECTION:
        print(mask.data())
    # Emit the .text section, exporting both the plain and the
    # underscore-prefixed symbol (for platforms that mangle C names).
    print(".text")
    print(".global {}".format(f.__name__))
    print(".global _{}".format(f.__name__))
    print("{}:".format(f.__name__))
    print("_{}:".format(f.__name__))
    for ins in instructions.INSTRUCTIONS:
        print(ins)
    print("ret")
def print_reg_to_memfunc(f, in_size, out_size, per_reg=256):
    """Lift a register-based function to a memory-based one and print it."""
    lifted = reg_to_memfunc(f, in_size, out_size, per_reg)
    print_memfunc(lifted, in_size, out_size, per_reg)
|
[
"jschanck@uwaterloo.ca"
] |
jschanck@uwaterloo.ca
|
637a1c8f06af5ecc08fb671ef4c0396554e1172c
|
4c8966e6a65f9707f45f67988906110308f34f99
|
/test/selenium_mock.py
|
32935d6e2923033ed73357eeec9742059282c0f8
|
[] |
no_license
|
xprathamesh/Git-Answer-Bot-for-Stack-Overflow
|
67445b3893d937f5d77aa49630a0969e5081e696
|
7b3249784e9ea8ed2ff35fd3dba42b896cc92b41
|
refs/heads/master
| 2023-07-25T22:39:47.282757
| 2023-07-10T04:31:29
| 2023-07-10T04:31:29
| 234,755,180
| 0
| 0
| null | 2023-07-10T04:31:30
| 2020-01-18T15:28:56
|
Python
|
UTF-8
|
Python
| false
| false
| 7,087
|
py
|
import asyncio
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import poster
import Credentials as c
from test import mocking
# Ensure question answer is posted.
class TestAnswerQueryH(unittest.TestCase):
    """Happy scenario for providing an answer to a git-tagged question.
    """
    def setUp(self) -> None:
        # Log into Stack Overflow Teams with the shared test credentials
        # from the Credentials module (imported as `c`).
        self.driver = webdriver.Firefox()
        self.driver.get("https://stackoverflow.com/users/login?ssrc=channels&returnurl=%2fc%2fncsu%2f")
        self.driver.find_element_by_id('has-public-account-radio').click()
        email = self.driver.find_element_by_name('email')
        passwd = self.driver.find_element_by_name('password')
        email.send_keys(c.login['user'])
        passwd.send_keys(c.login['pwd'])
        self.driver.find_element_by_name('submit-button').click()
    def test_git_answer(self):
        # TODO need to get reference to page object
        # Post a question through the bot, then verify on the question page
        # that an answer exists and was authored by the bot account.
        iden = asyncio.get_event_loop().run_until_complete(poster.run(poster._q))
        self.driver.get('https://stackoverflow.com/c/ncsu/questions/{}'.format(iden))
        answers = self.driver.find_element_by_id('answers')
        assert answers.get_attribute("class") != "no-answers"
        answer = answers.find_element_by_class_name("user-details")
        name = answer.find_element_by_class_name('d-none').text
        assert name in c.login['user']
    def tearDown(self) -> None:
        self.driver.close()
# Question answer is not posted.
class TestAnswerQueryU(unittest.TestCase):
    """Unhappy scenario for providing an answer to a git-tagged question.
    """
    def setUp(self) -> None:
        # Same login flow as the happy-path class; see TestAnswerQueryH.
        self.driver = webdriver.Firefox()
        self.driver.get(
            "https://stackoverflow.com/users/login?ssrc=channels&returnurl=%2fc%2fncsu%2f")
        self.driver.find_element_by_id('has-public-account-radio').click()
        email = self.driver.find_element_by_name('email')
        passwd = self.driver.find_element_by_name('password')
        email.send_keys(c.login['user'])
        passwd.send_keys(c.login['pwd'])
        self.driver.find_element_by_name('submit-button').click()
    def test_git_answer(self):
        # Expect the question page to report no answers posted.
        iden = asyncio.get_event_loop().run_until_complete(
            poster.run(poster._q))
        self.driver.get(
            'https://stackoverflow.com/c/ncsu/questions/{}'.format(iden))
        answers = self.driver.find_element_by_id('answers')
        assert answers.get_attribute("class") == 'no-answers'
    def tearDown(self) -> None:
        self.driver.close()
class TestExampleQueryH(unittest.TestCase):
    """Happy scenario for an example query: the bot account is expected to
    have posted an answer to the generated question."""
    def setUp(self) -> None:
        # Same login flow as TestAnswerQueryH.
        self.driver = webdriver.Firefox()
        self.driver.get(
            "https://stackoverflow.com/users/login?ssrc=channels&returnurl=%2fc%2fncsu%2f")
        self.driver.find_element_by_id('has-public-account-radio').click()
        email = self.driver.find_element_by_name('email')
        passwd = self.driver.find_element_by_name('password')
        email.send_keys(c.login['user'])
        passwd.send_keys(c.login['pwd'])
        self.driver.find_element_by_name('submit-button').click()
    def test_example_query(self):
        # Post a question via the bot and verify an answer by the bot exists.
        iden = asyncio.get_event_loop().run_until_complete(
            poster.run(poster._q))
        self.driver.get(
            'https://stackoverflow.com/c/ncsu/questions/{}'.format(iden))
        answers = self.driver.find_element_by_id('answers')
        assert answers.get_attribute("class") != "no-answers"
        user = answers.find_element_by_class_name("user-details")
        name = user.find_element_by_class_name('d-none').text
        assert name in c.login['user']
    def tearDown(self) -> None:
        self.driver.close()
class TestExampleQueryU(unittest.TestCase):
    """Unhappy scenario for an example query: no answer should be posted."""
    def setUp(self) -> None:
        # Same login flow as TestAnswerQueryH.
        self.driver = webdriver.Firefox()
        self.driver.get(
            "https://stackoverflow.com/users/login?ssrc=channels&returnurl=%2fc%2fncsu%2f")
        self.driver.find_element_by_id('has-public-account-radio').click()
        email = self.driver.find_element_by_name('email')
        passwd = self.driver.find_element_by_name('password')
        email.send_keys(c.login['user'])
        passwd.send_keys(c.login['pwd'])
        self.driver.find_element_by_name('submit-button').click()
    def test_example_query(self):
        # Expect the question page to report no answers posted.
        iden = asyncio.get_event_loop().run_until_complete(
            poster.run(poster._q))
        self.driver.get(
            'https://stackoverflow.com/c/ncsu/questions/{}'.format(iden))
        answers = self.driver.find_element_by_id('answers')
        assert answers.get_attribute("class") == 'no-answers'
    def tearDown(self) -> None:
        self.driver.close()
class TestMultipleQuestionsH(unittest.TestCase):
    """Happy scenario when multiple questions are queued: the bot should
    still answer the posted question."""
    def setUp(self) -> None:
        # Same login flow as TestAnswerQueryH.
        self.driver = webdriver.Firefox()
        self.driver.get(
            "https://stackoverflow.com/users/login?ssrc=channels&returnurl=%2fc%2fncsu%2f")
        self.driver.find_element_by_id('has-public-account-radio').click()
        email = self.driver.find_element_by_name('email')
        passwd = self.driver.find_element_by_name('password')
        email.send_keys(c.login['user'])
        passwd.send_keys(c.login['pwd'])
        self.driver.find_element_by_name('submit-button').click()
    def test_multiple_questions(self):
        # Post a question via the bot and verify an answer by the bot exists.
        iden = asyncio.get_event_loop().run_until_complete(
            poster.run(poster._q))
        self.driver.get(
            'https://stackoverflow.com/c/ncsu/questions/{}'.format(iden))
        answers = self.driver.find_element_by_id('answers')
        assert answers.get_attribute("class") != "no-answers"
        user = answers.find_element_by_class_name("user-details")
        name = user.find_element_by_class_name('d-none').text
        assert name in c.login['user']
    def tearDown(self) -> None:
        self.driver.close()
class TestMultipleQuestionsU(unittest.TestCase):
    """Unhappy scenario for multiple queued questions.

    NOTE(review): despite the 'U' (unhappy) suffix, the assertions below
    are identical to the happy-path TestMultipleQuestionsH (they expect an
    answer to exist) — this looks like a copy-paste oversight; confirm the
    intended negative expectation.
    """
    def setUp(self) -> None:
        # Same login flow as TestAnswerQueryH.
        self.driver = webdriver.Firefox()
        self.driver.get(
            "https://stackoverflow.com/users/login?ssrc=channels&returnurl=%2fc%2fncsu%2f")
        self.driver.find_element_by_id('has-public-account-radio').click()
        email = self.driver.find_element_by_name('email')
        passwd = self.driver.find_element_by_name('password')
        email.send_keys(c.login['user'])
        passwd.send_keys(c.login['pwd'])
        self.driver.find_element_by_name('submit-button').click()
    def test_multiple_questions(self):
        iden = asyncio.get_event_loop().run_until_complete(
            poster.run(poster._q))
        self.driver.get(
            'https://stackoverflow.com/c/ncsu/questions/{}'.format(iden))
        answers = self.driver.find_element_by_id('answers')
        assert answers.get_attribute("class") != "no-answers"
        user = answers.find_element_by_class_name("user-details")
        name = user.find_element_by_class_name('d-none').text
        assert name in c.login['user']
    def tearDown(self) -> None:
        self.driver.close()
if __name__ == '__main__':
unittest.main()
|
[
"pratzorro@gmail.com"
] |
pratzorro@gmail.com
|
9d2c4f4bb1a18bb3263f2a4969d8bb08dfff556a
|
7f06cc76a513df2a068d8d897b4219b61a676a78
|
/stockProject/stockpython/venv/Scripts/pip-script.py
|
1e2c7a28daaf53b3c019cfc7212cbc8e96d0480d
|
[] |
no_license
|
leeshinil/StockPredictProject
|
8b952cc1c75da465117a9e9bb084adf17554b9cc
|
0627e97e77031170d6f3726cdf55c2c380245f4f
|
refs/heads/master
| 2020-09-12T07:52:11.412032
| 2019-11-18T04:22:58
| 2019-11-18T04:22:58
| 220,896,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
#!C:\Users\lexsh\Desktop\stockProject\stockpython\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
# Machine-generated setuptools console-script wrapper for pip 19.0.3.
# NOTE(review): auto-generated file — do not hand-edit; regenerate via pip.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the Windows "-script.py(w)"/".exe" suffix so argv[0] looks
    # like a plain "pip" invocation to the entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
    )
|
[
"lexshinil1@gmail.com"
] |
lexshinil1@gmail.com
|
87855cf5b923a4e5f5316f403bf81da48b239eb7
|
a38180435ac5786185c0aa48891c0aed0ab9d72b
|
/S4/S4 Library/simulation/routing/route_events/route_event_type_animation.py
|
be5f29988cfc84019d4368610cf2769fc1e53357
|
[
"CC-BY-4.0"
] |
permissive
|
NeonOcean/Environment
|
e190b6b09dd5dbecba0a38c497c01f84c6f9dc7d
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
refs/heads/master
| 2022-12-03T13:17:00.100440
| 2021-01-09T23:26:55
| 2021-01-09T23:26:55
| 178,096,522
| 1
| 1
|
CC-BY-4.0
| 2022-11-22T20:24:59
| 2019-03-28T00:38:17
|
Python
|
UTF-8
|
Python
| false
| false
| 6,715
|
py
|
from animation.arb import Arb
from animation.arb_element import distribute_arb_element
from animation.posture_manifest import MATCH_NONE
from event_testing.resolver import SingleObjectResolver, SingleSimResolver
from event_testing.results import TestResult
from interactions import ParticipantType
from interactions.utils.animation_reference import TunableAnimationReference
from interactions.utils.routing import FollowPath
from postures import are_carry_compatible
from routing.route_events.route_event_mixins import RouteEventDataBase
from sims4.math import MAX_INT32
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, OptionalTunable, TunableRange, TunableEnumEntry
import sims4.log
logger = sims4.log.Logger('RouteEvents', default_owner='rmccord')
class RouteEventTypeAnimation(RouteEventDataBase, HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'animation_element': TunableAnimationReference(description='\n The animation that Sims play during the Route Event.\n ', callback=None, class_restrictions=()), '_duration_override': OptionalTunable(description="\n If enabled, we override the must run duration we expect this route\n event to take. We do this for animations that will freeze the\n locomotion so that we don't actually take time away from the rest of\n the path where other route events could play.\n ", tunable=TunableRange(description='\n The duration we want this route event to have. This modifies how\n much of the route time this event will take up to play the\n animation. For route events that freeze locomotion, you might\n want to set this to a very low value. Bear in mind that high\n values are less likely to be scheduled for shorter routes.\n ', tunable_type=float, default=0.1, minimum=0.1)), 'target_participant': OptionalTunable(description='\n The target of the animation based on the resolver of the actor\n playing the route event.\n ', tunable=TunableEnumEntry(description='\n The participant related to the actor that plays the route event.\n ', tunable_type=ParticipantType, default=ParticipantType.ObjectChildren))}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.arb = None
self._duration_total = MAX_INT32
self._duration_must_run = MAX_INT32
self._duration_repeat = MAX_INT32
@classmethod
def test(cls, actor, event_data_tuning, ignore_carry=False):
if actor is None:
return TestResult(False, 'Route Event Actor is None.')
if actor.is_sim:
postures = event_data_tuning.animation_element.get_supported_postures()
sim_posture_state = actor.posture_state
provided_postures = sim_posture_state.body.get_provided_postures(surface_target=MATCH_NONE)
supported_postures = provided_postures.intersection(postures)
if not supported_postures:
return TestResult(False, 'Animation Route Event does not support {} for {}.', actor.posture_state, actor)
if not ignore_carry:
carry_state = sim_posture_state.get_carry_state()
if not any(are_carry_compatible(entry, carry_state) for entry in supported_postures):
return TestResult(False, 'Animation Route Event does not support {} for {}.', actor.posture_state, actor)
return TestResult.TRUE
@property
def duration_override(self):
if self._duration_override is not None:
return self._duration_override
return self._duration_must_run
def get_target(self, actor):
if self.target_participant is None:
return
else:
if actor.is_sim:
resolver = SingleSimResolver(actor.sim_info)
else:
resolver = SingleObjectResolver(actor)
targets = resolver.get_participants(self.target_participant)
if targets:
return next(iter(targets))
def prepare(self, actor, setup_asm_override=None):
def restart_asm(asm):
asm.set_current_state('entry')
return True
target = self.get_target(actor)
routing_component = actor.routing_component
if actor.is_sim:
route_interaction = routing_component.route_interaction
if route_interaction is None:
logger.error('Route Interaction was None for {}', actor)
return
route_event_animation = self.animation_element(route_interaction, setup_asm_additional=restart_asm if setup_asm_override is None else setup_asm_override, enable_auto_exit=False)
asm = route_event_animation.get_asm()
if asm is not None and target is not None and not asm.set_actor(route_event_animation.target_name, target):
logger.error('Route Event {} Failed to setup target.', self)
return
if asm is None:
logger.warn('Unable to get a valid Route Event ASM ({}) for {}.', route_event_animation, actor)
return
else:
route_event_animation = self.animation_element(actor, target=target, setup_asm_func=restart_asm if setup_asm_override is None else setup_asm_override)
animation_context = routing_component.animation_context
asm = route_event_animation.get_asm(animation_context=animation_context)
if asm is None:
logger.warn('Unable to get a valid Route Event ASM ({}) for {}.', route_event_animation, actor)
return
self.arb = Arb()
route_event_animation.append_to_arb(asm, self.arb)
route_event_animation.append_exit_to_arb(asm, self.arb)
if self.arb is None:
logger.error('Unable to create arb for Route Event: {}', self)
return
(self._duration_total, self._duration_must_run, self._duration_repeat) = self.arb.get_timing()
def is_valid_for_scheduling(self, actor, path):
if self.arb is None or self.arb.empty:
return False
return True
def execute(self, actor, **kwargs):
if actor.primitives:
for primitive in tuple(actor.primitives):
if isinstance(primitive, FollowPath):
primitive.set_animation_sleep_end(self._duration_must_run)
return
def process(self, actor):
if self.arb is not None:
distribute_arb_element(self.arb, master=actor, immediate=True)
|
[
"40919586+NeonOcean@users.noreply.github.com"
] |
40919586+NeonOcean@users.noreply.github.com
|
59658cf05a54e4a57f45925e24fbec4959e3ccf9
|
5ffdf4ddee5700e6bb3b062a07c1a9cf7e6adbc1
|
/Algorithms/Implementation/breaking_the_records.py
|
b81b0de921ac912d759cafa31ef64032fb68eafa
|
[
"MIT"
] |
permissive
|
byung-u/HackerRank
|
23df791f9460970c3b4517cb7bb15f615c5d47d0
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
refs/heads/master
| 2021-05-05T13:05:46.722675
| 2018-03-30T08:07:36
| 2018-03-30T08:07:36
| 104,960,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
#!/usr/bin/env python3
def getRecord(S):
    """Return [high_breaks, low_breaks]: how many times the running best
    and worst scores in S were strictly beaten after the first game."""
    highest = lowest = S[0]
    breaks_high = breaks_low = 0
    for score in S:
        if score > highest:
            # New best score.
            highest = score
            breaks_high += 1
        elif score < lowest:
            # New worst score.
            lowest = score
            breaks_low += 1
    return [breaks_high, breaks_low]
# HackerRank driver: read the game count (only consumed to advance stdin)
# and the space-separated scores, then print "best_breaks worst_breaks".
n = int(input().strip())
s = list(map(int, input().strip().split(' ')))
result = getRecord(s)
print (" ".join(map(str, result)))
|
[
"iam.byungwoo@gmail.com"
] |
iam.byungwoo@gmail.com
|
853e370583cc940d09d41b89f8639fbb639d69c0
|
11097c7a3a96afee9320fe2cb45a079369285e09
|
/install_python_dependencies.py
|
63f4219ee97f824a899918edb0e89c447b8794a2
|
[] |
no_license
|
natsukoa/JapaneseTokenizers
|
70069dc40d22c90933c178a4540d8871785ad1f2
|
3c9a46d5d9debe5bafc0533ca739e00c0bbffe71
|
refs/heads/master
| 2021-06-11T09:22:52.279014
| 2017-01-13T00:29:10
| 2017-01-13T00:29:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
__author__ = 'kensuke-mi'

import sys
import pip

python_version = sys.version_info

# Install every requirement listed in requirement.txt, one pip call per
# line. The mecab entry is special-cased: Python 3 needs mecab-python3.
with open("requirement.txt") as f:
    for line in f:
        # BUG FIX: strip once up front — the Python 2 mecab branch used to
        # pass the raw line (trailing newline included) to pip, and blank
        # lines triggered a pointless `pip install ''`.
        package = line.strip()
        if not package:
            continue
        if 'mecab' in package:
            if python_version < (3, 0, 0):
                pip.main(['install', package])
            else:
                pip.main(['install', 'mecab-python3'])
        else:
            pip.main(['install', package])
|
[
"kensuke.mit@gmail.com"
] |
kensuke.mit@gmail.com
|
29c0e607c253fca5a74336aa897e4d78e2a2c4fe
|
054ddbc1fa0e1b1d0a999bbe877591e942aa0f12
|
/python/05-python高级/06-网络编程-1/01-udp-send.py
|
409ff15c6f9e439048ba4b1cd5498ce3afe1e511
|
[] |
no_license
|
VinceBy/newone
|
66c8cf77159344c7d2ec196233d58a412e1c3073
|
ffc6a0d9ccbdb3f66c4995834f01e3bc2df0415d
|
refs/heads/master
| 2022-02-22T23:00:21.720497
| 2019-07-09T08:47:25
| 2019-07-09T08:47:25
| 195,958,240
| 0
| 0
| null | 2022-02-12T09:19:32
| 2019-07-09T07:42:20
|
Python
|
UTF-8
|
Python
| false
| false
| 114
|
py
|
from socket import *
# Create a UDP (datagram) socket and send one 4-byte payload to
# 192.168.172.128:7788. UDP is connectionless: no reply is awaited and the
# socket is released on process exit rather than closed explicitly.
udpSocket = socket(AF_INET,SOCK_DGRAM)
udpSocket.sendto(b"haha",("192.168.172.128",7788))
|
[
"1260787968@qq.com"
] |
1260787968@qq.com
|
cefc5440623e28814164707c8880de589e176b81
|
dbea3c8b47fb1db1068b8323eb2c457e4188cb3e
|
/app/models.py
|
18d48abfe0266c60c70e7f78e9d9dd4ff8c632fd
|
[] |
no_license
|
Yingmin-Li/twitter-api
|
2d062fde8404437e8df4ab7a6a61ca9a92454a2e
|
0b232f43d63c7a4f1ee2001f0d2d558e25ca6300
|
refs/heads/master
| 2023-01-19T18:46:39.325246
| 2020-11-26T09:48:44
| 2020-11-26T09:48:44
| 316,015,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
import time
class Tweet:
    """In-memory tweet record with string timestamps.

    Timestamps are formatted as "%.20f" % time.time() so they sort
    lexicographically for same-epoch values.
    """

    # Class-level defaults; instances overwrite text/created_at/updated_at.
    id = None
    text = ''
    created_at = 0
    updated_at = 0

    def __init__(self, text):
        """Create a tweet; both timestamps start out identical."""
        stamp = "%.20f" % time.time()
        self.text = text
        self.created_at = stamp
        self.updated_at = stamp

    def update(self, text):
        """Replace the text and refresh only the update timestamp."""
        self.text = text
        self.updated_at = "%.20f" % time.time()
|
[
"yingmin.chine@gmail.com"
] |
yingmin.chine@gmail.com
|
933c75171570e88f04cbae3a7b568c48902e153f
|
91c41d714ac5a61049093153560ac4a4873d2550
|
/chater5/zoo.py
|
44656e830b30e3819cdab6a741a26aceb30e4bb0
|
[] |
no_license
|
hyunjeeChoi/introducing-Python
|
8f0d2b5b754c6aa0a36e1bb933d5f26809008c3c
|
195141f0b203b62f5e5c6c92f92669ef79bf229a
|
refs/heads/master
| 2020-03-19T00:57:06.594545
| 2018-06-23T06:31:16
| 2018-06-23T06:31:16
| 135,512,021
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
def hours():
    """Print the zoo's opening hours."""
    message = 'Open 9-5 daily'
    print(message)
|
[
"jenny.choi@kakaocorp.com"
] |
jenny.choi@kakaocorp.com
|
e937061a7829675e71a2d61351c93e9d869558a1
|
816edec5c48c380dfa542d8ce0582f4593c725ef
|
/网络代码/xiaoyouxi1.py
|
ffd815f132fd00c41671fa92de1e2bc8bbcd72f0
|
[] |
no_license
|
liujiang9/python0421
|
c75f351d32e39fb9f8ad1b8af4129c8e170dfbb7
|
e11f5fcb10f37a0f0663e4c746ca862b076f9aee
|
refs/heads/master
| 2022-07-26T09:34:30.881906
| 2020-05-21T09:40:29
| 2020-05-21T09:40:29
| 265,728,500
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,137
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# 作者:魏明泽
# 参考网址: http://2048game.com/
import random
import math
__mataclass__ = type # 使用新式类
# 此类为地图模块封装的类
class map2048():
    """4x4 board model for the 2048 game.

    The grid lives in self.data (list of rows); moves mutate it in place
    and return True only when the board actually changed, so the caller
    knows whether to spawn a new tile.
    """

    def reset(self):
        """Reset to an empty 4x4 grid and drop in the two starting tiles."""
        self.__row = 4  # number of rows
        self.__col = 4  # number of columns
        self.data = [
            [0 for x in range(self.__col)]
            for y in range(self.__row)
        ]
        self.fill2()
        self.fill2()

    def __init__(self):
        self.reset()

    def get_space_count(self):
        """Return the number of empty (zero) cells."""
        count = 0
        for r in self.data:
            count += r.count(0)
        return count

    def get_score(self):
        """Score: each tile >= 4 contributes value * (log2(value) - 1)."""
        s = 0
        for r in self.data:
            for c in r:
                s += 0 if c < 4 else c * int((math.log(c, 2) - 1.0))
        return s

    def fill2(self):
        """Place a 2 in a uniformly random empty cell.

        Returns False when the board is full, True once the tile lands.
        """
        blank_count = self.get_space_count()
        if 0 == blank_count:
            return False
        # Pick the pos-th empty cell in row-major order.
        pos = random.randrange(0, blank_count)
        offset = 0
        for r in self.data:
            for ci in range(self.__col):
                if 0 == r[ci]:
                    if offset == pos:
                        r[ci] = 2
                        return True
                    offset += 1

    def is_gameover(self):
        """The game is over when the board is full and no two adjacent
        tiles (horizontally or vertically) are equal."""
        for r in self.data:
            # Any empty cell means a move is still possible.
            if r.count(0):
                return False
            # Horizontally adjacent equal tiles can still merge.
            for i in range(self.__col - 1):
                if r[i] == r[i + 1]:
                    return False
        # BUG FIX: scan ALL columns. This used range(self.__col - 1),
        # which silently skipped the last column's vertical pairs and
        # could declare game-over while a vertical merge was available.
        for c in range(self.__col):
            for r in range(self.__row - 1):
                if self.data[r][c] == self.data[r + 1][c]:
                    return False
        return True

    def left(self):
        """Shift and merge all tiles to the left.

        Returns True only if the board actually changed.
        """
        moveflag = False
        # Slide non-zero tiles left to fill gaps.
        # BUG FIX: only count a shift as a move when a real tile lands in
        # a gap; swapping two zeros used to set moveflag spuriously, which
        # made the caller spawn a new tile after a no-op move.
        for times in range(self.__col - 1):
            for r in self.data:
                for c in range(self.__col - 1):
                    if 0 == r[c] and 0 != r[c + 1]:
                        moveflag = True
                        r[c] = r[c + 1]
                        r[c + 1] = 0
        # Merge equal neighbours; the doubled value lands on the left.
        # BUG FIX: require a non-zero value — merging 0 with 0 is a no-op
        # but it also used to set moveflag.
        for r in self.data:
            for c in range(self.__col - 1):
                if r[c] != 0 and r[c] == r[c + 1]:
                    moveflag = True
                    r[c] *= 2
                    r[c + 1] = 0
        # Slide once more to close the gaps opened by the merges.
        for times in range(self.__col - 1):
            for r in self.data:
                for c in range(self.__col - 1):
                    if 0 == r[c] and 0 != r[c + 1]:
                        moveflag = True
                        r[c] = r[c + 1]
                        r[c + 1] = 0
        return moveflag

    def right(self):
        """Right move = mirror every row, move left, mirror back."""
        for r in self.data:
            r.reverse()
        moveflag = self.left()
        for r in self.data:
            r.reverse()
        return moveflag

    def up(self):
        """Shift and merge all tiles upwards (same fixes as left())."""
        moveflag = False
        # Slide non-zero tiles up to fill gaps.
        for times in range(self.__row - 1):
            for c in range(self.__col):
                for r in range(self.__row - 1):
                    if 0 == self.data[r][c] and 0 != self.data[r + 1][c]:
                        moveflag = True
                        self.data[r][c] = self.data[r + 1][c]
                        self.data[r + 1][c] = 0
        # Merge equal vertical neighbours; the doubled value lands on top.
        for c in range(self.__col):
            for r in range(self.__row - 1):
                if self.data[r][c] != 0 and self.data[r][c] == self.data[r + 1][c]:
                    moveflag = True
                    self.data[r][c] *= 2
                    self.data[r + 1][c] = 0
        # Slide again to close gaps opened by the merges.
        for times in range(self.__row - 1):
            for c in range(self.__col):
                for r in range(self.__row - 1):
                    if 0 == self.data[r][c] and 0 != self.data[r + 1][c]:
                        moveflag = True
                        self.data[r][c] = self.data[r + 1][c]
                        self.data[r + 1][c] = 0
        return moveflag

    def down(self):
        """Down move = flip the row order, move up, flip back."""
        self.data.reverse()
        moveflag = self.up()
        self.data.reverse()
        return moveflag
import sys
if (sys.version_info > (3, 0)):
from tkinter import *
from tkinter import messagebox
else:
from tkinter import *
game = map2048()
keymap = {
'a': game.left,
'd': game.right,
'w': game.up,
's': game.down,
'Left': game.left,
'Right': game.right,
'Up': game.up,
'Down': game.down,
'q': exit,
}
game_bg_color = "#bbada0"
mapcolor = {
0: ("#cdc1b4", "#776e65"),
2: ("#eee4da", "#776e65"),
4: ("#ede0c8", "#f9f6f2"),
8: ("#f2b179", "#f9f6f2"),
16: ("#f59563", "#f9f6f2"),
32: ("#f67c5f", "#f9f6f2"),
64: ("#f65e3b", "#f9f6f2"),
128: ("#edcf72", "#f9f6f2"),
256: ("#edcc61", "#f9f6f2"),
512: ("#e4c02a", "#f9f6f2"),
1024: ("#e2ba13", "#f9f6f2"),
2048: ("#ecc400", "#f9f6f2"),
4096: ("#ae84a8", "#f9f6f2"),
8192: ("#b06ca8", "#f9f6f2"),
}
# 游戏各方块的lable数据
map_labels = []
# 鼠标按下处理函数
def on_mouse_down(event):
    """Debug handler: log the click position to stdout (bound to Button-1 release)."""
    print("clicked at", event.x, event.y)
# 键盘按下处理函数
def on_key_down(event):
    """Keyboard handler: dispatch the key to its move via keymap.

    A new tile is spawned only when the move actually changed the board;
    on game over, ask whether to quit or restart.
    """
    keysym = event.keysym
    if keysym in keymap:
        if keymap[keysym]():
            game.fill2()
        update_ui()
        if game.is_gameover():
            mb = messagebox.askyesno(title="gameover", message="游戏结束!\n是否退出游戏!")
            if mb:
                exit()
            else:
                game.reset()
                update_ui()
# 刷新界面函数
def update_ui():
    """Sync every tile label (text, colours) and the score with game state."""
    # Update each cell's Label from the corresponding board value.
    for r in range(len(game.data)):
        for c in range(len(game.data[0])):
            number = game.data[r][c]
            label = map_labels[r][c]
            label['text'] = str(number) if number else ''
            label['bg'] = mapcolor[number][0]
            label['foreground'] = mapcolor[number][1]
    label_score['text'] = str(game.get_score())
# 以下为2048的界面
root = Tk()
root.title('2048')
# root.iconbitmap('./favicon.ico') # 48x48 ico bitmap
frame = Frame(root, width=300, height=300, bg=game_bg_color)
frame.grid(sticky=N + E + W + S)
# 按键事件见:http://blog.csdn.net/qq_25600055/article/details/46942035
# 设置焦点能接收按键事件
frame.focus_set()
frame.bind("<Key>", on_key_down)
# 以下绑定鼠标按下事件
# frame.bind("<Button-1>", on_mouse_down)
# 以下绑定鼠标移动事件
# frame.bind("<Motion>", on_mouse_down)
# 以下绑定鼠标抬起事件
frame.bind("<ButtonRelease-1>", on_mouse_down)
# 见 :http://blog.csdn.net/wjciayf/article/details/50550947
# 初始化图形界面
for r in range(len(game.data)):
row = []
for c in range(len(game.data[0])):
value = game.data[r][c]
text = '' if 0 == value else str(value)
label = Label(frame, text=text, width=4, height=2,
font=("黑体", 30, "bold"))
label.grid(row=r, column=c, padx=5, pady=5, sticky=N + E + W + S)
row.append(label)
map_labels.append(row)
bottom_row = len(game.data)
print("button", str(bottom_row))
label = Label(frame, text='分数', font=("黑体", 30, "bold"),
bg="#bbada0", fg="#eee4da")
label.grid(row=bottom_row, column=0, padx=5, pady=5)
label_score = Label(frame, text='0', font=("黑体", 30, "bold"),
bg="#bbada0", fg="#ffffff")
label_score.grid(row=bottom_row, columnspan=2, column=1, padx=5, pady=5)
def reset_game():
    """Restart-button callback: wipe the board and redraw the UI."""
    game.reset()
    update_ui()
# restart_button = Button(frame, text='重新开始', command=reset_game)
restart_button = Button(frame, text='重新开始', font=("黑体", 16, "bold"),
# width=4, height=2,
bg="#8f7a66", fg="#f9f6f2", command=reset_game)
restart_button.grid(row=bottom_row, column=3, padx=5, pady=5)
update_ui()
root.mainloop()
|
[
"228923910@qq.com"
] |
228923910@qq.com
|
753f12dafc8e475b345fe20b589264cf3a0d6899
|
ce8f4075655fe8c1f12905d32f3eeb50254f31a6
|
/apps/catalogs/api/v1/serializers.py
|
13f8af5d99932425b27bf53c9318b94ea28b1d78
|
[] |
no_license
|
abogdanov87/mrlapkins
|
837cab2be1b80174cb17b882243b931ce4b86d9e
|
2b02b9b03869fb2212c94b891e995ff73dc0f123
|
refs/heads/master
| 2023-05-29T15:16:13.318231
| 2021-06-11T07:22:39
| 2021-06-11T07:22:39
| 356,865,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,809
|
py
|
from rest_framework import serializers
from rest_framework_bulk import BulkListSerializer, BulkSerializerMixin
from catalogs.models import (
Breed,
GenderSpec,
EyeColor,
CoatColor,
Gallery,
)
class GenderSpecSerializer(serializers.ModelSerializer):
    """Serializes per-gender body measurement ranges for a breed."""
    class Meta:
        model = GenderSpec
        fields = (
            'gender',
            'body_length_min',
            'body_length_max',
            'body_height_min',
            'body_height_max',
            'body_weight_min',
            'body_weight_max',
        )
    def validate(self, data):
        # No cross-field rules yet; hook kept for future validation.
        return data
class EyeColorSerializer(serializers.ModelSerializer):
    """Serializes a single eye colour entry."""
    class Meta:
        model = EyeColor
        fields = (
            'color',
        )
    def validate(self, data):
        # No custom rules yet.
        return data
class CoatColorSerializer(serializers.ModelSerializer):
    """Serializes the components of a coat colour description."""
    class Meta:
        model = CoatColor
        fields = (
            'base_color',
            'silver_gold',
            'dilute_modifier',
            'amount_of_white',
            'tabby_pattern',
            'pointed_pattern',
        )
    def validate(self, data):
        # No custom rules yet.
        return data
class GallerySerializer(serializers.ModelSerializer):
    """Serializes one gallery image with its label."""
    class Meta:
        model = Gallery
        fields = (
            'label',
            'image',
        )
    def validate(self, data):
        # No custom rules yet.
        return data
class BreedSerializer(serializers.ModelSerializer):
    """Full breed payload.

    Each rated characteristic is exposed as a {rank, title} dict via a
    SerializerMethodField, and to_representation() attaches the nested
    gender_spec / eye_color / coat_color / gallery lists.
    """
    allergenicity = serializers.SerializerMethodField()
    molt = serializers.SerializerMethodField()
    intelligence = serializers.SerializerMethodField()
    sociability = serializers.SerializerMethodField()
    need_for_care = serializers.SerializerMethodField()
    activity = serializers.SerializerMethodField()
    friendliness = serializers.SerializerMethodField()
    health = serializers.SerializerMethodField()
    pet_type_name = serializers.SerializerMethodField()
    class Meta:
        model = Breed
        fields = (
            'id',
            'pet_type',
            'pet_type_name',
            'code',
            'wcf',
            'alias',
            'title',
            'short_description',
            'full_description',
            'origin',
            'character',
            'image',
            'allergenicity',
            'molt',
            'intelligence',
            'sociability',
            'need_for_care',
            'activity',
            'friendliness',
            'health',
            'gender_spec',
            'active',
        )
    # Each get_<field> pairs the stored rank with its display label.
    def get_allergenicity(self, obj):
        return {
            'rank': obj.allergenicity,
            'title': obj.get_allergenicity_display(),
        }
    def get_molt(self, obj):
        return {
            'rank': obj.molt,
            'title': obj.get_molt_display(),
        }
    def get_intelligence(self, obj):
        return {
            'rank': obj.intelligence,
            'title': obj.get_intelligence_display(),
        }
    def get_sociability(self, obj):
        return {
            'rank': obj.sociability,
            'title': obj.get_sociability_display(),
        }
    def get_need_for_care(self, obj):
        return {
            'rank': obj.need_for_care,
            'title': obj.get_need_for_care_display(),
        }
    def get_activity(self, obj):
        return {
            'rank': obj.activity,
            'title': obj.get_activity_display(),
        }
    def get_friendliness(self, obj):
        return {
            'rank': obj.friendliness,
            'title': obj.get_friendliness_display(),
        }
    def get_health(self, obj):
        return {
            'rank': obj.health,
            'title': obj.get_health_display(),
        }
    def get_pet_type_name(self, obj):
        return obj.get_pet_type_display()
    def to_representation(self, instance):
        """Attach the nested related collections to the base payload."""
        response = super().to_representation(instance)
        response['gender_spec'] = GenderSpecSerializer(
            instance.gender_spec,
            many=True
        ).data
        response['eye_color'] = EyeColorSerializer(
            instance.eye_color,
            many=True
        ).data
        response['coat_color'] = CoatColorSerializer(
            instance.coat_color,
            many=True
        ).data
        response['gallery'] = GallerySerializer(
            instance.gallery,
            many=True
        ).data
        return response
    def validate(self, data):
        # No custom rules yet.
        return data
class BreedShortSerializer(serializers.ModelSerializer):
    """List-view breed payload: same rated characteristics as
    BreedSerializer but without the long-text fields and nested relations.

    NOTE(review): all get_* methods are verbatim copies of BreedSerializer's;
    a shared mixin/base class would remove the duplication.
    """
    allergenicity = serializers.SerializerMethodField()
    molt = serializers.SerializerMethodField()
    intelligence = serializers.SerializerMethodField()
    sociability = serializers.SerializerMethodField()
    need_for_care = serializers.SerializerMethodField()
    activity = serializers.SerializerMethodField()
    friendliness = serializers.SerializerMethodField()
    health = serializers.SerializerMethodField()
    pet_type_name = serializers.SerializerMethodField()
    class Meta:
        model = Breed
        fields = (
            'id',
            'pet_type',
            'pet_type_name',
            'code',
            'wcf',
            'alias',
            'title',
            'short_description',
            'image',
            'allergenicity',
            'molt',
            'intelligence',
            'sociability',
            'need_for_care',
            'activity',
            'friendliness',
            'health',
        )
    # Each get_<field> pairs the stored rank with its display label.
    def get_allergenicity(self, obj):
        return {
            'rank': obj.allergenicity,
            'title': obj.get_allergenicity_display(),
        }
    def get_molt(self, obj):
        return {
            'rank': obj.molt,
            'title': obj.get_molt_display(),
        }
    def get_intelligence(self, obj):
        return {
            'rank': obj.intelligence,
            'title': obj.get_intelligence_display(),
        }
    def get_sociability(self, obj):
        return {
            'rank': obj.sociability,
            'title': obj.get_sociability_display(),
        }
    def get_need_for_care(self, obj):
        return {
            'rank': obj.need_for_care,
            'title': obj.get_need_for_care_display(),
        }
    def get_activity(self, obj):
        return {
            'rank': obj.activity,
            'title': obj.get_activity_display(),
        }
    def get_friendliness(self, obj):
        return {
            'rank': obj.friendliness,
            'title': obj.get_friendliness_display(),
        }
    def get_health(self, obj):
        return {
            'rank': obj.health,
            'title': obj.get_health_display(),
        }
    def get_pet_type_name(self, obj):
        return obj.get_pet_type_display()
    def validate(self, data):
        # No custom rules yet.
        return data
|
[
"aybogdanov@yandex-team.ru"
] |
aybogdanov@yandex-team.ru
|
0aa4cdf595a5d08553bb2237dd0184e99d0f2e1c
|
7d35c812dbf2dbb690543653a26fe5ef7a6c949f
|
/apps/coc/views.py
|
b814dc29bdd55c657dce9121a42bd865d2524cd3
|
[] |
no_license
|
jvillama/Catalog-Project
|
96383855fa3da1ff3981e46723485691e68ea209
|
8a042873440bf328e8331175e7712f6fc26ba8a5
|
refs/heads/master
| 2016-09-11T08:34:14.563489
| 2013-04-03T22:32:10
| 2013-04-03T22:32:10
| 1,957,117
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,948
|
py
|
from django.http import HttpResponseRedirect,HttpResponse
from django.shortcuts import render_to_response
from django.conf.urls.defaults import *
from models import *
from ftplib import FTP
from datetime import datetime
from django.utils import simplejson as json
from django.core import serializers
from django.contrib.auth.decorators import login_required
from django.template import Template, context, RequestContext
import urllib
from django.conf import settings
from boto.s3.connection import S3Connection
from boto.s3.key import Key
def get_ftp_filelist( company ): # deprecated, getting list from S3
    """Return the .pdf filenames in the company's configured FTP directory.

    NOTE(review): deprecated in favour of S3; 10 here is the FTP login's
    account argument per ftplib's signature (login(user, passwd, acct)),
    not a timeout — confirm intent.
    """
    files = []
    #try:
    ftp = FTP( company.ftp_server )
    ftp.login( company.ftp_username , company.ftp_password, 10 )
    ftp.cwd( company.ftp_url )
    files = ftp.nlst()
    ftp.quit()
    # Keep only names containing ".pdf".
    pdf_list = []
    for f in files:
        if f.find( '.pdf' ) >= 0:
            pdf_list.append( f )
    return pdf_list
def download(request, company, filename): # not used, may be used if ftp is implemented again
    """Fetch a single file from the company's FTP server and stream it back
    as an HTTP PDF response.

    Fix: the temporary file is now closed (via ``with``) before being
    reopened for the response; the original never closed it, so buffered
    bytes could still be unflushed when the response read the file. Also
    avoids shadowing the ``file`` builtin.
    """
    company = Company.objects.get(name=company)
    ftp = FTP( company.ftp_server )
    ftp.login( company.ftp_username , company.ftp_password, 10 )
    ftp.cwd( company.ftp_url )
    with open(filename, 'wb') as downloaded:
        ftp.retrbinary('RETR '+ filename, downloaded.write)
    response = HttpResponse(open(filename, 'rb'), mimetype='application/pdf')
    # If you want to download as attachment, uncomment next line
    #response['Content-Disposition'] = 'attachment; filename='+filename
    return response
def get_files_by_string( query, companies ):
    """Return File rows whose uid or description contains `query`,
    restricted to the given companies.

    `companies` may hold company names (strings) or Company-like objects;
    the except branch retries the lookup with `c.name` for the object case.
    """
    files_by_id = {}
    files_by_description = {}
    final_search_set = []
    found_companies = []
    # Resolve each entry to a Company row (name string first, .name fallback).
    for c in companies:
        try:
            found_companies.append( Company.objects.get( name__iexact=c ) )
        except:
            found_companies.append( Company.objects.get( name__iexact=c.name ) )
            print "Can't find query or query error"
            print found_companies
            print c
    # Two independent substring searches; errors are silently ignored.
    try:
        files_by_description = File.objects.filter( description__icontains=query )
    except:
        pass
    try:
        files_by_id = File.objects.filter( uid__icontains=query )
    except:
        pass
    # Union the two result sets, de-duplicating on uid.
    for f_by_id in files_by_id:
        final_search_set.append( f_by_id )
    id_found = False
    for f_by_descrip in files_by_description:
        for f_by_id in files_by_id:
            if f_by_descrip.uid == f_by_id.uid:
                id_found = True
        if not id_found:
            final_search_set.append( f_by_descrip )
        id_found = False
    # Drop files whose company is not in the requested set; iterate a copy
    # ([:]) so removing from the live list is safe.
    company_found = False
    index = 0
    for f in final_search_set[:]:
        for c in found_companies:
            if f.company == c:
                company_found = True
        if not company_found:
            final_search_set.remove( f )
        company_found = False
        index = index + 1  # NOTE: index is never used
    return final_search_set
@login_required
def home(request):
    """Authenticated CoC landing page.

    Periodically (every App_Data.sync_interval seconds) rebuilds the File
    table from the S3 bucket listing, then renders the file list filtered
    by the optional ?q= query string.
    """
    app_data = []
    debug = {}  # unused
    sync_latest = False
    current_time = datetime.now()
    last_sync_time = 0  # unused
    time_delta = 0
    query = request.GET.get('q')
    companies = request.GET.getlist('company')
    app_date = None  # unused
    #check to see if we should sync
    try:
        app_data = App_Data.objects.get( uid=0 )
    except Exception, e:
        # First run: create the singleton sync-bookkeeping row and force a sync.
        app_data = App_Data( uid=0, last_update=datetime.now(), sync_interval=1200 )
        app_data.save()
        sync_latest = True
    time_delta = current_time - app_data.last_update
    if time_delta.seconds > app_data.sync_interval:
        sync_latest = True
        app_data.last_update=datetime.now()
        app_data.save()
    # NOTE: this overwrites the ?company= values taken from the request above.
    companies = Company.objects.all()
    print companies
    if sync_latest:
        # Rebuild the File table from scratch using the S3 listing.
        File.objects.all().delete()
        print "syncing"
        for company in companies:
            print company
            #file_list = get_ftp_filelist( company ) #deprecated
            #print file_list
            try:
                conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
                b = conn.get_bucket(settings.COC_BUCKET)
                #rs = b.get_all_keys()
                rs = b.list(company.name)
                print rs
                for key in rs:
                    #print key.name
                    # Key layout appears to be "<company>/<uid>.<description>.<ext>" -- verify.
                    file_chunks = key.name.split('.')
                    new_file = File( uid=file_chunks[0].split('/')[1],
                                     description=file_chunks[1],
                                     modified=datetime.now(),
                                     file_name=key.name.split('/')[1],
                                     company=company )
                    new_file.save()
            except:
                raise
            ''' deprecated as well
            for f in file_list:
                file_chunks = f.split('.')
                new_file = File( uid=file_chunks[0],
                                 description=file_chunks[1],
                                 modified=datetime.now(),
                                 file_name=f,
                                 company=company )
                new_file.save()
            '''
    if query:
        files = get_files_by_string(query, companies)
    else:
        query = ""
        files = File.objects.all()[:200]  # cap the unfiltered listing
    return render_to_response('coc.html', { 'files': files, 'query': query, 'companies': companies, 'user': request.user}, context_instance=RequestContext(request) )
@login_required
def search(request):
    """Authenticated AJAX endpoint: return matching File rows as JSON."""
    term = request.GET.get('q')
    selected = request.GET.getlist('company')
    hits = get_files_by_string(term, selected)
    payload = serializers.serialize("json", hits)
    return HttpResponse(payload, mimetype='application/json')
def home_public(request):
    """Public (no-login) variant of `home`: same S3 sync + listing logic,
    rendered with the coc_public.html template."""
    app_data = []
    debug = {}  # unused
    sync_latest = False
    current_time = datetime.now()
    last_sync_time = 0  # unused
    time_delta = 0
    query = request.GET.get('q')
    companies = request.GET.getlist('company')
    app_date = None  # unused
    #check to see if we should sync
    try:
        app_data = App_Data.objects.get( uid=0 )
    except Exception, e:
        # First run: create the singleton sync-bookkeeping row and force a sync.
        app_data = App_Data( uid=0, last_update=datetime.now(), sync_interval=1200 )
        app_data.save()
        sync_latest = True
    time_delta = current_time - app_data.last_update
    if time_delta.seconds > app_data.sync_interval:
        sync_latest = True
        app_data.last_update=datetime.now()
        app_data.save()
    # NOTE: this overwrites the ?company= values taken from the request above.
    companies = Company.objects.all()
    print companies
    if sync_latest:
        File.objects.all().delete()
        print "syncing"
        for company in companies:
            print company
            #file_list = get_ftp_filelist( company ) #deprecated
            #print file_list
            try:
                conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
                b = conn.get_bucket(settings.COC_BUCKET)
                #rs = b.get_all_keys()
                rs = b.list(company.name)
                print rs
                for key in rs:
                    #print key.name
                    # Key layout appears to be "<company>/<uid>.<description>.<ext>" -- verify.
                    file_chunks = key.name.split('.')
                    new_file = File( uid=file_chunks[0].split('/')[1],
                                     description=file_chunks[1],
                                     modified=datetime.now(),
                                     file_name=key.name.split('/')[1],
                                     company=company )
                    new_file.save()
            except:
                raise
    if query:
        files = get_files_by_string(query, companies)
    else:
        query = ""
        files = File.objects.all()[:200]  # cap the unfiltered listing
    return render_to_response('coc_public.html', { 'files': files, 'query': query, 'companies': companies, 'user': request.user}, context_instance=RequestContext(request) )
def search_public(request):
    """Public AJAX endpoint: return matching File rows as JSON."""
    term = request.GET.get('q')
    selected = request.GET.getlist('company')
    hits = get_files_by_string(term, selected)
    payload = serializers.serialize("json", hits)
    return HttpResponse(payload, mimetype='application/json')
#def server_error(request):
# return render_to_response('500.html')
|
[
"jvillamarzo@gmail.com"
] |
jvillamarzo@gmail.com
|
be5a03fdbca982ec7e49e61ab6d8d22e921b55bb
|
5906ac8a5a3e46a65ac9b8b38c3f7b7b6edf2a49
|
/lipid_maps_download.py
|
b3fee822a9c62d0b4a8370dbe74c42e94136dc96
|
[] |
no_license
|
iwelsh47/friendly-pancake
|
6e29d5c0eaf9cff4a8c9247871a803b6723d805d
|
885bdc1684bf5b276a2f3dac178b5bfaad4fce58
|
refs/heads/master
| 2021-05-31T13:01:24.116208
| 2016-03-15T23:18:55
| 2016-03-15T23:18:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
#!/usr/bin/env python3
'''
Created on 8/12/2015
@author: iwelsh
'''
def main():
    """Download every LIPID MAPS .mol record listed in an LMSD search-results
    CSV, via wget, into a directory tree grouped by the CSV's category and
    sub-category columns, pausing between requests."""
    import os
    from time import sleep
    from random import shuffle
    # Hard-coded input CSV (LMSD search export) and output root.
    source_file = '/Users/iwelsh/Downloads/LMSDSearchResultsDownload13H10M37S07Dec15.csv'
    dest_dir = '/Users/iwelsh/Documents/Lipid_MOL_files/'
    # {0} is the LM ID; it names the output file and fills the query string.
    get_cmd = '/opt/local/bin/wget -O "{0}.mol" "http://www.lipidmaps.org/data/LMSDRecord.php?Mode=File&LMID={0}"'
    with open(source_file,'r') as fh:
        file_data = fh.readlines()[1:]  # skip the CSV header row
    shuffle(file_data)  # randomize download order
    for line in file_data:
        # Naive CSV parsing: split on '","' -- assumes all fields are quoted; verify.
        s = line.split('","')
        dirt = s[5][:-5]  # slices off a fixed-length tail of field 5 -- presumably a suffix; confirm
        dirt = dirt.replace(' ','_')
        sub_dirt = s[6][:-7]  # same for field 6
        sub_dirt = sub_dirt.replace(' ','_')
        final_dir = dest_dir+dirt+'/'+sub_dirt
        if not os.path.isdir(final_dir):
            os.makedirs(final_dir)
        os.chdir(final_dir)
        # s[0][1:] drops the leading '"' of the first field (the LM ID).
        os.system(get_cmd.format(s[0][1:]))
        sleep(15)  # throttle requests to the server

if __name__ == '__main__':
    main()
|
[
"i.welsh@massey.ac.nz"
] |
i.welsh@massey.ac.nz
|
29545e9ea17d07032110cf79e7922acb0e7881a7
|
3783129f07f93414327dadcd07b42334d980b4ed
|
/dbUtils/leagueDbUtils.py
|
17cac93d7da6ead4ae58b37cf973b91f41fe02d6
|
[] |
no_license
|
ohadkorenok/football_project
|
1e9b8c2afb93b0118a34a7aeb1980cf9812baced
|
a7a68a6c2b2376ca8811463f0c7967cfb17b39af
|
refs/heads/master
| 2023-08-25T09:00:15.660374
| 2020-07-24T10:48:47
| 2020-07-24T10:48:47
| 282,188,578
| 0
| 0
| null | 2023-08-14T21:37:52
| 2020-07-24T10:18:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
from dbUtils.db import *
from consts import *
from bson.json_util import dumps
from dbUtils.commonDbUtils import get_item, get_items
def create_league(league_country: str, league_name: str, league_level: int):
    """
    Create a league document unless one with the same name already exists.

    :param league_country: str
    :param league_name: str
    :param league_level: int -- 0 is the top flight, +1 per level down
        (example: third league -> 2)
    :return: JSON dump of the created league, or an error dict with a
        return_code and Message
    """
    # Validate types up front so malformed payloads never reach the DB.
    # Fix: the original implicit string concatenation dropped a space,
    # producing "...league levelhas to be int..." in the error message.
    if not isinstance(league_country, str) or not isinstance(league_name, str) or not isinstance(league_level, int):
        return {"return_code": INVALID_FIELDS,
                "Message": 'League country and league name have to be strings and league level '
                           'has to be int (from 0 as top and 1 for each level, example: third league - 2)'}
    league = {"league_country": league_country,
              "league_name": league_name,
              "league_level": league_level,
              }
    if league_collection.find_one({"league_name": league_name}) is None:
        try:
            # insert_one mutates `league` in place, adding the new _id;
            # bson's dumps can serialize the ObjectId.
            league_collection.insert_one(league)
            return dumps(league)
        except Exception as e:
            return {"return_code": UNKNOWN_EXCEPTION, "Message": 'Exception while inserting league', 'Error': str(e)}
    else:
        return {"return_code": LEAGUE_ALREADY_EXIST, "Message": 'League Already exist!'}
def get_league(league_id):
    """Fetch one league document by its string id.

    Delegates to the shared `get_item` helper, which handles the
    string-to-BSON id conversion and the not-found error code.
    """
    not_found_code = LEAGUE_DOES_NOT_EXIST
    return get_item(league_collection, league_id, not_found_code)
def get_leagues(offset, limit):
    """Return a paginated slice of league documents via the shared helper."""
    collection = league_collection
    return get_items(collection, offset, limit)
|
[
"ohadkorenok@gmail.com"
] |
ohadkorenok@gmail.com
|
849d1c312943128ef4a358e9fa0f96817013296f
|
b2912c376313f46b484ececea687c150a0272066
|
/chapter7/src/pearson_correlation.py
|
01d9abb4b065f19b75888929cef2544e337b7d46
|
[] |
no_license
|
xfsm1912/Approaching_Any_ML_Problem
|
c6cf186fe5cccda9af9467d6bc7bf5e2a96f507a
|
edebb8e5906e7bed8aaf015b0d3c36a40fe3f165
|
refs/heads/main
| 2023-04-26T23:10:51.413127
| 2020-11-03T21:05:41
| 2020-11-03T21:05:41
| 309,792,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
import pandas as pd
import numpy as np

from sklearn.datasets import fetch_california_housing

# fetch a regression dataset (California housing)
data = fetch_california_housing()
X, col_names, y = data['data'], data['feature_names'], data['target']

# convert to pandas dataframe
df = pd.DataFrame(X, columns=col_names)

# introduce a highly correlated column (sqrt of median income)
df.loc[:, 'MedInc_Sqrt'] = df.MedInc.apply(np.sqrt)

# get correlation matrix (pearson); MedInc vs MedInc_Sqrt will be near 1
print(df.corr())
|
[
"jianhua@lifeq.com"
] |
jianhua@lifeq.com
|
3dd0dc3cde5f64d32b234beef58ea1cc8db7eec4
|
af4174eba006aafcb9470f7a2c8c30f581e095f1
|
/odd.py
|
d8101873a222afef9aad1e444a7616c9d62982b8
|
[] |
no_license
|
sanderfo/IN1900
|
bb20c15fdce6ea541814379cbb370e69252544ad
|
63be05db075b3a9d6ce0a1734d80e473a686c29a
|
refs/heads/master
| 2020-04-02T22:17:58.380709
| 2018-10-26T12:10:20
| 2018-10-26T12:10:20
| 154,828,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
"""
first number: odd=1
next number: odd=odd+2
"""
n = 10
odd = 1
while odd <= n:
print(odd)
odd += 2 # betyr odd = odd + 2
|
[
"37813853+sanderfo@users.noreply.github.com"
] |
37813853+sanderfo@users.noreply.github.com
|
5e856de0fe8e002e1c8c78be5e8513063b4ba841
|
b2086f422cc45312ac04cd190bc0b56fde236339
|
/action_handler/action_handler.py
|
094eab2f89b44ebe3952b0e112ce162a90581790
|
[] |
no_license
|
spelinski/SmartMirror
|
c05782600501e18836b74c6ab16f3cb591813db8
|
ea3c393826bc4349a86d5ddb18acfc9febea3d0c
|
refs/heads/master
| 2021-05-12T04:53:24.183225
| 2018-01-16T04:23:53
| 2018-01-16T04:23:53
| 117,176,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
class ActionHandler():
    """Dispatch voice/command phrases to music-player actions."""

    # Ordered (phrase, player-method) pairs; first substring match wins.
    _COMMANDS = (
        ("play pandora", "play"),
        ("stop pandora", "stop"),
        ("kill pandora", "close"),
    )

    def __init__(self, music_player):
        self.player = music_player

    def take_action(self, action_string):
        """Run the first player action whose phrase occurs in *action_string*."""
        for phrase, method_name in self._COMMANDS:
            if phrase in action_string:
                return getattr(self.player, method_name)()
        raise UndefinedCommandError()
class UndefinedCommandError(Exception):
    """Raised when a command string matches no known player action."""
|
[
"rohk88@gmail.com"
] |
rohk88@gmail.com
|
96e20c5779740d30ef905512fee740ee9b3c117e
|
07ecc53b5be6b1a34914a0e02265e847f3ac1a65
|
/Python/Tree/104_Easy_二叉树的最大深度.py
|
4431c0ab357b2dfc8b6d4820c6fcafbc4ced391e
|
[] |
no_license
|
JasmineRain/Algorithm
|
764473109ad12c051f5337ed6f22b517ed9bff30
|
84d7e11c1a01b1994e04a3ab446f0a35eb3d362a
|
refs/heads/master
| 2023-03-14T00:39:51.767074
| 2021-03-09T12:41:44
| 2021-03-09T12:41:44
| 289,603,630
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
from collections import deque
# Definition for a binary tree node.
from typing import List
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
class Solution:
    """LeetCode 104: maximum depth of a binary tree."""

    # A recursive alternative (1 + max of child depths) would also work;
    # this version counts levels iteratively instead.
    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of levels via breadth-first traversal."""
        if not root:
            return 0
        depth = 0
        frontier = [root]
        while frontier:
            depth += 1
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        return depth
# if __name__ == "__main__":
# S = Solution()
# print(S.isSameTree(nums1=[2], nums2=[]))
|
[
"530781348@qq.com"
] |
530781348@qq.com
|
c3054920c597c6aee2c2af7d2a72e56941fe7f3f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02646/s977415720.py
|
ff219157c414449e91daf41f24d22ed56eb957d1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
#!/usr/bin/env python3
"""Read two integer pairs and a limit t from stdin; print YES or NO."""
import sys
import numpy as np

input = sys.stdin.readline

a, b = map(int, input().split())
c, d = map(int, input().split())
t = int(input())

if b == d:
    # Equal second components: the quotient below would divide by zero.
    print('NO')
elif (abs(a - c) / (b - d)) <= t and d < b:
    print('YES')
else:
    print('NO')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3193d3fd2c11f5ae2bbee3532a8ca2474594c0e1
|
e7aa3755381b216883605d86fae50db6d632a338
|
/3.Python/shaheer.py
|
b0804416a4bfc17c195c26d3968dc5243fa33fe1
|
[
"MIT"
] |
permissive
|
anjima1008/Learn-Coding
|
b2520e447158b259d4deddc7cb5da4fb10e778d9
|
553f3e818c3e4dd751e317a4ac9905487d5894fb
|
refs/heads/master
| 2022-12-27T08:47:18.129700
| 2020-10-16T07:33:50
| 2020-10-16T07:33:50
| 304,551,428
| 1
| 0
|
MIT
| 2020-10-16T07:24:42
| 2020-10-16T07:24:41
| null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# Simple introduction script: prints a greeting and formatted profile details.
# Fix: the original contained the unfilled template token *YOUR AGE*, which is
# a syntax error; the format string's "19:" label indicates the intended value.
name, age = "shaheer", 19
username = "shaheershah313"
print ('Hello!')
print("Name: {}\n19: {}\nshaheershah313: {}".format(name, age, username))
|
[
"noreply@github.com"
] |
anjima1008.noreply@github.com
|
94973b456f84c36b0cc830a508938ebc170725b2
|
08519e9f78f622058872bf0f34a75336eb49a984
|
/reinforcement/gridworld.py
|
31460a01a7b825b5cf38d62855692445c639da1f
|
[] |
no_license
|
timolapre/Project2-KI-Master
|
0035d9330289bf6f5cc9e9394057a8b3223bfc23
|
7b60f931b6208a882178c2bd4a7244f477df6bf6
|
refs/heads/master
| 2020-04-11T19:07:24.451542
| 2018-12-17T19:55:14
| 2018-12-17T19:55:14
| 162,023,386
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 20,993
|
py
|
# gridworld.py
# ------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import random
import sys
import mdp
import environment
import util
import optparse
class Gridworld(mdp.MarkovDecisionProcess):
    """
    Gridworld

    MDP over a 2-D grid of cells: ' ' (open), '#' (wall), 'S' (start),
    or a number (an exit cell whose value is the exit reward).
    """
    def __init__(self, grid):
        # layout: accept either a raw list-of-rows or a prebuilt Grid
        if type(grid) == type([]): grid = makeGrid(grid)
        self.grid = grid

        # parameters
        self.livingReward = 0.0
        self.noise = 0.2  # chance of slipping perpendicular to the intended move

    def setLivingReward(self, reward):
        """
        The (negative) reward for exiting "normal" states.

        Note that in the R+N text, this reward is on entering
        a state and therefore is not clearly part of the state's
        future rewards.
        """
        self.livingReward = reward

    def setNoise(self, noise):
        """
        The probability of moving in an unintended direction.
        """
        self.noise = noise

    def getPossibleActions(self, state):
        """
        Returns list of valid actions for 'state'.

        Note that you can request moves into walls and
        that "exit" states transition to the terminal
        state under the special action "done".
        """
        if state == self.grid.terminalState:
            return ()
        x,y = state
        if type(self.grid[x][y]) == int:
            return ('exit',)  # numeric cell: only action is to exit
        return ('north','west','south','east')

    def getStates(self):
        """
        Return list of all states.
        """
        # The true terminal state.
        states = [self.grid.terminalState]
        for x in range(self.grid.width):
            for y in range(self.grid.height):
                if self.grid[x][y] != '#':
                    state = (x,y)
                    states.append(state)
        return states

    def getReward(self, state, action, nextState):
        """
        Get reward for state, action, nextState transition.

        Note that the reward depends only on the state being
        departed (as in the R+N book examples, which more or
        less use this convention).
        """
        if state == self.grid.terminalState:
            return 0.0
        x, y = state
        cell = self.grid[x][y]
        if type(cell) == int or type(cell) == float:
            return cell  # numeric cells are exit rewards
        return self.livingReward

    def getStartState(self):
        # Scan for the single 'S' cell.
        for x in range(self.grid.width):
            for y in range(self.grid.height):
                if self.grid[x][y] == 'S':
                    return (x, y)
        # NOTE: Python 2-style string raise, kept verbatim.
        raise 'Grid has no start state'

    def isTerminal(self, state):
        """
        Only the TERMINAL_STATE state is *actually* a terminal state.
        The other "exit" states are technically non-terminals with
        a single action "exit" which leads to the true terminal state.
        This convention is to make the grids line up with the examples
        in the R+N textbook.
        """
        return state == self.grid.terminalState

    def getTransitionStatesAndProbs(self, state, action):
        """
        Returns list of (nextState, prob) pairs
        representing the states reachable
        from 'state' by taking 'action' along
        with their transition probabilities.
        """
        if action not in self.getPossibleActions(state):
            print action, self.getPossibleActions(state)
            raise "Illegal action!"

        if self.isTerminal(state):
            return []

        x, y = state

        if type(self.grid[x][y]) == int or type(self.grid[x][y]) == float:
            # Exit cells transition deterministically to the terminal state.
            termState = self.grid.terminalState
            return [(termState, 1.0)]

        successors = []

        # Moves into walls / off the grid leave the agent in place.
        northState = (self.__isAllowed(y+1,x) and (x,y+1)) or state
        westState = (self.__isAllowed(y,x-1) and (x-1,y)) or state
        southState = (self.__isAllowed(y-1,x) and (x,y-1)) or state
        eastState = (self.__isAllowed(y,x+1) and (x+1,y)) or state

        # Intended direction gets 1-noise; the two perpendicular directions
        # split the remaining noise mass evenly.
        if action == 'north' or action == 'south':
            if action == 'north':
                successors.append((northState,1-self.noise))
            else:
                successors.append((southState,1-self.noise))
            massLeft = self.noise
            successors.append((westState,massLeft/2.0))
            successors.append((eastState,massLeft/2.0))

        if action == 'west' or action == 'east':
            if action == 'west':
                successors.append((westState,1-self.noise))
            else:
                successors.append((eastState,1-self.noise))
            massLeft = self.noise
            successors.append((northState,massLeft/2.0))
            successors.append((southState,massLeft/2.0))

        # Merge duplicate successor states (e.g. several slips hit a wall).
        successors = self.__aggregate(successors)

        return successors

    def __aggregate(self, statesAndProbs):
        # Sum probabilities assigned to the same successor state.
        counter = util.Counter()
        for state, prob in statesAndProbs:
            counter[state] += prob
        newStatesAndProbs = []
        for state, prob in counter.items():
            newStatesAndProbs.append((state, prob))
        return newStatesAndProbs

    def __isAllowed(self, y, x):
        # True when (x, y) lies on the grid and is not a wall.
        if y < 0 or y >= self.grid.height: return False
        if x < 0 or x >= self.grid.width: return False
        return self.grid[x][y] != '#'
class GridworldEnvironment(environment.Environment):
    """Stateful environment wrapper around a Gridworld MDP: tracks the
    current state and samples stochastic transitions."""

    def __init__(self, gridWorld):
        self.gridWorld = gridWorld
        self.reset()

    def getCurrentState(self):
        return self.state

    def getPossibleActions(self, state):
        return self.gridWorld.getPossibleActions(state)

    def doAction(self, action):
        """Sample a successor for `action`, advance the current state, and
        return the (nextState, reward) pair."""
        state = self.getCurrentState()
        (nextState, reward) = self.getRandomNextState(state, action)
        self.state = nextState
        return (nextState, reward)

    def getRandomNextState(self, state, action, randObj=None):
        """Sample one successor by inverse-CDF over the transition
        probabilities; `randObj` lets callers pass a seeded RNG."""
        rand = -1.0
        if randObj is None:
            rand = random.random()
        else:
            rand = randObj.random()
        sum = 0.0  # cumulative probability mass seen so far
        successors = self.gridWorld.getTransitionStatesAndProbs(state, action)
        for nextState, prob in successors:
            sum += prob
            if sum > 1.0:
                # NOTE: Python 2-style string raise, kept verbatim.
                raise 'Total transition probability more than one; sample failure.'
            if rand < sum:
                reward = self.gridWorld.getReward(state, action, nextState)
                return (nextState, reward)
        raise 'Total transition probability less than one; sample failure.'

    def reset(self):
        self.state = self.gridWorld.getStartState()
class Grid:
    """
    A 2-dimensional array of immutables backed by a list of lists. Data is accessed
    via grid[x][y] where (x,y) are cartesian coordinates with x horizontal,
    y vertical and the origin (0,0) in the bottom left corner.

    The __str__ method constructs an output that is oriented appropriately.
    """
    def __init__(self, width, height, initialValue=' '):
        self.width = width
        self.height = height
        # Column-major storage: data[x][y].
        self.data = [[initialValue for y in range(height)] for x in range(width)]
        self.terminalState = 'TERMINAL_STATE'

    def __getitem__(self, i):
        # grid[x] yields column x, so grid[x][y] works as documented.
        return self.data[i]

    def __setitem__(self, key, item):
        self.data[key] = item

    def __eq__(self, other):
        # Fix: identity test against None instead of `== None`.
        if other is None: return False
        return self.data == other.data

    def __hash__(self):
        # Fix: the original hashed self.data directly, which always raises
        # TypeError because lists are unhashable. Hash an immutable snapshot
        # instead; grids that compare equal (equal data) hash equal.
        return hash(tuple(tuple(column) for column in self.data))

    def copy(self):
        # Copy each column so the new grid's cells are independent.
        g = Grid(self.width, self.height)
        g.data = [x[:] for x in self.data]
        return g

    def deepCopy(self):
        return self.copy()

    def shallowCopy(self):
        # Shares the underlying data with self.
        g = Grid(self.width, self.height)
        g.data = self.data
        return g

    def _getLegacyText(self):
        # Transpose to row-major and flip so the top row prints first.
        t = [[self.data[x][y] for x in range(self.width)] for y in range(self.height)]
        t.reverse()
        return t

    def __str__(self):
        return str(self._getLegacyText())
def makeGrid(gridString):
    """Build a Grid from a list of rows given top row first."""
    height = len(gridString)
    width = len(gridString[0])
    grid = Grid(width, height)
    for row_index, row in enumerate(gridString):
        y = height - row_index - 1  # flip: row 0 of the input is the top
        for x, cell in enumerate(row):
            grid[x][y] = cell
    return grid
def getCliffGrid():
    """Cliff layout: the whole bottom row is a -100 cliff, one exit pays 10."""
    grid = [[' ',' ',' ',' ',' '],
            ['S',' ',' ',' ',10],
            [-100,-100, -100, -100, -100]]
    return Gridworld(makeGrid(grid))
def getCliffGrid2():
    """Cliff variant with an extra exit worth 8 beside the start.

    Passes the raw list; Gridworld.__init__ calls makeGrid itself.
    """
    grid = [[' ',' ',' ',' ',' '],
            [8,'S',' ',' ',10],
            [-100,-100, -100, -100, -100]]
    return Gridworld(grid)
def getDiscountGrid():
    """Layout with a near exit (1) and a far exit (10) above a -10 row."""
    grid = [[' ',' ',' ',' ',' '],
            [' ','#',' ',' ',' '],
            [' ','#', 1,'#', 10],
            ['S',' ',' ',' ',' '],
            [-10,-10, -10, -10, -10]]
    return Gridworld(grid)
def getBridgeGrid():
    """Narrow bridge between exits 1 and 10, flanked by -100 cells."""
    grid = [[ '#',-100, -100, -100, -100, -100, '#'],
            [ 1, 'S', ' ', ' ', ' ', ' ', 10],
            [ '#',-100, -100, -100, -100, -100, '#']]
    return Gridworld(grid)
def getBookGrid():
    """The 4x3 textbook grid with +1 and -1 exits and one wall."""
    grid = [[' ',' ',' ',+1],
            [' ','#',' ',-1],
            ['S',' ',' ',' ']]
    return Gridworld(grid)
def getMazeGrid():
    """Small maze with a single +1 exit at the top-right."""
    grid = [[' ',' ',' ',+1],
            ['#','#',' ','#'],
            [' ','#',' ',' '],
            [' ','#','#',' '],
            ['S',' ',' ',' ']]
    return Gridworld(grid)
def getUserAction(state, actionFunction):
    """
    Get an action from the user (rather than the agent).

    Used for debugging and lecture demos. Blocks until an arrow key
    (or 'q' to quit) is pressed.
    """
    import graphicsUtils
    action = None
    while True:
        keys = graphicsUtils.wait_for_keys()
        if 'Up' in keys: action = 'north'
        if 'Down' in keys: action = 'south'
        if 'Left' in keys: action = 'west'
        if 'Right' in keys: action = 'east'
        if 'q' in keys: sys.exit(0)
        if action == None: continue
        break
    actions = actionFunction(state)
    if action not in actions:
        action = actions[0]  # illegal request falls back to the first legal action
    return action
def printString(x): print x  # default message callback (Python 2 print statement)
def runEpisode(agent, environment, discount, decision, display, message, pause, episode):
    """Run one episode to termination and return its discounted return.

    `decision`, `display`, `message`, and `pause` are callbacks supplied
    by the driver; agent hooks (startEpisode / observeTransition /
    stopEpisode) are invoked only if the agent defines them.
    """
    returns = 0
    totalDiscount = 1.0
    environment.reset()
    if 'startEpisode' in dir(agent): agent.startEpisode()
    message("BEGINNING EPISODE: "+str(episode)+"\n")
    while True:

        # DISPLAY CURRENT STATE
        state = environment.getCurrentState()
        display(state)
        pause()

        # END IF IN A TERMINAL STATE
        actions = environment.getPossibleActions(state)
        if len(actions) == 0:
            message("EPISODE "+str(episode)+" COMPLETE: RETURN WAS "+str(returns)+"\n")
            return returns

        # GET ACTION (USUALLY FROM AGENT)
        action = decision(state)
        if action == None:
            raise 'Error: Agent returned None action'

        # EXECUTE ACTION
        nextState, reward = environment.doAction(action)
        message("Started in state: "+str(state)+
                "\nTook action: "+str(action)+
                "\nEnded in state: "+str(nextState)+
                "\nGot reward: "+str(reward)+"\n")

        # UPDATE LEARNER
        if 'observeTransition' in dir(agent):
            agent.observeTransition(state, action, nextState, reward)

        returns += reward * totalDiscount
        totalDiscount *= discount

    # NOTE: unreachable -- the while loop only exits via the return above.
    if 'stopEpisode' in dir(agent):
        agent.stopEpisode()
def parseOptions():
    """Parse the command-line flags controlling the MDP parameters,
    agent type, episode count, and display, resolving flag conflicts."""
    optParser = optparse.OptionParser()
    optParser.add_option('-d', '--discount',action='store',
                         type='float',dest='discount',default=0.9,
                         help='Discount on future (default %default)')
    optParser.add_option('-r', '--livingReward',action='store',
                         type='float',dest='livingReward',default=0.0,
                         metavar="R", help='Reward for living for a time step (default %default)')
    optParser.add_option('-n', '--noise',action='store',
                         type='float',dest='noise',default=0.2,
                         metavar="P", help='How often action results in ' +
                         'unintended direction (default %default)' )
    optParser.add_option('-e', '--epsilon',action='store',
                         type='float',dest='epsilon',default=0.3,
                         metavar="E", help='Chance of taking a random action in q-learning (default %default)')
    optParser.add_option('-l', '--learningRate',action='store',
                         type='float',dest='learningRate',default=0.5,
                         metavar="P", help='TD learning rate (default %default)' )
    optParser.add_option('-i', '--iterations',action='store',
                         type='int',dest='iters',default=10,
                         metavar="K", help='Number of rounds of value iteration (default %default)')
    optParser.add_option('-k', '--episodes',action='store',
                         type='int',dest='episodes',default=1,
                         metavar="K", help='Number of epsiodes of the MDP to run (default %default)')
    optParser.add_option('-g', '--grid',action='store',
                         metavar="G", type='string',dest='grid',default="BookGrid",
                         help='Grid to use (case sensitive; options are BookGrid, BridgeGrid, CliffGrid, MazeGrid, default %default)' )
    optParser.add_option('-w', '--windowSize', metavar="X", type='int',dest='gridSize',default=150,
                         help='Request a window width of X pixels *per grid cell* (default %default)')
    optParser.add_option('-a', '--agent',action='store', metavar="A",
                         type='string',dest='agent',default="random",
                         help='Agent type (options are \'random\', \'value\' and \'q\', default %default)')
    optParser.add_option('-t', '--text',action='store_true',
                         dest='textDisplay',default=False,
                         help='Use text-only ASCII display')
    optParser.add_option('-p', '--pause',action='store_true',
                         dest='pause',default=False,
                         help='Pause GUI after each time step when running the MDP')
    optParser.add_option('-q', '--quiet',action='store_true',
                         dest='quiet',default=False,
                         help='Skip display of any learning episodes')
    optParser.add_option('-s', '--speed',action='store', metavar="S", type=float,
                         dest='speed',default=1.0,
                         help='Speed of animation, S > 1.0 is faster, 0.0 < S < 1.0 is slower (default %default)')
    optParser.add_option('-m', '--manual',action='store_true',
                         dest='manual',default=False,
                         help='Manually control agent')
    optParser.add_option('-v', '--valueSteps',action='store_true' ,default=False,
                         help='Display each step of value iteration')

    opts, args = optParser.parse_args()

    # Manual mode uses the keyboard instead of an agent (except q-learning,
    # which can still observe manual transitions).
    if opts.manual and opts.agent != 'q':
        print '## Disabling Agents in Manual Mode (-m) ##'
        opts.agent = None

    # MANAGE CONFLICTS
    if opts.textDisplay or opts.quiet:
    # if opts.quiet:
        opts.pause = False
        # opts.manual = False

    if opts.manual:
        opts.pause = True

    return opts
# Script entry point: build the requested gridworld, display, and agent,
# then run the configured number of episodes.
if __name__ == '__main__':

    opts = parseOptions()

    ###########################
    # GET THE GRIDWORLD
    ###########################

    import gridworld
    # Look up the factory by name, e.g. -g BookGrid -> getBookGrid().
    mdpFunction = getattr(gridworld, "get"+opts.grid)
    mdp = mdpFunction()
    mdp.setLivingReward(opts.livingReward)
    mdp.setNoise(opts.noise)
    env = gridworld.GridworldEnvironment(mdp)

    ###########################
    # GET THE DISPLAY ADAPTER
    ###########################

    import textGridworldDisplay
    display = textGridworldDisplay.TextGridworldDisplay(mdp)
    if not opts.textDisplay:
        import graphicsGridworldDisplay
        display = graphicsGridworldDisplay.GraphicsGridworldDisplay(mdp, opts.gridSize, opts.speed)
    try:
        display.start()
    except KeyboardInterrupt:
        sys.exit(0)

    ###########################
    # GET THE AGENT
    ###########################
    import valueIterationAgents, qlearningAgents
    a = None
    if opts.agent == 'value':
        a = valueIterationAgents.ValueIterationAgent(mdp, opts.discount, opts.iters)
    elif opts.agent == 'q':
        #env.getPossibleActions, opts.discount, opts.learningRate, opts.epsilon
        #simulationFn = lambda agent, state: simulation.GridworldSimulation(agent,state,mdp)
        gridWorldEnv = GridworldEnvironment(mdp)
        actionFn = lambda state: mdp.getPossibleActions(state)
        qLearnOpts = {'gamma': opts.discount,
                      'alpha': opts.learningRate,
                      'epsilon': opts.epsilon,
                      'actionFn': actionFn}
        a = qlearningAgents.QLearningAgent(**qLearnOpts)
    elif opts.agent == 'random':
        # # No reason to use the random agent without episodes
        if opts.episodes == 0:
            opts.episodes = 10
        # Minimal inline agent: uniform-random actions, zero values.
        class RandomAgent:
            def getAction(self, state):
                return random.choice(mdp.getPossibleActions(state))
            def getValue(self, state):
                return 0.0
            def getQValue(self, state, action):
                return 0.0
            def getPolicy(self, state):
                "NOTE: 'random' is a special policy value; don't use it in your code."
                return 'random'
            def update(self, state, action, nextState, reward):
                pass
        a = RandomAgent()
    else:
        if not opts.manual: raise 'Unknown agent type: '+opts.agent

    ###########################
    # RUN EPISODES
    ###########################
    # DISPLAY Q/V VALUES BEFORE SIMULATION OF EPISODES
    try:
        if not opts.manual and opts.agent == 'value':
            if opts.valueSteps:
                for i in range(opts.iters):
                    tempAgent = valueIterationAgents.ValueIterationAgent(mdp, opts.discount, i)
                    display.displayValues(tempAgent, message = "VALUES AFTER "+str(i)+" ITERATIONS")
                    display.pause()
            display.displayValues(a, message = "VALUES AFTER "+str(opts.iters)+" ITERATIONS")
            display.pause()
            display.displayQValues(a, message = "Q-VALUES AFTER "+str(opts.iters)+" ITERATIONS")
            display.pause()
    except KeyboardInterrupt:
        sys.exit(0)

    # FIGURE OUT WHAT TO DISPLAY EACH TIME STEP (IF ANYTHING)
    displayCallback = lambda x: None
    if not opts.quiet:
        if opts.manual and opts.agent == None:
            displayCallback = lambda state: display.displayNullValues(state)
        else:
            if opts.agent == 'random': displayCallback = lambda state: display.displayValues(a, state, "CURRENT VALUES")
            if opts.agent == 'value': displayCallback = lambda state: display.displayValues(a, state, "CURRENT VALUES")
            if opts.agent == 'q': displayCallback = lambda state: display.displayQValues(a, state, "CURRENT Q-VALUES")

    messageCallback = lambda x: printString(x)
    if opts.quiet:
        messageCallback = lambda x: None

    # FIGURE OUT WHETHER TO WAIT FOR A KEY PRESS AFTER EACH TIME STEP
    pauseCallback = lambda : None
    if opts.pause:
        pauseCallback = lambda : display.pause()

    # FIGURE OUT WHETHER THE USER WANTS MANUAL CONTROL (FOR DEBUGGING AND DEMOS)
    if opts.manual:
        decisionCallback = lambda state : getUserAction(state, mdp.getPossibleActions)
    else:
        decisionCallback = a.getAction

    # RUN EPISODES
    if opts.episodes > 0:
        print
        print "RUNNING", opts.episodes, "EPISODES"
        print
    returns = 0
    for episode in range(1, opts.episodes+1):
        returns += runEpisode(a, env, opts.discount, decisionCallback, displayCallback, messageCallback, pauseCallback, episode)
    if opts.episodes > 0:
        print
        print "AVERAGE RETURNS FROM START STATE: "+str((returns+0.0) / opts.episodes)
        print
        print

    # DISPLAY POST-LEARNING VALUES / Q-VALUES
    if opts.agent == 'q' and not opts.manual:
        try:
            display.displayQValues(a, message = "Q-VALUES AFTER "+str(opts.episodes)+" EPISODES")
            display.pause()
            display.displayValues(a, message = "VALUES AFTER "+str(opts.episodes)+" EPISODES")
            display.pause()
        except KeyboardInterrupt:
            sys.exit(0)
|
[
"timolapre1998@gmail.com"
] |
timolapre1998@gmail.com
|
9d7e63bc4c2bfaf4f7b4c619bc67081c31699819
|
e82ca136486730677d5591744d16aed891debaad
|
/nlb_lightning/callbacks.py
|
fc327af4651c1f6e5ec94b737e54e0a03991c732
|
[
"MIT"
] |
permissive
|
arsedler9/nlb-lightning
|
5b01801ae7280dedd33db10f1a400649087e7467
|
b2529ecdd1288eb5897e55f0622181a1425d701e
|
refs/heads/main
| 2023-05-23T16:42:48.997311
| 2023-01-13T22:03:29
| 2023-01-13T22:03:29
| 459,264,776
| 9
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,667
|
py
|
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
from scipy.linalg import LinAlgWarning
from sklearn.decomposition import PCA
from nlb_tools.evaluation import (
bits_per_spike,
eval_psth,
speed_tp_correlation,
velocity_decoding,
)
plt.switch_backend("Agg")
def get_tensorboard_summary_writer(loggers):
    """Gets the TensorBoard SummaryWriter from a logger
    or logger collection to allow writing of images.

    Parameters
    ----------
    loggers : obj or list[obj]
        An object or list of loggers to search for the
        SummaryWriter.

    Returns
    -------
    torch.utils.tensorboard.writer.SummaryWriter or None
        The SummaryWriter object, or None if no TensorBoard
        logger is attached.
    """
    logger_list = loggers if isinstance(loggers, list) else [loggers]
    # BUGFIX: previously returned None as soon as the first logger was not a
    # TensorBoardLogger, so a TensorBoard logger later in the list was never
    # found. Scan the whole list before giving up.
    for logger in logger_list:
        if isinstance(logger, pl.loggers.tensorboard.TensorBoardLogger):
            return logger.experiment
    return None
def batch_fwd(model, batch):
    """Performs the forward pass for a given model and data batch.

    Parameters
    ----------
    model : pl.LightningModule
        The model to pass data through.
    batch : tuple[torch.Tensor]
        A tuple of batched input tensors.

    Returns
    -------
    tuple[torch.Tensor]
        A tuple of batched output tensors.
    """
    # Only the input tensors are fed to the model; the reconstruction
    # target and the trailing behavior tensor are unpacked but unused here.
    inputs, _recon, *extras, _behavior = batch
    device = model.device
    moved_extras = [tensor.to(device) for tensor in extras]
    return model.forward(inputs.to(device), *moved_extras)
class RasterPlotCallback(pl.Callback):
    """Plots validation spiking data side-by-side with
    inferred rates and logs to tensorboard. Heldin/heldout
    and observed/forward distinctions are indicated by
    dividing lines.
    """
    def __init__(self, batch_fwd=batch_fwd, n_samples=2, log_every_n_epochs=20):
        """Initializes the callback.
        Parameters
        ----------
        batch_fwd: func, optional
            A function that takes a model and a batch of data and
            performs the forward pass, returning the model output.
            May be useful if your model requires specialized I/O.
        n_samples : int, optional
            The number of samples to plot, by default 2
        log_every_n_epochs : int, optional
            The frequency with which to plot and log, by default 20
        """
        self.batch_fwd = batch_fwd
        self.n_samples = n_samples
        self.log_every_n_epochs = log_every_n_epochs
    def on_validation_epoch_end(self, trainer, pl_module):
        """Logs plots at the end of the validation epoch.
        Parameters
        ----------
        trainer : pytorch_lightning.Trainer
            The trainer currently handling the model.
        pl_module : pytorch_lightning.LightningModule
            The model currently being trained.
        """
        # Only plot every `log_every_n_epochs` epochs to save time.
        if (trainer.current_epoch % self.log_every_n_epochs) != 0:
            return
        # Check for the TensorBoard SummaryWriter; without one there is
        # nowhere to log images, so skip entirely.
        writer = get_tensorboard_summary_writer(trainer.loggers)
        if writer is None:
            return
        # Get data samples; a single batch is enough for a qualitative plot.
        dataloader = trainer.datamodule.val_dataloader()
        batch = next(iter(dataloader))
        input_data, recon_data, *_ = batch
        # Compute data sizes: total steps/neurons from the reconstruction
        # target, observed steps and held-in neurons from the model input.
        _, steps_tot, neur_tot = recon_data.shape
        batch_size, steps_obs, neur_in = input_data.shape
        # Compute model output
        rates, *_ = self.batch_fwd(pl_module, batch)
        # Convert data to numpy arrays
        recon_data = recon_data.detach().cpu().numpy()
        rates = rates.detach().cpu().numpy()
        # Create subplots: one row per sample, true data on the left,
        # inferred rates on the right, sharing axes for comparison.
        fig, axes = plt.subplots(
            self.n_samples, 2, sharex=True, sharey=True, figsize=(10, 10)
        )
        for i, ax_row in enumerate(axes):
            for ax, array in zip(ax_row, [recon_data, rates]):
                ax.imshow(array[i].T)
                # Coral lines mark the observed/forward boundary (vertical)
                # and the heldin/heldout boundary (horizontal).
                ax.vlines(steps_obs, 0, neur_tot, color="coral")
                ax.hlines(neur_in, 0, steps_tot, color="coral")
                ax.set_xlim(0, steps_tot)
                ax.set_ylim(0, neur_tot)
        plt.tight_layout()
        # Log the plot to tensorboard
        writer.add_figure("raster_plot", fig, trainer.global_step)
class TrajectoryPlotCallback(pl.Callback):
    """Plots the top-3 PC's of the latent trajectory for
    all samples in the validation set and logs to tensorboard.
    """
    def __init__(self, batch_fwd=batch_fwd, log_every_n_epochs=100):
        """Initializes the callback.
        Parameters
        ----------
        batch_fwd: func, optional
            A function that takes a model and a batch of data and
            performs the forward pass, returning the model output.
            May be useful if your model requires specialized I/O.
        log_every_n_epochs : int, optional
            The frequency with which to plot and log, by default 100
        """
        self.batch_fwd = batch_fwd
        self.log_every_n_epochs = log_every_n_epochs
    def on_validation_epoch_end(self, trainer, pl_module):
        """Logs plots at the end of the validation epoch.
        Parameters
        ----------
        trainer : pytorch_lightning.Trainer
            The trainer currently handling the model.
        pl_module : pytorch_lightning.LightningModule
            The model currently being trained.
        """
        # Skip evaluation for most epochs to save time
        if (trainer.current_epoch % self.log_every_n_epochs) != 0:
            return
        # Check for the TensorBoard SummaryWriter
        writer = get_tensorboard_summary_writer(trainer.loggers)
        if writer is None:
            return
        # Get the validation dataset
        val_dataloader = trainer.datamodule.val_dataloader()
        # NOTE(review): input_data/recon_data are unpacked but unused below.
        input_data, recon_data, *_ = trainer.datamodule.valid_data
        # Pass data through the model; the second output is the latents.
        latents = [self.batch_fwd(pl_module, batch)[1] for batch in val_dataloader]
        latents = torch.cat(latents).detach().cpu().numpy()
        # Reduce dimensionality to 3 PCs if necessary so we can plot in 3D.
        n_samp, n_step, n_lats = latents.shape
        if n_lats > 3:
            latents_flat = latents.reshape(-1, n_lats)
            pca = PCA(n_components=3)
            latents = pca.fit_transform(latents_flat)
            latents = latents.reshape(n_samp, n_step, 3)
            explained_variance = np.sum(pca.explained_variance_ratio_)
        else:
            # Already <= 3 dims: plot directly, all variance retained.
            explained_variance = 1.0
        # Create figure and plot trajectories; green dots mark trajectory
        # starts, red dots mark trajectory ends.
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111, projection="3d")
        for traj in latents:
            ax.plot(*traj.T, alpha=0.2, linewidth=0.5)
        ax.scatter(*latents[:, 0, :].T, alpha=0.1, s=10, c="g")
        ax.scatter(*latents[:, -1, :].T, alpha=0.1, s=10, c="r")
        ax.set_title(f"explained variance: {explained_variance:.2f}")
        plt.tight_layout()
        # Log the plot to tensorboard
        writer.add_figure("trajectory_plot", fig, trainer.global_step)
class EvaluationCallback(pl.Callback):
    """Computes and logs all evaluation metrics for the Neural Latents
    Benchmark to tensorboard. These include `co_bps`, `fp_bps`,
    `behavior_r2`, `psth_r2`, and `tp_corr`.
    """
    def __init__(
        self, batch_fwd=batch_fwd, log_every_n_epochs=20, decoding_cv_sweep=False
    ):
        """Initializes the callback.
        Parameters
        ----------
        batch_fwd: func, optional
            A function that takes a model and a batch of data and
            performs the forward pass, returning the model output.
            May be useful if your model requires specialized I/O.
        log_every_n_epochs : int, optional
            The frequency with which to evaluate and log, by default 20
        decoding_cv_sweep : bool, optional
            Whether to run a cross-validated hyperparameter sweep to
            find optimal regularization values, by default False
        """
        self.batch_fwd = batch_fwd
        self.log_every_n_epochs = log_every_n_epochs
        self.decoding_cv_sweep = decoding_cv_sweep
    def on_validation_epoch_end(self, trainer, pl_module):
        """Computes and logs NLB metrics at the end of the validation epoch.
        Parameters
        ----------
        trainer : pytorch_lightning.Trainer
            The trainer currently handling the model.
        pl_module : pytorch_lightning.LightningModule
            The model currently being trained.
        """
        # Skip evaluation for most epochs to save time
        if (trainer.current_epoch % self.log_every_n_epochs) != 0:
            return
        # Get entire validation dataset from dataloader
        input_data, recon_data, *_, behavior = trainer.datamodule.valid_data
        recon_data = recon_data.detach().cpu().numpy()
        behavior = behavior.detach().cpu().numpy()
        # Get model predictions for the entire validation dataset
        val_dataloader = trainer.datamodule.val_dataloader()
        # Pass the data through the model
        rates = [self.batch_fwd(pl_module, batch)[0] for batch in val_dataloader]
        rates = torch.cat(rates).detach().cpu().numpy()
        # Compute co-smoothing bits per spike: held-out neurons during the
        # observed window. Scores are clipped below at -1.0 when logging.
        _, n_obs, n_heldin = input_data.shape
        heldout = recon_data[:, :n_obs, n_heldin:]
        rates_heldout = rates[:, :n_obs, n_heldin:]
        co_bps = bits_per_spike(rates_heldout, heldout)
        pl_module.log("nlb/co_bps", max(co_bps, -1.0))
        # Compute forward prediction bits per spike: all neurons during the
        # unobserved (forward) window.
        forward = recon_data[:, n_obs:]
        rates_forward = rates[:, n_obs:]
        fp_bps = bits_per_spike(rates_forward, forward)
        pl_module.log("nlb/fp_bps", max(fp_bps, -1.0))
        # Get relevant training dataset from datamodule
        *_, train_behavior = trainer.datamodule.train_data
        train_behavior = train_behavior.detach().cpu().numpy()
        # Get model predictions for the training dataset
        # (shuffle disabled so rates align with the stored behavior).
        train_dataloader = trainer.datamodule.train_dataloader(shuffle=False)
        train_rates = [
            self.batch_fwd(pl_module, batch)[0] for batch in train_dataloader
        ]
        train_rates = torch.cat(train_rates).detach().cpu().numpy()
        # Get firing rates for observed time points
        rates_obs = rates[:, :n_obs]
        train_rates_obs = train_rates[:, :n_obs]
        # Compute behavioral decoding performance; dmfc_rsg uses a
        # speed/tp correlation metric instead of velocity decoding.
        if "dmfc_rsg" in trainer.datamodule.hparams.dataset_name:
            tp_corr = speed_tp_correlation(heldout, rates_obs, behavior)
            pl_module.log("nlb/tp_corr", tp_corr)
        else:
            with warnings.catch_warnings():
                # Ignore LinAlgWarning from early in training
                warnings.filterwarnings("ignore", category=LinAlgWarning)
                behavior_r2 = velocity_decoding(
                    train_rates_obs,
                    train_behavior,
                    trainer.datamodule.train_decode_mask,
                    rates_obs,
                    behavior,
                    trainer.datamodule.valid_decode_mask,
                    self.decoding_cv_sweep,
                )
            pl_module.log("nlb/behavior_r2", max(behavior_r2, -1.0))
        # Compute PSTH reconstruction performance, only for datasets that
        # provide trial-averaged PSTHs.
        if hasattr(trainer.datamodule, "psth"):
            psth = trainer.datamodule.psth
            cond_idxs = trainer.datamodule.valid_cond_idxs
            jitter = trainer.datamodule.valid_jitter
            psth_r2 = eval_psth(psth, rates_obs, cond_idxs, jitter)
            pl_module.log("nlb/psth_r2", max(psth_r2, -1.0))
|
[
"arsedler9@gmail.com"
] |
arsedler9@gmail.com
|
28aa49da0c2012be233dd9559d11f7f157e50a1e
|
dd0185d10e9be56ea693e15a1e97d2817276a93e
|
/efefal/searchclient.py
|
a6ceb777dfe091c2576ea1929bbba31a08e9921a
|
[] |
no_license
|
sparky005/EFEFAL
|
0af8912c02a98b9eda0a1139917e972fd2507730
|
5683d32e45d93811d4a9ea7c97c264dbb7c218da
|
refs/heads/master
| 2018-09-01T05:27:36.027364
| 2018-06-17T04:25:29
| 2018-06-17T04:25:29
| 119,923,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,897
|
py
|
import json
import itertools
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
class SearchClient():
    """Query helper over an Elasticsearch index of Ansible run events.

    Documents are produced by an Ansible logging callback; each carries an
    `ansible_type` of 'task' or 'finish', a `session` id, a `@timestamp`,
    and (for tasks) `status`/`ansible_result` fields.
    """

    def __init__(self):
        # Default connection (localhost:9200).
        self.client = Elasticsearch()

    def timestamp_to_dt(self, timestamp):
        """Convert an ISO-ish timestamp string into a datetime object.

        The fractional-second part is dropped before parsing.
        NOTE(review): if the string has no '.', find() returns -1 and the
        final character is silently truncated — assumes inputs always carry
        fractional seconds.
        """
        ix = timestamp.find('.')
        d = datetime.strptime(timestamp[:ix], '%Y-%m-%dT%H:%M:%S')
        return d

    def timestamp_sort(self, hits):
        """Return *hits* sorted oldest-first by their '@timestamp' field."""
        hits = sorted(hits, key=lambda d : d['@timestamp'])
        return hits

    def reverse_timestamp_sort(self, hits):
        """Return *hits* sorted newest-first by their '@timestamp' field."""
        hits = sorted(hits, key=lambda d : d['@timestamp'], reverse=True)
        return hits

    def calculate_totals(self, result):
        """Takes a 'finish' as a param and provides totals"""
        totals = {
            "ok": 0,
            "failed": 0,
            "unreachable": 0,
            "changed": 0,
            "skipped": 0,
        }
        for host_result in result.keys():
            for key, value in result[host_result].items():
                # Ansible reports the key 'failures'; expose it as 'failed'.
                if key == 'failures':
                    totals['failed'] += value
                else:
                    totals[key] += value
        return totals

    def remove_tasklist_duplicates(self, task_list):
        """Keep one entry per distinct 'ansible_task' name.

        This has the side-effect of collapsing same-named tasks and of
        returning the tasks in name-sorted order.
        """
        results = []
        for name, group in itertools.groupby(sorted(task_list,
                                             key=lambda d : d['ansible_task']),
                                             key=lambda d : d['ansible_task']):
            results.append(next(group))
        return results

    def playbook_index(self):
        """Return the unique playbook names, most recently run first.

        BUGFIX: previously the hits were sorted newest-first and then fed
        into a set(), which discards ordering (as the original TIL comment
        noted). dict.fromkeys de-duplicates while preserving insertion
        order, so the recency ordering survives.
        """
        s = Search(using=self.client).query("match", type='ansible')
        hits = [x.to_dict() for x in s]
        hits = self.reverse_timestamp_sort(hits)
        return list(dict.fromkeys(hit['ansible_playbook'] for hit in hits))

    def playbook_totals(self, playbook):
        """gets totals for each run of a single playbook using a 'finish' object"""
        s = Search(using=self.client).query("match_phrase", ansible_playbook=playbook).filter("term", ansible_type="finish")
        s = [hit.to_dict() for hit in s]
        sessions = [hit['session'] for hit in s]
        totals = [self.totals(session) for session in sessions]
        return totals

    def playbook_sessions(self, playbook):
        """get list of all sessions for a single playook"""
        s = Search(using=self.client).query("match_phrase", ansible_playbook=playbook).filter("term", ansible_type="finish")
        s = [hit.to_dict() for hit in s]
        # Parse timestamps so sorting is chronological rather than lexical.
        for hit in s:
            hit['@timestamp'] = self.timestamp_to_dt(hit['@timestamp'])
        s = self.reverse_timestamp_sort(s)
        return s

    def session_tasks(self, playbook, session, host=None, status=None):
        """Get info for a single run (session) of a single playbook.

        Optional *host* and *status* narrow the query; the pseudo-status
        'CHANGED' is implemented as status OK plus 'changed: true' in the
        raw result.
        """
        # handle the special case (changed) first
        if host and status == 'CHANGED':
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task") \
                                        .filter("match", status='OK') \
                                        .filter("term", ansible_host=host) \
                                        .filter("match_phrase", ansible_result="changed: true")
        elif status == 'CHANGED':
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task") \
                                        .filter("match", status='OK') \
                                        .filter("match_phrase", ansible_result="changed: true")
        elif host and status:
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task") \
                                        .filter("term", ansible_host=host) \
                                        .filter("match", status=status)
        elif host:
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task") \
                                        .filter("term", ansible_host=host)
        elif status:
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task") \
                                        .filter("match", status=status)
        else:
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task")
        tasks = s.scan()
        tasks = [task.to_dict() for task in tasks]
        # make sure we don't remove duplicates
        # when we actually care about all the tasks
        if not status:
            tasks = self.remove_tasklist_duplicates(tasks)
        tasks = self.timestamp_sort(tasks)
        for task in tasks:
            # remove word TASK: from the beginning of each task
            space = task['ansible_task'].find(' ')
            task['ansible_task'] = task['ansible_task'][space:]
            task['@timestamp'] = self.timestamp_to_dt(task['@timestamp'])
        return tasks

    def session_finish(self, playbook, session):
        """Get finish information for a single playbook run."""
        finish = Search(using=self.client).query("match_phrase", session=session) \
                                          .filter("term", ansible_type="finish")
        finish = finish.scan()
        finish = [x.to_dict() for x in finish]
        finish = json.loads(finish[0]['ansible_result'])
        for host in finish:
            finish[host]['failed'] = finish[host].pop('failures')
        # some hackery to reorder the dict: re-insert keys in display order.
        for key in ["ok", "failed", "unreachable", "changed", "skipped"]:
            for host in finish:
                finish[host][key] = finish[host].pop(key)
        return finish

    def get_hosts(self, session):
        """Get the hosts that were in a given session."""
        s = Search(using=self.client).query("match_phrase", session=session) \
                                     .filter("term", ansible_type="finish")
        finishes = s.scan()
        finishes = [x.to_dict() for x in finishes]
        return list(json.loads(finishes[0]['ansible_result']).keys())

    def totals(self, session, host=None):
        """
        Calculates and returns the totals for a given session
        if host is given, only get totals for the single host
        else, the entire session
        """
        if host:
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task") \
                                        .filter("term", ansible_host=host)
        else:
            s = Search(using=self.client).query("match_phrase", session=session) \
                                        .filter("term", ansible_type="task")
        tasks = s.scan()
        tasks = [task.to_dict() for task in tasks]
        totals = {
            "OK": 0,
            "FAILED": 0,
            "UNREACHABLE": 0,
            "CHANGED": 0,
            "SKIPPED": 0,
        }
        for task in tasks:
            result = task['status']
            if result == 'OK':
                # check if it was a change
                if json.loads(task['ansible_result'])['changed'] == True:
                    result = 'CHANGED'
            totals[result] += 1
        return totals
|
[
"sparky.005@gmail.com"
] |
sparky.005@gmail.com
|
a02438030b428e7fa07a48b86e88e5a29016281e
|
2ad771a7cc3a8c2e5188da4eb94cd80148021bc9
|
/ECommerce/migrations/0001_initial.py
|
68bd8efd4661aeb050bc4dae4628dcff1718478a
|
[] |
no_license
|
DiogenesPuig/EcommerceANT2021
|
af4e13630d6aaee2c6b3ca2ac5df194a94838458
|
3df1d1c33fdceb3a9b00891ad7d98314d2fab03e
|
refs/heads/main
| 2023-07-10T07:09:16.203667
| 2021-08-12T03:25:09
| 2021-08-12T03:25:09
| 375,713,191
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,892
|
py
|
# Generated by Django 3.2 on 2021-08-11 18:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ECommerce app.

    Creates Cart, Category, Product, ProductsCart (the M2M through model
    between Cart and Product), Supplier, Sale, and Deposit, plus the
    foreign keys wiring them together. Auto-generated by Django 3.2 —
    do not hand-edit once applied.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sold', models.BooleanField()),
            ],
            options={
                'verbose_name': 'Cart',
                'verbose_name_plural': 'Carts',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.category')),
            ],
            options={
                'verbose_name': 'Product',
                'verbose_name_plural': 'Products',
            },
        ),
        # Through model carrying the per-cart quantity of each product.
        migrations.CreateModel(
            name='ProductsCart',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cant_prod', models.IntegerField(default=0)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.cart')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.product')),
            ],
            options={
                'verbose_name': 'Product in Cart',
                'verbose_name_plural': 'Products in Carts',
            },
        ),
        migrations.CreateModel(
            name='Supplier',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('tel', models.CharField(max_length=15)),
            ],
            options={
                'verbose_name': 'Supplier',
                'verbose_name_plural': 'Suppliers',
            },
        ),
        migrations.CreateModel(
            name='Sale',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('payment_method', models.CharField(choices=[('transferencia', 'transferencia'), ('debito', 'debito'), ('credito', 'credito')], default=None, max_length=50)),
                ('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.cart')),
                ('product_in_cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.productscart')),
            ],
            options={
                'verbose_name': 'Sale',
                'verbose_name_plural': 'Sales',
            },
        ),
        migrations.AddField(
            model_name='product',
            name='supplier',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.supplier'),
        ),
        migrations.CreateModel(
            name='Deposit',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stock', models.PositiveIntegerField()),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ECommerce.product')),
            ],
            options={
                'verbose_name': 'Deposit',
                'verbose_name_plural': 'Deposits',
            },
        ),
        migrations.AddField(
            model_name='cart',
            name='product_sale',
            field=models.ManyToManyField(through='ECommerce.ProductsCart', to='ECommerce.Product'),
        ),
        migrations.AddField(
            model_name='cart',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"lucio.moralesdemaria@gmail.com"
] |
lucio.moralesdemaria@gmail.com
|
2c9cd67fd5fd36a71a2703df028bcb9916b15eb2
|
64d9a33622f2671f669efe9df4059d8fa3593c62
|
/dsc/index.py
|
c1fd5bdf0ef3feb2e0a4677357fe63801addc37a
|
[] |
no_license
|
emonti/star
|
1ae421a709afee2bbeafccd1c142b487dab1f257
|
13ff58d0a06ca1a10c513f41d491b546f1090d48
|
refs/heads/master
| 2020-12-24T22:30:16.637740
| 2010-09-28T22:33:17
| 2010-09-28T22:33:17
| 943,708
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,240
|
py
|
import struct, anydbm, sys, glob
def build(path):
    """Index every *.txt disassembly listing under *path* into an anydbm
    database named 'index'.

    Two kinds of keys are stored, both mapping to a packed
    (file_index, line_number, file_offset) triple:
      * address pages: lines starting with a 6-hex-digit address, keyed by
        the packed integer address
      * symbols: non-address lines containing ':', keyed by
        '\\0\\0\\0\\0' + name
    NOTE: Python 2 only (anydbm, dict.has_key).
    """
    db = anydbm.open('index', 'c')
    files = glob.glob('%s/*.txt' % path)
    # Store the file list so lookups can map indices back to paths.
    db['_files'] = '\0'.join(files)
    for fil in files:
        lineno = 1
        fileoff = 0
        for line in open(fil, 'rb'):
            scratch = line[:6]  # NOTE(review): unused — dead assignment?
            try:
                key = int(line[:6], 16)
            except:  # NOTE(review): bare except — ValueError is what's expected here
                if ':' in line:
                    # Symbol line: key on the sentinel prefix + name before ':'.
                    key = '\0\0\0\0' + line[:line.find(':')]
                    db[key] = struct.pack('III', files.index(fil), lineno, fileoff)
            else:
                key = struct.pack('I', key)
                if not db.has_key(key):
                    # Keep only the first occurrence of each address key.
                    db[key] = struct.pack('III', files.index(fil), lineno, fileoff)
            lineno += 1
            fileoff += len(line)
def ful(fp):
    """Read lines from *fp* until EOF or a line containing 'pop\\t',
    returning everything read (terminating line included)."""
    pieces = []
    while True:
        line = fp.readline()
        pieces.append(line)
        if line == '' or 'pop\t' in line:
            break
    return ''.join(pieces)
def lookup_sym(sym, full=False):
    """Look up symbol *sym* in the 'index' database.

    Returns None if the symbol is unknown. Otherwise returns a short
    '[file:line] <first two lines>' string, or — when *full* is truthy —
    the whole listing chunk up to the next 'pop\\t' line (via ful()).
    NOTE: Python 2 only (anydbm).
    """
    db = anydbm.open('index')
    files = db['_files'].split('\0')
    # Symbol keys carry the 4-NUL sentinel prefix (see build()).
    key = '\0\0\0\0' + sym
    try:
        fili, line, filoff = struct.unpack('III', db[key])
    except KeyError:
        return None
    fp = open(files[fili], 'rb')
    fp.seek(filoff)
    if full: return ful(fp).rstrip()
    return '[%s:%d] %s %s' % (files[fili], line, fp.readline().strip(), fp.readline().strip())
def lookup_addr(addr, full=False):
    """Look up address *addr* in the 'index' database.

    The index stores one entry per 0x100-byte page; this seeks to the
    page's first indexed line and scans forward for the exact address.
    Returns a '[file:line] <line>' string (or the full chunk when *full*),
    or None if the address is not found.
    NOTE: Python 2 only (anydbm).
    """
    db = anydbm.open('index')
    files = db['_files'].split('\0')
    # Clear the low bit — presumably an ARM/Thumb mode flag; confirm.
    addr &= ~1
    key = struct.pack('I', int(addr) / 0x100)
    try:
        fili, line, filoff = struct.unpack('III', db[key])
    except KeyError:
        return None
    fp = open(files[fili], 'rb')
    fp.seek(filoff)
    # Scan line-by-line for the exact 8-hex-digit address prefix.
    q = '%08x' % addr
    lineno = line
    while True:
        line = fp.readline()
        if line[:8] == q:
            if full: return (line + ful(fp)).rstrip()
            return '[%s:%d] %s' % (files[fili], lineno, line.strip())
        elif line == '':
            return None
        lineno += 1
# CLI dispatch (Python 2 syntax): `build <dir>`, `sym <name> [full]`,
# or a bare hex address `<addr> [full]`.
if sys.argv[1] == 'build':
    build(sys.argv[2])
elif sys.argv[1] == 'sym':
    print lookup_sym(sys.argv[2], len(sys.argv) > 3 and sys.argv[3] == 'full')
else:
    print lookup_addr(int(sys.argv[1], 16), len(sys.argv) > 2 and sys.argv[2] == 'full')
|
[
"comexk@gmail.com"
] |
comexk@gmail.com
|
0d485fafb88cd1b4b43e495ef44c9e49b577988b
|
14fdabb4e07e96f486ff325bb653debd8dfbcf30
|
/bullet.py
|
6d1da23b9b7f6a78362cd6010381b073d60c4c84
|
[] |
no_license
|
YasminTorres/CPSC386-02_SpaceInvaders
|
62d32d91b7b87cdaf300bec8b002963abd75cfd7
|
bb66e50007d047926d34c1abb8b789f284bc02af
|
refs/heads/master
| 2020-08-10T19:53:33.012403
| 2019-10-11T15:36:49
| 2019-10-11T15:36:49
| 214,409,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
    """A single bullet fired upward from the ship's current position."""

    def __init__(self, ai_settings, screen, ship):
        """Create a bullet at the top-center of *ship*."""
        super(Bullet, self).__init__()
        self.screen = screen
        # Build the bullet rect at the origin, then snap it to the ship.
        bullet_rect = pygame.Rect(
            0, 0, ai_settings.bullet_width, ai_settings.bullet_height
        )
        bullet_rect.centerx = ship.rect.centerx
        bullet_rect.top = ship.rect.top
        self.rect = bullet_rect
        # Track vertical position as a float so fractional speeds work.
        self.y = float(bullet_rect.y)
        self.color = ai_settings.bullet_color
        self.speed_factor = ai_settings.bullet_speed_factor

    def update(self):
        """Move the bullet up the screen."""
        self.y -= self.speed_factor
        self.rect.y = self.y

    def draw_bullet(self):
        """Draw the bullet as a filled rectangle."""
        pygame.draw.rect(self.screen, self.color, self.rect)
|
[
"noreply@github.com"
] |
YasminTorres.noreply@github.com
|
ccfe49dd966567bd3e90393cd3138e7c3173ce26
|
783235be871e692de21bc0b1f482e0c64d4a0044
|
/config_tester.py
|
b799c68a52e903727285d2cf2bd0d62e1ef8a6b5
|
[] |
no_license
|
maxbergmark/julia-explorer-opencl
|
e7f9acd2d308ffd1b01afb3071427b51127dfe10
|
d7a711069bb971cd74a3a5815db5f53491595313
|
refs/heads/master
| 2022-09-11T07:23:56.533828
| 2022-08-30T08:16:10
| 2022-08-30T08:16:10
| 247,465,835
| 1
| 1
| null | 2022-08-30T08:16:11
| 2020-03-15T12:53:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
'''
Listing 4.3: Testing a device's floating-point features
'''
import pyopencl as cl
import utility

# Get device and context, create command queue and program
dev = utility.get_default_device()

# Check for double floating point features: query the single-precision
# FP capability bitfield and test each named capability mask against it.
fp_flag = dev.single_fp_config
fp_masks = [('Denorm', cl.device_fp_config.DENORM),
            ('Fused multiply-add', cl.device_fp_config.FMA),
            ('INF & NAN', cl.device_fp_config.INF_NAN),
            ('Round to INF', cl.device_fp_config.ROUND_TO_INF),
            ('Round to nearest', cl.device_fp_config.ROUND_TO_NEAREST),
            ('Round to zero', cl.device_fp_config.ROUND_TO_ZERO)]

# Some masks only exist from a given OpenCL version; the device version
# string looks like "OpenCL 1.2 ...", so the number is the second token.
version_number = float(dev.version.split(' ')[1])
if version_number >= 1.1:
    fp_masks.append(('Soft float', cl.device_fp_config.SOFT_FLOAT))
if version_number >= 1.2:
    fp_masks.append(('Correctly rounded div sqrt', cl.device_fp_config.CORRECTLY_ROUNDED_DIVIDE_SQRT))

print('Floating point features:')
# NOTE(review): list comprehension used purely for its print side effects —
# a plain for-loop would be clearer.
[print('\t{0:<30}{1:<5}'.format(name, str(bool(fp_flag & mask)))) for name, mask in fp_masks]
|
[
"max.bergmark@gmail.com"
] |
max.bergmark@gmail.com
|
03ac5c339f76016d80e2581af7d6ff0888ccb5ca
|
2bb2c4d0547a6ae86ae486662505e9f06af4f146
|
/python_tutorial02/python_tutorial/python_tutorial/cython/cpp_template/setup.py
|
809f68ad70d1855a5d4546e1ac4a5f041213f007
|
[] |
no_license
|
JinFree/cac.kias.re.kr-2017-3rd-day
|
9fe6815eb146cfb139bd352031c970ce3094864a
|
9454b666806c5f9a6d39d14b15424770a1e61a86
|
refs/heads/master
| 2020-12-03T04:16:50.329281
| 2017-06-30T05:21:02
| 2017-06-30T05:21:02
| 95,844,411
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 262
|
py
|
from distutils.core import setup, Extension
from Cython.Build import cythonize

# Compile the two Cython wrappers around C++ std::vector examples.
# `language="c++"` makes Cython emit C++ rather than C sources.
exts = cythonize([
    Extension(name="vec",sources=["vec.pyx"],language="c++"),
    Extension(name="vec2",sources=["vec2.pyx"],language="c++"),
])

setup(
    ext_modules = exts,
)
|
[
"guest@gpu.kias.re.kr"
] |
guest@gpu.kias.re.kr
|
d603779b503c2e5bb352264e6757ac655ad5b527
|
5bdcc3e0514626cc683bb236fb16a89735ce09fd
|
/ModelUtils/Models/structurer/MlpStructurer.py
|
4e73d199343ad9a1a7038157a6a67a58d639dc3d
|
[] |
no_license
|
jsarni/KaggleCompetition2020
|
f83d85d3acefdfa264c55b03eba6ba9df44a30bb
|
527ecd311660414cfdf6f6b9e32be65bedef957e
|
refs/heads/master
| 2021-04-24T00:20:29.992532
| 2020-05-15T13:56:10
| 2020-05-15T13:56:10
| 250,042,229
| 0
| 0
| null | 2020-04-23T11:28:21
| 2020-03-25T17:13:00
|
Python
|
UTF-8
|
Python
| false
| false
| 764
|
py
|
from .ModelName import MLP
class MlpStructurer:
    """Mutable configuration holder describing an MLP architecture
    plus its regularization and compile settings."""

    def __init__(self):
        self.name = MLP
        # Architecture
        self.nb_hidden_layers = 0
        self.nb_classes = 0
        self.layers_size = []                 # one entry per hidden layer
        self.input_shape = (16, 16, 3)
        self.layers_activation = 'relu'
        self.output_activation = 'softmax'
        # Dropout: applied at the layer indexes listed in dropout_indexes
        self.use_dropout = False
        self.dropout_indexes = []
        self.dropout_value = 0.0
        # L1/L2 regularization toggles and strengths
        self.use_l1l2_regularisation_hidden_layers = False
        self.use_l1l2_regularisation_output_layer = False
        self.l1_value = 0.0
        self.l2_value = 0.0
        self.regulization_indexes = []
        # Compile settings (Keras-style string identifiers)
        self.loss = 'sparse_categorical_crossentropy'
        self.optimizer = 'Adam'
        self.metrics = ['sparse_categorical_accuracy']
|
[
"juba.sarni@gmail.com"
] |
juba.sarni@gmail.com
|
bfda01939ca390d676ce07ce86055318eb58fb36
|
b85792d592426a63184ab7ba5e69975856267b45
|
/accounts/migrations/0033_auto_20210721_0759.py
|
00601bd972e9672fd2cd200b236636f263acefd3
|
[] |
no_license
|
ananduv2/Teq-soul
|
bedfcea0b7ed759854793321722e7e64e76c9f0b
|
917fa9163072d5a8e01432f2778157b1884a6d75
|
refs/heads/master
| 2023-08-17T11:07:31.145644
| 2021-09-27T10:16:39
| 2021-09-27T10:16:39
| 397,057,554
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 779
|
py
|
# Generated by Django 3.2.3 on 2021-07-21 02:29
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: tweaks Query.datetime default and Query.receiver FK."""

    dependencies = [
        ('accounts', '0032_auto_20210721_0758'),
    ]

    operations = [
        migrations.AlterField(
            model_name='query',
            name='datetime',
            # NOTE(review): this default is the fixed timestamp captured at
            # makemigrations time, not an auto-now value — every new row
            # defaults to 2021-07-21 07:59:32. Confirm that's intended.
            field=models.DateField(blank=True, default=datetime.datetime(2021, 7, 21, 7, 59, 32, 266840), null=True),
        ),
        migrations.AlterField(
            model_name='query',
            name='receiver',
            field=models.ForeignKey(limit_choices_to={'stype': '1'}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to='accounts.staff'),
        ),
    ]
|
[
"ubuntu@ip-172-31-53-229.ec2.internal"
] |
ubuntu@ip-172-31-53-229.ec2.internal
|
b0c142c047b84a3b5b79241f5ae45c042d90a84a
|
323096dbd822afbee8dc8a6cb14d7b0b855574d2
|
/src/Server_slidingWindow.py
|
573e578d70519643aa92b0b561cdb122ab451e75
|
[] |
no_license
|
Azmah-Bad/ServerX
|
70937941f9ec34f11b56184b2573e068820d0650
|
631e403ae5a65053994cebb030314b5a4612816c
|
refs/heads/master
| 2023-02-14T05:29:14.368065
| 2021-01-10T22:21:07
| 2021-01-10T22:21:07
| 316,426,053
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,668
|
py
|
import socket
from src import BaseServer
import logging
class SlidingWindowServer(BaseServer):
    """
    Sliding window engine
    start by sending a window of segments then reads the ACK recieved
    if the segment is received sends the next segment
    if a segment was dropped, resends a window starting at the last ACK
    # Perf
    5.9 MBps for small files
    1 MBps for large files
    """
    # Number of segments kept in flight at once.
    WINDOW_SIZE = 80

    def engine(self, *args, **kwargs):
        # `index` is the highest segment number sent so far; ACKd records
        # every ACK seen, so duplicates reveal dropped segments.
        index = self.WINDOW_SIZE - 1
        ACKd = []
        # Prime the pipe with the first full window.
        self.writer(0, self.WINDOW_SIZE)
        while index < len(self.Segments):
            try:
                ReceivedACK = self.ackHandler()
            except socket.timeout:
                logging.warning(f"timed out...")
                # On timeout, retransmit from the highest segment ACKed.
                if ACKd:
                    self.sendSegmentThread(max(ACKd))
                continue
            if ReceivedACK == len(self.Segments):
                break
            # if ReceivedACK in ACKd:  # TODO maybe if a seg is dropped resend a window
            # Second sighting of the same ACK => the following segment was
            # dropped; resend a window from there.
            # NOTE(review): ACKd.count() is O(n) per ACK over an ever-growing
            # list — a Counter/dict would avoid quadratic behavior.
            if ACKd.count(ReceivedACK) == 1:
                logging.warning(f"segment {ReceivedACK + 1} was dropped, resending it...")
                self.writer(ReceivedACK, ReceivedACK + self.WINDOW_SIZE)
                self.DroppedSegmentCount += 1
            if ReceivedACK not in ACKd:  # Segment received with success
                index += 1
                if index == len(self.Segments):
                    break
                self.sendSegmentThread(index)
                logging.debug(f"segment received with success, sending in segment {index + 1}...")
            ACKd.append(ReceivedACK)


if __name__ == '__main__':
    mServer = SlidingWindowServer()
    mServer.run()
|
[
"hamza.badaoui@insa-lyon.fr"
] |
hamza.badaoui@insa-lyon.fr
|
f3dd034ab0128fc7008888d17b99297f70d94714
|
39e8f775416e1a330381c0a8c0593cd71472f9eb
|
/lines/snapshot/__init__.py
|
9fced060f79010ec32b7fd9d5d72d8885c7e0649
|
[
"MIT"
] |
permissive
|
betfund/betfund-lines
|
2c5ee268c8edd3799b8762e48ee16e06660260ea
|
9fe3e2aa69bb493df3efc2a3923889f444f2c66d
|
refs/heads/master
| 2021-05-18T05:47:39.045981
| 2020-04-05T20:52:12
| 2020-04-05T20:52:12
| 251,143,008
| 0
| 0
|
MIT
| 2020-04-06T00:34:58
| 2020-03-29T21:57:04
|
Python
|
UTF-8
|
Python
| false
| false
| 129
|
py
|
"""BetFund client.snapshot namespace."""
from .temporal_to_snapshot import TemporalToSnapshot
__all__ = ["TemporalToSnapshot"]
|
[
"leonkozlowski@gmail.com"
] |
leonkozlowski@gmail.com
|
0d5678a6d503ac3a2fc880cd4afae10ee356db95
|
56d208b264a60746b9a31370e7edee0766e84d9b
|
/holocron/settings.py
|
08ad0d6e566c5dbcb674f5f3e4dbbdfa304abce1
|
[] |
no_license
|
cassiobotaro/holocron
|
05d414b1f1752a074092bb521e3143971a84dfff
|
8add5aeb50f8b7480d23485845abb8625169f31b
|
refs/heads/main
| 2023-02-28T16:01:07.802857
| 2021-02-06T14:29:47
| 2021-02-06T14:29:47
| 239,394,591
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
# Scrapy project settings for the "holocron" crawler.
BOT_NAME = "holocron"
SPIDER_MODULES = ["holocron.spiders"]
NEWSPIDER_MODULE = "holocron.spiders"
# NOTE(review): robots.txt is deliberately ignored — confirm this is
# acceptable for the target sites.
ROBOTSTXT_OBEY = False
# Spoof a desktop Firefox UA on every request.
DEFAULT_REQUEST_HEADERS = {
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:76.0) Gecko/20100101 Firefox/76.0"  # noqa: E501
}
# DOWNLOAD_DELAY = 0.25
# scrapy-splash integration: JS rendering via a local Splash instance.
SPLASH_URL = "http://localhost:8050"
DOWNLOADER_MIDDLEWARES = {
    "scrapy_splash.SplashCookiesMiddleware": 723,
    "scrapy_splash.SplashMiddleware": 725,
    "scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware": 810,  # noqa:E501
}
SPIDER_MIDDLEWARES = {
    "scrapy_splash.SplashDeduplicateArgsMiddleware": 100,
}
# Dedupe requests by their Splash arguments, not just URL.
DUPEFILTER_CLASS = "scrapy_splash.SplashAwareDupeFilter"
|
[
"cassiobotaro@gmail.com"
] |
cassiobotaro@gmail.com
|
8867e19f310da59e05ed497a4dd1fabde4a7a1a2
|
3fbd05e539e12e05fd0b75109a0d7d3e36c61946
|
/app/migrations/0007_entradablog_destacados.py
|
08e5118873515a6546e520a738eed0fac1c62028
|
[] |
no_license
|
Diana-Toledo/Naturopatia
|
1eaa8ed6a663f755a207c18d6266bba8a7708648
|
74c6d5063aef1ae46ade17209e705afacaf4117c
|
refs/heads/main
| 2023-07-02T19:48:01.299930
| 2021-08-01T16:20:19
| 2021-08-01T16:20:19
| 391,036,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
# Generated by Django 2.2.11 on 2020-04-22 07:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean `destacados` (featured) flag to the EntradaBlog model."""

    dependencies = [
        ('app', '0006_auto_20200407_1139'),
    ]
    operations = [
        migrations.AddField(
            model_name='entradablog',
            name='destacados',
            # Existing rows default to not-featured.
            field=models.BooleanField(default=False),
        ),
    ]
|
[
"diaelitg@gmail.com"
] |
diaelitg@gmail.com
|
444a35effa5887e654c35f4241581be07be6f59c
|
4a1be3f4f891b4cf54637221581abc02d5f39847
|
/config/urls.py
|
2a1240e0571fd625888884244ff0b77dbb7a2bbd
|
[] |
no_license
|
Nayoung-apeach/NetworkProgramming_jathub
|
7594b9f0ec982317296460e69c21f672aaafd5c8
|
012d39904d87669e6318263460c5b192d0ab7714
|
refs/heads/master
| 2023-06-11T08:03:41.507786
| 2021-06-29T04:22:49
| 2021-06-29T04:22:49
| 370,860,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
"""config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('', include('jat.urls')),  # with no URL prefix, hand off to the jat app's URLconf
    path('admin/', admin.site.urls),
]
|
[
"s2019w04@e-mirim.hs.kr"
] |
s2019w04@e-mirim.hs.kr
|
22d6004984b02614ba25c0b8628aee87dffef767
|
e651145ab96210db2d245ad67914254cf077b45f
|
/tests.py
|
c36f7bb816302fb861677e5b4130f450bd99fbf5
|
[] |
no_license
|
OrangeHoodie240/SB_24_3_12
|
bb980657bc94177689fba4b0ae06f75e2afc35c3
|
3ff454aabec5ef6ecb29d93b69eb2f466efc3adb
|
refs/heads/master
| 2023-04-09T18:52:40.055152
| 2021-04-05T08:05:50
| 2021-04-05T08:05:50
| 354,762,697
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,920
|
py
|
from unittest import TestCase
from app import app
from models import db, Cupcake
# Use test database and don't clutter tests with SQL
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///cupcakes_test'
app.config['SQLALCHEMY_ECHO'] = False
# Make Flask errors be real errors, rather than HTML pages with error info
app.config['TESTING'] = True
# Start every test run from a freshly created schema.
db.drop_all()
db.create_all()
# Fixture payload used by setUp() to seed the database.
CUPCAKE_DATA = {
    "flavor": "TestFlavor",
    "size": "TestSize",
    "rating": 5,
    "image": "http://test.com/cupcake.jpg"
}
# Second payload, used only to exercise creation (POST).
CUPCAKE_DATA_2 = {
    "flavor": "TestFlavor2",
    "size": "TestSize2",
    "rating": 10,
    "image": "http://test.com/cupcake2.jpg"
}
class CupcakeViewsTestCase(TestCase):
    """Tests for views of API."""
    def setUp(self):
        """Make demo data."""
        Cupcake.query.delete()
        cupcake = Cupcake(**CUPCAKE_DATA)
        db.session.add(cupcake)
        db.session.commit()
        # Keep a handle on the seeded row so tests can reference its id.
        self.cupcake = cupcake
    def tearDown(self):
        """Clean up fouled transactions."""
        db.session.rollback()
    def test_list_cupcakes(self):
        """GET /api/cupcakes lists the single seeded cupcake."""
        with app.test_client() as client:
            resp = client.get("/api/cupcakes")
            self.assertEqual(resp.status_code, 200)
            data = resp.json
            self.assertEqual(data, {
                "cupcakes": [
                    {
                        "id": self.cupcake.id,
                        "flavor": "TestFlavor",
                        "size": "TestSize",
                        "rating": 5,
                        "image": "http://test.com/cupcake.jpg"
                    }
                ]
            })
    def test_get_cupcake(self):
        """GET /api/cupcakes/<id> returns the seeded cupcake's full record."""
        with app.test_client() as client:
            url = f"/api/cupcakes/{self.cupcake.id}"
            resp = client.get(url)
            self.assertEqual(resp.status_code, 200)
            data = resp.json
            self.assertEqual(data, {
                "cupcake": {
                    "id": self.cupcake.id,
                    "flavor": "TestFlavor",
                    "size": "TestSize",
                    "rating": 5,
                    "image": "http://test.com/cupcake.jpg"
                }
            })
    def test_create_cupcake(self):
        """POST /api/cupcakes creates a second cupcake and returns 201."""
        with app.test_client() as client:
            url = "/api/cupcakes"
            resp = client.post(url, json=CUPCAKE_DATA_2)
            self.assertEqual(resp.status_code, 201)
            data = resp.json
            # don't know what ID we'll get, make sure it's an int & normalize
            self.assertIsInstance(data['cupcake']['id'], int)
            del data['cupcake']['id']
            self.assertEqual(data, {
                "cupcake": {
                    "flavor": "TestFlavor2",
                    "size": "TestSize2",
                    "rating": 10,
                    "image": "http://test.com/cupcake2.jpg"
                }
            })
            self.assertEqual(Cupcake.query.count(), 2)
    def test_patch_cupcake(self):
        """PATCH /api/cupcakes/<id> updates every field of the seeded cupcake."""
        id = Cupcake.query.filter(Cupcake.flavor == "TestFlavor").one().id
        updatedData = {
            'flavor': 'mutton',
            'size': 'cow',
            'rating': 90000,
            'image': 'http://totallyrealimages.com/api/cupcake/99'
        }
        with app.test_client() as client:
            resp = client.patch(
                f'/api/cupcakes/{id}', json=updatedData)
            data = resp.json['cupcake']
            # The response echoes the update plus the unchanged id.
            updatedData['id'] = id
            self.assertEqual(updatedData, data)
    def test_delete_cupcake(self):
        """DELETE /api/cupcakes/<id> removes the row and confirms deletion."""
        id = Cupcake.query.filter(Cupcake.flavor == "TestFlavor").one().id
        with app.test_client() as client:
            resp = client.delete(f'/api/cupcakes/{id}')
            data = resp.json
            self.assertEqual(data['message'], 'Deleted')
            self.assertEqual(Cupcake.query.filter(Cupcake.id == id).one_or_none(), None)
|
[
"daddarios@dupage.edu"
] |
daddarios@dupage.edu
|
fdf331ca5d16ec70b1db28fa5a376e4f626a6122
|
ee2871e299cb49cbf25e4bc0f4c7f8aa3e5013dd
|
/backend/location/migrations/0002_auto_20190508_1416.py
|
f3dd9d0a04cb4176dc8ac41e4685b3c71535a8d9
|
[
"MIT"
] |
permissive
|
aurma97/gl52
|
d97e22ceb3f71c7c4d4c1531037b49790f6628e5
|
bf55dcf1d13884301a8b112fd21840e55c328661
|
refs/heads/master
| 2023-01-12T13:02:27.988017
| 2019-06-19T06:58:57
| 2019-06-19T06:58:57
| 185,694,686
| 0
| 0
|
NOASSERTION
| 2023-01-03T21:47:24
| 2019-05-08T23:50:30
|
Vue
|
UTF-8
|
Python
| false
| false
| 580
|
py
|
# Generated by Django 2.2.1 on 2019-05-08 14:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add `current_location` to Location and relax `last_location` to a TextField."""

    dependencies = [
        ('location', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='location',
            name='current_location',
            field=models.TextField(default=None, max_length=1000),
        ),
        migrations.AlterField(
            model_name='location',
            name='last_location',
            field=models.TextField(default=None, max_length=1000),
        ),
    ]
|
[
"aurelienmarcel77@gmail.com"
] |
aurelienmarcel77@gmail.com
|
93a3531973b54e2996cf44cdcdef0cb769124c57
|
e8c7419003c5b1ba7890a8719573e386a7505161
|
/InspectData_OppositeVan.py
|
f2122ea23e601635bd637883ebdac98350143659
|
[] |
no_license
|
sietse93/Thesis
|
aeec6ac9522d532b260fd467cc7c9ced2ae94478
|
3e07097444f3041d58baa1597de2fe2ec40e6bff
|
refs/heads/master
| 2020-03-30T12:55:59.974402
| 2019-08-29T15:36:04
| 2019-08-29T15:36:04
| 151,248,606
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,060
|
py
|
import json
from func_Convert2Json import json2crf
from evaluate_pose import *
from class_ScenarioLocationPerformance import *
from matplotlib import pyplot as plt
import pdb
from main_InspectData import InspectJsonFileInDir
from func_EvaluateRpeDist import evaluate_RPE_dist, calc_rmse
import numpy as np
def main():
    """Compare RPE (relative pose error) statistics of ORB runs across three
    scenarios: static vans, vans on the opposite road, and stuck behind a van.
    """
    Town = 1
    SL = 0
    nr_vans = 10
    dist = 10
    # Result directories follow the "T<town>_SL<start>_<suffix>/" convention.
    base_dir_opposite = "/home/sietse/results_carla0.9/VansOppositeRoad/"
    base_dir_stuck = "/home/sietse/results_carla0.9/stuckbehindvan/20fps/"
    dir_name_stat = "T{}_SL{}_s/".format(Town, SL)
    dir_name_opp = "T{}_SL{}_d{}/".format(Town, SL, nr_vans)
    dir_name_stuck = "T{}_SL{}_d{}/".format(Town, SL, dist)
    orb_static, gt = InspectJsonFileInDir(Town, SL, base_dir_stuck, dir_name_stat, "SLAM")
    orb_opposite, gt_opp = InspectJsonFileInDir(Town, SL, base_dir_opposite, dir_name_opp, "SLAM")
    orb_stuck, gt_stuck = InspectJsonFileInDir(Town, SL, base_dir_stuck, dir_name_stuck, "VO")
    # methods = [gt]
    # methods.extend(orb_static)
    # methods.extend(orb_opposite)
    # methods.extend(orb_stuck)
    # evaluate_trajectory(methods)
    static_mean, static_std = RmseRpe(orb_static, gt)
    # NOTE(review): the two calls below compare against the static ground
    # truth `gt`, not `gt_opp`/`gt_stuck` -- confirm this is intended.
    opposite_mean, opposite_std = RmseRpe(orb_opposite, gt)
    stuck_mean, stuck_std = RmseRpe(orb_stuck, gt)
    pdb.set_trace()  # NOTE(review): debugger breakpoint left in; remove for unattended runs
def RmseRpe(ORB, gt):
    """Summarise the relative-pose-error RMSE over a list of ORB runs.

    For each run, the RPE against the ground truth `gt` is evaluated per
    100 distance units and reduced to one translational and one rotational
    RMSE value. Returns ((trans_mean, rot_mean), (trans_std, rot_std)).
    """
    trans_rmses = []
    rot_rmses = []
    for estimate in ORB:
        _, trans_err, rot_err = evaluate_RPE_dist(gt, estimate, 100)
        trans_rmses.append(calc_rmse(trans_err))
        rot_rmses.append(calc_rmse(rot_err))
    trans_arr = np.asarray(trans_rmses)
    rot_arr = np.asarray(rot_rmses)
    RMSE_mean = (trans_arr.mean(), rot_arr.mean())
    RMSE_std = (trans_arr.std(), rot_arr.std())
    return RMSE_mean, RMSE_std
# Script entry point.
if __name__ == "__main__" :
    main()
|
[
"sietsevschouwenburg@gmail.com"
] |
sietsevschouwenburg@gmail.com
|
6a9c76aa136bd381e188b008cf37ae2a0ebd34a9
|
011c0eecd3ce6a33019e2679fd0ce3bd1c79ebed
|
/pass.py
|
7944d40998272eca67efea21da9071a6dc8f6ddc
|
[] |
no_license
|
Shichimenchou/DailyTodo
|
714f5417dff5aa3de743ee60441027174327d065
|
cad58efd29cda7829e846b4436284d34faef7d6e
|
refs/heads/master
| 2023-01-01T15:31:44.764586
| 2020-10-23T04:08:55
| 2020-10-23T04:08:55
| 300,763,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
import sys
import os
from datetime import date
# Report up to three unfinished tasks from today's daily-todo history file,
# printed as a comma-separated line.
today = date.today()
d = today.strftime('%b-%d-%Y')
# History files live under ~/Projects/Dailies/History/, one file per date.
os.chdir(os.path.expanduser('~') + '/Projects/Dailies/')
f = open('History/' + d, 'r')
todo = f.readlines()
t = []
# Each history line is tab-separated; field layout appears to be
# [?, task-name, ?, done-flag] -- TODO confirm against the writer.
for i in todo:
    t.append(i.split('\t'))
count = 0
tring = ''
for i in t:
    if int(i[3]) == 0:  # presumably 0 == not done -- verify
        tring += i[1].strip()
        count += 1
        if count == 3:
            # Stop after three tasks; no trailing comma on the last one.
            break
        tring += ', '
print(tring)
|
[
"linsonphillip@yahoo.com"
] |
linsonphillip@yahoo.com
|
67e26d90470efcf18c46eb9e257377553f5fc6d0
|
8f836e3c4add1af6311abd8c71d517847d29e8f9
|
/python_learning/chapter_07/05_homework_car_rent.py
|
7b66d4d0bf70f4cab6b8030b08a564e391e17394
|
[] |
no_license
|
DanilWH/Python
|
f6282d5aff5d4fa79c1fd0f0108e6c0c3777a485
|
b87319409a94e26faf084c22b1eb6a1d55458282
|
refs/heads/master
| 2021-01-03T21:23:02.305101
| 2020-03-11T16:20:27
| 2020-03-11T16:20:27
| 240,238,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 545
|
py
|
# Exercise solutions for chapter 7 (user input).

# Ask which car the user wants to rent.
car = input("What car will you drive? ")
print("Let's me see if i can find you a " + car.title() + ".")

# Exercise 7-1: restaurant seating -- parties over 8 must wait.
table = input("How many seats do you want to book a table for? ")
table = int(table)
if table > 8:
    print("You'll have to wait!")
else:
    print("Your table is ready!")

# Exercise 7-2: report on the entered number.
number = input("Input number: ")
number = int(number)
# NOTE(review): this condition tests divisibility by 10 while the messages
# say "even"/"odd" -- confirm which behaviour was intended (`number % 2`
# would test evenness).
if number % 10 == 0:
    # BUG FIX: a space was missing before "even."/the message printed as
    # e.g. "Number 10even.".
    print("Number " + str(number) + " even.")
else:
    print("Number " + str(number) + " odd.")

# Exercise 7-3.
|
[
"danil-lomakin-02@mail.ru"
] |
danil-lomakin-02@mail.ru
|
eaf29e83d40cec35b810a8a9c15508e823833c0a
|
c9715623943f02a128e41d68cbed9e0cef403977
|
/Lesson_2/fib.py
|
b7b93260c862d4e14215d1cb5cb4e0f42ea39ce4
|
[] |
no_license
|
uctpythonmlgroup/Intro_to_python
|
ff7a1fffe883d4fb333206f5f4a509256ea5e3c4
|
425cc7ca16392788e53f042e8788bb0a63dfef7a
|
refs/heads/master
| 2021-08-11T07:41:28.587367
| 2017-11-13T10:14:43
| 2017-11-13T10:14:43
| 109,162,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 93
|
py
|
# Print the first 18 Fibonacci numbers, two per loop iteration.
# Invariant at the top of each iteration: min(a, b) and max(a, b) are
# consecutive Fibonacci numbers.
a=1
b=1
for k in range(0,9):
    print(min(a,b))
    print(max(a,b))
    b = a + b
    a=b+a
|
[
"noreply@github.com"
] |
uctpythonmlgroup.noreply@github.com
|
1732ddf41c7cc5d3f9943f743096a79655acd97f
|
b8de48551cbadf1738f3eafeb738b80dbcfc4494
|
/students/YingGuo/lessons/lesson09/assignment/charges_calc_02.py
|
49596e9a7e31aa70e0ac0aa69ed9161bf50defca
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/py220BV201901
|
41a6d4a73109ca4b5b22675972397e7445e6b8bd
|
23ceab6bcb0b9f086d72dfad9b021ac0b53095e8
|
refs/heads/master
| 2020-04-15T19:22:48.976024
| 2019-03-18T14:16:23
| 2019-03-18T14:16:23
| 164,947,759
| 0
| 6
| null | 2019-03-18T14:16:24
| 2019-01-09T22:25:01
|
Python
|
UTF-8
|
Python
| false
| false
| 5,723
|
py
|
'''
Returns total price paid for individual rentals
make logging selective, by using decorators.
Add decorator(s) to introduce conditional logging
so that a single command line variable can turn logging on or off for decorated classes or functions.
'''
import argparse
import json
import datetime
import math
import logging
def parse_cmd_arguments():
    """Read the command line options and return the parsed namespace.

    Options: -i/--input (required input JSON file), -o/--output (required
    output JSON file), -d/--debug (optional log level 0-3, default 0).
    """
    logging.info("argument parse from command line, -i is input file, -o is output file, -d is logging.")
    arg_parser = argparse.ArgumentParser(description='Process some integers.')
    arg_parser.add_argument('-i', '--input', help='input JSON file', required=True)
    arg_parser.add_argument('-o', '--output', help='ouput JSON file', required=True)
    arg_parser.add_argument('-d', '--debug', required=False, help='log level. Can be 0-3. Defaults to 0')
    return arg_parser.parse_args()
def set_logging_level_decorator(func, Level_number=0):
    """
    Configure file + console logging handlers, then return ``func`` unchanged.

    Intended levels:
        0: No debug messages or log file.
        1: Only error messages.
        2: Error messages and warnings.
        3: Error messages, warnings and debug messages.

    NOTE(review): when applied as a bare ``@set_logging_level_decorator``
    only ``func`` is passed, so ``Level_number`` is always its default 0 and
    the handler setup below never runs; ``func`` is also returned without a
    wrapper, so nothing happens per-call, and the -d command line option is
    never routed here. A decorator *factory* taking the level was probably
    intended -- confirm.
    """
    if Level_number == 0:
        return func
    if Level_number != 0:
        # Shared format for both the log file and the console handler.
        log_format = "%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s"
        formatter = logging.Formatter(log_format)
        #log_file = datetime.datetime.now().strftime(“%Y-%m-%d”)+’.log’
        file_handler = logging.FileHandler("charges_calc.log")
        file_handler.setFormatter(formatter)
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        if Level_number == 1:
            file_handler.setLevel(logging.ERROR)
            console_handler.setLevel(logging.ERROR)
        elif Level_number == 2:
            file_handler.setLevel(logging.WARNING)
            console_handler.setLevel(logging.WARNING)
        elif Level_number == 3:
            file_handler.setLevel(logging.DEBUG)
            console_handler.setLevel(logging.DEBUG)
        # Handlers filter by level; the root logger passes everything through.
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)
        return func
@set_logging_level_decorator
def load_rentals_file(filename):
    """Load the rentals JSON file and return the parsed data.

    On malformed JSON the program logs the cause and exits with a non-zero
    status. A missing file still raises OSError, as before.
    """
    logging.info("load json file into data")
    with open(filename) as file:
        try:
            data = json.load(file)
        except json.JSONDecodeError as err:
            # BUG FIX: the original bare `except: exit(0)` swallowed the
            # cause and exited with a SUCCESS status code, masking failures
            # from shells and CI. Log the error and signal failure instead.
            logging.error("Invalid JSON in input file %s: %s", filename, err)
            exit(1)
    return data
@set_logging_level_decorator
def validate_entry(value, index):
    """Return True when rental record `value` is well-formed.

    Checks both dates parse as %m/%d/%y, start <= end, price per day is
    non-negative and units rented is positive; logs a warning naming entry
    `index` and returns False on the first violation.
    """
    try:
        rental_start = datetime.datetime.strptime(value['rental_start'],
                                                  '%m/%d/%y')
    except ValueError:
        # Doubled %% keeps the literal %m/%d/%y out of lazy %-formatting.
        logging.warning('Unable to process entry %d because rental start ' +
                        'is not in %%m/%%d/%%y format. Skipping...', index)
        return False
    try:
        rental_end = datetime.datetime.strptime(value['rental_end'],
                                                '%m/%d/%y')
    except ValueError:
        logging.warning('Unable to process entry %d because rental end ' +
                        'is not in %%m/%%d/%%y format. Skipping...', index)
        return False
    if rental_end < rental_start:
        logging.warning('Unable to process entry %d because ' +
                        'rental start > end. Skipping...', index)
        return False
    if value['price_per_day'] < 0:
        logging.warning('Unable to process entry %d because ' +
                        'price per day is negative. Skipping...', index)
        return False
    if value['units_rented'] <= 0:
        logging.warning('Unable to process entry %d because ' +
                        'units rented is non-positive. Skipping...', index)
        return False
    return True
@set_logging_level_decorator
def calculate_additional_fields(data):
    """Enrich each valid rental record in-place with total_days, total_price,
    sqrt_total_price and unit_cost; invalid entries are skipped with a warning.
    """
    logging.debug('Calculating additional fields for %d entries',
                  len(data.values()))
    for index, value in enumerate(data.values()):
        logging.debug('Processing entry %d with value: %s', index, value)
        try:
            if not validate_entry(value, index):
                continue
            rental_start = datetime.datetime.strptime(value['rental_start'],
                                                      '%m/%d/%y')
            rental_end = datetime.datetime.strptime(value['rental_end'],
                                                    '%m/%d/%y')
            # Rental period counts both endpoints, hence the + 1.
            value['total_days'] = (rental_end - rental_start).days + 1
            value['total_price'] = value['total_days'] * value['price_per_day']
            value['sqrt_total_price'] = math.sqrt(value['total_price'])
            value['unit_cost'] = value['total_price'] / value['units_rented']
        # NOTE(review): bare except hides unexpected bugs behind a generic
        # warning -- consider narrowing to (KeyError, ValueError).
        except:
            logging.warning('Unexpected failure processing entry %d. Skipping',
                            index)
            continue
    return data
@set_logging_level_decorator
def save_to_json(filename, data):
    """Write the processed rental data to `filename` as JSON."""
    logging.info("Write out put to json file")
    with open(filename, 'w') as file:
        json.dump(data, file)
# Script entry point: parse arguments, load, enrich and save the rentals.
if __name__ == "__main__":
    logging.info("Called argment parse function")
    args = parse_cmd_arguments()
    # logging.info("Called logging level function")
    # set_logging_level(args.debug)
    # NOTE(review): args.debug is parsed but never used -- the -d option has
    # no effect because the logging-level hookup above is commented out.
    logging.info("Called load_rentals_file function")
    data = load_rentals_file(args.input)
    logging.info("Called calculate_additional_fields function")
    data = calculate_additional_fields(data)
    logging.info("Called saving output function")
    save_to_json(args.output, data)
|
[
"guoguoying2013@gmail.com"
] |
guoguoying2013@gmail.com
|
43fc8a4ea2cf68653b12fb4c5a54251004fda3a3
|
f4571a321bc392fa778361321c24f026060d39da
|
/src/constants/mattermost_status.py
|
aae3c92de0021ae4d2ddcb4259d20065637a402c
|
[] |
no_license
|
zaantar/gcal-mm-status
|
ea3dc82435e85fa820a1ea268a433db89ed44f8e
|
e37f0ff3c50237e0afab743b435e1eb635ee6297
|
refs/heads/master
| 2020-11-30T05:31:21.625590
| 2020-03-08T17:36:45
| 2020-03-08T17:36:45
| 230,316,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 416
|
py
|
from __future__ import annotations
import enum
class MattermostStatus(enum.Enum):
    """
    Statuses a Mattermost user account can be set to.
    """
    ONLINE = 'online'
    AWAY = 'away'
    DND = 'dnd'
    OFFLINE = 'offline'

    @staticmethod
    def from_string(value: str) -> MattermostStatus:
        """Map a raw status string to a member; unknown strings become ONLINE."""
        for member in MattermostStatus:
            if member.value == value:
                return member
        return MattermostStatus.ONLINE
|
[
"jan.s@icanlocalize.com"
] |
jan.s@icanlocalize.com
|
1a7c19bb17aa7bdc6a88688b9ee0476a0a96361c
|
7d9da7893d33bfbcbb9da98d4a530885cdfc74d3
|
/server/libnetfow/analizator.py
|
8854aaebdf6a4c0aaf813aa9456e9e7e39ea9adb
|
[] |
no_license
|
alex-eri/spot4
|
f01be5426ce8b29ba3ca847b63c7c66423e0f382
|
2611b1e833c1ce7de5a42fb5d437bcabfe15f1ce
|
refs/heads/master
| 2022-12-10T20:28:08.676121
| 2021-10-20T14:20:11
| 2021-10-20T14:20:11
| 63,141,818
| 1
| 0
| null | 2022-12-05T02:22:42
| 2016-07-12T08:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,293
|
py
|
import logging
logger = logging.getLogger('netflow')
debug = logger.debug  # short alias for the module's debug logger
# Traffic direction relative to the account's IP: RX = received, TX = sent.
RX=1
TX=0
async def aggregate_remoteaddr(db, account):
    """Aggregate the account's traffic per remote address over its session.

    Returns {'rx': [...], 'tx': [...]} where each list groups octets, packet
    counts and flow counts by remote address. On an aggregation error, logs
    it and implicitly returns None.
    """
    sensor = account['sensor']
    start = account['start_time']
    end = account['event_time']
    ip = account['ip']
    def group_remoteaddr(direction=TX):
        # Flow records carry millisecond 'first'/'last' timestamps while the
        # account start/end appear to be seconds -- hence the * 1000 with a
        # one-second slack on both ends (TODO confirm units at the source).
        match = {
            'sensor': sensor,
            'first': {'$lte': (end+1) * 1000 },
            'last': {'$gte': (start-1) * 1000 }
        }
        # TX: flows originating at the account's IP, grouped by destination;
        # RX: flows addressed to it, grouped by source.
        if direction == TX:
            match['srcaddr']=ip
            remote = '$dstaddr'
        else:
            match['dstaddr']=ip
            remote = '$srcaddr'
        pipe = [
            {
                '$match': match
            },
            {
                '$group': { '_id': {'remote': remote} ,
                    'octets' : { '$sum' : '$dOctets' },
                    'pkts': { '$sum' : '$dPkts' },
                    'flows': { '$sum': 1 }
                }
            }
        ]
        debug(pipe)
        return db.collector.aggregate( pipe )
    try:
        rxc = group_remoteaddr(RX)
        txc = group_remoteaddr(TX)
        # NOTE(review): to_list() without a length argument needs a recent
        # motor; older versions required to_list(length=None) -- confirm.
        return {
            'rx': await rxc.to_list(),
            'tx': await txc.to_list(),
        }
    except Exception as e:
        # NOTE(review): failures are logged and swallowed; the caller then
        # receives None rather than a dict.
        logger.error(e)
|
[
"alex-eri@ya.ru"
] |
alex-eri@ya.ru
|
daf9645ae49b6bd310089aaeaba47b2468dbfa18
|
a1f74832850a512312b418c9f1c8e4a50d1c6b8f
|
/john_lam_folder/Loops_challenge.py
|
e701e15e9eaf38301e3e0847f2f59cff73f77b00
|
[] |
no_license
|
aryndavis/pod1_test_repo
|
5a0250ed872c95125b078b19e82002c2a0c83907
|
9fe305ef8ab5d2904eca0162539a23c514289e6c
|
refs/heads/main
| 2023-03-27T05:21:37.035545
| 2021-03-23T22:59:26
| 2021-03-23T22:59:26
| 333,952,446
| 0
| 1
| null | 2021-03-25T03:39:48
| 2021-01-28T21:14:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,018
|
py
|
# You run a startup media company called Ripple Media.
# When you hire a new employee you set up an email id for them in the
# format: Ash Rahman -> ash.rahman@ripplemedia.com
print('Question 1')

employee_name = 'Ash Rahman'

# 1.1 Lowercase version of the employee's name.
lower_name = employee_name.lower()
print(lower_name)

# 1.2 Separate the first name and last name.
# BUG FIX: split the lowercased name, not the original `employee_name`;
# splitting the original produced "Ash.Rahman@ripplemedia.com" instead of
# the required all-lowercase "ash.rahman@ripplemedia.com".
names_list = lower_name.split(" ")
print(names_list)

# 1.3 Join the first name and last name with a '.'.
joined_names = '.'.join(names_list)
print(joined_names)

# 1.4 Append the company domain to form the final address.
email = joined_names + "@ripplemedia.com"
print(email)

print('Question 2')
# Congratulations! Your team is expanding. Below is a list of their names:
names = ['Max Bartlett', 'Angelita Norris', 'Stewart Mueller', 'Dominique Henry', 'Carmela Gross', 'Bettie Mcmillan', 'Sara Ellison', 'Ira Anthony', 'Pauline Riley', 'Ben Weber',
         'Joanne Mcknight', 'Loren Gould', 'Jamar Singh', 'Amanda Vance', 'Tyrell Andrade', 'Jana Clements', 'Eddy Mcbride', 'Marsha Meyer', 'Elbert Shannon', 'Alyce Hull']
emails = []
# 2.1-2.3: build an email id for every name by re-using the Question 1 logic
# and collect the results.
for name in names:
    lower_name = name.lower()
    names_list = lower_name.split(" ")
    joined_names = '.'.join(names_list)
    email = joined_names + "@ripplemedia.com"
    emails.append(email)
print(emails)
|
[
"johnvenhlam@gmail.com"
] |
johnvenhlam@gmail.com
|
bd76d6ead403f7af79152854fe8b102f29a3117f
|
30115fa73113b1f24168b797fc3baa360fd43474
|
/venv/lib/python3.6/site-packages/moto/secretsmanager/responses.py
|
c50c6a6e1422b302c815501ca7f3c6617ea52c1c
|
[
"MIT"
] |
permissive
|
Yugandhar445/new
|
43a3282e3b0333c105e8ebc383eef337e1501777
|
d46018cae017e68e34f1150b455bffc4af34d369
|
refs/heads/master
| 2020-12-20T08:04:45.225152
| 2020-01-24T13:46:02
| 2020-01-24T13:46:02
| 236,007,538
| 0
| 1
|
MIT
| 2020-07-24T18:14:05
| 2020-01-24T13:27:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import secretsmanager_backends
class SecretsManagerResponse(BaseResponse):
    """HTTP-layer handlers for the mocked AWS Secrets Manager API.

    Each handler reads its parameters from the request via ``self._get_param``
    and delegates to the per-region backend in ``secretsmanager_backends``.
    """
    def get_secret_value(self):
        """Return the secret value for SecretId (optionally a given version/stage)."""
        secret_id = self._get_param('SecretId')
        version_id = self._get_param('VersionId')
        version_stage = self._get_param('VersionStage')
        return secretsmanager_backends[self.region].get_secret_value(
            secret_id=secret_id,
            version_id=version_id,
            version_stage=version_stage)
    def create_secret(self):
        """Create a secret with the given Name and SecretString."""
        name = self._get_param('Name')
        secret_string = self._get_param('SecretString')
        return secretsmanager_backends[self.region].create_secret(
            name=name,
            secret_string=secret_string
        )
    def get_random_password(self):
        """Generate a random password honouring the request's length/exclusion flags."""
        password_length = self._get_param('PasswordLength', if_none=32)
        exclude_characters = self._get_param('ExcludeCharacters', if_none='')
        exclude_numbers = self._get_param('ExcludeNumbers', if_none=False)
        exclude_punctuation = self._get_param('ExcludePunctuation', if_none=False)
        exclude_uppercase = self._get_param('ExcludeUppercase', if_none=False)
        exclude_lowercase = self._get_param('ExcludeLowercase', if_none=False)
        include_space = self._get_param('IncludeSpace', if_none=False)
        require_each_included_type = self._get_param(
            'RequireEachIncludedType', if_none=True)
        return secretsmanager_backends[self.region].get_random_password(
            password_length=password_length,
            exclude_characters=exclude_characters,
            exclude_numbers=exclude_numbers,
            exclude_punctuation=exclude_punctuation,
            exclude_uppercase=exclude_uppercase,
            exclude_lowercase=exclude_lowercase,
            include_space=include_space,
            require_each_included_type=require_each_included_type
        )
    def describe_secret(self):
        """Return metadata for the secret identified by SecretId."""
        secret_id = self._get_param('SecretId')
        return secretsmanager_backends[self.region].describe_secret(
            secret_id=secret_id
        )
|
[
"yugandhar445@gmail.com"
] |
yugandhar445@gmail.com
|
568687cb6048f79a68d212cad5e6e087138109a1
|
0a8ff6940e22ea1118fefafa153496406c4009a7
|
/solucion_usando_verlet.py
|
d0c65a814c457b13934acb26e5adcb521cedfed3
|
[
"MIT"
] |
permissive
|
FernandaPerezV/04Tarea
|
4828d01930b89930a352e86ba582f50b0a7729c9
|
2a542b96631836f7abd11c4beed44ad943f30e91
|
refs/heads/master
| 2021-01-17T10:41:00.270058
| 2015-10-21T00:12:31
| 2015-10-21T00:12:31
| 44,348,399
| 0
| 0
| null | 2015-10-15T22:09:40
| 2015-10-15T22:09:40
| null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
'''
Este script utiliza la clase planeta para integrar la trayectoria de
aproximadamente 5 orbitas con el metodo de verlet, graficando la trayectoria
y la energia vs tiempo en cada momento. Grafica dos veces energia vs tiempo
con el fin de dejarlo en dos escalas: una de cerca para ver a forma de la
funcion y otra mas de lejos para poder comparar con otros metodos menos
eficientes. Utiliza alpha=0 en el potencial.
'''
from planeta import Planeta
import numpy as np
import matplotlib.pyplot as plt
# Initial condition [x, y, vx, vy] for the orbit.
condicion_inicial = [10, 0, 0, 0.4]
p = Planeta(condicion_inicial)

# Integrate 4000 time units in N_steps Verlet steps.
N_steps = 8000
dt = 4000. / N_steps
t = np.linspace(0, 4000, N_steps)

# Arrays where the trajectory and the energy will be stored.
x = np.zeros(N_steps)
y = np.zeros(N_steps)
vx = np.zeros(N_steps)  # NOTE(review): never filled below; kept for compatibility
vy = np.zeros(N_steps)  # NOTE(review): never filled below; kept for compatibility
energia = np.zeros(N_steps)
x[0] = 10
y[0] = 0
energia[0] = p.energia_actual

# March the orbit forward, recording position and total energy each step.
for i in range(1, N_steps):
    p.avanza_verlet(dt)
    x[i] = p.y_actual[0]
    y[i] = p.y_actual[1]
    p.energia_total()
    energia[i] = p.energia_actual

# Figure layout: trajectory (top), energy vs time (middle), zoomed energy
# (bottom) for comparison with less efficient integrators.
fig = plt.figure(1)
fig.clf()
ax1 = fig.add_subplot(311)
plt.suptitle('Trayectoria y energia vs tiempo con $v_{y}(t=0)=0.4$ y ' r'$\alpha=0$ (Verlet)')
fig.subplots_adjust(hspace=.3)
ax1.plot(x, y)
ax1.set_xlim(-45, 15)
ax1.grid(True)
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax2 = fig.add_subplot(312)
ax2.plot(t, energia)
ax2.grid(True)
ax2.set_xlabel('tiempo')
ax2.set_ylabel('energia')
fig.subplots_adjust(hspace=.5)
ax3 = fig.add_subplot(313)
ax3.plot(t, energia)
ax3.grid(True)
ax3.set_xlabel('tiempo')
ax3.set_ylabel('energia')
# Zoom in to show the bounded energy oscillation of the Verlet integrator.
ax3.set_ylim(-0.03, -0.01)
plt.draw()
# BUG FIX: save the figure BEFORE plt.show(); with interactive backends the
# figure is torn down when the window closes, so saving afterwards produced
# a blank verlet.png.
plt.savefig('verlet.png')
plt.show()
|
[
"fer20mc@gmail.com"
] |
fer20mc@gmail.com
|
66946768c93316b60069c1afc10b1b5790bacf50
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Numerical_Eng_Python/goldSearch.py
|
8c5202bce215850addb3cc3ad6d9c493f9b1402d
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
## module goldSearch
''' a,b = bracket(f,xStart,h)
Finds the brackets (a,b) of a minimum point of the
user-supplied scalar function f(x).
The search starts downhill from xStart with a step
length h.
x,fMin = search(f,a,b,tol=1.0e-6)
Golden section method for determining x that minimizes
the user-supplied scalar function f(x).
The minimum must be bracketed in (a,b).
'''
from math import log, ceil
def bracket(f,x1,h):
    """Find a bracket (a, b) containing a minimum of f.

    The search starts at x1 and walks downhill with a step that grows by the
    golden ratio each iteration. Prints a message and returns None if no
    minimum is bracketed within 100 steps.
    """
    c = 1.618033989  # golden ratio: step magnification factor
    f1 = f(x1)
    x2 = x1 + h; f2 = f(x2)
    # Determine downhill direction and change sign of h if needed
    if f2 > f1:
        h = -h
        x2 = x1 + h; f2 = f(x2)
        # Check if minimum between x1 - h and x1 + h
        if f2 > f1: return x2,x1 - h
    # Search loop
    for i in range (100):
        h = c*h
        x3 = x2 + h; f3 = f(x3)
        if f3 > f2: return x1,x3
        x1 = x2; x2 = x3
        f1 = f2; f2 = f3
    # BUG FIX: converted the Python 2 print statement to the parenthesized
    # form (valid on both Python 2 and 3) and fixed the "mimimum" typo.
    print("Bracket did not find a minimum")
def search(f,a,b,tol=1.0e-9):
    """Golden section search for the minimizer of f bracketed in (a, b).

    Returns (x, f(x)) where x locates the minimum to within tol.
    """
    # Number of telescoping steps needed to shrink |b - a| below tol,
    # Eq. (10.4). BUG FIX: math.ceil returns a float on Python 2, and
    # range() rejects floats on Python 3 -- cast to int explicitly.
    nIter = int(ceil(-2.078087*log(tol/abs(b-a))))
    R = 0.618033989  # golden ratio conjugate
    C = 1.0 - R
    # First telescoping
    x1 = R*a + C*b; x2 = C*a + R*b
    f1 = f(x1); f2 = f(x2)
    # Main loop: keep the sub-interval that contains the smaller value.
    for i in range(nIter):
        if f1 > f2:
            a = x1
            x1 = x2; f1 = f2
            x2 = C*a + R*b; f2 = f(x2)
        else:
            b = x2
            x2 = x1; f2 = f1
            x1 = R*a + C*b; f1 = f(x1)
    if f1 < f2: return x1,f1
    else: return x2,f2
|
[
"bb@b.om"
] |
bb@b.om
|
deaa7e28eef166b4cf7fd73fc4075a4b0a7d9d97
|
2010641ee39d6796dc74ed013260d96621e7ca71
|
/DDQN_Sell/m3/trade_env.py
|
043de1e5310508e4a7d49ce62d9466fdb7cc38a2
|
[] |
no_license
|
citymap/RLGAN_Trade
|
2328bc8b6ffacbbea2464b04e088e9c1434ce358
|
f2e1045ae499a8a9e11912d4966f99b63101e642
|
refs/heads/master
| 2020-07-27T19:25:04.847193
| 2018-09-14T15:56:55
| 2018-09-14T15:56:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,186
|
py
|
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
import lib.dblib as db
import matplotlib.pyplot as plt
RUN_DATE_COUNT = 300 # number of past days of data to learn from
WINDOWWIDTH = 300 # screen width (also the max time steps per trading day)
# Cross-episode report accumulators, mutated by TradeEnv.step via `global`.
lose_cnt=[]
win_cnt=[]
point_list=[]
profit=[]
points=0
lose=0
win=0
class TradeEnv(gym.Env):
    """Gym environment that replays pre-rendered market data (via lib.dblib)
    and rewards a binary sell/hold decision; plots per-day and cumulative
    profit reports as PNG files as a side effect of step().
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 5000
    }
    def __init__(self,mode='train'):
        # `mode` selects the train/test data split inside db.TradeImg.
        self.mode=mode
        self.Trade=db.TradeImg(RUN_DATE_COUNT,mode=self.mode)
        self.Trade.prepare_data()
        self.DateList=self.Trade.DateList
        self.Date=""
        self.runGame()
        # Two discrete actions: 0 = do nothing, 1 = place a (sell) order.
        self.action_space = spaces.Discrete(2)
        self.observation_space = self.Trade.GetData(self.DateList[0],0) #spaces.Box(-high, high) #np.array([1,2,3,4,5])
        self.state = self.Trade.GetData(self.DateList[0],0)
        self.seed()
        self.viewer = None
    def runGame(self):
        """Reset per-episode bookkeeping (timer, indices, price buffers)."""
        self.game_time=db.timer()
        self.DateIndex=0
        self.TimeIndex=0
        self.Units=[]
        self.Price=[]
        self.SellStopLose=[]
    def process_action(self,action):
        """Return (terminal, reward) for `action` at the current time index.

        NOTE(review): assumes db.END_K_INDEX leaves at least 30 bars of
        headroom for the TimeIndex+30 lookahead -- confirm.
        """
        terminal=True
        reward=0
        if len(self.Price)>=db.END_K_INDEX : # skip days without enough K bars (candlesticks)
            end_price=self.Price[self.TimeIndex+30] # settlement price 60 minutes later (30 bars)
            now_price=self.Price[self.TimeIndex]
            if action==0:
                terminal=(self.TimeIndex>=db.END_K_INDEX)
                # if self.TimeIndex>=db.END_K_INDEX: # penalty for not placing an order
                # reward=-db.STOP_LOSE
            elif action==1:
                # A sell position: any rise of STOP_LOSE or more within the
                # next 30 bars triggers the stop loss.
                stoplose=0
                for i in range (self.TimeIndex,self.TimeIndex+30):
                    if self.Price[i]-now_price>=db.STOP_LOSE:
                        stoplose=self.Price[i]-now_price
                        break
                if stoplose>0:
                    # reward=-self.SellStopLose[self.TimeIndex]-db.TRADE_LOSE
                    reward=-stoplose-db.TRADE_LOSE
                else:
                    reward=-(end_price-now_price)-db.TRADE_LOSE
        return terminal,reward
    def seed(self, seed=None):
        """Seed the environment's RNG (gym convention)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """Advance one time step; rolls over to the next trading day when the
        window is exhausted or the episode terminates."""
        self.Date=self.DateList[self.DateIndex]
        self.Price=self.Trade.GetPrice(self.Date)
        self.SellStopLose=self.Trade.GetSellStopLose(self.Date)
        self.state=self.Trade.GetData(self.DateList[self.DateIndex],self.TimeIndex)
        #print(action)
        terminal,reward=self.process_action(action)
        self.Units.append(self.Price[self.TimeIndex])
        self.TimeIndex+=1
        if self.TimeIndex>=WINDOWWIDTH or terminal==True :
            # Move on to the next day; wrap around at the end of the list.
            self.DateIndex+=1
            self.TimeIndex=0
            if self.DateIndex>=len(self.DateList):
                self.DateIndex=0
        if terminal:
            ######################## output single-day report ########################
            #print(self.Date,"reward:",reward,self.game_time.spendtime("time"))
            plt.cla()
            plt.plot(self.Price,"g")
            plt.plot(self.Units,"b")
            plt.savefig("trade.png")
            plt.cla()
            #plt.show();
            ################################################################
            self.Units=[]
            self.Price=[]
            self.SellStopLose=[]
            self.game_time=db.timer()
            self.state=self.Trade.GetData(self.DateList[0],0)
        ######################## output full report ########################
        # Accumulate win/loss statistics in module-level globals; a full pass
        # over the date list flushes the report plots and resets the counters.
        global points,profit,win,lose,lose_cnt,win_cnt,point_list
        if terminal==True:
            points+=reward
            profit.append(points)
            if reward>0: win+=1
            elif reward<0: lose+=1
            if self.DateIndex == 0 and lose!=0 and win!=0:
                lose_cnt.append(lose)
                win_cnt.append(win)
                point_list.append(points)
                plt.plot(lose_cnt,"g");plt.plot(win_cnt,"r");plt.savefig("profit_1.png");plt.show();
                plt.plot(point_list,"b");plt.savefig("profit_2.png");plt.show();
                plt.plot(profit,"c");plt.savefig("profit_3.png");plt.show();
                lose=0
                win=0
                points=0
                profit=[]
        ################################################################
        return np.array(self.state), reward, terminal, {}
    def reset(self):
        """Return the observation for the first date/time step.

        NOTE(review): does not reset DateIndex/TimeIndex (runGame call is
        commented out), so stepping resumes where the last episode left off.
        """
        #self.runGame()
        self.state = self.Trade.GetData(self.DateList[0],0)
        return self.state
    def render(self, mode='human'):
        """Render via gym's classic-control viewer (created lazily)."""
        screen_width = 600
        screen_height = 400
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
        if self.state is None: return None
        return self.viewer.render(return_rgb_array = mode=='rgb_array')
    def close(self):
        """Dispose of the viewer window, if one was created."""
        if self.viewer: self.viewer.close()
|
[
"Geniustom@gmail.com"
] |
Geniustom@gmail.com
|
64c03040122c2b32331a217bc57f2efe4b61298b
|
127373716feed1df726f45d83db9c896834102a8
|
/src-py/jobber/runners.py
|
b31c042a6330a6e5f859af31d99240ad5d8104ce
|
[] |
no_license
|
immanetize/jobber
|
4c41b46be85ad31a962a02c0f6f7cd1c00b3acc1
|
34db36a63b7212b5a105b78de1ae988e5d8aa788
|
refs/heads/master
| 2020-09-12T05:18:18.267103
| 2019-11-17T22:58:15
| 2019-11-17T22:58:15
| 222,321,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
import argparse
import os
import re
import configparser
def test_included_function(positional, extra):
    """Echo the received positional and extra arguments, one per line.

    Demo callable used by the jobber runner to show argument plumbing.
    """
    print("Your positional arg was:")
    print(f" {positional}")
    print("Your extra arg was:")
    print(f" {extra}")
|
[
"immanetize@fedoraproject.org"
] |
immanetize@fedoraproject.org
|
01fc8cc94a55039be167ca32c48430ee164cedde
|
d532b85841b459c61d88d380e88dd08d29836d43
|
/solutions/173_binary_search_tree_iterator.py
|
da29a2dc51a3ae69bb060f3e154484013f1638cc
|
[
"MIT"
] |
permissive
|
YiqunPeng/leetcode_pro
|
ad942468df5506de9dc48a4019933f658e2a3121
|
4a508a982b125a3a90ea893ae70863df7c99cc70
|
refs/heads/master
| 2022-05-15T09:32:02.699180
| 2022-05-14T16:32:17
| 2022-05-14T16:32:17
| 182,453,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
class BSTIterator:
    """In-order iterator over a binary search tree (LeetCode 173).

    Maintains an explicit stack holding the leftmost spine of the
    not-yet-visited subtree, giving O(h) space and amortised O(1) next().
    """

    def __init__(self, root: "TreeNode"):
        """Push the leftmost spine of *root* onto the stack.

        The annotation is a forward reference so the class can be imported
        without TreeNode being defined first.
        """
        self.st = []
        while root:
            self.st.append(root)
            root = root.left

    def next(self) -> int:
        """Return the next-smallest value; assumes hasNext() is true."""
        node = self.st.pop()
        res = node.val
        # The in-order successor lives in the popped node's right subtree:
        # push that subtree's leftmost spine for subsequent calls.
        node = node.right
        while node:
            self.st.append(node)
            node = node.left
        return res

    def hasNext(self) -> bool:
        # BUG FIX: the original returned the stack list itself, violating
        # the declared ``-> bool`` return type; return an actual bool.
        return len(self.st) > 0
|
[
"ypeng1@andrew.cmu.edu"
] |
ypeng1@andrew.cmu.edu
|
b371a7a7a040e7ebd7765a0db3fa98ecc7a75150
|
4ac1ead85fa677320a4a31f2c3fc9187066c7e60
|
/src/DSGRN/Query/NstableQuery.py
|
8bed46c3b696a96b7d5f063eb626d4a92062a559
|
[
"MIT"
] |
permissive
|
marciogameiro/DSGRN
|
bdca3f88403c0240d90130f43af89cd8f80bf202
|
47163f4cf3c5ec63972e44b63ec561091ac6f06f
|
refs/heads/master
| 2023-01-10T08:34:44.617331
| 2023-01-02T04:59:58
| 2023-01-02T04:59:58
| 182,584,073
| 2
| 8
|
MIT
| 2021-03-10T03:16:05
| 2019-04-21T21:26:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
# NstableQuery.py
# Bree Cummins
# Edited from
# MultistableQuery.py
# MIT LICENSE 2016
# Shaun Harker
class NstableQuery:
    """
    NstableQuery

    Query to check whether a Morse graph index corresponds to a Morse
    graph with at least N minimal fixed points.  The matching set is
    computed once in __init__ and cached on the database object.
    """
    def __init__(self, database, N):
        self.database = database
        cursor = database.conn.cursor()
        # Count, per Morse graph, the 'FP…' vertices with no outgoing edge
        # (minimal fixed points) and keep graphs with more than N-1 of them.
        sqlexpression = "select MorseGraphIndex from (select MorseGraphIndex, count(*) as StableCount from (select MorseGraphIndex,Vertex from MorseGraphAnnotations where Label like 'FP%' except select MorseGraphIndex,Source from MorseGraphEdges) group by MorseGraphIndex) where StableCount>{};".format(N - 1)
        database.NQuery = frozenset(row[0] for row in cursor.execute(sqlexpression))

    def matches(self):
        """
        Return entire set of matches
        """
        return self.database.NQuery

    def __call__(self, morsegraphindex):
        """
        Test if a single mgi is in the set of matches
        """
        found = morsegraphindex in self.database.NQuery
        return found
|
[
"breecummins@gmail.com"
] |
breecummins@gmail.com
|
d8e00a7937baf084fbcb310af4bfb6344cd6093d
|
e22973173e823d48eb94766754f04e4a486ab764
|
/pages/urls.py
|
867aff122100b7f1365463294b9fa2879f29b8ef
|
[] |
no_license
|
ENADSOFT/realestate
|
6b11f65f9c77c46806e544d5693d2eef0f865853
|
de7d52fd197d8303263056a43f227424399382ac
|
refs/heads/main
| 2023-01-03T04:37:30.586769
| 2020-10-13T12:00:08
| 2020-10-13T12:00:08
| 303,678,667
| 0
| 0
| null | 2020-10-13T12:00:09
| 2020-10-13T11:20:15
| null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
from django.urls import path
from . import views

# URL routes for the pages app: site index and the course listing page.
urlpatterns = [
    path('', views.index, name='index'),
    path('courses',views.courses,name='courses'),
]
|
[
"adeboyeamos7@gmail.com"
] |
adeboyeamos7@gmail.com
|
3ee76fd83f988252ce9429816f54fa2e50372b0b
|
d59fce89fd7e7e93e0f8ebfe677e8060e5c2144a
|
/loops/string_iterate.py
|
2f2d5ccb4e91af570a989c04273030e64cb4cf0f
|
[
"MIT"
] |
permissive
|
Rhoynar/pysel
|
a7cdb91063270c1f41fb88661702b0127c009931
|
7a283cfdcaea3b1e33e615d1e655b6bd1f23f09f
|
refs/heads/master
| 2021-05-07T03:38:22.376371
| 2017-12-01T22:19:02
| 2017-12-01T22:19:02
| 110,787,353
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 88
|
py
|
# Demonstrate that a Python string is iterable: emit one letter per line.
alphabets = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Joining with newlines produces exactly the same output as printing each
# character in its own print() call.
print("\n".join(alphabets))
|
[
"harsh@rhoynar.com"
] |
harsh@rhoynar.com
|
b0eca7988058e2bf2c3dfda77e98548675ecd863
|
122b2a025a491fea990e1834249276fbc2a80c16
|
/lab2/modules/number_printer.py
|
4baf43a6b317a188ac6e6f08c7145307a310fd0d
|
[] |
no_license
|
markiiankyselytsia/devops-labs
|
62ac83ff1eed2d42dc1a99693320a61712f1304f
|
990c1fc6b9ea95cfd7dd194c186973ed4288320e
|
refs/heads/main
| 2023-02-17T07:33:39.665031
| 2021-01-18T20:24:24
| 2021-01-18T20:24:24
| 308,987,179
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 276
|
py
|
import logging
def print_nums(is_even):
    """Print the even numbers in [1, 100] when *is_even* is true, else the odd ones."""
    start = 2 if is_even else 1
    print(list(range(start, 101, 2)))
def inverse(num):
    """Print the reciprocal of *num*; log an info message when it is zero."""
    try:
        result = 1 / num
    except ZeroDivisionError:
        # Message (Ukrainian): "The number cannot be 0".
        logging.getLogger().info('Число не може бути 0')
    else:
        print(result)
|
[
"noreply@github.com"
] |
markiiankyselytsia.noreply@github.com
|
d59f7f06c3e1d1f3a9b01ea03a2e891f2c561325
|
e62096aab55170f3041c0b05eeca837a64c9c8f3
|
/base/day3/迭代.py
|
a6494327d807701cac644aebe0ecc5718d22f1eb
|
[] |
no_license
|
dianligegege/learn-Python
|
b8850d52cede9b9ec582c84a2fed6d66624cbc07
|
60f4bc61ab111574ee48a2d81f45ecec0d6845bb
|
refs/heads/master
| 2020-04-19T10:07:01.150348
| 2019-04-08T11:31:18
| 2019-04-08T11:31:18
| 168,130,739
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
from collections.abc import Iterable
# Iteration demos: a dict iterates over its keys; .values() / .items()
# yield values and (key, value) pairs; enumerate pairs indexes with items.
d = {'a':1,'b':2,'c':3}
print(d.items())
for k in d:
    print(k)
for v in d.values():
    print(v)
for k,v in d.items():
    print(k,v)
for i in d.items():
    # each item is a (key, value) tuple, itself iterable
    print(i)
    print(isinstance(i, Iterable))
for i, value in enumerate(['a','b','c']):
    print(i,value)
# Exercise: return the smallest and largest element of a sequence.
def fm(l):
    """Return a (minimum, maximum) tuple for the non-empty sequence *l*.

    Uses the built-in min()/max() instead of the original hand-rolled
    scan, which shadowed the ``min`` and ``max`` builtins with locals.
    Raises ValueError on an empty sequence (the original raised
    IndexError, i.e. it also failed).
    """
    return (min(l), max(l))
# Quick check of fm() on a sample list -> prints (1, 22)
l = [1,3,2,5,22]
print(fm(l))
|
[
"zhangli@huobi.com"
] |
zhangli@huobi.com
|
dfa4b91b68f7ef3f9200edc023eb807dc582bfc3
|
942d9826bb58f917bf1ad2988f184822bf180859
|
/lang/lexer.py
|
8b0abc5ae728dc76556accc21fed31c9ed8ed2ee
|
[] |
no_license
|
andars/lang
|
f1d03cdae67757b908b8e2edd6abf6fe772195ab
|
aab861ff6f2d33826367ee78fcc97d731b2f33ce
|
refs/heads/master
| 2021-01-19T11:37:26.180226
| 2014-04-27T21:50:35
| 2014-04-27T21:50:35
| 19,206,965
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
import sys
import re
from .tokens import token_expressions
from .token import Token
class Lexer():
    """Regex-driven tokenizer.

    Scans ``content`` from ``offset``, trying each (pattern, flavor) pair
    from token_expressions in order; emits Token objects via next_token().
    """
    def __init__(self, content):
        self.content = content   # full source text being tokenized
        self.offset = 0;         # current scan position within content
    #Save and Load allow for the parser to look ahead and move back
    def save(self):
        return [self.offset]
    def load(self, state):
        self.offset = state[0]
    def next_token(self):
        """Return the next Token, or None at end of input.

        Matches with a falsy flavor (whitespace/comments) are consumed and
        skipped by recursing.  An unmatched character prints an error and
        exits the process.
        """
        #print 'looking for token at:', self.offset
        match = None
        token = None
        for expression in token_expressions:
            pattern, flavor = expression
            #print pattern, flavor
            regex = re.compile(pattern)
            match = regex.match(self.content, self.offset)
            if match:
                #print "matched"
                text = match.group(0)
                #print text
                self.offset = match.end(0)
                if flavor:
                    token = Token(flavor, text)
                    return token
                else: #ignore whitespace / comments and return real next token
                    #print 'skipping whitespace'
                    # NOTE(review): offset was already advanced above; this
                    # re-assignment is redundant.  The recursion can also get
                    # deep on long skipped runs - an iterative loop would be
                    # safer.
                    self.offset = match.end(0)
                    return self.next_token()
                break  # unreachable: both branches above return
        # No pattern matched at the current offset.
        if self.offset >= len(self.content):
            print('end of input')
            return None
        if not match:
            print(("error: illegal character - {0}".format(self.content[self.offset]), ord(self.content[self.offset])))
            sys.exit(0)
|
[
"afoote97@gmail.com"
] |
afoote97@gmail.com
|
4dc9f22cd8a24ac318191db8404a75f6b608c804
|
fa04b859e04c315a7c64bf435b2b8a0156083727
|
/OldResearchAndDevelopment/aw-ml-algorithm/online_examples/aiml/recurrent_nn_tutorial/lstm_tutorial.py
|
9ce8c3f3ca95d46374008800a3c2e4cfd7a9dea6
|
[] |
no_license
|
awoziji/olympian-biotech
|
0923ccec23b387456800e5259e6e0f12a5fe5d57
|
39ec0c89fdf180d6eacb3342e9a73c4a04225330
|
refs/heads/master
| 2021-04-20T16:19:37.593415
| 2018-06-27T01:14:55
| 2018-06-27T01:14:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,547
|
py
|
import tensorflow as tf
import numpy as np
import collections
import os
import argparse
import datetime as dt
"""To run this code, you'll need to first download and extract the text dataset
from here: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz. Change the
data_path variable below to your local exraction path"""
# Local extraction path of the PTB dataset (see the download note above).
# NOTE(review): machine-specific absolute Windows path; override with --data_path.
data_path = "C:\\Users\\apsw\\ThirdYearGroupProjectGithubRepo\\olympian-biotech\\aw-ml-algorithm\\online_examples\\aiml\\recurrent_nn_tutorial\\data-old\\simple-examples\\data"
parser = argparse.ArgumentParser()
parser.add_argument('run_opt', type=int, default=1, help='An integer: 1 to train, 2 to test')
parser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')
args = parser.parse_args()
def read_words(filename):
    """Read *filename* and return its whitespace-split tokens.

    Each newline is replaced by the special <eos> (end-of-sentence) token
    before splitting, so sentence boundaries survive tokenization.
    """
    with tf.gfile.GFile(filename, "r") as f:
        #return f.read().decode("utf-8").replace("\n", "<eos>").split()
        return f.read().replace("\n", "<eos>").split()
def build_vocab(filename):
    """Map each distinct word in *filename* to an integer id.

    Ids are assigned by descending frequency, with alphabetical order as
    the tie-breaker, so the mapping is deterministic across runs.
    """
    counts = collections.Counter(read_words(filename))
    ordered = sorted(counts.items(), key=lambda item: (-item[1], item[0]))
    return {word: index for index, (word, _) in enumerate(ordered)}
def file_to_word_ids(filename, word_to_id):
    """Translate the file's words into vocabulary ids.

    Words missing from *word_to_id* are silently skipped rather than
    raising KeyError.
    """
    ids = []
    for token in read_words(filename):
        if token in word_to_id:
            ids.append(word_to_id[token])
    return ids
def load_data():
    """Load the PTB train/valid/test splits as lists of word ids.

    Uses the module-level ``data_path``.  Returns
    (train_data, valid_data, test_data, vocabulary_size, id_to_word_dict).
    """
    # get the data paths
    train_path = os.path.join(data_path, "ptb.train.txt")
    valid_path = os.path.join(data_path, "ptb.valid.txt")
    test_path = os.path.join(data_path, "ptb.test.txt")
    # build the complete vocabulary, then convert text data to list of integers
    word_to_id = build_vocab(train_path)
    train_data = file_to_word_ids(train_path, word_to_id)
    valid_data = file_to_word_ids(valid_path, word_to_id)
    test_data = file_to_word_ids(test_path, word_to_id)
    vocabulary = len(word_to_id)
    # invert the vocabulary so ids can be rendered back into words
    reversed_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
    # debug output: sample ids, the vocabulary, and a decoded snippet
    print(train_data[:5])
    print(word_to_id)
    print(vocabulary)
    print(" ".join([reversed_dictionary[x] for x in train_data[:10]]))
    return train_data, valid_data, test_data, vocabulary, reversed_dictionary
def batch_producer(raw_data, batch_size, num_steps):
    """Produce queued (x, y) minibatch tensors from the id stream.

    y is x shifted one position ahead (next-word targets).  A TF queue
    (range_input_producer) cycles through the epoch's batch positions.
    """
    raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
    data_len = tf.size(raw_data)
    batch_len = data_len // batch_size
    # drop the tail so the data reshapes cleanly into batch_size rows
    data = tf.reshape(raw_data[0: batch_size * batch_len],
                      [batch_size, batch_len])
    epoch_size = (batch_len - 1) // num_steps
    i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
    x = data[:, i * num_steps:(i + 1) * num_steps]
    x.set_shape([batch_size, num_steps])
    # targets: same window shifted right by one token
    y = data[:, i * num_steps + 1: (i + 1) * num_steps + 1]
    y.set_shape([batch_size, num_steps])
    return x, y
class Input(object):
    """Bundle batch geometry with the queued (input_data, targets) tensors."""
    def __init__(self, batch_size, num_steps, data):
        self.batch_size = batch_size
        self.num_steps = num_steps
        # number of full (batch_size, num_steps) windows available per epoch
        self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
        self.input_data, self.targets = batch_producer(data, batch_size, num_steps)
# create the main model
class Model(object):
    """Multi-layer LSTM language model (TF1 graph-mode).

    Builds embeddings -> stacked LSTM -> softmax over the vocabulary,
    exposing cost/accuracy tensors and, when training, a train_op plus a
    learning-rate update op driven by assign_lr().
    """
    def __init__(self, input, is_training, hidden_size, vocab_size, num_layers,
                 dropout=0.5, init_scale=0.05):
        self.is_training = is_training
        self.input_obj = input
        self.batch_size = input.batch_size
        self.num_steps = input.num_steps
        self.hidden_size = hidden_size
        # create the word embeddings (pinned to CPU, as embeddings usually are)
        with tf.device("/cpu:0"):
            embedding = tf.Variable(tf.random_uniform([vocab_size, self.hidden_size], -init_scale, init_scale))
            inputs = tf.nn.embedding_lookup(embedding, self.input_obj.input_data)
        if is_training and dropout < 1:
            inputs = tf.nn.dropout(inputs, dropout)
        # set up the state storage / extraction: the previous batch's LSTM
        # state is fed back in through this placeholder
        self.init_state = tf.placeholder(tf.float32, [num_layers, 2, self.batch_size, self.hidden_size])
        state_per_layer_list = tf.unstack(self.init_state, axis=0)
        rnn_tuple_state = tuple(
            [tf.contrib.rnn.LSTMStateTuple(state_per_layer_list[idx][0], state_per_layer_list[idx][1])
             for idx in range(num_layers)]
        )
        # create an LSTM cell to be unrolled
        cell = tf.contrib.rnn.LSTMCell(hidden_size, forget_bias=1.0)
        # add a dropout wrapper if training
        if is_training and dropout < 1:
            cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)
        if num_layers > 1:
            # NOTE(review): reusing the same wrapped cell object for every
            # layer is the classic TF1 pattern; verify weight sharing is
            # intended for this TF version.
            cell = tf.contrib.rnn.MultiRNNCell([cell for _ in range(num_layers)], state_is_tuple=True)
        output, self.state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32, initial_state=rnn_tuple_state)
        # reshape to (batch_size * num_steps, hidden_size)
        output = tf.reshape(output, [-1, hidden_size])
        softmax_w = tf.Variable(tf.random_uniform([hidden_size, vocab_size], -init_scale, init_scale))
        softmax_b = tf.Variable(tf.random_uniform([vocab_size], -init_scale, init_scale))
        logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        # Reshape logits to be a 3-D tensor for sequence loss
        logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])
        # Use the contrib sequence loss and average over the batches
        loss = tf.contrib.legacy_seq2seq.sequence_loss(
            logits,
            self.input_obj.targets,
            tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),
            average_across_timesteps=False,
            average_across_batch=True)
        # Update the cost
        self.cost = tf.reduce_sum(loss)
        # get the prediction accuracy
        self.softmax_out = tf.nn.softmax(tf.reshape(logits, [-1, vocab_size]))
        self.predict = tf.cast(tf.argmax(self.softmax_out, axis=1), tf.int32)
        correct_prediction = tf.equal(self.predict, tf.reshape(self.input_obj.targets, [-1]))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        if not is_training:
            return
        # ---- training-only ops below ----
        self.learning_rate = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        # gradient clipping at global norm 5 to keep LSTM training stable
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 5)
        optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
        # optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())
        # self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.cost)
        # learning-rate schedule plumbing used by assign_lr()
        self.new_lr = tf.placeholder(tf.float32, shape=[])
        self.lr_update = tf.assign(self.learning_rate, self.new_lr)
    def assign_lr(self, session, lr_value):
        """Set the model's learning rate inside *session*."""
        session.run(self.lr_update, feed_dict={self.new_lr: lr_value})
def train(train_data, vocabulary, num_layers, num_epochs, batch_size, model_save_name,
          learning_rate=1.0, max_lr_epoch=10, lr_decay=0.93, print_iter=50):
    """Train the LSTM language model and checkpoint it each epoch.

    The learning rate stays at *learning_rate* for the first
    *max_lr_epoch* epochs, then decays by *lr_decay* per epoch.
    Checkpoints are written under the module-level data_path.
    """
    # setup data and models
    training_input = Input(batch_size=batch_size, num_steps=35, data=train_data)
    m = Model(training_input, is_training=True, hidden_size=650, vocab_size=vocabulary,
              num_layers=num_layers)
    init_op = tf.global_variables_initializer()
    orig_decay = lr_decay
    with tf.Session() as sess:
        # start threads
        sess.run([init_op])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver = tf.train.Saver()
        for epoch in range(num_epochs):
            # decay kicks in only after max_lr_epoch epochs
            new_lr_decay = orig_decay ** max(epoch + 1 - max_lr_epoch, 0.0)
            m.assign_lr(sess, learning_rate * new_lr_decay)
            # m.assign_lr(sess, learning_rate)
            # print(m.learning_rate.eval(), new_lr_decay)
            # zeroed LSTM state at the start of each epoch
            current_state = np.zeros((num_layers, 2, batch_size, m.hidden_size))
            curr_time = dt.datetime.now()
            for step in range(training_input.epoch_size):
                # cost, _ = sess.run([m.cost, m.optimizer])
                if step % print_iter != 0:
                    cost, _, current_state = sess.run([m.cost, m.train_op, m.state],
                                                      feed_dict={m.init_state: current_state})
                else:
                    # every print_iter steps, also fetch accuracy and log progress
                    seconds = (float((dt.datetime.now() - curr_time).seconds) / print_iter)
                    curr_time = dt.datetime.now()
                    cost, _, current_state, acc = sess.run([m.cost, m.train_op, m.state, m.accuracy],
                                                           feed_dict={m.init_state: current_state})
                    print("Epoch {}, Step {}, cost: {:.3f}, accuracy: {:.3f}, Seconds per step: {:.3f}".format(epoch,
                            step, cost, acc, seconds))
            # save a model checkpoint
            saver.save(sess, data_path + '\\' + model_save_name, global_step=epoch)
        # do a final save
        saver.save(sess, data_path + '\\' + model_save_name + '-final')
        # close threads
        coord.request_stop()
        coord.join(threads)
def test(model_path, test_data, reversed_dictionary):
    """Restore a checkpoint and report average accuracy on *test_data*.

    Also prints one batch's true-vs-predicted word sequences for eyeballing.
    NOTE(review): reads the module-level ``vocabulary`` global rather than
    taking it as a parameter - confirm it is set before calling.
    """
    test_input = Input(batch_size=20, num_steps=35, data=test_data)
    m = Model(test_input, is_training=False, hidden_size=650, vocab_size=vocabulary,
              num_layers=2)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # start threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        current_state = np.zeros((2, 2, m.batch_size, m.hidden_size))
        # restore the trained model
        saver.restore(sess, model_path)
        # get an average accuracy over num_acc_batches
        num_acc_batches = 30
        check_batch_idx = 25     # batch whose predictions are printed
        acc_check_thresh = 5     # skip the first few batches (state warm-up)
        accuracy = 0
        for batch in range(num_acc_batches):
            if batch == check_batch_idx:
                true_vals, pred, current_state, acc = sess.run([m.input_obj.targets, m.predict, m.state, m.accuracy],
                                                               feed_dict={m.init_state: current_state})
                pred_string = [reversed_dictionary[x] for x in pred[:m.num_steps]]
                true_vals_string = [reversed_dictionary[x] for x in true_vals[0]]
                print("True values (1st line) vs predicted values (2nd line):")
                print(" ".join(true_vals_string))
                print(" ".join(pred_string))
            else:
                acc, current_state = sess.run([m.accuracy, m.state], feed_dict={m.init_state: current_state})
            if batch >= acc_check_thresh:
                accuracy += acc
        print("Average accuracy: {:.3f}".format(accuracy / (num_acc_batches-acc_check_thresh)))
        # close threads
        coord.request_stop()
        coord.join(threads)
# Script entry: resolve the data path, load PTB, then train or evaluate
# depending on run_opt (1 = train, anything else = test a saved checkpoint).
if args.data_path:
    data_path = args.data_path
train_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()
if args.run_opt == 1:
    train(train_data, vocabulary, num_layers=2, num_epochs=60, batch_size=20,
          model_save_name='two-layer-lstm-medium-config-60-epoch-0p93-lr-decay-10-max-lr')
else:
    # hard-coded checkpoint suffix (epoch 38) from a previous training run
    trained_model = args.data_path + "\\two-layer-lstm-medium-config-60-epoch-0p93-lr-decay-10-max-lr-38"
    test(trained_model, test_data, reversed_dictionary)
|
[
"apsw@wallace2.com"
] |
apsw@wallace2.com
|
0d25581023f8d8b557c1dcda1457ecdc5a273c8f
|
2fd5a0e32d5f697e8328aad5f6a79f3943b61d65
|
/service/elasticsearch-service.py
|
0ab11eda65b338d26dc47ddfd64e136fbb36e7ba
|
[
"Apache-2.0"
] |
permissive
|
sesam-community/elasticsearch-source
|
f5da0348d1e852081b2882ff1244ce7c41408bd4
|
3ae13c12ca7bf0c1b190d4ffdcd2df80909f1158
|
refs/heads/master
| 2023-06-01T01:15:11.518997
| 2019-12-10T12:27:54
| 2019-12-10T12:27:54
| 223,147,179
| 0
| 0
|
Apache-2.0
| 2019-11-22T07:48:02
| 2019-11-21T10:22:24
|
Python
|
UTF-8
|
Python
| false
| false
| 4,131
|
py
|
from flask import Flask, request, Response
import cherrypy
from datetime import datetime, timedelta
import json
import logging
import paste.translogger
import requests
import os
import boto3
from botocore.credentials import InstanceMetadataProvider, InstanceMetadataFetcher
from requests_aws4auth import AWS4Auth
# AWS credentials: prefer explicit env vars, otherwise fall back to the
# EC2 instance role via the metadata service.
secret_key = os.environ.get("SECRET_KEY")
access_key = os.environ.get("ACCESS_KEY")
if secret_key is None:  # fixed: identity comparison with None (was ``== None``)
    logger = logging.getLogger("elasticsearch-service")
    logger.info("No params so attempt get config from machine")
    provider = InstanceMetadataProvider(iam_role_fetcher=InstanceMetadataFetcher(timeout=1000, num_attempts=2))
    credentials = provider.load()
    access_key = credentials.access_key
    secret_key = credentials.secret_key

# AWS region used for request signing; defaults to eu-central-1.
region = os.environ.get('REGION', "eu-central-1")
def executeSignedPost(url, body):
    """POST *body* as JSON to *url* with AWS SigV4 auth; return the parsed reply.

    Uses the module-level access_key / secret_key / region credentials and
    signs for the Elasticsearch ('es') service.
    """
    auth = AWS4Auth(access_key, secret_key, region, 'es')
    response = requests.post(url, auth=auth, json=body)
    return response.json()
app = Flask(__name__)

logger = logging.getLogger("elasticsearch-service")

# Optional index restriction: when INDEX is set, prefix it to search URLs.
index_name = os.environ.get('INDEX')
if index_name is not None:  # fixed: identity comparison with None (was ``!= None``)
    index_name = "/" + index_name
else:
    index_name = ""

# How long Elasticsearch keeps a scroll context alive between pages.
scroll_keep_alive = os.environ.get('SCROLL_KEEP_ALIVE')
if scroll_keep_alive is None:
    # default to 1 minute
    scroll_keep_alive = "1m"
logger.info(scroll_keep_alive)

# Elasticsearch endpoint; local dev default when ES_ENDPOINT is unset.
endpoint = os.environ.get('ES_ENDPOINT')
if endpoint is None:
    endpoint = "http://localhost:9200"
logger.info(endpoint)
@app.route('/', methods=['GET'])
def root():
    """Health-check endpoint: always answers 200 with a static OK payload."""
    payload = "{ \"status\" : \"OK\" }"
    return Response(status=200, response=payload)
@app.route('/entities', methods=["GET"])
def get():
    """Stream every document in the index as a single JSON array.

    Pages through results with the Elasticsearch scroll API so the whole
    index never has to fit in memory; each hit's _source is emitted with
    its _id merged in.
    """
    logger.info("get entities")

    def generate():
        is_more = True
        is_first = True   # tracks comma placement between array elements
        yield "["
        page_size = 10000
        # do initial scroll query
        query = {}
        query["query"] = {}
        query["query"]["match_all"] = {}
        query["size"] = page_size
        data = executeSignedPost(endpoint + index_name + "/_search?scroll=" + scroll_keep_alive, query)
        if len(data["hits"]["hits"]) == 0:
            is_more = False
        while is_more:
            hits = data["hits"]["hits"]
            for h in hits:
                e = h["_source"]
                e["_id"] = h["_id"]
                if is_first:
                    is_first = False
                else:
                    yield ","
                yield json.dumps(e)
            # get next scroll page using the id returned by the previous one
            scroll_request = {}
            scroll_request["scroll"] = scroll_keep_alive
            scroll_request["scroll_id"] = data["_scroll_id"]
            data = executeSignedPost(endpoint + "/_search/scroll", scroll_request)
            if len(data["hits"]["hits"]) == 0:
                is_more = False
        logger.info("get entities completed")
        yield "]"
    return Response(generate(), mimetype='application/json', )
if __name__ == '__main__':
    # Configure logging, wrap the Flask app for access logging, and serve
    # it through CherryPy's production WSGI server on port 5000.
    format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    # Log to stdout, change to or add a (Rotating)FileHandler to log to a file
    stdout_handler = logging.StreamHandler()
    stdout_handler.setFormatter(logging.Formatter(format_string))
    logger.addHandler(stdout_handler)
    # Comment these two lines if you don't want access request logging
    app.wsgi_app = paste.translogger.TransLogger(app.wsgi_app, logger_name=logger.name,
                                                 setup_console_handler=False)
    app.logger.addHandler(stdout_handler)
    logger.propagate = False
    logger.setLevel(logging.INFO)
    cherrypy.tree.graft(app, '/')
    # Set the configuration of the web server to production mode
    cherrypy.config.update({
        'environment': 'production',
        'engine.autoreload_on': False,
        'log.screen': True,
        'server.socket_port': 5000,
        'server.socket_host': '0.0.0.0'
    })
    # Start the CherryPy WSGI web server
    cherrypy.engine.start()
    cherrypy.engine.block()
|
[
"gramoore@outlook.com"
] |
gramoore@outlook.com
|
e8fee5d2b9a5d9953e740a80cc82fbe9dbd5500d
|
cdb095702a35cb38255651c075ff4719e8b2401b
|
/qualcoder/GUI/ui_save_query.py
|
5f2f0252ff2b1f11996caad226ee095b876d3864
|
[
"MIT"
] |
permissive
|
ilippert/QualCoder
|
dfb7015027a03bde3f79de4b12984afe51dc805c
|
cb3ec9bcd347adaba5d81fef5d83f40c6a338c64
|
refs/heads/master
| 2023-07-06T07:43:32.576748
| 2021-12-17T18:54:39
| 2021-12-17T18:54:39
| 248,160,930
| 0
| 0
|
MIT
| 2020-03-18T07:02:17
| 2020-03-18T07:02:16
| null |
UTF-8
|
Python
| false
| false
| 2,973
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_save_query.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogSaveQuery(object):
    """pyuic5-generated layout for the "Save Query" dialog.

    WARNING: generated code - edit ui_save_query.ui and regenerate rather
    than changing this class by hand.
    """
    def setupUi(self, DialogSaveQuery):
        """Build the widget tree: name/group line edits, a description
        text edit, and an OK/Cancel button box in a vertical layout."""
        DialogSaveQuery.setObjectName("DialogSaveQuery")
        DialogSaveQuery.resize(587, 443)
        DialogSaveQuery.setMaximumSize(QtCore.QSize(16777215, 1000))
        self.verticalLayout = QtWidgets.QVBoxLayout(DialogSaveQuery)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_name = QtWidgets.QLabel(DialogSaveQuery)
        self.label_name.setObjectName("label_name")
        self.verticalLayout.addWidget(self.label_name)
        self.lineEdit_name = QtWidgets.QLineEdit(DialogSaveQuery)
        self.lineEdit_name.setObjectName("lineEdit_name")
        self.verticalLayout.addWidget(self.lineEdit_name)
        self.label = QtWidgets.QLabel(DialogSaveQuery)
        self.label.setObjectName("label")
        self.verticalLayout.addWidget(self.label)
        self.lineEdit_group = QtWidgets.QLineEdit(DialogSaveQuery)
        self.lineEdit_group.setObjectName("lineEdit_group")
        self.verticalLayout.addWidget(self.lineEdit_group)
        self.label_2 = QtWidgets.QLabel(DialogSaveQuery)
        self.label_2.setObjectName("label_2")
        self.verticalLayout.addWidget(self.label_2)
        self.textEdit = QtWidgets.QTextEdit(DialogSaveQuery)
        self.textEdit.setMinimumSize(QtCore.QSize(0, 60))
        self.textEdit.setMaximumSize(QtCore.QSize(16777215, 800))
        self.textEdit.setObjectName("textEdit")
        self.verticalLayout.addWidget(self.textEdit)
        self.buttonBox = QtWidgets.QDialogButtonBox(DialogSaveQuery)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(DialogSaveQuery)
        # wire standard accept/reject so the dialog closes itself
        self.buttonBox.accepted.connect(DialogSaveQuery.accept)
        self.buttonBox.rejected.connect(DialogSaveQuery.reject)
        QtCore.QMetaObject.connectSlotsByName(DialogSaveQuery)
    def retranslateUi(self, DialogSaveQuery):
        """Apply the (translatable) user-visible strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        DialogSaveQuery.setWindowTitle(_translate("DialogSaveQuery", "Save Query"))
        self.label_name.setText(_translate("DialogSaveQuery", "Query name:"))
        self.label.setText(_translate("DialogSaveQuery", "Query Group"))
        self.label_2.setText(_translate("DialogSaveQuery", "Description"))
if __name__ == "__main__":
    # Manual smoke test: show the generated dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    DialogSaveQuery = QtWidgets.QDialog()
    ui = Ui_DialogSaveQuery()
    ui.setupUi(DialogSaveQuery)
    DialogSaveQuery.show()
    sys.exit(app.exec_())
|
[
"noreply@github.com"
] |
ilippert.noreply@github.com
|
9788f57270dd8a4fd16f6f293f386bace2713928
|
7084a96fc21297f8d60461a7ec18b377481460ac
|
/hello_app/modules.py
|
3381f66ad6244b1cbae54a38e6a0460581350884
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
masonnixon/python-webform-flask-app
|
a4331d7d3bd02904fbb2b2c724a2cc095d3154cc
|
6a122f9d7c83415d6ccdd638c1a925af60c99de0
|
refs/heads/master
| 2023-03-23T05:23:10.050597
| 2021-03-18T18:08:21
| 2021-03-18T18:08:21
| 348,856,106
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# functions to be used by the routes
# retrieve all the names from the dataset and put them into a list
def get_names(source):
    """Return every row's "name", lowercased, as a sorted list."""
    # lowercase all names so later searches are case-insensitive
    return sorted(row["name"].lower() for row in source)
# find the row that matches the id in the URL, retrieve name and photo
def get_actor(source, id):
    """Return (id, name, photo) for the row whose "id" matches, else placeholders."""
    for record in source:
        if str(record["id"]) == id:
            # normalise the id to a string before returning it
            return str(id), record["name"], record["photo"]
    # no match: placeholder triple - not a great solution, but simple
    return "Unknown", "Unknown", ""
# find the row that matches the name in the form and retrieve matching id
def get_id(source, name):
    """Return the matching row's id as a string, or "Unknown" when absent."""
    wanted = name.lower()
    for record in source:
        # case-insensitive comparison on both sides
        if record["name"].lower() == wanted:
            return str(record["id"])
    # no match - not a great solution, but simple
    return "Unknown"
|
[
"masonnixon@gmail.com"
] |
masonnixon@gmail.com
|
0ae02d349e4887377624aa3f3912d19f811b752f
|
72d5da7ee23be1efbcea2e756c1e30be24523ad1
|
/src/sort function.py
|
3e80e473e1866edd3b502564c92fbee9d4095b2f
|
[
"MIT"
] |
permissive
|
christian-fox/Poker
|
99ef69b67d8ed1a4aca27912989843446a575942
|
53984e6de571f832c010d1364e284f6cbfdd46e6
|
refs/heads/master
| 2023-05-30T21:49:17.982788
| 2021-06-23T09:37:15
| 2021-06-23T09:37:15
| 330,729,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
# BUG FIX: the original did ``print(number_list.sort())``, which always
# prints None (list.sort sorts in place and returns None) and, on Python 3,
# raises TypeError first because ints and strings are not comparable.
# Sort on the string form of each element instead and print the result.
number_list = ['8 S','14 H','3 C',10,7,2]
print(sorted(number_list, key=str))
|
[
"10foxc@ChristiansMBP2.home"
] |
10foxc@ChristiansMBP2.home
|
675ecc3bb9c3d84673ee69a64c91a5fa080af81d
|
359f3d8a1a2b5524490c314a44d60cec1d06f658
|
/whoweb/users/models/__init__.py
|
36572f662c8de8723b1fe5cba580e20525b29784
|
[] |
no_license
|
sivasuriyangithub/Merket_Intellect-s3.route
|
ec9d9aa7d4575d5ff8006e1454f69e4033193fc0
|
71a9ab642f9a31f4a318cebec7fe6a075870a83c
|
refs/heads/master
| 2023-08-25T13:51:02.116705
| 2021-10-19T01:06:49
| 2021-10-19T01:06:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
# Package facade: re-export the user and organization models so callers can
# simply ``from whoweb.users.models import User, Group, ...``.
from .organization import Group, GroupOwner, Seat, DeveloperKey
from .user_profile import User, UserProfile
__all__ = ["User", "UserProfile", "Group", "Seat", "GroupOwner", "DeveloperKey"]
|
[
"zach@whoknows.com"
] |
zach@whoknows.com
|
20aa50a3725a3abe382b5d70e5525a1f739f9bc6
|
86465d9f76a96a9375f81013cb9a06740c0b558e
|
/git_test.py
|
7d80561f8d7f94e5603d8d1efc7549a2c3368f94
|
[] |
no_license
|
GaryChen10128/test
|
f5a29ea305b705c2ff7e96e3f5c4400d2d3cc3bb
|
df180123499c5a7dd37ba98faa7c6c0c96640edb
|
refs/heads/master
| 2020-05-26T20:59:40.364628
| 2019-05-24T08:17:26
| 2019-05-24T08:17:26
| 188,371,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,904
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 14:21:56 2019

@author: 180218
"""
# Exploratory script exercising the python-gitlab API: lists projects and
# branches, inspects commits/statuses/deployments, and drives tags and
# merge requests.
#
# SECURITY NOTE(review): private tokens and passwords are hard-coded below
# (both the active line and the commented-out history).  They are committed
# to source control and should be rotated and loaded from the environment.
import gitlab

# private token or personal token authentication
#gl = gitlab.Gitlab('http://10.0.0.1', private_token='JVNSESs8EwWRx5yDxM5q')
#gl = gitlab.Gitlab('http://gitlab.ideas.iii.org.tw', private_token='JLj9KwzuqNPGBmZ37VrK')
#gl = gitlab.Gitlab('http://gitlab.ideas.iii.org.tw/','ydqRAmnGkAHmnZADWfoK')
gl = gitlab.Gitlab('http://150.117.122.207:7777/',private_token='G-mrQ-Zf8P7T3bPx4rDi')
#gl = gitlab.Gitlab('http://150.117.122.207:7777/',private_token='tAGUZUvQeMxBna5ydcEe')
#gl = gitlab.Gitlab('ssh://git@gitlab.safeplayservice.ml:7778/GaryChen/iii_flywheel.git',private_token='i3zquer-YKr4u8vn6_wX')
#
# oauth token authentication
#gl = gitlab.Gitlab('http://10.0.0.1', oauth_token='my_long_token_here')
#gl = gitlab.Gitlab('http://gitlab.ideas.iii.org.tw',oauth_token='kkEaj349NTBYHM8hguAe')
#gl = gitlab.Gitlab('https://gitlab.com/',private_token='-U1XJuXSas4oyaBgx3A1')
#gl = gitlab.Gitlab('https://gitlab.com/')
#
# username/password authentication (for GitLab << 10.2)
#gl = gitlab.Gitlab('http://gitlab.ideas.iii.org.tw', email='180218', password='tp60 rm04tp60 rm04*')
#gl = gitlab.Gitlab('https://gitlab.com/', email='garychen@iii.org.tw', password='Jack0204')

# anonymous gitlab instance, read-only for public resources
#gl = gitlab.Gitlab('http://gitlab.ideas.iii.org.tw')
#gl = gitlab.Gitlab('https://gitlab.com/vurpo/')

# make an API request to create the gl.user object. This is mandatory if you
# use the username/password authentication.
#gl = gitlab.Gitlab.from_config('https://gitlab.com/vurpo/', ['/tmp/gl.cfg'])
#gl = gitlab.Gitlab.from_config('https://gitlab.com/vurpo/')
#gl = gitlab.Gitlab('https://gitlab.com/', private_token='i3zquer-YKr4u8vn6_wX')
gl.auth()

# List every project visible to the authenticated user.
projects = gl.projects.list()
print('list of projects:')
print('id','project name')
for project in projects:
    # print(project)
    print(project.id,project.name)

import numpy as np
import base64
#y=np.array(projects)
#y[1][2]
#groups = gl.groups.get(10)
#for group in groups:
#    print(group)

# Open one specific project by id and walk its branches.
p_index=10
project = gl.projects.get(p_index)
#print(project.name)
#project = gl.projects.get(10, lazy=True) # no API call
#p = gl.projects.get(2, lazy=True) # no API call
branches = project.branches.list()
#project.star()
print('get into',project.name)
print('list of branches:')
i=0
for branch in branches:
    print(i,branch.name)
    i+=1
#branches[0].name
#p.branches[1].name
#branches = p.branches.lisproject.branches.delete('feature1')
#commits = project.commits.list()

# Inspect the master branch, its head commit, statuses and deployments.
branch = project.branches.get('master')
branch.commit               # attribute access only; value unused
#statuses = commit.statuses.list()
branch.developers_can_push  # attribute access only; value unused
commit = project.commits.get('master')
#commit?
statuses = commit.statuses.list()
#statuses[2]
deployments = project.deployments.list()

# Payload for a batch commit: one 'create' action adding git_test2.py.
data = {
    'branch_name': 'master', # v3
    'branch': 'master', # v4
    'commit_message': 'blah blah blah',
    'actions': [
#        {
#            'action': 'create',
#            'file_path': 'README.rst',
##            'content': open('path/to/file.rst').read(),
#            'content': 'gg',
#
#        },
#        {
#            # Binary files need to be base64 encoded
#            'action': 'create',
#            'file_path': 'logo.png',
##            'content': base64.b64encode(open('logo.png').read()),
#            'encoding': 'base64',
#        },
        {
            # Binary files need to be base64 encoded
            'action': 'create',
            'file_path': 'git_test2.py',
#            'content': open('./git_test.py').read(),
            'content': 'chocolate',
#            'encoding': 'base64',
        }
    ]
}
x=project.commits
#x?
#commit = project.commits.create(data)
#diff = commit.diff()
#commit.refs() # all references
#commit.refs('tag') # only tags
#commit.refs('branch') # only branches
#
#diff = commit.diff()
#commit.cherry_pick(branch='target_branch')
#commit.refs() # all references
#commit.refs('tag') # only tags
#commit.refs('branch') # only branches
#commit.merge_requests()
#keys = project.keys.list()
#key = project.keys.get(10)
#key = project.keys.create({'title': 'gary key','key': open('C:/Users/180218/.ssh/id_rsa.pub').read()})
#open('C:/Users/180218/.ssh/id_rsa.pub').read()
#service = project.services.get('master')
#service = project.services.list()
#project.upload("git_test.py", filepath="./git_test.py")
#project.commit()

# Fetch the attributes of a specific branch.
branch = project.branches.get('master')
print(branch.name)
# Protect / unprotect the branch.
branch.protect()
#branch.unprotect()
# ---------------------
#------------------------------------------- #
# List all tags of the project.
tags = project.tags.list()  # a single tag's info can also be fetched
#tags = project.tags.list('1.0')
# Create a tag:
#tag = project.tags.create({'tag_name':'1.0', 'ref':'master'})
# Set the tag's release description:
#tag.set_release_description('awesome v1.0 release')
# ---------------------------------------------------------------- #
# Fetch all commit info.
commits = project.commits.list()
for c in commits:
    # print(c)
    print(c.short_id,'|', c.author_name,'|', c.message,'|' ,c.title)
# Fetch info for one specific commit.
commit = project.commits.get('61f75d55')
# List all merge requests of the project.
mrs = project.mergerequests.list()
print(mrs)
# ---------------------------------------------------------------- #
# Create a merge request.
mr = project.mergerequests.create({'source_branch':'master',
                                   'target_branch':'feature1',
                                   'title':'merge master feature', })
# Update the merge request's description.
mr.description = 'New description'
mr.save()
mr.merge()
# Toggle the merge request state (close or reopen):
mr.state_event = 'close' # or 'reopen'
mr.save()
# ---------------------------------------------------------------- #
mr.delete()
|
[
"garychen@iii.org.tw"
] |
garychen@iii.org.tw
|
f1349eeb2548da2ee6788c62c1b9ce3bce799e31
|
c8b8fb9d674ee800b79b30aab91c43f76ec9b3ff
|
/Rosol-Odoo/extras/rosol/controllers/controllers.py
|
ef660be6876e598df9938a0000cf64f4cfd2b14d
|
[] |
no_license
|
MariaLatif/Rosol-Odoo
|
03e3774aa09c8ba0facbb501e40e1b8eb844ce6a
|
987c0d559a987013b7450de6ede8767c8ae58893
|
refs/heads/master
| 2021-07-06T00:44:55.873924
| 2017-09-30T22:13:46
| 2017-09-30T22:13:46
| 105,398,811
| 0
| 0
| null | 2017-09-30T22:13:47
| 2017-09-30T20:28:35
|
Python
|
UTF-8
|
Python
| false
| false
| 669
|
py
|
# -*- coding: utf-8 -*-
from odoo import http
# class Rosol(http.Controller):
# @http.route('/rosol/rosol/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/rosol/rosol/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('rosol.listing', {
# 'root': '/rosol/rosol',
# 'objects': http.request.env['rosol.rosol'].search([]),
# })
# @http.route('/rosol/rosol/objects/<model("rosol.rosol"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('rosol.object', {
# 'object': obj
# })
|
[
"maria.mia.latif94@gmail.com"
] |
maria.mia.latif94@gmail.com
|
85bee7243b4ec80b1dc308b93e5ea2081a95ae6c
|
3482928a04e467dfd4808aa6df0d87bd28c34b98
|
/lesson8_1.py
|
78d71c6012af50a52b370b29be1e5e90ce8190c8
|
[] |
no_license
|
rkashapov2015/python_lesson8
|
45ea758684f0078f2c9c8b0538aafbbecc21956c
|
64fe8c99c2537f41ee90ae7c9ff497193a3665a0
|
refs/heads/master
| 2020-03-29T03:50:31.104253
| 2018-09-21T04:54:30
| 2018-09-21T04:54:30
| 149,503,460
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 66
|
py
|
import os
import webbrowser
# Open the Yandex homepage in the system's default web browser.
# Fix: the original URL 'https://wwww.ya.ru' contains a typo ('wwww', four
# w's) and does not resolve; use the canonical host instead.
webbrowser.open('https://www.ya.ru')
|
[
"rinatkzz@yandex.ru"
] |
rinatkzz@yandex.ru
|
359719151fda0cd81217ebd01501d97d6aac818c
|
bf72ddbf4bf0fbb944aab69bea50632cb0d0fba5
|
/code/weapons.py
|
5037cd45333fc56a724a2bcd28d0802f9e874b85
|
[] |
no_license
|
UnaStankovic/GlobalTerrorismDatabaseAnalysis
|
0665fa3f1d82ec94974357449586e31247b9711d
|
a0ffbbc7b56b06a77a2289dcfed769b4c31ee134
|
refs/heads/master
| 2021-01-22T07:48:05.927360
| 2017-06-30T00:19:34
| 2017-06-30T00:19:34
| 92,576,982
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,399
|
py
|
import pandas as pd
import matplotlib.patches as mpat
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
def year_group(year):
    """Return the cohort label ('Group 1'..'Group 4') for a given year.

    Years before 1980 map to 'Group 1', 1980-1989 to 'Group 2',
    1990-1999 to 'Group 3', and 2000 onwards to 'Group 4'.
    """
    # Walk the (exclusive upper bound, label) table in ascending order;
    # the first bound the year falls under wins.
    for upper_bound, label in ((1980, 'Group 1'),
                               (1990, 'Group 2'),
                               (2000, 'Group 3')):
        if year < upper_bound:
            return label
    return 'Group 4'
def get_percent(df):
    """Return a copy of *df* with 'id' renamed to 'total_attacks' and an
    added 'Percentage' column: each row's share (in percent) of the
    column's total.

    The input frame is left unmodified (``rename`` returns a copy).
    """
    new_df = df.rename(columns={'id': 'total_attacks'})
    total = new_df['total_attacks'].sum()
    # Vectorized arithmetic instead of a row-wise apply(..., axis=1):
    # identical values, one C-level pass rather than a Python call per row.
    new_df['Percentage'] = new_df['total_attacks'] / total * 100
    return new_df
# --- Script body: load data, compute per-cohort weapon-type percentages,
# and plot them. (Progress strings are Serbian: "loading data",
# "computing percentages", "saving figure", "gotovo" = "done".)
print('ucitavanje podataka...', end='')
sys.stdout.flush()
new_globalterror = pd.read_csv('datasets/backup2.csv')
# Tag every incident with its decade cohort (see year_group above).
new_globalterror['Group'] = new_globalterror.apply(lambda row: year_group(row['year']),axis=1)
print('gotovo')
print('izracunavanje procenata...', end='')
sys.stdout.flush()
# Attack counts per weapon type, one frame per cohort.
group_1 = new_globalterror[new_globalterror.Group == 'Group 1'].groupby('weapon').count()['id'].reset_index()
group_2 = new_globalterror[new_globalterror.Group == 'Group 2'].groupby('weapon').count()['id'].reset_index()
group_3 = new_globalterror[new_globalterror.Group == 'Group 3'].groupby('weapon').count()['id'].reset_index()
group_4 = new_globalterror[new_globalterror.Group == 'Group 4'].groupby('weapon').count()['id'].reset_index()
new_grp1 = get_percent(group_1)
new_grp2 = get_percent(group_2)
new_grp3 = get_percent(group_3)
new_grp4 = get_percent(group_4)
print('gotovo')
print('cuvanje slike...', end='')
sys.stdout.flush()
plt.figure(figsize=[16,8])
# One colored line per cohort. NOTE(review): the [:-1] / [1:] slices
# presumably exclude one weapon category from each frame — confirm which
# row is being dropped (and why it differs per cohort) before changing.
sns.pointplot(x='weapon',y='Percentage', data=new_grp1[:-1], color='red')
sns.pointplot(x='weapon',y='Percentage', data=new_grp3[:-1], color='blue')
sns.pointplot(x='weapon',y='Percentage', data=new_grp2[1:], color='green')
sns.pointplot(x='weapon',y='Percentage', data=new_grp4[1:], color='violet')
plt.xticks(rotation=90)
plt.xlabel('Vrsta oruzja', size=16)
plt.ylabel('Zastupljenost [%]', size=16)
plt.title('Zastupljenost raslicitih vrsta oruzja', size=18)
# Manual legend patches mapping each line color to its year range.
red_l = mpat.Patch(color='red', label='1970-1980')
gre_l = mpat.Patch(color='green', label='1981-1990')
blue_l = mpat.Patch(color='blue', label='1991-2000')
vio_l = mpat.Patch(color='violet',label='2001-2015')
plt.legend(handles=[red_l,gre_l,blue_l,vio_l])
plt.savefig('zastupljenost-big.png', bbox_inches='tight')
print('gotovo')
sys.stdout.flush()
plt.show()
|
[
"urosstegic@gmx.com"
] |
urosstegic@gmx.com
|
989a8f0dbaf50909bbb1c170f47ee5721c8c9124
|
c2d81dbce6858217e2fdaa9399e936a6667f5be7
|
/01_dell.py
|
97298f330c56acfda8b4dd7d14d6ccee4b04aaa2
|
[] |
no_license
|
akash123456-hub/hello.py
|
2a705fded2e0b7eca3e4c48080cb05c8a8ac126f
|
9da2235804041217efb7b26a00ad3b3bc4b0eabd
|
refs/heads/master
| 2023-08-15T11:42:46.582938
| 2021-08-03T17:19:00
| 2021-08-03T17:19:00
| 372,376,642
| 0
| 0
| null | 2021-08-03T17:19:01
| 2021-05-31T03:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 267
|
py
|
class Employee:
    """Toy class illustrating class attributes vs. instance attributes."""

    # Class-level defaults, shared by all instances until shadowed.
    company = "Google"
    salary = 3000


alka, rehman = Employee(), Employee()

# These assignments create per-instance 'salary' attributes that shadow
# the class-level default of 3000.
alka.salary, rehman.salary = 4000, 5000

print(alka.company)    # resolved on the class -> Google
print(rehman.company)  # resolved on the class -> Google

# Rebinding the class attribute is visible through every instance that
# has not shadowed 'company' with its own value.
Employee.company = "Youtube"

print(alka.company)    # -> Youtube
print(alka.salary)     # instance attribute wins -> 4000
print(rehman.company)  # -> Youtube
|
[
"akash.m@ipsator.com"
] |
akash.m@ipsator.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.