blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f9fafc94333b9b7d1a9ab84303e4f749d556117c | bfa6ce49304009fd079b23d33839561ed53e76da | /CSC-121-Python/Module2/New folder/OrengoAnthony_game_functions.py | 17de24ec0b476507f8555dafd38ad8ab6d87dcd8 | [] | no_license | orengoa0459/CSC-121 | 89df696ac6c56f0508234f89aedd20a3c00dce2c | 026f3f5843ca9deb1ad28e1ad27943302a4d7427 | refs/heads/main | 2023-06-11T22:00:36.845210 | 2021-06-27T22:13:14 | 2021-06-27T22:13:14 | 372,833,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | # The rockPaperScissors function receives numbers representing the
# computer and player's choices.
# It returns 0 if there is a tie, 1 if the computer won, 2 if the
# player won, or 3 if the player made an invalid choice.
# Global constants
# Outcome codes and choice codes shared by the game logic below.
COMPUTER_WINS = 1
PLAYER_WINS = 2
TIE = 0
INVALID = 3
ROCK = 1
PAPER = 2
SCISSORS = 3

# Result the player gets for each (computer, player) pairing.  Any computer
# value other than ROCK/PAPER is treated as scissors, mirroring the original
# if/elif/else fall-through.
_SCISSORS_ROW = {ROCK: PLAYER_WINS, PAPER: COMPUTER_WINS}
_OUTCOMES = {
    ROCK: {PAPER: PLAYER_WINS, SCISSORS: COMPUTER_WINS},
    PAPER: {ROCK: COMPUTER_WINS, SCISSORS: PLAYER_WINS},
}

def rockPaperScissors(computer, player):
    """Score one round of rock-paper-scissors.

    Returns TIE (0) on a draw, COMPUTER_WINS (1), PLAYER_WINS (2), or
    INVALID (3) when the player's number is not a recognised choice.
    """
    if computer == player:
        return TIE
    row = _OUTCOMES.get(computer, _SCISSORS_ROW)
    return row.get(player, INVALID)
def choiceString(choice):
    """Return the display name for a numeric choice code.

    Unknown codes yield the original fallback message instead of raising.
    """
    names = {ROCK: 'rock', PAPER: 'paper', SCISSORS: 'scissors'}
    return names.get(choice, 'something went wrong')
if __name__ == "__main__":
    # NOTE(review): main() is never defined in this module, so running the
    # file directly raises NameError.  Presumably the real entry point lives
    # in a companion script -- confirm before relying on this guard.
    main()
| [
"48802353+orengoa0459@users.noreply.github.com"
] | 48802353+orengoa0459@users.noreply.github.com |
a3b2ff7c0ac1c3c0e55ab2b80b3a651100768d81 | 90bb32b3156232973012185896edb5899a5716c8 | /cal/migrations/0001_initial.py | 1efff23d2ab5b2b6d015cc44b05fcd6a75102041 | [] | no_license | odedahay/django-calendar-app | a68c4e8a3072422f811a41d2562006be911e2fd9 | b9daad72be7ebbb1827c5aa8facae3d7e4d68395 | refs/heads/master | 2022-04-13T14:51:32.021441 | 2020-04-12T10:42:43 | 2020-04-12T10:42:43 | 255,060,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # Generated by Django 3.0.5 on 2020-04-12 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Event table with a
    title, free-text description, and start/end timestamps."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('start_time', models.DateTimeField()),
                ('end_time', models.DateTimeField()),
            ],
        ),
    ]
| [
"odedahay@yahoo.com"
] | odedahay@yahoo.com |
d4e32ff94b192f142379526a3b922b50b3206f93 | c8bac63e2b4da9a86bdf3217b665b0d4fc0fca8e | /module6_1_tensorboard.py | 2989093f386404190e2f23be93da73d448ce0dcb | [] | no_license | jandziak/tensorflow_workshop | 902cc62432f88b97176f22c7bc6664618660112b | a1daa34f0cd2967a178ad3319ac81b711b1f5223 | refs/heads/master | 2021-01-20T15:26:33.402415 | 2017-07-24T13:31:02 | 2017-07-24T13:31:02 | 82,812,810 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # Module 6: Tensorboard
# Author: Dr. Alfred Ang
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# Event files are written here; inspect with: tensorboard --logdir /tmp/demo/4
logdir = '/tmp/demo/4'
import tensorflow as tf
# Two constant nodes; the op names show up in the TensorBoard graph view.
a = tf.constant(12,name='a')
b = tf.constant(4,name='b')
# c = tf.multiply(a,b,name='c')
# d = tf.div(a, b, name='d')
# name_scope groups each op under a collapsible box in the graph view.
with tf.name_scope('multiply'):
    c = tf.multiply(a, b, name='c')
with tf.name_scope('divide'):
    d = tf.div(a, b, name='d')
# TF1.x-style session API (tf.Session / tf.summary.FileWriter).
sess = tf.Session()
tf.summary.scalar('c',c)
tf.summary.scalar('d',d)
merged_summary = tf.summary.merge_all()
s = sess.run(merged_summary)
writer = tf.summary.FileWriter(logdir)
writer.add_summary(s)
writer.add_graph(sess.graph)
print(sess.run(c))
print(sess.run(d))
"janidziak@gmail.com"
] | janidziak@gmail.com |
e2d5e7c66693b0f86ee9a3063be785b83ed0b7d5 | ddfde9de04919c7a3bcd2b5dcfb8354c90568637 | /nested_frame_work/test_framework_init.py | a1e70c813c19b4edef0ae948f23bc8bc8705a227 | [] | no_license | ORNL-Fusion/ips-examples | aedfbf35e2abb08f1df5de9effcebca2eb3287d5 | 4950eeb8cb20eed764018fca10617473b814c883 | refs/heads/master | 2023-07-24T12:49:40.520201 | 2023-01-27T20:51:45 | 2023-01-27T20:51:45 | 44,407,382 | 4 | 5 | null | 2022-10-09T03:44:15 | 2015-10-16T19:53:59 | Python | UTF-8 | Python | false | false | 1,928 | py | #! /usr/bin/env python
#-------------------------------------------------------------------------------
#
# IPS wrapper for TEST Init component.
#
#-------------------------------------------------------------------------------
from component import Component
#-------------------------------------------------------------------------------
#
# TEST Init Class
#
#-------------------------------------------------------------------------------
class test_framework_init(Component):
    """IPS wrapper for the TEST Init component.

    Each framework lifecycle hook only announces itself on stdout; the
    component performs no real work.
    """
    def __init__(self, services, config):
        """Construct the component and delegate to the IPS Component base."""
        print('test_framework_init: Construct')
        Component.__init__(self, services, config)
    def init(self, timeStamp=0.0):
        """Lifecycle init hook: would prepare input/state files (no-op here)."""
        print('test_framework_init: init')
    def step(self, timeStamp=0.0):
        """Lifecycle step hook: does nothing beyond logging."""
        print('test_framework_init: step')
    def finalize(self, timeStamp=0.0):
        """Lifecycle finalize hook: would clean up afterwards (no-op here)."""
        print('test_framework_init: finalize')
| [
"cianciosamr@ornl.gov"
] | cianciosamr@ornl.gov |
ad90603cf859e61bf494044b900c7e253b975437 | 7ae423b21e39bc12b33566826ca8b27475a89e40 | /spider_JDMeizitu.py | f3c9a215f25168acf884884ae610ef3d46c2d34a | [] | no_license | Joeliqq/PythonSpider | d28a3fd3d337f55a328cea57954c8ade5d6b01ad | afc1a3ef312a269b5efec18c363dd21649598d20 | refs/heads/master | 2021-05-04T10:02:15.913070 | 2018-01-31T00:45:51 | 2018-01-31T00:45:51 | 48,534,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | # coding:utf-8
import urllib2
import random
import os
from bs4 import BeautifulSoup
def getSoup(url):
    """Fetch *url* with a desktop Chrome User-Agent and parse it.

    Returns a BeautifulSoup tree.  Uses urllib2, so this module is
    Python 2 only.
    """
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.124 Safari/537.36')
    html = urllib2.urlopen(req).read()
    soup = BeautifulSoup(html, "html.parser") # "html.parser" selects BeautifulSoup's built-in parser
    return soup
def saveImaURL(url):
    """Collect full-size image links from the page and persist them.

    Every href of an <a class="view_img_link"> element is written to
    url.txt (one per line, overwriting any previous run) and the same
    URLs are returned as a list.
    """
    soup = getSoup(url)
    links = soup.find_all('a', class_="view_img_link")
    imglist = []
    # Context manager guarantees the file is closed even on error; the
    # original kept an open handle bound to the builtin name `file`.
    with open('url.txt', 'w') as out:
        for tag in links:
            # Each matched tag maps 'href' to the full-size image URL.
            image_url = tag['href']
            imglist.append(image_url)
            out.write(image_url + '\n')
    return imglist
# def saveURL2Text():
# file = open('url.txt', 'w')
# for imgid in saveImaURL():
# file.write(getImaURLList + '\n')
# file.close()
def wgetImage():
    """Shell out to wget to download every URL listed in url.txt.

    Files are saved under ./jiandanmeizitu; -c resumes partial downloads.
    Requires wget on PATH and a url.txt produced by saveImaURL().
    """
    command = 'wget -P ./jiandanmeizitu -c -i url.txt'
    os.system(command)
    return
if __name__ == '__main__':
    # Scrape the image links from jandan.net's picture board, then let
    # wget download everything listed in url.txt.
    url = 'http://jandan.net/ooxx'
    saveImaURL(url)
    wgetImage()
| [
"joe.study.work@gmail.com"
] | joe.study.work@gmail.com |
2fcdb0d6b42b464448075d478508767377ecbf00 | e38cdd027c9809c182765d1920d5bf8686c7015a | /Galtron-master/settings.py | 04f7aa789313617aed1e1a765f5b2045512897cb | [] | no_license | Moongss/Kookmin_OSS_2017 | 2c7f4f636fa1fdc5bf68de7792bf349a43d5fc39 | 8e83f2770610cc3260813c4f8cff5af946a0cbaf | refs/heads/master | 2021-01-01T20:38:33.429672 | 2017-07-31T15:58:45 | 2017-07-31T15:58:45 | 98,905,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import pygame as pg
from pygame.transform import *
class Settings():
    """A class to store all settings for game"""
    def __init__(self):
        """Initialize static settings and load/scale the background image."""
        self.windowCaption = 'MOONGS INVADER'
        self.screenWidth = 1280
        self.screenHeight = 720
        self.bgColor = (20, 20, 20)
        # Background is scaled to exactly fill the 1280x720 window.
        img = pg.image.load('gfx/background.png')
        img = scale(img, (1280, 720))
        self.bg = img
        # Number of ships (lives) the player starts with.
        self.shipLimit = 3
        # Bullet settings
        self.bulletWidth = 3
        self.bulletHeight = 15
        self.bulletColor = (60, 60, 60)
        # Multipliers applied by increaseSpeed() as the game progresses.
        self.speedUp = 2
        self.scoreSpeedUp = 1.5
        self.initDynamicSettings()
    def initDynamicSettings(self):
        """Reset the settings that change during play to their start values."""
        self.shipSpeed = 10
        self.bulletSpeed = 10
        self.alienSpeed = 5
        self.fleetDropSpeed = 3
        # 1 means the fleet moves right; -1 means left.
        self.fleetDir = 1
        self.alienPoints = 50
    def increaseSpeed(self):
        """Increase the speed settings"""
        # Ship/bullet speed-ups are intentionally left disabled.
        #self.shipSpeed *= self.speedUp
        #self.bulletSpeed *= self.speedUp
        # NOTE(review): alienSpeed starts at 5, so this <= 1.5 branch never
        # fires with the current defaults -- confirm the intended threshold.
        if self.alienSpeed <= 1.5:
            self.alienSpeed *= self.speedUp
        self.fleetDropSpeed *= self.speedUp
        self.alienPoints = int(self.alienPoints * self.scoreSpeedUp)
"min@adminui-Mac-Pro.local"
] | min@adminui-Mac-Pro.local |
f3467f1043b80a0ea9337c61aa83eb37180e440c | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/mphnok005/question3.py | 8ab32014734be33c45000ec60015c87758483dae | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | from math import*
# Viete's infinite product: pi = 2 * (2/sqrt(2)) * (2/sqrt(2+sqrt(2))) * ...
x=sqrt(2)
a=2
pi=2*(a/x)
# x = sqrt(2 + x) converges to 2; the loop ends once floating-point
# rounding makes x exactly 2.0.
while x<2:
    x=(sqrt(2+x))
    pi=(pi*a/x)
print("Approximation of pi:",round(pi,3))
# NOTE(review): eval() on raw user input executes arbitrary code;
# float(input(...)) would be the safe equivalent here.
c=eval(input("Enter the radius:\n"))
print("Area:",round(c**2*pi,3))
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
65bb9e2842ec5aad1bd2c89bf069d156d746157c | bf91f844d0890f072208acbeb6bd78aa719d1533 | /venv/Scripts/pyhtmlizer-script.py | 9f1758ed305565d5482f2744e4b243ca70a5473d | [] | no_license | jiaohongtao/python_util | 33b7b91405dd02e4318f59e9bafe60edc268c4dc | 4baad933d4fbb0fc1919322143a53aec0cebd824 | refs/heads/master | 2023-02-16T19:57:01.431806 | 2021-01-14T06:53:08 | 2021-01-14T06:53:08 | 235,258,883 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | #!E:\Projects\PycharmProjects\python_util\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.10.0','console_scripts','pyhtmlizer'
__requires__ = 'Twisted==19.10.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools console-script wrapper: strip the
    # "-script.py"/".exe" suffix from argv[0], then run Twisted's
    # pyhtmlizer entry point and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('Twisted==19.10.0', 'console_scripts', 'pyhtmlizer')()
    )
| [
"jiaohongtao@beyondcent.com"
] | jiaohongtao@beyondcent.com |
6cd613bcdd91d3c252c77c5671f432f525d64cfc | bee2af5228232ce94f418b61810cecd93af62615 | /virtual/bin/django-admin.py | d6b83b56c12b4e16e7f24824f482b665a071f57a | [] | no_license | thuitafaith/djangoapp | b64c2e1a05c67b1135d4d9dd7975c17522238a69 | e06280b34a7b1ec012d0baab6f0fb153875a39b4 | refs/heads/master | 2022-12-11T19:06:08.540528 | 2019-08-29T12:36:45 | 2019-08-29T12:36:45 | 203,321,071 | 0 | 0 | null | 2022-11-22T04:13:07 | 2019-08-20T07:15:28 | Python | UTF-8 | Python | false | false | 158 | py | #!/home/faith/Desktop/django-rem/virtual/bin/python
from django.core import management
if __name__ == "__main__":
    # Virtualenv-generated django-admin shim: dispatch to Django's CLI.
    management.execute_from_command_line()
| [
"thuitamuthoni15@gmail.com"
] | thuitamuthoni15@gmail.com |
fb08e0f0d2390feae8987bf4b8d691e996af235a | dd1ce317b6016ec1a86e913c4c227f803ec3e15f | /k11/digger/middlewares.py | 27df64bac097a61280a4f6978051370ebbf0ed26 | [] | no_license | JaisPiyush/k11 | 3933b7f9b542ff935e1c52a17129000a8b7c95b7 | 312ae1c6bb4ebc6fcb6581fcd4eb2e15846ce7f8 | refs/heads/main | 2023-08-05T01:56:30.355775 | 2021-10-08T05:21:09 | 2021-10-08T05:21:09 | 359,769,657 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class DiggerSpiderMiddleware:
    """Spider middleware for the digger project.

    Generated from Scrapy's default template; every hook currently passes
    results through unchanged.  Not all methods need to be defined -- a
    missing hook means the middleware does not modify the passed objects.
    See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and subscribe it to the spider_opened signal."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        """Called for each response entering the spider.

        Returning None continues processing; raising an exception would
        divert to process_spider_exception.
        """
        return None

    def process_spider_output(self, response, result, spider):
        """Pass the spider's results (Requests/items) through unchanged."""
        # `yield from` delegates to the iterable directly instead of the
        # template's element-by-element loop.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        """Hook for spider errors; returning None lets others handle them."""
        pass

    def process_start_requests(self, start_requests, spider):
        """Pass the spider's start requests through unchanged.

        Works like process_spider_output but has no associated response
        and must yield only Requests (not items).
        """
        yield from start_requests

    def spider_opened(self, spider):
        """Log when a spider starts crawling."""
        spider.logger.info('Spider opened: %s' % spider.name)
class DiggerDownloaderMiddleware:
    """Downloader middleware for the digger project (default template).

    Every hook is currently a pass-through.  Not all methods need to be
    defined -- a missing hook means the middleware does not modify the
    passed objects.
    """
    @classmethod
    def from_crawler(cls, crawler):
        """Create the middleware and subscribe it to the spider_opened signal."""
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s
    def process_request(self, request, spider):
        """Called for each outgoing request.

        Returning None continues processing; a Response or Request would
        short-circuit the download, and IgnoreRequest aborts it.
        """
        return None
    def process_response(self, request, response, spider):
        """Pass the downloaded response through unchanged."""
        return response
    def process_exception(self, request, exception, spider):
        """Hook for download errors; returning None continues the chain."""
        pass
    def spider_opened(self, spider):
        """Log when a spider starts crawling."""
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"iampiyushjaiswal103@gmail.com"
] | iampiyushjaiswal103@gmail.com |
517883fff7511e6a85417acd48de8b4f13d37f6e | 15b916d8a3f2adbb9bae525461b1c08e10f733d1 | /models/dep.py | 723a09884f9bf7de9fe3662e172d7b420b604e37 | [] | no_license | sa1am8/digital-journal | d492d5dff71e9ee9f3b27879f490b585880f05b9 | 7e1b812f608234b59c68ab6e740fe386176824b6 | refs/heads/master | 2023-04-08T07:19:53.686831 | 2021-04-07T08:34:25 | 2021-04-07T08:34:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,213 | py | from flask import render_template, Blueprint, url_for, redirect, request, flash
from sqlalchemy.exc import IntegrityError
import sys
sys.path.insert(1, '/home/toshka/PycharmProjects/EPAM linux/EPAM')
from models.models import Employee, Department
from app import db
from forms.forms import DepartmentForm
dep = Blueprint("dep", __name__)
@dep.route("/departments")
def show_departments():
    """Render a list of all departments with their average salaries.

    Aggregates every employee's salary by department in one pass, then
    computes per-department averages (0 for departments with no staff).
    """
    departments = Department.query.order_by(Department.id).all()
    employees = Employee.query.all()
    # Get information about all employees' salaries
    # and departments they belong to.
    salaries_info = {}
    for employee in employees:
        if employee.department_id in salaries_info:
            salaries_info[employee.department_id]["total"] += employee.salary
            salaries_info[employee.department_id]["count"] += 1
        else:
            salaries_info.update(
                {
                    employee.department_id: {
                        "total": employee.salary,
                        "count": 1,
                    }
                }
            )
    # Calculate average salaries for all departments
    # and store them in a dictionary keyed by department id.
    avg_salaries = {}
    for department in departments:
        if department.id in salaries_info:
            # Department has employees: average rounded to 2 decimals.
            avg_salaries[department.id] = (
                round(salaries_info[department.id]["total"]
                      / salaries_info[department.id]["count"], 2)
            )
        else:
            # Department has no employees.
            avg_salaries[department.id] = 0
    return render_template(
        "html/departaments.html", departments=departments,
        avg_salaries=avg_salaries, title="All departments"
    )
@dep.route("/add_department", methods=["GET", "POST"])
def add_department():
    """Add a new department using a form.

    GET renders the empty form; a valid POST persists the department and
    redirects to the department list with a flash message.
    """
    form = DepartmentForm()
    if form.validate_on_submit():
        # Set department name to a value from the form.
        department = Department(name=form.name.data)
        db.session.add(department)
        db.session.commit()
        flash("Department has been added!", "success")
        return redirect(url_for("dep.show_departments"))
    return render_template(
        "html/departament_add.html", title="Add new department",
        form=form, legend="New Department"
    )
@dep.route("/department/<int:department_id>")
def show_department(department_id):
    """Render the detail page of the department with the given id (404 if absent)."""
    department = Department.query.get_or_404(department_id)
    return render_template(
        "html/departament.html", title=department.name, department=department
    )
@dep.route("/department/<int:department_id>/update", methods=["GET", "POST"])
def update_department(department_id):
    """Update the department with the given id (404 if absent).

    GET pre-fills the form with the current name; a valid POST saves the
    new name and redirects back to the department list.
    """
    department = Department.query.get_or_404(department_id)
    form = DepartmentForm()
    if form.validate_on_submit():
        # Set department name to a value from the form.
        department.name = form.name.data
        db.session.commit()
        flash("Department has been updated!", "success")
        return redirect(url_for("dep.show_departments"))
    if request.method == "GET":
        # Fill the form with current value.
        form.name.data = department.name
    return render_template(
        "html/departament_add.html", title="Update department",
        form=form, legend=f"Update {department.name}"
    )
@dep.route("/department/<int:department_id>/delete", methods=["POST"])
def delete_department(department_id):
    """Delete the department with the given id (404 if absent).

    Deletion is refused (with a flash message) when the database raises
    IntegrityError, i.e. the department still has employees.
    """
    department = Department.query.get_or_404(department_id)
    try:
        db.session.delete(department)
        db.session.commit()
    except IntegrityError:
        # If department has employees handle an exception.
        flash("Department that has employees cannot be deleted!", "danger")
        return redirect(url_for("dep.show_departments"))
    else:
        # Redirect to departments page with success message.
        flash("Department has been deleted!", "success")
        return redirect(url_for("dep.show_departments"))
| [
"tinkerino571@gmail.com"
] | tinkerino571@gmail.com |
8ba80ac4b037dde92443141d60bd35bf1f98031e | e4414bd8152e52855db7ab9065ae12b7329143e0 | /python/src/hangman.py | 0dd38bbfdc6501bc39f632a253400dd40bbf2d07 | [] | no_license | catalinc/programmingpraxis-solutions | 39cb847877ec46d2fb85740791c24889ab5654a8 | c0b13906aa76ffac705bf108db138fb9a38bc16a | refs/heads/master | 2021-03-27T16:46:47.781839 | 2017-09-09T15:17:38 | 2017-09-09T15:17:38 | 53,532,233 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,889 | py | #!/usr/bin/env python
# See http://programmingpraxis.com/2011/12/20/hangman/
import random
import sys
HANGMAN = [
"",
"""
O
""",
"""
O
|
""",
"""
_O
|
""",
"""
_O_
|
""",
"""
_O_
|
/
""",
"""
_O_
|
/ \\
"""
]
def play_game():
    """Run one interactive round of hangman (Python 2: uses raw_input).

    Picks a random secret word, then loops reading single-letter guesses:
    a hit is added to the guessed set, a miss draws the next hangman
    stage.  The round ends when the player completes the word (win) or
    exhausts all stages (lose).  Ctrl-C quits the whole program.
    """
    secret_word = random_word().upper()
    guessed_letters = set()
    failed_attempts = 0
    print_matches(secret_word, guessed_letters)
    while True:
        try:
            letter = raw_input("Your guess ? ").upper()
        except KeyboardInterrupt:
            exit_game()
        # NOTE(review): repeating an already-guessed letter still counts as
        # a hit, and multi-character input is matched as a substring --
        # confirm whether that leniency is intended.
        if letter in secret_word:
            guessed_letters.add(letter)
        else:
            failed_attempts += 1
            print_hangman(failed_attempts)
            if lose(failed_attempts):
                print("Sorry, you lose...")
                print("The word was: %s" % (" ".join(list(secret_word))))
                break
        print_matches(secret_word, guessed_letters)
        if win(secret_word, guessed_letters):
            print("You nail it !")
            break
def random_word(words_file='words.lst'):
    """Return one uniformly random word from *words_file*.

    Uses single-pass reservoir sampling: the i-th line replaces the
    current pick with probability 1/i, so every line is equally likely
    without loading the whole file.  Returns None for an empty file.

    Fix: the original returned the raw line, so the trailing newline
    became part of the secret word -- an unguessable character that broke
    the win check and the masked display.  The word is now stripped.
    """
    word = None
    n = 0
    with open(words_file) as f:
        for line in f:
            n += 1
            # Reservoir sampling: keep this line with probability 1/n.
            if random.random() < 1.0 / n:
                word = line
    return word.strip() if word is not None else None
def print_matches(word, letters):
    """Print *word* with every letter not in *letters* masked as '_'.

    Characters are separated by single spaces, e.g. "A _ C".
    """
    masked = [ch if ch in letters else "_" for ch in word]
    print(" ".join(masked))
def exit_game():
    """Print a farewell message and terminate the process with status 0."""
    print("Bye !")
    sys.exit(0)
def print_hangman(guess_attempts):
    """Draw the hangman stage matching the number of failed guesses."""
    # print() call form is valid under both Python 2 and 3; the rest of
    # this file already uses parenthesised print calls.
    print(HANGMAN[guess_attempts])
def win(secret_word, guessed_letters):
    """Return True once every distinct letter of the word has been guessed.

    Fix: the original compared len(secret_word) with len(guessed_letters),
    which can never be true for words containing repeated letters, because
    the guessed set holds each letter only once.  Comparing the *set* of
    letters in the word with the guessed set handles repeats correctly.
    """
    return set(secret_word) == guessed_letters
def lose(failed_attempts):
    """Return True once the figure is fully drawn (all stages used up)."""
    final_stage = len(HANGMAN) - 1
    return failed_attempts == final_stage
if __name__ == '__main__':
    print("Let's play Hangman !")
    # Keep starting new rounds until the player declines.
    while True:
        play_game()
        # raw_input is Python 2; under Python 3 this would be input().
        if raw_input("Play another ? [Y]/N ").upper() == "N":
            exit_game()
| [
"catalin.cristu@gmail.com"
] | catalin.cristu@gmail.com |
f34e9faec68e7fae95df6a032abed8a08c2de4b9 | 2dcf2376a47133cc3b0a47a6bf4a68959fc5070a | /Purva/__init__.py | e108078c8234edbff57608013737d12566e5a3c8 | [] | no_license | princeapyds/Tg_90_Py_DS | 2c079b2ff1ce876003a393289c92a1bdd0fd65af | 90c3b5250344e0a57bf678f16821015442214106 | refs/heads/main | 2023-02-24T06:28:47.466909 | 2021-01-30T22:55:05 | 2021-01-30T22:55:05 | 334,366,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | a=78
b=98
print(a+b) | [
"Purvajagtap21@gmail.com"
] | Purvajagtap21@gmail.com |
12e05ceaac7c5c4174fb21ada9bdbb1e70c90c54 | ffb05b145989e01da075e2a607fb291955251f46 | /pypers/oxford/non_cooperative.py | 6c7b293967ae50f89ebf7f90ccccdc8e62ba6d40 | [] | no_license | micheles/papers | a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7 | be9070f8b7e8192b84a102444b1238266bdc55a0 | refs/heads/master | 2023-06-07T16:46:46.306040 | 2018-07-14T04:17:51 | 2018-07-14T04:17:51 | 32,264,461 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # non_cooperative.py
class B1(object):
    """Cooperative base: announces itself, then forwards **kw up the MRO."""
    def __init__(self, **kw):
        print "B1.__init__"
        super(B1, self).__init__(**kw)
class B2(object):
    """Cooperative base: announces itself, then forwards **kw up the MRO."""
    def __init__(self, **kw):
        print "B2.__init__"
        super(B2, self).__init__(**kw)
| [
"michele.simionato@gmail.com"
] | michele.simionato@gmail.com |
00fd1033e37e360a31091239beb574fc36c393de | 55ee96e2bba54908974cddbcd6ce39868bb6ccae | /config/urls.py | ca9abba46406f86aeb53b015d743107dc14505c9 | [] | no_license | marti1125/BlogAPI | ec9d00816463b989108c451cab3aec05e48ea6c6 | 6275b67a762cc14cf214b3882c023a74927fc6da | refs/heads/main | 2023-03-25T17:50:20.128068 | 2021-03-23T20:12:27 | 2021-03-23T20:12:27 | 350,809,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,721 | py | """config URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URL conf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# drf-yasg schema view: serves the OpenAPI description for Swagger/Redoc.
schema_view = get_schema_view(
    openapi.Info(
        title='Blog API',
        default_version='v1',
        # Fixed typo in the public description ("form learnig" -> "for learning").
        description='A sample API for learning DRF',
        terms_of_service='https://www.google.com/policies/terms/',
        contact=openapi.Contact(email='will@company.com'),
        license=openapi.License(name='MIT license'),
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
    path('admin/', admin.site.urls),
    # Application endpoints.
    path('api/v1/', include('posts.urls')),
    # Browsable-API login plus token/session auth and registration.
    path('api-auth/', include('rest_framework.urls')),
    path('api/v1/auth/', include('dj_rest_auth.urls')),
    path('api/v1/auth/registration/', include('dj_rest_auth.registration.urls')),
    # Interactive API documentation (Swagger UI and Redoc).
    path('swagger/', schema_view.with_ui(
        'swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc/', schema_view.with_ui(
        'redoc', cache_timeout=0), name='schema-redoc'),
]
| [
"marti1125@gmail.com"
] | marti1125@gmail.com |
51d5ae1fa2d5ae73a65d826bd1113e9b57cef767 | 03383b657ad6d526e7e6aa6639fe41019cd39ea2 | /recursion/palandrome.py | 985c2d63e7528bf16d0978634606988d462fbf30 | [] | no_license | ahmedmeshref/Leetcode-Solutions | 1c5f908cb2f6487c9dfadcc8f91192dedbb5a17e | 28f848cb25e4aa22e6d8c9d715488f191ed15137 | refs/heads/main | 2023-05-26T14:33:11.246122 | 2021-06-07T21:32:13 | 2021-06-07T21:32:13 | 356,045,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | def isPalindrome(s: str) -> bool:
l = 0
r = len(s) - 1
while l != r:
if not s[l].isalpha():
l += 1
elif not s[r].isalpha():
r -= 1
elif s[l].lower() != s[r].lower():
return False
else:
l += 1
r -= 1
return s[l] == s[r]
print(isPalindrome("A man, a plan, a canal: Panama"))
| [
"a.meshref@alustudent.com"
] | a.meshref@alustudent.com |
cdbf6fa8943b1af5835b87a56ecc9d8ee32cbe88 | 133150a4bfcfa14f45ffb0745d006ff10377e4a5 | /natural_language_processing/basic_methods.py | 7924d0d681e139fad1f26aa68a51c11cf9d9ded0 | [] | no_license | ElvinOuyang/learning-and-practices | 4ce8a05ec546fdf0d16aa53269cb028234c72a6d | 244b5a21cf17cde4efa89b92e19ef54e8f9355e3 | refs/heads/master | 2020-07-05T00:50:03.579650 | 2019-10-15T02:47:27 | 2019-10-15T02:47:27 | 74,134,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | # preparation
from __future__ import division
import nltk
from nltk.book import *
'Searching Text'
# .concordance() displays the context of each occurrence of the word
text1.concordance("monstrous")
text2.concordance("affection")
text3.concordance("lived")
print("\n")
# .similar() returns words that appear in similar contexts
text1.similar("monstrous")
print("\n")
text2.similar("affection")
print("\n")
text3.similar("lived")
# .common_contexts() returns the contexts shared by two or more words,
# separated with commas
text2.common_contexts(["monstrous","very"])
# display the words' locations with dispersion plots
text4.dispersion_plot(["citizens","democracy","freedom","duties","America"])
'Tokens, word types, and lexical richness'
print(sorted(set(text3)))
# a sorted list of all word types included in text3;
# set() keeps one entry per unique token
print(len(set(text3)))
# 2789 word types
print(len(text3) / len(set(text3)))
# "lexical richness": average number of uses per distinct word
print(text3.count("smote"))
# how many times a word appears in a text
print(100 * text5.count('lol') / len(text5))
# the word's share of all tokens in the text, as a percentage
def lexical_diversity(text):
    """Average number of occurrences per distinct token in *text*."""
    distinct_tokens = set(text)
    return len(text) / len(distinct_tokens)
def percentage(word, full_text):
    """Share of *full_text* tokens equal to *word*, as a percentage."""
    occurrences = full_text.count(word)
    return 100 * occurrences / len(full_text)
# Use "+" operators for concatenation
| [
"elvin.ouyang@gmail.com"
] | elvin.ouyang@gmail.com |
bb9d2da18b8fd9fb4ca15383251051ce2328118c | 8dd7de62d854e797e96135040fefd4f879a729ca | /app/urls.py | a5232ba3e4899c6a8dae5b0d70df7122e8c8cf36 | [] | no_license | Anusreechandra/newwebtd | 12fc7e701302030c933c69da6d1c8101cd32ad78 | 53a2868f000d0dd56e0443a4815b5aee6bc6cc0b | refs/heads/master | 2023-09-05T11:44:22.496261 | 2021-11-23T05:51:25 | 2021-11-23T05:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from os import name
from django.urls import path
from django.urls.resolvers import URLPattern
from. import views
# Namespace used by url_for / {% url %} reversals, e.g. "app:web".
app_name="app"
urlpatterns=[
    # Site root served by the `new` view.
    path('',views.new,name="web"),
]
"anusreechandra.327@gmail.com"
] | anusreechandra.327@gmail.com |
74242c01bb15f0739920d399519d7227f57b8f8a | fc91e867bb74cbebcb0ee608f1477ae16af91631 | /.venv/bin/django-admin.py | 96ac186b1a98732e5519c8d218d286e285191802 | [] | no_license | karthik018/FacebookPosts | 7580afbfab066c6bd09a43be086e4ce9621bbd65 | 2fbe59e8640ca2da01e0028f7b10c4f9d7b62b65 | refs/heads/master | 2020-06-12T05:29:43.493418 | 2019-07-16T06:16:25 | 2019-07-16T06:16:25 | 194,204,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | #!/home/ib_admin/FacebookPosts/.venv/bin/python3.7
from django.core import management
if __name__ == "__main__":
    # Virtualenv-generated django-admin shim: dispatch to Django's CLI.
    management.execute_from_command_line()
| [
"thinkcreative01karthik@gmail.com"
] | thinkcreative01karthik@gmail.com |
f2b1502e69ebbcb83d168b947b4e1536ab2f5ca9 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/nlp/Speech_Transformer_ID0487_for_PyTorch/test/test_lr.py | 50038c0c6c0a3d30f8e5cf25a74a0eebf9c1ebc4 | [
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,717 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Plot the Transformer "Noam" learning-rate schedule:
    #   lr = k * d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)
    # i.e. linear warmup for `warmup_steps` steps, then inverse-sqrt decay.
    k = 0.2                      # extra scaling factor on the schedule
    warmup_steps = 4000
    d_model = 512                # model dimension; sets the base scale
    init_lr = d_model ** (-0.5)
    lr_list = []
    for step_num in range(1, 500000):
        lr = k * init_lr * min(step_num ** (-0.5),
                               step_num * (warmup_steps ** (-1.5)))
        lr_list.append(lr)
    # Print the warmup head and the decayed tail, then plot the whole curve.
    print(lr_list[:100])
    print(lr_list[-100:])
    plt.plot(lr_list)
    plt.show()
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
99023c5533e743afb8349cd031816969f2e0f52e | 6527b66fd08d9e7f833973adf421faccd8b765f5 | /yuancloud/recicler/event/tests/test_mail_schedule.py | 7b92308e184f89e0d7bc6436545f7d9324c6b05d | [] | no_license | cash2one/yuancloud | 9a41933514e57167afb70cb5daba7f352673fb4d | 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572 | refs/heads/master | 2021-06-19T22:11:08.260079 | 2017-06-29T06:26:15 | 2017-06-29T06:26:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | # -*- coding: utf-8 -*-
import datetime
from dateutil.relativedelta import relativedelta
from yuancloud import fields, tools
from yuancloud.addons.event.tests.common import TestEventCommon
from yuancloud.tools import mute_logger
class TestMailSchedule(TestEventCommon):
    # Integration test for event mail schedulers: one mailing sent right at
    # subscription time and one reminder sent two days before the event.

    @mute_logger('yuancloud.addons.base.ir.ir_model', 'yuancloud.models')
    def test_00_event_mail_schedule(self):
        """ Test mail scheduling for events """
        # Auto-confirm registrations so the subscription mailing fires
        # immediately on create, without a manual confirmation step.
        self.env['ir.values'].set_default('event.config.settings', 'auto_confirmation', True)
        now = fields.datetime.now()
        event_date_begin = now + relativedelta(days=1)
        event_date_end = now + relativedelta(days=3)
        # Event with two mail schedulers attached via one2many commands.
        test_event = self.Event.sudo(self.user_eventmanager).create({
            'name': 'TestEventMail',
            'date_begin': event_date_begin,
            'date_end': event_date_end,
            'seats_max': 10,
            'event_mail_ids': [
                (0, 0, {  # right at subscription
                    'interval_unit': 'now',
                    'interval_type': 'after_sub',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_subscription')}),
                (0, 0, {  # 2 days before event
                    'interval_nbr': 2,
                    'interval_unit': 'days',
                    'interval_type': 'before_event',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_reminder')}),
            ]
        })
        # create some registrations
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg0',
            'email': 'reg0@example.com',
        })
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg1',
            'email': 'reg1@example.com',
        })
        # check subscription scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'after_sub')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, test_event.create_date, 'event: incorrect scheduled date for checking controller')
        # verify that subscription scheduler was auto-executed after each registration
        self.assertEqual(len(schedulers[0].mail_registration_ids), 2, 'event: incorrect number of mail scheduled date')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'subscription'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of subscription mail sent')
        for registration in schedulers[0].mail_registration_ids:
            self.assertTrue(registration.mail_sent, 'event: wrongly confirmed mailing on subscription')
        # check before event scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'before_event')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, datetime.datetime.strftime(event_date_begin + relativedelta(days=-2), tools.DEFAULT_SERVER_DATETIME_FORMAT), 'event: incorrect scheduled date')
        # execute event reminder scheduler explicitly
        schedulers[0].execute()
        self.assertTrue(schedulers[0].mail_sent, 'event: reminder scheduler should have sent an email')
        self.assertTrue(schedulers[0].done, 'event: reminder scheduler should be done')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'reminder'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of reminders in outgoing mail queue')
| [
"liuganghao@lztogether.com"
] | liuganghao@lztogether.com |
43ccac82b3cdb77298df1e3149e0f74a89bcc7f9 | ae462dab95f3a0c9a60ce884dcc57c45e282bec7 | /venv/bin/django-admin.py | 3332cf1d05f15580c89bc3163e45a63b1f521516 | [] | no_license | aidandyde/bridging_coursework | 8224a177536ef115eac2be86c7a891bfcd54b8b2 | e4fe1df0baa4adf99b759a2fc84ebdf5d5c8bf1c | refs/heads/master | 2022-12-24T11:48:59.882757 | 2020-10-10T13:03:44 | 2020-10-10T13:03:44 | 270,255,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | #!/home/adyde/PycharmProjects/bc2/venv/bin/python
from django.core import management
if __name__ == "__main__":
    # Legacy django-admin.py entry point: delegate to Django's CLI dispatcher.
    management.execute_from_command_line()
| [
"adyde@DESKTOP-5A990CS.lan"
] | adyde@DESKTOP-5A990CS.lan |
d25af681e443d6b6f9b541e3f60dab32f7f5e5ae | f0c3edae1d5f97ffb0c5039bca2ed9c51a74bc6b | /libalgopy/common/interfaces/binary_search_tree.py | e892bd31ab6dd4b6ae1b8a50a912ad5f2931b1c3 | [
"MIT"
] | permissive | PotapenkoOleg/libalgopy | d2bfbbcec6004c07e0b3aea4d835085ae18f3f6f | ac625c0f874918c1967218c302c6fcb200db0271 | refs/heads/master | 2020-07-07T03:36:02.698884 | 2019-09-24T19:13:45 | 2019-09-24T19:13:45 | 203,232,533 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | class BinarySearchTreeBase:
def add(self, key, value):
    # Stub: presumably inserts `value` under `key`; to be implemented by subclasses.
    pass
def get(self, key):
    # Stub: presumably returns the value stored under `key`.
    pass
def remove(self, key):
    # Stub: presumably deletes the entry for `key`.
    pass
def get_min(self):
    # Stub: presumably returns the smallest key in the tree.
    pass
def get_max(self):
    # Stub: presumably returns the largest key in the tree.
    pass
def get_floor(self):
    # Stub: presumably the floor operation (largest key <= a target); signature
    # takes no key argument here -- confirm intended interface.
    pass
def get_ceiling(self):
    # Stub: presumably the ceiling operation (smallest key >= a target).
    pass
def get_rank(self):
    # Stub: presumably the rank operation (number of keys below a target).
    pass
def preorder(self, action):
    # Stub: presumably applies `action` to each node in pre-order.
    pass
def inorder(self, action):
    # Stub: presumably applies `action` to each node in in-order (sorted) order.
    pass
def postorder(self, action):
    # Stub: presumably applies `action` to each node in post-order.
    pass
def clear(self):
    # Stub: presumably removes all entries.
    pass
def is_empty(self):
    # Stub: presumably reports whether the tree holds no entries.
    pass
def get_size(self):
    # Stub: presumably returns the number of stored entries.
    pass
if __name__ == '__main__':
    # Interface-only module: nothing to execute directly.
    pass
| [
"PotapenkoOleg@gmail.com"
] | PotapenkoOleg@gmail.com |
e72948be0ca42fe9b52c9425a7150892ba8b117e | 704b303675dae80119257090fabf1afb386dd3e2 | /AtCoder/misc/エイシング2020/A.py | e1d29b6edc2c66d6003073a54bbbaec2e551ba63 | [] | no_license | kathmandu777/CompetitiveProgramming | 7b7ead14d8a697df89f04faf05276030349a1a98 | 1949ab55fd399e16765c9effb95541e600601730 | refs/heads/master | 2023-05-13T19:23:30.503873 | 2021-06-07T10:15:57 | 2021-06-07T10:15:57 | 347,570,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py | l, r, d = map(int, input().split())
ans = 0
# Count the multiples of d in the inclusive range [l, r]
# (l, r, d are read from stdin on the line above).
for i in range(l, r + 1):
    if (i % d == 0):
        ans += 1
print(ans)
| [
"mahhakatomanato@gmail.com"
] | mahhakatomanato@gmail.com |
53a8b28fc5cf1543cb3b8e7da6653456f2cfc645 | 8e61d9c9ee28014d1eb36f4c98b4295172ac4e4a | /2020/Linear-Regression/challenge_regression.py | f29b738bbd177f5d5d4c42fb52ceb5859f91845a | [] | no_license | vigneshwaran444/python-folders | d53d303fdc5ef4f6ced48d5e1528ebf9d6bff09f | 0ebd8bafd03a7d1e23b6b180547d553d55b35d6a | refs/heads/master | 2021-01-06T10:43:41.547026 | 2020-02-18T07:48:18 | 2020-02-18T07:48:18 | 241,300,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | import pandas as pd
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# Load the (x, y) samples; double-bracket selection keeps each a 2-D
# DataFrame, the shape sklearn's fit() expects.
dataframe = pd.read_csv('data/challenge_dataset.csv')
x_values = dataframe[['x']]
y_values = dataframe[['y']]
# Fit an ordinary least-squares line y = a*x + b.
model = LinearRegression()
model.fit(x_values, y_values)
# Scatter the raw points and overlay the fitted regression line.
plt.scatter(x_values, y_values)
plt.plot(x_values, model.predict(x_values))
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
411dcfd4de66bbc272d46522811c996c36dce81d | e04a6a11b8770e8bbcfe9509348d8a86a4189295 | /mapper/utils.py | 6a8f9834d28a6b5a859a7d50eee77fa8fc178bbc | [] | no_license | minh5/mapper | 48267c7591b022aa2f548665ff6b9349de5cccd5 | 04deb1d917ca1feb784afd6a5252303f44f704cc | refs/heads/master | 2021-01-17T07:31:46.146206 | 2017-04-02T03:28:07 | 2017-04-02T03:28:07 | 83,748,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py |
# Source shapefile bundles for DC census geographies (opendata.dc.gov).
DC_CENSUS_TRACTS = 'http://opendata.dc.gov/datasets/6969dd63c5cb4d6aa32f15effb8311f3_8.zip'
DC_CENSUS_BLOCKS = 'http://opendata.dc.gov/datasets/c143846b7bf4438c954c5bb28e5d1a21_2.zip'
# Pre-converted GeoJSON copies served from the app's static directory.
DC_GEOJSON_TRACTS = 'mapper/static/geojson/dc_census_tracts.geojson'
DC_GEOJSON_BLOCKS = 'mapper/static/geojson/dc_census_blocks.geojson'
# (value, label) pairs for choice fields.
GEO_LEVEL_CHOICES = [('tracts', 'Census Tracts'), ('blocks', 'Census Blocks')]
MATCH_KEY_CHOICES = [('geoid', 'GEO ID'),
                     ('tract', 'CENSUS TRACT'),
                     ('blkgrp', 'CENSUS BLOCK GROUP')]
# Attribute columns to skip -- presumably when joining user data onto the
# geography; confirm against MapMaker's usage.
COLUMNS_TO_AVOID = ['OBJECTID', 'TRACT', 'BLKGRP', 'GEOID']
# df = DataFile.objects.first()
# self = MapMaker(geo_level='tracts', match_key='GEOID', data_file=df)
| [
"minh.v.mai@gmail.com"
] | minh.v.mai@gmail.com |
bfbfc8a34978055dd0f63e6431d91169cf9edad9 | f697327e665ef5c19c182ac9dc0f02b648cc6f09 | /Lib/site-packages/django/contrib/gis/geos/prototypes/geom.py | 89f58f2b4bb5cb445437b3d599349712e35ce222 | [
"BSD-3-Clause"
] | permissive | SgtSwagrid/swagbets | e45860949f1f3da8de2d89b45786357d5956f1ce | 5391142a95a444e020d6fb8d76022ba538489ea6 | refs/heads/develop | 2023-08-19T03:38:05.314230 | 2020-04-30T14:02:35 | 2020-04-30T14:02:35 | 217,722,496 | 1 | 1 | null | 2021-09-22T18:37:31 | 2019-10-26T14:40:39 | Python | UTF-8 | Python | false | false | 3,563 | py | from ctypes import POINTER, c_char_p, c_int, c_size_t, c_ubyte
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string,
)
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
pass
# ### ctypes factory classes ###
class BinConstructor(GEOSFuncFactory):
"Generate a prototype for binary construction (HEX, WKB) GEOS routines."
argtypes = [c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# HEX & WKB output
class BinOutput(GEOSFuncFactory):
"Generate a prototype for the routines that return a sized string."
argtypes = [GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
class GeomOutput(GEOSFuncFactory):
"For GEOS routines that return a geometry."
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
class IntFromGeom(GEOSFuncFactory):
"Argument is a geometry, return type is an integer."
argtypes = [GEOM_PTR]
restype = c_int
errcheck = staticmethod(check_minus_one)
class StringFromGeom(GEOSFuncFactory):
"Argument is a Geometry, return type is a string."
argtypes = [GEOM_PTR]
restype = geos_char_p
errcheck = staticmethod(check_string)
# ### ctypes prototypes ###
# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = IntFromGeom('GEOSNormalize')
geos_type = StringFromGeom('GEOSGeomType')
geos_typeid = IntFromGeom('GEOSGeomTypeId')
get_dims = GEOSFuncFactory('GEOSGeom_getDimensions', argtypes=[GEOM_PTR], restype=c_int)
get_num_coords = IntFromGeom('GEOSGetNumCoordinates')
get_num_geoms = IntFromGeom('GEOSGetNumGeometries')
# Geometry created factories
create_point = GeomOutput('GEOSGeom_createPoint', argtypes=[CS_PTR])
create_linestring = GeomOutput('GEOSGeom_createLineString', argtypes=[CS_PTR])
create_linearring = GeomOutput('GEOSGeom_createLinearRing', argtypes=[CS_PTR])
# Polygon and collection created routines are special and will not
# have their argument types defined.
create_polygon = GeomOutput('GEOSGeom_createPolygon')
create_empty_polygon = GeomOutput('GEOSGeom_createEmptyPolygon')
create_collection = GeomOutput('GEOSGeom_createCollection')
# Ring routines
get_extring = GeomOutput('GEOSGetExteriorRing', argtypes=[GEOM_PTR])
get_intring = GeomOutput('GEOSGetInteriorRingN', argtypes=[GEOM_PTR, c_int])
get_nrings = IntFromGeom('GEOSGetNumInteriorRings')
# Collection Routines
get_geomn = GeomOutput('GEOSGetGeometryN', argtypes=[GEOM_PTR, c_int])
# Cloning
geom_clone = GEOSFuncFactory('GEOSGeom_clone', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# Destruction routine.
destroy_geom = GEOSFuncFactory('GEOSGeom_destroy', argtypes=[GEOM_PTR])
# SRID routines
geos_get_srid = GEOSFuncFactory('GEOSGetSRID', argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory('GEOSSetSRID', argtypes=[GEOM_PTR, c_int])
| [
"alec.dorrington@protonmail.com"
] | alec.dorrington@protonmail.com |
900ce188b2d480f02d08d8fe381e861ddc612129 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/j33.py | 32bc61a86b88039321d406fcdd145e2fdf95b792 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # (Python 2) Expects a token list whose first and last elements are
    # literal '"' marks; prints the words between them, or a blank line
    # when the quotes enclose nothing.  Silently ignores malformed input.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # (Python 2) Interpret each line of the program file: lines whose first
    # token is 'j33' are print commands handled by printFunction; any other
    # first token aborts with ERROR.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'j33':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    # The program file to interpret is the single CLI argument.
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
3838f2b41a6e02c39425f0fdede2a1ec591aa2eb | cbfa9a784f85df6ed0dd630757eac709b556deca | /venv/bin/pygmentize | a090a273b8a7c223c0aa644d85dcbd3d07124155 | [] | no_license | itohdaigo/finance_app | c2b8b5c681180459054ee1744bff6e3b4726f8a4 | afa9c17c12aadd5e28d62715c162cdadf4505ca0 | refs/heads/master | 2023-04-13T02:37:35.720408 | 2021-04-24T09:39:03 | 2021-04-24T09:39:03 | 361,120,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/Users/x17007xx/PycharmProjects/WebAPI/final_finance/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0] so usage/help output shows the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"x17007xx@pcx17007xx.local"
] | x17007xx@pcx17007xx.local | |
7987e4d08bd07e960ecd3647886fef2d5e2e298e | 7340cba4292746f9f2fb62f8414d7267a4a9f626 | /sewamobilmysql/urls.py | d2b527c36e5268c9a09ba9026bdc9cc589ce3b72 | [] | no_license | Fairizal/sewamobilmysql | 931344c74b7b8a6f9ae2fc32686f2e211fd59b34 | 669d3ab3b90934cd121d0fe1ec5f1e932575caec | refs/heads/master | 2023-02-10T09:34:24.903094 | 2021-01-07T14:14:54 | 2021-01-07T14:14:54 | 327,594,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | """sewamobilmysql URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from mobilmysql import views
from mobilmysql import carviews
from mobilmysql import rentviews
urlpatterns = [
    path('admin/', admin.site.urls),
    # Contact CRUD routes.
    # NOTE(review): the names 'detail', 'carsdetail' and 'rentdetail' are
    # each used by two routes; reverse() resolves to the later entry --
    # confirm this is intended.
    path('contacts/', views.IndexView.as_view(), name='index'),
    path('contacts/<int:pk>/', views.ContactDetailView.as_view(), name='detail'),
    path('contacts/edit/<int:pk>/', views.edit, name='edit'),
    path('contacts/detail/<int:pk>/', views.detail, name='detail'),
    path('contacts/create/', views.create, name='create'),
    path('contacts/delete/<int:pk>/', views.delete, name='delete'),
    # Car CRUD routes.
    path('cars/', carviews.IndexView.as_view(), name='carsindex'),
    path('cars/<int:pk>/', carviews.CarDetailView.as_view(), name='carsdetail'),
    path('cars/edit/<int:pk>/', carviews.edit, name='carsedit'),
    path('cars/detail/<int:pk>/', carviews.detail, name='carsdetail'),
    path('cars/create/', carviews.create, name='carscreate'),
    path('cars/delete/<int:pk>/', carviews.delete, name='carsdelete'),
    # Rental CRUD routes.
    path('rent/', rentviews.IndexView.as_view(), name='rentindex'),
    path('rent/<int:pk>/', rentviews.RentDetailView.as_view(), name='rentdetail'),
    path('rent/edit/<int:pk>/', rentviews.edit, name='rentedit'),
    path('rent/detail/<int:pk>/', rentviews.detail, name='rentdetail'),
    path('rent/create/', rentviews.create, name='rentcreate'),
    path('rent/delete/<int:pk>/', rentviews.delete, name='rentdelete'),
]
| [
"mrzarowtaz@gmail.com"
] | mrzarowtaz@gmail.com |
3d195c4f0e242b52419cb18ba6b04f8c2286253c | a2fd4bdfea5030201b44320fc9bf8fccf5f31a59 | /Robot/ttwifi.py | 8f83c0a0b66d8c7dec442f5dd351d998ee1d2f6f | [] | no_license | aaron-visschedijk/PicoRobot | c2e1c51a621b6f6dc7f594183d5470d6e5cbfee5 | 71b6e9c550a24487f917c69499dc16d7b5799306 | refs/heads/master | 2022-10-31T11:04:35.264849 | 2020-06-10T11:31:30 | 2020-06-10T11:31:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | import network, time
def init_wifi(apname, password, timeout=3000):
    """Connect to wifi (MicroPython).

    apname   -- SSID of the access point.
    password -- network passphrase.
    timeout  -- milliseconds to block waiting for an IP address; a value
                <= 0 returns immediately without waiting.

    Returns the active ``network.WLAN`` station interface.  Callers should
    inspect ``wifi.ifconfig()`` to see whether an address was obtained --
    a failed join still returns the interface object.
    """
    wifi = network.WLAN(network.STA_IF)
    wifi.active(True)
    wifi.connect(apname, password)
    if timeout > 0:
        # Give the join a moment before starting to poll for a DHCP lease.
        time.sleep_ms(1000)
        start = time.ticks_ms()
        while True:
            if wifi.ifconfig()[0] != '0.0.0.0':
                break  # got an IP address
            # BUG FIX: MicroPython's millisecond tick counter wraps around,
            # so differences must be computed with time.ticks_diff() --
            # a raw subtraction is wrong across the wrap point.
            if time.ticks_diff(time.ticks_ms(), start) > timeout:
                break  # gave up waiting
    return wifi
| [
"noreply@github.com"
] | noreply@github.com |
1749340cf3def999cc726ba63a0fb94d0a618ed5 | 69636805a67ed244e13d61d838b56791018dee62 | /compjour/homework2/part3/b.py | 7400d58870411077ae415bb3d0a9eddb5ca1c8a9 | [] | no_license | anacrochas1/compciv-2016 | 2176306d774642f7d9a22f02c9d6a599a9942a18 | abd94d0bfcc6c1612ada06f3f563c0764b2fe2b9 | refs/heads/master | 2021-01-18T21:09:33.755755 | 2016-06-02T05:54:18 | 2016-06-02T05:54:18 | 49,533,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,996 | py | import requests
import pdfplumber
from os.path import basename
from glob import glob
import csv
# WARN (layoff notice) report PDFs published by the NC Department of Commerce.
urls = [
    'https://www.nccommerce.com/Portals/11/Documents/Reports/WARN/Warn.pdf',
    'https://www.nccommerce.com/Portals/11/Documents/Reports/WARN/warn-2015.pdf',
    'https://www.nccommerce.com/Portals/11/WARN/Warn2014.pdf',
    'https://www.nccommerce.com/Portals/11/WARN/Warn-2013.pdf'
]


def extract_tables_to_csv(pdf_fname, csv_fname):
    """Write every page's table from pdf_fname into csv_fname.

    The first row of each page's table (a repeated column header) is
    skipped.  Both files are managed with `with` blocks -- the original
    code wrote `outfile.close` without parentheses, so the CSV files were
    never actually closed, and the pdfplumber handles leaked as well.
    """
    with open(csv_fname, 'w') as outfile:
        outcsv = csv.writer(outfile)
        with pdfplumber.open(pdf_fname) as pdf:
            for page in pdf.pages:
                table = page.extract_table()
                for row in table[1:]:  # skip the per-page header row
                    outcsv.writerow(row)


# Download each report to a local NCWARN-*.pdf file.
for url in urls:
    pdf_fname = 'NCWARN-' + basename(url)
    print("Downloading", url, 'into', pdf_fname)
    resp = requests.get(url)
    with open(pdf_fname, 'wb') as f:
        f.write(resp.content)

pdf_filenames = glob('NCWARN-*.pdf')
for pdf_fname in pdf_filenames:
    print("This is a filename of a pdf:", pdf_fname)

# One extraction loop replaces the four copy-pasted stanzas of the
# original; the CSV name simply mirrors the PDF name.
for pdf_fname in pdf_filenames:
    extract_tables_to_csv(pdf_fname, pdf_fname[:-len('.pdf')] + '.csv')
"anasantos@stanford.edu"
] | anasantos@stanford.edu |
946d3175c0adf994c71ca2c1a180f19e61fd3b8d | 2b1421583f94290485751bfdd4b8bc84497ccf52 | /example/SimpleExample/wordcount/wc_mapper.py | 973ebec5271f6e8991c6e8100bba9c2b8511b172 | [] | no_license | mrasu/SkipJack | dae792c0ef469f47439defbceda05e5db974e120 | 5d6a4d64f96a30b5b989eac95c1754b2ee8497bd | refs/heads/master | 2021-01-01T19:20:27.048172 | 2015-01-12T08:15:50 | 2015-01-12T08:31:13 | 28,266,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | #!/usr/bin/env python3
# encoding: utf-8
import sys
# Hadoop-streaming style word-count mapper: emit "<word>\t1" for every
# whitespace-separated token read from stdin.
for l in sys.stdin:
    for word in l.strip().split(): print('{0}\t1'.format(word))
| [
"m.rasu.hitsuji@gmail.com"
] | m.rasu.hitsuji@gmail.com |
23467415c0ff0bb85683ce085bbe02f8ea763c9c | 8209cc68a3fdd611922b00bae5936d70f149ae3e | /bin/jupyter-trust | a64b62fb12ec2c2a47a5bb4ab02634a0ad3644a4 | [] | no_license | alinmuraretu/alin1987 | ccab6afc12e44dc5f0b550f598874f391891b56a | 8538af0de6a8619ef9bd225ce4bc027a9802aa71 | refs/heads/master | 2022-11-14T00:45:58.635014 | 2019-12-16T11:00:31 | 2019-12-16T11:00:31 | 225,604,601 | 0 | 1 | null | 2022-10-28T01:18:09 | 2019-12-03T11:34:43 | Python | UTF-8 | Python | false | false | 273 | #!/home/alin/alinH/hello/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from nbformat.sign import TrustNotebookApp
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to
    # argv[0], then hand off to the jupyter-trust application.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(TrustNotebookApp.launch_instance())
| [
"alinmuraretu@gmail.com"
] | alinmuraretu@gmail.com | |
61a8e927425ca7f83f10755c734017335c282faf | d3eba05768f13fb77037f094c8cd5bee0a2ce920 | /python/pyclaw/evolve/rp/rp_nel.py | f2e2719f65640b03ee2716b28443d86dd264553b | [
"BSD-3-Clause"
] | permissive | geoflows/geoclaw-4.x | cb262ffcc30b483e4e1bf0ba480f43408cb1b6fd | c8879d25405017b38392aa3b1ea422ff3e3604ea | refs/heads/master | 2023-07-10T06:39:42.774585 | 2021-02-23T20:15:36 | 2021-02-23T20:15:36 | 6,094,587 | 1 | 2 | BSD-3-Clause | 2023-06-28T16:50:43 | 2012-10-05T18:18:31 | Fortran | UTF-8 | Python | false | false | 2,368 | py | #!/usr/bin/env python
# encoding: utf-8
r"""
F-wave Riemann solver for nonlinear elasticity in heterogeneous media
.. math:: q_t + f(q,x)_x = 0
where
.. math::
q(x,t) = \left [ \begin{array}{c} \epsilon(x,t) \\ \rho(x) u(x,t) \end{array} \right ]
and the flux vector is
.. math::
f(q,x) = \left [ \begin{array}{c} -u \\ \sigma(\epsilon,x) \end{array} \right ]
:Authors:
David I. Ketcheson (2010-11-06): Initial version
"""
# ============================================================================
# Copyright (C) 2010 David I. Ketcheson <david.ketcheson@kaust.edu.sa>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
def rp_nel_1d(q_l,q_r,aux_l,aux_r,aux_global):
    r"""
    1d nonlinear elasticity Riemann solver (f-wave formulation).

    *q_l*, *q_r* -- (nrp, 2) arrays of left/right states
        [strain epsilon, momentum rho*u] at each interface
    *aux_l*, *aux_r* -- (nrp, 2) arrays; aux[:,0] is the density rho and
        aux[:,1] the bulk modulus K of the adjacent cells
    *aux_global* -- unused; kept for the standard solver signature

    Returns ``(fwave, s, amdq, apdq)``: the two f-waves, their speeds, and
    the left-/right-going fluctuations for each of the nrp interfaces.

    :Version: 1.1 -- ported to Python 3 (``xrange`` -> ``range``)
    """
    meqn = 2     # equations: strain and momentum
    mwaves = 2   # one left-going and one right-going wave

    # Number of Riemann problems solved simultaneously.
    nrp = np.size(q_l, 0)

    # Output arrays.
    fwave = np.empty((nrp, meqn, mwaves))
    s = np.empty((nrp, mwaves))
    amdq = np.empty((nrp, meqn))
    apdq = np.empty((nrp, meqn))

    # Linearized bulk modulus, sound speed and impedance on each side.
    bulkl = sigmap(q_l[:, 0], aux_l[:, 1])
    bulkr = sigmap(q_r[:, 0], aux_r[:, 1])
    cl = np.sqrt(bulkl / aux_l[:, 0])
    cr = np.sqrt(bulkr / aux_r[:, 0])
    zl = cl * aux_l[:, 0]
    zr = cr * aux_r[:, 0]

    # Jumps in velocity (u = momentum/rho) and stress across the interface.
    du = q_r[:, 1] / aux_r[:, 0] - q_l[:, 1] / aux_l[:, 0]
    dsig = sigma(q_r[:, 0], aux_r[:, 1]) - sigma(q_l[:, 0], aux_l[:, 1])

    # Wave strengths from decomposing the flux jump onto the eigenvectors.
    b1 = - (zr * du + dsig) / (zr + zl)
    b2 = - (zl * du - dsig) / (zr + zl)

    # 1-wave travels left with speed -cl.
    fwave[:, 0, 0] = b1
    fwave[:, 1, 0] = b1 * zl
    s[:, 0] = -cl

    # 2-wave travels right with speed cr.
    fwave[:, 0, 1] = b2
    fwave[:, 1, 1] = b2 * (-zr)
    s[:, 1] = cr

    # Fluctuations: with f-waves the left-going fluctuation is just the
    # 1-wave and the right-going one the 2-wave.
    # BUG FIX: the original used the Python-2-only `xrange`, which raises
    # NameError on Python 3; `range` is the direct equivalent here.
    for m in range(meqn):
        amdq[:, m] = fwave[:, m, 0]
        apdq[:, m] = fwave[:, m, 1]

    return fwave, s, amdq, apdq

def sigma(eps,K):
    """Exponential stress-strain relation: sigma(eps) = exp(K*eps) - 1."""
    return np.exp(K*eps)-1.0

def sigmap(eps,K):
    """d(sigma)/d(eps) = K*exp(K*eps): the linearized bulk modulus."""
    return K*np.exp(K*eps)
| [
"dave.jorge@gmail.com"
] | dave.jorge@gmail.com |
32cc458f40f3f76f78a8c90090a128e02d2287c3 | e79a2e369c616d53098af77fd5a778f2eee74efd | /carousel/serializers.py | a9bd729f51040e1157f7d729cbe8c2c1ca923ed3 | [] | no_license | dev-codflaw/django-cms-app | ff5417740e21c50e8db3909aaa25b36bba010cf6 | 662cd6b32d3d93d32291f135cf9e925f547ee590 | refs/heads/master | 2023-03-15T07:22:22.196633 | 2021-03-28T20:13:22 | 2021-03-28T20:13:22 | 344,841,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from rest_framework import serializers
from .models import Carousel, CarouselSlide
class CarouselSlideSerializer(serializers.ModelSerializer):
    """Serialize every model field of a CarouselSlide."""
    class Meta:
        model = CarouselSlide
        fields = '__all__'
class CarouselSerializer(serializers.ModelSerializer):
    """Serialize a Carousel with its slides nested read-only under `images`.

    NOTE(review): `images` presumably matches a related_name on the
    CarouselSlide -> Carousel foreign key -- confirm against the model.
    """
    images = CarouselSlideSerializer(many=True, read_only=True)

    class Meta:
        model = Carousel
        fields = '__all__'
"dev.codflaw@gmail.com"
] | dev.codflaw@gmail.com |
7ecc62afd402ebb789b92db5a227ff9decfcf2eb | 5cd8accc308978291ffba6d08aed7664d2ba9d6b | /ngrams.py | 267c8cbe7109a9006bff0ea9eb7e21fa808c9669 | [] | no_license | ValentineBlacker/PoetryBot | 699752ac50a62b9f3e30409f1cdca6f67b2dddcd | db2c6b81c60e282de69a38b146a9c5a11f9930a8 | refs/heads/master | 2020-05-17T03:48:15.904719 | 2017-08-24T21:10:23 | 2017-08-24T21:10:23 | 40,332,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,120 | py | import nltk
import random
from nltk.collocations import *
from nltk.metrics.association import *
class NgramUtils(object):
    """Collocation/ngram helpers over a corpus built from local text files."""

    def __init__(self, sourcestring):
        # `sourcestring` is an iterable of file names whose contents are
        # joined into a single corpus string and then tokenized.
        self.text = self.buildtext(sourcestring)
        self.tokens = self.prepareTokens(self.text)

    def importFromLocal(self, filename):
        """Read one UTF-8 text file and return its text lower-cased.

        BUG FIX: the original called `raw.lower()` but discarded the
        result, so the corpus silently stayed mixed-case; the call's
        presence makes the lower-casing intent clear.  Also uses `with`
        so the file handle is closed.
        """
        with open(filename, encoding="utf-8") as f:
            return f.read().lower()

    def buildtext(self, sourcestring):
        """Concatenate every source file's text, space-separated."""
        return ' '.join(self.importFromLocal(name) for name in sourcestring)

    def removeThingsFromList(self, possibleList):
        """Strip common punctuation tokens from `possibleList` (in place).

        Each entry of the removal list deletes at most one matching token;
        '.' appears twice, so up to two periods are removed -- behaviour
        preserved from the original implementation (confirm if removing
        *all* occurrences was intended).
        """
        for mark in ['"', '.', ',', '.', ';', '!']:
            if mark in possibleList:
                possibleList.remove(mark)
        return possibleList

    def prepareTokens(self, text):
        """Tokenize the corpus on word/punctuation boundaries."""
        return nltk.wordpunct_tokenize(text)

    def buildCollocationsList(self, myword, scored_ngrams):
        """Collect the trailing words of every ngram starting with `myword`."""
        collocations = []
        for ngram in scored_ngrams:
            if ngram[0] == myword:
                collocations.extend(ngram[1:])
        return collocations

    def assocMeasuresSwitcher(self, number_of_grams):
        """Association-measure class for 2-, 3- or 4-grams."""
        measures = [BigramAssocMeasures, TrigramAssocMeasures,
                    QuadgramAssocMeasures]
        return measures[number_of_grams - 2]

    def finderSwitcher(self, number_of_grams):
        """Collocation-finder constructor for 2-, 3- or 4-grams."""
        finders = [BigramCollocationFinder.from_words,
                   TrigramCollocationFinder.from_words,
                   QuadgramCollocationFinder.from_words]
        return finders[number_of_grams - 2]

    def buildScoredNgramsList(self, myword, number_of_grams):
        """Corpus ngrams seen at least twice, sorted lexicographically."""
        ngram_measures = self.assocMeasuresSwitcher(number_of_grams)()
        finder = self.finderSwitcher(number_of_grams)(self.tokens, window_size=4)
        finder.apply_freq_filter(2)
        scored = finder.score_ngrams(ngram_measures.raw_freq)
        return sorted(ngram for ngram, score in scored)

    def findcollocations(self, myword, number_of_grams=2):
        """All words seen collocated after `myword` in the corpus."""
        scored = self.buildScoredNgramsList(myword, number_of_grams)
        return self.buildCollocationsList(myword, scored)

    def findonecollocation(self, myword):
        """One random collocate of `myword`, or ' ' when it has none."""
        candidates = self.findcollocations(myword)
        if candidates:
            return random.choice(candidates)
        return ' '

    def generateresult(self, myword):
        """Random-walk 100 collocation steps from `myword`; return the path."""
        words = [myword]
        for _ in range(100):
            myword = self.findonecollocation(myword)
            words.append(myword)
        resultstring = ''
        for word in words:
            resultstring = resultstring + word + ' '
        return resultstring

    ### FIND RANDOM WORD
    def pickWord(self):
        """Return a random purely-alphabetic token from the corpus.

        BUG FIX: the original retried only once and implicitly returned
        None when the second draw was also non-alphabetic.  Keep drawing
        until an alphabetic token appears (assumes the corpus contains at
        least one -- TODO confirm).
        """
        while True:
            word = self.tokens[random.randrange(0, len(self.tokens))]
            if word.isalpha():
                return word

    def generateTitle(self, length):
        """Chain `length` collocation steps from a random seed word.

        BUG FIX: the original called the bare name `findonecollocation`,
        which does not exist at module level and raised NameError; it must
        be invoked through `self`.
        """
        currentword = str(self.pickWord())
        returnlist = []
        for _ in range(length):
            currentword = self.findonecollocation(str(currentword))
            returnlist.append(currentword)
        resultstring = ''
        for word in returnlist:
            resultstring = resultstring + word + ' '
        return resultstring
# for i in range(5):
# print (generateTitle(random.randrange(3,9)))
#print (findcollocations('the'))
# for i in range(0,3):
# print (generateresult('the'))
#unscoredQuadgrams = finder.nbest(Quadgram_measures.pmi, 300)
| [
"emoore@pillartechnology.com"
] | emoore@pillartechnology.com |
280b77339358be2f20c549dc8150493e387dda41 | 70976a4a0526f7585f810921925cf8d19e6aabfa | /project/apps/registration/renderers.py | 284383224eec531d0f219ee4cde5a726982bb973 | [
"BSD-2-Clause"
] | permissive | barberscore/barberscore-api | 36be50b943ed59ac2fc738069661f5b589354a36 | 1ed4c01ae35cad21282b573a492733837f956285 | refs/heads/master | 2023-09-03T21:14:57.358069 | 2023-07-08T20:45:03 | 2023-07-08T20:45:03 | 11,014,681 | 14 | 5 | BSD-2-Clause | 2023-02-08T01:18:17 | 2013-06-28T03:28:17 | Python | UTF-8 | Python | false | false | 1,118 | py |
# Third-Party
from rest_framework.renderers import BaseRenderer
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework_json_api.renderers import JSONRenderer
class BrowsableAPIRendererWithoutForms(BrowsableAPIRenderer):
    """Browsable-API renderer with every HTML form suppressed."""

    def get_context(self, *args, **kwargs):
        """Build the normal template context, then switch edit forms off."""
        context = super().get_context(*args, **kwargs)
        context.update(display_edit_forms=False)
        return context

    def show_form_for_method(self, view, method, request, obj):
        """Forms are never shown, regardless of view, method or permissions."""
        return False

    def get_rendered_html_form(self, data, view, method, request):
        """Skip form rendering entirely; contribute empty markup."""
        return ""
class XLSXRenderer(BaseRenderer):
    """Pass-through DRF renderer for pre-built .xlsx binary payloads."""
    media_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    format = 'xlsx'
    charset = None  # binary payload: no text charset
    render_style = 'binary'

    def render(self, data, media_type=None, renderer_context=None):
        # The view supplies already-serialized workbook bytes; emit as-is.
        return data
| [
"noreply@github.com"
] | noreply@github.com |
56bcbea21f1b75f0de25fd94d56e75f5b4c8ed34 | 1f5f63b5f42cf0b410cb4f67c355e51aba71f646 | /lab1-2/part1.py | d9527bd97a2130e56b35f155e69308b4e91ea3d4 | [] | no_license | danemortensen/databases | 02378545faae783a7d227679dfa470e8437c869d | 84dffd52908a52226ac152813cd6e40c3a99e08b | refs/heads/master | 2021-08-23T02:48:33.709280 | 2017-09-27T06:00:39 | 2017-09-27T06:00:39 | 104,786,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | # Dane Mortensen and Kartik Mendiratta
import sys
class Student:
    """One row of students.txt: identity, school placement and GPA."""

    def __init__(self, first_name, last_name, grade, classroom, bus, gpa,
                 teacher_first, teacher_last):
        self.first_name = first_name        # student's first name
        self.last_name = last_name          # student's last name
        self.grade = grade                  # grade level (int)
        self.classroom = classroom          # classroom number (int)
        self.bus = bus                      # bus route number (int)
        self.gpa = gpa                      # grade point average (float)
        self.teacher_first = teacher_first  # teacher's first name
        self.teacher_last = teacher_last    # teacher's last name


def record_students(students):
    """Parse students.txt, appending one Student per line to `students`.

    Expected line format (comma-separated):
        last, first, grade, classroom, bus, gpa, teacher_last, teacher_first

    On a missing/unreadable file or a malformed record the program exits
    silently, as before -- but unrelated exceptions are no longer swallowed
    by a bare `except`, and the file handle is closed via `with`.
    """
    try:
        with open("students.txt") as f:
            for line in f:
                info = line.strip().split(",")
                students.append(Student(info[1].strip(), info[0].strip(),
                                        int(info[2]), int(info[3]),
                                        int(info[4]), float(info[5]),
                                        info[7].strip(), info[6].strip()))
    except (IOError, ValueError, IndexError):
        sys.exit()
def student_search(students, last_name):
    """Print grade, classroom and teacher for every student with this last name."""
    for student in students:
        if student.last_name == last_name:
            print "{0}, {1}: grade = {2}, classroom = {3}, teacher = {4}, {5}".format(student.last_name,\
            student.first_name, student.grade, student.classroom, student.teacher_last, student.teacher_first)
def student_bus_search(students, last_name):
    """Print the bus route for every student with the given last name."""
    for student in students:
        if student.last_name == last_name:
            print "{0}, {1}: bus route = {2}".format(student.last_name,\
            student.first_name, student.bus)
def teacher_search(students, last_name):
    """Print the name of every student whose teacher has the given last name."""
    for student in students:
        if student.teacher_last == last_name:
            print "{0}, {1}".format(student.last_name, student.first_name)
def grade_high_search(students, grade):
target = 0
gpa = -1
for student in students:
if student.grade == grade and (gpa == -1 or student.gpa > gpa):
target = student
gpa = student.gpa
if gpa >= 0:
print "{0}, {1}: gpa = {2}, teacher = {3}, {4}, bus = {5}".format(target.last_name,\
target.first_name, target.gpa, target.teacher_last, target.teacher_first, target.bus)
def grade_low_search(students, grade):
target = 0
gpa = -1
for student in students:
if student.grade == grade and (gpa == -1 or student.gpa < gpa):
target = student
gpa = student.gpa
if gpa >= 0:
print "{0}, {1}: gpa = {2}, teacher = {3}, {4}, bus = {5}".format(target.last_name,\
target.first_name, target.gpa, target.teacher_last, target.teacher_first, target.bus)
def grade_search(students, grade):
    """Print the name of every student in the given grade."""
    for student in students:
        if student.grade == grade:
            print "{0}, {1}".format(student.last_name, student.first_name)
def bus_search(students, bus):
    """Print name, grade and classroom of every student on the given bus route."""
    for student in students:
        if student.bus == bus:
            print "{0}, {1}: grade = {2}, classroom = {3}".format(student.last_name,\
            student.first_name, student.grade, student.classroom)
def average_search(students, grade):
    """Print the average gpa for the given grade (silent when the grade is empty)."""
    total = 0
    count = 0
    for student in students:
        if student.grade == grade:
            total += student.gpa
            count += 1
    if count > 0:
        # gpa values are floats, so total / count is true division even on Python 2.
        print "Grade {0}: avg gpa = {1}".format(grade, total / count)
def info_search(students):
    """Print how many students are in each grade 0 through 6."""
    grades = [0, 0, 0, 0, 0, 0, 0]
    for student in students:
        # Grades outside 0..6 are ignored rather than counted.
        if student.grade >= 0 and student.grade <= 6:
            grades[student.grade] += 1
    for x in range(0, 7):
        print "{0}: {1}".format(x, grades[x])
def print_invalid():
    """Report an unrecognized or malformed command."""
    print("Invalid command")
def handle_command(students, cmd):
    """Dispatch one tokenized command to the matching search function.

    Command words may be abbreviated: cmd[0] == STUDENT[:len(cmd[0])] is
    true whenever cmd[0] is a prefix of "Student" (e.g. "S", "Stu"), and
    likewise for the other keywords below.
    """
    STUDENT = "Student"
    BUS = "Bus"
    TEACHER = "Teacher"
    GRADE = "Grade"
    HIGH = "High"
    LOW = "Low"
    AVERAGE = "Average"
    INFO = "Info"
    # S[tudent] <lastname> [B[us]]
    if cmd[0] == STUDENT[:len(cmd[0])]:
        if len(cmd) == 2:
            student_search(students, cmd[1])
        elif len(cmd) == 3 and cmd[2] == BUS[:len(cmd[2])]:
            student_bus_search(students, cmd[1])
        else:
            print_invalid()
    # T[eacher] <lastname>
    elif cmd[0] == TEACHER[:len(cmd[0])]:
        if len(cmd) == 2:
            teacher_search(students, cmd[1])
        else:
            print_invalid()
    # G[rade] <n> [H[igh] | L[ow]]
    elif cmd[0] == GRADE[:len(cmd[0])]:
        if len(cmd) == 3 and cmd[2] == HIGH[:len(cmd[2])] and cmd[1].isdigit():
            grade_high_search(students, int(cmd[1]))
        elif len(cmd) == 3 and cmd[2] == LOW[:len(cmd[2])] and cmd[1].isdigit():
            grade_low_search(students, int(cmd[1]))
        elif len(cmd) == 2 and cmd[1].isdigit():
            grade_search(students, int(cmd[1]))
        else:
            print_invalid()
    # B[us] <route number>
    elif cmd[0] == BUS[:len(cmd[0])]:
        if len(cmd) == 2 and cmd[1].isdigit():
            bus_search(students, int(cmd[1]))
        else:
            print_invalid()
    # A[verage] <grade>
    elif cmd[0] == AVERAGE[:len(cmd[0])]:
        if len(cmd) == 2 and cmd[1].isdigit():
            average_search(students, int(cmd[1]))
        else:
            print_invalid()
    # I[nfo] -- takes no arguments
    elif cmd[0] == INFO[:len(cmd[0])]:
        if len(cmd) == 1:
            info_search(students)
        else:
            print_invalid()
    else:
        print_invalid()
def main():
    """Prompt-and-dispatch loop: read commands until the user quits."""
    QUIT = "Quit"
    students = []
    record_students(students)
    while 1:
        line = raw_input("Enter a command: ")
        # Skip blank lines and '#' comment lines (re-prompt without banner).
        while len(line.strip()) == 0 or line.strip()[0] == '#':
            line = raw_input()
        # Any prefix of "Quit" (e.g. "Q", "Qui") ends the session.
        if line == QUIT[:len(line)]:
            break
        # Normalize "Student: Smith" style input into space-separated tokens.
        cmd = line.replace(": ", " ").split(" ")
        handle_command(students, cmd)
if __name__ == "__main__":
    main()
| [
"mortensendane@gmail.com"
] | mortensendane@gmail.com |
a9c5464e88bf6c5aa66f33725d36f389ec6c8f5c | 8db990c0b732597a02e77fb5f090bd2581ce7147 | /setup.py | 5e67da0140c2d6c387aec11110484b65a1d497e1 | [] | no_license | bonham/python_patch_poc | fc68246c1d51cb0ca1d1907b995f7d1385bd7ee3 | e8d5e774d937729333d311f781c7be090ff58d83 | refs/heads/master | 2023-01-15T17:32:31.216875 | 2020-12-01T20:56:45 | 2020-12-01T20:56:45 | 317,664,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | from setuptools import setup
# Packaging configuration: publish the repository root as the "acm" package.
setup(name='acm', packages=['.'])
| [
"2823246+bonham@users.noreply.github.com"
] | 2823246+bonham@users.noreply.github.com |
4f8578fbd7547486757f7eca34ac33f443c2e858 | 6bb7b1c556e84234cc58c12072d211c5e0238467 | /classic-algs/lab19/Morozov/main.py | be53ffaac8b0c3af3f3f6927f0d1632eca634ad1 | [] | no_license | MikhailErofeev/a3200-2015-algs | 56db99494c28f150c62042171d90fb89d089636c | 186304637851c989b4680a6a75d9bd2851d326e0 | refs/heads/master | 2021-01-15T15:31:29.201475 | 2016-02-27T10:02:55 | 2016-02-27T10:02:55 | 42,346,823 | 0 | 28 | null | 2023-06-24T07:29:36 | 2015-09-12T06:08:16 | Python | UTF-8 | Python | false | false | 914 | py | from sys import stdin, stdout
__author__ = 'vks'
def palindrome(s):
    """Return the longest palindromic subsequence of *s*."""
    n = len(s)
    if n == 0:
        return ""
    # best[i][j] = length of the longest palindromic subsequence of s[i..j].
    best = [[0] * n for _ in range(n)]
    for i in range(n):
        best[i][i] = 1
    for i in range(n - 2, -1, -1):
        for j in range(i + 1, n):
            if s[i] == s[j]:
                best[i][j] = best[i + 1][j - 1] + 2
            else:
                best[i][j] = max(best[i + 1][j], best[i][j - 1])
    # Walk inward from both ends, mirroring the table fill, to rebuild the
    # subsequence: matched pairs go into the left half, a leftover single
    # character becomes the middle.
    left_half = []
    middle = ""
    lo, hi = 0, n - 1
    while lo <= hi:
        if s[lo] == s[hi]:
            if lo == hi:
                middle = s[lo]
            else:
                left_half.append(s[lo])
            lo += 1
            hi -= 1
        elif best[lo + 1][hi] > best[lo][hi - 1]:
            lo += 1
        else:
            hi -= 1
    prefix = "".join(left_half)
    return prefix + middle + prefix[::-1]
if __name__ == "__main__":
    # NOTE(review): readline() keeps the trailing '\n' in s; a newline never
    # matches a letter, so it cannot join the palindromic subsequence for
    # normal one-line input -- confirm whether stripping is wanted anyway.
    s = stdin.readline()
    stdout.write(palindrome(s) + "\n")
| [
"vks_m@mail.ru"
] | vks_m@mail.ru |
7d82abc23d5e3d4bf5e54cd6ec2da4a4d1a8768f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02631/s475540632.py | 3a87c44b29ca2411c0463a78d1676b61c5e7616c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | N = int(input())
a = list(map(int, input().split()))
# XOR operator: ^
# Compute S, the XOR of every element of a
S = 0
for aa in a:
    S ^= aa
# The i-th answer is a[i] XOR S (XOR-ing a[i] back out of the running total)
ans = []
for ai in a:
    ans.append(S ^ ai)
print(*ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
34557398436b87628d05729c6353fc562dd95e50 | 2c3a8e18dbd7ae9aeaa3d11e16490fb24946c294 | /main.py | f2c7e9683ddbb6a8a3bca25b910c95902e641a49 | [] | no_license | Shicheng-Guo/pharma_scraper | f98af3b624f25559536959fa5e316dcd8eaf7524 | 6837bfdf4d6903a105bc2b89a345cb72ffae2399 | refs/heads/master | 2023-05-05T00:31:44.991795 | 2020-05-13T21:14:46 | 2020-05-13T21:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,206 | py | import bio, yahoo
from baker_bros import get_baker_holdings
#libraries needed to make and open csv files
import csv, os, sys
from platform import system
from datetime import datetime
#progress bar code
def progressbar(it, prefix="", size=60, file=sys.stdout):
    """Yield the items of *it* while drawing a textual progress bar to *file*."""
    count = len(it)

    def draw(done):
        # One carriage-return-terminated line: prefix [###...] done/count
        filled = int(size * done / count)
        file.write("%s[%s%s] %i/%i\r" % (prefix, "#" * filled, "." * (size - filled), done, count))
        file.flush()

    draw(0)
    for done, item in enumerate(it, 1):
        yield item
        draw(done)
    file.write("\n")
    file.flush()
# Column headers for the output spreadsheet.
fields = ["Ticker", "Date", "Price", "Type", "Drug Name", "Note", "Market Cap", "Yearly High", "Yearly Low" ,"Target", "Revenue", "Cash", "Debt", "Net Income Avai.", "Baker Bros Own?"]
# Ask how many days to search for; fall back to 90 on bad or zero input.
try:
    days_to_search = int(input("How many days do you want to search for? "))
    if days_to_search == 0:
        raise Exception('Zero days entered')
except:
    print("There was an error with your input, defaulting to 90 days. \n")
    days_to_search = 90
# Gets all entries within the requested number of days.
entries = bio.getEntries(days_to_search)
# Makes a new line so everything looks cleaner.
print("\n")
# rows will eventually hold all data for the csv body.
rows = []
# Gets the Baker Bros holdings info.
baker_holdings = get_baker_holdings()
# Iterate every entry and map each of its companies to one csv row;
# progressbar() draws a progress bar while we fetch.
for entry in progressbar(entries, "Fetching: "):
    for i in range(len(entry['companies'])):
        ticker = entry['companies'][i]['ticker']
        yahoo_data = yahoo.scrape(ticker)
        rows.append([ticker, entry['date'], round(entry['companies'][i]["price"], 2), entry['class'], entry['name'], entry['note'], yahoo_data[0], yahoo_data[2], yahoo_data[3], yahoo_data[1], yahoo_data[4], yahoo_data[5], yahoo_data[6], yahoo_data[7], "Yes" if ticker in baker_holdings else "No"])
# Create a "spreadsheets" folder next to the program and name the csv after
# the current date.
# When frozen into a bundle, sys.executable points at the app itself, so its
# directory is the home dir; when running live, use the current directory
# (os.getcwd), since sys.executable would be the Python interpreter path.
cwd = os.path.dirname(sys.executable) if getattr( sys, 'frozen', False ) else os.getcwd()
os.makedirs(cwd + "/spreadsheets", exist_ok=True)
filename = cwd + "/spreadsheets/curated_list_" + datetime.today().strftime('%Y-%m-%d') + ".csv"
# Write to csv file.
print("\nWriting to csv file.")
with open(filename, 'w+') as csvfile:
    csvwriter = csv.writer(csvfile)
    csvwriter.writerow(fields)
    csvwriter.writerows(rows)
# Open the csv file with the platform's default handler.
print("\nOpening file now.\n")
platform_name = system()
if platform_name == "Windows" : os.startfile(filename)
elif platform_name == "Darwin": os.system("open {}".format(filename))
elif platform_name == "Linux" : os.system("xdg-open {}".format(filename))
else: print("Your OS is not supported for automatic opening of the CSV file. Please check the current directory for a csv file.") | [
"veer.shah1016@gmail.com"
] | veer.shah1016@gmail.com |
62cd2150ee646c818aaf4bdc5ab5bad886d60b7d | 13bb4b9391cecbe738f42fe5cdcc0ba8a7f9459b | /138.py | d34c56802331b035461fabcd1820ecb40f585599 | [] | no_license | sairin1202/Leetcode | af526ad4dbcabeda2c4c0af293915b5a0f352997 | ba97b463373f319f7531fe347e05adadf4c84255 | refs/heads/master | 2020-04-28T04:44:59.301283 | 2019-05-11T13:23:37 | 2019-05-11T13:23:37 | 174,992,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | """
# Definition for a Node.
class Node(object):
def __init__(self, val, next, random):
self.val = val
self.next = next
self.random = random
"""
class Solution(object):
    def copyRandomList(self, head):
        """
        Deep-copy a linked list whose nodes carry an extra random pointer.

        :type head: Node
        :rtype: Node
        """
        if head is None:
            return None
        # First pass: clone every node; next/random get wired up afterwards.
        clone_of = {}
        node = head
        while node:
            clone_of[node] = Node(node.val, None, None)
            node = node.next
        # Second pass: connect each clone's next/random through the mapping.
        node = head
        while node:
            duplicate = clone_of[node]
            if node.next:
                duplicate.next = clone_of[node.next]
            if node.random:
                duplicate.random = clone_of[node.random]
            node = node.next
        return clone_of[head]
| [
"952141617@qq.com"
] | 952141617@qq.com |
a2559efa512954198137adbc50e9f68a983b1d49 | e2717a7e949e45d830a1f8f6e48db2eaf2acc559 | /SendMail/HtmlMail.py | 7f13bb8fec0dfcdaac286ca5411955eff59d4b2e | [] | no_license | wangfuli217/cookbooks | a256d17c813b752cd1a9a3a2bab24e35f53dbbc3 | cd76e29cbfe54506e74be9aa8468b46939b9970d | refs/heads/master | 2022-03-13T16:22:09.293003 | 2016-01-25T12:21:22 | 2016-01-25T12:21:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | '''
Created on Mar 1, 2013
@author: absolootly
'''
# # {{{ http://code.activestate.com/recipes/577751/ (r1)
# Send an HTML email with an embedded image and a plain text message for
# email clients that don't want to display the HTML.
# NOTE(review): these capitalized module paths are the Python 2 layout of
# the email package; under Python 3 they would be email.mime.multipart etc.
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
# Define these once; use them twice! (placeholder addresses)
strFrom = 'from@example.com'
strTo = 'to@example.com'
# Create the root message and fill in the from, to, and subject headers
msgRoot = MIMEMultipart('related')
msgRoot['Subject'] = 'test message'
msgRoot['From'] = strFrom
msgRoot['To'] = strTo
msgRoot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
msgText = MIMEText('This is the alternative plain text message.')
msgAlternative.attach(msgText)
# We reference the image in the IMG SRC attribute by the ID we give it below
msgText = MIMEText('<b>Some <i>HTML</i> text</b> and an image.<br><img src="cid:image1"><br>Nifty!', 'html')
msgAlternative.attach(msgText)
# This example assumes the image is in the current directory
fp = open('test.jpg', 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
# Define the image's ID as referenced above
msgImage.add_header('Content-ID', '<image1>')
msgRoot.attach(msgImage)
# Send the email (this example assumes SMTP authentication is required)
import smtplib
smtp = smtplib.SMTP()
smtp.connect('smtp.example.com')
smtp.login('exampleuser', 'examplepass')
smtp.sendmail(strFrom, strTo, msgRoot.as_string())
smtp.quit()
| [
"simon.takite@gmail.com"
] | simon.takite@gmail.com |
a8b9238885ae44f9ca50f62afbb013af1ccaf9fb | c5beafc6083f367581a85e509702117ab7e30571 | /KJ_Python_101_lessons/Medium/HOW_MANY_COINS.py | 498f911b1104a3248ab9269378ed3070c80245b0 | [] | no_license | jevinkeffers/DC-Repos-Week-1 | dc5720d9d56752d17ba384aeef67c53cdc7e7d09 | 5d2d88ad4f934e104a7084c119759198aff85597 | refs/heads/master | 2022-11-26T01:31:17.420529 | 2020-08-03T17:43:09 | 2020-08-03T17:43:09 | 284,602,277 | 0 | 0 | null | 2020-08-03T17:44:47 | 2020-08-03T04:23:09 | Python | UTF-8 | Python | false | false | 503 | py | # 3. How many coins?
# Write a program that will prompt you for how many coins you want. Initially you have no coins. It will ask you if you want a coin? If you type "yes", it will give you one coin, and print out the current tally. If you type no, it will stop the program.
coins = 0
answer = "yes"
# Keep offering coins until the user stops answering "yes". Any answer other
# than "yes" exits the loop; only a literal "no" also prints the farewell.
while answer == "yes":
    print("You have %s coins." % coins)
    answer = input("Do you want another? ")
    if answer == "yes":
        coins +=1
    if answer == "no":
        print("Bye")
# #SOLVED | [
"jevinkeffers@gmail.com"
] | jevinkeffers@gmail.com |
f656638e96e930f1109f6fab8c21afd95f6cfbb2 | 6522148a2dede107d02ee134bd40aa3a445c3d18 | /Rating/movies/models.py | 41b121b9848324ed243a4b6d0e8cb27f893e9dff | [] | no_license | nishchintg01/Inter-Task | 3d22e92a52929853312354c1e7116d5c30891a05 | d4fc887cc1dc809d8428c0832da49eac7454b7ba | refs/heads/master | 2023-03-11T04:53:31.044017 | 2021-02-20T17:48:19 | 2021-02-20T17:48:19 | 340,706,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | from django.db import models
from django.db.models import Avg
from django.contrib.auth.models import User
class Movies(models.Model):
    """A movie that users can rate through related Comments rows."""
    title = models.CharField(max_length=1000)
    Image = models.ImageField(upload_to = 'movies', default = 'default.png')
    Description = models.TextField()

    @property
    def Avg_rating(self):
        """Arithmetic mean of all comment ratings for this movie; 0 when unrated.

        Fix: filter on this instance directly instead of re-fetching the row
        with Movies.objects.get(id=self.id), which previously issued an extra
        query both when building the queryset and again for .count(); the
        bare except (which hid every error, not just division by zero) is
        replaced with an explicit empty-set check.
        """
        total = 0
        count = 0
        for comment in Comments.objects.filter(movie=self):
            total += comment.Rating
            count += 1
        if count == 0:
            return 0
        return total / count
class Comments(models.Model):
    """A user's review of a movie, with a numeric rating."""
    review = models.TextField()
    Rating = models.FloatField()
    created = models.DateTimeField(auto_now_add=True)
    movie = models.ForeignKey(Movies, related_name="comments" ,on_delete=models.CASCADE)
    Author = models.ForeignKey(User, related_name="author",on_delete=models.CASCADE)
    @property
    def ratings(self):
        # Template helper: a list with int(Rating) elements, presumably looped
        # over to render one star per rating point -- TODO confirm in templates.
        return ["hi"]*int(self.Rating)
| [
"nishchintg01@gmail.com"
] | nishchintg01@gmail.com |
2406a8e151d4c61ef3ecc2072ee1776148ed6813 | 389e77ec9c48d5c17ec60513af430baa9604e4ca | /User_Crawler/get_CC_by_date.py | b0f9ec2c6723947f79dde4fd763cc8547484a62f | [
"MIT"
] | permissive | lgs/Medium-crawler-with-data-analyzer | 244a36fd654cd5f6f0070c81063877b812595640 | fed1a99c0b524871d430b3090a6bd8f501654535 | refs/heads/master | 2020-04-13T23:49:49.776900 | 2017-11-12T11:33:25 | 2017-11-12T11:33:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | # -*- coding: utf-8 -*-
from util_graph import *
if __name__ == '__main__':
    # Script entry point: get_CC_by_date is provided by the
    # `from util_graph import *` above.
    get_CC_by_date()
| [
"lifei9696@gmail.com"
] | lifei9696@gmail.com |
d07a60bdf242a97e8858b4b2294e67ebaf2da416 | b05346b257a7c73bf1b1cda2aadf2c737d48de6c | /nytimes/nytimes/items.py | 6f76b1b3d546564e7b58c7b6dd1e1078deef7e33 | [] | no_license | speedhawk21/bamboo-listeners | 7494509d803f5875bcac07dbe2109cedd6af8aa6 | 3d026b50a9f639f138e8026249e1718950016c1a | refs/heads/master | 2022-01-07T04:43:58.290590 | 2018-05-02T03:17:13 | 2018-05-02T03:17:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class NytimesItem(scrapy.Item):
    """Scrapy item for nytimes.com pages (no fields declared yet)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| [
"briansc@gmail.com"
] | briansc@gmail.com |
a9a8b662488d57b7b989089e053d23aa782604eb | 55bf3dd0cddb5d43af367744895b8cafc7ca86bc | /Clss/Controller/HtmlConsensusController.py | 40e4c8c86ededb354bcb1a5ab0d3c50525774f86 | [] | no_license | Maximato/fstage | 5f0c4d6936d92a713ac285209a5fa27651ad95a2 | 98f8dc52f735bcaa9cc8a2f1c2f1697fcf0f5b4d | refs/heads/master | 2020-09-12T11:14:03.185510 | 2020-03-18T09:36:32 | 2020-03-18T09:36:32 | 222,405,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from Clss.FileSys.Extractor import Extractor
from Clss.Model.HtmlConsensusParser import HtmlConsensusParser
from Clss.FileSys.PeMutateConsensusWriter import PeMutateConsensusWriter
from Clss.FileSys.RecordsWriter import RecordsWriter
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
class HtmlConsensusController:
    @staticmethod
    def convert_to_mutations(html_file, outfile, levels_of_confidence, cut_from=0, cut_to=None, fmt="fasta"):
        """
        Convert an HTML consensus into a '.fasta' or 'primer explorer' file containing the consensus
        string plus a confidence string with mutation information. Mutations are positions with a low
        level of confidence (a high probability of finding different nucleotides there), marked as '-'.

        :param html_file: filename with the HTML consensus
        :param outfile: output filename
        :param levels_of_confidence: classes considered 'reliable' positions; every other position is
            marked as a mutation. Format: ['c90', 'c80', ... ]
        :param cut_from: start position for trimming the consensus (inclusive)
        :param cut_to: end position for trimming the consensus (exclusive, slice semantics)
        :param fmt: output format: 'fasta' or 'pe' (primer explorer)
        :raises AttributeError: for any other value of fmt
        """
        # Parse the raw HTML into parallel sequence / confidence strings.
        html_consensus = Extractor.extract_html_consensus(html_file)
        html_consensus_parser = HtmlConsensusParser()
        html_consensus_parser.parse_html_consensus(html_consensus, levels_of_confidence)
        # Trim both strings with the same slice so they stay aligned.
        sequence = html_consensus_parser.consensus_string[cut_from:cut_to]
        consensus = html_consensus_parser.confidence_string[cut_from:cut_to]
        if fmt == "fasta":
            consensus_record = SeqRecord(Seq(sequence), id="sequence", description=f"sequence of {html_file}")
            confidence_record = SeqRecord(Seq(consensus), id="consensus", description=f"consensus with levels:"
                                                                                     f" {levels_of_confidence}")
            RecordsWriter([consensus_record, confidence_record]).write_to(outfile)
        elif fmt == "pe":
            PeMutateConsensusWriter(sequence, consensus).write_in_pe_format(outfile)
        else:
            raise AttributeError("Only 'fasta' or 'pe' output formats are available")
| [
"maxxxnes@gmail.com"
] | maxxxnes@gmail.com |
997d82f70e17e51e762e7d35ef8e1a7305bfbda6 | badaf6547fd2b47474ca02aa5bcc6910df193576 | /tests/test_list.py | dc7f700c421e25dd1f2e0702c488404e212b4157 | [
"MIT"
] | permissive | yongman/tidis | e70ae6252d1a7333d67cb56e5a17ae4ad3d65afa | 405e53a63aa637b055d0e4c35333832d853fa6c6 | refs/heads/master | 2023-05-28T22:10:06.946529 | 2022-09-20T06:03:03 | 2022-09-20T06:03:03 | 129,863,749 | 1,473 | 151 | MIT | 2022-09-20T06:00:40 | 2018-04-17T07:25:40 | Go | UTF-8 | Python | false | false | 4,768 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 yongman <yming0221@gmail.com>
#
# Distributed under terms of the MIT license.
"""
unit test for list type
"""
import unittest
import time
import string
import random
from rediswrap import RedisWrapper
class ListTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print 'connect to 127.0.0.1:5379\n'
cls.r = RedisWrapper('127.0.0.1', 5379).get_instance()
cls.k1 = '__list1__'
cls.k2 = '__list2__'
cls.v1 = 'value1'
cls.v2 = 'value2'
def setUp(self):
self.r.execute_command('del', self.k1)
self.r.execute_command('del', self.k2)
pass
def random_string(n):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(n))
def test_lpop(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
for i in range(200):
self.assertEqual(self.r.lpop(self.k1), str(i))
def test_lpush(self):
for i in range(200):
self.assertTrue(self.r.lpush(self.k1, str(i)))
for i in range(200):
self.assertEqual(self.r.rpop(self.k1), str(i))
def test_rpop(self):
for i in range(200):
self.assertTrue(self.r.lpush(self.k1, str(i)))
for i in range(200):
self.assertEqual(self.r.rpop(self.k1), str(i))
def test_rpush(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
for i in range(200):
self.assertEqual(self.r.lpop(self.k1), str(i))
def test_llen(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
self.assertEqual(self.r.llen(self.k1), 200)
def test_lindex(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
for i in range(200):
self.assertEqual(self.r.lindex(self.k1, i), str(i))
def test_lrange(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
self.assertListEqual(self.r.lrange(self.k1, 10, 100), [str(i) for i in range(10, 101)])
def test_lset(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
self.assertTrue(self.r.lset(self.k1, 100, 'hello'))
self.assertEqual(self.r.lindex(self.k1, 100), 'hello')
def test_ltrim(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
self.assertTrue(self.r.ltrim(self.k1, 0, 100))
self.assertListEqual(self.r.lrange(self.k1, 0, -1), [str(i) for i in range(0, 101)])
self.assertEqual(self.r.llen(self.k1), 101)
def test_del(self):
for i in range(200):
self.assertTrue(self.r.rpush(self.k1, str(i)))
self.assertEqual(self.r.execute_command('del', self.k1), 1)
def test_pexpire(self):
self.assertTrue(self.r.lpush(self.k1, self.v1))
# expire in 5s
self.assertTrue(self.r.execute_command('pexpire', self.k1, 5000))
self.assertLessEqual(self.r.execute_command('pttl', self.k1), 5000)
self.assertEqual(self.r.llen(self.k1), 1)
time.sleep(6)
self.assertEqual(self.r.llen(self.k1), 0)
def test_pexpireat(self):
self.assertTrue(self.r.lpush(self.k1, self.v1))
# expire in 5s
ts = int(round(time.time()*1000)) + 5000
self.assertTrue(self.r.execute_command('pexpireat', self.k1, ts))
self.assertLessEqual(self.r.execute_command('pttl', self.k1), 5000)
self.assertEqual(self.r.llen(self.k1), 1)
time.sleep(6)
self.assertEqual(self.r.llen(self.k1), 0)
def test_expire(self):
self.assertTrue(self.r.lpush(self.k1, self.v1))
# expire in 5s
self.assertTrue(self.r.execute_command('expire', self.k1, 5))
self.assertLessEqual(self.r.execute_command('ttl', self.k1), 5)
self.assertEqual(self.r.llen(self.k1), 1)
time.sleep(6)
self.assertEqual(self.r.llen(self.k1), 0)
def test_expireat(self):
self.assertTrue(self.r.lpush(self.k1, self.v1))
# expire in 5s
ts = int(round(time.time())) + 5
self.assertTrue(self.r.execute_command('expireat', self.k1, ts))
self.assertLessEqual(self.r.execute_command('ttl', self.k1), 5)
self.assertEqual(self.r.llen(self.k1), 1)
time.sleep(6)
self.assertEqual(self.r.llen(self.k1), 0)
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
cls.r.execute_command('del', cls.k1)
cls.r.execute_command('del', cls.k2)
print '\nclean up\n'
| [
"yming0221@gmail.com"
] | yming0221@gmail.com |
60cf3e5f03ac14e51283e70a544e7f187a594223 | ca13620a0967cc6fccd9e98080c235db9faa65a7 | /models/transaction.py | dcd5a391102a38bf9698510b890be94ae7b0e205 | [] | no_license | Geoffe-Ga/grocery_list | b60a2771fa06ca8fd6be8d87ca74b9eb640aae80 | b854cc180e1b0ebb0bdfb9e17ad2824cc1cdc392 | refs/heads/master | 2020-12-15T16:22:19.655624 | 2020-01-20T19:03:24 | 2020-01-20T19:03:24 | 235,175,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | from db import db
from datetime import datetime
class TransactionModel(db.Model):
    """Join row linking a product to a shopping trip, with lifecycle timestamps."""
    __tablename__ = 'transaction'
    id = db.Column(db.Integer, primary_key=True)
    product_id = db.Column(db.Integer, db.ForeignKey('product.id'))
    product = db.relationship('ProductModel')
    trip_id = db.Column(db.Integer, db.ForeignKey('trip.id'))
    trip = db.relationship('TripModel')
    # Bug fix: pass the callable datetime.utcnow, not datetime.utcnow().
    # The call form was evaluated once at import time, stamping every row
    # with the process start time instead of its own creation/update time.
    created_at = db.Column('created_at', db.DateTime, default=datetime.utcnow)
    updated_at = db.Column('updated_at', db.DateTime, onupdate=datetime.utcnow)
    completed_at = db.Column('completed_at', db.DateTime)

    def __init__(self, trip_id, product_id):
        self.trip_id = trip_id
        self.product_id = product_id

    def json(self):
        """Dict view of the row (datetime values are left as datetime objects)."""
        return {'id': self.id,
                'created_at': self.created_at,
                'completed_at': self.completed_at,
                'product': self.product.name,
                'trip': self.trip.id}

    @classmethod
    def find_unfinished(cls, product_id):
        """First transaction for product_id that has not been completed yet."""
        return cls.query.filter_by(product_id=product_id, completed_at=None).first()

    @classmethod
    def find_by_id(cls, trans_id):
        """Look a transaction up by primary key."""
        return cls.query.filter_by(id=trans_id).first()

    def save_to_db(self):
        db.session.add(self)
        db.session.commit()

    def mark_done(self):
        """Stamp the transaction as completed now (UTC) and persist."""
        self.completed_at = datetime.utcnow()
        db.session.commit()
def delete_from_db(self):
db.session.delete(self)
db.session.commit() | [
"geoff@Geoffs-MacBook-Air.local"
] | geoff@Geoffs-MacBook-Air.local |
cc571bd6d4cd9e3ac6d6badcfa204d3458c75c01 | 2a7b18ad1e1ccc2abf1638ae4eb1a3cb3570ff1b | /docs/source/gallery/plotting_with_traja.py | d9b2fc91ab6eee03d574d96d55ede098364979aa | [
"MIT"
] | permissive | Saran-nns/traja | 2d754b89562f43f9f922e0259e298a567cad5539 | f2256cc47abd33377b3a87f110f4c8da1cf6765f | refs/heads/master | 2021-06-21T20:04:35.385593 | 2020-12-25T21:35:14 | 2020-12-25T21:35:14 | 168,675,536 | 0 | 0 | MIT | 2021-01-01T15:01:08 | 2019-02-01T09:42:28 | Python | UTF-8 | Python | false | false | 2,149 | py | """
Plotting with traja
-----------------------------------
`traja <https://traja.readthedocs.io>`_ is a Python
library providing a selection of easy-to-use spatial visualizations. It is
built on top of pandas and is designed to work with a range of libraries.
For more details on the library refer to its documentation.
First we'll load in data using traja.
"""
import traja
df = traja.TrajaDataFrame({'x':[0,1,2,3,4],'y':[1,3,2,4,5]})
###############################################################################
# Plotting with Traja
# =====================
#
# We start out by plotting a basic time series trajectory using the ``traja``
# accessor and ``.plot()`` method.
df.traja.plot()
###############################################################################
# Generate Random Walks
# =====================
#
# Also, random walks can be generated using ``generate``.
df = traja.generate(n=1000, random=True, fps=30)
df.traja.plot()
###############################################################################
# Traja can re-scale data with any units
df.traja.scale(100)
df.spatial_units='cm'
df.traja.plot()
###############################################################################
# Rediscretize step lengths
# =========================
#
# ``rediscretize`` method allows resampling the trajectory into an arbitrary step
# length ``R``.
rt = df.traja.rediscretize(R=5000)
rt.traja.plot()
###############################################################################
# Calculate derivatives
# =====================
#
# Derivatives can be calculated with ``derivatives`` and histograms can be
# plotted using pandas built-in :meth:`plot <pandas.pandas.DataFrame.plot>` method.
derivs = df.traja.get_derivatives()
speed = derivs['speed']
speed.hist()
###############################################################################
# Again, these are just some of the plots you can make with Traja. There are
# several other possibilities not covered in this brief introduction. For more
# examples, refer to the
# `Gallery <https://traja.readthedocs.io/en/latest/gallery/index.html>`_ in the
# traja documentation.
| [
"shenkjustin@gmail.com"
] | shenkjustin@gmail.com |
bdd0dcb052c78df945e0924fea3a9b179b9a23d7 | d0c9414f6bc85d29c1520ac47dd688c8c3a5092f | /join.py | ab3faef4cf2555ae8e6794589b631ec99981cfa3 | [] | no_license | Bodek123/Udemy---Python | 77de6dc54c99d87556aa078cb409e7e0cd56bc01 | 24bb32b5ad50d54a50295ee7841e1e25d49e289c | refs/heads/master | 2021-04-29T21:48:20.398898 | 2018-02-17T12:05:46 | 2018-02-17T12:05:46 | 121,624,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # Modify the program so that the exits is a dictionary rather than a list,
# with the keys being the numbers of the locations and the values being
# dictionaries holding the exits (as they do at present). No change should
# be needed to the actual code.
#
# Once that is working, create another dictionary that contains words that
# players may use. These words will be the keys, and their values will be
# a single letter that the program can use to determine which way to go.
locations = {0: "You are sitting in front of a computer learning Python",
             1: "You are standing at the end of a road before a small brick building",
             2: "You are at the top of a hill",
             3: "You are inside a building, a well house for a small stream",
             4: "You are in a valley beside a stream",
             5: "You are in the forest"}
# Exits are keyed by location number; each value maps a direction letter to
# the destination location ("Q" always leads to 0, which ends the game).
exits = {0: {"Q": 0},
         1: {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
         2: {"N": 5, "Q": 0},
         3: {"W": 1, "Q": 0},
         4: {"N": 1, "W": 2, "Q": 0},
         5: {"W": 2, "S": 1, "Q": 0} }
## Exits in lists (previous representation, kept for reference)
#exits = [{"Q": 0},
#         {"W": 2, "E": 3, "N": 5, "S": 4, "Q": 0},
#         {"N": 5, "Q": 0},
#         {"W": 1, "Q": 0},
#         {"N": 1, "W": 2, "Q": 0},
#         {"W": 2, "S": 1, "Q": 0}]
# Vocabulary maps full direction words to the single letters used in exits.
vocabulary = {"QUIT" : "Q",
              "NORTH" : "N",
              "SOUTH" : "S",
              "EAST" : "E",
              "WEST" : "W"}
#print(locations[0])
#print(locations[0].split())
#print(locations[3].split(","))
#print(' '.join(locations[0].split()))
loc = 1
while True:
    availableExites =", ".join(exits[loc].keys())
    print(locations[loc])
    if loc == 0:
        break
    # Prompt text is in Polish ("Available exits are:"); left as-is.
    direction = input ("Dostepne wyjscia to: " + availableExites + " ").upper()
    print()
    # Multi-character input: look each word up in the vocabulary dictionary.
    if len(direction) > 1:
    #    print ("You choosed: " + vocabulary[direction])
    #    for word in vocabulary:
    #        if word in direction:
    #            direction = vocabulary[word]
        words = direction.split() # find the direction word in the sentence
        for word in words:
            if word in vocabulary:
                direction = vocabulary[word]
                break
    if direction in exits[loc]:
        loc = exits[loc][direction]
    else:
        print("Nie pojdziesz ")
| [
"bodek87@gmail.com"
] | bodek87@gmail.com |
7f46726f259a60a0a1580927d4c5d8b74369d2fe | a5805a41fbbdc4d1b7cc322b4ab98e126da87c8d | /99bottles.py | e66113148ca6e3e88a73b0ad410819c16a650fe2 | [] | no_license | paulsenj/HearMeCode | 84bfc34cb4a5fb965d4f0d1a87a422458155e9f0 | 7171af4e97a8771edaea3f7d216fa1c4c5584774 | refs/heads/master | 2021-01-10T05:30:59.757092 | 2017-08-08T12:12:34 | 2017-08-08T12:12:34 | 52,220,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,936 | py | # Difficulty Level: Beginner
# Can you make Python print out the song for 99 bottles of beer on the wall?
# Note: You can use range() in three different ways
# First:
# range(5) will give you a list containing [0, 1, 2, 3, 4]
# In this case, range assumes you want to start counting at 0, and the parameter you give is the number to stop *just* short of.
# Second:
# range(5, 10) will give you a list containing [5, 6, 7, 8, 9]
# In this case, the two parameters you give to range() are the number to start at and the number to stop *just* short of.
# Helpful mnemonic: range(start, stop)
# Third:
# range(5, 15, 3) will give you a list containing [5, 8, 11, 14]
# In this case, the three parameters you give to range() are the number to start at, the number to stop *just* short of, and the number to increment each time by.
# Note that normally, the number to increment each time by is assumed to be 1. (In other words, you add 1 each time through.)
# That's why it goes [0, 1, 2, 3, 4] unless you specify that third parameter, called the step.
# Helpful mnemonic: range(start, stop, step)
# # Using range() and a loop, print out the song. Your output should look like this:
# bottles = range(99)
# #print bottles
# for bottle in bottles:
# if bottle == 98:
# print "{0} bottle of beer on the wall. {0} bottle of beer...".format(99-bottle)
# print "If that bottle should happen to fall, no more bottles of beer on the wall.".format(98-bottle)
# elif bottle == 97:
# print "{0} bottles of beer on the wall. {0} bottles of beer...".format(99-bottle)
# print "If one of those bottles should happen to fall, only one more bottle of beer on the wall.".format(98-bottle)
# else:
# print "{0} bottles of beer on the wall. {0} bottles of beer...".format(99-bottle)
# print "If one of those bottles should happen to fall, {0} bottles of beer on the wall.".format(98-bottle)
# 99 bottles of beer on the wall, 99 bottles of beer ...
# If one of those bottles should happen to fall, 98 bottles of beer on the wall
# 98 bottles of beer on the wall, 98 bottles of beer ...
# If one of those bottles should happen to fall, 97 bottles of beer on the wall
# 97 bottles of beer on the wall, 97 bottles of beer ...
# If one of those bottles should happen to fall, 96 bottles of beer on the wall
# 96 bottles of beer on the wall, 96 bottles of beer ...
# If one of those bottles should happen to fall, 95 bottles of beer on the wall
# 95 bottles of beer on the wall, 95 bottles of beer ...
# If one of those bottles should happen to fall, 94 bottles of beer on the wall
# 94 bottles of beer on the wall, 94 bottles of beer ...
# If one of those bottles should happen to fall, 93 bottles of beer on the wall
# 93 bottles of beer on the wall, 93 bottles of beer ...
# If one of those bottles should happen to fall, 92 bottles of beer on the wall
# 92 bottles of beer on the wall, 92 bottles of beer ...
# If one of those bottles should happen to fall, 91 bottles of beer on the wall
# 91 bottles of beer on the wall, 91 bottles of beer ...
# If one of those bottles should happen to fall, 90 bottles of beer on the wall
# 90 bottles of beer on the wall, 90 bottles of beer ...
# If one of those bottles should happen to fall, 89 bottles of beer on the wall
# 89 bottles of beer on the wall, 89 bottles of beer ...
# If one of those bottles should happen to fall, 88 bottles of beer on the wall
# 88 bottles of beer on the wall, 88 bottles of beer ...
# If one of those bottles should happen to fall, 87 bottles of beer on the wall
# 87 bottles of beer on the wall, 87 bottles of beer ...
# If one of those bottles should happen to fall, 86 bottles of beer on the wall
# 86 bottles of beer on the wall, 86 bottles of beer ...
# If one of those bottles should happen to fall, 85 bottles of beer on the wall
# 85 bottles of beer on the wall, 85 bottles of beer ...
# If one of those bottles should happen to fall, 84 bottles of beer on the wall
# 84 bottles of beer on the wall, 84 bottles of beer ...
# If one of those bottles should happen to fall, 83 bottles of beer on the wall
# 83 bottles of beer on the wall, 83 bottles of beer ...
# If one of those bottles should happen to fall, 82 bottles of beer on the wall
# 82 bottles of beer on the wall, 82 bottles of beer ...
# If one of those bottles should happen to fall, 81 bottles of beer on the wall
# 81 bottles of beer on the wall, 81 bottles of beer ...
# If one of those bottles should happen to fall, 80 bottles of beer on the wall
# 80 bottles of beer on the wall, 80 bottles of beer ...
# If one of those bottles should happen to fall, 79 bottles of beer on the wall
# 79 bottles of beer on the wall, 79 bottles of beer ...
# If one of those bottles should happen to fall, 78 bottles of beer on the wall
# 78 bottles of beer on the wall, 78 bottles of beer ...
# If one of those bottles should happen to fall, 77 bottles of beer on the wall
# 77 bottles of beer on the wall, 77 bottles of beer ...
# If one of those bottles should happen to fall, 76 bottles of beer on the wall
# 76 bottles of beer on the wall, 76 bottles of beer ...
# If one of those bottles should happen to fall, 75 bottles of beer on the wall
# 75 bottles of beer on the wall, 75 bottles of beer ...
# If one of those bottles should happen to fall, 74 bottles of beer on the wall
# 74 bottles of beer on the wall, 74 bottles of beer ...
# If one of those bottles should happen to fall, 73 bottles of beer on the wall
# 73 bottles of beer on the wall, 73 bottles of beer ...
# If one of those bottles should happen to fall, 72 bottles of beer on the wall
# 72 bottles of beer on the wall, 72 bottles of beer ...
# If one of those bottles should happen to fall, 71 bottles of beer on the wall
# 71 bottles of beer on the wall, 71 bottles of beer ...
# If one of those bottles should happen to fall, 70 bottles of beer on the wall
# 70 bottles of beer on the wall, 70 bottles of beer ...
# If one of those bottles should happen to fall, 69 bottles of beer on the wall
# 69 bottles of beer on the wall, 69 bottles of beer ...
# If one of those bottles should happen to fall, 68 bottles of beer on the wall
# 68 bottles of beer on the wall, 68 bottles of beer ...
# If one of those bottles should happen to fall, 67 bottles of beer on the wall
# 67 bottles of beer on the wall, 67 bottles of beer ...
# If one of those bottles should happen to fall, 66 bottles of beer on the wall
# 66 bottles of beer on the wall, 66 bottles of beer ...
# If one of those bottles should happen to fall, 65 bottles of beer on the wall
# 65 bottles of beer on the wall, 65 bottles of beer ...
# If one of those bottles should happen to fall, 64 bottles of beer on the wall
# 64 bottles of beer on the wall, 64 bottles of beer ...
# If one of those bottles should happen to fall, 63 bottles of beer on the wall
# 63 bottles of beer on the wall, 63 bottles of beer ...
# If one of those bottles should happen to fall, 62 bottles of beer on the wall
# 62 bottles of beer on the wall, 62 bottles of beer ...
# If one of those bottles should happen to fall, 61 bottles of beer on the wall
# 61 bottles of beer on the wall, 61 bottles of beer ...
# If one of those bottles should happen to fall, 60 bottles of beer on the wall
# 60 bottles of beer on the wall, 60 bottles of beer ...
# If one of those bottles should happen to fall, 59 bottles of beer on the wall
# 59 bottles of beer on the wall, 59 bottles of beer ...
# If one of those bottles should happen to fall, 58 bottles of beer on the wall
# 58 bottles of beer on the wall, 58 bottles of beer ...
# If one of those bottles should happen to fall, 57 bottles of beer on the wall
# 57 bottles of beer on the wall, 57 bottles of beer ...
# If one of those bottles should happen to fall, 56 bottles of beer on the wall
# 56 bottles of beer on the wall, 56 bottles of beer ...
# If one of those bottles should happen to fall, 55 bottles of beer on the wall
# 55 bottles of beer on the wall, 55 bottles of beer ...
# If one of those bottles should happen to fall, 54 bottles of beer on the wall
# 54 bottles of beer on the wall, 54 bottles of beer ...
# If one of those bottles should happen to fall, 53 bottles of beer on the wall
# 53 bottles of beer on the wall, 53 bottles of beer ...
# If one of those bottles should happen to fall, 52 bottles of beer on the wall
# 52 bottles of beer on the wall, 52 bottles of beer ...
# If one of those bottles should happen to fall, 51 bottles of beer on the wall
# 51 bottles of beer on the wall, 51 bottles of beer ...
# If one of those bottles should happen to fall, 50 bottles of beer on the wall
# 50 bottles of beer on the wall, 50 bottles of beer ...
# If one of those bottles should happen to fall, 49 bottles of beer on the wall
# 49 bottles of beer on the wall, 49 bottles of beer ...
# If one of those bottles should happen to fall, 48 bottles of beer on the wall
# 48 bottles of beer on the wall, 48 bottles of beer ...
# If one of those bottles should happen to fall, 47 bottles of beer on the wall
# 47 bottles of beer on the wall, 47 bottles of beer ...
# If one of those bottles should happen to fall, 46 bottles of beer on the wall
# 46 bottles of beer on the wall, 46 bottles of beer ...
# If one of those bottles should happen to fall, 45 bottles of beer on the wall
# 45 bottles of beer on the wall, 45 bottles of beer ...
# If one of those bottles should happen to fall, 44 bottles of beer on the wall
# 44 bottles of beer on the wall, 44 bottles of beer ...
# If one of those bottles should happen to fall, 43 bottles of beer on the wall
# 43 bottles of beer on the wall, 43 bottles of beer ...
# If one of those bottles should happen to fall, 42 bottles of beer on the wall
# 42 bottles of beer on the wall, 42 bottles of beer ...
# If one of those bottles should happen to fall, 41 bottles of beer on the wall
# 41 bottles of beer on the wall, 41 bottles of beer ...
# If one of those bottles should happen to fall, 40 bottles of beer on the wall
# 40 bottles of beer on the wall, 40 bottles of beer ...
# If one of those bottles should happen to fall, 39 bottles of beer on the wall
# 39 bottles of beer on the wall, 39 bottles of beer ...
# If one of those bottles should happen to fall, 38 bottles of beer on the wall
# 38 bottles of beer on the wall, 38 bottles of beer ...
# If one of those bottles should happen to fall, 37 bottles of beer on the wall
# 37 bottles of beer on the wall, 37 bottles of beer ...
# If one of those bottles should happen to fall, 36 bottles of beer on the wall
# 36 bottles of beer on the wall, 36 bottles of beer ...
# If one of those bottles should happen to fall, 35 bottles of beer on the wall
# 35 bottles of beer on the wall, 35 bottles of beer ...
# If one of those bottles should happen to fall, 34 bottles of beer on the wall
# 34 bottles of beer on the wall, 34 bottles of beer ...
# If one of those bottles should happen to fall, 33 bottles of beer on the wall
# 33 bottles of beer on the wall, 33 bottles of beer ...
# If one of those bottles should happen to fall, 32 bottles of beer on the wall
# 32 bottles of beer on the wall, 32 bottles of beer ...
# If one of those bottles should happen to fall, 31 bottles of beer on the wall
# 31 bottles of beer on the wall, 31 bottles of beer ...
# If one of those bottles should happen to fall, 30 bottles of beer on the wall
# 30 bottles of beer on the wall, 30 bottles of beer ...
# If one of those bottles should happen to fall, 29 bottles of beer on the wall
# 29 bottles of beer on the wall, 29 bottles of beer ...
# If one of those bottles should happen to fall, 28 bottles of beer on the wall
# 28 bottles of beer on the wall, 28 bottles of beer ...
# If one of those bottles should happen to fall, 27 bottles of beer on the wall
# 27 bottles of beer on the wall, 27 bottles of beer ...
# If one of those bottles should happen to fall, 26 bottles of beer on the wall
# 26 bottles of beer on the wall, 26 bottles of beer ...
# If one of those bottles should happen to fall, 25 bottles of beer on the wall
# 25 bottles of beer on the wall, 25 bottles of beer ...
# If one of those bottles should happen to fall, 24 bottles of beer on the wall
# 24 bottles of beer on the wall, 24 bottles of beer ...
# If one of those bottles should happen to fall, 23 bottles of beer on the wall
# 23 bottles of beer on the wall, 23 bottles of beer ...
# If one of those bottles should happen to fall, 22 bottles of beer on the wall
# 22 bottles of beer on the wall, 22 bottles of beer ...
# If one of those bottles should happen to fall, 21 bottles of beer on the wall
# 21 bottles of beer on the wall, 21 bottles of beer ...
# If one of those bottles should happen to fall, 20 bottles of beer on the wall
# 20 bottles of beer on the wall, 20 bottles of beer ...
# If one of those bottles should happen to fall, 19 bottles of beer on the wall
# 19 bottles of beer on the wall, 19 bottles of beer ...
# If one of those bottles should happen to fall, 18 bottles of beer on the wall
# 18 bottles of beer on the wall, 18 bottles of beer ...
# If one of those bottles should happen to fall, 17 bottles of beer on the wall
# 17 bottles of beer on the wall, 17 bottles of beer ...
# If one of those bottles should happen to fall, 16 bottles of beer on the wall
# 16 bottles of beer on the wall, 16 bottles of beer ...
# If one of those bottles should happen to fall, 15 bottles of beer on the wall
# 15 bottles of beer on the wall, 15 bottles of beer ...
# If one of those bottles should happen to fall, 14 bottles of beer on the wall
# 14 bottles of beer on the wall, 14 bottles of beer ...
# If one of those bottles should happen to fall, 13 bottles of beer on the wall
# 13 bottles of beer on the wall, 13 bottles of beer ...
# If one of those bottles should happen to fall, 12 bottles of beer on the wall
# 12 bottles of beer on the wall, 12 bottles of beer ...
# If one of those bottles should happen to fall, 11 bottles of beer on the wall
# 11 bottles of beer on the wall, 11 bottles of beer ...
# If one of those bottles should happen to fall, 10 bottles of beer on the wall
# 10 bottles of beer on the wall, 10 bottles of beer ...
# If one of those bottles should happen to fall, 9 bottles of beer on the wall
# 9 bottles of beer on the wall, 9 bottles of beer ...
# If one of those bottles should happen to fall, 8 bottles of beer on the wall
# 8 bottles of beer on the wall, 8 bottles of beer ...
# If one of those bottles should happen to fall, 7 bottles of beer on the wall
# 7 bottles of beer on the wall, 7 bottles of beer ...
# If one of those bottles should happen to fall, 6 bottles of beer on the wall
# 6 bottles of beer on the wall, 6 bottles of beer ...
# If one of those bottles should happen to fall, 5 bottles of beer on the wall
# 5 bottles of beer on the wall, 5 bottles of beer ...
# If one of those bottles should happen to fall, 4 bottles of beer on the wall
# 4 bottles of beer on the wall, 4 bottles of beer ...
# If one of those bottles should happen to fall, 3 bottles of beer on the wall
# 3 bottles of beer on the wall, 3 bottles of beer ...
# If one of those bottles should happen to fall, 2 bottles of beer on the wall
# 2 bottles of beer on the wall, 2 bottles of beer ...
# If one of those bottles should happen to fall, 1 bottles of beer on the wall
# Count down from 99 to 1; range(start, stop, step) stops just short of 0.
bottles = range(99, 0, -1)
#print bottles
for bottle in bottles:
    if bottle == 1:
        # Last bottle: singular wording and a "no more bottles" closing line.
        print "{0} bottle of beer on the wall. {0} bottle of beer...".format(bottle)
        print "If that bottle should happen to fall, no more bottles of beer on the wall."
    elif bottle == 2:
        # Two left: the follow-up line must say "one more bottle" (singular).
        print "{0} bottles of beer on the wall. {0} bottles of beer...".format(bottle)
        print "If one of those bottles should happen to fall, only one more bottle of beer on the wall."
    else:
        # General verse: plural throughout, follow-up counts down by one.
        print "{0} bottles of beer on the wall. {0} bottles of beer...".format(bottle)
print "If one of those bottles should happen to fall, {0} bottles of beer on the wall.".format(bottle-1) | [
"noreply@github.com"
] | noreply@github.com |
def find_pattern(a, b):
    """Return True if the m x m grid *b* occurs as a contiguous sub-grid
    of the n x n grid *a* (both given as lists of strings).

    The original scanned all n*n offsets and skipped out-of-range ones with
    `continue`, and kept comparing after the first mismatch; the loop bounds
    below only visit valid offsets and `all(...)` short-circuits.
    """
    n, m = len(a), len(b)
    for i in range(n - m + 1):
        for j in range(n - m + 1):
            if all(a[i + k][j + l] == b[k][l]
                   for k in range(m) for l in range(m)):
                return True
    return False


if __name__ == '__main__':
    # Guarding the stdin reads lets the matcher be imported and tested.
    n, m = map(int, input().split())
    a = [input() for _ in range(n)]
    b = [input() for _ in range(m)]
    print("Yes" if find_pattern(a, b) else "No")
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
b3df535e0bf14619764330e153f9691f97ebfe7a | ae3df32afc258c80cb2ce504ce87fa5bb7740ea7 | /main/apps.py | a1c166fbd7a6ef9873d13d2341e00132f5d8b9dd | [] | no_license | chensandiego/elast-python | 622251d806b947899d74dc064c19193b418ac505 | 8c28a47acfc5ef540a017abcd786cf815591b163 | refs/heads/master | 2020-12-24T05:40:38.917432 | 2016-08-08T09:16:39 | 2016-08-08T09:16:39 | 65,190,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | from __future__ import unicode_literals
from django.apps import AppConfig
from elasticsearch_dsl.connections import connections
class MainConfig(AppConfig):
    """Django app configuration for the ``main`` app."""
    name = 'main'
    def ready(self):
        # Create the default global elasticsearch-dsl connection once the
        # app registry is fully loaded, so search documents can use it.
        connections.create_connection() | [
"chensandiego@gmail.com"
] | chensandiego@gmail.com |
26c3ed7037c5e7c99c281a9602db0848de390886 | ce55c319f5a78b69fefc63595d433864a2e531b5 | /前后端分离-vue-DRF/houfen_DRF-projects/15day周末作业/booklogin/user/views.py | ea76a3b29e6788ab22cbcb4e135039d76dd5f722 | [] | no_license | Suijng/1809_data | a072c875e8746190e3b715e53f1afe3323f4666b | 45f8a57089f5c30ccc1a3cddb03b76dc95355417 | refs/heads/master | 2022-12-21T12:38:30.458291 | 2019-09-27T01:14:41 | 2019-09-27T01:14:41 | 211,207,071 | 0 | 0 | null | 2022-11-22T03:16:18 | 2019-09-27T00:55:21 | HTML | UTF-8 | Python | false | false | 7,016 | py | from django.shortcuts import render
# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from user.serializers import ResgsterUserSerializer,CategorySerializer,\
BookDetailSerializer,BookSerializer,\
ChpaterListSerializer,ChpaterDetailSerializer
from user.models import User,Token,Category,Book,Chpater
from utils.pagination import MyPageNumberPagination
# 注册
# class RegisterView(APIView):
#
# def post(self,request,*args,**kwargs):
# ret = {
# 'code':1,
# 'msg':'注册成功'
# }
# # 获取post请求参数
# data = request.data
# # 序列化请求参数
# ser = ResgsterUserSerializer(data=data)
# if ser.is_valid(): # 验证字段
# print(ser.validated_data)
# ser.save()
# else:
# # 验证失败打印错误信息
# print(ser.errors)
# ret['code'] = 0
# ret['msg'] = '参数错误,注册失败'
#
# return Response(ret)
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import CreateModelMixin,ListModelMixin,RetrieveModelMixin
from rest_framework import status
# 注册
class RegisterView(CreateModelMixin,GenericViewSet):
    """Registration endpoint: POST creates a new User through the serializer."""
    queryset = User.objects.all()
    serializer_class = ResgsterUserSerializer
    # Override the mixin's create() so the response is always the project's
    # {'code', 'msg'} envelope instead of the serialized object.
    def create(self, request, *args, **kwargs):
        ret = {
            'code': 1,
            'msg': '注册成功'
        }
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            self.perform_create(serializer)
            headers = self.get_success_headers(serializer.data)
            return Response(ret,status=status.HTTP_201_CREATED, headers=headers)
        else:
            # Validation failed: print the errors and fall through to the
            # error envelope (returned with the default 200 status).
            print(serializer.errors)
            ret['code'] = 0
            ret['msg'] = '参数错误,注册失败'
        return Response(ret)
#************************** 登录
# 生成token
import time,hashlib
def get_token(name, password):
    """Build a login token: MD5 hex digest over the current millisecond
    timestamp followed by the user name and password."""
    millis = str(int(time.time() * 1000))
    digest = hashlib.md5(millis.encode('utf8'))
    for part in (name, password):
        digest.update(part.encode('utf8'))
    return digest.hexdigest()
# 登录
class LoginView(APIView):
    """Login endpoint: verifies name/password and issues a session token."""
    def post(self,request,*args,**kwargs):
        ret = {
            'code': 1,
            'msg': '登录成功'
        }
        # POST payload
        data = request.data
        # user name
        name = data['name']
        # password
        password = data['password']
        try:
            obj = User.objects.filter(name=name).first()
            if obj:
                # user exists
                # NOTE(review): passwords appear to be stored and compared
                # in plain text -- confirm, and consider hashing them.
                if obj.password == password:
                    # Login succeeded: mint a token and upsert it for this user.
                    token = get_token(name,password)
                    Token.objects.update_or_create(user=obj,defaults={'token':token})
                    ret['token'] = token
                else:
                    # wrong password
                    ret['msg'] = '账号或密码错误'
                    ret['code'] = 0
            else:
                ret['msg'] = '该用户不存在'
                ret['code'] = 0
        except Exception as e:
            # Broad catch keeps the endpoint from returning a 500; the error
            # is only printed, so failures are easy to miss in production.
            print(e)
            ret['msg'] = '捕获异常'
            ret['code'] = 0
        return Response(ret)
#****************** 书籍分类
class CategoryView(ListModelMixin,RetrieveModelMixin,GenericViewSet):
    """Category endpoints: list all book categories, or page through one
    category's books (retrieve with the category id as pk)."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    pagination_class = MyPageNumberPagination
    def get_serializer_class(self):
        # Pick the serializer per action: categories for list, books for retrieve.
        if self.action == 'list':
            return CategorySerializer
        elif self.action == 'retrieve':
            return BookSerializer
    # Response shape for the frontend: a dict wrapping a list of dicts.
    def list(self, request, *args, **kwargs):
        print(request.version) # print the negotiated API version
        ret = {
            'code': 1,
        }
        queryset = self.filter_queryset(self.get_queryset())
        # No pagination here: every category is returned at once.
        serializer = self.get_serializer(queryset, many=True)
        ret['data'] = serializer.data
        return Response(ret)
    # Books belonging to one category.
    def retrieve(self, request, *args, **kwargs):
        category_id = kwargs.get('pk')
        if category_id:
            books = Book.objects.filter(category=category_id)
            # paginate_queryset returns the current page's objects,
            # or None when pagination is disabled.
            page = self.paginate_queryset(books)
            if page is not None:
                serializer = self.get_serializer(page,many=True)
                return self.get_paginated_response(serializer.data)
        # NOTE(review): falls through and returns None when pk is missing or
        # pagination is off (DRF will error) -- confirm these paths can't occur.
#******** 书籍详情视图 获取每本book书的url地址
class BookDetailView(RetrieveModelMixin,GenericViewSet):
    """Detail endpoint for a single Book, wrapped in the {'code','data'} envelope."""
    queryset = Book.objects.all()
    serializer_class = BookDetailSerializer
    def retrieve(self, request, *args, **kwargs):
        # Fetch the book via the mixin helper, serialize, then wrap.
        payload = self.get_serializer(self.get_object()).data
        return Response({'code': 1, 'data': payload})
# 章节列表视图
from utils.authenandpermission import MyPermission,MyAuthentication
class ChapterView(ListModelMixin,RetrieveModelMixin,GenericViewSet):
    """Chapter endpoints: list a book's chapters, or fetch one chapter's detail.

    Authentication and permission checks are applied only when a chapter pk
    is present (the detail route), so chapter lists stay publicly readable.
    """
    queryset = Chpater.objects.all()
    serializer_class = ChpaterListSerializer
    pagination_class = MyPageNumberPagination
    def get_serializer_class(self):
        # Lightweight serializer for lists, full serializer for detail.
        if self.action == 'list':
            return ChpaterListSerializer
        elif self.action == 'retrieve':
            return ChpaterDetailSerializer
    # Authentication
    def get_authenticators(self):
        if self.kwargs.get('pk'):
            # Detail route (fetch a chapter by id): require authentication.
            return [MyAuthentication(),]
        return []
    # Permissions
    def get_permissions(self):
        if self.kwargs.get('pk'):
            # Detail route: apply the permission class as well.
            return [MyPermission(), ]
        return []
    def list(self, request, *args, **kwargs):
        book_id = kwargs.get('bookid')
        if book_id:
            chpaters = Chpater.objects.filter(book=book_id)
            # paginate_queryset returns the current page's objects,
            # or None when pagination is disabled.
            page = self.paginate_queryset(chpaters)
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)
        # NOTE(review): returns None when bookid is missing or pagination is
        # off -- DRF will error; confirm these paths cannot occur.
    def retrieve(self, request, *args, **kwargs):
        # Fetch chapter detail by its id.
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        ret = {'code':1,'data':serializer.data}
        return Response(ret)
| [
"1627765913@qq.com"
] | 1627765913@qq.com |
08a3f80486dcb6dd1386d4e4c03661d4f81e29be | 2d86283e9afcb3719484b7d1dc23d0463d58ae24 | /pre_treat/proc_data.py | d958dceca71b18da1d4c801c31dcce9054c04975 | [] | no_license | linzimu/SoGouClassifer | c95349feb1bc4cd1490ffa10d91a7df841df0c6e | 3d1017ed685d115a9053fce4a52ce14f86ba4a90 | refs/heads/master | 2020-04-25T22:11:09.208697 | 2019-03-03T09:02:22 | 2019-03-03T09:02:22 | 173,103,298 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | import xml.etree.cElementTree as ET
import jieba
import re
import os
def getcontent(text):
    """Parse one <doc> XML record and return (category, segmented text).

    The category is the sub-domain of the record's url (e.g. ``sports`` from
    ``http://sports.sohu.com/...``); the text is the content field joined
    from jieba word segments. Returns (None, None) when the url or content
    is missing, or when the url has no ``http://<subdomain>.`` part.
    """
    root = ET.fromstring(text)
    record_url = root.find('url').text
    record_text = root.find('content').text
    if not record_text or not record_url:
        return None, None
    # Guard: the original indexed findall(...)[0] unconditionally and raised
    # IndexError for urls that do not match (e.g. https or malformed urls).
    matches = re.findall(r'http://(\w+)\.', record_url)
    if not matches:
        return None, None
    record_class = matches[0]
    record_text = ' '.join(jieba.cut(record_text))
    print(record_class, record_text)
    return record_class, record_text
def save_records(filepath='../data/news_sohusite_xml.dat'):
    """Extract (class, text) records from a raw SogouCS dump and write them
    to ``<dir>/new_data/<name>`` as tab-separated lines.

    Each record in the dump spans 6 lines; line 2 (the <url> line) may
    contain bare '&' characters that would break XML parsing, so they are
    stripped before the record is handed to getcontent().
    """
    with open(filepath, encoding='gb18030') as f:
        res = ''
        # os.path.split handles both '/' and '\\' separators; the original
        # rsplit('\\', 1) raised ValueError for forward-slash paths,
        # including this function's own default argument.
        path, filename = os.path.split(filepath)
        # Keep only the last two dot-separated parts of the file name.
        filename = '.'.join(filename.split('.')[-2:])
        # `with` guarantees the output file is closed even if parsing fails.
        with open(path + '/new_data/' + filename, 'w', encoding='utf8') as fw:
            for i, line in enumerate(f, 1):
                if i % 6 == 1 and res:
                    # A full 6-line record has accumulated: extract and emit it.
                    record_class, record_text = getcontent(res)
                    if record_class and record_text:
                        fw.write(record_class + '\t' + record_text + '\n')
                    res = line
                elif i % 6 == 2:
                    res += line.replace('&', '')
                else:
                    res += line
def get_all(path='../data/SogouCS'):
    """Run save_records() on every regular file directly under *path*."""
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if not os.path.isfile(full_path):
            continue
        print(full_path)
        save_records(full_path)
def merge_files(path='../data/SogouCS/new_data', stop_file='../data/stop_words.txt'):
    """Merge every extracted file under *path* into ``../data/all_data.txt``,
    dropping stop words from each line."""
    # A set gives O(1) membership tests; the original list made the inner
    # filter O(len(stopwords)) for every token of every line.
    with open(stop_file, 'r', encoding='gb18030') as f:
        stopwords = {line.strip() for line in f}
    filenames = os.listdir(path)
    # `with` guarantees the merged output is flushed and closed on any exit.
    with open('../data/all_data.txt', 'w', encoding='utf8') as fw:
        for i, filename in enumerate(filenames, 1):
            filepath = os.path.join(path, filename)
            print(filepath)
            if os.path.isfile(filepath):
                with open(filepath, 'r', encoding='utf8') as f:
                    for line in f:
                        # Keep only the tokens that are not stop words.
                        tmp = [item for item in line.strip().split() if item not in stopwords]
                        fw.write(' '.join(tmp) + '\n')
    print('文件合并完成!')
def file_stat(path='../data/all_data.txt'):
    """Print the line count and the set of distinct class labels in *path*.

    Labels are the first tab-separated field of each line.
    """
    with open(path, 'r', encoding='utf8') as f:
        file_classes = set()
        count = 0  # defined up front so an empty file no longer raises NameError
        for count, line in enumerate(f, 1):
            file_classes.add(line.split('\t')[0])
        print(count, file_classes)
# 419595 {'travel', 'news', 'business', 'house', 'it', 'career', 'mil', 'sports', '2008', 'auto', 'health', 'women', 'cul', 'yule', 'learning'}
if __name__ == '__main__':
    # Pipeline driver: run one step at a time, uncommenting as needed.
    # step 1: extract the needed fields from the raw dump into per-file outputs
    # get_all()
    # step 2: merge the extracted files into a single corpus file
    merge_files()
    # step 3: corpus statistics
    # file_stat()
    pass
| [
"m18744235218@163.com"
] | m18744235218@163.com |
51cbbc6395a6c0755b0aaf0f460fc91f7dc63457 | 43d7c7721ab991a27b0aa64bdad4bc51255e484e | /hyde/ext/templates/jinja.py | bfe2d1b16afaa0bf660a5ac7250fe02999114524 | [
"MIT"
] | permissive | chewable/hyde | 071f5787dcdfbd8eadc73b58275bb16249b2e638 | 88b701b813440c455d91e73d9b30efe3cb05dacf | refs/heads/master | 2021-01-18T17:33:51.379900 | 2011-02-09T16:58:38 | 2011-02-09T16:58:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,697 | py | # -*- coding: utf-8 -*-
"""
Jinja template utilities
"""
from hyde.fs import File, Folder
from hyde.template import HtmlWrap, Template
from hyde.site import Resource
from hyde.util import getLoggerWithNullHandler, getLoggerWithConsoleHandler
from jinja2 import contextfunction, Environment, FileSystemLoader
from jinja2 import environmentfilter, Markup, Undefined, nodes
from jinja2.ext import Extension
from jinja2.exceptions import TemplateError
logger = getLoggerWithNullHandler('Jinja2')
class SilentUndefined(Undefined):
    """
    A redefinition of undefined that eats errors.

    Attribute access, item access and calls on an undefined value all
    return the same undefined object instead of raising, so templates
    keep rendering when a variable is missing.
    """
    def __getattr__(self, name):
        return self

    # Item access behaves exactly like attribute access.
    __getitem__ = __getattr__

    def __call__(self, *args, **kwargs):
        return self
@contextfunction
def media_url(context, path):
    """
    Returns the media url given a partial path, rooted at the site's
    configured media_url.
    """
    return Folder(context['site'].config.media_url).child(path)
@contextfunction
def content_url(context, path):
    """
    Returns the content url given a partial path, rooted at the site's
    configured base_url.
    """
    return Folder(context['site'].config.base_url).child(path)
@environmentfilter
def markdown(env, value):
    """
    Converts `value` from markdown to html, honouring any markdown
    settings found on the environment's config.
    """
    try:
        import markdown
    except ImportError:
        logger.error(u"Cannot load the markdown library.")
        raise TemplateError("Cannot load the markdown library")
    settings = {}
    if hasattr(env.config, 'markdown'):
        # Pull optional extension settings off the site config.
        conf = env.config.markdown
        settings['extensions'] = getattr(conf, 'extensions', [])
        settings['extension_configs'] = getattr(conf, 'extension_configs', {})
    return markdown.Markdown(**settings).convert(value)
@environmentfilter
def syntax(env, value, lexer=None, filename=None):
    """
    Highlights `value` with pygments and wraps the result in a captioned
    <div class="code"> block.
    """
    try:
        import pygments
        from pygments import lexers
        from pygments import formatters
    except ImportError:
        logger.error(u"pygments library is required to use syntax highlighting tags.")
        raise TemplateError("Cannot load pygments")

    # Use the named lexer when one was given, otherwise let pygments guess.
    if lexer:
        pyg = lexers.get_lexer_by_name(lexer)
    else:
        pyg = lexers.guess_lexer(value)
    options = {}
    if hasattr(env.config, 'syntax'):
        options = getattr(env.config.syntax, 'options', {})

    highlighted = pygments.highlight(value, pyg, formatters.HtmlFormatter(**options))
    highlighted = highlighted.replace('\n\n', '\n \n').replace('\n', '<br />')
    caption = filename if filename else pyg.name
    return Markup(
        '\n\n<div class="code"><figcaption>%s</figcaption>%s</div>\n\n'
        % (caption, highlighted))
class Markdown(Extension):
    """
    A wrapper around the markdown filter for syntactic sugar:
    {% markdown %} ... {% endmarkdown %}.
    """
    tags = set(['markdown'])
    def parse(self, parser):
        """
        Parses the statements and defers to the callback for markdown processing.
        """
        # Consume the 'markdown' token; its line number keeps error
        # reporting accurate for the generated node.
        lineno = parser.stream.next().lineno
        body = parser.parse_statements(['name:endmarkdown'], drop_needle=True)
        return nodes.CallBlock(
            self.call_method('_render_markdown'),
            [], [], body).set_lineno(lineno)
    def _render_markdown(self, caller=None):
        """
        Calls the markdown filter to transform the output.
        """
        if not caller:
            return ''
        output = caller().strip()
        return markdown(self.environment, output)
class YamlVar(Extension):
    """
    An extension that converts the content between the tags
    into an yaml object and sets the value in the given
    variable: {% yaml varname %} ... {% endyaml %}.
    """
    tags = set(['yaml'])
    def parse(self, parser):
        """
        Parses the contained data and defers to the callback to load it as
        yaml.
        """
        lineno = parser.stream.next().lineno
        # The identifier following 'yaml' names the template variable.
        var = parser.stream.expect('name').value
        body = parser.parse_statements(['name:endyaml'], drop_needle=True)
        # Emit two nodes: assign an empty dict to the variable, then a call
        # block whose callback fills that dict in place from the yaml body.
        return [
            nodes.Assign(
                nodes.Name(var, 'store'),
                nodes.Const({})
            ).set_lineno(lineno),
            nodes.CallBlock(
                self.call_method('_set_yaml', args=[nodes.Name(var, 'load')]),
                [], [], body).set_lineno(lineno)
        ]
    def _set_yaml(self, var, caller=None):
        """
        Loads the yaml data into the specified variable.
        """
        if not caller:
            return ''
        try:
            import yaml
        except ImportError:
            # Best effort: without PyYAML the variable stays an empty dict.
            return ''
        out = caller().strip()
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary constructors on untrusted input -- safe only for
        # author-controlled templates.
        var.update(yaml.load(out))
        return ''
def parse_kwargs(parser):
    """Parse a single ``name=value`` pair from the template token stream.

    Returns ``(name, value_node)`` where the value is parsed as a full
    expression for string tokens and wrapped as a constant otherwise.
    """
    name = parser.stream.expect('name').value
    parser.stream.expect('assign')
    if parser.stream.current.test('string'):
        value = parser.parse_expression()
    else:
        # Bare (unquoted) values are consumed as-is and wrapped in a Const.
        value = nodes.Const(parser.stream.next().value)
    return (name, value)
class Syntax(Extension):
    """
    A wrapper around the syntax filter for syntactic sugar. Supports both
    {% syntax lex=yaml, filename=abc.yaml %} and
    {% syntax yaml, 'abc.yaml' %} forms; both arguments are optional.
    """
    tags = set(['syntax'])
    def parse(self, parser):
        """
        Parses the statements and defers to the callback for pygments processing.
        """
        lineno = parser.stream.next().lineno
        # Defaults: no lexer (pygments will guess) and no caption filename.
        lex = nodes.Const(None)
        filename = nodes.Const(None)

        # NOTE(review): defined but never invoked -- the intended error
        # message for malformed syntax tags is currently unreachable.
        def fail_syntax():
            parser.fail(
                'Invalid syntax tag. Expected:'
                '{% syntax lex=yaml, filename=abc.yaml %} or'
                '{% syntax yaml, \'abc.yaml\' %}')

        if not parser.stream.current.test('block_end'):
            if parser.stream.look().test('assign'):
                # Keyword form: one or two name=value pairs in either order.
                name = value = name1 = value1 = None
                (name, value) = parse_kwargs(parser)
                if parser.stream.skip_if('comma'):
                    (name1, value1) = parse_kwargs(parser)

                (lex, filename) = (value, value1) \
                    if name == 'lex' \
                    else (value1, value)
            else:
                # Positional form: lexer name, optionally followed by a filename.
                lex = nodes.Const(parser.stream.next().value)
                if parser.stream.skip_if('comma'):
                    filename = parser.parse_expression()

        body = parser.parse_statements(['name:endsyntax'], drop_needle=True)
        return nodes.CallBlock(
            self.call_method('_render_syntax',
                args=[lex, filename]),
                [], [], body).set_lineno(lineno)
    def _render_syntax(self, lex, filename, caller=None):
        """
        Calls the syntax filter to transform the output.
        """
        if not caller:
            return ''
        output = caller().strip()
        return syntax(self.environment, output, lex, filename)
class IncludeText(Extension):
    """
    Automatically runs `markdown` and `typogrify` on included
    files.
    """
    tags = set(['includetext'])

    def parse(self, parser):
        """Let the stock include machinery parse, then wrap it in a call block."""
        include_node = parser.parse_include()
        call = self.call_method('_render_include_text')
        wrapper = nodes.CallBlock(call, [], [], [include_node])
        return wrapper.set_lineno(include_node.lineno)

    def _render_include_text(self, caller=None):
        """Markdown-process (and, when available, typogrify) the included content."""
        if not caller:
            return ''
        text = caller().strip()
        text = markdown(self.environment, text)
        # typogrify is optional; only apply its filter when registered.
        if 'typogrify' in self.environment.filters:
            text = self.environment.filters['typogrify'](text)
        return text
MARKINGS = '_markings_'
class Reference(Extension):
    """
    Marks a block in a template such that its available for use
    when referenced using a `refer` tag.
    """
    tags = set(['mark', 'reference'])

    def parse(self, parser):
        """Parse the mark name and capture the block body under that name."""
        opening = parser.stream.next()
        mark_name = parser.stream.next().value
        # The closing tag mirrors whichever opening tag was used
        # (endmark / endreference).
        end_tag = 'name:end%s' % opening.value
        contents = parser.parse_statements([end_tag], drop_needle=True)
        call = self.call_method(
            '_render_output',
            args=[nodes.Name(MARKINGS, 'load'), nodes.Const(mark_name)])
        return nodes.CallBlock(
            call, [], [], contents).set_lineno(opening.lineno)

    def _render_output(self, markings, name, caller=None):
        """Render the block, recording the output under ``name`` when possible."""
        if not caller:
            return ''
        rendered = caller()
        # MARKINGS may resolve to an undefined value when no `refer`
        # is active, so only record on a real dict.
        if isinstance(markings, dict):
            markings[name] = rendered
        return rendered
class Refer(Extension):
    """
    Imports content blocks specified in the referred template as
    variables in a given namespace.
    """
    tags = set(['refer'])

    def parse(self, parser):
        """
        Parse the referred template and the namespace.

        Expands ``{% refer to <template> as <namespace> %}`` into a node
        sequence that (1) saves the current markings, (2) renders the
        referred template with a fresh markings dict, (3) copies the
        marked blocks into ``<namespace>`` and (4) restores the state.
        """
        token = parser.stream.next()
        lineno = token.lineno
        tag = token.value
        parser.stream.expect('name:to')
        template = parser.parse_expression()
        parser.stream.expect('name:as')
        namespace = parser.stream.next().value
        # Manually assemble an include node that renders the referred
        # template with the current context.
        includeNode = nodes.Include(lineno=lineno)
        includeNode.with_context = True
        includeNode.ignore_missing = False
        includeNode.template = template
        temp = parser.free_identifier(lineno)
        return [
            # temp = MARKINGS  (save the current markings)
            nodes.Assign(
                nodes.Name(temp.name, 'store'),
                nodes.Name(MARKINGS, 'load')
            ).set_lineno(lineno),
            # MARKINGS = {}  (collect the referred template's marks)
            nodes.Assign(
                nodes.Name(MARKINGS, 'store'),
                nodes.Const({})).set_lineno(lineno),
            # <namespace> = {}
            nodes.Assign(
                nodes.Name(namespace, 'store'),
                nodes.Const({})).set_lineno(lineno),
            # Resolve the referred resource and remember the current one.
            nodes.CallBlock(
                self.call_method('_push_resource',
                                 args=[
                                     nodes.Name(namespace, 'load'),
                                     nodes.Name('site', 'load'),
                                     nodes.Name('resource', 'load'),
                                     template]),
                [], [], []).set_lineno(lineno),
            # resource = <namespace>['resource']  (switch context)
            nodes.Assign(
                nodes.Name('resource', 'store'),
                nodes.Getitem(nodes.Name(namespace, 'load'),
                              nodes.Const('resource'), 'load')
            ).set_lineno(lineno),
            # Render the include and harvest its marked blocks.
            nodes.CallBlock(
                self.call_method('_assign_reference',
                                 args=[
                                     nodes.Name(MARKINGS, 'load'),
                                     nodes.Name(namespace, 'load')]),
                [], [], [includeNode]).set_lineno(lineno),
            # resource = <namespace>['parent_resource']  (restore context)
            nodes.Assign(nodes.Name('resource', 'store'),
                         nodes.Getitem(nodes.Name(namespace, 'load'),
                                       nodes.Const('parent_resource'), 'load')
                         ).set_lineno(lineno),
            # MARKINGS = temp  (restore the saved markings)
            nodes.Assign(
                nodes.Name(MARKINGS, 'store'),
                nodes.Name(temp.name, 'load')
            ).set_lineno(lineno),
        ]

    def _push_resource(self, namespace, site, resource, template, caller):
        # Remember the including resource and resolve the referred one
        # from the site's content tree.
        namespace['parent_resource'] = resource
        namespace['resource'] = site.content.resource_from_relative_path(template)
        return ''

    def _assign_reference(self, markings, namespace, caller):
        """
        Assign the processed variables into the
        given namespace.
        """
        out = caller()
        for key, value in markings.items():
            namespace[key] = value
        # Full rendered output of the referred template.
        namespace['html'] = HtmlWrap(out)
        return ''
class HydeLoader(FileSystemLoader):
    """
    A wrapper around the file system loader that performs
    hyde specific tweaks.
    """

    def __init__(self, sitepath, site, preprocessor=None):
        # Prefer the configured content/layout roots; fall back to the
        # bare site path when the site carries no config.
        config = getattr(site, 'config', None)
        if config:
            search_paths = [
                str(config.content_root_path),
                str(config.layout_root_path),
            ]
            super(HydeLoader, self).__init__(search_paths)
        else:
            super(HydeLoader, self).__init__(str(sitepath))
        self.site = site
        self.preprocessor = preprocessor

    def get_source(self, environment, template):
        """
        Calls the plugins to preprocess prior to returning the source.
        """
        template = template.strip()
        logger.debug("Loading template [%s] and preprocessing" % template)
        contents, filename, date = super(HydeLoader, self).get_source(
            environment, template)
        if self.preprocessor:
            resource = self.site.content.resource_from_relative_path(template)
            if resource:
                # A preprocessor may return None; keep the original then.
                contents = self.preprocessor(resource, contents) or contents
        return (contents, filename, date)
# pylint: disable-msg=W0104,E0602,W0613,R0201
class Jinja2Template(Template):
    """
    The Jinja2 Template implementation
    """

    def __init__(self, sitepath):
        super(Jinja2Template, self).__init__(sitepath)

    def configure(self, site, engine=None):
        """
        Uses the site object to initialize the jinja environment.
        """
        self.site = site
        self.engine = engine
        self.preprocessor = (engine.preprocessor
                             if hasattr(engine, 'preprocessor') else None)
        self.loader = HydeLoader(self.sitepath, site, self.preprocessor)
        self.env = Environment(loader=self.loader,
                               undefined=SilentUndefined,
                               trim_blocks=True,
                               extensions=[IncludeText,
                                           Markdown,
                                           Syntax,
                                           Reference,
                                           Refer,
                                           YamlVar,
                                           'jinja2.ext.do',
                                           'jinja2.ext.loopcontrols',
                                           'jinja2.ext.with_'])
        self.env.globals['media_url'] = media_url
        self.env.globals['content_url'] = content_url
        self.env.globals['engine'] = engine
        # Per-template dependency cache, keyed by template path.
        self.env.globals['deps'] = {}
        self.env.filters['markdown'] = markdown
        self.env.filters['syntax'] = syntax
        config = {}
        if hasattr(site, 'config'):
            config = site.config
        self.env.extend(config=config)
        # typogrify is optional; register its filters when available.
        try:
            from typogrify.templatetags import jinja2_filters
        except ImportError:
            jinja2_filters = False
        if jinja2_filters:
            jinja2_filters.register(self.env)

    def get_dependencies(self, path):
        """
        Finds dependencies hierarchically based on the included
        files.

        :param path: relative path of the template to analyse.
        :returns: de-duplicated list of referenced template paths.
        """
        text = self.env.loader.get_source(self.env, path)[0]
        from jinja2.meta import find_referenced_templates
        ast = self.env.parse(text)
        tpls = find_referenced_templates(ast)
        # Bug fix: the original looked up the literal key 'path' instead of
        # the value of the ``path`` argument, so pre-recorded dependencies
        # were never picked up.
        deps = list(self.env.globals['deps'].get(path, []))
        for dep in tpls:
            # find_referenced_templates yields None for dynamic includes;
            # skip those instead of recording them (the original appended
            # them before checking).
            if dep:
                deps.append(dep)
                deps.extend(self.get_dependencies(dep))
        return list(set(deps))

    @property
    def exception_class(self):
        """
        The exception to throw. Used by plugins.
        """
        return TemplateError

    @property
    def patterns(self):
        """
        The pattern for matching selected template statements.
        """
        return {
            "block_open": '\s*\{\%\s*block\s*([^\s]+)\s*\%\}',
            "block_close": '\s*\{\%\s*endblock\s*([^\s]*)\s*\%\}',
            "include": '\s*\{\%\s*include\s*(?:\'|\")(.+?\.[^.]*)(?:\'|\")\s*\%\}',
            "extends": '\s*\{\%\s*extends\s*(?:\'|\")(.+?\.[^.]*)(?:\'|\")\s*\%\}'
        }

    def get_include_statement(self, path_to_include):
        """
        Returns an include statement for the current template,
        given the path to include.
        """
        return '{%% include \'%s\' %%}' % path_to_include

    def get_extends_statement(self, path_to_extend):
        """
        Returns an extends statement for the current template,
        given the path to extend.
        """
        return '{%% extends \'%s\' %%}' % path_to_extend

    def get_open_tag(self, tag, params):
        """
        Returns an open tag statement.
        """
        return '{%% %s %s %%}' % (tag, params)

    def get_close_tag(self, tag, params):
        """
        Returns a close tag statement (``params`` is unused, kept for
        interface symmetry with get_open_tag).
        """
        return '{%% end%s %%}' % tag

    def get_content_url_statement(self, url):
        """
        Returns the content url statement.
        """
        return '{{ content_url(\'%s\') }}' % url

    def get_media_url_statement(self, url):
        """
        Returns the media url statement.
        """
        return '{{ media_url(\'%s\') }}' % url

    def render(self, text, context):
        """
        Renders the given resource using the context
        """
        template = self.env.from_string(text)
        return template.render(context)
| [
"lakshmi.vyas@gmail.com"
] | lakshmi.vyas@gmail.com |
6e476551aad9c0a33a8007d074245b6a965d219e | 5f052c5ebc53e8d72978fad1f6232778c404b417 | /hera/bin/hera-simulations-hermesWorkflow | 3468e9e3d3c86d6d9356592b705931e76e129c08 | [] | no_license | swipswaps/Hera | e7afa0cb0f15f377cd0ed47a27ff47c6e248439e | 7fbf20536c81c54cd69d1745f88bbcb264158e82 | refs/heads/master | 2023-01-23T02:36:26.581758 | 2020-12-07T14:20:57 | 2020-12-07T14:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,768 | #! /usr/bin/env python
import argparse
from hermes import expandWorkflow
from hermes import hermesWorkflow
import json
import os
import pathlib
from hera.datalayer import Project
from pathlib import Path
class argsHandler(Project):
    """Dispatches the CLI sub-commands of hera-simulations-hermesWorkflow."""

    # Document type used when storing expanded workflows in the DB.
    templateDocType = "HermesOpenFOAM"

    def __init__(self, projectName=None):
        # Default to the shared OpenFOAM project when no name is supplied.
        projectName = "OpenFoamRuns" if projectName is None else projectName
        super().__init__(projectName)

    def _expand_and_load(self, templatePath, newTemplatePath, loadToDB=True):
        """
        Expand a workflow template and optionally persist it.

        parameters
        ----------
        templatePath: string. the fileName/path to workflow json file
        newTemplatePath: string. the fileName/path for resulted expanded workflow json file
        loadToDB: boolean. load/not the workflow to DB. determined by the -noDB flag
        """
        expander = expandWorkflow()
        newTemplate = expander.expand(templatePath)

        # Write the expanded workflow next to wherever the caller asked.
        with open(newTemplatePath, 'w') as fp:
            json.dump(newTemplate, fp)

        if loadToDB:
            self.logger.info("Saving template to the DB")
            # The case directory of the expanded workflow is used as the
            # document resource; the whole workflow goes into desc.
            self.addSimulationsDocument(resource=newTemplate['CaseDirectory'],
                                        dataFormat='string',
                                        type=self.templateDocType,
                                        desc=dict(OF_Workflow=newTemplate))
            self.logger.info("Done")

    def _build(self, templatePath, WDPath, builder, pythonPath):
        """
        Build an executable python module from an expanded workflow.

        parameters
        ----------
        templatePath: string. the fileName/path to the expanded workflow json file
        WDPath: string. working directory for the generated workflow
        builder: string. name of the hermes builder backend (e.g. "luigi")
        pythonPath: string. the fileName/path for resulted python file
        """
        flow = hermesWorkflow(templatePath, WDPath, "")
        build = flow.build(builder)
        with open(pythonPath, "w") as file:
            file.write(build)
        self.logger.info("Done")

    def _executeLuigi(self, pythonPath):
        """
        Run the generated module through luigi's local scheduler.

        parameters
        ----------
        pythonPath: string. the fileName/path of the python file

        NOTE(review): the basename of ``pythonPath`` is used verbatim as the
        luigi module name, so callers must pass a path WITHOUT the ``.py``
        extension (runAll_handler passes ``Path(...).stem``, while
        executeLuigi_handler forwards the raw CLI argument) — confirm the
        expected form.
        """
        # Run from the module's directory so luigi can import it, then
        # restore the previous working directory.
        cwd = pathlib.Path().absolute()
        moduleParent = pathlib.Path(pythonPath).parent.absolute()
        os.chdir(moduleParent)
        os.system(f"python3 -m luigi --module {os.path.basename(pythonPath)} finalnode_xx_0 --local-scheduler")
        os.chdir(cwd)

    def expand_handler(self, args):
        """
        Handle ``expand <template> <output>``.

        parameters
        ----------
        args: argparse object' resulted from CLI inputs
        """
        arguments = args.args
        templatePath = arguments[0]
        newTemplatePath = arguments[1]
        loadToDB = False if args.noDB else True
        self._expand_and_load(templatePath, newTemplatePath, loadToDB)

    def buildPython_handler(self, args):
        """
        Handle ``buildPython <template> <python> [WD] [builder]``.

        parameters
        ----------
        args: argparse object' resulted from CLI inputs
        """
        arguments = args.args
        templatePath = arguments[0]
        pythonPath = arguments[1]
        # Default the working directory to the python file's directory and
        # the builder to luigi when not given explicitly.
        WDPath = arguments[2] if len(arguments) > 2 else str(pathlib.Path(pythonPath).parent.absolute())
        builder = arguments[3] if len(arguments) > 3 else "luigi"
        self._build(templatePath, WDPath, builder, pythonPath)

    def executeLuigi_handler(self, args):
        """
        Handle ``executeLuigi <pythonModule>``.

        parameters
        ----------
        args: argparse object' resulted from CLI inputs
        """
        arguments = args.args
        pythonPath = arguments[0]
        self._executeLuigi(pythonPath)

    def runAll_handler(self, args):
        """
        Handle ``runAll <configFile>``: expand, build and execute in one go.
        The config file is a json dict with templatePath/newTemplatePath/
        pythonPath and optional WDPath/builder keys.

        parameters
        ----------
        args: argparse object' resulted from CLI inputs
        """
        arguments = args.args
        with open(arguments[0]) as f:
            argDict = json.load(f)
        templatePath = argDict["templatePath"]
        newTemplatePath = argDict["newTemplatePath"]
        loadToDB = False if args.noDB else True
        pythonPath = argDict.get('pythonPath')
        WDPath = argDict.get('WDPath', str(pathlib.Path(pythonPath).parent.absolute()))
        builder = argDict.get('builder', "luigi")
        self._expand_and_load(templatePath, newTemplatePath, loadToDB)
        self._build(newTemplatePath, WDPath, builder, pythonPath)
        # NOTE(review): only the stem is passed here, so _executeLuigi runs
        # relative to the current directory rather than pythonPath's parent
        # — verify against executeLuigi_handler's behaviour.
        self._executeLuigi(Path(pythonPath).stem)
if __name__ == "__main__":
    # CLI shape: hera-simulations-hermesWorkflow <command> [args...] [-noDB]
    parser = argparse.ArgumentParser()
    parser.add_argument('command', nargs=1, type=str)
    parser.add_argument('args', nargs='*', type=str)
    # -noDB skips persisting the expanded workflow to the database.
    parser.add_argument('-noDB', action='store_true')
    args = parser.parse_args()
    funcName = args.command[0]
    # The project name (last positional arg) only matters for DB-backed
    # `expand` runs; everything else uses the default project.
    projectName = args.args[-1] if not args.noDB and funcName == 'expand' else None
    handler = argsHandler(projectName)
    # Dispatch to <command>_handler, e.g. `expand` -> expand_handler.
    function = getattr(handler, f"{funcName}_handler")
    function(args)
| [
"davidg@example.com"
] | davidg@example.com | |
8319e02dd8e51c0f3c972288a559d15a0f3bb1c5 | acd41dc7e684eb2e58b6bef2b3e86950b8064945 | /res/packages/scripts/scripts/common/Lib/plat-mac/Carbon/Cm.py | 81888a1e6189f6251d73285153430da7c7720a3a | [] | no_license | webiumsk/WoT-0.9.18.0 | e07acd08b33bfe7c73c910f5cb2a054a58a9beea | 89979c1ad547f1a1bbb2189f5ee3b10685e9a216 | refs/heads/master | 2021-01-20T09:37:10.323406 | 2017-05-04T13:51:43 | 2017-05-04T13:51:43 | 90,268,530 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 362 | py | # 2017.05.04 15:34:09 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/Cm.py
from _Cm import *
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-mac\Carbon\Cm.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:34:09 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
1eb42d6df6e8ec59425615e6f90c19e7fc7803e3 | 91e05c703f3868d652fa4b32825df274cce26658 | /neural_net/convolution.py | 34f4d80ec2a81ad1939203d1993ef0b4fb23ce6a | [] | no_license | mie998/image_processing | 2843a0423b84c89ddf95179d047fe7689ee56e4f | fcd9a5e306ca20a9e85f7f87caba83b148a772e1 | refs/heads/master | 2022-11-15T07:38:08.815740 | 2020-07-17T11:07:47 | 2020-07-17T11:07:47 | 224,567,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,983 | py | from collections import OrderedDict
import matplotlib.pyplot as plt
import os
import sys
sys.path.append(os.pardir)
from common.layers import *
from common.utils import *
from common.optimizer import *
class ConvolutionalNeuralNet:
    """A small CNN for CIFAR-10 style inputs.

    Architecture: conv -> relu -> max-pool -> affine -> relu -> dropout
    -> affine -> softmax with cross-entropy loss.
    """

    def __init__(self, input_shape, hidden_size, output_size, conv_params):
        """Build the layers and randomly initialise the parameters.

        :param input_shape: (channels, height, width) of one input image.
        :param hidden_size: width of the fully connected hidden layer.
        :param output_size: number of output classes.
        :param conv_params: dict with filter_num/filter_size/filter_stride/
            filter_padding and pool_size/pool_stride/pool_padding.
        """
        filter_num = conv_params['filter_num']
        filter_size = conv_params['filter_size']
        filter_stride = conv_params['filter_stride']
        filter_padding = conv_params['filter_padding']
        pool_size = conv_params['pool_size']
        pool_stride = conv_params['pool_stride']
        pool_padding = conv_params['pool_padding']
        channel_num = input_shape[0]
        # Spatial size after the convolution, then flattened size after pooling.
        conv_output_size = (input_shape[2] + 2 * filter_padding - filter_size) // filter_stride + 1
        pool_output_num = filter_num * ((conv_output_size + 2 * pool_padding - pool_size) // pool_stride + 1) ** 2

        # Draw each (w, b) pair from a single generator call; the original
        # called the generator twice per layer, producing w and b from two
        # unrelated random draws.
        w2_b2 = random_array_generator_normal(pool_output_num, hidden_size)
        w3_b3 = random_array_generator_normal(hidden_size, output_size)
        self.params = {
            'w1': np.random.randn(filter_num, channel_num, filter_size, filter_size),
            'b1': np.random.randn(filter_num),
            'w2': w2_b2[0],
            'b2': w2_b2[1],
            'w3': w3_b3[0],
            'b3': w3_b3[1],
        }

        self.layers = OrderedDict()
        # Bug fix: this key must match the lookups in gradient() below; the
        # original registered 'convolution' here but read 'convolution1'
        # there, which raised a KeyError during back-propagation.
        self.layers['convolution1'] = Convolution(w=self.params['w1'], b=self.params['b1'],
                                                  stride=filter_stride, padding=filter_padding)
        self.layers['relu1'] = ReLU()
        self.layers['pooling'] = Pooling(pool_h=pool_size, pool_w=pool_size,
                                         stride=pool_stride, padding=pool_padding)
        self.layers['affine1'] = Affine(w=self.params['w2'], b=self.params['b2'])
        self.layers['relu2'] = ReLU()
        # NOTE(review): is_test=False is hard-coded, so dropout stays active
        # during evaluation as well — confirm whether that is intended.
        self.layers['dropout'] = Dropout(drop_rate=0.3, is_test=False)
        self.layers['affine2'] = Affine(w=self.params['w3'], b=self.params['b3'])
        self.lastLayer = SoftMaxWithLoss()

    def predict(self, x):
        """Forward pass through every layer; returns the class scores."""
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        """Return the softmax cross-entropy loss for inputs ``x`` and labels ``t``."""
        y = self.predict(x)
        return self.lastLayer.forward(y, t)

    def accuracy(self, x, t):
        """Return classification accuracy over the batch (``t`` may be one-hot)."""
        y = self.predict(x)
        ans = np.argmax(y, axis=1)
        if t.ndim != 1:
            t = np.argmax(t, axis=1)
        return np.sum(ans == t) / float(x.shape[0])

    def gradient(self, x, t):
        """Back-propagate and return the gradients for every parameter."""
        # Forward pass populates the per-layer caches.
        self.loss(x, t)
        # Backward pass in reverse layer order.
        dout = self.lastLayer.backward(1)
        for layer in reversed(list(self.layers.values())):
            dout = layer.backward(dout)
        gradients = {
            'w1': self.layers['convolution1'].dw,
            'b1': self.layers['convolution1'].db,
            'w2': self.layers['affine1'].dw,
            'b2': self.layers['affine1'].db,
            'w3': self.layers['affine2'].dw,
            'b3': self.layers['affine2'].db,
        }
        return gradients
def main():
    """Train the CNN on CIFAR-10 batch 1 and plot train/test accuracy."""
    np.random.seed(1)
    iteration = 10000
    batch_size = 100
    hidden_size = 100
    output_size = 10
    # CIFAR-10 ("sifar" sic): 10000 images of 3x32x32 per batch file.
    sifar_img_num = 10000
    sifar_channel_num = 3
    sifar_img_size = 32
    epoch_size = sifar_img_num / batch_size
    train_losses = []
    train_accs = []
    test_accs = []

    pickle = '../data/cifar-10-batches-py/'
    train_x, train_y = unpickle(pickle + 'data_batch_1')
    test_x, test_y = unpickle(pickle + 'test_batch')
    train_x = normalization(train_x)
    test_x = normalization(test_x)
    # Reshape the flat rows into (N, C, H, W) image tensors.
    train_x = train_x.reshape(sifar_img_num, sifar_channel_num, sifar_img_size, sifar_img_size)
    test_x = test_x.reshape(sifar_img_num, sifar_channel_num, sifar_img_size, sifar_img_size)

    # Set the parameters so that img_size + padding*2 - filter_size is a
    # multiple of filter_stride.
    conv_params = {
        'filter_num': 10,
        'filter_size': 5,
        'filter_stride': 2,
        'filter_padding': 1,
        'pool_size': 5,
        'pool_stride': 1,
        'pool_padding': 0,
    }
    CNN = ConvolutionalNeuralNet((sifar_channel_num, sifar_img_size, sifar_img_size),
                                 hidden_size, output_size, conv_params)
    for i in range(iteration):
        # Sample a random mini-batch (with replacement across iterations).
        batch_idxes = np.random.choice(sifar_img_num, batch_size)
        train_x_batch = train_x[batch_idxes]
        train_y_batch = train_y[batch_idxes]
        train_y_batch = to_one_hot_vector_batch(train_y_batch, output_size)
        gradients = CNN.gradient(train_x_batch, train_y_batch)

        ### select optimizer for comparison
        # optimizer = SGD(lr=0.01)
        # optimizer = Momentum(alpha=0.9, lr=0.01)
        # optimizer = AdaGrad(lr=0.001, delta=1e-8)
        # optimizer = RMSProp(lr=0.001, law=0.9, delta=1e-8)
        # optimizer = AdaDelta(law=0.95, delta=1e-6)
        # NOTE(review): the optimizer is re-created every iteration, so
        # stateful optimizers (Momentum/Adam) lose their moment estimates
        # each step — consider constructing it once before the loop.
        optimizer = Adam(alpha=0.001, beta_1=0.9, beta_2=0.999, delta=1e-8)
        optimizer.update(CNN.params, gradients)
        loss = CNN.loss(train_x_batch, train_y_batch)
        print("loss: {}".format(loss))

        # Record accuracies once per epoch (every `epoch_size` iterations).
        if i % epoch_size == 0:
            train_acc = CNN.accuracy(train_x, train_y)
            test_acc = CNN.accuracy(test_x, test_y)
            train_accs.append(train_acc)
            test_accs.append(test_acc)
            train_losses.append(loss)
            print("----- epoch{} -----".format(i / epoch_size))
            print("train accuracy: {}%".format(train_acc * 100))
            print("test accuracy: {}%".format(test_acc * 100))

    # Plot the accuracy curves over epochs.
    epochs = range(len(train_accs))
    plt.plot(epochs, train_accs, 'b', label='train_acc')
    plt.plot(epochs, test_accs, 'r', label='test_acc')
    plt.title('train and test accuracy')
    plt.legend(bbox_to_anchor=(1, 0), loc='lower right')
    plt.show()
if __name__ == '__main__':
main()
| [
"kei.west.post920@gmail.com"
] | kei.west.post920@gmail.com |
dd8c3943762e6c4c542152b3b97add415fa3ebf0 | 91516f6f7bb90f7d88a437032b5e9b2c48014bfc | /Ceaser/admin.py | 5843fb29aae84a055d47b1d126493ee70a76166d | [] | no_license | Hritikchoudhary1000/Ceaser | 94b089b52c1a00e61a5f3890207f9f8d4454bd06 | 4dddea211382c9d619c3a7678498a50910c4ecae | refs/heads/master | 2021-05-16T22:55:18.123932 | 2020-03-27T10:20:48 | 2020-03-27T10:20:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | from django.contrib import admin
from .models import Document, UserProfileInfo
# Register your models here.
admin.site.register(UserProfileInfo)
admin.site.register(Document) | [
"bendwalayush3@gmail.com"
] | bendwalayush3@gmail.com |
f802ccf82930e013add713cd13df5f3f979a97d7 | a051440b292819666be8761bde4ee4c80d391cfe | /custom_components/blink4home.py | 6598c4607da296c10ae492ae5c40e3426c67d80b | [
"MIT"
] | permissive | dib0/home-assistant-custom-components | 7b11fc64ed5af7b0d3a0d7e0efbf9d1fe44891fb | 95ada814bf6b1538f29c2995792aed7bda3982be | refs/heads/master | 2021-01-11T20:33:42.220320 | 2017-01-17T16:46:52 | 2017-01-17T16:46:52 | 79,143,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,347 | py | """
Support for Blink4home cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/blink4home/
"""
import asyncio
import logging
from datetime import timedelta
import json
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = 'Blink4Home camera support'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
CONF_NETWORK_ID = 'network_id'
DOMAIN = 'blink4home'
DATA_BLINK = 'blink4home'
API_URL = 'https://rest.prir.immedia-semi.com'
CLIENT_SPECIFIER = 'Home-Assistant | '
HEADERS = {'Content-Type': 'application/json'}
TOKEN_HEADER = 'TOKEN_AUTH'
UNAUTH_ACCESS = 'Unauthorized access'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NETWORK_ID, default=0): cv.positive_int,
})
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass, config):
    """Setting up the platform.

    Reads credentials from the `blink4home` config section, creates the
    API client and registers the arm/disarm services.
    """
    blink_config = config.get(DOMAIN, {})
    username = blink_config.get(CONF_USERNAME)
    password = blink_config.get(CONF_PASSWORD)
    network = blink_config.get(CONF_NETWORK_ID)
    # The HA version string is sent as part of the client specifier.
    version = hass.config.as_dict()['version']

    def arm_blink(call):
        """Arm the system."""
        blink = hass.data[DATA_BLINK]
        blink.arm()

    def disarm_blink(call):
        """Disarm the system."""
        blink = hass.data[DATA_BLINK]
        blink.disarm()

    # NOTE(review): Blink4Home.__init__ performs a blocking HTTP login;
    # running it directly inside a coroutine stalls the event loop —
    # consider offloading to an executor. Left unchanged here.
    blink = Blink4Home(username, password, version, network)

    # Store data
    hass.data[DATA_BLINK] = blink

    # Add service
    hass.services.async_register(DOMAIN, 'arm', arm_blink)
    hass.services.async_register(DOMAIN, 'disarm', disarm_blink)

    # Setup succeeds only when the initial login worked.
    return blink.logged_in
class Blink4Home(object):
    """Minimal client for the Blink home camera REST API.

    Fixes over the original implementation:
    - request headers are built from a copy of HEADERS, so the shared
      module-level dict is no longer mutated with the auth token;
    - after a 401-triggered re-login, the retried response is returned
      (previously it was discarded and the stale 401 was returned).
    """

    def __init__(self, username, password, version, network):
        """Store credentials and attempt an initial login.

        :param username: account email address.
        :param password: account password.
        :param version: Home Assistant version (sent as client specifier).
        :param network: preferred network id, or 0 for the first one found.
        """
        self._username = username
        self._password = password
        self._version = version
        self._api_key = ""
        self._network_id = ""
        self._network = network
        self._armed = False
        self._notifications = 0
        self._logged_in = False

        # Login
        self._login()

    @property
    def logged_in(self):
        """Return True when the initial login succeeded."""
        return self._logged_in

    @property
    def notifications(self):
        """Return the amount of notifications."""
        return self._notifications

    @property
    def state(self):
        """Return the armed state."""
        return self._armed

    def _login(self, force=False):
        """Authenticate and select a network; sets _logged_in on success.

        :param force: re-authenticate even when a token is already cached.
        """
        if not self._api_key or force:
            self._api_key = ''
            url = (API_URL + '/login')
            data = {'password': self._password,
                    'client_specifier': CLIENT_SPECIFIER +
                    str(self._version),
                    'email': self._username}
            _LOGGER.debug('Sending request with: %s', json.dumps(data))

            response = requests.post(url,
                                     data=json.dumps(data),
                                     headers=dict(HEADERS), timeout=10)
            if response.status_code == 200:
                _LOGGER.debug('Received login response: %s', response.text)
                result = response.json()
                self._api_key = result['authtoken']['authtoken']
                _LOGGER.debug('Got api-key: %s', self._api_key)

                # Pick the configured network id, or the first one listed
                # when no preference (network == 0) was configured.
                networks = result['networks']
                found = False
                for key, value in networks.items():
                    _LOGGER.debug('Network: %s, value: %s', key, value)
                    if not found and \
                            (self._network == 0 or str(self._network) == key):
                        self._network_id = key
                        found = True
                    if found:
                        break
                if found:
                    self._logged_in = True
                    self.update()
                    _LOGGER.debug('Api key: %s', json.dumps(self._api_key))
                    _LOGGER.debug('Selected network: %s',
                                  json.dumps(self._network_id))
                # NOTE(review): when no network matches, _logged_in stays
                # False silently — confirm whether an error should be logged.
            else:
                self._api_key = ''
                _LOGGER.debug('Received error response: %s',
                              response.status_code)
                _LOGGER.error('Error logging in to the Blink4Home platform. '
                              'Received status was %s.',
                              response.status_code)

    def _do_post(self, url, data='', second_try=False):
        """POST ``data`` to ``url`` with authenticated headers.

        Re-authenticates and retries exactly once on HTTP 401; returns the
        final requests.Response.
        """
        if not self._logged_in or not self._api_key:
            self._login(True)
        if not self._api_key:
            _LOGGER.error('Couldn\'t complete the request. There was '
                          'a problem with the login.')

        # Copy the header template so the shared dict is never mutated.
        headers = dict(HEADERS)
        headers[TOKEN_HEADER] = self._api_key
        response = requests.post(url, data=data,
                                 headers=headers, timeout=10)

        if not response.status_code == 200:
            if response.status_code == 401 and not second_try:
                # Token expired: log in again and retry once, returning the
                # retried response to the caller.
                _LOGGER.debug('Token not valid: %s', response.status_code)
                self._login(True)
                return self._do_post(url=url, data=data, second_try=True)
            _LOGGER.debug('Received error response on post: %s',
                          response.text)
            _LOGGER.error('Error with the Blink4Home '
                          'platform. Received status was %s.',
                          response.status_code)
        return response

    def _do_get(self, url, second_try=False):
        """GET ``url`` with authenticated headers.

        Re-authenticates and retries exactly once on HTTP 401; returns the
        final requests.Response.
        """
        if not self._logged_in or not self._api_key:
            self._login(True)
        if not self._api_key:
            _LOGGER.error('Couldn\'t complete the request. '
                          'There was a problem with the login.')

        headers = dict(HEADERS)
        headers[TOKEN_HEADER] = self._api_key
        response = requests.get(url, headers=headers,
                                timeout=10)

        if not response.status_code == 200:
            if response.status_code == 401 and not second_try:
                _LOGGER.debug('Token not valid: %s', response.status_code)
                self._login(True)
                return self._do_get(url=url, second_try=True)
            _LOGGER.debug('Received error response on get: %s',
                          response.text)
            _LOGGER.error('Error with the Blink4Home '
                          'platform. Received status was %s.',
                          response.status_code)
        return response

    def arm(self):
        """Arm the system and refresh the cached state."""
        _LOGGER.debug('Arming the system')
        response = self._do_post(API_URL + '/network/' +
                                 str(self._network_id) + '/arm')
        if response.status_code == 200:
            _LOGGER.debug('Received arm response: %s', response.text)
            self.update()
        else:
            _LOGGER.debug('Received error response on update: %s',
                          response.text)
            _LOGGER.error('Error arming the Blink4Home '
                          'platform. Received status was %s.',
                          response.status_code)

    def disarm(self, second_try=False):
        """Disarm the system and refresh the cached state.

        ``second_try`` is unused; kept for backward compatibility.
        """
        _LOGGER.debug('Disarming the system')
        response = self._do_post(API_URL + '/network/' +
                                 str(self._network_id) + '/disarm')
        if response.status_code == 200:
            _LOGGER.debug('Received disarm response: %s', response.text)
            self.update()
        else:
            _LOGGER.debug('Received error response on update: %s',
                          response.text)
            _LOGGER.error('Error disarming the Blink4Home '
                          'platform. Received status was %s.',
                          response.status_code)

    def update(self, second_try=False):
        """Refresh armed state and notification count from /homescreen.

        ``second_try`` is unused; kept for backward compatibility.
        """
        _LOGGER.debug('Updating the system')
        response = self._do_get(API_URL + '/homescreen')
        if response.status_code == 200:
            _LOGGER.debug('Received update response: %s', response.text)
            result = response.json()
            self._armed = result['network']['armed']
            self._notifications = result['network']['notifications']
        else:
            _LOGGER.debug('Received error response on update: %s',
                          response.text)
            _LOGGER.error('Error updating the Blink4Home '
                          'sensor. Received status was %s.',
                          response.status_code)
| [
"noreply@github.com"
] | noreply@github.com |
76dfb7da0c46bc7c91e9cc31820e5adb7354dc54 | 7be373d17a0a46c0e1fa486ce66ef295c17b7757 | /assignment3/complex.py | 87edda453c390aa3674a5e07993b9dc44174c4de | [] | no_license | anettfre/INF3331 | 425c47deaa0438ec2b1c8075a36249139bfb3e1f | 54ee1aba4ebf7d26dfd01d4e0ed34439dce02a59 | refs/heads/master | 2020-05-25T04:56:18.907988 | 2020-02-01T18:34:55 | 2020-02-01T18:34:55 | 187,638,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,681 | py | #!/usr/bin/python3
from math import sqrt
class Complex():
    """A minimal complex-number type with arithmetic support.

    Mixed arithmetic with built-in numbers works because Python's
    int/float/complex all expose ``.real`` and ``.imag`` attributes.
    """

    def __init__(self, real, imag):
        """Store the real and imaginary parts."""
        self.real = real
        self.imag = imag

    # Assignment 3.3
    def conjugate(self):
        """Return a new Complex that is the conjugate of self."""
        return Complex(self.real, -self.imag)

    def modulus(self):
        """Return the modulus |self| as a float."""
        return sqrt(self.real ** 2 + self.imag ** 2)

    def __add__(self, other):
        """Return self + other as a new Complex."""
        return Complex(self.real + other.real, self.imag + other.imag)

    def __sub__(self, other):
        """Return self - other as a new Complex."""
        return Complex(self.real - other.real, self.imag - other.imag)

    def __mul__(self, other):
        """Return self * other using (a+bi)(c+di) = (ac-bd) + (ad+bc)i."""
        real = self.real * other.real - self.imag * other.imag
        imag = self.real * other.imag + self.imag * other.real
        return Complex(real, imag)

    def __eq__(self, other):
        """Return True when both parts match (works with builtin numbers too)."""
        if self.real == other.real and self.imag == other.imag:
            return True
        else:
            return False

    # Assignment 3.4 — reflected operators, invoked when the left operand
    # is a builtin number (e.g. ``1 + Complex(2, 3)``).
    def __radd__(self, other):
        """Return other + self (addition commutes)."""
        return self.__add__(other)

    def __rsub__(self, other):
        """Return other - self.

        Bug fix: the original computed ``self - other``, which is the
        negation of the correct result for reflected subtraction.
        """
        return Complex(other.real - self.real, other.imag - self.imag)

    def __rmul__(self, other):
        """Return other * self (multiplication commutes)."""
        return self.__mul__(other)

    def __neg__(self):
        """Return -self (previously unimplemented: it returned None)."""
        return Complex(-self.real, -self.imag)

    # Make the `complex` function turn this into Python's version of a
    # complex number.
    def __complex__(self):
        """Return Python's builtin complex equivalent of self."""
        return complex(self.real, self.imag)
| [
"noreply@github.com"
] | noreply@github.com |
26c63443b63b1498758a95004e4fd26103c6f481 | 54ea50206d6532799da5dfb5f3948f9690835298 | /python数据分析与挖掘学习笔记/src/chapter4/code/data_discretization.py | 52bd10b44315afb727bf8d46e6f842f0aa03b813 | [] | no_license | chenhch8/machine-learning | 5860587e6363d1bcaca8046e0b8b0a334c38d755 | 77ff004e19be8748466dccb3e2492a760f9b1d3b | refs/heads/master | 2021-01-21T16:45:31.883671 | 2017-07-13T16:36:01 | 2017-07-13T16:36:01 | 91,905,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,462 | py | # -*- coding: utf-8 -*-
'''Data discretization demo: compare equal-width, equal-frequency, and
KMeans-cluster-based binning of one coefficient column from a TCM
(traditional Chinese medicine) spreadsheet.'''
import pandas as pd
file = '../data/discretization_data.xls'
data = pd.read_excel(file)
# Work on a copy of the single coefficient column (the header is Chinese:
# "liver-qi stagnation syndrome coefficient").
data = data[u'肝气郁结证型系数'].copy()
k = 4
# Equal-width discretization into k bins.
d1 = pd.cut(data, k, labels = list(range(k)))
# Equal-frequency discretization: bin edges at k+1 evenly spaced quantiles,
# pulled from the percentile rows of describe()'s output.
w = [1.0 * i / k for i in range(k + 1)]
w = data.describe(percentiles = w)[4:4+k+1]
d2 = pd.cut(data, w, labels = list(range(k)))
# Cluster-based discretization using KMeans.
from sklearn.cluster import KMeans
# n_clusters: number of clusters; n_jobs: number of parallel workers.
kmodel = KMeans(n_clusters = k, n_jobs = 4)
# Fit on the column reshaped to (n_samples, 1).
kmodel.fit(data.values.reshape(len(data), 1))
# Sort the resulting cluster centers ascending.
c = pd.DataFrame(kmodel.cluster_centers_).sort_values(0)
# Midpoints of adjacent centers become the interior bin boundaries.
w = c.rolling(center=False, window=2).mean()[1:]
# Prepend/append the outer boundaries (0 and the column maximum).
w = [0] + list(w[0]) + [data.max()]
d3 = pd.cut(data, w, labels = list(range(k)))
import matplotlib.pyplot as plt
# Use the SimHei font so Chinese labels render; fix minus-sign display.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
'''自定义作图函数用于显示聚类结果'''
# cluster_plot (below): d is the per-sample bin assignment, k the bin count.
def cluster_plot(d, k):
    """Plot each of the k bins of the module-level `data` series on its own
    horizontal band and return the pyplot module for chaining .show()."""
    plt.figure(figsize=(8, 3))
    for band in range(k):
        # Boolean mask selects only the points assigned to this bin.
        members = data[d == band]
        plt.plot(members, [band] * len(members), 'o')
    # Keep all k bands inside the visible y range.
    plt.ylim(-0.5, k - 0.5)
    return plt
# Render the three discretization schemes for visual comparison.
cluster_plot(d1, k).show()
cluster_plot(d2, k).show()
cluster_plot(d3, k).show()
"496746441@qq.com"
] | 496746441@qq.com |
8e3269fafdfc4c4927faaa47a88c3a3c531bf398 | 676f6f2d02db6aeeaa1bb0b28ab49e8c73923d0e | /venv/Lib/site-packages/falcon/bench/bench.py | b0c60863ab0aaefa5eee2e548c536b1e6bc55c82 | [
"Apache-2.0"
] | permissive | vrian/orsen | ce34f74ea3a14c95d37ffa5c694b7c66725925df | 9c10148aba62868fad4b679a4b9b717829586e96 | refs/heads/master | 2023-01-21T21:47:06.210918 | 2018-06-23T04:46:26 | 2018-06-23T04:46:26 | 120,284,869 | 1 | 0 | Apache-2.0 | 2023-01-09T09:39:16 | 2018-02-05T09:44:03 | Python | UTF-8 | Python | false | false | 11,546 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
from collections import defaultdict, deque
from decimal import Decimal
import gc
import inspect
import platform
import random
import sys
import tempfile
import timeit
try:
import cProfile
except ImportError:
import profile as cProfile
try:
import guppy
except ImportError:
heapy = None
else:
heapy = guppy.hpy()
try:
import pprofile
except ImportError:
pprofile = None
try:
import vmprof
from vmshare.service import Service
except ImportError:
vmprof = None
from falcon.bench import create # NOQA
import falcon.testing as helpers
# NOTE(kgriffs): Based on testing, these values provide a ceiling that's
# several times higher than fast x86 hardware can achieve today.
ITER_DETECTION_MAX_ATTEMPTS = 27
ITER_DETECTION_MULTIPLIER = 1.7
ITER_DETECTION_STARTING = 3000
# NOTE(kgriffs): Benchmark duration range, in seconds, to target
ITER_DETECTION_DURATION_MIN = 1.0
ITER_DETECTION_DURATION_MAX = 5.0
# Extra warmup factor applied on PyPy so the JIT reaches a steady state.
JIT_WARMING_MULTIPLIER = 30
PYPY = platform.python_implementation() == 'PyPy'
# Shared fixtures: a 10 KiB random response body and one custom header.
BODY = helpers.rand_string(10240, 10240).encode('utf-8')  # NOQA
HEADERS = {'X-Test': 'Funky Chicken'}  # NOQA
class StartResponseMockLite(object):
    """Mock of a WSGI `start_response` callable.

    Records how many times it was invoked and the arguments of the most
    recent invocation (status, headers, exc_info).
    """

    def __init__(self):
        # Invocation counter plus the latest arguments received.
        self._called = 0
        self.status = None
        self.headers = None
        self.exc_info = None

    def __call__(self, status, headers, exc_info=None):
        """Implements the PEP-3333 `start_response` protocol."""
        self._called = self._called + 1
        self.status, self.headers, self.exc_info = status, headers, exc_info

    @property
    def call_count(self):
        """Number of times this callable has been invoked."""
        return self._called
def bench(func, iterations, stat_memory):
    """Time `func` over `iterations` calls.

    Returns a (sec_per_req, heap_diff) pair; `heap_diff` is a guppy heap
    delta when memory stats are requested and guppy is installed, else
    None. GC is collected up front and re-enabled by timeit's setup.
    """
    gc.collect()

    heap_diff = None
    track_memory = heapy and stat_memory
    if track_memory:
        heap_before = heapy.heap()

    total_sec = timeit.timeit(func, setup=gc.enable, number=iterations)

    if track_memory:
        heap_diff = heapy.heap() - heap_before

    # Decimal via str() avoids binary-float artifacts in the division.
    sec_per_req = Decimal(str(total_sec)) / Decimal(str(iterations))
    return (sec_per_req, heap_diff)
def determine_iterations(func):
    """Grow an iteration count for `func` until one timed run lands in the
    [ITER_DETECTION_DURATION_MIN, ITER_DETECTION_DURATION_MAX) window,
    and return that count.
    """
    # NOTE(kgriffs): Algorithm adapted from IPython's magic timeit function;
    # the count grows geometrically (ITER_DETECTION_MULTIPLIER per attempt)
    # until the measured total time reaches the target window.
    iterations = ITER_DETECTION_STARTING
    for __ in range(1, ITER_DETECTION_MAX_ATTEMPTS):
        gc.collect()
        # int() because `iterations` becomes a float after multiplying.
        total_sec = timeit.timeit(
            func,
            setup=gc.enable,
            number=int(iterations)
        )
        if total_sec >= ITER_DETECTION_DURATION_MIN:
            # Sanity check: a single growth step should never overshoot.
            assert total_sec < ITER_DETECTION_DURATION_MAX
            break
        iterations *= ITER_DETECTION_MULTIPLIER
    return int(iterations)
def profile(name, env, filename=None, verbose=False):
    """Profile framework `name`'s request handler over 100k requests.

    Uses cProfile by default, or the line-granular pprofile when `verbose`
    is True. With `filename` the stats go to '<name>-<filename>'; without
    it they are printed to stdout under a banner.
    """
    if filename:
        filename = name + '-' + filename
        print('Profiling %s ==> %s' % (name, filename))
    else:
        filename = None
        title = name + ' profile'
        print()
        print('=' * len(title))
        print(title)
        print('=' * len(title))
    func = create_bench(name, env)
    gc.collect()
    num_iterations = 100000
    if PYPY:
        # Warm the JIT so steady-state code is what gets profiled.
        print('JIT warmup...')
        # TODO(kgriffs): Measure initial time, and keep iterating until
        # performance increases and then steadies
        for x in range(num_iterations * JIT_WARMING_MULTIPLIER):
            func()
        print('Ready.')
    # The profilers execute this snippet; `func` is resolved via locals().
    code = 'for x in range({0}): func()'.format(num_iterations)
    if verbose:
        if pprofile is None:
            print('pprofile not found. Please install pprofile and try again.')
            return
        pprofile.runctx(code, locals(), globals(), filename=filename)
    else:
        cProfile.runctx(code, locals(), globals(),
                        sort='tottime', filename=filename)
def profile_vmprof(name, env):
    """Profile framework `name` under vmprof (1M requests) and upload the
    result to vmprof.com.

    No-op (with a message) when vmprof is not installed.
    """
    if vmprof is None:
        print('vmprof not found. Please install vmprof and try again.')
        return
    func = create_bench(name, env)
    gc.collect()
    #
    # Based on: https://github.com/vmprof/vmprof-python/blob/master/vmprof/__main__.py
    #
    prof_file = tempfile.NamedTemporaryFile(delete=False)
    filename = prof_file.name
    vmprof.enable(prof_file.fileno())
    try:
        for __ in range(1000000):
            func()
    except BaseException as e:
        # Swallow KeyboardInterrupt/SystemExit so a partial profile is
        # still disabled and uploaded; re-raise anything else.
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise
    vmprof.disable()
    service = Service('vmprof.com')
    service.post({
        Service.FILE_CPU_PROFILE: filename,
        Service.FILE_JIT_PROFILE: filename + '.jit',
        'argv': ' '.join(sys.argv[:]),
        'VM': platform.python_implementation(),
    })
    prof_file.close()
def exhaust(iterator_or_generator):
    """Run an iterable to completion, discarding every yielded item."""
    for _ in iterator_or_generator:
        pass
def create_bench(name, env):
    """Build a zero-arg callable that drives framework `name`'s WSGI app
    once against `env` and asserts a '200 OK' status.

    Apps implemented as generator functions are fully drained so that
    body iteration cost is included in the timing.
    """
    srmock = StartResponseMockLite()
    function = name.lower().replace('-', '_')
    # NOTE(review): eval() of the framework name is safe only because
    # `name` comes from the fixed CLI choices list; never pass untrusted
    # input here.
    app = eval('create.{0}(BODY, HEADERS)'.format(function))
    def bench():
        app(env, srmock)
        assert srmock.status == '200 OK'
    def bench_generator():
        exhaust(app(env, srmock))
        assert srmock.status == '200 OK'
    if inspect.isgeneratorfunction(app):
        return bench_generator
    else:
        return bench
def consolidate_datasets(datasets):
    """Merge per-trial benchmark results, keeping each framework's best
    (minimum) seconds-per-request across all trials.

    `datasets` is a list of trials, each a list of
    (name, sec_per_req, heap_diff) tuples; returns [(name, best), ...]
    in first-seen order.
    """
    best = {}
    for trial in datasets:
        for name, sec_per_req, _unused in trial:
            if name not in best or sec_per_req < best[name]:
                best[name] = sec_per_req
    return list(best.items())
def round_to_int(dec):
    """Round a Decimal to the nearest integer (context default rounding,
    i.e. banker's rounding) and return it as an int."""
    nearest = dec.to_integral_value()
    return int(nearest)
def avg(array):
    """Arithmetic mean of a non-empty sequence (true division)."""
    total = sum(array)
    return total / len(array)
def hello_env():
    """Build a WSGI environ for the simple parameterized 'hello' route
    used by most framework benchmarks."""
    return helpers.create_environ(
        '/hello/584/test',
        query_string='limit=10&thing=ab',
        headers={'Content-Type': 'application/json'})
def queues_env():
    """Build a WSGI environ for a realistic queues/claims URL with an
    escaped query string (used by the 'falcon-ext' benchmark)."""
    claim_path = ('/v1/852809/queues/0fd4c8c6-bd72-11e2-8e47-db5ebd4c8125'
                  '/claims/db5ebd4c8125')
    return helpers.create_environ(
        claim_path,
        query_string='limit=10&thing=a+b&x=%23%24',
        headers={'Content-Type': 'application/json'})
def get_env(framework):
    """Pick the richer queues environ for the extended Falcon benchmark;
    every other framework gets the simple hello environ."""
    if framework == 'falcon-ext':
        return queues_env()
    return hello_env()
def run(frameworks, trials, iterations, stat_memory):
    """Benchmark every importable framework in `frameworks`.

    Runs `trials` timing passes; each pass yields a list of
    (name, sec_per_req, heap_diff) tuples. An `iterations` of 0 means
    auto-detect a suitable count per framework. Returns the list of
    per-trial datasets (empty when nothing is importable).

    Note: `frameworks` is mutated in place (missing libraries removed).
    """
    # Skip any frameworks that are not installed.
    # BUG FIX: iterate over a snapshot of the list -- deleting from the
    # list being iterated used to skip the element following each removal,
    # so two adjacent missing frameworks were not both filtered out.
    for name in list(frameworks):
        try:
            create_bench(name, hello_env())
        except ImportError as ex:
            print(ex)
            print('Skipping missing library: ' + name)
            del frameworks[frameworks.index(name)]
    print()
    datasets = []
    if not frameworks:
        print('Nothing to do.\n')
        return datasets
    # Pre-build one benchmark callable (and iteration count) per framework.
    benchmarks = []
    for name in frameworks:
        bm = create_bench(name, get_env(name))
        bm_iterations = iterations if iterations else determine_iterations(bm)
        if PYPY:
            print('{}: JIT warmup'.format(name))
            # TODO(kgriffs): Measure initial time, and keep iterating until
            # performance increases and then steadies
            bench(bm, bm_iterations * JIT_WARMING_MULTIPLIER, False)
            # Re-detect after warmup since the JIT changes the timings.
            bm_iterations = iterations if iterations else determine_iterations(bm)
        benchmarks.append((name, bm_iterations, bm))
        print('{}: {} iterations'.format(name, bm_iterations))
    print()
    for r in range(trials):
        # NOTE(review): this shuffles `frameworks`, but the measurement
        # loop below iterates `benchmarks`, so the run order is fixed.
        random.shuffle(frameworks)
        sys.stdout.write('Benchmarking, Trial %d of %d' %
                         (r + 1, trials))
        sys.stdout.flush()
        dataset = []
        for name, bm_iterations, bm in benchmarks:
            sec_per_req, heap_diff = bench(
                bm,
                bm_iterations,
                stat_memory
            )
            dataset.append((name, sec_per_req, heap_diff))
            sys.stdout.write('.')
            sys.stdout.flush()
        datasets.append(dataset)
        print('done.')
    return datasets
def main():
    """CLI entry point: parse args, then either profile one framework or
    run the benchmark suite and print a ranked req/sec table (plus
    optional guppy memory stats)."""
    frameworks = [
        'bottle',
        'django',
        'falcon',
        'falcon-ext',
        'flask',
        'pecan',
        'werkzeug',
    ]
    parser = argparse.ArgumentParser(description='Falcon benchmark runner')
    parser.add_argument('-b', '--benchmark', type=str, action='append',
                        choices=frameworks, dest='frameworks', nargs='+')
    parser.add_argument('-i', '--iterations', type=int, default=0)
    parser.add_argument('-t', '--trials', type=int, default=10)
    parser.add_argument('-p', '--profile', type=str,
                        choices=['standard', 'verbose', 'vmprof'])
    parser.add_argument('-o', '--profile-output', type=str, default=None)
    parser.add_argument('-m', '--stat-memory', action='store_true')
    args = parser.parse_args()
    if args.stat_memory and heapy is None:
        print('WARNING: Guppy not installed; memory stats are unavailable.\n')
    if args.frameworks:
        frameworks = args.frameworks
    # Normalize frameworks type: action='append' with nargs='+' yields a
    # list of lists, so flatten one level here.
    normalized_frameworks = []
    for one_or_many in frameworks:
        if isinstance(one_or_many, list):
            normalized_frameworks.extend(one_or_many)
        else:
            normalized_frameworks.append(one_or_many)
    frameworks = normalized_frameworks
    # Profile? (profiling always targets the extended falcon benchmark)
    if args.profile:
        framework = 'falcon-ext'
        if args.profile == 'vmprof':
            profile_vmprof(framework, get_env(framework))
        else:
            profile(framework, get_env(framework),
                    filename=args.profile_output,
                    verbose=(args.profile == 'verbose'))
        print()
        return
    # Otherwise, benchmark
    datasets = run(frameworks, args.trials, args.iterations,
                   args.stat_memory)
    if not datasets:
        return
    # Rank frameworks fastest-first; the slowest serves as the 1x baseline.
    dataset = consolidate_datasets(datasets)
    dataset = sorted(dataset, key=lambda r: r[1])
    baseline = dataset[-1][1]
    print('\nResults:\n')
    for i, (name, sec_per_req) in enumerate(dataset):
        req_per_sec = round_to_int(Decimal(1) / sec_per_req)
        us_per_req = (sec_per_req * Decimal(10 ** 6))
        factor = round_to_int(baseline / sec_per_req)
        print('{3}. {0:.<20s}{1:.>06d} req/sec or {2: >3.2f} μs/req ({4}x)'.
              format(name, req_per_sec, us_per_req, i + 1, factor))
    # Memory deltas are only reported for the first trial.
    if heapy and args.stat_memory:
        print()
        for name, _, heap_diff in datasets[0]:
            title = 'Memory change induced by ' + name
            print()
            print('=' * len(title))
            print(title)
            print('=' * len(title))
            print(heap_diff)
        print()
# Script entry point: run the benchmark CLI when executed directly.
if __name__ == '__main__':
    main()
| [
"jbryanalburo@gmail.com"
] | jbryanalburo@gmail.com |
299e52aceff3116633645892938214e85f2e1ce5 | ef8fea0e7fe917743b79e35e6193eff48c675601 | /sample_players.py | 902fe5801b4ffefdf289e978a8548275fbba9863 | [] | no_license | b-Squad/AIND-Isolation-master | ef878e817f1711ec0f380325fd42c36d2c35a19a | 6dd8c3fb51400f83f01c0542eb540e30f7ae9ad0 | refs/heads/master | 2021-01-23T16:20:57.394383 | 2017-06-04T06:33:34 | 2017-06-04T06:33:34 | 93,294,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,435 | py | """This file contains a collection of player classes for comparison with your
own agent and example heuristic functions.
************************************************************************
*********** YOU DO NOT NEED TO MODIFY ANYTHING IN THIS FILE **********
************************************************************************
"""
from random import randint
def null_score(game, player):
    """Uninformative heuristic: only decided games are distinguished.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state (player locations and blocked cells).
    player : hashable
        One of the two players registered with `game`.

    Returns
    ----------
    float
        -inf if `player` has lost, +inf if `player` has won, and 0 for
        every non-terminal state.
    """
    if game.is_loser(player):
        return float("-inf")
    return float("inf") if game.is_winner(player) else 0.
def open_move_score(game, player):
    """Basic heuristic from lecture: number of moves open to `player`.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state.
    player : hashable
        One of the two players registered with `game`.

    Returns
    ----------
    float
        -inf/+inf for lost/won positions; otherwise the count of legal
        moves available to `player`.
    """
    # Terminal positions short-circuit before any move counting (loss is
    # checked first, matching the original evaluation order).
    for is_terminal, terminal_value in ((game.is_loser, float("-inf")),
                                        (game.is_winner, float("inf"))):
        if is_terminal(player):
            return terminal_value
    return float(len(game.get_legal_moves(player)))
def improved_score(game, player):
    """'Improved' heuristic from lecture: own mobility minus opponent's.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state.
    player : hashable
        One of the two players registered with `game`.

    Returns
    ----------
    float
        -inf/+inf for lost/won positions; otherwise the difference
        between the number of moves open to `player` and to the
        opponent.
    """
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    opponent = game.get_opponent(player)
    my_mobility = len(game.get_legal_moves(player))
    opp_mobility = len(game.get_legal_moves(opponent))
    return float(my_mobility - opp_mobility)
def center_score(game, player):
    """Squared distance from the board center to `player`'s position.

    This heuristic is only used by the autograder for testing.

    Parameters
    ----------
    game : `isolation.Board`
        Current game state (provides width, height and player location).
    player : hashable
        One of the two players registered with `game`.

    Returns
    ----------
    float
        -inf/+inf for lost/won positions; otherwise the squared
        Euclidean distance between the center cell and `player`.
    """
    if game.is_loser(player):
        return float("-inf")
    if game.is_winner(player):
        return float("inf")
    center_y, center_x = game.height / 2., game.width / 2.
    row, col = game.get_player_location(player)
    return float((center_y - row) ** 2 + (center_x - col) ** 2)
class RandomPlayer():
    """Player that chooses a move randomly."""

    def get_move(self, game, time_left):
        """Return a uniformly random legal move, or (-1, -1) when none
        exist.

        Parameters
        ----------
        game : `isolation.Board`
            Current game state.
        time_left : callable
            Countdown callable in milliseconds (unused by this player).

        Returns
        ----------
        (int, int)
            A randomly selected legal move, or (-1, -1) with no moves.
        """
        options = game.get_legal_moves()
        if not options:
            return (-1, -1)
        chosen = randint(0, len(options) - 1)
        return options[chosen]
class GreedyPlayer():
    """Player that maximizes the heuristic score of the next move -- i.e.
    a minimax search agent with a depth of exactly one ply.
    """

    def __init__(self, score_fn=open_move_score):
        # Heuristic callable with signature score_fn(board, player) -> float.
        self.score = score_fn

    def get_move(self, game, time_left):
        """Return the legal move whose one-ply forecast scores highest,
        or (-1, -1) when there are no legal moves.

        Ties are broken by the larger move tuple (tuple-max semantics).

        Parameters
        ----------
        game : `isolation.Board`
            Current game state.
        time_left : callable
            Countdown callable in milliseconds (unused by this player).
        """
        candidates = game.get_legal_moves()
        if not candidates:
            return (-1, -1)
        scored_moves = [(self.score(game.forecast_move(m), self), m)
                        for m in candidates]
        best_score, best_move = max(scored_moves)
        return best_move
class HumanPlayer():
    """Player that chooses a move according to user's input."""
    def get_move(self, game, time_left):
        """
        Select a move from the available legal moves based on user input at the
        terminal.
        **********************************************************************
        NOTE: If testing with this player, remember to disable move timeout in
        the call to `Board.play()`.
        **********************************************************************
        Parameters
        ----------
        game : `isolation.Board`
            An instance of `isolation.Board` encoding the current state of the
            game (e.g., player locations and blocked cells).
        time_left : callable
            A function that returns the number of milliseconds left in the
            current turn. Returning with any less than 0 ms remaining forfeits
            the game.
        Returns
        ----------
        (int, int)
            The move in the legal moves list selected by the user through the
            terminal prompt; automatically return (-1, -1) if there are no
            legal moves
        """
        legal_moves = game.get_legal_moves()
        if not legal_moves:
            return (-1, -1)
        print(game.to_string()) #display the board for the human player
        # Show each candidate as "[index] (row, col)" on one tab-separated line.
        print(('\t'.join(['[%d] %s' % (i, str(move)) for i, move in enumerate(legal_moves)])))
        valid_choice = False
        # Re-prompt until the user enters an integer index within range.
        while not valid_choice:
            try:
                index = int(input('Select move index:'))
                valid_choice = 0 <= index < len(legal_moves)
                if not valid_choice:
                    print('Illegal move! Try again.')
            except ValueError:
                # Input was not an integer at all.
                print('Invalid index! Try again.')
        return legal_moves[index]
# Demo / smoke test: RandomPlayer vs GreedyPlayer with two scripted
# opening moves, then automatic play to completion.
if __name__ == "__main__":
    from isolation import Board
    # create an isolation board (by default 7x7)
    player1 = RandomPlayer()
    player2 = GreedyPlayer()
    game = Board(player1, player2)
    # place player 1 on the board at row 2, column 3, then place player 2 on
    # the board at row 0, column 5; display the resulting board state. Note
    # that the .apply_move() method changes the calling object in-place.
    game.apply_move((2, 3))
    game.apply_move((0, 5))
    print(game.to_string())
    # players take turns moving on the board, so player1 should be next to move
    assert(player1 == game.active_player)
    # get a list of the legal moves available to the active player
    print(game.get_legal_moves())
    # get a successor of the current state by making a copy of the board and
    # applying a move. Notice that this does NOT change the calling object
    # (unlike .apply_move()).
    new_game = game.forecast_move((1, 1))
    assert(new_game.to_string() != game.to_string())
    print("\nOld state:\n{}".format(game.to_string()))
    print("\nNew state:\n{}".format(new_game.to_string()))
    # play the remainder of the game automatically -- outcome can be "illegal
    # move", "timeout", or "forfeit"
    winner, history, outcome = game.play()
    print("\nWinner: {}\nOutcome: {}".format(winner, outcome))
    print(game.to_string())
    print("Move history:\n{!s}".format(history))
| [
"narsimhb@gmail.com"
] | narsimhb@gmail.com |
d5ca2bcbd5de3c1b9c9bac46eab8058ddbdaa268 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/1005.py | d1785f14535df9f9f8739a47a08da1ea17308063 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | import numpy as np
def solve(n, j):
front = 0
for i in range(1, len(n)):
if n[-i-1] > n[-i]:
n[-i-1] -= 1
front = i
if front:
n[-front:] = 9
if not n[0]:
n = n[1:]
print('Case #{}: {}'.format(j+1, ''.join(map(str, n))))
def main():
T = int(input())
for i in range(T):
solve(np.array(list(map(int, list(input())))), i)
if __name__ == '__main__':
main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
0ad073e1bc65db7c887ea154b22245e55b75a3c8 | 33110712aa0c2ab1c2e7b9e2053a8c1c0daeb028 | /textboxify/__init__.py | c820e3a2ff9513887137d19f3a3ded31cfa76df1 | [
"MIT"
] | permissive | andrehirano10/TextBoxify | b69a4fd6fb8d9797fe886c69be4bd193a2f636f6 | 159bf75d061974190a322e03088617eac51789a3 | refs/heads/master | 2023-03-16T06:48:40.471005 | 2019-11-06T19:47:29 | 2019-11-06T19:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | """TextBoxify is a packages for creating dialog boxes in games.
The purpose of this package is to easily implement dialog boxes
in games created with Pygame. The boxes can be simple text or
more elaborated boxes with animations and borders, because the
package offers the ability to easily customize the boxes."""
# These are available when `import textboxify` is used.
from .text import Text
from .textbox import TextBox, TextBoxFrame
# Border sprites are available with `textboxify.borders.DARK` after import or
# could be imported as: `textboxify.borders import *`.
from . import borders
| [
"henrik@tutamail.com"
] | henrik@tutamail.com |
ccd699e60459bbf8af8540cc3922d590f42b4547 | a17bed6af99239b59d693fb68bc4163339412469 | /config/config.py | 63ec2023ba7cc7bb0be221e02132d290dbd89898 | [] | no_license | Mubangizi/Epic-Mail-Backend | 43b6b5801273122302675de146cf0ce8a3162fb8 | 8cdb4414a25d08db3aa4eb6141fb91d3dbd207fc | refs/heads/master | 2022-09-27T04:48:38.307997 | 2020-03-20T14:22:31 | 2020-03-20T14:22:31 | 247,948,530 | 0 | 0 | null | 2022-09-16T18:19:15 | 2020-03-17T11:06:58 | Python | UTF-8 | Python | false | false | 584 | py | import os
class Base:
    """Shared base class for all configuration environments."""
class Development(Base):
    """Local development: debugging on, local 'epicmail' database."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = "postgresql:///epicmail"
class Testing(Base):
    """Test environment: testing and debug flags on, isolated database."""
    TESTING = True
    DEBUG = True
    # use a separate db so tests never touch development data
    SQLALCHEMY_DATABASE_URI = "postgresql:///epicmail_test_db"
class Production(Base):
    """Production: debugging off, database URI from the environment."""
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.getenv("DATABASE_URI")
# Lookup table used by the app factory to select a config class by name.
app_config = {"development": Development, "testing": Testing, "production": Production}
| [
"mubangizia22@gmail.com"
] | mubangizia22@gmail.com |
b7420d59481fb281fa17f4810d519a2e48c5a60e | 5d33fbaec7b29217106e8249800eee3977f677f0 | /load_data.py | 98c685104cb2978ccffd09288d2c1d075b1abe1f | [] | no_license | rk1998/cs7648-project | c4409fa57fb5f92795ccb44e06028b9f6746dd21 | e145cfc8f025e35214b2b44bb22b54759e172bde | refs/heads/main | 2023-06-25T02:05:00.275210 | 2021-07-25T00:56:45 | 2021-07-25T00:56:45 | 351,656,031 | 0 | 0 | null | 2021-04-29T17:31:59 | 2021-03-26T03:57:17 | Python | UTF-8 | Python | false | false | 10,925 | py | import torch
# Torch/numpy/pandas for tensors and CSV handling; NLTK and the HuggingFace
# `tokenizers` package provide the two alternative tokenization paths.
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import nltk
from nltk import word_tokenize
from nltk.tokenize import TreebankWordTokenizer
# Fetch the tokenizer models word_tokenize depends on (network side effect
# at import time).
nltk.download('punkt')
from tokenizers import BertWordPieceTokenizer
from collections import Counter
import os
import sys
from sklearn.model_selection import train_test_split
# Module-level BERT WordPiece tokenizer; requires the vocab file to exist
# in the working directory (file is read at import time).
tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)
if overfit:
data = pd.read_csv(tweet_csv_path, nrows=overfit_val)
else:
data = pd.read_csv(tweet_csv_path)
labels = data['label'].values
labels[labels == 0] = -1
labels[labels == 2] = 0
labels[labels == 4] = 1
tweets = data['text'].values
if shuffle_data:
indices = np.arange(tweets.shape[0])
np.random.shuffle(indices)
tweets = tweets[indices]
labels = labels[indices]
# if overfit:
# tweets = tweets[0:overfit_val]
# labels = labels[0:overfit_val]
# return labels, tweets
# tweet_lists = split_tweets_to_lists(tweets.values)
return labels, tweets
def load_unlabeled_tweet_csv(tweet_csv_path, all_tweets=False, num_tweets=50000):
    """Load tweets and remapped labels from a CSV.

    Reads at most `num_tweets` rows unless `all_tweets` is True. Labels
    are remapped in place from {0, 2, 4} to {-1, 0, 1} and returned as
    (tweets, labels) -- note the reversed order vs load_tweet_csv.
    """
    row_limit = None if all_tweets else num_tweets
    frame = pd.read_csv(tweet_csv_path, nrows=row_limit)
    texts = frame['text'].values
    sentiment = frame['label'].values
    # Remap raw sentiment codes {0, 2, 4} to {-1, 0, 1}.
    sentiment[sentiment == 0] = -1
    sentiment[sentiment == 2] = 0
    sentiment[sentiment == 4] = 1
    return texts, sentiment
def split_data(tweet_csv_path, test_split_percent=0.2, val_split_percent=0.2, shuffle=True, overfit=False, overfit_val=5000):
    '''
    Splits the Twitter data into training, dev, and test sets.

    Returns a 4-tuple (train_df, dev_df, test_df, vocab): three pandas
    DataFrames with 'label'/'text' columns plus the Vocab built over ALL
    tweets (before splitting). Note: the dev split is carved out of the
    training portion, so dev is val_split_percent of (1 - test_split_percent).
    '''
    labels, tweets = load_tweet_csv(tweet_csv_path, overfit=overfit, shuffle_data=shuffle, overfit_val=overfit_val)
    vocab = create_vocab(tweets)
    # Shuffling is already handled inside load_tweet_csv (and by
    # train_test_split below), so this older manual shuffle stays disabled.
    # indices = np.arange(tweets.shape[0])
    # np.random.shuffle(indices)
    # labels = labels[indices]
    # tweets = tweets[indices]
    X_train, X_test, y_train, y_test = train_test_split(tweets, labels, test_size=test_split_percent)
    test_data = pd.DataFrame({'label': y_test, 'text':X_test})
    X_train, X_dev, y_train, y_dev = train_test_split(X_train, y_train, test_size=val_split_percent)
    dev_data = pd.DataFrame({'label': y_dev, 'text':X_dev})
    train_data = pd.DataFrame({'label':y_train, 'text':X_train})
    return train_data, dev_data, test_data, vocab
def create_vocab(tweet_data):
    """Build and lock a Vocab over every lower-cased NLTK token found in
    the iterable of tweet strings `tweet_data`."""
    vocab = Vocab()
    for raw_tweet in tweet_data:
        # GetID assigns a fresh id (and counts the occurrence) as a side
        # effect; the returned id itself is not needed here.
        for token in word_tokenize(raw_tweet):
            vocab.GetID(token.lower())
    vocab.Lock()
    return vocab
class Vocab:
    '''
    Bidirectional word <-> integer-id mapping for the twitter dataset.

    Ids are assigned densely starting at 0. After `Lock()` is called new
    words are no longer added and unknown lookups return -1 (UNK).
    '''
    def __init__(self, vocabFile=None):
        """Create an empty vocab, optionally preloading 'word\\tid' lines
        from `vocabFile` (the format written by SaveVocab)."""
        self.locked = False
        self.nextId = 0
        self.word2id = {}
        self.id2word = {}
        self.word_counts = Counter()
        if vocabFile:
            # BUG FIX: close the file deterministically, and key id2word
            # by int ids -- it was keyed by the raw string, so
            # GetWord(int_id) failed for preloaded vocabularies.
            with open(vocabFile) as fIn:
                for line in fIn:
                    line = line.rstrip('\n')
                    (word, wid) = line.split('\t')
                    self.word2id[word] = int(wid)
                    self.id2word[int(wid)] = word
                    self.nextId = max(self.nextId, int(wid) + 1)
    def GetID(self, word):
        """Return the id for `word`, assigning a fresh id (and counting
        the occurrence) when unlocked; locked unknown words give -1."""
        if word not in self.word2id:
            if self.locked:
                return -1  # UNK token is -1.
            self.word_counts[word] += 1
            self.word2id[word] = self.nextId
            self.id2word[self.nextId] = word
            self.nextId += 1
        return self.word2id[word]
    def HasWord(self, word):
        """True if `word` already has an id.

        BUG FIX: dict.has_key() was removed in Python 3; use `in`.
        """
        return word in self.word2id
    def HasId(self, wid):
        """True if id `wid` maps to a word (same Python 3 fix as HasWord)."""
        return wid in self.id2word
    def GetWord(self, wid):
        """Return the word for id `wid` (KeyError for unknown ids)."""
        return self.id2word[wid]
    def SaveVocab(self, vocabFile):
        """Write 'word\\tid' lines to `vocabFile` (the format __init__
        reads back). BUG FIX: the handle is now closed via `with`."""
        with open(vocabFile, 'w') as fOut:
            for word in self.word2id.keys():
                fOut.write("%s\t%s\n" % (word, self.word2id[word]))
    def GetVocabSize(self):
        """Number of ids assigned so far (== nextId)."""
        #return self.nextId-1
        return self.nextId
    def GetWords(self):
        """View of all known words."""
        return self.word2id.keys()
    def convert_to_words(self, word_ids):
        """
        Converts a list of word ids to their actual words in the vocabulary
        Inputs:
            word_ids: list(int) - list of word ids
        Returns:
            str: the space-joined words ("" for an empty list)
        """
        # Idiomatic join replaces the old manual concatenation loop.
        return " ".join(self.GetWord(wid) for wid in word_ids)
    def Lock(self):
        """Freeze the vocabulary: subsequent unknown lookups return -1."""
        self.locked = True
class TwitterDataset:
    '''
    Tokenized tweets plus their sentiment labels.

    Converts raw tweet text into lists of token ids -- either via NLTK's
    word_tokenize and a Vocab, or via the module-level BERT WordPiece
    tokenizer -- wraps them in torch.LongTensors, and shuffles examples
    and labels in unison.
    '''
    def __init__(self, data_frame, vocab = None, use_bert_tokenizer=False):
        """Build the dataset from a DataFrame with 'label' and 'text'
        columns; an existing Vocab may be shared across splits (it is
        locked after construction)."""
        # labels, tweet_list = load_tweet_csv(twitter_csv_path)
        self.labels = data_frame['label'].values
        tweet_list = data_frame['text'].values
        self.length = len(self.labels)
        self.use_bert_tokenizer = use_bert_tokenizer
        # self.tweet_list = tweet_list
        if not vocab:
            self.vocab = Vocab()
        else:
            self.vocab = vocab
        self.Xwordlist = []
        if self.use_bert_tokenizer:
            for tweet in tweet_list:
                wordlist = tokenizer.encode(tweet).ids
                self.Xwordlist.append(wordlist)
        else:
            # Tokens a locked vocab maps to -1 (UNK) are dropped entirely.
            for tweet in tweet_list:
                wordlist = [self.vocab.GetID(w.lower()) for w in word_tokenize(tweet) if self.vocab.GetID(w.lower()) >= 0]
                self.Xwordlist.append(wordlist)
        if self.use_bert_tokenizer:
            self.vocab_size = tokenizer.get_vocab_size()
        else:
            self.vocab_size = self.vocab.GetVocabSize()
        self.vocab.Lock()
        index = np.arange(len(self.Xwordlist))
        np.random.shuffle(index) #randomly shuffle words and labels
        self.Xwordlist = [torch.LongTensor(self.Xwordlist[i]) for i in index]
        self.labels = self.labels[index]
    def convert_text_to_ids(self, text_list):
        """Tokenize each string in `text_list` into a torch.LongTensor of
        token ids using this dataset's tokenization scheme."""
        id_list = []
        if self.use_bert_tokenizer:
            for item in text_list:
                wordlist = tokenizer.encode(item).ids
                # wordlist = [self.vocab.GetID(w.lower()) for w in word_tokenize(item) if self.vocab.GetID(w.lower()) >= 0]
                id_list.append(wordlist)
        else:
            for item in text_list:
                # wordlist = tokenizer.encode(item).ids
                word_tokens = word_tokenize(item)
                wordlist = []
                for w in word_tokens:
                    id = self.vocab.GetID(w.lower())
                    if id >= 0:
                        wordlist.append(id)
                # wordlist = [self.vocab.GetID(w.lower()) for w in word_tokenize(item) if self.vocab.GetID(w.lower()) >= 0]
                id_list.append(wordlist)
        id_list = [torch.LongTensor(id_list[i]) for i in range(0, len(id_list))]
        return id_list
    def convert_to_words(self, id_list):
        """Decode a list of token ids back into a single string (BERT
        detokenization, or space-joined Vocab words)."""
        if self.use_bert_tokenizer:
            tweet = tokenizer.decode(id_list)
        else:
            output = ""
            for i in range(len(id_list)):
                word_i = self.vocab.GetWord(id_list[i])
                if i == 0:
                    output = word_i
                else:
                    output = output + " " + word_i
            return output
        return tweet
    def get_word_counts(self, word_ids):
        """Training-corpus frequency for each id in `word_ids` (a tensor
        of ids; each element is unwrapped with .item())."""
        counts = [self.vocab.word_counts[self.vocab.id2word[id.item()]] for id in word_ids]
        return counts
def load_twitter_data(tweet_filepath, test_split_percent=0.2, val_split_percent=0.2, shuffle=True, overfit=False, use_bert=False, overfit_val=500):
    '''
    Loads the twitter csv file, splits it into training, dev, and test
    data, and returns them as TwitterDataset objects.

    When use_bert is False all three datasets share the vocabulary built
    by split_data; otherwise the BERT WordPiece tokenizer is used and
    that vocabulary is ignored.
    '''
    print("Splitting Data")
    train_data, dev_data, test_data, vocab = split_data(tweet_filepath, test_split_percent=test_split_percent, shuffle=shuffle, val_split_percent=val_split_percent, overfit=overfit, overfit_val=overfit_val)
    print("Converting to Indices")
    if not use_bert:
        train_dataset = TwitterDataset(train_data, vocab=vocab)
        dev_dataset = TwitterDataset(dev_data, vocab=vocab)
        test_dataset = TwitterDataset(test_data, vocab=vocab)
    else:
        train_dataset = TwitterDataset(train_data, use_bert_tokenizer=use_bert)
        dev_dataset = TwitterDataset(dev_data, use_bert_tokenizer=use_bert)
        test_dataset = TwitterDataset(test_data, use_bert_tokenizer=use_bert)
    return train_dataset, dev_dataset, test_dataset
def load_twitter_data_active_learning(tweet_filepath, test_split_percent=0.2, val_split_percent=0.2, seed_size=1000, overfit=False, overfit_val=500):
    """Split the tweet CSV for active learning.

    Returns (seed, unlabeled, dev, test) TwitterDataset objects: the
    first `seed_size` training rows form the labeled seed pool, the
    remainder the 'unlabeled' pool, and all splits share the vocabulary
    built from the full training split.
    """
    # BUG FIX: split_data returns four values (train, dev, test, vocab);
    # the old three-name unpacking raised ValueError at runtime. The
    # vocab is discarded here because the training dataset builds its
    # own below, preserving the original intent.
    train_data, dev_data, test_data, _vocab = split_data(tweet_filepath,
                                                         test_split_percent=test_split_percent,
                                                         val_split_percent=val_split_percent,
                                                         overfit=overfit,
                                                         overfit_val=overfit_val)
    train_dataset = TwitterDataset(train_data)
    # Positional slices: the first seed_size rows are the labeled seed.
    seed_data = pd.DataFrame({'label':train_data['label'][0:seed_size], 'text':train_data['text'][0:seed_size]})
    unlabeled_frame = pd.DataFrame({'label':train_data['label'][seed_size:], 'text':train_data['text'][seed_size:]})
    seed_dataset = TwitterDataset(seed_data, vocab=train_dataset.vocab)
    unlabeled_dataset = TwitterDataset(unlabeled_frame, vocab=train_dataset.vocab)
    dev_dataset = TwitterDataset(dev_data, vocab=train_dataset.vocab)
    test_dataset = TwitterDataset(test_data, vocab=train_dataset.vocab)
    return seed_dataset, unlabeled_dataset, dev_dataset, test_dataset
def main():
    """Ad-hoc smoke test: load the active-learning splits from a sample
    CSV and print their sizes plus one decoded example."""
    twitter_csv_path = "..\\twitter_test.csv"
    # train_dataset, dev_data, test_dataset = load_twitter_data(twitter_csv_path, split_percent=0.3, overfit=True)
    seed_dataset, unlabeled_dataset, dev_dataset, test_dataset = load_twitter_data_active_learning(twitter_csv_path, test_split_percent=0.2, overfit=True, overfit_val=12000)
    # tweet_data = TwitterDataset(twitter_csv_path)
    print(seed_dataset.length)
    print(unlabeled_dataset.length)
    print(dev_dataset.length)
    print(test_dataset.length)
    # Show one tokenized example: raw ids, decoded words, and its label.
    print(seed_dataset.Xwordlist[2].tolist())
    # print(train_dataset.Xwordlist[0].tolist())
    print([seed_dataset.vocab.GetWord(x) for x in seed_dataset.Xwordlist[2].tolist()])
    print(seed_dataset.labels[2])
    print(seed_dataset.labels[0:10])
# Run the smoke test when this module is executed directly.
if __name__ == '__main__':
    main()
| [
"rohithk98@gmail.com"
] | rohithk98@gmail.com |
a8cbba2afa5ccdb4f7f8580d54d4c531a781cf8e | b1bddabb1fb57b06c652594ec0e6c09c8d0c4650 | /ROOK/gamestatistics.py | 6f7fa834cd0345a0a6da82e620f54a43a8c7056a | [] | no_license | JacobClark95/Personal_Projects | bda4529a792f870b264676aa287662994f322798 | 56753aff22f6545989e199ae85b2909c17f0866f | refs/heads/master | 2020-04-11T00:47:05.744449 | 2018-12-11T21:39:27 | 2018-12-11T21:39:27 | 161,395,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | from player import Team
from collection import Collection
from card import Card
import time
class GameStatistics():
def __init__(self):
self.roundNum = 1
self.roundLeader = None
self.sittingOrder = []
self.teams = {Team.BID_WINNER:[], Team.REGULAR:[]}
self.cardsWon = {}
self.cardsPlayedThisRound = {}
self.cardsNotPlayed = {}
self.playersMissingSuit = {}
def startGame(self):
self.cardsWon = {player : Collection() for player in self.sittingOrder}
self.cardsPlayedThisRound = {player : None for player in self.sittingOrder}
self.playersMissingSuit = {player : [] for player in self.sittingOrder}
self.cardsNotPlayed = Collection().complement()
def playCard(self, playerID, card):
if card.suit != Card.lead:
self.playersMissingSuit[playerID].append(Card.lead)
self.cardsNotPlayed.remove([card])
self.cardsPlayedThisRound[playerID] = card
def endRound(self):
winner = self.whoIsWinningRound()
self.cardsWon[winner].add(list(self.cardsPlayedThisRound.values()))
self.cardsPlayedThisRound = {player : None for player in self.sittingOrder}
self.roundNum += 1
self.roundLeader = winner
def pointsToBePlayed(self):
cardsLeftToBePlayed = list(self.cardsPlayedThisRound.values()).count(None) - 1
averageCardValue = self.cardsNotPlayed.pointValue() / len(self.cardsNotPlayed)
return cardsLeftToBePlayed * averageCardValue
def oddsOfWinning(self, playerID, card):
""" this returns the probability that you win minus the probability that the
opponent wins"""
myTeam = Team.BID_WINNER if playerID in self.teams[Team.BID_WINNER] else Team.REGULAR
winningPlayer = self.whoIsWinningRound()
if winningPlayer == None:
#TODO: startingGame
return None
else:
winningTeam = Team.BID_WINNER if winningPlayer in self.teams[Team.BID_WINNER] else Team.REGULAR
winningCard = self.cardsPlayedThisRound[winningPlayer]
if winningCard > card and winningTeam != myTeam:
#TODO: add the possability that the our team will win
return -1
else:
#TODO: subtract the possability that the other team will win
return 1
def whoIsWinningRound(self):
playedCards = list(set(self.cardsPlayedThisRound.values()))
if None in playedCards:
playedCards.remove(None)
if len(playedCards) == 0:
return None
winningCard = max(playedCards)
return list(self.cardsPlayedThisRound.keys()) \
[list(self.cardsPlayedThisRound.values()).index(winningCard)]
| [
"jacobclark@Jacobs-MBP-3-010034237103.app.byu.edu"
] | jacobclark@Jacobs-MBP-3-010034237103.app.byu.edu |
0f91df3955342d356b8bdf5ba87bb6bf4ba74fd1 | a64d2a7ef8f4dd53cb366c4ba5f0065f9f9ed33c | /flatten.py | 11aaeebf6f8d62104cc0c854e339ce6ed438f08e | [] | no_license | DerekRoy/CMPT414FinalAssignment | 2b5ed011bac50e8759815394c154bd1afa8edac3 | f4ca585e3a9b382f2c1648bbbae17e1da1c810ca | refs/heads/master | 2021-03-15T17:07:57.687593 | 2020-04-11T18:04:46 | 2020-04-11T18:04:46 | 246,867,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import numpy as np
def is_array(potential_array):
return isinstance(potential_array, np.ndarray) or isinstance(potential_array, list)
class flatten:
def __init__(self, input_shape):
self.output_shape = (np.prod(input_shape), 1)
def out(self):
return self.output_shape
def flatten(self, array):
return array.reshape(self.output_shape)
| [
"alex@minderov.com"
] | alex@minderov.com |
932cad2a12e44b2877efbb474a3bd04f002556c0 | d18abe85b1f1a354b04018ac9378a72a14430de9 | /cdrentalDatabase/cd/clerk.py | 019e3d0f5dbceba42cf3eabd8335d88d365fa9e7 | [] | no_license | diofelpallega/CSC-183-Activities | ce73f9d474e6db1df946e7cfb17bf16b484b37ae | 4b83cf93241c7f765546379abd2771c6b7f3b56e | refs/heads/master | 2021-01-01T18:38:13.045771 | 2014-12-11T12:34:30 | 2014-12-11T12:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from dosql import *
class Clerk(object):
def __init__(self):
pass
def checkout(self,cdid,customerid):
b = doSql()
query = "SELECT get_customer_rented('"+customerid +"' );"
items = b.execqry(query,False)
result = []
for item in items:
stringed = map(str, item)
result.append(stringed)
[[new]] = result
return new
| [
"diofel.pallega@gmail.com"
] | diofel.pallega@gmail.com |
d370737bcbe5ffd75991ed89aaab50c8b82d4490 | f53c1fe797979a3f36458362195b22be2a819f06 | /database.py | 7170cbef29920d91f654ffdffc4bb52bb37556f9 | [] | no_license | Amritha777/MediBoT | 82738f77040bcda7b8814f6c28b283a9c171ef0d | f1070466b5ad0ff6db03b4b15304a2fa957d0d1f | refs/heads/master | 2021-09-05T02:50:22.139714 | 2018-01-23T19:13:46 | 2018-01-23T19:13:46 | 118,656,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | import pymysql
import json
class DB:
db = None
def __init__(self):
try:
self.db=pymysql.connect("localhost","root","123456789","MediBOT")
except Exception as e:
print("Database not Found")
def get_med(disease):
cursor=db.cursor()
cursor.execute("select medicine from medicines where Illness={} ".format(disease))
result = cursor.fetchall()
| [
"noreply@github.com"
] | noreply@github.com |
5c4b29381d1a76a65cb65bf7d3de213c703febac | c63b62dc2eb7e873289040df6377f47a168c6d31 | /flask/bin/jupyter-migrate | 8bf29f46c530501d07406642f52fa211ddd74bd4 | [] | no_license | onepau/baromontres | c801299b0849100fb6b27ba0551c73cafc72afcf | ae538e46c47a76d61c01c5f3d6ad1513eb0dced1 | refs/heads/master | 2021-01-22T19:22:37.271408 | 2017-03-17T17:30:35 | 2017-03-17T17:30:35 | 85,192,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/Users/pauloneil/ML/baromontres_flask/flask/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.migrate import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"pauloneil@MacBook-Air.local"
] | pauloneil@MacBook-Air.local | |
4e06e836da5cf68854d332fed25e2651f482fe55 | 2ba87ce6df8179d06599c08b9afaadaea0a6dfee | /io模型/BIO.py | 276806b66441af383e6db2328feee980048f0d85 | [] | no_license | github-zbp/operating-system | 1e85446a3748fe663eb064cf64d06f2644cde5ff | 5a55405c2059848abd46ab65de9581d246092675 | refs/heads/master | 2023-01-11T23:04:29.396614 | 2020-11-09T07:20:22 | 2020-11-09T07:20:22 | 264,598,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,102 | py | # coding=utf-8
from threading import Thread, currentThread
import socket
# 服务端代码
# 创建套接字
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 绑定ip和端口
ip = "127.0.0.1"
port = 8000
server.bind((ip, port))
# 监听套接字
server.listen()
print("服务已开启")
def contact(client):
print("客户端 %s 已成功连接" % currentThread().name)
msg = client.recv(1024).decode("utf-8") # 接收客户端发送到服务端的消息,这里也会收到阻塞
while msg: # 允许接收客户端发送多次消息,如果对方发送空字符,则认为客户端断开连接,此时结束该线程
print("客户端 %s 发送信息:%s" % (currentThread().name, msg))
msg = client.recv(1024).decode("utf-8")
print("客户端 %s 断开连接" % currentThread().name)
while True:
print("等待接收客户端连接")
client,addr = server.accept() # 接受连接, 这里会受到阻塞
# 创建线程用于客户端和服务端通信
thread = Thread(target=contact, args=(client,))
thread.start()
| [
"1640632344@qq.com"
] | 1640632344@qq.com |
186f257abc7925e21ac032fd77c21762f7c90112 | 21d9a124b7ae9529e4f0f07f5643ea0386b2543d | /python/HelloWorld/TFunRef.py | 3eaf2a7834e308462b5ea365feb6461077fa1340 | [] | no_license | gitsoftsun/Learn | 7d8a1257940bf5264db08b46e7af783d59f482eb | c8df897507ccda655b467b35f8d37636a269a5f0 | refs/heads/master | 2016-09-06T18:24:45.318306 | 2015-04-11T09:37:44 | 2015-04-11T09:37:44 | 29,095,566 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | #coding=utf-8
# python 中所有参数, -传引用
#function defination is here
def printStr(str):
"print str"
str = "change value in function";
print str;
return;
#you can call printStr function
str = "Hello";
print str;
printStr(str);
#function defination
def printInfo(name, age):
"print info"
print "My name is : %s, I'm %d years!" %(name, age);
return;
#call printinfo function
printInfo("lzy", 23);
#命名参数
printInfo(age=22, name="lll");
#缺省参数- 在定义的时候, 给定初始值
#例如:
def printM(name="lll", age=2):
"缺省参数"
print name, age;
return;
#call function
printM();
#不定长参数
#function defination
def printSomeInfo(*varTuple):
"print some info"
for var in varTuple:
print var;
return;
#call
printSomeInfo("lzy", 23, "ZJU");
printSomeInfo();
printSomeInfo([23, "Hello"]);
#anonymous function defination
sum = lambda arg1, arg2: arg1+arg2;
#call function
print "the value of total : ", sum(12, 3);
print "the sum of values : ", sum(1, 2);
result = lambda arg1, arg2, arg3: (arg1+arg2)%arg3;
#call result function
print "result is : ", result(2, 2, 2); | [
"cstlizy@hotmail.com"
] | cstlizy@hotmail.com |
81e5483abb52cfea3eba17ca5089a2da327ca720 | 21577a16046e9e6d80db673ce877f1e76a84ffb1 | /a2c.py | de686e9f806c7950abcfc46ce3c6b5d474611cf7 | [] | no_license | BenoitLeguay/DNA_RL | e741e8404990e2689bf8240455f66c29f7108d42 | ffe92ac257522f58555310180cd8d79c785ddb77 | refs/heads/master | 2023-03-12T19:01:47.985028 | 2021-03-01T14:32:48 | 2021-03-01T14:32:48 | 273,250,548 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,088 | py | import numpy as np
import torch
from torch import nn
import variable as var
import utils
class A2C:
def __init__(self, init_a2c):
self.discount_factor = init_a2c["discount_factor"]
self.state_dim = init_a2c["state_dim"]
self.action_dim = init_a2c["action_space"]
self.critic = Critic(init_a2c['critic']).cuda()
self.actor = var.actor_types[init_a2c['actor_type']](init_a2c['actor']).cuda()
self.optim_actor = None
self.random_generator = np.random.RandomState(seed=init_a2c['seed'])
self.next_state = None
self.next_action = None
self.init_optimizers(critic_optimizer=init_a2c['critic']['optimizer'],
actor_optimizer=init_a2c['actor']['optimizer'])
def init_optimizers(self, critic_optimizer={}, actor_optimizer={}):
self.critic.init_optimizer(critic_optimizer)
self.actor.init_optimizer(actor_optimizer)
def policy(self, state):
with torch.no_grad():
action = self.actor.predict_action(state)
return action
def episode_init(self, state):
state = utils.to_tensor(state).view((1, ) + state.shape)
action = self.policy(state)
self.next_action = action
self.next_state = state
return action.cpu().numpy()
def update(self, state, reward, done):
state = utils.to_tensor(state).view((1, ) + state.shape)
next_action = -1
if not done:
next_action = self.update_step(state, reward)
if done:
self.update_end(reward)
return next_action
def update_step(self, next_state, reward):
current_action = self.next_action
current_state = self.next_state
next_state_value = self.critic.estimate_state(next_state)
current_state_value = self.critic.estimate_state(current_state)
td_target = reward + self.discount_factor * next_state_value
td_error = td_target - current_state_value
self.actor.update(current_state, current_action, td_error)
self.critic.update(current_state_value, td_target)
next_action = self.policy(next_state)
self.next_state = next_state
self.next_action = next_action
return next_action.cpu().numpy()
def update_end(self, reward):
current_action = self.next_action
current_state = self.next_state
current_state_value = self.critic.estimate_state(current_state)
td_target = utils.to_tensor([[float(reward)]])
td_error = td_target - current_state_value
self.actor.update(current_state, current_action, td_error)
self.critic.update(current_state_value, td_target)
class Critic(torch.nn.Module):
def __init__(self, critic_init):
super(Critic, self).__init__()
network_init = critic_init['network']
self.relu = nn.ReLU(inplace=False)
self.conv1 = nn.Sequential(
nn.ConstantPad1d(15 // 2, 0.25),
nn.Conv1d(4, 400, 15),
nn.LeakyReLU(0.1),
nn.AdaptiveMaxPool1d(1))
self.l1 = nn.Linear(network_init["i_size"], network_init["l1_size"])
self.l2 = nn.Linear(network_init["l1_size"], network_init["l2_size"])
self.o = nn.Linear(network_init["l2_size"], 1)
self.optimizer = None
self.loss = torch.nn.MSELoss()
self.loss_history = list()
self.state_representation = critic_init['state_representation']
def init_optimizer(self, optimizer_args):
self.optimizer = torch.optim.Adam(self.parameters(), **optimizer_args)
def forward(self, x):
if self.state_representation == 'raw':
x = self.conv1(x).squeeze(dim=2)
x = self.relu(self.l1(x))
x = self.relu(self.l2(x))
x = self.o(x)
return x
def estimate_state(self, state):
return self(state)
def update(self, current_state_value, td_target):
loss = self.loss(current_state_value, td_target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.loss_history.append(loss.item())
class Actor(torch.nn.Module):
def __init__(self, actor_init):
super(Actor, self).__init__()
torch.manual_seed(actor_init['seed'])
network_init = actor_init['network']
self.action_dim = actor_init['action_dim']
self.entropy_learning_rate = actor_init['entropy_learning_rate']
self.optimizer = None
self.loss_history = list()
self.state_representation = actor_init["state_representation"]
self.hidden_size = network_init["hidden_size"]
self.l1_size = network_init["l1_size"]
self.relu = nn.ReLU()
self.conv1 = nn.Sequential(
nn.ConstantPad1d(15 // 2, 0.25),
nn.Conv1d(4, 400, 15),
nn.LeakyReLU(0.1),
nn.AdaptiveMaxPool1d(1))
self.l1 = nn.Linear(network_init["i_size"] + self.hidden_size, self.l1_size)
self.l1_to_h = nn.Linear(self.l1_size, self.hidden_size)
self.l1_to_o = nn.Linear(self.l1_size, network_init["o_size"])
self.softmax = nn.Softmax(dim=1)
def init_optimizer(self, optimizer_args):
self.optimizer = torch.optim.Adam(self.parameters(), **optimizer_args)
def forward(self, x, hidden):
if self.state_representation == 'raw':
x = self.conv1(x).squeeze(dim=2)
combined = torch.cat((x, hidden), 1)
l1_out = self.relu(self.l1(combined))
hidden = self.relu(self.l1_to_h(l1_out))
output = self.relu(self.l1_to_o(l1_out))
output = self.softmax(output) # .clone()
return output, hidden
def rnn_forward(self, x):
# hidden = torch.zeros(1, self.hidden_size, device=var.device) slower than empty and then fill
hidden = torch.empty(x.shape[0], self.hidden_size, device=var.device).fill_(0)
outputs = list()
for _ in range(self.action_dim):
output, hidden = self(x, hidden)
outputs.append(output)
return torch.stack(outputs, dim=0).squeeze(dim=1)
def predict_action(self, state): # return an action
action_probabilities = self.rnn_forward(state)
action_distributions = torch.distributions.Categorical(probs=action_probabilities)
return action_distributions.sample()
def update(self, state, action, td_error):
actions_probabilities = self.rnn_forward(state)
action_chosen_prob = torch.gather(actions_probabilities, dim=1, index=action.unsqueeze(dim=1))
sum_entropy = torch.distributions.Categorical(probs=actions_probabilities).entropy().sum()
loss = -torch.log(action_chosen_prob.prod()) * td_error - self.entropy_learning_rate * sum_entropy
self.optimizer.zero_grad()
loss.backward(retain_graph=True)
self.optimizer.step()
self.loss_history.append(loss.item())
class ActorVanilla:
def __init__(self, actor_vanilla_init):
self.action_names = ['co_length', 'opt_start_point', 'co_start_point']
self.actors = {action_name: OneActionActor(actor_vanilla_init[action_name])
for action_name in self.action_names}
self.loss_history = list()
def cuda(self):
for actor_name, actor in self.actors.items():
self.actors[actor_name] = actor.cuda()
return self
def init_optimizer(self, optimizer_args):
for actor_name, actor in self.actors.items():
actor.init_optimizer(optimizer_args=optimizer_args[actor_name])
def predict_action(self, state):
actions_chosen = list()
for actor_name, actor in self.actors.items():
actions_chosen.append(actor.predict_action(state))
return utils.to_tensor(actions_chosen).long()
def update(self, state, action, td_error):
loss_value = 0.0
for idx, (actor_name, actor) in enumerate(self.actors.items()):
loss_value += actor.update(state.clone(), action[idx].clone(), td_error.clone())
self.loss_history.append(loss_value)
class OneActionActor(torch.nn.Module):
def __init__(self, one_action_actor_init):
network_init = one_action_actor_init['network']
super(OneActionActor, self).__init__()
torch.manual_seed(one_action_actor_init['seed'])
self.entropy_learning_rate = one_action_actor_init['entropy_learning_rate']
self.optimizer = None
self.loss_history = list()
self.state_representation = one_action_actor_init["state_representation"]
self.relu = nn.ReLU()
self.conv1 = nn.Sequential(
nn.ConstantPad1d(15 // 2, 0.25),
nn.Conv1d(4, 400, 15),
nn.LeakyReLU(0.1),
nn.AdaptiveMaxPool1d(1))
self.l1 = nn.Linear(network_init["i_size"], network_init["l1_size"])
self.l2 = nn.Linear(network_init["l1_size"], network_init["l2_size"])
self.l3 = nn.Linear(network_init["l2_size"], network_init["o_size"])
self.softmax = nn.Softmax(dim=1)
def init_optimizer(self, optimizer_args):
self.optimizer = torch.optim.Adam(self.parameters(), **optimizer_args)
def forward(self, x):
if self.state_representation == 'raw':
x = self.conv1(x).squeeze(dim=2)
x = self.relu(self.l1(x))
x = self.relu(self.l2(x))
x = self.softmax(self.l3(x))
return x
def predict_action(self, state): # return an action
action_probabilities = self(state)
action_distributions = torch.distributions.Categorical(probs=action_probabilities)
return action_distributions.sample()
def update(self, state, action, td_error):
actions_probabilities = self(state)
action_chosen_prob = torch.gather(actions_probabilities.squeeze(), dim=0, index=action)
sum_entropy = torch.distributions.Categorical(probs=actions_probabilities).entropy().sum()
loss = -torch.log(action_chosen_prob.prod()) * td_error - self.entropy_learning_rate * sum_entropy
self.optimizer.zero_grad()
loss.backward(retain_graph=True)
self.optimizer.step()
loss_value = loss.item()
self.loss_history.append(loss_value)
return loss_value
class NActionActor(torch.nn.Module):
def __init__(self, one_action_actor_init):
network_init = one_action_actor_init['network']
super(NActionActor, self).__init__()
torch.manual_seed(one_action_actor_init['seed'])
self.action_dim = network_init["o_size"]//3
self.entropy_learning_rate = one_action_actor_init['entropy_learning_rate']
self.optimizer = None
self.loss_history = list()
self.state_representation = one_action_actor_init["state_representation"]
self.relu = nn.ReLU()
self.conv1 = nn.Sequential(
nn.ConstantPad1d(15 // 2, 0.25),
nn.Conv1d(4, 400, 15),
nn.LeakyReLU(0.1),
nn.AdaptiveMaxPool1d(1))
self.l1 = nn.Linear(network_init["i_size"], network_init["l1_size"])
self.l2 = nn.Linear(network_init["l1_size"], network_init["l2_size"])
self.l3 = nn.Linear(network_init["l2_size"], network_init["o_size"])
self.softmax = nn.Softmax(dim=1)
self.softmax_dim_0 = nn.Softmax(dim=0)
def init_optimizer(self, optimizer_args):
self.optimizer = torch.optim.Adam(self.parameters(), **optimizer_args)
def forward(self, x):
if self.state_representation == 'raw':
x = self.conv1(x).squeeze(dim=2)
x = self.relu(self.l1(x))
x = self.relu(self.l2(x))
x = self.relu(self.l3(x))
return x[0]
def predict_action(self, state): # return an action
action_values = self(state)
actions_chosen = list()
for idx in range(0, self.action_dim * 3, self.action_dim):
probabilities = self.softmax_dim_0(action_values[idx:idx + self.action_dim])
dis = torch.distributions.Categorical(probs=probabilities)
actions_chosen.append(dis.sample())
return utils.to_tensor(actions_chosen).long()
def update(self, state, action, td_error):
action_values = self(state)
probabilities = list()
for idx in range(0, self.action_dim * 3, self.action_dim):
prob = self.softmax_dim_0(action_values[idx:idx + self.action_dim])
probabilities.append(prob)
probabilities = torch.stack(probabilities)
action_chosen_prob = torch.gather(probabilities, dim=1, index=action.unsqueeze(dim=1))
sum_entropy = torch.distributions.Categorical(probs=probabilities).entropy().sum()
loss = -torch.log(action_chosen_prob.prod()) * td_error - self.entropy_learning_rate * sum_entropy
self.optimizer.zero_grad()
loss.backward(retain_graph=True)
self.optimizer.step()
loss_value = loss.item()
self.loss_history.append(loss_value)
return loss_value
| [
"benoit.leguay@gmail.com"
] | benoit.leguay@gmail.com |
0c368647695205def899acb08a90a2cba0279ade | 644db7457d18200ffc709f5c24ebeba4f2a7a813 | /TensorFlow/Chapter10/1. GPU基本操作.py | 3d586000e850dd3aab94e29817532f23aac40be4 | [
"MIT"
] | permissive | yisampi/Notes-of-TensorFlow-and-Keras | b3619824b0ea7be322af10ae26a7b5c7f5220ece | 9805c37ae2ccad961214f93101208af6a8efe144 | refs/heads/master | 2020-05-23T21:43:07.820042 | 2019-05-09T02:55:39 | 2019-05-09T02:55:39 | 186,960,119 | 2 | 0 | MIT | 2019-05-16T05:51:03 | 2019-05-16T05:51:01 | null | UTF-8 | Python | false | false | 975 | py | # coding=utf-8
import tensorflow as tf
a = tf.constant([1.0, 2.0, 3.0], shape=[3], name='a')
b = tf.constant([1.0, 2.0, 3.0], shape=[3], name='b')
c = a + b
# 通过log_device_placement参数来记录运行每一个运算的设备。
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print sess.run(c)
# 通过tf.device将运算指定到特定的设备上。
with tf.device('/cpu:0'):
a = tf.constant([1.0, 2.0, 3.0], shape=[3], name='a')
b = tf.constant([1.0, 2.0, 3.0], shape=[3], name='b')
with tf.device('/gpu:1'):
c = a + b
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print sess.run(c)
a_cpu = tf.Variable(0, name="a_cpu")
with tf.device('/gpu:0'):
a_gpu = tf.Variable(0, name="a_gpu")
# 通过allow_soft_placement参数自动将无法放在GPU上的操作放回CPU上。
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True))
sess.run(tf.global_variables_initializer())
| [
"1679871747@qq.com"
] | 1679871747@qq.com |
7f963dcecc9d3f15af04ffcd545a9236de141722 | ef208d5a0cfcba5cfd3fdf3780b9511b4291f71a | /Project/get_tweets.py | c7e15510fdf212ef1aab0a14a04624b9358be253 | [] | no_license | freshlybreemed/BricksquadApplication | df73b0f5d9e5b2513a3e4b7cc124e739b3073d03 | b42ec5f7107f2194f0cf6c6dc9ceb04208dc8978 | refs/heads/master | 2021-05-28T13:03:45.518778 | 2014-03-19T23:14:20 | 2014-03-19T23:14:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import re
import sys
consumer_key="Aon0bjz4YJrSvK5AaYOmaA"
consumer_secret="ok6n7nLFnTK7sPgfJy9KePVWWpNLsf66ZjG2qxybE"
access_token="702257119-3JFCScVL8HSioH3TwxEvgXESsLFcRrGSrmFHXnV6"
access_token_secret="lA1fKzQbHMJWtPYdhcgOYTtFnyhuob16ItiWHO6hw"
CUTOFF = -1
class StdOutListener(StreamListener):
count = 0
def on_data(self, data):
tweet = json.loads(data)
if self.count == CUTOFF : exit(0)
if "lang" in tweet["user"] and tweet["user"]["lang"] == "en":
print "\t".join([tweet["text"],tweet["user"]["screen_name"]]).encode('ascii', 'ignore').replace('\n', ' ')
self.count += 1
return True
def on_error(self, status):
print 'Error: ', status
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
if len(sys.argv) < 2 :
print 'USAGE : python get_tweets.py 1000 Apple > apple_tweets.txt'
exit(0)
try :
CUTOFF = int(sys.argv[1])
company_names = sys.argv[2:]
except : company_names = sys.argv[1:]
stream.filter(track=['follow me on IG'])
| [
"ebrima.jobe92@gmail.com"
] | ebrima.jobe92@gmail.com |
687597834631051ff823fce8248de22de124ebb5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02553/s242742740.py | 19cb61040111d626c69f31e200e94dcd971f47c1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | a,b,c,d = map(int,input().split())
s = max(max(a*c,a*d),max(b*c,b*d))
print(s) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
4b4fb06a5c7779a15bbde10c3ca456691d7aa16b | 2ed6ad4a736879a47d192159da45ca56610c089a | /tests/test_utils.py | 5322f50e74f0e19c141fd1adbdd2a5b05e92fb39 | [
"MIT"
] | permissive | poonyisaTH/gsheets-db-api | a82bd35984766697757cc96aa74a1281d948f019 | f023b32986d4da9a501fca8d435f2b6edc153353 | refs/heads/master | 2023-05-29T15:01:10.604324 | 2021-02-17T20:59:41 | 2021-02-17T20:59:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,952 | py | # -*- coding: utf-8 -*-
import unittest
from moz_sql_parser import parse
import pyparsing
from .context import format_gsheet_error, format_moz_error
class UtilsTestSuite(unittest.TestCase):
def test_format_moz_error(self):
query = 'SELECT ))) FROM table'
with self.assertRaises(pyparsing.ParseException) as context:
parse(query)
result = format_moz_error(query, context.exception)
expected = (
'SELECT ))) FROM table\n'
' ^\n'
'Expected {{expression1 [{[as] column_name1}]} | "*"} '
'(at char 7), (line:1, col:8)'
)
self.assertEqual(result, expected)
def test_format_gsheet_error(self):
query = 'SELECT A + B FROM "http://docs.google.com"'
translated_query = 'SELECT A + B'
errors = [{
'reason': 'invalid_query',
'detailed_message': (
"Invalid query: Can't perform the function sum on values that "
"are not numbers"
),
'message': 'INVALID_QUERY',
}]
result = format_gsheet_error(query, translated_query, errors)
expected = (
'Original query:\n'
'SELECT A + B FROM "http://docs.google.com"\n\n'
'Translated query:\n'
'SELECT A + B\n\n'
'Error:\n'
"Invalid query: Can't perform the function sum on values that "
"are not numbers"
)
self.assertEqual(result, expected)
def test_format_gsheet_error_caret(self):
query = 'SELECT A IS NULL FROM "http://docs.google.com"'
translated_query = 'SELECT A IS NULL'
errors = [{
'reason': 'invalid_query',
'detailed_message': (
'Invalid query: PARSE_ERROR: Encountered " "is" "IS "" at '
'line 1, column 10.\nWas expecting one of:\n'
' <EOF> \n'
' "where" ...\n'
' "group" ...\n'
' "pivot" ...\n'
' "order" ...\n'
' "skipping" ...\n'
' "limit" ...\n'
' "offset" ...\n'
' "label" ...\n'
' "format" ...\n'
' "options" ...\n'
' "," ...\n'
' "*" ...\n'
' "+" ...\n'
' "-" ...\n'
' "/" ...\n'
' "%" ...\n'
' "*" ...\n'
' "/" ...\n'
' "%" ...\n'
' "+" ...\n'
' "-" ...\n'
' '
),
'message': 'INVALID_QUERY',
}]
result = format_gsheet_error(query, translated_query, errors)
expected = (
'Original query:\n'
'SELECT A IS NULL FROM "http://docs.google.com"\n\n'
'Translated query:\n'
'SELECT A IS NULL\n\n'
'Error:\n'
'SELECT A IS NULL\n'
' ^\n'
'Invalid query: PARSE_ERROR: Encountered " "is" "IS "" at line 1, '
'column 10.\n'
'Was expecting one of:\n'
' <EOF> \n'
' "where" ...\n'
' "group" ...\n'
' "pivot" ...\n'
' "order" ...\n'
' "skipping" ...\n'
' "limit" ...\n'
' "offset" ...\n'
' "label" ...\n'
' "format" ...\n'
' "options" ...\n'
' "," ...\n'
' "*" ...\n'
' "+" ...\n'
' "-" ...\n'
' "/" ...\n'
' "%" ...\n'
' "*" ...\n'
' "/" ...\n'
' "%" ...\n'
' "+" ...\n'
' "-" ...'
)
self.assertEqual(result, expected)
| [
"roberto@dealmeida.net"
] | roberto@dealmeida.net |
e2e964631eef116474cd151983314c48a21da834 | 3596fd6714859644f882d4cf21bdc5988ceadff1 | /loader/migrations/0002_auto_20190731_1821.py | 96be8f3c13eee21a3d848d575074d18ee007b210 | [
"MIT"
] | permissive | and-sm/testgr | 9e54526baad6105f146abd2d40dd35510f4e9a84 | 231f6906fa3a2d29e43d407f0637214fe2bf4fa0 | refs/heads/master | 2023-05-01T14:45:45.742927 | 2022-05-10T06:43:18 | 2022-05-10T06:43:18 | 149,164,578 | 21 | 4 | MIT | 2023-03-31T15:25:03 | 2018-09-17T17:45:01 | Python | UTF-8 | Python | false | false | 755 | py | # Generated by Django 2.2.1 on 2019-07-31 18:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('loader', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='testjobs',
name='uuid',
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name='tests',
name='uuid',
field=models.CharField(db_index=True, max_length=36),
),
migrations.AlterField(
model_name='testsstorage',
name='identity',
field=models.CharField(blank=True, db_index=True, max_length=255, null=True),
),
]
| [
"and.inbx@gmail.com"
] | and.inbx@gmail.com |
3aa8275b4ed0528fc62812ebb986cdc0e2bfc848 | d02385211c75b168da9eff2acb221a42b9e28fb0 | /File IO/read_write_append.py | 859bf052b9f2a54e505d5f60371abba33aaeb9f8 | [] | no_license | PMLS3/PythonLearning | 0b53c0ca72f61e6a51df5b5399914dd289b22f73 | b936dd70096ebbec3cc14c1b2efa9995dc2c4521 | refs/heads/main | 2023-02-10T05:02:01.525277 | 2021-01-06T14:44:14 | 2021-01-06T14:44:14 | 324,515,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 936 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 23 13:27:09 2020
@author: saura
"""
with open("happy\happy.txt", mode = 'a') as my_file:
text = my_file.write("I am HAPPY!")
print(text) # it prints the no. of letters written into the file
with open("happy\happy.txt", mode = 'r') as my_file:
print(my_file.read())
'''
mode = 'w' : it creates a new file and write into it. If there is an exiting file with the same name, it replaces it.
mode = 'r' : it is used to read the file
mode = 'r+' : it is used to read and write into the file. but it writes from position 0, which might replace some existing text.
mode = 'a' : it appends to the existing file. meaning writing to the file keeping the old content intact.
if the file doesn't exist, it creates a new one.
if we don't mention the mode, by default it will be considered 'r' mode.
with 'with' we don't need to close the file manually.
''' | [
"peet@partnersinbiz.tech"
] | peet@partnersinbiz.tech |
bcf4694b4be4de84974a88f8c1e0c68664a56527 | 4913fb7fd32c3dd0da53af7a012569ec2254b35a | /59.集合数据的操作.py | 75c83dbf6875876bad10856772cd2746191883a6 | [] | no_license | puhaoran12/python_note | 8a21954050ba3126f2ef6d5d1e4a2904df954b9b | b807e7b7dd90c87cee606f50421400c8f3d0ba03 | refs/heads/master | 2023-07-07T20:20:04.546541 | 2021-08-21T02:17:12 | 2021-08-21T02:17:12 | 398,439,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | #交集intersection() 或者 &
s1={10,20,30}
s2={30,40,50}
print(s1.intersection(s2))
print(s1 & s2)
#并集union() 或者 |
print(s1.union(s2))
print(s1 | s2)
#差集difference() 或者 -
print(s1.difference(s2))
print(s1 - s2)
#对称差集symmetric_difference() 或者 ^
print(s1.symmetric_difference(s2))
print(s1 ^ s2)
| [
"276191374@qq.com"
] | 276191374@qq.com |
c51fdfcbfcba03c2c6e8a2f54cd4ea6ac71386b3 | 6ebd656e2eff394454bdaba1a5793859aa3adc17 | /bot/utils/config.py | abe8bd0ffbba562e027c069356f3974412fa9b10 | [] | no_license | streemline/aiogram_template | 7d98e9286675db1ab0d21d65519bb2c9bc94bceb | 8cf2d3ea0a45b5bdf4e672af7397febe786b8c1d | refs/heads/main | 2023-06-02T01:23:26.903261 | 2021-05-11T12:03:49 | 2021-05-11T12:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from yaml import load
from . import exceptions
from .trafaret import config_trafaret
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def parse_config(path, check=True):
"""
Parse a config.
"""
try:
with open(path) as file:
config = load(file, Loader=Loader)
except TypeError:
raise exceptions.ConfigNotSpecifiedError("Config file not found")
if check:
config_trafaret.check(config)
return config
| [
"zhogalkostya@gmail.com"
] | zhogalkostya@gmail.com |
416ebd601da95b4878e39a59b5229fda813725d3 | 0a6cf240f24a59aab28a641c855116bc88ba6047 | /Tests/SSL/test_ssl_containers.py | 556305e5d83cb4d20f319dc1ae1d2870f4c5a0ba | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ayeaton/InnerEye-DeepLearning | 7e9772721332b7c5dae82a7106568807682152c0 | c2f0d668e8cefc1c3868de86c62131f38ca2531d | refs/heads/main | 2023-05-31T08:39:00.744006 | 2021-06-24T19:53:40 | 2021-06-24T19:53:40 | 376,850,420 | 0 | 0 | MIT | 2021-06-14T14:22:12 | 2021-06-14T14:22:11 | null | UTF-8 | Python | false | false | 8,280 | py | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from pathlib import Path
from typing import Dict
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import torch
from pl_bolts.models.self_supervised.resnets import ResNet
from InnerEye.Common import fixed_paths
from InnerEye.Common.common_util import is_windows
from InnerEye.Common.fixed_paths import repository_root_directory
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
from InnerEye.ML.SSL.lightning_containers.ssl_container import EncoderName, SSLDatasetName
from InnerEye.ML.SSL.lightning_modules.byol.byol_module import BYOLInnerEye
from InnerEye.ML.SSL.lightning_modules.simclr_module import SimCLRInnerEye
from InnerEye.ML.SSL.lightning_modules.ssl_classifier_module import SSLClassifier
from InnerEye.ML.SSL.utils import SSLDataModuleType, SSLTrainingType
from InnerEye.ML.common import BEST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
from InnerEye.ML.configs.ssl.CXR_SSL_configs import CXRImageClassifier
from InnerEye.ML.runner import Runner
from Tests.ML.utils.test_io_util import write_test_dicom
# Shared scratch folder for the fake CXR dataset built by _create_test_cxr_data below.
path_to_test_dataset = full_ml_test_data_path("cxr_test_dataset")
def _create_test_cxr_data(path_to_test_dataset: Path) -> None:
    """
    Creates fake datasets dataframe and dicom images mimicking the expected structure of the datasets
    of NIHCXR and RSNAKaggleCXR
    :param path_to_test_dataset: folder to which we want to save the mock data.
    """
    # Idempotent: if the folder already exists, assume the mock data is in place.
    if path_to_test_dataset.exists():
        return
    path_to_test_dataset.mkdir(exist_ok=True)
    # NIH-style index: 200 rows all pointing at the same dicom file.
    df = pd.DataFrame({"Image Index": np.repeat("1.dcm", 200)})
    df.to_csv(path_to_test_dataset / "Data_Entry_2017.csv", index=False)
    # RSNA-style dataset.csv: 300 subjects with ~20% positive binary labels (fixed seed).
    df = pd.DataFrame({"subject": np.repeat("1", 300),
                       "label": np.random.RandomState(42).binomial(n=1, p=0.2, size=300)})
    df.to_csv(path_to_test_dataset / "dataset.csv", index=False)
    # Single all-ones 256x256 dicom shared by every row above.
    write_test_dicom(array=np.ones([256, 256], dtype="uint16"), path=path_to_test_dataset / "1.dcm")
def default_runner() -> Runner:
    """
    Build an InnerEye Runner with default settings: project root set to the
    repository root, configuration taken from the default settings YAML file.
    """
    project_root = repository_root_directory()
    settings_file = fixed_paths.SETTINGS_YAML_FILE
    return Runner(project_root=project_root, yaml_config_file=settings_file)
# Command-line arguments shared by all tests below: tiny debug model, one epoch,
# small batch sizes and no dataloader workers, to keep the runs fast.
common_test_args = ["", "--is_debug_model=True", "--num_epochs=1", "--ssl_training_batch_size=10",
                    "--linear_head_batch_size=5",
                    "--num_workers=0"]
@pytest.mark.skipif(is_windows(), reason="Too slow on windows")
def test_innereye_ssl_container_cifar10_resnet_simclr() -> None:
    """
    Tests:
        - training of SSL model on cifar10 for one epoch
        - checkpoint saving
        - checkpoint loading and ImageClassifier module creation
        - training of image classifier for one epoch.
    """
    args = common_test_args + ["--model=CIFAR10SimCLR"]
    with mock.patch("sys.argv", args):
        loaded_config, actual_run = default_runner().run()
    assert loaded_config is not None
    assert isinstance(loaded_config.model, SimCLRInnerEye)
    # Check that the config defaults survived loading unchanged.
    assert loaded_config.encoder_output_dim == 2048
    assert loaded_config.l_rate == 1e-4
    assert loaded_config.num_epochs == 1
    assert loaded_config.recovery_checkpoint_save_interval == 200
    assert loaded_config.ssl_training_type == SSLTrainingType.SimCLR
    assert loaded_config.online_eval.num_classes == 10
    assert loaded_config.ssl_training_dataset_name == SSLDatasetName.CIFAR10
    assert loaded_config.online_eval.dataset == SSLDatasetName.CIFAR10.value
    assert not loaded_config.use_balanced_binary_loss_for_linear_head
    assert isinstance(loaded_config.model.encoder.cnn_model, ResNet)
    # Second run: load the checkpoint written above and train a linear classifier on top.
    checkpoint_path = loaded_config.outputs_folder / "checkpoints" / "best_checkpoint.ckpt"
    args = common_test_args + ["--model=SSLClassifierCIFAR", f"--local_ssl_weights_path={checkpoint_path}"]
    with mock.patch("sys.argv", args):
        loaded_config, actual_run = default_runner().run()
    assert loaded_config is not None
    assert isinstance(loaded_config.model, SSLClassifier)
    assert loaded_config.model.class_weights is None
    assert loaded_config.model.num_classes == 10
@pytest.mark.skipif(is_windows(), reason="Too slow on windows")
def test_load_innereye_ssl_container_cifar10_cifar100_resnet_byol() -> None:
    """
    Tests that the parameters feed into the BYOL model and online evaluator are
    indeed the one we fed through our command line args
    """
    args = common_test_args + ["--model=CIFAR10CIFAR100BYOL"]
    runner = default_runner()
    # Only parse and load the model; no training is needed for this check.
    with mock.patch("sys.argv", args):
        runner.parse_and_load_model()
    loaded_config = runner.lightning_container
    assert loaded_config is not None
    assert loaded_config.linear_head_dataset_name == SSLDatasetName.CIFAR100
    assert loaded_config.ssl_training_dataset_name == SSLDatasetName.CIFAR10
    assert loaded_config.ssl_training_type == SSLTrainingType.BYOL
@pytest.mark.skipif(is_windows(), reason="Too slow on windows")
def test_innereye_ssl_container_rsna() -> None:
    """
    Test if we can get the config loader to load a Lightning container model, and then train locally.
    """
    runner = default_runner()
    # Build the mock NIH/RSNA-style dataset on disk (no-op if it already exists).
    _create_test_cxr_data(path_to_test_dataset)
    # Test training of SSL model
    args = common_test_args + ["--model=NIH_RSNA_BYOL",
                               f"--local_dataset={str(path_to_test_dataset)}",
                               f"--extra_local_dataset_paths={str(path_to_test_dataset)}",
                               "--use_balanced_binary_loss_for_linear_head=True",
                               f"--ssl_encoder={EncoderName.densenet121.value}"]
    with mock.patch("sys.argv", args):
        loaded_config, actual_run = runner.run()
    assert loaded_config is not None
    assert isinstance(loaded_config.model, BYOLInnerEye)
    assert loaded_config.online_eval.dataset == SSLDatasetName.RSNAKaggleCXR.value
    assert loaded_config.online_eval.num_classes == 2
    assert loaded_config.ssl_training_dataset_name == SSLDatasetName.NIHCXR
    assert loaded_config.ssl_training_type == SSLTrainingType.BYOL
    assert loaded_config.encoder_output_dim == 1024  # DenseNet output size
    # Check model params
    assert isinstance(loaded_config.model.hparams, Dict)
    assert loaded_config.model.hparams["batch_size"] == 10
    assert loaded_config.model.hparams["use_7x7_first_conv_in_resnet"]
    assert loaded_config.model.hparams["encoder_name"] == EncoderName.densenet121.value
    assert loaded_config.model.hparams["learning_rate"] == 1e-4
    assert loaded_config.model.hparams["num_samples"] == 180
    # Check some augmentation params
    assert loaded_config.datamodule_args[
               SSLDataModuleType.ENCODER].augmentation_params.preprocess.center_crop_size == 224
    assert loaded_config.datamodule_args[SSLDataModuleType.ENCODER].augmentation_params.augmentation.use_random_crop
    assert loaded_config.datamodule_args[SSLDataModuleType.ENCODER].augmentation_params.augmentation.use_random_affine
    # Check that we are able to load the checkpoint and create classifier model
    checkpoint_path = loaded_config.checkpoint_folder / BEST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
    args = common_test_args + ["--model=CXRImageClassifier",
                               f"--local_dataset={str(path_to_test_dataset)}",
                               "--use_balanced_binary_loss_for_linear_head=True",
                               f"--local_ssl_weights_path={checkpoint_path}"]
    with mock.patch("sys.argv", args):
        loaded_config, actual_run = runner.run()
    assert loaded_config is not None
    assert isinstance(loaded_config, CXRImageClassifier)
    assert loaded_config.model.freeze_encoder
    # Balanced class weights derived from the mock labels (~20% positives).
    assert torch.isclose(loaded_config.model.class_weights, torch.tensor([0.21, 0.79]), atol=1e-6).all()  # type: ignore
    assert loaded_config.model.num_classes == 2
| [
"noreply@github.com"
] | noreply@github.com |
af1e5056aec3be9d614a4fd505910033c83a7e61 | 44c1ac9a83b26ab70b7ce0148acbd933e1a946c3 | /inss/src/mossoroposts/tables.py | 4020a530ac55f476c913803c46e565a0ebbdf0ec | [] | no_license | pedromaia02/ls_deploy | b9348d37260b7e55d526c840d6bbcff9a30033e5 | 9c6f8bb077455389bc541e1917974ee6c78e6b6d | refs/heads/master | 2020-04-05T22:57:15.165172 | 2017-10-08T21:08:16 | 2017-10-08T21:08:16 | 68,154,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | # -*- coding: utf-8 -*-
# Customizar as tabelas do django-table2
import django_tables2 as tables
from .models import MossoroPosts
class PostsTable(tables.Table):
    """django-tables2 table for MossoroPosts rows, with per-row action links
    (details / edit / delete with a JS confirm prompt)."""
    actions = tables.TemplateColumn("<a href=/inssmossoro/{{record.id}}>Detalhes</a> - <a href=/inssmossoro/{{record.id}}/edit>Modificar</a> - <a href=/inssmossoro/{{record.id}}/delete onclick='return confirm(\"Confima deletar o item selecionado?\")'>Excluir</a>", orderable=False)
    #valor = tables.Column(footer="Total: ")
    def __init__(self, *args,**kwargs):
        super(PostsTable,self).__init__(*args, **kwargs)
        # NOTE(review): base_columns is a class-level attribute in django-tables2,
        # so these assignments mutate the class, not just this instance — confirm
        # that is intended (it persists across table instantiations).
        self.base_columns['data'].verbose_name = "Data"
        self.base_columns['data'].format = "D d M Y"
        self.base_columns['anexo'].verbose_name = "Anexo"
        self.base_columns['local'].verbose_name = "Local"
        self.base_columns['numero_chamado'].verbose_name = "Nº Chamado"
        self.base_columns['status'].verbose_name = "Status"
        self.base_columns['tecnico'].verbose_name = "Técnico Responsável"
        # self.base_columns['profissional'].verbose_name = "Profissional Responsável"
        self.base_columns['actions'].verbose_name = "Ações"
    class Meta:
        model = MossoroPosts
        # add class="paleblue" to <table> tag
        attrs = {'class': 'paleblue'}
        fields = ('data', 'numero_chamado', 'local','status','tecnico','anexo','actions')
| [
"pedromaia02@gmail.com"
] | pedromaia02@gmail.com |
ad364d115bc6776a6a8c7228e0e343adfa03863e | 9860ae1038952cc6fe25cddeab0e50d51692baee | /problem/exercise_08_14.py | 633c0a7d34ee4581bd6821cc500550f6fcee7b7b | [] | no_license | messiLiao/ai_for_robotics | e303be6327f6d31eb090457f4b9cbc4a9f4cad62 | 318584dc0a6cce043cd26c472cdb17388d1a6ae4 | refs/heads/master | 2021-09-06T06:51:54.033527 | 2018-02-03T13:25:02 | 2018-02-03T13:25:02 | 104,562,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,867 | py | # Now we want to give weight to our
# particles. This program will print a
# list of 1000 particle weights.
#
# Don't modify the code below. Please enter
# your code at the bottom.
from math import *
import random
landmarks = [[20.0, 20.0], [80.0, 80.0], [20.0, 80.0], [80.0, 20.0]]
world_size = 100.0
class robot:
    # 2D robot in a cyclic world of side world_size, with Gaussian noise
    # on forward motion, turning and landmark range sensing. Used both as
    # the "real" robot and as a particle. (Python 2 syntax.)
    def __init__(self):
        # Start at a uniformly random pose with zero noise.
        self.x = random.random() * world_size
        self.y = random.random() * world_size
        self.orientation = random.random() * 2.0 * pi
        self.forward_noise = 0.0;
        self.turn_noise = 0.0;
        self.sense_noise = 0.0;
    def set(self, new_x, new_y, new_orientation):
        # Place the robot at an exact pose; raises on out-of-world values.
        if new_x < 0 or new_x >= world_size:
            raise ValueError, 'X coordinate out of bound'
        if new_y < 0 or new_y >= world_size:
            raise ValueError, 'Y coordinate out of bound'
        if new_orientation < 0 or new_orientation >= 2 * pi:
            raise ValueError, 'Orientation must be in [0..2pi]'
        self.x = float(new_x)
        self.y = float(new_y)
        self.orientation = float(new_orientation)
    def set_noise(self, new_f_noise, new_t_noise, new_s_noise):
        # makes it possible to change the noise parameters
        # this is often useful in particle filters
        self.forward_noise = float(new_f_noise);
        self.turn_noise = float(new_t_noise);
        self.sense_noise = float(new_s_noise);
    def sense(self):
        # Return noisy Euclidean distances to each landmark.
        Z = []
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            dist += random.gauss(0.0, self.sense_noise)
            Z.append(dist)
        return Z
    def move(self, turn, forward):
        # Turn then move forward (both noisy); returns a NEW robot with the
        # same noise settings rather than mutating self.
        if forward < 0:
            raise ValueError, 'Robot cant move backwards'
        # turn, and add randomness to the turning command
        orientation = self.orientation + float(turn) + random.gauss(0.0, self.turn_noise)
        orientation %= 2 * pi
        # move, and add randomness to the motion command
        dist = float(forward) + random.gauss(0.0, self.forward_noise)
        x = self.x + (cos(orientation) * dist)
        y = self.y + (sin(orientation) * dist)
        x %= world_size    # cyclic truncate
        y %= world_size
        # set particle
        res = robot()
        res.set(x, y, orientation)
        res.set_noise(self.forward_noise, self.turn_noise, self.sense_noise)
        return res
    def Gaussian(self, mu, sigma, x):
        # calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
        return exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / sqrt(2.0 * pi * (sigma ** 2))
    def measurement_prob(self, measurement):
        # calculates how likely a measurement should be
        prob = 1.0;
        for i in range(len(landmarks)):
            dist = sqrt((self.x - landmarks[i][0]) ** 2 + (self.y - landmarks[i][1]) ** 2)
            prob *= self.Gaussian(dist, self.sense_noise, measurement[i])
        return prob
    def __repr__(self):
        return '[x=%.6s y=%.6s orient=%.6s]' % (str(self.x), str(self.y), str(self.orientation))
#myrobot = robot()
#myrobot.set_noise(5.0, 0.1, 5.0)
#myrobot.set(30.0, 50.0, pi/2)
#myrobot = myrobot.move(-pi/2, 15.0)
#print myrobot.sense()
#myrobot = myrobot.move(-pi/2, 10.0)
#print myrobot.sense()
#### DON'T MODIFY ANYTHING ABOVE HERE! ENTER CODE BELOW ####
myrobot = robot()
myrobot = myrobot.move(0.1, 5.0)
Z = myrobot.sense()
N = 1000
p = []
for i in range(N):
x = robot()
x.set_noise(0.05, 0.05, 5.0)
p.append(x)
p2 = []
for i in range(N):
p2.append(p[i].move(0.1, 5.0))
p = p2
w = [r.measurement_prob(r.sense()) for r in p]
#insert code here!
print w #Please print w for grading purposes.
| [
"krobor.lmx@gmail.com"
] | krobor.lmx@gmail.com |
7267956f1f7b465699fb043dc755525ce97b5ccf | 2c73882fc59ca85f4854a43bcda8cc9edd282b8d | /polls_api/views.py | 2664dfc220c7e377fed156deed7d18e979f75115 | [] | no_license | mjstealth/guide-to-backbonejs-with-django | 540236f3535ee171c3aa4c43a1be9394a8a7e4bc | e7d5016c800e1e0e282da0386cc6112d4eed63c1 | refs/heads/master | 2021-01-17T22:40:28.191509 | 2012-09-04T22:17:12 | 2012-09-04T22:17:12 | 5,679,419 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from djangorestframework import views
from djangorestframework.response import Response
from polls.forms import PollForm
from polls.models import Poll
from .resources import PollResource
class PollResults (views.View):
    """Read-only endpoint returning the serialized results of one poll."""
    def get(self, request, poll_id):
        queryset = Poll.objects.all()
        poll = get_object_or_404(queryset, pk=poll_id)
        return PollResource().serialize(poll)
class PollVotes (views.View):
    """Accepts a vote submission for a poll; redirects to the results on success."""
    def post(self, request, poll_id):
        poll = get_object_or_404(Poll.objects.all(), pk=poll_id)
        form = PollForm(request.POST, instance=poll)
        # Guard clause: invalid submissions come back with the form errors.
        if not form.is_valid():
            return Response(content=form.errors, status=400)
        form.save()
        results_url = reverse('polls_api_results', args=[poll_id])
        return Response(status=303, headers={'Location': results_url})
# Module-level view callables for use in URLconf.
poll_results_view = PollResults.as_view()
poll_votes_view = PollVotes.as_view()
| [
"mjumbewu@gmail.com"
] | mjumbewu@gmail.com |
4e860abb10b550c48e16c59c692326f5f75730fa | f08177abce14672891c34c0eecc064f2b42f2441 | /hydrus/client/db/ClientDBMappingsCounts.py | 5df0ef9fa636429b38e960148136fbb76dc8a01a | [
"WTFPL"
] | permissive | bbappserver/hydrus-userpatch | d217006cc7691a08c11c98ddfd2415da56d6b27d | ef19e2167e24433d960a0811a93a683d62203518 | refs/heads/master | 2022-09-19T03:36:16.575489 | 2022-05-04T21:40:27 | 2022-05-04T21:40:27 | 179,595,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,801 | py | import collections
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusDBBase
from hydrus.client import ClientData
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
from hydrus.client.metadata import ClientTags
def GenerateCombinedFilesMappingsCountsCacheTableName( tag_display_type, tag_service_id ):
    """
    Return the name of the 'combined files' autocomplete counts cache table
    for the given tag display type (storage vs actual/display) and tag service.
    """
    if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
        
        name = 'combined_files_ac_cache'
        
    elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
        
        name = 'combined_files_display_ac_cache'
        
    else:
        
        # previously an unknown type fell through and raised a confusing
        # UnboundLocalError on 'name'; fail explicitly instead
        raise NotImplementedError( 'Unknown tag display type: {}'.format( tag_display_type ) )
        
    
    suffix = str( tag_service_id )
    
    combined_counts_cache_table_name = 'external_caches.{}_{}'.format( name, suffix )
    
    return combined_counts_cache_table_name
def GenerateSpecificCountsCacheTableName( tag_display_type, file_service_id, tag_service_id ):
    """
    Return the name of the file-service-specific autocomplete counts cache table
    for the given tag display type (storage vs actual/display), file service and tag service.
    """
    if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
        
        name = 'specific_ac_cache'
        
    elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
        
        name = 'specific_display_ac_cache'
        
    else:
        
        # previously an unknown type fell through and raised a confusing
        # UnboundLocalError on 'name'; fail explicitly instead
        raise NotImplementedError( 'Unknown tag display type: {}'.format( tag_display_type ) )
        
    
    suffix = '{}_{}'.format( file_service_id, tag_service_id )
    
    specific_counts_cache_table_name = 'external_caches.{}_{}'.format( name, suffix )
    
    return specific_counts_cache_table_name
class ClientDBMappingsCounts( ClientDBModule.ClientDBModule ):
CAN_REPOPULATE_ALL_MISSING_DATA = True
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices ):
self.modules_services = modules_services
ClientDBModule.ClientDBModule.__init__( self, 'client mappings counts', cursor )
self._missing_storage_tag_service_pairs = set()
self._missing_display_tag_service_pairs = set()
def _GetServiceTableGenerationDictSingle( self, tag_display_type, file_service_id, tag_service_id ):
table_dict = {}
table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
# the version was earlier here but we updated when adding combined delete files and ipfs to these tables
version = 465
table_dict[ table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER PRIMARY KEY, current_count INTEGER, pending_count INTEGER );', version )
return table_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
table_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
for file_service_id in file_service_ids:
for tag_display_type in ( ClientTags.TAG_DISPLAY_STORAGE, ClientTags.TAG_DISPLAY_ACTUAL ):
single_table_dict = self._GetServiceTableGenerationDictSingle( tag_display_type, file_service_id, tag_service_id )
table_dict.update( single_table_dict )
return table_dict
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
tag_service_ids = list( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
for tag_service_id in tag_service_ids:
for file_service_id in file_service_ids:
storage_table_dict_for_this = self._GetServiceTableGenerationDictSingle( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
storage_table_names_for_this = set( storage_table_dict_for_this.keys() )
if not storage_table_names_for_this.isdisjoint( table_names ):
self._missing_storage_tag_service_pairs.add( ( file_service_id, tag_service_id ) )
display_table_dict_for_this = self._GetServiceTableGenerationDictSingle( ClientTags.TAG_DISPLAY_ACTUAL, file_service_id, tag_service_id )
display_table_names_for_this = set( display_table_dict_for_this.keys() )
if not display_table_names_for_this.isdisjoint( table_names ):
self._missing_display_tag_service_pairs.add( ( file_service_id, tag_service_id ) )
def AddCounts( self, tag_display_type, file_service_id, tag_service_id, ac_cache_changes ):
counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
new_tag_ids = set()
new_local_tag_ids = set()
for ( tag_id, current_delta, pending_delta ) in ac_cache_changes:
self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, current_count, pending_count ) VALUES ( ?, ?, ? );'.format( counts_cache_table_name ), ( tag_id, current_delta, pending_delta ) )
if self._GetRowCount() > 0:
new_tag_ids.add( tag_id )
if file_service_id == self.modules_services.combined_local_file_service_id: # and tag_service_id = all known tags
new_local_tag_ids.add( tag_id )
if len( new_tag_ids ) < len( ac_cache_changes ):
self._ExecuteMany( 'UPDATE {} SET current_count = current_count + ?, pending_count = pending_count + ? WHERE tag_id = ?;'.format( counts_cache_table_name ), ( ( num_current, num_pending, tag_id ) for ( tag_id, num_current, num_pending ) in ac_cache_changes if tag_id not in new_tag_ids ) )
return ( new_tag_ids, new_local_tag_ids )
def ClearCounts( self, tag_display_type, file_service_id, tag_service_id, keep_current = False, keep_pending = False ):
table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
if keep_current:
self._Execute( 'UPDATE {} SET pending_count = 0 WHERE pending_count > 0;'.format( table_name ) )
self._Execute( 'DELETE FROM {} WHERE current_count = 0 AND pending_count = 0;'.format( table_name ) )
elif keep_pending:
self._Execute( 'UPDATE {} SET current_count = 0 WHERE current_count > 0;'.format( table_name ) )
self._Execute( 'DELETE FROM {} WHERE current_count = 0 AND pending_count = 0;'.format( table_name ) )
else:
self._Execute( 'DELETE FROM {};'.format( table_name ) )
def CreateTables( self, tag_display_type, file_service_id, tag_service_id, populate_from_storage = False ):
table_generation_dict = self._GetServiceTableGenerationDictSingle( tag_display_type, file_service_id, tag_service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._Execute( create_query_without_name.format( table_name ) )
#
if tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL and populate_from_storage:
display_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
storage_table_name = self.GetCountsCacheTableName( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, current_count, pending_count ) SELECT tag_id, current_count, pending_count FROM {};'.format( display_table_name, storage_table_name ) )
def DropTables( self, tag_display_type, file_service_id, tag_service_id ):
table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( table_name ) )
def FilterExistingTagIds( self, tag_display_type, file_service_id, tag_service_id, tag_ids_table_name ):
counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
return self._STS( self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( tag_ids_table_name, counts_cache_table_name ) ) )
def GetAutocompleteCountEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
count = 0
if not include_current_tags and not include_pending_tags:
return count
( current_count, pending_count ) = self.GetAutocompleteCountEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
if include_current_tags:
count += current_count
if include_current_tags:
count += pending_count
return count
def GetAutocompleteCountEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
include_current_tags = True
include_pending_tags = True
ids_to_count = self.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
current_count = 0
pending_count = 0
for ( current_min, current_max, pending_min, pending_max ) in ids_to_count.values():
current_count += current_min
pending_count += pending_min
return ( current_count, pending_count )
def GetCounts( self, tag_display_type, tag_service_id, file_service_id, tag_ids, include_current, include_pending, domain_is_cross_referenced = True, zero_count_ok = False, job_key = None, tag_ids_table_name = None ):
if len( tag_ids ) == 0:
return {}
if tag_service_id == self.modules_services.combined_tag_service_id and file_service_id == self.modules_services.combined_file_service_id:
ids_to_count = {}
return ids_to_count
if tag_service_id == self.modules_services.combined_tag_service_id:
search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
else:
search_tag_service_ids = [ tag_service_id ]
cache_results = []
if len( tag_ids ) > 1:
if tag_ids_table_name is None:
with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_tag_id_table_name:
for search_tag_service_id in search_tag_service_ids:
if job_key is not None and job_key.IsCancelled():
return {}
cache_results.extend( self.GetCountsForTags( tag_display_type, file_service_id, search_tag_service_id, temp_tag_id_table_name ) )
else:
for search_tag_service_id in search_tag_service_ids:
if job_key is not None and job_key.IsCancelled():
return {}
cache_results.extend( self.GetCountsForTags( tag_display_type, file_service_id, search_tag_service_id, tag_ids_table_name ) )
else:
( tag_id, ) = tag_ids
for search_tag_service_id in search_tag_service_ids:
cache_results.extend( self.GetCountsForTag( tag_display_type, file_service_id, search_tag_service_id, tag_id ) )
#
ids_to_count = {}
for ( tag_id, current_count, pending_count ) in cache_results:
if not include_current:
current_count = 0
if not include_pending:
pending_count = 0
if current_count == 0 and pending_count == 0 and not zero_count_ok:
continue
current_max = current_count
pending_max = pending_count
if domain_is_cross_referenced:
# file counts are perfectly accurate
current_min = current_count
pending_min = pending_count
else:
# for instance this is a search for 'my files' deleted files, but we are searching on 'all deleted files' domain
current_min = 0
pending_min = 0
if tag_id in ids_to_count:
( existing_current_min, existing_current_max, existing_pending_min, existing_pending_max ) = ids_to_count[ tag_id ]
( current_min, current_max ) = ClientData.MergeCounts( existing_current_min, existing_current_max, current_min, current_max )
( pending_min, pending_max ) = ClientData.MergeCounts( existing_pending_min, existing_pending_max, pending_min, pending_max )
ids_to_count[ tag_id ] = ( current_min, current_max, pending_min, pending_max )
if zero_count_ok:
for tag_id in tag_ids:
if tag_id not in ids_to_count:
ids_to_count[ tag_id ] = ( 0, 0, 0, 0 )
return ids_to_count
def GetCountsCacheTableName( self, tag_display_type, file_service_id, tag_service_id ):
if file_service_id == self.modules_services.combined_file_service_id:
counts_cache_table_name = GenerateCombinedFilesMappingsCountsCacheTableName( tag_display_type, tag_service_id )
else:
counts_cache_table_name = GenerateSpecificCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
return counts_cache_table_name
def GetCountsEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
ids_to_count = collections.Counter()
if not include_current_tags and not include_pending_tags:
return ids_to_count
ids_to_count_statuses = self.GetCountsEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
for ( tag_id, ( current_count, pending_count ) ) in ids_to_count_statuses.items():
count = 0
if include_current_tags:
count += current_count
if include_current_tags:
count += pending_count
ids_to_count[ tag_id ] = count
return ids_to_count
def GetCountsEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
include_current_tags = True
include_pending_tags = True
ids_to_count_full = self.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
ids_to_count_statuses = collections.defaultdict( lambda: ( 0, 0 ) )
for ( tag_id, ( current_min, current_max, pending_min, pending_max ) ) in ids_to_count_full.items():
ids_to_count_statuses[ tag_id ] = ( current_min, pending_min )
return ids_to_count_statuses
def GetCountsForTag( self, tag_display_type, file_service_id, tag_service_id, tag_id ):
counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
return self._Execute( 'SELECT tag_id, current_count, pending_count FROM {} WHERE tag_id = ?;'.format( counts_cache_table_name ), ( tag_id, ) ).fetchall()
def GetCountsForTags( self, tag_display_type, file_service_id, tag_service_id, temp_tag_id_table_name ):
counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
# temp tags to counts
return self._Execute( 'SELECT tag_id, current_count, pending_count FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_id_table_name, counts_cache_table_name ) ).fetchall()
def GetCurrentPendingPositiveCountsAndWeights( self, tag_display_type, file_service_id, tag_service_id, tag_ids, tag_ids_table_name = None ):
include_current = True
include_pending = True
ids_to_count = self.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current, include_pending, tag_ids_table_name = tag_ids_table_name )
current_tag_ids = set()
current_tag_weight = 0
pending_tag_ids = set()
pending_tag_weight = 0
for ( tag_id, ( current_min, current_max, pending_min, pending_max ) ) in ids_to_count.items():
if current_min > 0:
current_tag_ids.add( tag_id )
current_tag_weight += current_min
if pending_min > 0:
pending_tag_ids.add( tag_id )
pending_tag_weight += pending_min
return ( current_tag_ids, current_tag_weight, pending_tag_ids, pending_tag_weight )
def GetMissingTagCountServicePairs( self ):
return ( self._missing_storage_tag_service_pairs, self._missing_display_tag_service_pairs )
def GetQueryPhraseForCurrentTagIds( self, tag_display_type, file_service_id, tag_service_id ):
counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
return 'SELECT tag_id FROM {} WHERE current_count > 0'.format( counts_cache_table_name )
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
if content_type == HC.CONTENT_TYPE_TAG:
table_dict = self._GetServicesTableGenerationDict()
for table_name in table_dict.keys():
tables_and_columns.append( ( table_name, 'tag_id' ) )
return tables_and_columns
def GetTotalCurrentCount( self, tag_display_type, file_service_id, tag_service_id ):
counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
result = self._Execute( 'SELECT SUM( current_count ) FROM {};'.format( counts_cache_table_name ) ).fetchone()
if result is None or result[0] is None:
count = 0
else:
( count, ) = result
return count
def ReduceCounts( self, tag_display_type, file_service_id, tag_service_id, ac_cache_changes ):
    # Subtracts the given ( tag_id, current_delta, pending_delta ) rows from the
    # counts cache, deleting rows the deltas fully consume and decrementing the rest.
    # Returns ( deleted_tag_ids, deleted_local_tag_ids ).
    # this takes positive counts, despite ultimately being a reduce guy
    counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
    deleted_tag_ids = set()
    deleted_local_tag_ids = set()
    for ( tag_id, current_delta, pending_delta ) in ac_cache_changes:
        # delete outright when the delta exactly consumes the whole row
        self._Execute( 'DELETE FROM {} WHERE tag_id = ? AND current_count = ? AND pending_count = ?;'.format( counts_cache_table_name ), ( tag_id, current_delta, pending_delta ) )
        if self._GetRowCount() > 0:
            deleted_tag_ids.add( tag_id )
            if file_service_id == self.modules_services.combined_local_file_service_id: # and tag_service_id = all known tags
                deleted_local_tag_ids.add( tag_id )
    if len( deleted_tag_ids ) < len( ac_cache_changes ):
        # surviving rows just get their counts decremented
        self._ExecuteMany( 'UPDATE {} SET current_count = current_count - ?, pending_count = pending_count - ? WHERE tag_id = ?;'.format( counts_cache_table_name ), ( ( current_delta, pending_delta, tag_id ) for ( tag_id, current_delta, pending_delta ) in ac_cache_changes if tag_id not in deleted_tag_ids ) )
    return ( deleted_tag_ids, deleted_local_tag_ids )
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
c4ff9ba09ddd1c85d4d2a16bd76fc35ec78638d8 | 0c25aeef02a4d97b3d711760520dd9f3815c4826 | /okean/nc/nctypes.py | 83667fd947e48af5d26d42cdee0c21fc7c57a5f4 | [] | no_license | moghimis/okean | 71080c871c5956311d7351e5e043823f8c73505e | 20e707cd4787995b1bf17c19a3b12f232f5867ab | refs/heads/master | 2020-04-05T22:50:18.271043 | 2018-09-21T18:19:35 | 2018-09-21T18:19:35 | 20,712,916 | 0 | 0 | null | 2017-04-11T19:32:39 | 2014-06-11T05:05:19 | Python | UTF-8 | Python | false | false | 7,856 | py | '''
-------------------------------------------------------------------
datatype dtype.name name nc4 description
-------------------------------------------------------------------
f4,f float32 NC_FLOAT 32-bit floating point
f8 float64 NC_DOUBLE 64-bit floating point
i1 int8 NC_BYTE 8-bit signed integer
i2 int16 NC_SHORT 16-bit signed integer
i4,i int32 NC_INT or NC_LONG 32-bit signed integer
i8 int64 NC_INT64 1 64-bit signed integer
u1 uint8 NC_CHAR 2 8-bit unsigned integer
u2 uint16 NC_USHORT 1 16-bit unsigned integer
u4 uint32 NC_UINT 1 32-bit unsigned integer
u8 uint64 NC_UINT64 1 64-bit unsigned integer
S# string8^# NC_STRING 1 variable length character string
b(i1) bool NC_BYTE
--------------------------------------------------------------------
1) Available only for netCDF-4 format files.
All the unsigned ints (except NC_CHAR), the 64-bit ints, and
string type are for netCDF-4 files only
2) Char used in netcdf3 to represent strings!
------------------
Numpy Numeric
------------------
f4 f
f8 d
i1 1
i2 s
i4 i
i8 l
u1 b
u2 w
u4 u
u8 None
S1 c
b 1
-------------------
'''
import numpy as np
nptypes={'float32':'NC_FLOAT','float64':'NC_DOUBLE','int8':'NC_BYTE',
'int16':'NC_SHORT','int32':'NC_INT','int64':'NC_INT64',
'uint8':'NC_CHAR','uint16':'NC_USHORT','uint32':'NC_UINT',
'uint64':'NC_UINT64','stringN':'NC_STRING','bool':'NC_BYTE'}
np2numeric={'float32':'f','float64':'d','int8':'1','int16':'s',
'int32':'i','int64':'l', 'uint8':'b','uint16':'w',
'uint32':'u','uint64':False,'stringN':'c','bool':'1'}
pycdftypes=['byte','char','short','int','float','double']
numpynames=['float32','float64','int8','int16','int32','int64','uint8',
'uint16','uint32','uint64','stringN','bool']
ncnames=['float','double','byte','short','int','long','int64','char',
'ushort','uint','uint64','string']
def type_numpy2nc(type,ncver=4):
  '''
  Convert a numpy typecode or dtype to a netcdf type name.

  Returns None for unsigned ints under netcdf 3, which has no such types.
  '''
  # accept either a typecode string or an actual dtype object
  npname = np.dtype(type).name if isinstance(type, basestring) else type.name
  # netcdf 3 has no unsigned integer types
  if ncver == 3 and npname.lower().startswith('uint'):
    return
  if npname.startswith('string') or npname.startswith('unicode'):
    # strings are NC_STRING in nc4, CHAR in nc3
    npname = 'uint8' if ncver == 3 else 'stringN'
  # netcdf 3 caps signed ints at 32 bits
  if ncver == 3 and npname.lower().startswith('int'):
    if int(npname[3:]) > 32:
      npname = 'int32'
  return nptypes[npname][3:]
def type_nc2numpy(type,strlen=1,isstr=False,isbool=False):
  '''
  Convert a netcdf type name to a numpy dtype.

  strlen sizes STRING results; isstr marks nc3 CHAR as text; isbool
  interprets BYTE as boolean.
  '''
  name = type.upper()
  if name == 'STRING':
    return np.dtype('S{0}'.format(strlen))
  if name == 'CHAR' and isstr:
    # netcdf 3 stores strings as CHAR
    return np.dtype('S1')
  if name == 'BYTE':
    return np.dtype('bool') if isbool else np.dtype('int8')
  for npname, ncname in nptypes.items():
    if ncname[3:] == name:
      return np.dtype(npname)
def type_numpy2numeric(type):
  '''
  Convert a numpy typecode or dtype to a Numeric typecode.
  '''
  # accept either a typecode string or an actual dtype object
  npname = np.dtype(type).name if isinstance(type, basestring) else type.name
  # every string dtype shares the single Numeric character code
  if npname.startswith('string'):
    npname = 'stringN'
  return np2numeric[npname]
def type_numeric2numpy(type,strlen=1):
  '''
  Convert a Numeric typecode to a numpy dtype; strlen sizes character data.
  '''
  if type == 'c':
    # the character code carries an explicit string length
    return np.dtype('S{0}'.format(strlen))
  for npname, code in np2numeric.items():
    if code == type:
      return np.dtype(npname)
def type_numeric2nc(type,ncver=4):
  '''
  Convert a Numeric typecode to a netcdf type name.
  '''
  # the string length is irrelevant for the nc name, so the default suffices
  dtype = type_numeric2numpy(type)
  return type_numpy2nc(dtype, ncver=ncver)
def type_nc2numeric(type,isstr=False):
  '''
  Convert a netcdf type name to a Numeric typecode.

  isstr (default False) is required because netcdf 3 CHAR may hold
  8-bit integers or strings.
  '''
  # strlen and isbool have no effect on the resulting typecode
  return type_numpy2numeric(type_nc2numpy(type, isstr=isstr))
def type_nc2pycdf(type):
  '''
  Convert a netcdf type name to the pycdf type number:
    byte   pycdf.NC.BYTE   = 1
    char   pycdf.NC.CHAR   = 2
    short  pycdf.NC.SHORT  = 3
    int    pycdf.NC.INT    = 4
    float  pycdf.NC.FLOAT  = 5
    double pycdf.NC.DOUBLE = 6
  Unknown names yield None.
  '''
  name = type.lower()
  if name in pycdftypes:
    return pycdftypes.index(name) + 1
def type_pycdf2nc(num):
  '''Convert a pycdf type number back to its netcdf type name.'''
  idx = num - 1
  return pycdftypes[idx]
def type_var2numpy(v):
  '''
  Numpy dtype inferred from python data values.
  Ex: type_var2numpy([1,2,3]), type_var2numpy('a')
  '''
  arr = np.asarray(v)
  return arr.dtype
def type_var2numeric(v):
  '''
  Numeric typecode inferred from python data values.
  Ex: type_var2numeric([1,2,3]), type_var2numeric('a')
  '''
  dtype = type_var2numpy(v)
  return type_numpy2numeric(dtype)
def type_var2nc(v,ncver=4):
  '''Netcdf type name inferred from python data values.'''
  return type_numpy2nc(type_var2numpy(v), ncver=ncver)
def type_var2pycdf(v):
  '''Pycdf type code inferred from python data values.'''
  # pycdf only understands netcdf 3 types
  return type_nc2pycdf(type_var2nc(v, ncver=3))
def type_2dtype(type,**kargs):
  '''Convert a Numeric typecode or netcdf type name to a numpy dtype;
  numpy type names are also accepted. Returns False for other inputs.

  kargs:
    strlen: 1, when converting from a Numeric character typecode.
    isstr: False, netcdf type CHAR may be used as numpy S1 in netcdf 3.
    isbool: False, when converting nc to numpy, the type BYTE may be
      seen as numpy boolean.
  '''
  strlen = kargs.get('strlen', 1)
  isstr  = kargs.get('isstr', False)
  isbool = kargs.get('isbool', False)
  if isinstance(type, np.dtype):
    return type
  if not isinstance(type, basestring):
    return False
  if len(type) == 1:
    # a single character is a Numeric typecode
    return type_numeric2numpy(type, strlen=strlen)
  if type in numpynames:
    # a numpy type name
    return np.dtype(type)
  # otherwise assume a netcdf type name
  return type_nc2numpy(type, strlen=strlen, isstr=isstr, isbool=isbool)
def type_2numpy(type,**kargs):
  '''
  Alias of type_2dtype; see that function for the accepted kargs.
  '''
  result = type_2dtype(type, **kargs)
  return result
def type_2nc(type,**kargs):
  '''
  Convert a Numeric typecode, numpy dtype or numpy type name to a
  netcdf type name; a netcdf type name is returned unchanged, and
  any other input yields False.

  kargs:
    ncver: 4, used when converting from a numpy name or dtype
  '''
  ncver = kargs.get('ncver', 4)
  if isinstance(type, np.dtype):
    return type_numpy2nc(type, ncver=ncver)
  if not isinstance(type, basestring):
    return False
  if len(type) == 1:
    # a single character is a Numeric typecode
    return type_numeric2nc(type, ncver=ncver)
  if type in numpynames:
    return type_numpy2nc(type, ncver=ncver)
  # already a netcdf type name
  return type
def type_2pycdf(type):
  '''
  Convert a Numeric typecode, numpy dtype, type name or netcdf type
  name to the pycdf type code.
  '''
  # pycdf only talks netcdf 3
  ncname = type_2nc(type, strlen=1, ncver=3)
  return type_nc2pycdf(ncname)
def type_2numeric(type,**kargs):
  '''
  Convert a numpy dtype, numpy type name or netcdf type name to a
  Numeric typecode; a Numeric typecode is returned unchanged, and
  any other input yields False.

  kargs:
    isstr: False, used when converting from a netcdf type name
      (netcdf 3 CHAR may be integers or strings)
  '''
  isstr = kargs.get('isstr', False)
  if isinstance(type,basestring):
    if len(type)==1: # numeric typecode
      return type
    elif type in numpynames:
      return type_numpy2numeric(type)
    else: # netcdf type name
      return type_nc2numeric(type,isstr=isstr)
  # bug fix: was `isinstance(type, nc.dtype)` — there is no `nc` in this
  # module (numpy is imported as np), so dtype inputs raised NameError
  elif isinstance(type,np.dtype):
    return type_numpy2numeric(type)
  else: return False
| [
"martalmeida@gmail.com"
] | martalmeida@gmail.com |
1265bb8736bd9b11afc120fcc3bdcb77428869ec | 29a4c1e436bc90deaaf7711e468154597fc379b7 | /modules/ieee/doc2/nextpow2.py | 7c5fcf15c55e96875561be4f21550ed813ecbc7a | [
"BSL-1.0"
] | permissive | brycelelbach/nt2 | 31bdde2338ebcaa24bb76f542bd0778a620f8e7c | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | refs/heads/master | 2021-01-17T12:41:35.021457 | 2011-04-03T17:37:15 | 2011-04-03T17:37:15 | 1,263,345 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,923 | py | [{'functor': {'arity': '1',
'call_types': [],
'ret_arity': '0',
'rturn': {'default': 'typename nt2::meta::as_integer<typename boost::result_of<nt2::meta::floating(T)>::type, signed>::type'},
'type_defs': [],
'types': ['real_', 'unsigned_int_', 'signed_int_']},
'unit': {'global_header': {'first_stamp': 'modified by jt the 04/12/2010',
'included': [],
'notes': [],
'ranges': {'real_': [['T(-10)', 'T(10)']],
'signed_int_': [['-100', '100']],
'unsigned_int_': [['0', '100']]},
'specific_values': {'default': {},
'real_': {'nt2::Inf<T>()': 'nt2::Zero<r_t>()',
'nt2::Minf<T>()': 'nt2::Zero<r_t>()',
'nt2::Mone<T>()': 'nt2::Zero<r_t>()',
'nt2::One<T>()': 'nt2::Zero<r_t>()',
'nt2::Zero<T>()': 'nt2::Zero<r_t>()'},
'signed_int_': {'nt2::Mone<T>()': 'nt2::Zero<r_t>()',
'nt2::One<T>()': 'nt2::Zero<r_t>()',
'nt2::Zero<T>()': 'nt2::Zero<r_t>()'},
'unsigned_int_': {'nt2::One<T>()': 'nt2::Zero<r_t>()',
'nt2::Zero<T>()': 'nt2::Zero<r_t>()'}},
'stamp': 'modified by jt the 12/12/2010',
'verif_test': {}}},
'version': '0.1'}] | [
"jtlapreste@gmail.com"
] | jtlapreste@gmail.com |
5a4d8c674b599a2c01fdc8fd795bf0ea39b3d9b4 | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/orderly_queue.py | a373cf1f75c26e6261bdd30af8d0855a2660bb45 | [] | no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 1,048 | py | # https://leetcode.com/problems/orderly-queue/
"""
ou are given a string s and an integer k. You can choose one of the first k letters of s and append it at the end of the string..
Return the lexicographically smallest string you could have after applying the mentioned step any number of moves.
Example 1:
Input: s = "cba", k = 1
Output: "acb"
Explanation:
In the first move, we move the 1st character 'c' to the end, obtaining the string "bac".
In the second move, we move the 1st character 'b' to the end, obtaining the final result "acb".
Example 2:
Input: s = "baaca", k = 3
Output: "aaabc"
Explanation:
In the first move, we move the 1st character 'b' to the end, obtaining the string "aacab".
In the second move, we move the 3rd character 'c' to the end, obtaining the final result "aaabc".
Constraints:
1 <= k <= s.length <= 1000
s consist of lowercase English letters.
"""
def orderly_queue(s: str, k: int) -> str:
    """Return the lexicographically smallest string reachable by repeatedly
    moving one of the first k characters of s to the end."""
    if k > 1:
        # with k >= 2 any permutation is reachable, so the answer is the sorted string
        return "".join(sorted(s))
    # with k == 1 only pure rotations are reachable; pick the minimal one
    best = s
    for i in range(1, len(s)):
        rotated = s[i:] + s[:i]
        if rotated < best:
            best = rotated
    return best
| [
"tyivanwu@gmail.com"
] | tyivanwu@gmail.com |
0270e9279a918710ee3c169034fee4444e0cf5c2 | 21e0aec25b83feccb3d1dd0e8970fd71971b969a | /main.py | 49b9b47ebbe57208492719c0118f09b3e2039fd3 | [] | no_license | Estex-cyber/battery_check | f8974a0510ad22b06289fb8b245fde48cd7b2116 | 883d053155b27a113f1982c702ab06faf137b9ce | refs/heads/main | 2023-04-03T03:08:11.231774 | 2021-03-24T08:06:40 | 2021-03-24T08:06:40 | 347,199,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | import psutil
# Report the battery state and warn when the charge runs low (requires psutil).
battery = psutil.sensors_battery()

if battery.power_plugged:
    print("Charging", battery.percent, "%")
else:
    print("Not charging", battery.percent, "%")
    print("Discharge time", int(battery.secsleft), "sec left")

# int(cond) or int(cond) idioms replaced with a plain comparison
if battery.percent <= 10:
    if not battery.power_plugged:
        # fix: this string literal was unterminated ( "%) ), a SyntaxError
        print("battery is low!", battery.percent, "%")
        if battery.percent <= 7:
            # fix: stray tokens (`I will`) had been pasted into this call
            print("**Warning**", battery.percent, "%", "battery is very low!")
            quit()
    else:
        print("Continue charging")
"noreply@github.com"
] | noreply@github.com |
ee6de89868cf2316edb693e794d43ca2ce025e0c | 4866bce3ef5b481849c31b1d74001f3af3525f9e | /todoapp/todoapp/urls.py | 452e6fa67cb0d9af25eae81cea8e7e008e3584d3 | [] | no_license | meliketakan/my-first-blog | 9ad63c821dc179f9ede5eb13d2f014e92e70524a | 33233f6e763c77879339c86faab0f89446cf4d58 | refs/heads/master | 2023-05-30T00:12:52.814151 | 2021-06-18T08:27:07 | 2021-06-18T08:27:07 | 372,640,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | """todoapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route the Django admin site and delegate every other URL to the todoapp1 app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('todoapp1.urls')),
]
| [
"meliketakan0@gmail.com"
] | meliketakan0@gmail.com |
969ff18c3b0c3ebd06ccfc2dc0dfe97216e6a725 | 6a47ec6800610ea93479f91505e73a3eb4f34ae0 | /user/serviced.py | 74e25a9df84b86e320e670d436afb861e42769b5 | [] | no_license | risification/queue_project | 1158aac7bae3b04f98c106c23c27281c96bcaf41 | e85f9f2d1835f10a0247a569f88d4cb29803538a | refs/heads/master | 2023-04-26T08:05:25.573243 | 2021-06-07T21:03:47 | 2021-06-07T21:03:47 | 374,119,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | from django.contrib.auth.models import User
from django.core.mail import EmailMessage
def mailing(username):
email_list = []
obj = User.objects.filter(is_superuser=True)
for user in obj:
email_list.append(user.email)
subjects = 'hi'
body = f'User with {username} register in database, pls check him !'
email = EmailMessage(subject=subjects, body=body, to=email_list)
email.send()
def validate_password(password):
    """Return True when `password` is at least 8 characters long and
    contains both digits and letters.

    Bug fix: the original tested `password.isdigit() and password.isalpha()`;
    str.isdigit() requires ALL characters to be digits and str.isalpha()
    requires ALL to be letters, so both can never hold at once and the
    function always returned False.
    """
    if len(password) < 8:
        return False
    has_digit = any(ch.isdigit() for ch in password)
    has_alpha = any(ch.isalpha() for ch in password)
    return has_digit and has_alpha
| [
"sultangaziev01@bk.ru"
] | sultangaziev01@bk.ru |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.