blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb7c42230b22f0923e08c1027984a0c50e947b03 | fb71611fb25fc0aec8d8026a95e4c5c39b4f360e | /hq/my_excepthook/test_excepthool_package/hq_excepthook.py | 4fc03a0617045c66fddab2a291a838946ffa70ad | [] | no_license | jwzhoui/urllib1 | 16aaf7f49dd1d48f0d02fbba0268d56a84dae2d2 | afed096587dedc8da6755a8939ad9ee83d1ee519 | refs/heads/master | 2020-03-25T17:28:26.995286 | 2018-11-08T10:15:23 | 2018-11-08T10:15:23 | 143,979,127 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # encoding: utf-8
import sys
import time
import traceback
from multiprocessing import Process
# Make the project root importable so the absolute `hq.` import below resolves.
sys.path.append('/opt/space/urllib1/')
def quiet_errors(*args,**kwargs):
    """Global excepthook replacement (Python 2 file).

    Invoked by the interpreter with (exc_type, exc_value, traceback);
    formats the exception, prints it, and pushes it to Redis.
    """
    err = ''.join(traceback.format_exception(*args,**kwargs))
    print err
    # RedisCache.inset_exc_to_redis(err)
    # Imported lazily so the Redis dependency is only touched when an
    # exception actually fires.
    from hq.my_excepthook.test_excepthool_package.redis_cache import def_inset_exc_to_redis
    def_inset_exc_to_redis(err)
# Override of multiprocessing.Process.run, kept for reference (disabled):
# def Process_run(self):
#     try:
#         if self._target:
#             self._target(*self._args, **self._kwargs)
#     except Exception:
#         print 'caught a multiprocessing exception'
#         quiet_errors()
#         raise
#========
# General case: install the hook as the global exception handler.
sys.excepthook = quiet_errors
sys.__excepthook__ = quiet_errors
# Multiprocessing capture (disabled):
# Process.run = Process_run
| [
"cclina@isoftstone.com"
] | cclina@isoftstone.com |
f45845f3775295ca40384d997d67d25854ded28e | 72a3977adc460ec70d0ebd8177cea1bb22a06cbf | /src/Classes/TennisSet.py | 5a0109b5e881dab41bb21cd34e6c496b92627d7c | [] | no_license | andreistaicu1/TennisScoreSimulator | 0ff6307ee1f1019a42fa70f5557cacb478c1d59e | 62fc0cc21ade84526190b2c840b4dea79bfb32d6 | refs/heads/master | 2023-07-23T05:37:00.521304 | 2021-09-01T20:09:48 | 2021-09-01T20:09:48 | 349,536,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,374 | py | from src.Classes.TennisGame import *
from src.Classes.TennisTiebreak import *
class TennisSet:
    """One tennis set, played as a sequence of TennisGame objects with an
    optional TennisTiebreak when the score reaches set_length games all."""

    def __init__(self, set_length, player1, player2, ad, serving, will_breaker):
        """
        :param set_length: int - When the set stops
        :param player1: Players object - player 1
        :param player2: Players object - player 2
        :param ad: Boolean - true if there are ads
        :param serving: Boolean - true if player 1 serves first
        :param will_breaker: Boolean - True if there is a breaker in the last set
        """
        self.set_length = set_length
        self.ad = ad
        self.player_array = [player1, player2]
        self.serving = serving
        self.will_breaker = will_breaker
        # Games won in this set by player 1 / player 2.
        self.player1G = 0
        self.player2G = 0
        # Played TennisGame / TennisTiebreak objects, in order.
        self.data = []
        # Summary dict filled by compile() for text output.
        self.toText = {}
        # 0 = undecided, 1 = player 1 won, 2 = player 2 won.
        self.winner = 0
        self.setOver = False
        self.tiebreaker = False

    def play_set(self):
        """
        Plays the set on its own
        :return: nothing
        """
        while not self.setOver:
            # A tiebreak is due once both players reach set_length games.
            self.tiebreaker = self.player1G == self.player2G and self.player1G == self.set_length
            if self.tiebreaker and self.will_breaker:
                new_tiebreak = TennisTiebreak(self.player_array[0], self.player_array[1], True, 7)
                new_tiebreak.play_breaker()
                self.iterate_set_breaker(new_tiebreak)
            else:
                new_game = TennisGame(self.player_array[0], self.player_array[1], self.serving, self.ad)
                new_game.play_game()
                self.iterate_set_game(new_game)
            # Serve alternates every game (and after a tiebreak as well).
            self.serving = not self.serving

    def iterate_set_game(self, current_game):
        """
        Given a game, updates internal data
        :param current_game: TennisGame object - game to be played
        :return: nothing
        """
        # When a tiebreak is being played, regular-game bookkeeping is skipped.
        if self.tiebreaker and self.will_breaker:
            return
        game_winner = current_game.winner
        if game_winner == 0:
            # A drawn/aborted game ends the set without a winner.
            self.setOver = True
        elif game_winner == 1:
            self.player1G += 1
        else:
            self.player2G += 1
        self.data.append(current_game)
        # The set ends when a player reaches set_length with a 2-game margin.
        if self.player1G >= self.set_length or self.player2G >= self.set_length:
            if self.player1G - self.player2G > 1:
                self.winner = 1
                self.setOver = True
            elif self.player2G - self.player1G > 1:
                self.winner = 2
                self.setOver = True

    def iterate_set_breaker(self, current_breaker):
        """
        Given a tiebreaker updates internal data
        :param current_breaker: TennisTiebreak object
        :return: nothing
        """
        if self.tiebreaker and self.will_breaker:
            self.winner = current_breaker.winner
            if self.winner == 0:
                self.setOver = True
            elif self.winner == 1:
                self.player1G += 1
            else:
                self.player2G += 1
            self.data.append(current_breaker)
            # A tiebreak always decides the set.
            self.setOver = True

    def compile(self):
        """
        Consolidates internal data in a dictionary that can be easily printed to a text file
        :return: nothing
        """
        self.toText['serving'] = self.serving
        self.toText['will_breaker'] = self.will_breaker
| [
"55329808+andreistaicu1@users.noreply.github.com"
] | 55329808+andreistaicu1@users.noreply.github.com |
e32442bb51f8c48f5ac0d4ec09fa256a766c99f4 | 053b48ff879d73e4cd8f507e1cdc2aea6431c8d9 | /pythonYing/week03/exceise线程/p18_ProcessVsThread.py | 12af688f98d553f89f8d16007db73124a4e9a1d2 | [] | no_license | Masonnn/ApiTest | cb37c8741ffa0474d0ce000dad66b02569e10342 | e9f30b9f0e74eb22489c02682e33ee8bf7a87bbf | refs/heads/master | 2022-12-16T17:44:24.219411 | 2020-09-06T16:37:30 | 2020-09-06T16:37:30 | 249,369,612 | 0 | 0 | null | 2022-12-08T11:29:17 | 2020-03-23T08:02:38 | HTML | UTF-8 | Python | false | false | 1,847 | py | # process vs thread
import multiprocessing as mp
def job(q):
    """Worker task: push the sum of i + i**2 + i**3 for i in [0, 1e6) onto q."""
    q.put(sum(i + i ** 2 + i ** 3 for i in range(1000000)))
# 多核
def multicore():
    """Run job() in two separate processes and print the combined result."""
    results = mp.Queue()
    workers = [mp.Process(target=job, args=(results,)) for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    # Each worker put one partial sum on the queue.
    print("multicore", results.get() + results.get())
# 创建多线程mutithread
# 接下来创建多线程程序,创建多线程和多进程有很多相似的地方。
# 首先import threading然后定义multithread()完成同样的任务
import threading as td
def multithread():
    """Run job() in two threads and print the combined result.

    The threads share the same multiprocessing.Queue type as the
    process-based version.
    """
    results = mp.Queue()
    workers = [td.Thread(target=job, args=(results,)) for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print("multiThread", results.get() + results.get())
# 创建普通函数
def normal():
    """Sequential baseline: do the same workload as two job() calls, then print."""
    total = 0
    for _ in range(2):
        total += sum(i + i ** 2 + i ** 3 for i in range(1000000))
    print('normal: ', total)
# In the example above the two processes/threads each ran job() once,
# so the sequential baseline normal() loops the workload twice as well.
# Timing comparison driver:
import time

if __name__ == "__main__":
    st = time.time()
    normal()
    st1 = time.time()
    print('normal time:', st1 - st)
    multithread()
    st2 = time.time()
    print('multithread time:', st2 - st1)
    multicore()
    print('multicore time:', time.time() - st2)

# Observed runtimes were roughly 1.41 s (normal), 1.47 s (multithread)
# and 0.75 s (multicore): multiprocessing wins because it runs on several
# cores, while multithreading is slightly slower than doing nothing
# special at all — the GIL prevents CPU-bound threads from running in
# parallel.
"lixq@weilaicheng.com"
] | lixq@weilaicheng.com |
684bd46fe2d2944e322c3a422ed060208afcb062 | 96cbdf4762cdee018522b2b1e55e33ca1e4d1fef | /pyimagesearch/transform.py | 3e5f7d79efbdc5bc77f7ecce6e6064640cfe4ac9 | [] | no_license | JoeHowarth/GoScanner | 09ad65c7ba422780d153b7e1ba4292554631f19e | f214819df8f29b73238ea0762c69be8ca01f1c35 | refs/heads/master | 2021-09-14T11:22:17.151850 | 2018-05-12T17:25:14 | 2018-05-12T17:25:14 | 106,126,339 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # import the necessary packages
import numpy as np
import cv2
def order_points(pts):
    """Return 4 points ordered (top-left, top-right, bottom-right, bottom-left).

    Heuristic: the top-left corner minimises x+y, the bottom-right
    maximises x+y, the top-right minimises y-x and the bottom-left
    maximises y-x.
    """
    ordered = np.zeros((4, 2), dtype="float32")

    coord_sum = pts.sum(axis=1)
    ordered[0] = pts[np.argmin(coord_sum)]   # top-left: smallest x+y
    ordered[2] = pts[np.argmax(coord_sum)]   # bottom-right: largest x+y

    coord_diff = np.diff(pts, axis=1)
    ordered[1] = pts[np.argmin(coord_diff)]  # top-right: smallest y-x
    ordered[3] = pts[np.argmax(coord_diff)]  # bottom-left: largest y-x

    return ordered
def four_point_transform(image, pts, square=False):
    """Warp the quadrilateral described by `pts` to a top-down rectangle.

    :param image: source image (array as used by OpenCV)
    :param pts: four corner points in any order
    :param square: when True, force a square output (max of width/height)
    :return: the perspective-corrected ("birds eye view") image
    """
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def _edge_len(p, q):
        # Euclidean distance between two corner points.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output size: the longest of each pair of opposing edges.
    out_w = max(int(_edge_len(br, bl)), int(_edge_len(tr, tl)))
    out_h = max(int(_edge_len(tr, br)), int(_edge_len(tl, bl)))
    if square == True:
        out_w = out_h = max(out_w, out_h)

    # Destination corners in the same TL/TR/BR/BL order as order_points().
    dst = np.array([
        [0, 0],
        [out_w - 1, 0],
        [out_w - 1, out_h - 1],
        [0, out_h - 1]], dtype = "float32")

    # Map the source quad onto the destination rectangle and warp.
    M = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, M, (out_w, out_h))
| [
"josephehowarth@gmail.com"
] | josephehowarth@gmail.com |
9b67e572e5da326df863a9d611e338df54b02395 | 8132e93c74cb8c541ee037b5c85deb38af6b653a | /list_1/exercise_5.py | 3c0af37d58907e91f76d1e7dace2cdafa9a190f6 | [] | no_license | piotrkawa/data-mining | 3a6cdacd4c1b059a469a6a3186eed0694d0a3b2c | c788365eeef801450b0f6bc7ea889e1484e9b53c | refs/heads/master | 2022-04-03T11:08:50.896421 | 2020-02-11T16:48:47 | 2020-02-11T16:48:47 | 212,835,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | from wordcloud import WordCloud
import utility
import itertools
import nltk
import pdb
from nltk.corpus import stopwords as nltk_stopwords
import string
from preprocessing import preprocess_text
def create_pairs(words):
    """Map each word to an initial (word, 1) count pair."""
    return [(w, 1) for w in words]
def group_words(pairs):
    """Collapse (word, 1) pairs into (word, count) tuples, most frequent first.

    Note: `pairs` is sorted in place before grouping.
    """
    pairs.sort()  # groupby needs equal words to be adjacent
    counted = [(word, sum(1 for _ in grp))
               for word, grp in itertools.groupby(pairs, key=lambda p: p[0])]
    counted.sort(key=lambda p: p[1], reverse=True)
    return counted
if __name__ == '__main__':
    # Build a word cloud from the word frequencies of a preprocessed book.
    book = utility.get_text_file_as_list('shrek.txt')
    words = preprocess_text(book)
    pairs = create_pairs(words)
    grouped_words = group_words(pairs)
    wc = WordCloud(background_color="white", max_words=2000, contour_width=3, contour_color='steelblue')
    # Skip the 15 most frequent words (likely boilerplate), feed the rest.
    wc.generate_from_frequencies(dict(grouped_words[15:]))
    wc.to_file('clouds/book.png')
    # NOTE(review): debugger breakpoint left in — remove for unattended runs.
    pdb.set_trace()
pdb.set_trace() | [
"piotr.w.kawa@gmail.com"
] | piotr.w.kawa@gmail.com |
be63e415ecf5e1d3a8f53e768d4c23c1d1643511 | cca21b0ddca23665f886632a39a212d6b83b87c1 | /virtual/classroom/views.py | 07712f42f10a68880ba8e8500e4a6784453a72e1 | [] | no_license | siumhossain/classroom | a8926621456d1e7ed77387fb8a5851825771a9d9 | 4afe9cdee2c58b71bd3711b042eae3f86172eaea | refs/heads/master | 2023-02-02T08:28:14.958761 | 2020-12-24T14:58:59 | 2020-12-24T14:58:59 | 323,007,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,300 | py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView,DeleteView
from .models import Course
from django.contrib.auth.mixins import LoginRequiredMixin,PermissionRequiredMixin
from django.shortcuts import redirect, get_object_or_404
from django.views.generic.base import TemplateResponseMixin,View
from .forms import ModuleFormSet
from django.forms.models import modelform_factory
from django.apps import apps
from .models import Module, Content
from braces.views import CsrfExemptMixin, JsonRequestResponseMixin
from django.db.models import Count
from .models import Subject
from django.views.generic.detail import DetailView
from students.forms import CourseEnrollForm
# Create your views here.
from django.views.generic.list import ListView
from .models import Course
class ManageCourseListView(ListView):
    """List the current user's courses.

    NOTE(review): shadowed by later redefinitions below (tutorial-style
    progressive rewrites); only the final definition is effective.
    """
    model = Course
    template_name = 'courses/manage/course/list.html'
    def get_queryset(self):
        # Restrict the listing to courses owned by the requesting user.
        qs = super().get_queryset()
        return qs.filter(owner=self.request.user)
class OwnerMixin(object):
    """Restrict any CBV queryset to objects owned by the requesting user."""
    def get_queryset(self):
        # Delegate to the next class in the MRO, then filter by ownership.
        return super().get_queryset().filter(owner=self.request.user)
class OwnerEditMixin(object):
    """Stamp the current user as `owner` when a model form validates."""
    def form_valid(self, form):
        # Attach ownership before the framework saves the instance.
        form.instance.owner = self.request.user
        return super().form_valid(form)
class OwnerCourseMixin(OwnerMixin):
    """Shared config for owner-scoped Course views.

    NOTE(review): redefined further down with auth mixins added; the later
    definition wins.
    """
    model = Course
    fields = ['subject', 'title', 'slug', 'overview']
    success_url = reverse_lazy('manage_course_list')
class OwnerCourseEditMixin(OwnerCourseMixin, OwnerEditMixin):
    # Shared form template for course create/update views.
    template_name = 'courses/manage/course/form.html'
class ManageCourseListView(OwnerCourseMixin, ListView):
    """Owner's course list. NOTE(review): shadowed by a later redefinition."""
    template_name = 'courses/manage/course/list.html'
class CourseCreateView(OwnerCourseEditMixin, CreateView):
    """Create a course. NOTE(review): shadowed by a later redefinition."""
    pass
class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
    """Edit a course. NOTE(review): shadowed by a later redefinition."""
    pass
class CourseDeleteView(OwnerCourseMixin, DeleteView):
    """Delete a course. NOTE(review): shadowed by a later redefinition."""
    template_name = 'courses/manage/course/delete.html'
class OwnerCourseMixin(OwnerMixin,LoginRequiredMixin,PermissionRequiredMixin):
    """Owner-scoped Course config with login and permission enforcement.

    Replaces the earlier OwnerCourseMixin definition above.
    """
    model = Course
    fields = ['subject', 'title', 'slug', 'overview']
    success_url = reverse_lazy('manage_course_list')
class ManageCourseListView(OwnerCourseMixin, ListView):
    """Effective definition: owner's course list, requires view permission."""
    template_name = 'courses/manage/course/list.html'
    permission_required = 'courses.view_course'
class CourseCreateView(OwnerCourseEditMixin, CreateView):
    """Effective definition: create a course, requires add permission."""
    permission_required = 'courses.add_course'
class CourseUpdateView(OwnerCourseEditMixin, UpdateView):
    """Effective definition: edit a course, requires change permission."""
    permission_required = 'courses.change_course'
class CourseDeleteView(OwnerCourseMixin, DeleteView):
    """Effective definition: delete a course, requires delete permission."""
    template_name = 'courses/manage/course/delete.html'
    permission_required = 'courses.delete_course'
class CourseModuleUpdateView(TemplateResponseMixin, View):
    """Add/edit/delete the modules of one course via an inline formset."""
    template_name = 'courses/manage/module/formset.html'
    # Course being edited; resolved once per request in dispatch().
    course = None
    def get_formset(self, data=None):
        """Build the module formset bound to self.course (POST data optional)."""
        return ModuleFormSet(instance=self.course,data=data)
    def dispatch(self, request, pk):
        # Resolve the course up front so both get() and post() can use it;
        # filtering by owner doubles as an authorisation check.
        self.course = get_object_or_404(Course,id=pk,owner=request.user)
        return super().dispatch(request, pk)
    def get(self, request, *args, **kwargs):
        """Render an empty (unbound) formset for the course's modules."""
        formset = self.get_formset()
        return self.render_to_response({'course': self.course,'formset': formset})
    def post(self, request, *args, **kwargs):
        """Validate and persist the submitted formset, or re-render with errors."""
        formset = self.get_formset(data=request.POST)
        if formset.is_valid():
            formset.save()
            return redirect('manage_course_list')
        return self.render_to_response({'course': self.course,'formset': formset})
class ContentCreateUpdateView(TemplateResponseMixin, View):
    """Create or update one content item (text/video/image/file) of a module.

    URL parameters: `module_id` selects the owning module, `model_name`
    selects the content type, and an optional `id` selects an existing
    item to edit.
    """
    module = None    # owning Module; resolved in dispatch()
    model = None     # concrete content model class; resolved in dispatch()
    obj = None       # existing content instance when updating, else None
    template_name = 'courses/manage/content/form.html'

    def get_model(self, model_name):
        """Resolve a content model class from its lowercase name, or None."""
        if model_name in ['text', 'video', 'image', 'file']:
            return apps.get_model(app_label='courses', model_name=model_name)
        return None

    def get_form(self, model, *args, **kwargs):
        """Build a ModelForm for `model`, hiding the bookkeeping fields."""
        Form = modelform_factory(model, exclude=['owner', 'order', 'created', 'updated'])
        return Form(*args, **kwargs)

    def dispatch(self, request, module_id, model_name, id=None):
        self.module = get_object_or_404(Module, id=module_id, course__owner=request.user)
        # Bug fix: this previously called self.get_mode(), a method that
        # does not exist, raising AttributeError on every request.
        self.model = self.get_model(model_name)
        if id:
            self.obj = get_object_or_404(self.model, id=id, owner=request.user)
        return super().dispatch(request, module_id, model_name, id)

    def get(self, request, module_id, model_name, id=None):
        """Render an empty form (create) or a bound form (update)."""
        form = self.get_form(self.model, instance=self.obj)
        return self.render_to_response({'form': form, 'object': self.obj})

    def post(self, request, module_id, model_name, id=None):
        """Validate and save the form; wrap brand-new items in a Content row."""
        form = self.get_form(self.model, instance=self.obj,
                             data=request.POST, files=request.FILES)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.owner = request.user
            obj.save()
            if not id:
                # new content
                Content.objects.create(module=self.module, item=obj)
            return redirect('module_content_list', self.module.id)
        return self.render_to_response({'form': form, 'object': self.obj})
class ContentDeleteView(View):
    """Delete a Content row together with its underlying item object."""
    def post(self, request, id):
        # Filtering through module -> course -> owner enforces authorisation.
        obj = get_object_or_404(Content,
                                id=id,
                                module__course__owner=request.user)
        parent_module = obj.module
        # Remove the concrete item (text/video/...) first, then its wrapper.
        obj.item.delete()
        obj.delete()
        return redirect('module_content_list', parent_module.id)
class ModuleContentListView(TemplateResponseMixin, View):
    """Show the content items of one module owned by the current user."""
    template_name = 'courses/manage/module/content_list.html'
    def get(self, request, module_id):
        # Owner filter doubles as an authorisation check.
        module = get_object_or_404(Module,id=module_id,course__owner=request.user)
        return self.render_to_response({'module': module})
class ModuleOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """AJAX endpoint persisting drag-and-drop ordering of a course's modules."""
    def post(self, request):
        # Payload maps module id -> new order; owner filter authorises.
        for module_id, new_order in self.request_json.items():
            Module.objects.filter(
                id=module_id,
                course__owner=request.user,
            ).update(order=new_order)
        return self.render_json_response({'saved': 'OK'})
class ContentOrderView(CsrfExemptMixin, JsonRequestResponseMixin, View):
    """AJAX endpoint persisting drag-and-drop ordering of module contents."""
    def post(self, request):
        # Payload maps content id -> new order; owner filter authorises.
        for content_id, new_order in self.request_json.items():
            Content.objects.filter(
                id=content_id,
                module__course__owner=request.user,
            ).update(order=new_order)
        return self.render_json_response({'saved': 'OK'})
class CourseListView(TemplateResponseMixin, View):
    """Public catalogue: subjects with course counts, courses with module
    counts, optionally filtered to one subject by slug."""
    model = Course
    template_name = 'courses/course/list.html'
    def get(self, request, subject=None):
        # Annotate counts so templates can render them without extra queries.
        subjects = Subject.objects.annotate(total_courses=Count('courses'))
        courses = Course.objects.annotate(total_modules=Count('modules'))
        if subject:
            subject = get_object_or_404(Subject, slug=subject)
            courses = courses.filter(subject=subject)
        return self.render_to_response({'subjects': subjects,'subject': subject,'courses': courses})
class CourseDetailView(DetailView):
    """Course overview page with an enrolment form pre-bound to the course."""
    model = Course
    template_name = 'courses/course/detail.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Pre-select the current course in the enrol form's hidden field.
        context['enroll_form'] = CourseEnrollForm(initial={'course':self.object})
        return context
"sium.hossain@yahoo.com"
] | sium.hossain@yahoo.com |
2f74ae3f7caac57b707a98584b6bdd4a40ded6f8 | fd1dba8223ad1938916369b5eb721305ef197b30 | /AtCoder/ABC/abc110/abc110c.py | b19744afbe63b3698d7e3487b7f15813a0167d39 | [] | no_license | genkinanodesu/competitive | a3befd2f4127e2d41736655c8d0acfa9dc99c150 | 47003d545bcea848b409d60443655edb543d6ebb | refs/heads/master | 2020-03-30T07:41:08.803867 | 2019-06-10T05:22:17 | 2019-06-10T05:22:17 | 150,958,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | S = input()
T = input()
n = len(S)
X = [[] for _ in range(26)]
Y = [[] for _ in range(26)]
for i in range(n):
s = ord(S[i]) - 97
t = ord(T[i]) - 97
X[s].append(i)
Y[t].append(i)
P = [tuple(x) for x in X]
Q = [tuple(y) for y in Y]
if set(P) == set(Q):
print('Yes')
else:
print('No')
| [
"s.genki0605@gmail.com"
] | s.genki0605@gmail.com |
75db47364399ab750443e3afc703e376ca016a3a | b0984bc483f0e082975abb7610b4b4508764731d | /pythondispatchms/websetup/schema.py | 35f6f96b61a9c4c8179ed07b9172c6753e6dc51e | [] | no_license | jordy33/python.dispatch.ms | 42dd7b0f1d76742881111a71d9f8a897a6932ddc | e576b565e953871d56ee2f29a25a6f88b8ab120c | refs/heads/master | 2020-09-23T06:10:14.817318 | 2019-12-02T16:51:20 | 2019-12-02T16:51:20 | 225,424,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | # -*- coding: utf-8 -*-
"""Setup the python.dispatch.ms application"""
from __future__ import print_function
from tg import config
import transaction
def setup_schema(command, conf, vars):
    """Create the application's database schema and stamp it for Alembic.

    Runs during `gearbox setup-app`: creates all SQLAlchemy tables, commits,
    then marks the fresh database as being at the latest migration revision
    so future `alembic upgrade` runs do not replay history.
    """
    # Load the models
    # <websetup.websetup.schema.before.model.import>
    from pythondispatchms import model
    # <websetup.websetup.schema.after.model.import>
    # <websetup.websetup.schema.before.metadata.create_all>
    print("Creating tables")
    model.metadata.create_all(bind=config['tg.app_globals'].sa_engine)
    # <websetup.websetup.schema.after.metadata.create_all>
    transaction.commit()
    print('Initializing Migrations')
    # Imported here (not at module top) so alembic is only required at setup time.
    import alembic.config
    alembic_cfg = alembic.config.Config()
    alembic_cfg.set_main_option("script_location", "migration")
    alembic_cfg.set_main_option("sqlalchemy.url", config['sqlalchemy.url'])
    import alembic.command
    # Record "head" as the current revision without running any migrations.
    alembic.command.stamp(alembic_cfg, "head")
| [
"jorgemacias@Jorges-Mac-mini.local"
] | jorgemacias@Jorges-Mac-mini.local |
1c1aa42436ef6a2988456af22d8dd172cb127e53 | b9c579de7fdca8de76e00b4a912450b338e53b41 | /Banner_Connections/Initialize_Oracle_Connection.py | 63f10562b1831380044a3bdd027ce81ccf795501 | [] | no_license | nmbenzo/ISSS_SMS_Prod | 5bd5ede93369bdd158230a5a993a0554fcb2ecaf | dd1797b6614077a716e6df464ba865b400fcca2c | refs/heads/master | 2022-12-22T04:41:27.276600 | 2019-08-21T22:38:46 | 2019-08-21T22:38:46 | 184,464,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py | import sys, os
import cx_Oracle
import traceback
import Banner_Connections.ODSP_Creds as creds
import Banner_Connections.queries as query
def banner_odsp_handler():
    """Create a cx_Oracle connection to the Banner ODS using stored credentials.

    :returns: a cx_Oracle Connection on success; on failure the formatted
        traceback *string* is returned instead — callers must check the
        returned type before using it as a connection.
    """
    host = creds.host
    port = creds.port
    sid = creds.sid
    username = creds.username
    password = creds.password
    try:
        dsn = cx_Oracle.makedsn(host, port, sid)
        connection = cx_Oracle.Connection("%s/%s@%s" % (username, password, dsn))
        return connection
    except cx_Oracle.DatabaseError as exc:
        error, = exc.args
        # Bug fix: `print(sys.stderr, ...)` printed the stream's repr to
        # stdout; route the diagnostics to stderr with the file= keyword.
        print("Oracle-Error-Code:", error.code, file=sys.stderr)
        print("Oracle-Error-Message:", error.message, file=sys.stderr)
        tb = traceback.format_exc()
        return tb
def banner_ODSP_tele(connection, query_name):
    """Run a telephone query against ODS and return joined number strings.

    Each fetched row must be an (area_code, number) pair; the pair is
    concatenated into one string per row. Both the cursor and the
    connection are closed before returning.
    """
    cursor = connection.cursor()
    cursor.execute(query_name)
    try:
        # Join each (area_code, number) pair into a single dial string.
        return [''.join((area_code, number)) for area_code, number in cursor]
    finally:
        cursor.close()
        connection.close()
def banner_ODSP_emails(connection, query_name):
    """Run an email query against ODS and return the result.

    Behaviour depends on which query is passed:
      * for the bulk query (query.active_emails) a list of addresses is
        returned, one per row;
      * for any other query the single-column rows are concatenated into
        one string (presumably a single-row lookup — confirm with callers).
    The cursor and the connection are closed before returning.
    """
    cursor = connection.cursor()
    cursor.execute(query_name)
    try:
        if query_name != query.active_emails:
            # First column of each row, fused into one string.
            query_result = [email[0] for email in cursor]
            cleaned_email = ''.join(email for email in query_result)
            return cleaned_email
        else:
            # Bulk query: keep one address per row.
            query_result = [email[0] for email in cursor]
            return query_result
    finally:
        cursor.close()
        connection.close()
#if __name__ == "__main__":
# print(banner_ODSP_emails(banner_odsp_handler(), query.active_emails)) | [
"nmbenzo@gmail.com"
] | nmbenzo@gmail.com |
7d92969bf5b2b8ec520a58f55d136bb8b5bb5568 | 5bfe0ddb8953711886c10ba636d3eeba934f6ae9 | /6 zigzag.py | 3bbc6ee51018e565701f4d5b1171c6bc532d19c6 | [] | no_license | Alexis374/LeetCode | db1fd8348ff012a55582ee6c430a64308b0b9807 | 1b3ed37a87002891e55c627adb66e9a03e903061 | refs/heads/master | 2021-01-22T06:45:07.129877 | 2015-04-26T13:06:33 | 2015-04-26T13:06:33 | 30,817,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | '''
The string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)
P A H N
A P L S I I G
Y I R
And then read line by line: "PAHNAPLSIIGYIR"
Write the code that will take a string and make this conversion given a number of rows:
string convert(string text, int nRows);
convert("PAYPALISHIRING", 3) should return "PAHNAPLSIIGYIR".
边界情况是 只有一行的时候。
其他情况是存哈希表
'''
def convert(s, numRows):
    """LeetCode 6 "ZigZag Conversion": write s in a zigzag over numRows rows,
    then read it back row by row.

    :param s: input string
    :param numRows: number of zigzag rows (>= 1)
    :return: the zigzag-flattened string
    """
    # A single row is already its own zigzag; hoisting this guard out of
    # the loop also fixes the empty-string case uniformly.
    if numRows == 1:
        return s
    rows = {i: [] for i in range(numRows)}
    # Fix: removed a leftover Python-2 `print result_dict` debug statement
    # that broke Python 3 and polluted stdout.
    step = 1   # +1 while descending, -1 while ascending
    row = 0
    for ch in s:
        rows[row].append(ch)
        row += step
        if row == numRows:   # bounced off the bottom row
            step = -1
            row -= 2
        if row == -1:        # bounced off the top row
            step = 1
            row = 1
    return ''.join(''.join(rows[i]) for i in range(numRows))
# Quick manual check (Python 2 print statement): one row returns s unchanged.
print convert('ab',1)
"lijunchengbeyond@gmail.com"
] | lijunchengbeyond@gmail.com |
047d84d7faa0c90cede96c53c0dd72cc4dcd68c7 | 606f2f38c381365f1afd82dc54a558f980db93e1 | /python-tools/tests/test_main.py | fa7abf7130f6b0ca5b1a24acf5bcb37a50e81d60 | [
"MIT"
] | permissive | nymous/actions | a279e54614e8baf3b5d90fa4cecb5ddce469cd37 | 5f178fa8a2b93474f99e80a235c0107e4ca47096 | refs/heads/master | 2023-06-12T09:54:39.910459 | 2019-08-07T09:09:58 | 2019-08-07T09:09:58 | 197,952,701 | 0 | 1 | MIT | 2021-07-05T05:05:44 | 2019-07-20T16:04:32 | Dockerfile | UTF-8 | Python | false | false | 225 | py | import pytest
from .main import multiply
def test_multiply():
    """Unit tests for multiply(): signs, zero, and invalid operand types."""
    assert multiply(2, 3) == 6
    assert multiply(-3, 5) == -15
    assert multiply(0, 4) == 0
    # str * float is invalid, so a TypeError is expected.
    with pytest.raises(TypeError):
        multiply("abc", 1.2)
| [
"thomas.gaudin@centraliens-lille.org"
] | thomas.gaudin@centraliens-lille.org |
9d8b9c8ac32531b4807a6098f442b8edb15490aa | 5e9a65db32404fa4c788fd24abee5fa3d12b5b85 | /INV1_148events_TRUE_DH_2km_rm_sources/Kernels/Read_GP_adj_NZ_stations_sorted_Tape_flexwin_corrected_T_end.py | 46570529ccd630c09c4eb13caceec34992364911 | [] | no_license | ucgmsim/FWT_synthetic_test | 5c7b36550ffdba8d128acf8a80485788a0412219 | 94de9d0ff0528cff06ac3dc51ea423117edf14bb | refs/heads/master | 2020-05-02T09:34:05.907551 | 2020-02-25T02:01:09 | 2020-02-25T02:01:09 | 177,875,059 | 0 | 0 | null | 2019-03-26T22:00:09 | 2019-03-26T22:00:08 | null | UTF-8 | Python | false | false | 13,281 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 14:19:45 2018
@author: user
"""
import numpy as np
#import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
from scipy import signal
from qcore import timeseries
from scipy import integrate
import os
def readGP_2(loc, fname):
    """Read a seismogram text file in the Graves and Pitarka format.

    Layout: line 1 is a station header, line 2 holds metadata
    (num_pts dt _ _ shift ...), and each following line holds
    whitespace-separated sample values.

    :return: (samples as a 1-D numpy array, num_pts, dt, shift) — the
        three metadata values are returned as floats.
    """
    with open("/".join([loc, fname]), 'r') as f:
        lines = f.readlines()

    # Flatten all value rows (from line 3 onward) into one array.
    rows = [[float(tok) for tok in line.split()] for line in lines[2:]]
    samples = np.concatenate(rows)

    meta = lines[1].split()
    return samples, float(meta[0]), float(meta[1]), float(meta[4])
def computeFourier(accTimeSeries, dt, duration):
    """Fourier spectrum of an acceleration time series.

    :param accTimeSeries: 1-D numpy array of samples. WARNING: the final
        5% of the (possibly truncated) record is tapered IN PLACE, so the
        caller's array is mutated.
    :param dt: sample spacing in seconds
    :param duration: record length in seconds, used to size the FFT
    :return: (complex rfft values, corresponding frequency vector in Hz)
    """
    # TODO: compute original number of points (setting to default for now) change to npts = len(accTimeSeries)
    npts = len(accTimeSeries)
    npts_FFT = int(np.ceil(duration)/dt)
    # compute number of points for efficient FFT: next power of two >= npts_FFT
    ft_len = int(2.0 ** np.ceil(np.log(npts_FFT) / np.log(2.0)))
    if npts > ft_len:
        # Truncate the record when it is longer than the FFT window.
        accTimeSeries = accTimeSeries[:ft_len]
        npts = len(accTimeSeries)
    # Apply hanning taper to last 5% of motion (mutates the array in place).
    ntap = int(npts * 0.05)
    accTimeSeries[npts - ntap:] *= np.hanning(ntap * 2 + 1)[ntap + 1:]
    # increase time series length with zeroes for FFT
    accForFFT = np.pad(accTimeSeries, (0, ft_len - len(accTimeSeries)), 'constant', constant_values=(0,0))
    ft = np.fft.rfft(accForFFT)
    # compute frequencies at which fourier amplitudes are computed
    ft_freq = np.arange(0, ft_len / 2 + 1) * ( 1.0 / (ft_len * dt))
    return ft, ft_freq
def butter_bandpass(lowcut, highcut, fs, order):
    """Design a Butterworth band-pass filter.

    Corner frequencies (Hz) are normalised by the Nyquist frequency
    (fs / 2) as scipy.signal.butter expects.

    :return: (b, a) IIR filter coefficients
    """
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order):
    """Band-pass filter `data` with a causal Butterworth IIR filter."""
    coeffs = butter_bandpass(lowcut, highcut, fs, order=order)
    # lfilter applies the filter forward in time (phase-distorting).
    return lfilter(coeffs[0], coeffs[1], data)
######################################
def normpdf_python(x, mu, sigma):
    """Gaussian probability density N(mu, sigma) evaluated at x.

    Bug fix: the original exponent `-(x-mu)**2/2*sigma**2` multiplied by
    sigma**2 instead of dividing by (2*sigma**2); the two agree only when
    sigma == 1. (This helper is referenced solely by the commented-out
    source_adj_1 above, so the fix changes no live call sites.)
    """
    return 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
##############################################
#def source_adj_1(stat_data_S,stat_data_O,num_pts,dt,wd):
#
# """
# Measure TauP value and construct Jp signal
# """
# t = np.arange(num_pts)*dt
#
# ts=np.flip(-t[1:], axis=0)
# lTime = np.concatenate((ts,t), axis=0)
# w = normpdf_python(lTime, 0, 0.5)
# wp = w/max(w)
#
# stat_data_S = np.multiply(stat_data_S,wd)
# stat_data_O = np.multiply(stat_data_O,wd)
#
# Dis_S = np.cumsum(stat_data_S)*dt
# Dis_O = np.cumsum(stat_data_O)*dt
#
# Dis_S = np.multiply(signal.tukey(int(num_pts),0.1),Dis_S)
# Dis_O = np.multiply(signal.tukey(int(num_pts),0.1),Dis_O)
#
## Dis_S = signal.detrend(Dis_S)
## Dis_O = signal.detrend(Dis_O)
#
# corr=np.correlate(Dis_O,Dis_S,"full")
# wx=np.multiply(wp,corr)
#
# In=np.argmax(wx)
# TauP=lTime[In]
#
# Jp_inv_norm=1/dt*np.sum(np.multiply(stat_data_S,stat_data_S))
# Jp = TauP*np.flip(stat_data_S, axis=0)*Jp_inv_norm
#
# Source = Jp
#
# return Source
##############################################
def source_adj_ncc(stat_data_Sf, stat_data_Of, num_pts, delta_T, dt):
    """Adjoint source from the optimal cross-correlation time shift.

    Scans trial shifts in [-delta_T, delta_T), finds the shift that
    maximises the normalised correlation coefficient between the
    synthetic (stat_data_Sf) and observed (stat_data_Of) traces, and
    returns the traveltime adjoint source (time-reversed synthetic
    scaled by the shift and the synthetic-energy normalisation).

    Bug fix: the Simpson-integration abscissa `t` was previously read
    from a module-level global that is not defined in this file; it is
    now built locally from num_pts and dt.
    """
    t = np.arange(num_pts) * dt  # time axis for the integrations

    # scipy renamed simps -> simpson; support both without new imports.
    _simpson = getattr(integrate, "simpson", None) or getattr(integrate, "simps")

    num_delta_t = int(delta_T / dt)
    trial_shifts = np.arange(-num_delta_t, num_delta_t) * dt
    ncc = np.zeros(len(trial_shifts))

    for it in range(len(trial_shifts)):
        shifted = np.zeros(num_pts)
        n_shift = int(np.abs(trial_shifts[it] / dt))
        if trial_shifts[it] < 0:
            # Negative shift: advance the observed trace.
            shifted[0:num_pts - n_shift] = stat_data_Of[n_shift:num_pts]
        else:
            # Positive shift: delay the observed trace.
            shifted[n_shift:num_pts] = stat_data_Of[0:num_pts - n_shift]

        cross = _simpson(np.multiply(stat_data_Sf, shifted), t)
        auto_s = _simpson(np.square(stat_data_Sf), t)
        auto_o = _simpson(np.square(shifted), t)
        ncc[it] = cross / ((auto_s * auto_o) ** 0.5)

    # Shift that maximises the normalised correlation coefficient.
    td_max = trial_shifts[np.argmax(ncc)]

    Jp_inv_norm = 1 / dt * np.sum(np.multiply(stat_data_Sf, stat_data_Sf))
    Source = -td_max * np.flip(stat_data_Sf, axis=0) * Jp_inv_norm
    return Source
def write_adj_source(s1,v1,mainfolder,mainfolder_source,source):
    """Write an adjoint-source trace in Graves-Pitarka text layout.

    Copies the two header lines from the template seismogram `s1` found in
    `mainfolder`, then writes `source` six values per line (fixed-width
    %10f) to `mainfolder_source` + `v1`.
    """
    # Reuse the template seismogram's two-line header verbatim.
    with open("/".join([mainfolder, s1]), 'r') as f:
        lines = f.readlines()
    tline1= lines[0]
    tline2= lines[1]
    filename1=mainfolder_source+v1
    print(filename1)
    fid = open(filename1,'w')
    fid.write("%s" %(tline1))
    fid.write("%s" %(tline2))
    lt=len(source)
    count=0
    # Full rows of six samples each.
    while (count+1)*6<lt:
        fid.write("%10f%10f%10f%10f%10f%10f%s" %(source[count*6],source[count*6+1],source[count*6+2],source[count*6+3],source[count*6+4],source[count*6+5],'\n'))
        count+=1
    # Remaining samples (fewer than six) are written one per line.
    ii=lt-count*6
    i=0
    while (i<ii):
        i+=1
        fid.write("%10f%s" %(source[lt-ii+i-1],'\n'))
    fid.close()
    return
def write_adj_source_ts(s1,v1,mainfolder,mainfolder_source,source,dt):
    """Write an adjoint source via qcore.timeseries.seis2txt.

    The output station name and component come from `v1` ("NAME.comp");
    `s1` and `mainfolder` are unused, kept only for signature parity with
    write_adj_source.
    """
    #filename1=mainfolder_source+v1
    vs1=v1.split('.')
    timeseries.seis2txt(source,dt,mainfolder_source,vs1[0],vs1[1])
    return
def read_source(source_file):
    """Parse a source-location file.

    The first line starts with the shot count; each of the next nShot
    lines holds three integer grid indices.

    :return: (nShot, S) where S is an (nShot, 3) float array of indices
    """
    with open(source_file, 'r') as f:
        lines = f.readlines()

    nShot = int(lines[0].split()[0])
    S = np.zeros((nShot, 3))
    for row, line in enumerate(lines[1:nShot + 1]):
        tokens = line.split()
        S[row, 0] = int(tokens[0])
        S[row, 1] = int(tokens[1])
        S[row, 2] = int(tokens[2])
    return nShot, S
def read_stat_name(station_file):
    """Parse a STATION file: a count line, then 'i j k name' per station.

    :return: (nRec, R, statnames) where R is an (nRec, 3) float array of
        grid indices and statnames is the parallel list of station names.
    """
    with open(station_file, 'r') as f:
        lines = f.readlines()

    nRec = int(lines[0].split()[0])
    R = np.zeros((nRec, 3))
    statnames = []
    for row in range(nRec):
        tokens = lines[row + 1].split()
        R[row, :] = [int(tokens[0]), int(tokens[1]), int(tokens[2])]
        statnames.append(tokens[3])
    return nRec, R, statnames
def rms(stat_data):
    """Scale a trace so that its root-mean-square amplitude is one."""
    n_samples = len(stat_data)
    mean_power = np.sum(np.square(stat_data)) / n_samples
    return stat_data / mean_power ** 0.5
def read_flexwin(filename):
    """Read one FLEXWIN-style window file.

    Returns ``(t_on, t_off, td_shift, cc)`` taken from columns 1-4 of
    the file's third line, or four zeros when the file has fewer than
    three lines (no measurement window present).
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    t_on = t_off = td_shift = cc = 0
    if len(lines) > 2:
        fields = lines[2].split()
        t_on = float(fields[1])
        t_off = float(fields[2])
        td_shift = float(fields[3])
        cc = float(fields[4])
    return t_on, t_off, td_shift, cc
def winpad(lt, t_off, t_on, pad):
    """Build a length-``lt`` taper window with quarter-sine ramps.

    The window is zero outside ``[t_on - pad, t_off + pad)``, one on
    ``[t_on, t_off)``, and ramps up/down with a ``pad``-sample quarter
    sine on each side.  ``t_on``/``t_off`` are clamped 10 samples away
    from the trace edges.

    Note the argument order: (lt, t_off, t_on, pad).
    """
    t_on = max(t_on, 10)           # keep the taper away from the first samples
    t_off = min(t_off, lt - 10)    # ...and away from the last samples
    taper_len = t_off - t_on + 2 * pad
    core = np.ones((taper_len))
    ramp = np.sin(np.linspace(0, np.pi / 2, pad))
    core[0:pad] = ramp
    core[taper_len - pad:taper_len] = ramp[::-1]
    print('lt=' + str(lt))
    leading = np.zeros((t_on - pad))
    trailing = np.zeros((lt - t_off - pad))
    padded = np.concatenate((leading, core))
    return np.concatenate((padded, trailing))
def time_shift_emod3d(data, delay_Time, dt):
    """Advance a trace by ``delay_Time`` seconds, zero-padding the tail.

    Samples are shifted left by ``int(delay_Time / dt)`` points (the
    freed tail stays zero) — presumably compensating the fixed source
    delay of the emod3d simulation; verify against the simulation
    configuration.
    """
    n_pts = len(data)
    n_shift = int(delay_Time / (dt))
    shifted = np.zeros(data.shape)
    shifted[0:n_pts - n_shift] = data[n_shift:n_pts]
    return shifted
###################################################
# --- Script configuration: stations, paths, filter and window settings ---
#statnames = ['A1' ,'A2' ,'A3' ,'A4' ,'A5' ,'A6' , 'A7', 'B1' ,'B2' ,'B3' ,'B4' ,'B5' ,'B6' , 'B7','C1' ,'C2' ,'C3' ,'C4' ,'C5' ,'C6' ,'C7','D1' ,'D2' ,'D3' ,'D4' ,'D5' ,'D6' ,'D7','E1' ,'E2' ,'E3' ,'E4' ,'E5' ,'E6' ,'E7','F1' ,'F2' ,'F3' ,'F4' ,'F5' ,'F6','F7','G1' ,'G2' ,'G3' ,'G4' ,'G5' ,'G6','G7']
station_file = '../../../StatInfo/STATION.txt'
nRec, R, statnames = read_stat_name(station_file)
#statnames=statnames[0:10]
print('statnames')
print(statnames)
# Component suffixes: simulated/observed trace files vs. ASCII output names.
GV=['.090','.000','.ver']
GV_ascii=['.x','.y','.z']
mainfolder='../../Vel_es/Vel_es_i/'
mainfolder_o='../../Vel_ob/Vel_ob_i/'
mainfolder_source='../../../AdjSims/V3.0.7-a2a_xyz/Adj-InputAscii/'
# Clear any adjoint sources left over from a previous run.
os.system('rm ../../../AdjSims/V3.0.7-a2a_xyz/Adj-InputAscii/*.*')
print(mainfolder_o)
# Sample count and time step taken from one reference observed trace.
_, num_pts, dt, shift = readGP_2('../../Vel_ob/Vel_ob_i','CBGS.000')
num_pts=int(num_pts)
t = np.arange(num_pts)*dt
############/nesi/nobackup/nesi00213/RunFolder/tdn27/rgraves/Adjoint/Syn_VMs/Kernels/#########################
# Band-pass corner frequencies (Hz) and the cross-correlation search
# half-width delta_T (samples are derived from these below).
fs = 1/dt
lowcut = 0.05
#highcut = 0.05
highcut = 0.1
#ndelay_T=int((3/0.1)/(dt))
#delta_T=10
delta_T=20
flo=0.1
# Source delay removed from the synthetics (3 periods at flo).
delay_Time=(3/flo)
fc = highcut # Cut-off frequency of the filter
w = fc / (fs / 2) # Normalize the frequency
b, a = signal.butter(4, w, 'low')
source_file='../../../StatInfo/SOURCE.txt'
nShot, S = read_source(source_file)
# Per (shot, receiver) geometric weighting factors, flat-indexed.
wr = np.loadtxt('../../../../Kernels/Iters/iter1/Dump/geo_correlation.txt')
#wr_arr = np.loadtxt('../../../../Kernels/Iters/iter1/Dump/geo_correlation.txt')
#wr=np.reshape(wr_arr,[nRec,nShot])
#wr=np.ones([nRec,nShot])
################################
# Current shot index is communicated through a small binary file.
fi1=open('iShot.dat','r')
ishot=int(np.fromfile(fi1,dtype='int64'))
fi1.close()
print('ishot='+str(ishot))
#R_ishot_arr=np.loadtxt('../../../../Kernels/Iters/iter1/Dump/R_ishot_'+str(ishot)+'.txt')
#R_all_arr=np.loadtxt('../../../../Kernels/index_all_ncc_gt005_t_end_corrected.txt')
# Acceptance mask per (receiver, component, shot); 1 = usable window.
R_all_arr=np.loadtxt('../../../../Kernels/index_all_ncc_gt005_tshift_20s_new.txt')
R_all=R_all_arr.reshape([nRec,3,nShot])
R_Time_record_arr = np.loadtxt('../../../../Kernels/R_Time_record_148s_dh_2km.txt')
R_Time_record = R_Time_record_arr.reshape([2,nShot,nRec])
# Main loop: for every receiver/component accepted by the R_all mask,
# window and filter the simulated and observed traces, then build and
# write the adjoint source for the current shot.
for i,statname in enumerate(statnames):
    #print('ireceiver='+str(i))
    # Euclidean shot-receiver distance in grid units.
    distance=((R[i,1]-S[ishot-1,1])**2+(R[i,2]-S[ishot-1,2])**2+(R[i,0]-S[ishot-1,0])**2)**(0.5)
    # source_x=np.zeros(num_pts)
    # source_y=np.zeros(num_pts)
    # source_z=np.zeros(num_pts)
    for k in range(0,3):
        source_adj=np.zeros(num_pts)
        s0=statname+GV[k]
        v0=statname+GV_ascii[k]
        # Skip receivers that are co-located, too distant, or rejected
        # by the acceptance mask for this shot/component.
        if((distance<200) and (distance>0) and (R_all[i,k,ishot-1]==1)):
            print('ireceiver='+str(i))
            # Geometric weight for this (shot, receiver) pair.
            wr_ij = wr[nRec*(ishot-1)+i]
            #wr_ij = wr[nShot*(i)+ishot-1]
            #wr_ij = wr[i,ishot-1]
            # Simulated trace: remove the source delay, then taper.
            stat_data_0_S_org = timeseries.read_ascii(mainfolder+s0)
            stat_data_0_S = time_shift_emod3d(stat_data_0_S_org,delay_Time,dt)
            stat_data_0_S = np.multiply(signal.tukey(int(num_pts),0.1),stat_data_0_S)
            # Observed trace: taper only.
            stat_data_0_O = timeseries.read_ascii(mainfolder_o+s0)
            stat_data_0_O = np.multiply(signal.tukey(int(num_pts),0.1),stat_data_0_O)
            #stat_data_0_S = signal.detrend(stat_data_0_S)
            #stat_data_0_O = signal.detrend(stat_data_0_O)
            # stat_data_0_S = signal.filtfilt(b, a, stat_data_0_S)
            # stat_data_0_O = signal.filtfilt(b, a, stat_data_0_O)
            # Band-pass both traces to the inversion band.
            stat_data_0_S = butter_bandpass_filter(stat_data_0_S, lowcut, highcut, fs, order=4)
            stat_data_0_O = butter_bandpass_filter(stat_data_0_O, lowcut, highcut, fs, order=4)
            #stat_data_0_O = np.multiply(signal.tukey(int(num_pts),1.0),stat_data_0_O)
            # RMS-normalise and apply the geometric weight to both traces.
            stat_data_0_S = rms(stat_data_0_S)*wr_ij
            stat_data_0_O = rms(stat_data_0_O)*wr_ij
            #Parameters for window
            df=1/dt
            lt=num_pts
            pad=5
            #flexwin
            # FLEXWIN window file for this shot/trace, e.g. "3.A1.090.win".
            e_s_c_name = str(ishot)+'.'+s0+'.win'
            #filename = '../../../../Kernels/ALL_WINs/'+e_s_c_name
            filename = '../../../../Kernels/ALL_WINs_Tshift_20s/'+e_s_c_name
            t_on, t_off, td_shift, cc = read_flexwin(filename)
            #if (t_off> R_Time_record[1,ishot-1,i]):
            #    t_off = R_Time_record[1,ishot-1,i]
            # Window bounds in samples.
            tx_on=int(t_on/dt)
            tx_off=int(t_off/dt)
            #Window for isolated filter
            wd=winpad(lt,tx_off,tx_on,pad)
            stat_data_0_S = np.multiply(stat_data_0_S,wd)
            stat_data_0_O = np.multiply(stat_data_0_O,wd)
            # Build the NCC-based adjoint source and write it to disk.
            source_adj=source_adj_ncc(stat_data_0_S,stat_data_0_O,num_pts, delta_T,dt)
            write_adj_source_ts(s0,v0,mainfolder,mainfolder_source,source_adj,dt)
| [
"andrei.nguyen@canterbury.ac.nz"
] | andrei.nguyen@canterbury.ac.nz |
a881ebb4ce333707ca54b5214f035d67462dcd23 | 6e36e1c2feb568d44ca9063635b5d83c33abd2f5 | /media.py | 02bcd19054cf905d1ab6ef05b610e41a9d385480 | [] | no_license | euanramsay/python_movie_play | 5ca95fc8907d0caa48d33148179d7773e0c476a5 | 720c59c0fe6de16a4d229766a7aeb1c317ce15ce | refs/heads/master | 2021-01-19T10:56:09.743337 | 2017-04-18T10:03:17 | 2017-04-18T10:03:17 | 87,916,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import webbrowser
class Movie:
    """A movie with its storyline, poster art, and trailer link."""

    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Store the movie's descriptive data.

        Args:
            movie_title (str): Title of the movie.
            movie_storyline (str): Short description of the plot.
            poster_image (str): URL of the poster image.
            trailer_youtube (str): URL of the YouTube trailer.
        """
        self.title, self.storyline = movie_title, movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube

    def show_trailer(self):
        """Open the movie trailer in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
"euancramsay@aol.com"
] | euancramsay@aol.com |
e4eda3cc0965966f19ca0803a2f11734f439db73 | b9f9b5f0822c3ffd5cdf4767ddec179d8407a749 | /api.py | a0adb35fcbc7c693a693ed59eb1b3152dba76f99 | [] | no_license | hspearman/spotify_library_serializer | d740cd72662ebb1e09f23a0d140d8436bbc596c0 | f1779eb76d274637393253dea15e036be9e4ed10 | refs/heads/master | 2021-01-10T10:11:27.708003 | 2016-03-27T18:41:51 | 2016-03-27T18:41:51 | 52,226,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | import json
from flask import render_template, redirect, Response, Blueprint, session, request, logging
from werkzeug.exceptions import abort
from spotify_utility import build_authorization_url, refresh_token, is_token_existent, is_token_expired, get_token, \
get_tracks
# Blueprint grouping all of this module's routes.
api = Blueprint('api', __name__)
#
# Rejects POST requests whose CSRF token is missing or wrong
#
@api.before_request
def check_csrf_token():
    """Abort with 400 unless a POST carries the expected CSRF token.

    The session copy is popped, so each token is single-use.
    """
    if request.method != "POST":
        return
    expected = session.pop('_csrf_token', None)
    submitted = request.form.get('_csrf_token')
    if not expected or expected != submitted:
        abort(400)
#
# Selects template to render for site's default URL
#
@api.route('/')
def index():
    """Render the landing page.

    Serves the authorized template only when a Spotify token exists and
    is still valid.  An expired token triggers a best-effort refresh;
    on failure the anonymous template is served.
    """
    template = 'index.html'
    if is_token_existent():
        if not is_token_expired():
            template = 'index_authorized.html'
        else:
            try:
                refresh_token()
            except:
                # Refresh failed; keep the anonymous template.
                pass
    return render_template(template)
#
# Redirects user to spotify authorization endpoint
#
@api.route('/authorize', methods=['POST'])
def authorize():
    """Kick off Spotify's OAuth flow by redirecting to its consent page."""
    return redirect(build_authorization_url())
#
# Acts as callback for spotify's authorization workflow
# When called, exchanges code for access token
#
@api.route('/login')
def login():
    # Exchange the authorization code carried by this callback for a token.
    try:
        get_token()
    # Ignore failure (workflow restarts on redirect)
    except:
        pass
    return redirect("/")
#
# Logs the user out by clearing the whole session
# (including the stored Spotify token)
#
@api.route(
    '/logout',
    methods=['POST'])
def logout():
    session.clear()
    return redirect("/")
#
# Get user's spotify library
#
@api.route('/library')
def get_library():
    """Serve the user's saved tracks as a downloadable JSON attachment.

    Falls back to the landing page when fetching the tracks fails
    (e.g. the token is missing or expired).
    """
    try:
        tracks = get_tracks()
        headers = {
            'Content-Disposition': 'attachment;filename=library.json',
            'Content-Type': 'application/json'
        }
        return Response(json.dumps(tracks), headers=headers)
    except:
        # Could not fetch tracks; restart from the landing page.
        return redirect("/")
| [
"hspearman92@gmail.com"
] | hspearman92@gmail.com |
1d66fab8a4c8f54ea28c341be52857c2efbfa426 | 10145efafecff56ba3c975598e01353ed3f7bcf3 | /locker/core/validators.py | f99da9ab528d006db64325f33550a75c7b6a1ff5 | [
"MIT"
] | permissive | crowmurk/locker | 31c0c2d838ca6458f6337b9178e7b0eec0981938 | 7246b4f524d138d48aadf4866e0b218bb924f69c | refs/heads/master | 2023-04-26T13:50:23.207329 | 2022-03-01T08:45:50 | 2022-03-01T08:45:50 | 184,296,218 | 0 | 0 | MIT | 2023-04-21T21:58:39 | 2019-04-30T16:29:29 | JavaScript | UTF-8 | Python | false | false | 1,649 | py | import json
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.utils.deconstruct import deconstructible
from django.template.defaultfilters import filesizeformat
def validate_monday(value):
    """Check that the given date falls on a Monday.

    Args:
        value: a date/datetime whose ``weekday()`` is inspected.

    Raises:
        ValidationError: if the date is not a Monday.
    """
    is_monday = value.weekday() == 0
    if not is_monday:
        raise ValidationError(
            _("This date should be a Monday.")
        )
def validate_sunday(value):
    """Check that the given date falls on a Sunday.

    Args:
        value: a date/datetime whose ``weekday()`` is inspected.

    Raises:
        ValidationError: if the date is not a Sunday.
    """
    is_sunday = value.weekday() == 6
    if not is_sunday:
        raise ValidationError(
            _("This date should be a Sunday.")
        )
def validate_slug(value):
    """Reject slug values that collide with reserved URL keywords.

    Raises:
        ValidationError: if the slug is 'create', 'update' or 'delete'
            (case-insensitive).
    """
    reserved = ('create', 'update', 'delete')
    if value.lower() in reserved:
        raise ValidationError(
            _('Slug must not be "%(slug)s"'),
            params={'slug': value, },
        )
def validate_positive(value):
    """Validate that a numeric value is strictly greater than zero.

    Raises:
        ValidationError: if ``value`` is zero or negative.
    """
    if value <= 0:
        raise ValidationError(
            # Fixed typo in the user-facing message ("grater" -> "greater").
            _('This value must be greater than zero'),
        )
def validate_json(value):
    """Validate that a non-empty text value is well-formed JSON.

    Falsy values (empty string, None) are accepted without parsing.

    Raises:
        ValidationError: if ``value`` is non-empty and fails to parse.
    """
    if not value:
        return
    try:
        json.loads(value)
    except ValueError as e:
        raise ValidationError(
            # Fixed grammar in the user-facing message ("founded" -> "found").
            _('An error was found in %(value)s template: %(message)s'),
            params={'value': value, 'message': e, },
        )
| [
"crowmurk@gmail.com"
] | crowmurk@gmail.com |
f5fb13e993e1f670fb944b04d958c11f4c9235e0 | 4a63c8e2545c6968547d7aa36c2dca85b9b84301 | /workscheduler/src/backend/utils/datetime.py | 88eb649edb561f5fec06a44475f4020eda3ac2b3 | [] | no_license | epirevolve/workscheduler | 458b8da84da94862c91de6544c5aaaefc1520d47 | 6c89e7264c5b66f4eb91b1989da6324695449703 | refs/heads/develop | 2023-01-23T02:01:29.356940 | 2019-12-30T01:16:32 | 2019-12-30T01:16:32 | 147,050,241 | 5 | 2 | null | 2023-01-04T11:42:19 | 2018-09-02T03:10:19 | JavaScript | UTF-8 | Python | false | false | 207 | py | # -*- coding: utf-8 -*-
from datetime import datetime
def is_overlap(a_from: datetime, a_to: datetime, b_from: datetime, b_to: datetime):
    """Return True when the closed intervals [a_from, a_to] and [b_from, b_to] overlap.

    Bug fix: the original only tested whether one of A's endpoints lay
    inside B, which missed the case where A fully contains B.  The
    standard closed-interval overlap test below also covers containment
    in both directions.
    """
    return a_from <= b_to and b_from <= a_to
| [
"epirevolve@gmail.com"
] | epirevolve@gmail.com |
86d1cfe33de049e146c8d0748a19437e0991912e | ac762d856fbad529d146504d828f77cdf7732993 | /bot.py | f8602c45f4ae85bad9349c16566f7e7b7d9faaa5 | [] | no_license | svonton/discord_bot | d2ead7d7f56553ee1eb261ec0774da76709e6394 | 651619b2e63251d2e13a60ac719cf34f25163a68 | refs/heads/master | 2021-05-18T01:37:12.138889 | 2020-03-29T14:19:49 | 2020-03-29T14:19:49 | 251,049,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,522 | py | import discord
import datetime
import random
import asyncio
from discord.ext import commands
# commands.Bot instance with '$' prefix; not referenced elsewhere in this
# file — the bot actually runs via the plain discord.Client created below.
bot = commands.Bot(command_prefix='$')
def read_token():
    """Return the bot token stored on the first line of token.txt."""
    with open("token.txt", "r") as token_file:
        content = token_file.readlines()
    return content[0].strip()
def dembel(dds="2020-12-15"):
    """Return the number of whole days from today until ``dds``.

    Bug fix: the original parsed ``str(timedelta).split()[0]``, which
    returns "0:00:00" instead of "0" on the target day itself (a
    zero-day timedelta has no "days" component in its string form).
    Reading ``.days`` directly is robust for every sign of the delta.

    Args:
        dds: target date as an ISO "YYYY-MM-DD" string; defaults to the
            original hard-coded demobilisation date 2020-12-15.

    Returns:
        str: the day count, e.g. "42" or "-10" once the date has passed.
    """
    today = datetime.date.today()
    year, month, day = (int(part) for part in dds.split('-'))
    target = datetime.date(year, month, day)
    return str((target - today).days)
def store_img(imgLink):
    """Append the URL from a "!s <url>" command to imgLink.txt.

    Bug fix: the original crashed with FileNotFoundError on the very
    first "!s" command, before imgLink.txt existed; a missing file is
    now treated as an empty collection.

    Args:
        imgLink: the full command text; the URL is its second
            whitespace-separated token.

    Returns:
        int: 1 if the URL was already stored, 0 if it was appended.
    """
    url = imgLink.split(" ")[1]
    try:
        with open("imgLink.txt", "r") as f:
            existing = f.readlines()
    except FileNotFoundError:
        existing = []
    if url + "\n" in existing:
        return 1
    with open("imgLink.txt", "a") as f:
        f.write(url + "\n")
    return 0
def get_img(imgNumber):
    """Return the stored image URL at zero-based index ``imgNumber``."""
    with open("imgLink.txt", "r") as f:
        stored = f.readlines()
    return stored[imgNumber].strip()
def img_file_size():
    """Return how many image links are stored in imgLink.txt."""
    with open("imgLink.txt", "r") as f:
        return len(f.readlines())
# Accumulated roulette state: "name1+name2+...+" (module-level).
game_gunlet = ""


def gun_gunlet(message):
    """Append the game named in a "!ga <name>" command to the roulette.

    Args:
        message: a Discord message whose ``content`` second token is the
            game name.

    Returns:
        str: the updated ``game_gunlet`` accumulator.
    """
    global game_gunlet
    parts = str(message.content).split(" ")
    game_gunlet = game_gunlet + parts[1] + "+"
    return game_gunlet
token = read_token()  # Discord bot token, read from token.txt
client = discord.Client()  # gateway client; event handlers registered below
@client.event
async def on_ready():
    # Called once the gateway connection is up: set online status/activity.
    await client.change_presence(status=discord.Status.online, activity=discord.Game("гольф промеж твоих булок"))
@client.event
async def on_message(message):
    # Central command dispatcher; commands are plain "!"-prefixed strings.
    global game_gunlet
    if message.content.startswith("!help"):
        # Build the embedded help card listing every command.
        embed = discord.Embed(title="Помощь по боту", description="Итак гаврики вот вам парочку команд")
        embed.add_field(name="!t", value="Оставшиеся дни до дембеля")
        embed.add_field(name="!s url", value="Сохраняет картинку по сылке")
        embed.add_field(name="!o", value="Показывает картинку по порядковуму номеру")
        embed.add_field(name="!r", value="Выпуливает рандомную картинку")
        embed.add_field(name="!ga", value="Добавляет игру в рулетку")
        embed.add_field(name="!gc", value="Чистит рулетку")
        embed.add_field(name="!gs", value="Показывает победителя рулетки")
        await message.channel.send(content=None, embed=embed)
    elif message.content.startswith("!t"):
        # Days left until the demobilisation date.
        await message.channel.send(f"""{dembel()}""")
    elif message.content.startswith("пидр"):
        await message.channel.send(f"""скорее {message.author} пидр""")
    elif message.content.startswith("!s "):
        # Store an image link, rejecting stream/video hosts.
        if "https" in message.content and len(message.content) > 10:
            if "twitch.tv" in message.content or "youtube" in message.content:
                await message.channel.send("не кидай сюда хню")
            else:
                if store_img(str(message.content)) == 0:
                    await message.channel.send("забрал")
                else:
                    await message.channel.send("изображение уже есть мудила")
    elif message.content.startswith("!o "):
        # Show the stored image with the given index.
        num = str(message.content).split(" ")
        await message.channel.send(get_img(int(num[1])))
    elif message.content.startswith("!r"):
        # Show a random stored image.
        await message.channel.send(get_img(random.randint(0, img_file_size()-1)))
    elif message.content.startswith("!ga "):
        # NOTE(review): this equality can never hold — content matching
        # startswith("!ga ") (with trailing space) cannot equal "!ga",
        # so the "empty name" branch below is unreachable.
        if str(message.content) == "!ga":
            await message.channel.send("Напиши название дурашка")
        else:
            await message.channel.send(f"""Игра {str(message.content).split(" ")[1]} добавлена в обойму""")
            await message.channel.send(f"""Текущие игры в обойме: {gun_gunlet(message)}""")
    elif message.content.startswith("!gc"):
        # Reset the game roulette.
        game_gunlet = ""
        await message.channel.send("Магазин разряжен")
    elif message.content.startswith("!gs"):
        # Spin the roulette and announce a random winner.
        if game_gunlet == "":
            await message.channel.send("Ты куда стреляешь обойма то пустая")
        else:
            winner = game_gunlet.split("+")
            await message.channel.send(f"""Победитель: {winner[random.randint(0,len(winner)-2)]}""")
            await message.channel.send("Если кончил стрелять то разряди магазин с помощью !gc")
# Start the event loop; blocks until the bot is stopped.
client.run(token)
| [
"svonton@users.noreply.github.com"
] | svonton@users.noreply.github.com |
69e386138bf8d9353793e4efad85b5d466050979 | 7ccb0f18fe0d5d31ef3938c46ecaf93e03838525 | /module20_project5.py | 485923b60c06e5e3412977f14c5ad5c4a1e67692 | [
"Apache-2.0"
] | permissive | Abhyudyabajpai/Machine_Learning_Projects | 82a326cc607ffa27ef32a7937d24df85aada8a46 | 1f339584d242121ef1b2fbf8a17c173cecdd87ea | refs/heads/master | 2020-09-22T23:03:38.710258 | 2020-07-25T15:57:32 | 2020-07-25T15:57:32 | 225,339,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | import codecademylib3_seaborn
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
# Predict a flag's Landmass (continent) from its design features with a
# decision tree, sweeping max_depth to see how accuracy evolves.
flags = pd.read_csv('flags.csv',header =0)
print(flags.columns)
print(flags.head())
labels = flags[['Landmass']]
# Colour and symbol features used as predictors.
data = flags[["Red", "Green", "Blue", "Gold",
 "White", "Black", "Orange",
 "Circles",
"Crosses","Saltires","Quarters","Sunstars",
"Crescent","Triangle"]]
train_data,test_data,train_labels,test_labels = train_test_split(data,labels,random_state=1)
scores = []
# Held-out accuracy for tree depths 1..19.
for i in range(1,20):
  tree = DecisionTreeClassifier(random_state=1,max_depth =i)
  tree.fit(train_data,train_labels)
  score = tree.score(test_data,test_labels)
  scores.append(score)
plt.plot(range(1,20),scores)
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
d4244dbd81dc12c8d07c205889424d034972f5d1 | eaef5cf5e2b756ac1f67656b739fb0d941ef7995 | /es_complete/server.py | 24f0d1610df7ae928e30f06d1bcb5687e0ce9fa4 | [] | no_license | gatoatigrado/vim-complete-es-server | 965e938712c761b515d4a72d469ecf6f03562e16 | 7899ecf8b5756b8a3e01bb864888df9e1b306238 | refs/heads/master | 2021-01-10T11:12:20.771074 | 2015-12-27T08:32:36 | 2015-12-27T08:32:36 | 48,638,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | from __future__ import absolute_import
from __future__ import print_function
# Fix Flask-autoloaded module path
import sys
if "/code" not in sys.path:
    # Make the project root importable (presumably the container mount
    # point — verify against the deployment setup).
    sys.path.insert(0, "/code")
import flask
import re
import simplejson
from collections import namedtuple
from es_complete import es_client
from es_complete import es_query
from es_complete import es_index_data
from es_complete import text_analysis
app = flask.Flask(__name__)
# One completion request from the editor plugin: cursor position, buffer
# contents, and the partial word under the cursor.
AutocompleteRequest = namedtuple("AutocompleteRequest", [
    "current_line",
    "current_column",
    "lines",
    "word_being_completed"
])
# Matches runs of non-identifier characters.  Note the 'ur' raw-unicode
# literal: this module is Python 2 only.
SPACES_RE = re.compile(ur'[^\w\d\_]+')
def simple_buffer_complete(request):
    """Complete from the words already present in the buffer.

    Returns the buffer words that start with the partial word being
    completed, or an empty list when there is nothing to complete.
    """
    prefix = request.word_being_completed
    if not prefix:
        return []
    buffer_text = "\n".join(request.lines)
    words = text_analysis.get_all_words(buffer_text)
    return [word for word in words if word.startswith(prefix)]
@app.route('/complete')
def complete():
    """Handle one completion request: buffer words first, then Elasticsearch.

    Returns a JSON list of candidates; buffer matches are capped at 10.
    """
    body = flask.request.get_json(force=True)
    request = AutocompleteRequest(**body)
    # Bug fix: the original called simple_buffer_complete twice and
    # discarded the first result; do the scan only once.
    buffer_matches = simple_buffer_complete(request)[:10]
    result = buffer_matches + es_query.elasticsearch_complete(request)
    return simplejson.dumps(result)
@app.route("/index-raw", methods=["POST"])
def index_raw():
    # Index the raw POST body into Elasticsearch.
    es_index_data.index_data(flask.request.get_data())
    return "Success!"
@app.route("/clear-index", methods=["POST"])
def clear_index():
    # Drop and recreate the Elasticsearch index (destroys indexed data).
    es_client.recreate_index()
    return "Success!"
if __name__ == '__main__':
    # Development server for the completion service, reachable externally.
    app.run(host='0.0.0.0', port=18013, debug=True)
| [
"gatoatigrado@gmail.com"
] | gatoatigrado@gmail.com |
b36ecf77fd71c9f220c802648ec0c3fcca41753a | db8b780121211871af4942e4e2439dff1decf2ff | /Exercício 8 - verificação de aprendizagem.py | 77c8cf42452d96fc82199bcddd30777df37b0040 | [] | no_license | eliasssantana/logicaProgramacao | c1d86226318d4c797fa5b97fe471fb1de8b69eea | 353e710ebeae8ef9eb2cfc52baf4effe8dfd6dcf | refs/heads/main | 2023-05-15T08:29:20.543267 | 2021-06-10T02:40:38 | 2021-06-10T02:40:38 | 366,473,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | print("=-" * 30)
e = 'Exercício'
print(e.center(50))
print("=-" * 30)
# 8)
# Exercise statement (Portuguese, below): read name, birth year and CTPS
# number and register them (with age) in a dict; when the CTPS differs
# from 0, also store hiring year and salary and compute the retirement
# age assuming 35 years of contribution.
'''
- Crie um programa que leia nome, ano de nascimento e carteira de trabalho e cadastre-os (com idade) em um dicionário. Se por acaso a CTPS for diferente de 0, o dicionário receberá também o ano de contratação e o salário. Calcule e acrescente , além da idade , com quantos anos a pessoa vai se aposentar. Considere que o trabalhador deve contribuir por 35 anos para se aposentar.
'''
def aposentadoria(ano_atual=2021):
    """Register a worker and print the collected/derived data.

    Prompts for name, birth year and CTPS number; when the CTPS is
    non-zero it also asks for hiring year and salary and computes the
    retirement year assuming 35 years of contribution.

    Bug fix: the original unconditionally read
    ``pessoa['ano de contratação']`` and crashed with KeyError whenever
    the CTPS was 0; the retirement fields are now computed only for
    workers that actually have a contract.

    Args:
        ano_atual: reference year for the age computation (defaults to
            2021, the value originally hard-coded).
    """
    pessoa = {}
    pessoa['nome'] = input("Digite o seu nome:\n")
    ano_nascimento = int(input("Digite o ano de seu nascimento:\n"))
    carteira_trabalho = int(input("Digite o número da sua carteira de trabalho:\n"))
    pessoa['nascimento'] = ano_nascimento
    if carteira_trabalho != 0:
        pessoa['ano de contratação'] = int(input("Digite o ano de sua contratação:\n"))
        pessoa['salário'] = float(input('Digite o seu salário:\n'))
        pessoa['CTPS'] = carteira_trabalho
    idade = ano_atual - ano_nascimento
    pessoa['idade'] = idade
    if carteira_trabalho != 0:
        # 35 years of contribution counted from the hiring year.
        ano_aposentadoria = pessoa['ano de contratação'] + 35
        pessoa['idade_aposentadoria'] = (ano_aposentadoria - ano_atual) + idade
        pessoa['aposentadoria'] = ano_aposentadoria
    for chave, valor in pessoa.items():
        print(f'{chave} - {valor}')
aposentadoria() | [
"eliassilva0045@gmail.com"
] | eliassilva0045@gmail.com |
8e0098c5df0f9fff8c4dce4243eb625ea300531f | 38363f668f87c1044e08a40005cf9319fd8ecb5c | /Servicio-Nica-Ventas/Nivel3/nica-ventas/app/app.py | a65febcae2c3c8fb4edcc096029f88a3af243541 | [] | no_license | LISSETTE-QUINTERO/Servicio-Nica-Ventas | 6f8d215514c93abc2cbdcde79d6be1aa9373c06e | 90ac01b6c5c97d6c7af9fda37311a8222ef79d7b | refs/heads/master | 2021-06-22T03:12:05.425429 | 2019-09-11T03:44:03 | 2019-09-11T03:44:03 | 207,189,785 | 0 | 0 | null | 2021-03-20T01:42:35 | 2019-09-09T00:05:11 | Python | UTF-8 | Python | false | false | 3,158 | py | from flask import Flask, jsonify, request, escape
import os
from flask_mysqldb import MySQL
from worklog import Worklog
import redis
app = Flask(__name__)
# MySQL connection settings come from the container environment.
app.config['MYSQL_HOST'] = os.environ['DATABASE_HOST']
app.config['MYSQL_USER'] = os.environ['DATABASE_USER']
app.config['MYSQL_PASSWORD'] = os.environ['DATABASE_PASSWORD']
app.config['MYSQL_DB'] = os.environ['DATABASE_NAME']
mysql = MySQL(app)
# Redis client used as a read-through cache by the /active GET endpoint.
redis_cli = redis.Redis(host=os.environ['REDIS_LOCATION'], port=os.environ['REDIS_PORT'])
@app.route('/active', methods=['GET'])
def get_active():
    # Look up the active flag for ?country=..&city=.., using Redis as a
    # read-through cache in front of the MySQL-backed Worklog table.
    #try:
    country = request.args.get('country')
    city = request.args.get('city');
    # Cache key, e.g. "nicaragua_managua".
    key = country.lower() + '_' + city.lower()
    state = redis_cli.get(key)
    if state:
        # NOTE(review): redis returns bytes, so bool(state) is True for ANY
        # non-empty cached value — including b"0"/b"False" — which would
        # report inactive locations as active on a cache hit; verify the
        # stored representation against what escape(js[2]) produces below.
        response = {
            "country":country,
            "city":city,
            "active":bool(state),
            "cache":"hit"
        }
    else:
        wl=Worklog(mysql,app.logger)
        js=wl.find_location(country,city)
        if js is None:
            response = {"mensaje":"Registro no identificado"}
        else:
            # Populate the cache with the DB value before answering.
            redis_cli.set(key,escape(js[2]))
            response = {
                "country":js[0],
                "city":js[1],
                "active":bool(js[2]),
                "cache":"miss"
            }
    return jsonify(response)
    # except:
    #     return jsonify({"mensaje":"Error Verifique URL"})
@app.route('/active', methods=['POST'])
def post_active():
    """Register a (country, city) location unless it already exists."""
    try:
        payload = request.get_json()
        worklog = Worklog(mysql, app.logger)
        existing = worklog.find_location(payload['country'], payload['city'])
        if existing is not None:
            response = {"mensaje": "Registro existente"}
        else:
            worklog.save_location(**payload)
            response = {
                "mensaje": "Registro guardado",
                "country": payload['country'],
                "city": payload['city']
            }
        return jsonify(response)
    except:
        # Any failure (bad JSON, missing keys, DB error) yields a
        # generic error body, as before.
        return jsonify({"mensaje": "error"})
@app.route('/active', methods=['PUT'])
def put_active():
    # Update a location's state; requires a bearer token in the
    # Authorization header.
    try:
        payload = request.get_json()
        auth = request.headers.get("authorization", None)
        if not auth:
            return jsonify('Token no enviado')
        # NOTE(review): the bearer token is hard-coded in source and
        # compared as a plain string; consider moving it to
        # configuration/environment.
        elif auth != "Bearer 2234hj234h2kkjjh42kjj2b20asd6918":
            return jsonify('Token no autorizado')
        else:
            wl = Worklog(mysql, app.logger)
            wl.state_location(**payload)
            response= {
                "mensaje": "Registro actualizado",
                "token": auth,
                "country": payload['country'],
                "city": payload['city'],
                "active": payload['active']
            }
            return jsonify(response)
    except:
        return jsonify({"mensaje": "error"})
if __name__ == '__main__':
    # Development server, reachable from outside the container.
    app.run(debug=True, host='0.0.0.0')
| [
"noreply@github.com"
] | noreply@github.com |
442fbda53e3daca717ec96081a20b07d92193745 | 5effdd4a38f85229ff09622cf46940a0c7986dda | /generator/core.py | 064596355e77748c96c4df93bdd2a47365cfb369 | [
"MIT"
] | permissive | universuen/Cat_WGAN | 598ab8a3f408486d1e18cb66d928287a18210f61 | ac983270820cf55a3eeb74a0d0edd258e715d00b | refs/heads/main | 2023-08-13T02:42:54.275525 | 2021-10-03T16:48:26 | 2021-10-03T16:48:26 | 374,564,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,808 | py | import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
import numpy as np
from . import config
from .datasets import RealImageDataset
from . import models
from .logger import Logger
from ._utils import train_d_model, train_g_model, save_samples, show_samples, denormalize
class Generator:
    """Facade around the WGAN generator network.

    Wraps model loading/saving, single-image generation, and the full
    WGAN training loop (discriminator + generator) with per-epoch
    checkpointing.
    """

    def __init__(
            self,
            model_name: str = None,
    ):
        self.logger = Logger(self.__class__.__name__)
        self.logger.info(f'model path: {config.path.models / model_name}')
        # Resolved location of the serialized generator weights.
        self.model_path = str(config.path.models / model_name)
        # Lazily populated by load_model() or train().
        self.model = None

    def load_model(self):
        """Load generator weights from model_path and switch to eval mode."""
        self.model = models.Generator().to(config.device)
        self.model.load_state_dict(
            torch.load(self.model_path)
        )
        self.model.eval()
        self.logger.debug('model was loaded successfully')

    def save_model(self):
        """Serialize the current generator weights to model_path."""
        torch.save(
            self.model.state_dict(),
            self.model_path
        )
        self.logger.debug('model was saved successfully')

    def generate(
            self,
            seed: int = None,
            latent_vector: torch.Tensor = None
    ):
        """Generate one image as a de-normalized numpy array.

        A seed fixes torch's RNG for reproducibility; an explicit latent
        vector, if given, bypasses random sampling entirely.
        """
        if seed is not None:
            torch.manual_seed(seed)
        if latent_vector is None:
            latent_vector = torch.randn(
                1,
                config.data.latent_vector_size,
                device=config.device,
            )
        img = self.model(latent_vector).squeeze().detach().cpu().numpy()
        # (C, H, W) -> (H, W, C) for plotting/saving.
        return np.transpose(denormalize(img), (1, 2, 0))

    def train(
            self,
            start_from_checkpoint=True,
    ):
        """Run the WGAN training loop, checkpointing after every epoch.

        When ``start_from_checkpoint`` is True and a checkpoint file
        exists, models, optimizers, loss histories, the fixed sample
        latent vector, the RNG state and the epoch counter are restored
        before training resumes.
        """
        self.logger.info('started training new model')
        self.logger.info(f'using device: {config.device}')
        # prepare data
        dataset = RealImageDataset(
            config.path.training_dataset,
            transform=transforms.Compose(
                [
                    transforms.Resize(config.data.image_size),
                    transforms.CenterCrop(config.data.image_size),
                    transforms.ToTensor(),
                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                ]
            ),
        )
        data_loader = DataLoader(
            dataset=dataset,
            batch_size=config.training.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=4,
        )
        # prepare models
        d_model = models.Discriminator().to(config.device)
        g_model = models.Generator().to(config.device)
        # link models with optimizers
        d_optimizer = torch.optim.Adam(
            params=d_model.parameters(),
            lr=config.training.d_learning_rate,
            betas=(0.5, 0.9),
        )
        g_optimizer = torch.optim.Adam(
            params=g_model.parameters(),
            lr=config.training.g_learning_rate,
            betas=(0.5, 0.9),
        )
        # prepare to record dataset plots
        d_losses = []
        g_losses = []
        # Fixed latent batch so per-epoch sample grids are comparable.
        fixed_latent_vector = torch.randn(
            config.training.sample_num,
            config.data.latent_vector_size,
            device=config.device,
        )
        start_epoch = 0
        if start_from_checkpoint:
            try:
                checkpoint = torch.load(config.path.checkpoint, map_location='cpu')
                d_model.load_state_dict(checkpoint['d_model_state_dict'])
                g_model.load_state_dict(checkpoint['g_model_state_dict'])
                d_optimizer.load_state_dict(checkpoint['d_optimizer_state_dict'])
                g_optimizer.load_state_dict(checkpoint['g_optimizer_state_dict'])
                d_losses = checkpoint['d_losses']
                g_losses = checkpoint['g_losses']
                fixed_latent_vector = checkpoint['fixed_latent_vector'].to(config.device)
                start_epoch = checkpoint['epoch'] + 1
                torch.set_rng_state(checkpoint['rng_state'])
            except FileNotFoundError:
                self.logger.warning('Checkpoint not found')
        # train
        for epoch in range(start_epoch, config.training.epochs, ):
            print(f'\nEpoch: {epoch + 1}')
            for idx, (real_images, _) in enumerate(data_loader):
                # show_samples(real_images)
                real_images = real_images.to(config.device)
                print(f'\rProcess: {100 * (idx + 1) / len(data_loader): .2f}%', end='')
                # Several discriminator steps per generator step (WGAN style).
                d_loss = None
                for _ in range(config.training.d_loop_num):
                    d_loss = train_d_model(
                        d_model=d_model,
                        g_model=g_model,
                        real_images=real_images,
                        d_optimizer=d_optimizer,
                    )
                d_losses.append(d_loss)
                torch.cuda.empty_cache()
                g_loss = None
                for _ in range(config.training.g_loop_num):
                    g_loss = train_g_model(
                        g_model=g_model,
                        d_model=d_model,
                        g_optimizer=g_optimizer,
                    )
                g_losses.append(g_loss)
                torch.cuda.empty_cache()
            print(
                f"\n"
                f"Discriminator loss: {d_losses[-1]}\n"
                f"Generator loss: {g_losses[-1]}\n"
            )
            # save losses plot
            plt.title("Generator and Discriminator Loss During Training")
            plt.plot(g_losses, label="generator")
            plt.plot(d_losses, label="discriminator")
            plt.xlabel("iterations")
            plt.ylabel("Loss")
            plt.legend()
            plt.savefig(fname=str(config.path.training_plots / 'losses.jpg'))
            plt.clf()
            # save samples
            g_model.eval()
            save_samples(
                file_name=f'E{epoch + 1}.jpg',
                samples=g_model(fixed_latent_vector)
            )
            g_model.train()
            # Persist the generator and a full training checkpoint each epoch.
            self.model = g_model
            self.save_model()
            checkpoint = dict()
            checkpoint['d_model_state_dict'] = d_model.state_dict()
            checkpoint['g_model_state_dict'] = g_model.state_dict()
            checkpoint['d_optimizer_state_dict'] = d_optimizer.state_dict()
            checkpoint['g_optimizer_state_dict'] = g_optimizer.state_dict()
            checkpoint['d_losses'] = d_losses
            checkpoint['g_losses'] = g_losses
            checkpoint['fixed_latent_vector'] = fixed_latent_vector
            checkpoint['epoch'] = epoch
            checkpoint['rng_state'] = torch.get_rng_state()
            torch.save(checkpoint, config.path.checkpoint)
        self.model.eval()
| [
"52519513+universuen@users.noreply.github.com"
] | 52519513+universuen@users.noreply.github.com |
d9c01472e3a355d2c744a3b72a0896f067997726 | 5fb9f29964268223869944508798d6c21d9e5298 | /sub_test/sub_test.py | ea78eeb031a733544b22f4926dc7ead63ea94ff4 | [] | no_license | CodedQuen/Python-Pocket-Reference- | 56459ce1509f74bc253af027be91935e62922948 | 8f7c69edb8ad4ac3ef7f70bab15ffe24eb162325 | refs/heads/master | 2022-06-14T20:57:13.799676 | 2020-05-05T08:27:17 | 2020-05-05T08:27:17 | 261,398,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | from subprocess import call, Popen, PIPE, check_output
# Demonstrates three subprocess invocation styles.
print(call("ls -l", shell=True))
print(check_output("ls -l", shell=True).decode())
# Manual pipeline equivalent to: ls -l | wc -l
pipe1 = Popen("ls -l", stdout=PIPE, shell=True)
pipe2 = Popen("wc -l", stdin=pipe1.stdout, stdout=PIPE, shell=True)
print(pipe2.stdout.read().decode())
| [
"noreply@github.com"
] | noreply@github.com |
83d4ccb19ccbe1b7e8b70303939cb8fe2e8f2632 | 22ad8c48067de977b5256a9f1d566bfa731bad00 | /venv/lib/python3.8/site-packages/pyglet/gl/base.py | a3f35e96ee5bf4e19a5f82a395fd9ded2d5954d9 | [] | no_license | d-halverson/Walgreens-Scraper | dedb2bd1f40bfce359eeb3f93075929c560fd7dc | d53011a8e42c9758569bfe392436ef5b1553ec5e | refs/heads/main | 2023-04-03T12:51:36.033373 | 2021-04-07T02:33:15 | 2021-04-07T02:33:15 | 350,066,341 | 5 | 10 | null | 2021-06-17T15:29:28 | 2021-03-21T17:14:52 | Python | UTF-8 | Python | false | false | 14,908 | py | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2020 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from pyglet import gl, compat_platform
from pyglet.gl import gl_info
from pyglet.gl import glu_info
class Config:
"""Graphics configuration.
A Config stores the preferences for OpenGL attributes such as the
number of auxilliary buffers, size of the colour and depth buffers,
double buffering, stencilling, multi- and super-sampling, and so on.
Different platforms support a different set of attributes, so these
are set with a string key and a value which is integer or boolean.
:Ivariables:
`double_buffer` : bool
Specify the presence of a back-buffer for every color buffer.
`stereo` : bool
Specify the presence of separate left and right buffer sets.
`buffer_size` : int
Total bits per sample per color buffer.
`aux_buffers` : int
The number of auxilliary color buffers.
`sample_buffers` : int
The number of multisample buffers.
`samples` : int
The number of samples per pixel, or 0 if there are no multisample
buffers.
`red_size` : int
Bits per sample per buffer devoted to the red component.
`green_size` : int
Bits per sample per buffer devoted to the green component.
`blue_size` : int
Bits per sample per buffer devoted to the blue component.
`alpha_size` : int
Bits per sample per buffer devoted to the alpha component.
`depth_size` : int
Bits per sample in the depth buffer.
`stencil_size` : int
Bits per sample in the stencil buffer.
`accum_red_size` : int
Bits per pixel devoted to the red component in the accumulation
buffer.
`accum_green_size` : int
Bits per pixel devoted to the green component in the accumulation
buffer.
`accum_blue_size` : int
Bits per pixel devoted to the blue component in the accumulation
buffer.
`accum_alpha_size` : int
Bits per pixel devoted to the alpha component in the accumulation
buffer.
"""
_attribute_names = [
'double_buffer',
'stereo',
'buffer_size',
'aux_buffers',
'sample_buffers',
'samples',
'red_size',
'green_size',
'blue_size',
'alpha_size',
'depth_size',
'stencil_size',
'accum_red_size',
'accum_green_size',
'accum_blue_size',
'accum_alpha_size',
'major_version',
'minor_version',
'forward_compatible',
'debug'
]
major_version = None
minor_version = None
forward_compatible = None
debug = None
def __init__(self, **kwargs):
"""Create a template config with the given attributes.
Specify attributes as keyword arguments, for example::
template = Config(double_buffer=True)
"""
for name in self._attribute_names:
if name in kwargs:
setattr(self, name, kwargs[name])
else:
setattr(self, name, None)
def requires_gl_3(self):
if self.major_version is not None and self.major_version >= 3:
return True
if self.forward_compatible or self.debug:
return True
return False
def get_gl_attributes(self):
"""Return a list of attributes set on this config.
:rtype: list of tuple ``(name, value)``
:return: All attributes, with unset attributes having a value of
``None``.
"""
return [(name, getattr(self, name)) for name in self._attribute_names]
def match(self, canvas):
"""Return a list of matching complete configs for the given canvas.
.. versionadded:: 1.2
:Parameters:
`canvas` : `Canvas`
Display to host contexts created from the config.
:rtype: list of `CanvasConfig`
"""
raise NotImplementedError('abstract')
def create_context(self, share):
"""Create a GL context that satisifies this configuration.
:deprecated: Use `CanvasConfig.create_context`.
:Parameters:
`share` : `Context`
If not None, a context with which to share objects with.
:rtype: `Context`
:return: The new context.
"""
raise gl.ConfigException('This config cannot be used to create contexts. '
'Use Config.match to created a CanvasConfig')
def is_complete(self):
"""Determine if this config is complete and able to create a context.
Configs created directly are not complete, they can only serve
as templates for retrieving a supported config from the system.
For example, `pyglet.window.Screen.get_matching_configs` returns
complete configs.
:deprecated: Use ``isinstance(config, CanvasConfig)``.
:rtype: bool
:return: True if the config is complete and can create a context.
"""
return isinstance(self, CanvasConfig)
def __repr__(self):
import pprint
return '%s(%s)' % (self.__class__.__name__, pprint.pformat(self.get_gl_attributes()))
class CanvasConfig(Config):
"""OpenGL configuration for a particular canvas.
Use `Config.match` to obtain an instance of this class.
.. versionadded:: 1.2
:Ivariables:
`canvas` : `Canvas`
The canvas this config is valid on.
"""
def __init__(self, canvas, base_config):
self.canvas = canvas
self.major_version = base_config.major_version
self.minor_version = base_config.minor_version
self.forward_compatible = base_config.forward_compatible
self.debug = base_config.debug
def compatible(self, canvas):
raise NotImplementedError('abstract')
def create_context(self, share):
"""Create a GL context that satisifies this configuration.
:Parameters:
`share` : `Context`
If not None, a context with which to share objects with.
:rtype: `Context`
:return: The new context.
"""
raise NotImplementedError('abstract')
def is_complete(self):
return True
class ObjectSpace:
def __init__(self):
# Textures and buffers scheduled for deletion
# the next time this object space is active.
self._doomed_textures = []
self._doomed_buffers = []
class Context:
"""OpenGL context for drawing.
Use `CanvasConfig.create_context` to create a context.
:Ivariables:
`object_space` : `ObjectSpace`
An object which is shared between all contexts that share
GL objects.
"""
#: Context share behaviour indicating that objects should not be
#: shared with existing contexts.
CONTEXT_SHARE_NONE = None
#: Context share behaviour indicating that objects are shared with
#: the most recently created context (the default).
CONTEXT_SHARE_EXISTING = 1
# Used for error checking, True if currently within a glBegin/End block.
# Ignored if error checking is disabled.
_gl_begin = False
# gl_info.GLInfo instance, filled in on first set_current
_info = None
# List of (attr, check) for each driver/device-specific workaround that is
# implemented. The `attr` attribute on this context is set to the result
# of evaluating `check(gl_info)` the first time this context is used.
_workaround_checks = [
# GDI Generic renderer on Windows does not implement
# GL_UNPACK_ROW_LENGTH correctly.
('_workaround_unpack_row_length',
lambda info: info.get_renderer() == 'GDI Generic'),
# Reportedly segfaults in text_input.py example with
# "ATI Radeon X1600 OpenGL Engine"
# glGenBuffers not exported by
# "ATI Radeon X1270 x86/MMX/3DNow!/SSE2"
# "RADEON XPRESS 200M Series x86/MMX/3DNow!/SSE2"
# glGenBuffers not exported by
# "Intel 965/963 Graphics Media Accelerator"
('_workaround_vbo',
lambda info: (info.get_renderer().startswith('ATI Radeon X')
or info.get_renderer().startswith('RADEON XPRESS 200M')
or info.get_renderer() ==
'Intel 965/963 Graphics Media Accelerator')),
# Some ATI cards on OS X start drawing from a VBO before it's written
# to. In these cases pyglet needs to call glFinish() to flush the
# pipeline after updating a buffer but before rendering.
('_workaround_vbo_finish',
lambda info: ('ATI' in info.get_renderer() and
info.have_version(1, 5) and
compat_platform == 'darwin')),
]
def __init__(self, config, context_share=None):
self.config = config
self.context_share = context_share
self.canvas = None
if context_share:
self.object_space = context_share.object_space
else:
self.object_space = ObjectSpace()
def __repr__(self):
return '%s()' % self.__class__.__name__
def attach(self, canvas):
if self.canvas is not None:
self.detach()
if not self.config.compatible(canvas):
raise RuntimeError('Cannot attach %r to %r' % (canvas, self))
self.canvas = canvas
def detach(self):
self.canvas = None
def set_current(self):
if not self.canvas:
raise RuntimeError('Canvas has not been attached')
# XXX not per-thread
gl.current_context = self
# XXX
gl_info.set_active_context()
glu_info.set_active_context()
# Implement workarounds
if not self._info:
self._info = gl_info.GLInfo()
self._info.set_active_context()
for attr, check in self._workaround_checks:
setattr(self, attr, check(self._info))
# Release textures and buffers on this context scheduled for deletion.
# Note that the garbage collector may introduce a race condition,
# so operate on a copy of the textures/buffers and remove the deleted
# items using list slicing (which is an atomic operation)
if self.object_space._doomed_textures:
textures = self.object_space._doomed_textures[:]
textures = (gl.GLuint * len(textures))(*textures)
gl.glDeleteTextures(len(textures), textures)
self.object_space._doomed_textures[0:len(textures)] = []
if self.object_space._doomed_buffers:
buffers = self.object_space._doomed_buffers[:]
buffers = (gl.GLuint * len(buffers))(*buffers)
gl.glDeleteBuffers(len(buffers), buffers)
self.object_space._doomed_buffers[0:len(buffers)] = []
def destroy(self):
"""Release the context.
The context will not be useable after being destroyed. Each platform
has its own convention for releasing the context and the buffer(s)
that depend on it in the correct order; this should never be called
by an application.
"""
self.detach()
if gl.current_context is self:
gl.current_context = None
gl_info.remove_active_context()
# Switch back to shadow context.
if gl._shadow_window is not None:
gl._shadow_window.switch_to()
def delete_texture(self, texture_id):
"""Safely delete a texture belonging to this context.
Usually, the texture is released immediately using
``glDeleteTextures``, however if another context that does not share
this context's object space is currently active, the deletion will
be deferred until an appropriate context is activated.
:Parameters:
`texture_id` : int
The OpenGL name of the texture to delete.
"""
if self.object_space is gl.current_context.object_space:
id = gl.GLuint(texture_id)
gl.glDeleteTextures(1, id)
else:
self.object_space._doomed_textures.append(texture_id)
def delete_buffer(self, buffer_id):
"""Safely delete a buffer object belonging to this context.
This method behaves similarly to :py:func:`~pyglet.text.document.AbstractDocument.delete_texture`, though for
``glDeleteBuffers`` instead of ``glDeleteTextures``.
:Parameters:
`buffer_id` : int
The OpenGL name of the buffer to delete.
.. versionadded:: 1.1
"""
if self.object_space is gl.current_context.object_space and False:
id = gl.GLuint(buffer_id)
gl.glDeleteBuffers(1, id)
else:
self.object_space._doomed_buffers.append(buffer_id)
def get_info(self):
"""Get the OpenGL information for this context.
.. versionadded:: 1.2
:rtype: `GLInfo`
"""
return self._info
| [
"drew.m.halverson@gmail.com"
] | drew.m.halverson@gmail.com |
e6d5eb58168ade63b3dec20ddcaa54dadfbbe80f | 23ac473ba6ca03412e825d939676a08f6a2e6546 | /modules/exploit-0.1/commands.py | 565ba6aee271dad9935cdad2e5aa40dfd7c4ab5d | [] | no_license | bwingu/app01back | 710c77c35bcb9a27e75a27304a26ca46546b260a | b5ef894ecfd43d59e6e8d4b5215a445fd0b19f97 | refs/heads/master | 2021-01-18T10:53:37.988723 | 2012-02-08T21:11:09 | 2012-02-08T21:11:09 | 3,304,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | # Here you can create play commands that are specific to the module, and extend existing commands
MODULE = 'exploit'
# Commands that are specific to your module
COMMANDS = ['exploit:hello']
def execute(**kargs):
command = kargs.get("command")
app = kargs.get("app")
args = kargs.get("args")
env = kargs.get("env")
if command == "exploit:hello":
print "~ Hello"
# This will be executed before any command (new, run...)
def before(**kargs):
command = kargs.get("command")
app = kargs.get("app")
args = kargs.get("args")
env = kargs.get("env")
# This will be executed after any command (new, run...)
def after(**kargs):
command = kargs.get("command")
app = kargs.get("app")
args = kargs.get("args")
env = kargs.get("env")
if command == "new":
pass
| [
"louis.sebastien@gmail.com"
] | louis.sebastien@gmail.com |
72237823847f45db31b97d3e26d00c5ab4caf5a6 | 276c7120ce431ed8fae93c70b3ed2dedc9bc9301 | /resources/events.py | b64397111ff50dfb1d43e130675fbcd85b57b071 | [] | no_license | notinuse1234/zoom_zoom_zow | 8f82c6deb1e826bd1b918a5715915457659aedc8 | bc0e94bec6b62dcc0da410e9f25be787b11d5a78 | refs/heads/master | 2022-01-27T05:14:59.446663 | 2019-05-06T18:32:16 | 2019-05-06T18:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | import pygame as pg
class Events():
@staticmethod
@property
def BEGINSWING():
return pg.USEREVENT + 1
@staticmethod
@property
def ENDSWING():
return pg.USEREVENT + 2
| [
"ahester@cisco.com"
] | ahester@cisco.com |
1938abebcd0e1a343f13385337a8f7233e24c2f7 | 359881c00ce4faf756fe2cf3b503452c0bcf313a | /raterprojectapi/serializers/ratings.py | 3614aefc0cc68bf7977060010f57ea5de9d85a9c | [] | no_license | jshearon/rater-project | 1bd8fe2080faa24e2b2dd2217ae1474132aeb61a | a67142018e822c57ad80ba6589e2e054abc42bcf | refs/heads/main | 2023-02-28T04:30:11.916983 | 2021-01-27T00:56:21 | 2021-01-27T00:56:21 | 314,429,265 | 0 | 0 | null | 2021-01-27T00:52:43 | 2020-11-20T02:50:38 | Python | UTF-8 | Python | false | false | 713 | py | from rest_framework import serializers
from raterprojectapi.models import Ratings
from rest_framework.validators import UniqueTogetherValidator
class RatingSerializer(serializers.ModelSerializer):
class Meta:
model = Ratings
validators = [
UniqueTogetherValidator(
queryset=Ratings.objects.all(),
fields=['player', 'game'],
message="This player has already rated this game"
)
]
player = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
game = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
fields = ('id', 'rating_value', 'player', 'game')
depth = 0
| [
"jonathan.shearon@gmail.com"
] | jonathan.shearon@gmail.com |
90cfe476dff2e715a46fde91c5b2163d5eb304ca | b7c68fac8bb7b3b372a76988f3efe63c4f2526a5 | /tensorflow-experiments.py | 754f456975768457874ff4d4c0127e1405d60391 | [] | no_license | misrasiddhant/Python | 5b87feaae7e47766621e4453969c39e2030137bf | 6bd4afb6b614613c7462e36a24ffd852c5ec82d1 | refs/heads/master | 2023-04-19T17:59:56.141669 | 2021-05-01T22:50:22 | 2021-05-01T22:50:22 | 163,623,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 23 17:51:28 2017
@author: Siddhant Misra
"""
import tensorflow as tf
"""
a = tf.constant(3.0)
b= tf.constant(4.0, dtype = tf.float32)
print(a,b)
session = tf.Session()
session.run([a,b])
sum = tf.add(a,b)
print(sum)
session.run(sum)
a = tf.placeholder(dtype = tf.float32)
b = tf.placeholder(dtype = tf.float32)
c = tf.add(a,b)
session.run(c,{a:2,b:5})
session.run(c,{a:[2,4],b:[5,9]})
session.run(c,{a:[[2,4],[3,5]],b:[[2,3],[5,9]]})
"""
session = tf.Session()
m = tf.Variable([.3], dtype = tf.float32)
c = tf.Variable([-.3], dtype = tf.float32)
x = tf.placeholder(dtype = tf.float32)
y = tf.placeholder(dtype = tf.float32)
linear_model = m*x+c
init= tf.global_variables_initializer()
session.run(init)
session.run(linear_model, {x:[1,2,3,4,5]})
SqErr = tf.squared_difference(linear_model ,y)
loss = tf.reduce_sum(SqErr)
session.run(loss,{x:[1,2,3,4,5],y:[2,6,10,14,18]})
print(loss)
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
session.run(init)
for i in range(100000):
session.run(train,{x:[1,2,3,4,5],y:[2,6,10,14,18]})
session.run([m,c])
session.run(loss,{x:[1,2,3,4,5],y:[2,6,10,14,18]}) | [
"misrasiddhant@yahoo.co.in"
] | misrasiddhant@yahoo.co.in |
ef06e79576ab02091941bdb71d690f0d88a6d32d | 9fa4b5f59dd1089e27ff4a4148e59eab76930ce8 | /Lesson2/day2_gaussian.py | 763939d697f7f5b65145aea3f70d3a1bac3a5f46 | [] | no_license | butlerkend/DrexelCodingLessons | 1dc469e14c651a555bd28eb4a7010661b3ec90d9 | c98b04c57ba1216a71d02e1920c5b142a7bcee79 | refs/heads/master | 2022-12-13T09:59:28.963756 | 2020-09-11T14:58:24 | 2020-09-11T14:58:24 | 294,724,529 | 0 | 0 | null | 2020-09-11T14:57:07 | 2020-09-11T14:57:06 | null | UTF-8 | Python | false | false | 356 | py | import math as m
#choose variables
x = float(input("Choose x: "))
m_var = float(input("Choose m: "))
s = float(input("Choose s: "))
print("x is %d" % x)
print("m is %d" % m_var)
print("s is %d" % s)
#calculate
f = (1./m.sqrt(2 * m.pi)) * m.exp(-1*(1./2)*(((x-m_var)/float(s))**2))
#print answer
print("therefore, the Guassian is computed as")
print(f)
| [
"scl63@drexel.edu"
] | scl63@drexel.edu |
4a04f161cd2987c6ca772ac5ef11c4953ecbb7ec | cfa35dc2ea93ee0eceb2399a9e6112e987579c09 | /stonesoup/metricgenerator/__init__.py | 580303c8a8d1dce6e8550f6f212b7afe198d89c9 | [
"LicenseRef-scancode-proprietary-license",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011"
] | permissive | dstl/Stone-Soup | 227e6a9e6fbdceca14af3f0259f311ec74095597 | f24090cc919b3b590b84f965a3884ed1293d181d | refs/heads/main | 2023-09-01T14:33:14.626428 | 2023-09-01T11:35:46 | 2023-09-01T11:35:46 | 98,420,803 | 315 | 126 | MIT | 2023-09-14T14:55:34 | 2017-07-26T12:34:28 | Python | UTF-8 | Python | false | false | 65 | py | from .base import MetricGenerator
__all__ = ['MetricGenerator']
| [
"sdhiscocks@dstl.gov.uk"
] | sdhiscocks@dstl.gov.uk |
ac2e687c0c4a12c9903eb7d92a3010cc767d746d | 055badce36d00effa7f36c43bf4c3c9fc0cd3642 | /streamFootballNew.py | 504b135de8c66a59718c955e1c0383ee58384417 | [] | no_license | sgtrouge/Adsfluence | f4bbb3b684b8fb08c5ce642ea8535222123bd2c8 | 23843f03aded1915cf3f885c9198129abaf73bbc | refs/heads/master | 2021-01-11T06:16:36.206828 | 2016-12-12T20:50:45 | 2016-12-12T20:50:45 | 72,179,452 | 0 | 0 | null | 2016-11-21T03:55:41 | 2016-10-28T06:19:30 | Python | UTF-8 | Python | false | false | 4,413 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# https://www.dataquest.io/blog/streaming-data-python/
import time
import tweepy
import json
import csv
from getpass import getpass
from textwrap import TextWrapper
# Keywords
track_Michigan = ['GoBlue', 'Ann Arbor', 'UMich', 'Michigan Wolverines', 'BigHouse', 'University of Michigan', 'Duderstadt', 'Yost arena', 'Crisler Center']
import csv
w = open("newFootball3rd.txt", "r")
initContent = w.read()
count = len(initContent.split('}{'))
w.close()
w = open("newFootball3rd.txt", "a")
# Read through old user list to ensure we only filter
# tweets of existing user
all_user_id = {}
def readOldUser(filename):
f = open(filename, 'r')
global all_user_id
data = f.read()
splits = data.split('}{')
splits[0] = splits[0][1:]
# For some reason the last one isn't built correctly, need to append ]]
splits[-1] = splits[-1][:-1] +']]'
count = 0
for js in splits:
jss = '{' + js + '}'
try:
tweet_map = json.loads(jss)
user = tweet_map["user_id"]
all_user_id[int(user)] = True
if "retweeted_author_id" in tweet_map:
retweeted_author_id = tweet_map['retweeted_author_id']
all_user_id[int(retweeted_author_id)] = True
count += 1
except:
pass
print len(all_user_id)
f.close()
# extract a row of info into csv from status
# Features: user_id, source_id if RT, content, location of user, # of RT, # of followers, # of followees, # timestamp, tweet ID
def writeAsJSON(status):
global all_user_id
global count
print count
if int(status.author.id) not in all_user_id:
return
rowInfo = {}
rowInfo['content'] = status.text
rowInfo['user_id'] = status.author.id
rowInfo['user_follower_count'] = status.author.followers_count
rowInfo['user_location'] = status.author.location
rowInfo['retweet_count'] = status.retweet_count
rowInfo['timestamp'] = status.timestamp_ms
if hasattr(status, 'retweeted_status'):
if int(status.retweeted_status.author.id) not in all_user_id:
return
rowInfo['retweeted_author_id'] = status.retweeted_status.author.id
rowInfo['retweeted_author_followers_count'] = status.retweeted_status.author.followers_count
rowInfo['retweeted_author_location'] = status.retweeted_status.author.location
rowInfo['retweeted_favorite_count'] = status.retweeted_status.favorite_count
global w
count = count + 1
print count
json.dump(rowInfo, w)
class StreamWatcherListener(tweepy.StreamListener):
status_wrapper = TextWrapper(width=60, initial_indent=' ', subsequent_indent=' ')
def on_status(self, status):
try:
print self.status_wrapper.fill(status.text)
writeAsJSON(status)
print '\n %s %s via %s\n' % (status.author.screen_name, status.created_at, status.source)
except:
# Catch any unicode errors while printing to console
# and just ignore them to avoid breaking application.
pass
def on_error(self, status_code):
print 'An error has occured! Status code = %s' % status_code
return True # keep stream alive
def on_timeout(self):
print 'Snoozing Zzzzzz'
def main():
readOldUser('resultBackup.txt')
access_token = "308609794-UnsFrbl4fcBQsOzbG5sqliFMKowhOlzRmLHVeBdp"
access_token_secret = "BwjfaD1QgiF0wEzMdyBDsLLEnYXeXLpLqgVcru4oU9QLB"
consumer_key = "Tzui94xupQIdChpAD7shh6DVo"
consumer_secret = "uMRrB7tH7YHgANboYa3wB1U0HHGm2g51j9Aj7QG9XVZnLgRlv9"
auth = tweepy.auth.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = tweepy.Stream(auth, StreamWatcherListener(), timeout=None)
# Prompt for mode of streaming
valid_modes = ['sample', 'filter']
while True:
mode = raw_input('Mode? [sample/filter] ')
if mode in valid_modes:
break
print 'Invalid mode! Try again.'
if mode == 'sample':
stream.sample()
elif mode == 'filter':
track_list = track_Michigan
print track_list
stream.filter([], track_list, languages=['en'])
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print '\nGoodbye!'
w.close()
| [
"nghia.vo.0509@gmail.com"
] | nghia.vo.0509@gmail.com |
1fffe8e845e8dadbebbcb8cc480060849bec6a19 | e8815ba621e72a015e3567e736aa7d5c6d05dd71 | /experiment_datasets_creator.py | 1150f93b1d38d2fc3ef76503ef6d083bd3c97b17 | [] | no_license | oserikov/nn_harmony_np | 5f810114481402a56b9349dfff77af47939ee9d8 | b93c19bcd625eea3716dc964bc962af84e21fb4b | refs/heads/master | 2021-07-01T16:40:07.420621 | 2020-10-05T08:01:07 | 2020-10-05T08:01:07 | 179,607,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,566 | py | import numpy as np
from phonology_tool import PhonologyTool
from nn_model import NNModel, ModelStateLogDTO
from typing import List, Callable
import csv
# 1. vow vs cons
# 2. +front vs -front
# 3. voiced stop consonant detection
# 4. start cons cluster
# 5. 2nd in cons cluster
# 6. +front harmony TODO
# 7. -front_harmony TODO
class ExperimentCreator:
def __init__(self, nn_model: NNModel, dataset: List[str], phonology_tool: PhonologyTool):
self.nn_model = nn_model
self.dataset = dataset
self.phon_tool = phonology_tool
def construct_unigram_dataset(self, char2target_fun: Callable, nn_feature_extractor_fun: Callable):
training_data = []
for word in self.dataset:
nn_features = self.get_nn_features_for_word(word)
word_training_data = []
for nn_feature in nn_features:
word_training_data.append((nn_feature_extractor_fun(nn_feature), char2target_fun(nn_feature.char)))
training_data.extend(word_training_data)
return training_data
def construct_ngram_dataset(self, ngram2target_fun: Callable, nn_feature_extractor_fun: Callable, ngram_len: int):
training_data = []
for word in self.dataset:
nn_features = self.get_nn_features_for_word(word)
word_training_data = []
for idx, nn_feature in enumerate(nn_features[:-ngram_len + 1]):
ngram = ''.join([f.char for f in nn_features[idx:idx+ngram_len]])
tmp_features = [{"ngram": ngram}]
for idx, tmp_feature in enumerate(nn_features[idx:idx+ngram_len]):
tmp_features.append({f"char_{idx}_"+k: v
for entry in nn_feature_extractor_fun(tmp_feature)
for k, v in entry.items()})
word_training_data.append((tmp_features, ngram2target_fun(ngram)))
# word_training_data.append((nn_feature_extractor_fun(nn_feature), ngram2target_fun(nn_feature.char)))
training_data.extend(word_training_data)
return training_data
def front_harmony_dataset(self):
return self.construct_ngram_dataset(self.phon_tool.shows_front_harmony, self.extract_all_nn_features, 4)
def round_feature_dataset(self):
return self.construct_unigram_dataset(self.phon_tool.is_round, self.extract_all_nn_features)
def vov_vs_cons_dataset(self):
return self.construct_unigram_dataset(self.phon_tool.is_vowel, self.extract_all_nn_features)
def front_feature_dataset(self):
return self.construct_unigram_dataset(self.phon_tool.is_front, self.extract_all_nn_features)
def voiced_stop_consonant_dataset(self):
return self.construct_unigram_dataset(self.phon_tool.is_voiced_stop_consonant, self.extract_all_nn_features)
def second_consonant_in_cluster_dataset(self):
nn_feature_extractor_fun = self.extract_all_nn_features
training_data = []
for word in self.dataset:
nn_features = self.get_nn_features_for_word(word)
nn_feature = nn_features[0]
word_training_data = [(nn_feature_extractor_fun(nn_feature), False)]
previous_is_first_consonant_in_cluster = self.phon_tool.is_consonant(nn_feature.char)
previous_is_vowel = self.phon_tool.is_vowel(nn_feature.char)
for nn_feature in nn_features:
word_training_data.append((nn_feature_extractor_fun(nn_feature),
self.phon_tool.is_consonant(nn_feature.char)
and previous_is_first_consonant_in_cluster))
previous_is_first_consonant_in_cluster = self.phon_tool.is_consonant(nn_feature.char) \
and previous_is_vowel
previous_is_vowel = self.phon_tool.is_vowel(nn_feature.char)
training_data.extend(word_training_data)
return training_data
def is_starting_consonant_cluster_dataset(self):
nn_feature_extractor_fun = self.extract_all_nn_features
training_data = []
for word in self.dataset:
nn_features = self.get_nn_features_for_word(word)
nn_feature = nn_features[0]
word_training_data = [(nn_feature_extractor_fun(nn_feature), self.phon_tool.is_consonant(nn_feature.char))]
previous_is_vowel = self.phon_tool.is_vowel(nn_feature.char)
for nn_feature in nn_features:
word_training_data.append((nn_feature_extractor_fun(nn_feature),
self.phon_tool.is_consonant(nn_feature.char) and previous_is_vowel))
previous_is_vowel = self.phon_tool.is_vowel(nn_feature.char)
training_data.extend(word_training_data)
return training_data
def extract_all_nn_features(self, nn_feature: ModelStateLogDTO):
return nn_feature.as_dict()
def get_nn_features_for_word(self, word) -> List[ModelStateLogDTO]:
return self.nn_model.run_model_on_word(word)
@staticmethod
def train_features_to_single_dict(dataset_train_features_list):
res = {}
for idx, d in enumerate(dataset_train_features_list):
for k, v in d.items():
res[f"{idx}_" + k] = v
return res
@staticmethod
def make_dataset_pretty(dataset):
pretty_dataset = []
for (train_entry, target_entry) in dataset:
pretty_dataset.append((ExperimentCreator.train_features_to_single_dict(train_entry), target_entry))
return pretty_dataset
@staticmethod
def save_dataset_to_tsv(dataset, dataset_fn):
dataset_to_single_dicts_list = []
for entry in dataset:
new_entry = entry[0].copy()
new_entry["TARGET"] = entry[1]
dataset_to_single_dicts_list.append(new_entry)
dataset_keys = list(dataset_to_single_dicts_list[0].keys())
with open(dataset_fn, 'w', encoding="utf-8", newline='') as dataset_f:
dictWriter = csv.DictWriter(dataset_f, dataset_to_single_dicts_list[0].keys(), delimiter='\t')
dictWriter.writeheader()
dictWriter.writerows(dataset_to_single_dicts_list)
# print('\t'.join(dataset_keys + ["target"]), file=dataset_f)
# for (features, target) in dataset:
# print('\t'.join([features[key] for key in dataset_keys] + [target]), file=dataset_f)
| [
"srkvoa@gmail.com"
] | srkvoa@gmail.com |
2b63046ccd7b852daa7ce8a78c6345d746f667f9 | 6c137e70bb6b1b618fbbceddaeb74416d387520f | /spyre/testing/cavity.py | 1d95f5fa22fb580cf87be1fa538c49f3fa4ba85b | [
"BSD-2-Clause"
] | permissive | zhong-lab/code | fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15 | b810362e06b44387f0768353c602ec5d29b551a2 | refs/heads/master | 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 | BSD-2-Clause | 2022-12-08T21:46:15 | 2019-05-02T23:37:39 | Python | UTF-8 | Python | false | false | 361 | py | ##Config file for lifetime_spyrelet.py in spyre/spyre/spyrelet/
# Device List
devices = {
'vna':[
'lantz.drivers.VNA.P9371A',
['TCPIP0::DESKTOP-ER250Q8::hislip0,4880::INSTR'],
{}
]
}
# Experiment List
spyrelets = {
'freqSweep':[
'spyre.spyrelets.cavity_spyrelet.Record',
{'vna': 'vna'},
{}
],
} | [
"none"
] | none |
b880bb7eec743a97332b62bfdcdd7c86b2dbbd47 | 59842f3191e3f86e0fa95956b9b9d122658e199a | /vps_tools/__init__.py | b1a1a5ca75ad7cdf213f4d50321303910a295141 | [] | no_license | vitaly4uk/vps-tools | d19936abc3b8284f1ccf76ff23c5daa85a292540 | cf4f94d1ce6353464b6b71b4fce37f503dfec7c4 | refs/heads/master | 2020-05-20T19:25:34.399239 | 2018-11-24T18:35:09 | 2018-11-24T18:35:09 | 68,452,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16 | py | VERSION="0.2.10" | [
"vitaly.omelchuk@gmail.com"
] | vitaly.omelchuk@gmail.com |
66a5c9b0258f2ea3b92f24ed39c3fcd45c84f294 | 335652dd8cf0097f171dc9d349425047c53766ea | /pdestrianTracker.py | 75113a26ca6e169915d4a3c745f76d64da91bf23 | [] | no_license | bytebarian/SocialDistanceDetector | 880e3ff9b2ff1c9dafa05bf88bb9d405fd9ee5a6 | bdd28a52f39ba76ef663c5faf5f83b83331f8288 | refs/heads/master | 2023-02-12T10:42:31.882635 | 2021-01-03T22:49:31 | 2021-01-03T22:49:31 | 286,845,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,242 | py | # USAGE
# python pdestrianTracker.py --input pedestrians.mp4
# python pdestrianTracker.py --input pedestrians.mp4 --output output.avi
import argparse
import cv2
import imutils
import numpy as np
import torch
import time
import pymongo
from deep_sort import DeepSort
# import the necessary packages
from pyimagesearch.detectorSwitcher import get_detector
from tracking import CentroidTracker
from utils.parser import get_config
from datetime import datetime
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", type=str, default="pedestrians.mp4",
                help="path to (optional) input video file")
ap.add_argument("-o", "--output", type=str, default="output.avi",
                help="path to (optional) output video file")
ap.add_argument("-d", "--display", type=int, default=1,
                help="whether or not output frame should be displayed")
ap.add_argument("-a", "--algorithm", type=str, default="detr",
                help="choose what kind of algorithm should be used for people detection")
ap.add_argument("-c", "--coords", type=str, default="[(427, 0), (700, 0), (530, 318), (1, 198)]",
                help="comma seperated list of source points")
ap.add_argument("-s", "--size", type=str, default="[700,400]",
                help="coma separated tuple describing height and width of transformed birdview image")
ap.add_argument("-t", "--time", type=int, default=0,
                help="set the initial timestamp of video stream start in unix format in nanoseconds")
# BUG FIX: the original passed "r" "--object" — implicit string concatenation
# yields the single string "r--object", which argparse registers as a
# *required positional* argument and breaks every invocation of the script.
# The intended optional flag is "-r", "--object".
ap.add_argument("-r", "--object", type=str, default="person",
                help="objects to detect and track")
# Expose parsed options as a plain dict (args["input"], args["object"], ...).
args = vars(ap.parse_args())
# initialize the video stream and pointer to output video file
print("[INFO] accessing video stream...")
# Fall back to the default camera (device 0) when no --input path was given.
vs = cv2.VideoCapture(args["input"] if args["input"] else 0)
# Wall-clock start of the stream in nanoseconds since the epoch:
# taken from --time when supplied (> 0), otherwise "now".
initial_time = args["time"] if args["time"] > 0 else time.time_ns()
writer = None  # video writer for --output, created lazily elsewhere
trajectories = {}  # tracker id -> set of bird's-eye-view points visited
ct = CentroidTracker()
# MongoDB sink for trajectory points: database "massmove", collection
# "points", on a local server.
mongoclient = pymongo.MongoClient("mongodb://localhost:27017/")
mongodb = mongoclient["massmove"]
mongocol = mongodb["points"]
# InfluxDB Cloud sink for the same trajectory points.
# NOTE(review): the API token is hard-coded in source — move it to an
# environment variable or config file before sharing/deploying this script.
token = "uw400lj_tKeWjbTdwM4VJz_qZ2MnpsOh5zeBdP3BKS7Au4NaOVSpePcd1Zj47bsNdBtmqCt9Gf5u1UHvWiFYgg=="
org = "ikillforfood@gmail.com"
bucket = "points"
influxclient = InfluxDBClient(url="https://westeurope-1.azure.cloud2.influxdata.com", token=token)
write_api = influxclient.write_api(write_options=SYNCHRONOUS)
# Build the DeepSORT tracker from its YAML configuration file.
cfg = get_config()
cfg.merge_from_file("deep_sort/configs/deep_sort.yaml")
deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                    max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                    nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                    max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                    use_cuda=True)
def draw_markers():
    """Annotate the current ``frame`` with one marker per tracked person.

    Reads the module-level ``outputs`` (an array-like whose rows hold
    ``x1, y1, x2, y2, ..., id`` -- presumably DeepSORT output, TODO confirm)
    and ``frame``; writes the globals ``i``, ``bbox``, ``cX``, ``cY`` and
    ``id`` as a side effect.
    """
    global i, bbox, cX, cY, id
    color = (0, 255, 0)
    if len(outputs) > 0:
        bbox_xyxy = outputs[:, :4]
        identities = outputs[:, -1]
        # loop over the results
        for (i, bbox) in enumerate(bbox_xyxy):
            x1, y1, x2, y2 = [int(i) for i in bbox]
            # box centroid (cX, cY); (cX, y2) is the bottom-center "foot" point
            cX = int((x1 + x2) / 2)
            cY = int((y1 + y2) / 2)
            id = int(identities[i]) if identities is not None else 0
            label = '{}{:d}'.format("", id)
            t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
            # if the index pair exists within the violation set, then
            # update the color
            # if i in violate:
            # color = (0, 0, 255)
            # draw the centroid coordinates of the person,
            cv2.circle(frame, (cX, cY), 5, color, 1)
            cv2.circle(frame, (cX, y2), 5, color, 1)
            cv2.line(frame, (cX, cY), (cX, y2), color, 1)
            cv2.putText(frame, label, (x1, y1 + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
            # mirror the foot point onto the bird's-eye view
            draw_markers_on_birdview(cX, color, y2, id)
def draw_markers_on_birdview(cX, color, y2, id):
    """Project the foot point (cX, y2) through the global homography
    ``matrix`` and draw the pedestrian marker onto the global ``birdview``
    image, then record the projected point in the trajectory stores."""
    foot_x, foot_y = cX, y2
    # Apply the perspective transform by hand: divide by the homogeneous
    # coordinate to obtain the warped (x, y) position.
    denom = matrix[2][0] * foot_x + matrix[2][1] * foot_y + matrix[2][2]
    warped_x = (matrix[0][0] * foot_x + matrix[0][1] * foot_y + matrix[0][2]) / denom
    warped_y = (matrix[1][0] * foot_x + matrix[1][1] * foot_y + matrix[1][2]) / denom
    projected = (int(warped_x), int(warped_y))
    # inner dot plus a wider ring around the pedestrian position
    for radius in (5, 30):
        cv2.circle(birdview, projected, radius, color, 1)
    add_to_pedestrian_trajectory(id, projected)
def add_to_pedestrian_trajectory(id, p_after):
    """Record a bird's-eye-view position for pedestrian ``id``.

    Adds the point to the in-memory ``trajectories`` set for that id and
    persists it to both InfluxDB (via ``write_api``) and MongoDB (via
    ``mongocol``), stamped with the module-level frame ``timestamp``.

    Args:
        id: integer tracker id of the pedestrian.
        p_after: (x, y) tuple in bird's-eye-view coordinates.
    """
    # keep a per-pedestrian set of visited birdview points
    trajectories.setdefault(id, set()).add(p_after)
    (x, y) = p_after
    point = Point("mem").tag("user", id).field("point", f'[{x}, {y}]').time(datetime.utcnow(), WritePrecision.NS)
    write_api.write(bucket, org, point)
    # FIX: the original named this local `dict`, shadowing the builtin
    record = {
        "id": id,
        "point": [x, y],
        "time": timestamp
    }
    mongocol.insert_one(record)
def calculate_bird_view():
    """Compute the perspective (bird's-eye) transform for the current frame.

    Reads the source quadrilateral from ``args["coords"]`` and an optional
    output size from ``args["size"]``, then updates the module-level
    ``h``, ``w``, ``size``, ``matrix`` and ``birdview``.
    """
    global h, w, size, matrix, birdview
    # NOTE: eval() on the CLI strings is trusted input here (operator-supplied)
    pts = np.array(eval(args["coords"]), dtype="float32")
    h, w = frame.shape[:2]
    if args["size"] != "":
        # an explicit output size overrides the frame dimensions
        size = np.array(eval(args["size"]), dtype="int")
        h, w = size[:2]
    # destination rectangle spans the whole output image, corner order
    # matching the source quadrilateral
    destination = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    matrix = cv2.getPerspectiveTransform(pts, destination)
    birdview = cv2.warpPerspective(frame, matrix, (w, h))
def calculate_bboxs_confs():
    """Convert detector boxes from corner format (x, y, w, h) into
    center format, appending to the module-level ``bbox_xcycwh`` and the
    matching confidences to ``confs``.

    Also mutates the shared globals ``i``, ``bbox``, ``w``, ``h``, ``cX``
    and ``cY`` as a side effect (they are reused elsewhere in the script).
    """
    global i, bbox, w, h, cX, cY
    for (i, bbox) in enumerate(boxes):
        # top-left corner and box dimensions
        startX, startY = bbox[0], bbox[1]
        w, h = bbox[2], bbox[3]
        # center point of the box
        cX = startX + int(w / 2)
        cY = startY + int(h / 2)
        bbox_xcycwh.append([cX, cY, w, h])
        confs.append([scores[i]])
def track(boxes):
    """Feed (x, y, w, h) detections to the centroid tracker ``ct`` and
    return its mapping of object id -> centroid."""
    corner_boxes = []
    for detection in boxes:
        # convert width/height to an absolute bottom-right corner
        x1, y1 = detection[0], detection[1]
        x2 = x1 + detection[2]
        y2 = y1 + detection[3]
        corner_boxes.append(np.array([x1, y1, x2, y2]).astype("int"))
    return ct.update(corner_boxes)
def draw_markers_alternate():
    """Draw centroid-tracker results onto ``frame``: an id label and a
    filled dot per object, mirrored onto the bird's-eye view.

    Updates the module-level ``text`` as a side effect.
    """
    global text
    color = (0, 255, 0)
    # ``outputs`` maps objectID -> centroid (x, y) when the centroid
    # tracker path is active
    for object_id, centroid in outputs.items():
        center_x, center_y = centroid[0], centroid[1]
        text = "ID {}".format(object_id)
        cv2.putText(frame, text, (center_x - 10, center_y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.circle(frame, (center_x, center_y), 4, (0, 255, 0), -1)
        draw_markers_on_birdview(center_x, color, center_y, object_id)
# loop over the frames from the video stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()
    # frame timestamp in nanoseconds: stream start + playback position (ms -> ns)
    timestamp = initial_time + (vs.get(cv2.CAP_PROP_POS_MSEC) * 1000000)
    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break
    # resize the frame and then detect people (and only people) in it
    frame = imutils.resize(frame, width=700)
    calculate_bird_view()
    # NOTE(review): the detector is re-created on every frame; hoisting this
    # above the loop is likely much cheaper -- confirm get_detector's cost.
    detector = get_detector(args["algorithm"])
    boxes, scores = detector.detect(frame)
    bbox_xcycwh = []
    confs = []
    calculate_bboxs_confs()
    xywhs = torch.Tensor(bbox_xcycwh)
    confss = torch.Tensor(scores)
    # Pass detections to deepsort
    #outputs = deepsort.update(xywhs, confss, frame)
    outputs = track(boxes)
    # initialize the set of indexes that violate the minimum social
    # distance
    violate = set()
    #draw_markers()
    draw_markers_alternate()
    # draw the total number of social distancing violations on the
    # output frame
    # NOTE: `violate` is never populated in this version, so the count is always 0
    text = "Social Distancing Violations: {}".format(len(violate))
    cv2.putText(frame, text, (10, frame.shape[0] - 25),
        cv2.FONT_HERSHEY_SIMPLEX, 0.85, (0, 0, 255), 3)
    # check to see if the output frame should be displayed to our
    # screen
    if args["display"] > 0:
        # show the output frame
        cv2.imshow("Frame", frame)
        cv2.imshow("Birdview", birdview)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
    # if an output video file path has been supplied and the video
    # writer has not been initialized, do so now
    if args["output"] != "" and writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 25,
            (frame.shape[1], frame.shape[0]), True)
    # if the video writer is not None, write the frame to the output
    # video file
    if writer is not None:
        writer.write(frame)
| [
"mariusz.dobrowolski@softwarehut.com"
] | mariusz.dobrowolski@softwarehut.com |
992cbbcc8751d9aa132eea71a9c34ba42f5b03b4 | 4754226625d4a6b9680a22fd39166f502034aeb5 | /samsung/[cutz]lab1.py | 971e71a34d9cdfed878116d35cf9fd619e85ef26 | [
"MIT"
] | permissive | cutz-j/AlgorithmStudy | 298cc7d6fa92345629623a9bd8d186f0608cdf7c | de0f81220e29bd5e109d174800f507b12a3bee36 | refs/heads/master | 2021-07-01T03:15:51.627208 | 2021-02-24T01:24:44 | 2021-02-24T01:24:44 | 222,935,322 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py | import sys
from itertools import combinations
class Queue():
    """Simple FIFO queue backed by a plain list.

    Elements are never physically removed: ``front`` advances past consumed
    items instead, while ``rear`` and ``pop_count`` mirror the number of
    appends and pops performed so far.
    """

    def __init__(self):
        # index of the next element to hand out
        self.front = 0
        # number of elements appended so far
        self.rear = 0
        # backing storage; consumed items stay in place
        self.list = []
        # number of elements popped so far (always equals ``front``)
        self.pop_count = 0

    def append(self, x):
        """Enqueue ``x`` at the rear of the queue."""
        self.list.append(x)
        self.rear = self.rear + 1

    def pop(self):
        """Dequeue and return the oldest element.

        Raises IndexError when called on an empty queue.
        """
        item = self.list[self.front]
        self.front = self.front + 1
        self.pop_count = self.pop_count + 1
        return item

    def empty(self):
        """Return True when every appended element has been popped."""
        return self.pop_count == len(self.list)
# Grid simulation (appears to be the "laboratory" wall/virus problem):
# place exactly 3 walls on empty cells, flood-fill the virus with BFS,
# and maximize the number of surviving empty (0) cells.
res = 0
rl = lambda: sys.stdin.readline()
# grid dimensions: N rows, M columns
N, M = map(int, rl().split())
all_map = []
virus = []  # starting virus coordinates (cell value 2)
zero = []  # empty cells (cell value 0) -- candidate wall positions
virus_num = sys.maxsize  # infected-cell count of the best layout so far (pruning bound)
for i in range(N):
    tmp = list(map(int, rl().split()))
    for j, v in enumerate(tmp):
        if v == 2:
            virus.append((i, j))
        elif v == 0:
            zero.append((i, j))
    all_map.append(tmp)
# 4-neighbour offsets: down, right, up, left
row_dir, col_dir = [1, 0, -1, 0], [0, 1, 0, -1]
# try every way of placing exactly three walls on empty cells
wall_comb = combinations(zero, 3)
for wall in wall_comb:
    # visited = copy.deepcopy(all_map)
    # manual row-by-row copy of the grid (avoids copy.deepcopy overhead)
    visited = []
    for i in range(N):
        tmp = []
        for j in range(M):
            tmp.append(all_map[i][j])
        visited.append(tmp)
    # place the three candidate walls
    for w in wall:
        visited[w[0]][w[1]] = 1
    v_num = 0
    # BFS flood-fill of the virus from every source cell
    queue = Queue()
    for v in virus:
        queue.append(v)
    while queue.empty() == False:
        r, c = queue.pop()
        v_num += 1
        # prune: this layout already infects more cells than the current best
        if v_num > virus_num:
            break
        for i in range(4):
            new_r, new_c = r + row_dir[i], c + col_dir[i]
            if (0 <= new_r < N) and (0 <= new_c < M):
                if visited[new_r][new_c] == 0:
                    queue.append((new_r, new_c))
                    visited[new_r][new_c] = 2
    # count surviving empty cells (cnt) and infected cells (v_cnt)
    cnt, v_cnt = 0, 0
    for i in range(N):
        for j in range(M):
            if visited[i][j] == 0:
                cnt += 1
            if visited[i][j] == 2:
                v_cnt += 1
    # keep the layout that preserves the most empty cells
    if cnt > res:
        res = cnt
        virus_num = v_cnt
print(res) | [
"cutz-j@naver.com"
] | cutz-j@naver.com |
a712f370e5bfa4a45f8bfd683db3ef8989ad95c7 | 195425d9c034f501ed8c453910e4286852f40385 | /import-automation/import-progress-dashboard-frontend/server/app/configs.py | c886caa0b203f66c61484a0901e126137b429910 | [
"Apache-2.0"
] | permissive | wh1210/data | f4f378a11adbbf6d0d184cb71ee1e84ec997d2de | 6b32c869f426a8a5ba1b99edd324cc0c77bbd4ad | refs/heads/master | 2022-12-06T22:47:58.148971 | 2020-08-21T18:13:11 | 2020-08-21T18:13:11 | 269,511,511 | 1 | 0 | Apache-2.0 | 2020-06-05T02:23:48 | 2020-06-05T02:23:48 | null | UTF-8 | Python | false | false | 1,207 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configurations for the server.
"""
import os
from google.cloud import logging
def is_production():
    """Return True when the DASHBOARD_FRONTEND_PRODUCTION env var is set."""
    return os.environ.get('DASHBOARD_FRONTEND_PRODUCTION') is not None
def setup_logging():
    """Connects the default logger to Google Cloud Logging.

    Only logs at INFO level or higher will be captured.
    """
    # NOTE(review): requires Google Cloud credentials to be discoverable in
    # the environment; raises otherwise -- confirm deployment provides them.
    client = logging.Client()
    client.get_default_handler()
    client.setup_logging()
def get_dashboard_oauth_client_id():
    """Return the Identity-Aware Proxy OAuth client id from the
    DASHBOARD_OAUTH_CLIENT_ID environment variable, or None if unset."""
    return os.getenv('DASHBOARD_OAUTH_CLIENT_ID')
| [
"noreply@github.com"
] | noreply@github.com |
a00be5a37d73619acb218a0561d0862883839519 | 75d0009170fe44c315ce72a8c29e712ade3848c3 | /9_Outter_Function/_pickle.py | 6d2c0393b7e4e320f70acd7eeb4785cf3083afaf | [] | no_license | moon4311/sp | d6a65d5b95bc51332b9a80a1410ffb6854a99f61 | 207758d4f4f7c28fa1cd9f277825313257782433 | refs/heads/master | 2021-09-14T07:20:49.257372 | 2018-05-09T13:28:42 | 2018-05-09T13:28:42 | 116,090,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | import pickle
######## 바이너리 파일 (파일을 열어도 내용을 알아 볼 수 없다)
f = open("test.txt","wb")
data = {1:'python', 2: 'you need'}
pickle.dump(data, f)
f.close()
f = open("test.txt","rb")
data = pickle.load(f)
print(data)
f.close()
######## 텍스트파일 (아스키코드를 이용한 파일 , 내용을 보고 읽을 수 있다)
f = open("test2.txt","w")
f.write("{1:'python', 2: 'you need'}")
f.close()
f = open("test2.txt","r")
print(f.read())
f.close() | [
"kjm4311@naver.com"
] | kjm4311@naver.com |
ed2e2a733351c52a13a5f61c050406b5539e000c | 04177ba9d8d91d19dc3689fbb86d65d5954b36f4 | /fullthrottle/testapp/serializers.py | a66273f086b3d4e690a03dc583cd292fa5df901a | [] | no_license | ruhansharief/pythonanywhere2 | 0a7ba7ad6a6730f5bb35be8ae581e0f031e9704b | 3193e380e090f5513a3d93301fa79cb428ed9bdf | refs/heads/master | 2022-10-01T04:30:40.092396 | 2020-06-03T17:23:38 | 2020-06-03T17:23:38 | 269,122,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | from testapp.models import UserModel, ActivityPeroidModel
from rest_framework import serializers
class ActivitySerializer(serializers.ModelSerializer):
    """Serializes an ActivityPeroidModel, exposing only its start/end times."""
    class Meta:
        model = ActivityPeroidModel
        fields = ['start_time', 'end_time']
class UserSerializer(serializers.ModelSerializer):
    """Serializes a UserModel with its nested, read-only activity periods."""
    # NOTE(review): `activity_periods` presumably matches a related_name on
    # ActivityPeroidModel's foreign key to UserModel -- verify in models.py.
    activity_periods = ActivitySerializer(read_only=True,many=True)
    class Meta:
        model = UserModel
        fields = '__all__'
| [
"ruhansharief22@gmail.com"
] | ruhansharief22@gmail.com |
1511968638f2441910615d9b97b2c2629ea64078 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/resources/types/product_bidding_category_constant.py | 6aacc16b169b40875e5f6b751c1c07d2a833a97f | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,334 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import product_bidding_category_level
from google.ads.googleads.v6.enums.types import product_bidding_category_status
# Auto-generated proto-plus bindings for the ProductBiddingCategoryConstant
# resource; field numbers and names must stay in sync with the upstream
# .proto definition -- do not edit by hand.
__protobuf__ = proto.module(
    package='google.ads.googleads.v6.resources',
    marshal='google.ads.googleads.v6',
    manifest={
        'ProductBiddingCategoryConstant',
    },
)
class ProductBiddingCategoryConstant(proto.Message):
    r"""A Product Bidding Category.
    Attributes:
        resource_name (str):
            Output only. The resource name of the product bidding
            category. Product bidding category resource names have the
            form:
            ``productBiddingCategoryConstants/{country_code}~{level}~{id}``
        id (int):
            Output only. ID of the product bidding category.
            This ID is equivalent to the google_product_category ID as
            described in this article:
            https://support.google.com/merchants/answer/6324436.
        country_code (str):
            Output only. Two-letter upper-case country
            code of the product bidding category.
        product_bidding_category_constant_parent (str):
            Output only. Resource name of the parent
            product bidding category.
        level (google.ads.googleads.v6.enums.types.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel):
            Output only. Level of the product bidding
            category.
        status (google.ads.googleads.v6.enums.types.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus):
            Output only. Status of the product bidding
            category.
        language_code (str):
            Output only. Language code of the product
            bidding category.
        localized_name (str):
            Output only. Display value of the product bidding category
            localized according to language_code.
    """
    resource_name = proto.Field(proto.STRING, number=1)
    id = proto.Field(proto.INT64, number=10, optional=True)
    country_code = proto.Field(proto.STRING, number=11, optional=True)
    product_bidding_category_constant_parent = proto.Field(proto.STRING, number=12, optional=True)
    level = proto.Field(proto.ENUM, number=5,
        enum=product_bidding_category_level.ProductBiddingCategoryLevelEnum.ProductBiddingCategoryLevel,
    )
    status = proto.Field(proto.ENUM, number=6,
        enum=product_bidding_category_status.ProductBiddingCategoryStatusEnum.ProductBiddingCategoryStatus,
    )
    language_code = proto.Field(proto.STRING, number=13, optional=True)
    localized_name = proto.Field(proto.STRING, number=14, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
76846a71c9a5bcac685d5452c7f039c04d5dd554 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python基础部分/day02/code/test.py | e2a14e0f4bc5a36dc4cbb782ba168443482180ac | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | #!/usr/bin/python3
print('这是我的python第一条语句')
print('我现在开始学python')
print('这是最后一条语句') | [
"yabing_ji@163.com"
] | yabing_ji@163.com |
37c5eab2b0dce309f35baf4e54e33fcf65b69a0f | b37c027a3f63305345f266e8f4f944721adbb956 | /BASES/OLD/3_2_CAC_CC_SPLTED_CSDS/tx_no_gui.py | a9468578d04ae10a963ccd3699fadbf0be6ccf6e | [] | no_license | andrehoracio97/investigacao | fdfb663867e6fe9f240bb828b7b96b99323f8be3 | 5dd1fad12f4991bb737ed236426247dfb52333eb | refs/heads/master | 2022-10-11T02:08:30.478893 | 2020-06-16T09:58:13 | 2020-06-16T09:58:13 | 193,519,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,072 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Tx No Gui
# Author: andresilva
# GNU Radio version: 3.7.13.5
##################################################
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import fec
from gnuradio import gr
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from gnuradio.filter import pfb
from optparse import OptionParser
import insert_vec_cpp
import pmt
import random
import time
class tx_no_gui(gr.top_block):
    """GRC-generated transmitter flow graph (no GUI).

    Reads a file, CCSDS-convolutionally encodes it, frames it behind a
    96-bit access code, QPSK-maps it (differentially encoded), pulse-shapes
    it with an RRC polyphase resampler and sends it to a USRP sink.
    Generated code: prefer regenerating from the .grc file over hand edits.
    """
    def __init__(self, puncpat='11'):
        gr.top_block.__init__(self, "Tx No Gui")
        ##################################################
        # Parameters
        ##################################################
        self.puncpat = puncpat
        ##################################################
        # Variables
        ##################################################
        self.sps = sps = 4
        self.nfilts = nfilts = 32
        self.eb = eb = 0.22
        self.tx_rrc_taps = tx_rrc_taps = firdes.root_raised_cosine(nfilts, nfilts, 1.0, eb, 5*sps*nfilts)
        self.taps_per_filt = taps_per_filt = len(tx_rrc_taps)/nfilts
        self.samp_rate_array_MCR = samp_rate_array_MCR = [7500000,4000000,3750000,3000000,2500000,2000000,1500000,1000000,937500,882352,833333,714285,533333,500000,421052,400000,380952]
        self.vector = vector = [int(random.random()*4) for i in range(49600)]
        self.variable_qtgui_range_0 = variable_qtgui_range_0 = 50
        self.samp_rate = samp_rate = samp_rate_array_MCR[1]
        self.rate = rate = 2
        self.polys = polys = [109, 79]
        # NOTE(review): map() returns a lazy iterator on Python 3; this
        # generated file targets GNU Radio 3.7 (Python 2) where it is a list.
        self.pld_enc = pld_enc = map( (lambda a: fec.ccsds_encoder_make(440, 0, fec.CC_TERMINATED)), range(0,16) );
        self.pld_const = pld_const = digital.constellation_rect(([0.707+0.707j, -0.707+0.707j, -0.707-0.707j, 0.707-0.707j]), ([0, 1, 2, 3]), 4, 2, 2, 1, 1).base()
        self.pld_const.gen_soft_dec_lut(8)
        self.k = k = 7
        self.frequencia_usrp = frequencia_usrp = 484e6
        self.filt_delay = filt_delay = 1+(taps_per_filt-1)/2
        self.MCR = MCR = "master_clock_rate=60e6"
        ##################################################
        # Blocks
        ##################################################
        self.uhd_usrp_sink_0_0 = uhd.usrp_sink(
            ",".join(("serial=F5EAE1", MCR)),
            uhd.stream_args(
                cpu_format="fc32",
                channels=range(1),
            ),
        )
        self.uhd_usrp_sink_0_0.set_samp_rate(samp_rate)
        self.uhd_usrp_sink_0_0.set_time_now(uhd.time_spec(time.time()), uhd.ALL_MBOARDS)
        self.uhd_usrp_sink_0_0.set_center_freq(frequencia_usrp, 0)
        self.uhd_usrp_sink_0_0.set_gain(variable_qtgui_range_0, 0)
        self.uhd_usrp_sink_0_0.set_antenna('TX/RX', 0)
        self.pfb_arb_resampler_xxx_0 = pfb.arb_resampler_ccf(
            sps,
            taps=(tx_rrc_taps),
            flt_size=nfilts)
        self.pfb_arb_resampler_xxx_0.declare_sample_delay(filt_delay)
        self.insert_vec_cpp_new_vec_0 = insert_vec_cpp.new_vec((vector))
        self.fec_extended_encoder_0 = fec.extended_encoder(encoder_obj_list=pld_enc, threading='capillary', puncpat=puncpat)
        self.digital_map_bb_1_0 = digital.map_bb((pld_const.pre_diff_code()))
        self.digital_diff_encoder_bb_0 = digital.diff_encoder_bb(4)
        self.digital_chunks_to_symbols_xx_0_0 = digital.chunks_to_symbols_bc((pld_const.points()), 1)
        self.blocks_vector_source_x_0_0_0 = blocks.vector_source_b([0], True, 1, [])
        self.blocks_stream_to_tagged_stream_0_0_0 = blocks.stream_to_tagged_stream(gr.sizeof_char, 1, 992, "packet_len")
        self.blocks_stream_mux_0_1_0 = blocks.stream_mux(gr.sizeof_char*1, (96, 896))
        self.blocks_stream_mux_0_0 = blocks.stream_mux(gr.sizeof_char*1, (892, 4))
        self.blocks_repack_bits_bb_1_0_0_1 = blocks.repack_bits_bb(8, 1, '', False, gr.GR_MSB_FIRST)
        self.blocks_repack_bits_bb_1_0_0_0 = blocks.repack_bits_bb(1, 2, "packet_len", False, gr.GR_MSB_FIRST)
        self.blocks_multiply_const_vxx_1 = blocks.multiply_const_vcc((0.7, ))
        self.blocks_file_source_0_0_1_0_0_0 = blocks.file_source(gr.sizeof_char*1, '/home/andre/Desktop/Files_To_Transmit/video_lion.mpeg', False)
        self.blocks_file_source_0_0_1_0_0_0.set_begin_tag(pmt.PMT_NIL)
        # 96-bit access code prepended to every frame by the stream mux
        self.acode_1104 = blocks.vector_source_b([0x1, 0x0, 0x1, 0x0, 0x1, 0x1, 0x0, 0x0, 0x1, 0x1, 0x0, 0x1, 0x1, 0x1, 0x0, 0x1, 0x1, 0x0, 0x1, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x1, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0], True, 1, [])
        ##################################################
        # Connections
        ##################################################
        self.connect((self.acode_1104, 0), (self.blocks_stream_mux_0_1_0, 0))
        self.connect((self.blocks_file_source_0_0_1_0_0_0, 0), (self.blocks_repack_bits_bb_1_0_0_1, 0))
        self.connect((self.blocks_multiply_const_vxx_1, 0), (self.uhd_usrp_sink_0_0, 0))
        self.connect((self.blocks_repack_bits_bb_1_0_0_0, 0), (self.insert_vec_cpp_new_vec_0, 0))
        self.connect((self.blocks_repack_bits_bb_1_0_0_1, 0), (self.fec_extended_encoder_0, 0))
        self.connect((self.blocks_stream_mux_0_0, 0), (self.blocks_stream_mux_0_1_0, 1))
        self.connect((self.blocks_stream_mux_0_1_0, 0), (self.blocks_stream_to_tagged_stream_0_0_0, 0))
        self.connect((self.blocks_stream_to_tagged_stream_0_0_0, 0), (self.blocks_repack_bits_bb_1_0_0_0, 0))
        self.connect((self.blocks_vector_source_x_0_0_0, 0), (self.blocks_stream_mux_0_0, 1))
        self.connect((self.digital_chunks_to_symbols_xx_0_0, 0), (self.pfb_arb_resampler_xxx_0, 0))
        self.connect((self.digital_diff_encoder_bb_0, 0), (self.digital_chunks_to_symbols_xx_0_0, 0))
        self.connect((self.digital_map_bb_1_0, 0), (self.digital_diff_encoder_bb_0, 0))
        self.connect((self.fec_extended_encoder_0, 0), (self.blocks_stream_mux_0_0, 0))
        self.connect((self.insert_vec_cpp_new_vec_0, 0), (self.digital_map_bb_1_0, 0))
        self.connect((self.pfb_arb_resampler_xxx_0, 0), (self.blocks_multiply_const_vxx_1, 0))
    # --- Generated accessors: each setter also propagates the new value to
    # --- the flow-graph blocks that depend on it.
    def get_puncpat(self):
        return self.puncpat
    def set_puncpat(self, puncpat):
        self.puncpat = puncpat
    def get_sps(self):
        return self.sps
    def set_sps(self, sps):
        self.sps = sps
        self.pfb_arb_resampler_xxx_0.set_rate(self.sps)
    def get_nfilts(self):
        return self.nfilts
    def set_nfilts(self, nfilts):
        self.nfilts = nfilts
        self.set_taps_per_filt(len(self.tx_rrc_taps)/self.nfilts)
    def get_eb(self):
        return self.eb
    def set_eb(self, eb):
        self.eb = eb
    def get_tx_rrc_taps(self):
        return self.tx_rrc_taps
    def set_tx_rrc_taps(self, tx_rrc_taps):
        self.tx_rrc_taps = tx_rrc_taps
        self.set_taps_per_filt(len(self.tx_rrc_taps)/self.nfilts)
        self.pfb_arb_resampler_xxx_0.set_taps((self.tx_rrc_taps))
    def get_taps_per_filt(self):
        return self.taps_per_filt
    def set_taps_per_filt(self, taps_per_filt):
        self.taps_per_filt = taps_per_filt
        self.set_filt_delay(1+(self.taps_per_filt-1)/2)
    def get_samp_rate_array_MCR(self):
        return self.samp_rate_array_MCR
    def set_samp_rate_array_MCR(self, samp_rate_array_MCR):
        self.samp_rate_array_MCR = samp_rate_array_MCR
        self.set_samp_rate(self.samp_rate_array_MCR[1])
    def get_vector(self):
        return self.vector
    def set_vector(self, vector):
        self.vector = vector
    def get_variable_qtgui_range_0(self):
        return self.variable_qtgui_range_0
    def set_variable_qtgui_range_0(self, variable_qtgui_range_0):
        self.variable_qtgui_range_0 = variable_qtgui_range_0
        self.uhd_usrp_sink_0_0.set_gain(self.variable_qtgui_range_0, 0)
    def get_samp_rate(self):
        return self.samp_rate
    def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
        self.uhd_usrp_sink_0_0.set_samp_rate(self.samp_rate)
    def get_rate(self):
        return self.rate
    def set_rate(self, rate):
        self.rate = rate
    def get_polys(self):
        return self.polys
    def set_polys(self, polys):
        self.polys = polys
    def get_pld_enc(self):
        return self.pld_enc
    def set_pld_enc(self, pld_enc):
        self.pld_enc = pld_enc
    def get_pld_const(self):
        return self.pld_const
    def set_pld_const(self, pld_const):
        self.pld_const = pld_const
    def get_k(self):
        return self.k
    def set_k(self, k):
        self.k = k
    def get_frequencia_usrp(self):
        return self.frequencia_usrp
    def set_frequencia_usrp(self, frequencia_usrp):
        self.frequencia_usrp = frequencia_usrp
        self.uhd_usrp_sink_0_0.set_center_freq(self.frequencia_usrp, 0)
    def get_filt_delay(self):
        return self.filt_delay
    def set_filt_delay(self, filt_delay):
        self.filt_delay = filt_delay
    def get_MCR(self):
        return self.MCR
    def set_MCR(self, MCR):
        self.MCR = MCR
def argument_parser():
    """Build the CLI parser for this flow graph (only --puncpat is exposed)."""
    parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
    parser.add_option(
        "", "--puncpat", dest="puncpat", type="string", default='11',
        help="Set puncpat [default=%default]")
    return parser
def main(top_block_cls=tx_no_gui, options=None):
    """Parse CLI options, start the flow graph, and block until it finishes."""
    if options is None:
        options, _ = argument_parser().parse_args()
    tb = top_block_cls(puncpat=options.puncpat)
    tb.start()
    # wait() blocks until the flow graph stops (e.g. file source exhausted)
    tb.wait()
if __name__ == '__main__':
    main()
| [
"andresilvamail@gmail.com"
] | andresilvamail@gmail.com |
9556ccc6811855a30848a322a58b84634b209823 | 992f1616004dd54a1dc741db81616e0d83fa7a74 | /python/scannerpy/stdlib/bbox_nms_kernel.py | 05cde4d4727a1a789d3d968f975e6fc07921a946 | [
"Apache-2.0"
] | permissive | mindis/scanner-1 | 503842a4463bf31b38e741d7eb26fac71d5eb847 | b00ba95629b9a775a02b47859c438ed2fd186d2a | refs/heads/master | 2021-08-26T06:41:52.226038 | 2017-11-12T02:16:24 | 2017-11-12T02:16:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 700 | py | import scannerpy
import scannerpy.stdlib.parsers as parsers
import scannerpy.stdlib.writers as writers
import scannerpy.stdlib.bboxes as bboxes
class BBoxNMSKernel(scannerpy.Kernel):
    """Scanner kernel that merges bbox columns and runs non-max suppression."""
    def __init__(self, config, protobufs):
        self.protobufs = protobufs
        # deserialize the kernel arguments passed by the Scanner pipeline
        args = protobufs.BBoxNMSArgs()
        args.ParseFromString(config)
        # parsed from the args but not otherwise used in this kernel
        self.scale = args.scale
    def close(self):
        pass
    def execute(self, input_columns):
        # concatenate the bboxes from every input column...
        bboxes_list = []
        for c in input_columns:
            bboxes_list += parsers.bboxes(c, self.protobufs)
        # ...then suppress overlapping boxes (IoU threshold 0.1)
        nmsed_bboxes = bboxes.nms(bboxes_list, 0.1)
        return writers.bboxes([nmsed_bboxes], self.protobufs)
# Module-level export name looked up by the Scanner runtime
KERNEL = BBoxNMSKernel
"alexpoms@gmail.com"
] | alexpoms@gmail.com |
3b74887e37753f6834df15e0acf789b4118532ec | 26cf1df102b75b0c068047cc6eca0d50dbc70c5a | /melati/server/address_manager_store.py | b0adc0891bd154e0333b582a3e552035eb13fd9b | [
"Apache-2.0"
] | permissive | a96009467/melati-blockchain | 307f9a92eee25a15aa294ddfed41a595e63acc50 | 28b8cd1590ee8fa860554c66d639a1fefc0d3c41 | refs/heads/main | 2023-06-24T13:53:41.332345 | 2021-07-20T09:37:49 | 2021-07-20T09:37:49 | 387,778,815 | 0 | 0 | Apache-2.0 | 2021-07-20T12:06:20 | 2021-07-20T12:06:20 | null | UTF-8 | Python | false | false | 8,257 | py | import logging
from typing import Dict, List, Tuple
import aiosqlite
from melati.server.address_manager import (
BUCKET_SIZE,
NEW_BUCKET_COUNT,
NEW_BUCKETS_PER_ADDRESS,
AddressManager,
ExtendedPeerInfo,
)
log = logging.getLogger(__name__)
class AddressManagerStore:
    """
    Metadata table:
    - private key
    - new table count
    - tried table count
    Nodes table:
    * Maps entries from new/tried table to unique node ids.
    - node_id
    - IP, port, together with the IP, port of the source peer.
    New table:
    * Stores node_id, bucket for each occurrence in the new table of an entry.
    * Once we know the buckets, we can also deduce the bucket positions.
    Every other information, such as tried_matrix, map_addr, map_info, random_pos,
    be deduced and it is not explicitly stored, instead it is recalculated.
    """
    # open aiosqlite connection supplied by the caller via create()
    db: aiosqlite.Connection
    @classmethod
    async def create(cls, connection) -> "AddressManagerStore":
        """Build a store over ``connection`` and create its tables if absent."""
        self = cls()
        self.db = connection
        await self.db.commit()
        # WAL + reduced fsync for better write throughput
        await self.db.execute("pragma journal_mode=wal")
        await self.db.execute("pragma synchronous=2")
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_metadata(key text,value text)")
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_nodes(node_id int,value text)")
        await self.db.commit()
        await self.db.execute("CREATE TABLE IF NOT EXISTS peer_new_table(node_id int,bucket int)")
        await self.db.commit()
        return self
    async def clear(self) -> None:
        """Delete every row from all three peer tables."""
        cursor = await self.db.execute("DELETE from peer_metadata")
        await cursor.close()
        cursor = await self.db.execute("DELETE from peer_nodes")
        await cursor.close()
        cursor = await self.db.execute("DELETE from peer_new_table")
        await cursor.close()
        await self.db.commit()
    async def get_metadata(self) -> Dict[str, str]:
        """Return the metadata table as a key -> value dict."""
        cursor = await self.db.execute("SELECT key, value from peer_metadata")
        metadata = await cursor.fetchall()
        await cursor.close()
        return {key: value for key, value in metadata}
    async def is_empty(self) -> bool:
        """True when no serialized address manager state is present."""
        metadata = await self.get_metadata()
        if "key" not in metadata:
            return True
        if int(metadata.get("new_count", 0)) > 0:
            return False
        if int(metadata.get("tried_count", 0)) > 0:
            return False
        return True
    async def get_nodes(self) -> List[Tuple[int, ExtendedPeerInfo]]:
        """Return all stored peers as (node_id, ExtendedPeerInfo) pairs."""
        cursor = await self.db.execute("SELECT node_id, value from peer_nodes")
        nodes_id = await cursor.fetchall()
        await cursor.close()
        return [(node_id, ExtendedPeerInfo.from_string(info_str)) for node_id, info_str in nodes_id]
    async def get_new_table(self) -> List[Tuple[int, int]]:
        """Return the new-table occurrences as (node_id, bucket) pairs."""
        cursor = await self.db.execute("SELECT node_id, bucket from peer_new_table")
        entries = await cursor.fetchall()
        await cursor.close()
        return [(node_id, bucket) for node_id, bucket in entries]
    async def set_metadata(self, metadata) -> None:
        """Upsert (key, value) pairs into the metadata table."""
        for key, value in metadata:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_metadata VALUES(?, ?)",
                (key, value),
            )
            await cursor.close()
        await self.db.commit()
    async def set_nodes(self, node_list) -> None:
        """Upsert (node_id, ExtendedPeerInfo) pairs into the nodes table."""
        for node_id, peer_info in node_list:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_nodes VALUES(?, ?)",
                (node_id, peer_info.to_string()),
            )
            await cursor.close()
        await self.db.commit()
    async def set_new_table(self, entries) -> None:
        """Upsert (node_id, bucket) occurrences into the new table."""
        for node_id, bucket in entries:
            cursor = await self.db.execute(
                "INSERT OR REPLACE INTO peer_new_table VALUES(?, ?)",
                (node_id, bucket),
            )
            await cursor.close()
        await self.db.commit()
    async def serialize(self, address_manager: AddressManager):
        """Persist ``address_manager``: renumber peers densely (new-table
        peers first, then tried peers) and store metadata, nodes and the
        new-table bucket occurrences."""
        metadata = []
        nodes = []
        new_table_entries = []
        metadata.append(("key", str(address_manager.key)))
        # map original node ids to dense, serialization-order ids
        unique_ids = {}
        count_ids = 0
        for node_id, info in address_manager.map_info.items():
            unique_ids[node_id] = count_ids
            if info.ref_count > 0:
                assert count_ids != address_manager.new_count
                nodes.append((count_ids, info))
                count_ids += 1
        metadata.append(("new_count", str(count_ids)))
        # tried peers are appended after all new-table peers
        tried_ids = 0
        for node_id, info in address_manager.map_info.items():
            if info.is_tried:
                assert info is not None
                assert tried_ids != address_manager.tried_count
                nodes.append((count_ids, info))
                count_ids += 1
                tried_ids += 1
        metadata.append(("tried_count", str(tried_ids)))
        # record every (renumbered id, bucket) occurrence in the new matrix
        for bucket in range(NEW_BUCKET_COUNT):
            for i in range(BUCKET_SIZE):
                if address_manager.new_matrix[bucket][i] != -1:
                    index = unique_ids[address_manager.new_matrix[bucket][i]]
                    new_table_entries.append((index, bucket))
        await self.clear()
        await self.set_metadata(metadata)
        await self.set_nodes(nodes)
        await self.set_new_table(new_table_entries)
    async def deserialize(self) -> AddressManager:
        """Rebuild an AddressManager from the stored tables, recomputing the
        derived structures (tried matrix, address maps, random positions)."""
        address_manager = AddressManager()
        metadata = await self.get_metadata()
        nodes = await self.get_nodes()
        new_table_entries = await self.get_new_table()
        address_manager.clear()
        address_manager.key = int(metadata["key"])
        address_manager.new_count = int(metadata["new_count"])
        # tried_count is recomputed below rather than trusted from metadata
        # address_manager.tried_count = int(metadata["tried_count"])
        address_manager.tried_count = 0
        # ids below new_count were serialized from the new table
        new_table_nodes = [(node_id, info) for node_id, info in nodes if node_id < address_manager.new_count]
        for n, info in new_table_nodes:
            address_manager.map_addr[info.peer_info.host] = n
            address_manager.map_info[n] = info
            info.random_pos = len(address_manager.random_pos)
            address_manager.random_pos.append(n)
        address_manager.id_count = len(new_table_nodes)
        tried_table_nodes = [(node_id, info) for node_id, info in nodes if node_id >= address_manager.new_count]
        # lost_count = 0
        for node_id, info in tried_table_nodes:
            # recompute the tried bucket/position; peers whose slot is
            # already taken are silently dropped
            tried_bucket = info.get_tried_bucket(address_manager.key)
            tried_bucket_pos = info.get_bucket_position(address_manager.key, False, tried_bucket)
            if address_manager.tried_matrix[tried_bucket][tried_bucket_pos] == -1:
                info.random_pos = len(address_manager.random_pos)
                info.is_tried = True
                id_count = address_manager.id_count
                address_manager.random_pos.append(id_count)
                address_manager.map_info[id_count] = info
                address_manager.map_addr[info.peer_info.host] = id_count
                address_manager.tried_matrix[tried_bucket][tried_bucket_pos] = id_count
                address_manager.id_count += 1
                address_manager.tried_count += 1
            # else:
            # lost_count += 1
        # address_manager.tried_count -= lost_count
        # restore new-matrix occurrences, bounded by the per-address limit
        for node_id, bucket in new_table_entries:
            if node_id >= 0 and node_id < address_manager.new_count:
                info = address_manager.map_info[node_id]
                bucket_pos = info.get_bucket_position(address_manager.key, True, bucket)
                if address_manager.new_matrix[bucket][bucket_pos] == -1 and info.ref_count < NEW_BUCKETS_PER_ADDRESS:
                    info.ref_count += 1
                    address_manager.new_matrix[bucket][bucket_pos] = node_id
        # drop peers that ended up in neither table
        for node_id, info in list(address_manager.map_info.items()):
            if not info.is_tried and info.ref_count == 0:
                address_manager.delete_new_entry_(node_id)
        address_manager.load_used_table_positions()
        return address_manager
| [
"cengndupak@gmail.com"
] | cengndupak@gmail.com |
a5cddf4c63083cdd7f14a8cf94ba290e0b3c20e8 | 5a16118003ed5669670176abc51c66467c6b0712 | /pitest.py | 85e9ee9a86a2dc66a7340226f0fd1c2e71c0d11e | [] | no_license | Messidc/hello-world | f36297d286e638631e32a465df927c4268ceac87 | 853a82e74c5a252e27b1489a8f5bc5717d8432f5 | refs/heads/master | 2021-05-11T04:35:12.226082 | 2020-11-21T08:18:55 | 2020-11-21T08:18:55 | 117,943,412 | 0 | 0 | null | 2018-01-18T07:30:20 | 2018-01-18T06:41:38 | null | UTF-8 | Python | false | false | 634 | py | from functools import reduce
import itertools
import math
def pi(N):
    """Approximate pi by summing the first N terms of the Leibniz series.

    The series is 4/1 - 4/3 + 4/5 - 4/7 + ... and converges slowly
    (the error is on the order of 1/N), so a large N is needed for a
    good approximation.

    :param N: number of series terms to sum (N >= 1).
    :return: a float approximation of pi.
    """
    # step 1: odd denominators 1, 3, 5, 7, ... limited to the first N
    odds = itertools.islice(itertools.count(1, 2), N)
    # step 2: alternate the sign of each 4/d term and sum.  Using the
    # term index for the sign avoids the fragile float-exponent trick
    # pow(-1, ((d + 1) / 2 - 1)) used previously.
    return sum(4.0 / d if i % 2 == 0 else -4.0 / d
               for i, d in enumerate(odds))
if __name__ == "__main__":
    # Show successively better approximations as the term count grows.
    for terms in (10, 100, 1000, 10000):
        print(pi(terms))
| [
"dengdanchaoren@gmail.com"
] | dengdanchaoren@gmail.com |
85cd55ec06dd7935e25dd335edf8a21bac1969f2 | 4f41a3faa073d0d4f672601add08edbfbb32c762 | /application/sqlalchemy/schema.py | 6a69739d1e34978864d647e50550a4cdc2c5456f | [] | no_license | gterranova/timetracker | 418577fd3fcbc6735b9c5a3a8bda2378064c8a41 | 08fb4adc221555fff2eb6d2ec92878f32f4197bf | refs/heads/master | 2021-01-12T14:40:27.162638 | 2012-04-11T19:33:47 | 2012-04-11T19:33:47 | 72,043,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91,489 | py | # schema.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The schema module provides the building blocks for database metadata.
Each element within this module describes a database entity which can be
created and dropped, or is otherwise part of such an entity. Examples include
tables, columns, sequences, and indexes.
All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as defined
in this module they are intended to be agnostic of any vendor-specific
constructs.
A collection of entities are grouped into a unit called
:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of schema
elements, and can also be associated with an actual database connection such
that operations involving the contained elements can contact the database as
needed.
Two of the elements here also build upon their "syntactic" counterparts, which
are defined in :class:`~sqlalchemy.sql.expression.`, specifically
:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`. Since these objects
are part of the SQL expression language, they are usable as components in SQL
expressions.
"""
import re, inspect
from sqlalchemy import exc, util, dialects
from sqlalchemy.sql import expression, visitors
URL = None
__all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index',
'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint',
'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData',
'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault',
'DefaultClause', 'FetchedValue', 'ColumnDefault', 'DDL',
'CreateTable', 'DropTable', 'CreateSequence', 'DropSequence',
'AddConstraint', 'DropConstraint',
]
__all__.sort()
RETAIN_SCHEMA = util.symbol('retain_schema')
class SchemaItem(visitors.Visitable):
    """Base class for items that define a database schema."""

    __visit_name__ = 'schema_item'

    # Default quoting preference; subclasses/instances may override.
    quote = None

    def _init_items(self, *items):
        """Initialize the list of child items for this SchemaItem."""
        for child in items:
            if child is None:
                continue
            child._set_parent(self)

    def _set_parent(self, parent):
        """Associate with this SchemaItem's parent object."""
        raise NotImplementedError()

    def get_children(self, **kwargs):
        """used to allow SchemaVisitor access"""
        return []

    def __repr__(self):
        return "%s()" % (self.__class__.__name__,)

    @util.memoized_property
    def info(self):
        # Lazily-created dictionary for application-specific data.
        return {}
def _get_table_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
class Table(SchemaItem, expression.TableClause):
    """Represent a table in a database.

    e.g.::

        mytable = Table("mytable", metadata,
                        Column('mytable_id', Integer, primary_key=True),
                        Column('value', String(50))
                   )

    The Table object constructs a unique instance of itself based on its
    name within the given MetaData object.   Constructor
    arguments are as follows:

    :param name: The name of this table as represented in the database.

        This property, along with the *schema*, indicates the *singleton
        identity* of this table in relation to its parent :class:`MetaData`.
        Additional calls to :class:`Table` with the same name, metadata,
        and schema name will return the same :class:`Table` object.

        Names which contain no upper case characters
        will be treated as case insensitive names, and will not be quoted
        unless they are a reserved word.  Names with any number of upper
        case characters will be quoted and sent exactly.  Note that this
        behavior applies even for databases which standardize upper
        case names as case insensitive such as Oracle.

    :param metadata: a :class:`MetaData` object which will contain this
        table.  The metadata is used as a point of association of this table
        with other tables which are referenced via foreign key.  It also
        may be used to associate this table with a particular
        :class:`~sqlalchemy.engine.base.Connectable`.

    :param \*args: Additional positional arguments are used primarily
        to add the list of :class:`Column` objects contained within this table.
        Similar to the style of a CREATE TABLE statement, other :class:`SchemaItem`
        constructs may be added here, including :class:`PrimaryKeyConstraint`,
        and :class:`ForeignKeyConstraint`.

    :param autoload: Defaults to False: the Columns for this table should be reflected
        from the database.  Usually there will be no Column objects in the
        constructor if this property is set.

    :param autoload_with: If autoload==True, this is an optional Engine or Connection
        instance to be used for the table reflection.  If ``None``, the
        underlying MetaData's bound connectable will be used.

    :param implicit_returning: True by default - indicates that
        RETURNING can be used by default to fetch newly inserted primary key
        values, for backends which support this.  Note that
        create_engine() also provides an implicit_returning flag.

    :param include_columns: A list of strings indicating a subset of columns to be loaded via
        the ``autoload`` operation; table columns who aren't present in
        this list will not be represented on the resulting ``Table``
        object.  Defaults to ``None`` which indicates all columns should
        be reflected.

    :param info: A dictionary which defaults to ``{}``.  A space to store application
        specific data. This must be a dictionary.

    :param mustexist: When ``True``, indicates that this Table must already
        be present in the given :class:`MetaData`` collection.

    :param prefixes:
        A list of strings to insert after CREATE in the CREATE TABLE
        statement.  They will be separated by spaces.

    :param quote: Force quoting of this table's name on or off, corresponding
        to ``True`` or ``False``.  When left at its default of ``None``,
        the column identifier will be quoted according to whether the name is
        case sensitive (identifiers with at least one upper case character are
        treated as case sensitive), or if it's a reserved word.  This flag
        is only needed to force quoting of a reserved word which is not known
        by the SQLAlchemy dialect.

    :param quote_schema: same as 'quote' but applies to the schema identifier.

    :param schema: The *schema name* for this table, which is required if the table
        resides in a schema other than the default selected schema for the
        engine's database connection.  Defaults to ``None``.

    :param useexisting: When ``True``, indicates that if this Table is already
        present in the given :class:`MetaData`, apply further arguments within
        the constructor to the existing :class:`Table`.  If this flag is not
        set, an error is raised when the parameters of an existing :class:`Table`
        are overwritten.

    """

    __visit_name__ = 'table'

    # Events for which DDL listeners may be registered via
    # append_ddl_listener().
    ddl_events = ('before-create', 'after-create', 'before-drop', 'after-drop')

    def __new__(cls, *args, **kw):
        # __new__ implements the "singleton per (name, schema) within a
        # MetaData" behavior: an already-registered Table is returned
        # (optionally re-initialized when useexisting=True) instead of
        # constructing a new object.
        if not args:
            # python3k pickle seems to call this
            return object.__new__(cls)
        try:
            name, metadata, args = args[0], args[1], args[2:]
        except IndexError:
            raise TypeError("Table() takes at least two arguments")

        schema = kw.get('schema', None)
        useexisting = kw.pop('useexisting', False)
        mustexist = kw.pop('mustexist', False)
        key = _get_table_key(name, schema)
        if key in metadata.tables:
            if not useexisting and bool(args):
                raise exc.InvalidRequestError(
                    "Table '%s' is already defined for this MetaData instance.  "
                    "Specify 'useexisting=True' to redefine options and "
                    "columns on an existing Table object." % key)
            table = metadata.tables[key]
            table._init_existing(*args, **kw)
            return table
        else:
            if mustexist:
                raise exc.InvalidRequestError(
                    "Table '%s' not defined" % (key))
            metadata.tables[key] = table = object.__new__(cls)
            try:
                table._init(name, metadata, *args, **kw)
                return table
            except:
                # construction failed; don't leave a half-built Table
                # registered in the MetaData.
                metadata.tables.pop(key)
                raise

    def __init__(self, *args, **kw):
        # __init__ is overridden to prevent __new__ from
        # calling the superclass constructor.
        pass

    def _init(self, name, metadata, *args, **kwargs):
        """Full initialization of a newly-constructed Table.

        Consumes the keyword arguments documented on the class, optionally
        reflects columns from the database (``autoload``), then applies the
        positional Column/Constraint arguments.
        """
        super(Table, self).__init__(name)
        self.metadata = metadata
        self.schema = kwargs.pop('schema', None)
        self.indexes = set()
        self.constraints = set()
        self._columns = expression.ColumnCollection()
        self._set_primary_key(PrimaryKeyConstraint())
        self._foreign_keys = util.OrderedSet()
        self._extra_dependencies = set()
        self.ddl_listeners = util.defaultdict(list)
        self.kwargs = {}
        if self.schema is not None:
            self.fullname = "%s.%s" % (self.schema, self.name)
        else:
            self.fullname = self.name

        autoload = kwargs.pop('autoload', False)
        autoload_with = kwargs.pop('autoload_with', None)
        include_columns = kwargs.pop('include_columns', None)

        self.implicit_returning = kwargs.pop('implicit_returning', True)
        self.quote = kwargs.pop('quote', None)
        self.quote_schema = kwargs.pop('quote_schema', None)
        if 'info' in kwargs:
            self.info = kwargs.pop('info')

        self._prefixes = kwargs.pop('prefixes', [])

        self._extra_kwargs(**kwargs)

        # load column definitions from the database if 'autoload' is defined
        # we do it after the table is in the singleton dictionary to support
        # circular foreign keys
        if autoload:
            if autoload_with:
                autoload_with.reflecttable(self, include_columns=include_columns)
            else:
                _bind_or_error(metadata, msg="No engine is bound to this Table's MetaData. "
                                "Pass an engine to the Table via "
                                "autoload_with=<someengine>, "
                                "or associate the MetaData with an engine via "
                                "metadata.bind=<someengine>").\
                                reflecttable(self, include_columns=include_columns)

        # initialize all the column, etc. objects.  done after reflection to
        # allow user-overrides
        self._init_items(*args)

    def _init_existing(self, *args, **kwargs):
        # Re-initialization path used by __new__ when useexisting=True:
        # applies new arguments to an already-registered Table.
        autoload = kwargs.pop('autoload', False)
        autoload_with = kwargs.pop('autoload_with', None)
        schema = kwargs.pop('schema', None)
        if schema and schema != self.schema:
            # NOTE(review): the format string is never interpolated here --
            # the message and the tuple are passed as two separate arguments
            # to ArgumentError instead of using the % operator.
            raise exc.ArgumentError(
                "Can't change schema of existing table from '%s' to '%s'",
                (self.schema, schema))

        include_columns = kwargs.pop('include_columns', None)
        if include_columns:
            # drop any reflected columns not named in include_columns
            for c in self.c:
                if c.name not in include_columns:
                    self.c.remove(c)

        for key in ('quote', 'quote_schema'):
            if key in kwargs:
                setattr(self, key, kwargs.pop(key))

        if 'info' in kwargs:
            self.info = kwargs.pop('info')

        self._extra_kwargs(**kwargs)
        self._init_items(*args)

    def _extra_kwargs(self, **kwargs):
        # validate remaining kwargs that they all specify DB prefixes
        if len([k for k in kwargs
                if not re.match(r'^(?:%s)_' % '|'.join(dialects.__all__), k)]):
            raise TypeError(
                "Invalid argument(s) for Table: %r" % kwargs.keys())
        self.kwargs.update(kwargs)

    def _set_primary_key(self, pk):
        # Replace this table's PrimaryKeyConstraint, removing any previous
        # one from the constraints collection, and mark member columns.
        if getattr(self, '_primary_key', None) in self.constraints:
            self.constraints.remove(self._primary_key)
        self._primary_key = pk
        self.constraints.add(pk)

        for c in pk.columns:
            c.primary_key = True

    @util.memoized_property
    def _autoincrement_column(self):
        # The single integer primary key column eligible for implicit
        # autoincrement behavior, if any (no foreign keys, no explicit
        # non-Sequence default).
        for col in self.primary_key:
            if col.autoincrement and \
                isinstance(col.type, types.Integer) and \
                not col.foreign_keys and \
                isinstance(col.default, (type(None), Sequence)):
                return col

    @property
    def key(self):
        # The "schema.name" key identifying this table in MetaData.tables.
        return _get_table_key(self.name, self.schema)

    @property
    def primary_key(self):
        # The table's PrimaryKeyConstraint.
        return self._primary_key

    def __repr__(self):
        return "Table(%s)" % ', '.join(
            [repr(self.name)] + [repr(self.metadata)] +
            [repr(x) for x in self.columns] +
            ["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']])

    def __str__(self):
        return _get_table_key(self.description, self.schema)

    @property
    def bind(self):
        """Return the connectable associated with this Table."""

        return self.metadata and self.metadata.bind or None

    def add_is_dependent_on(self, table):
        """Add a 'dependency' for this Table.

        This is another Table object which must be created
        first before this one can, or dropped after this one.

        Usually, dependencies between tables are determined via
        ForeignKey objects.   However, for other situations that
        create dependencies outside of foreign keys (rules, inheriting),
        this method can manually establish such a link.

        """
        self._extra_dependencies.add(table)

    def append_column(self, column):
        """Append a ``Column`` to this ``Table``."""

        column._set_parent(self)

    def append_constraint(self, constraint):
        """Append a ``Constraint`` to this ``Table``."""

        constraint._set_parent(self)

    def append_ddl_listener(self, event, listener):
        """Append a DDL event listener to this ``Table``.

        The ``listener`` callable will be triggered when this ``Table`` is
        created or dropped, either directly before or after the DDL is issued
        to the database.  The listener may modify the Table, but may not abort
        the event itself.

        Arguments are:

        event
          One of ``Table.ddl_events``; e.g. 'before-create', 'after-create',
          'before-drop' or 'after-drop'.

        listener
          A callable, invoked with three positional arguments:

          event
            The event currently being handled

          target
            The ``Table`` object being created or dropped

          bind
            The ``Connection`` being used for DDL execution.

        Listeners are added to the Table's ``ddl_listeners`` attribute.

        """
        if event not in self.ddl_events:
            raise LookupError(event)
        self.ddl_listeners[event].append(listener)

    def _set_parent(self, metadata):
        # Register this table in the given MetaData's table collection.
        metadata.tables[_get_table_key(self.name, self.schema)] = self
        self.metadata = metadata

    def get_children(self, column_collections=True, schema_visitor=False, **kwargs):
        if not schema_visitor:
            return expression.TableClause.get_children(
                self, column_collections=column_collections, **kwargs)
        else:
            if column_collections:
                return list(self.columns)
            else:
                return []

    def exists(self, bind=None):
        """Return True if this table exists."""

        if bind is None:
            bind = _bind_or_error(self)

        return bind.run_callable(bind.dialect.has_table, self.name, schema=self.schema)

    def create(self, bind=None, checkfirst=False):
        """Issue a ``CREATE`` statement for this table.

        See also ``metadata.create_all()``.

        """
        if bind is None:
            bind = _bind_or_error(self)
        bind.create(self, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=False):
        """Issue a ``DROP`` statement for this table.

        See also ``metadata.drop_all()``.

        """
        if bind is None:
            bind = _bind_or_error(self)
        bind.drop(self, checkfirst=checkfirst)

    def tometadata(self, metadata, schema=RETAIN_SCHEMA):
        """Return a copy of this ``Table`` associated with a different ``MetaData``."""

        try:
            # reuse an existing copy if one is already registered under the
            # target metadata/schema
            if schema is RETAIN_SCHEMA:
                schema = self.schema
            key = _get_table_key(self.name, schema)
            return metadata.tables[key]
        except KeyError:
            # build a fresh Table from copies of the columns and constraints
            args = []
            for c in self.columns:
                args.append(c.copy(schema=schema))
            for c in self.constraints:
                args.append(c.copy(schema=schema))
            return Table(self.name, metadata, schema=schema, *args)
class Column(SchemaItem, expression.ColumnClause):
    """Represents a column in a database table."""

    __visit_name__ = 'column'

    def __init__(self, *args, **kwargs):
        """
        Construct a new ``Column`` object.

        :param name: The name of this column as represented in the database.
          This argument may be the first positional argument, or specified
          via keyword.

          Names which contain no upper case characters
          will be treated as case insensitive names, and will not be quoted
          unless they are a reserved word.  Names with any number of upper
          case characters will be quoted and sent exactly.  Note that this
          behavior applies even for databases which standardize upper
          case names as case insensitive such as Oracle.

          The name field may be omitted at construction time and applied
          later, at any time before the Column is associated with a
          :class:`Table`.  This is to support convenient
          usage within the :mod:`~sqlalchemy.ext.declarative` extension.

        :param type\_: The column's type, indicated using an instance which
          subclasses :class:`~sqlalchemy.types.AbstractType`.  If no arguments
          are required for the type, the class of the type can be sent
          as well, e.g.::

            # use a type with arguments
            Column('data', String(50))

            # use no arguments
            Column('level', Integer)

          The ``type`` argument may be the second positional argument
          or specified by keyword.

          There is partial support for automatic detection of the
          type based on that of a :class:`ForeignKey` associated
          with this column, if the type is specified as ``None``.
          However, this feature is not fully implemented and
          may not function in all cases.

        :param \*args: Additional positional arguments include various
          :class:`SchemaItem` derived constructs which will be applied
          as options to the column.  These include instances of
          :class:`Constraint`, :class:`ForeignKey`, :class:`ColumnDefault`,
          and :class:`Sequence`.  In some cases an equivalent keyword
          argument is available such as ``server_default``, ``default``
          and ``unique``.

        :param autoincrement: This flag may be set to ``False`` to
          indicate an integer primary key column that should not be
          considered to be the "autoincrement" column, that is
          the integer primary key column which generates values
          implicitly upon INSERT and whose value is usually returned
          via the DBAPI cursor.lastrowid attribute.   It defaults
          to ``True`` to satisfy the common use case of a table
          with a single integer primary key column.  If the table
          has a composite primary key consisting of more than one
          integer column, set this flag to True only on the
          column that should be considered "autoincrement".

          The setting *only* has an effect for columns which are:

          * Integer derived (i.e. INT, SMALLINT, BIGINT)

          * Part of the primary key

          * Are not referenced by any foreign keys

          * have no server side or client side defaults (with the exception
            of Postgresql SERIAL).

          The setting has these two effects on columns that meet the
          above criteria:

          * DDL issued for the column will include database-specific
            keywords intended to signify this column as an
            "autoincrement" column, such as AUTO INCREMENT on MySQL,
            SERIAL on Postgresql, and IDENTITY on MS-SQL.  It does
            *not* issue AUTOINCREMENT for SQLite since this is a
            special SQLite flag that is not required for autoincrementing
            behavior.  See the SQLite dialect documentation for
            information on SQLite's AUTOINCREMENT.

          * The column will be considered to be available as
            cursor.lastrowid or equivalent, for those dialects which
            "post fetch" newly inserted identifiers after a row has
            been inserted (SQLite, MySQL, MS-SQL).  It does not have
            any effect in this regard for databases that use sequences
            to generate primary key identifiers (i.e. Firebird, Postgresql,
            Oracle).

        :param default: A scalar, Python callable, or
          :class:`~sqlalchemy.sql.expression.ClauseElement` representing the
          *default value* for this column, which will be invoked upon insert
          if this column is otherwise not specified in the VALUES clause of
          the insert.  This is a shortcut to using :class:`ColumnDefault` as
          a positional argument.

          Contrast this argument to ``server_default`` which creates a
          default generator on the database side.

        :param doc: optional String that can be used by the ORM or similar
          to document attributes.   This attribute does not render SQL
          comments (a future attribute 'comment' will achieve that).

        :param key: An optional string identifier which will identify this
          ``Column`` object on the :class:`Table`.  When a key is provided,
          this is the only identifier referencing the ``Column`` within the
          application, including ORM attribute mapping; the ``name`` field
          is used only when rendering SQL.

        :param index: When ``True``, indicates that the column is indexed.
          This is a shortcut for using a :class:`Index` construct on the
          table.  To specify indexes with explicit names or indexes that
          contain multiple columns, use the :class:`Index` construct
          instead.

        :param info: A dictionary which defaults to ``{}``.  A space to store
          application specific data. This must be a dictionary.

        :param nullable: If set to the default of ``True``, indicates the
          column will be rendered as allowing NULL, else it's rendered as
          NOT NULL.  This parameter is only used when issuing CREATE TABLE
          statements.

        :param onupdate: A scalar, Python callable, or
          :class:`~sqlalchemy.sql.expression.ClauseElement` representing a
          default value to be applied to the column within UPDATE
          statements, which will be invoked upon update if this column is not
          present in the SET clause of the update.  This is a shortcut to
          using :class:`ColumnDefault` as a positional argument with
          ``for_update=True``.

        :param primary_key: If ``True``, marks this column as a primary key
          column.  Multiple columns can have this flag set to specify
          composite primary keys.  As an alternative, the primary key of a
          :class:`Table` can be specified via an explicit
          :class:`PrimaryKeyConstraint` object.

        :param server_default: A :class:`FetchedValue` instance, str, Unicode
          or :func:`~sqlalchemy.sql.expression.text` construct representing
          the DDL DEFAULT value for the column.

          String types will be emitted as-is, surrounded by single quotes::

            Column('x', Text, server_default="val")

            x TEXT DEFAULT 'val'

          A :func:`~sqlalchemy.sql.expression.text` expression will be
          rendered as-is, without quotes::

            Column('y', DateTime, server_default=text('NOW()'))0

            y DATETIME DEFAULT NOW()

          Strings and text() will be converted into a :class:`DefaultClause`
          object upon initialization.

          Use :class:`FetchedValue` to indicate that an already-existing
          column will generate a default value on the database side which
          will be available to SQLAlchemy for post-fetch after inserts. This
          construct does not specify any DDL and the implementation is left
          to the database, such as via a trigger.

        :param server_onupdate: A :class:`FetchedValue` instance
          representing a database-side default generation function. This
          indicates to SQLAlchemy that a newly generated value will be
          available after updates. This construct does not specify any DDL
          and the implementation is left to the database, such as via a
          trigger.

        :param quote: Force quoting of this column's name on or off,
          corresponding to ``True`` or ``False``. When left at its default
          of ``None``, the column identifier will be quoted according to
          whether the name is case sensitive (identifiers with at least one
          upper case character are treated as case sensitive), or if it's a
          reserved word. This flag is only needed to force quoting of a
          reserved word which is not known by the SQLAlchemy dialect.

        :param unique: When ``True``, indicates that this column contains a
          unique constraint, or if ``index`` is ``True`` as well, indicates
          that the :class:`Index` should be created with the unique flag.
          To specify multiple columns in the constraint/index or to specify
          an explicit name, use the :class:`UniqueConstraint` or
          :class:`Index` constructs explicitly.

        """

        # name and type may arrive positionally or by keyword; specifying
        # both forms for the same value is an error.
        name = kwargs.pop('name', None)
        type_ = kwargs.pop('type_', None)

        args = list(args)
        if args:
            if isinstance(args[0], basestring):
                if name is not None:
                    raise exc.ArgumentError(
                        "May not pass name positionally and as a keyword.")
                name = args.pop(0)
        if args:
            coltype = args[0]
            if (isinstance(coltype, types.AbstractType) or
                (isinstance(coltype, type) and
                 issubclass(coltype, types.AbstractType))):
                if type_ is not None:
                    raise exc.ArgumentError(
                        "May not pass type_ positionally and as a keyword.")
                type_ = args.pop(0)

        no_type = type_ is None

        super(Column, self).__init__(name, None, type_)
        self.key = kwargs.pop('key', name)
        self.primary_key = kwargs.pop('primary_key', False)
        self.nullable = kwargs.pop('nullable', not self.primary_key)
        self.default = kwargs.pop('default', None)
        self.server_default = kwargs.pop('server_default', None)
        self.server_onupdate = kwargs.pop('server_onupdate', None)
        self.index = kwargs.pop('index', None)
        self.unique = kwargs.pop('unique', None)
        self.quote = kwargs.pop('quote', None)
        self.doc = kwargs.pop('doc', None)
        self.onupdate = kwargs.pop('onupdate', None)
        self.autoincrement = kwargs.pop('autoincrement', True)
        self.constraints = set()
        self.foreign_keys = util.OrderedSet()
        self._table_events = set()

        # check if this Column is proxying another column
        if '_proxies' in kwargs:
            self.proxies = kwargs.pop('_proxies')
        # otherwise, add DDL-related events
        elif isinstance(self.type, types.SchemaType):
            self.type._set_parent(self)

        # normalize the default/onupdate keyword values into SchemaItem
        # constructs appended to the positional args for _init_items().
        if self.default is not None:
            if isinstance(self.default, (ColumnDefault, Sequence)):
                args.append(self.default)
            else:
                args.append(ColumnDefault(self.default))

        if self.server_default is not None:
            if isinstance(self.server_default, FetchedValue):
                args.append(self.server_default)
            else:
                args.append(DefaultClause(self.server_default))

        if self.onupdate is not None:
            if isinstance(self.onupdate, (ColumnDefault, Sequence)):
                args.append(self.onupdate)
            else:
                args.append(ColumnDefault(self.onupdate, for_update=True))

        if self.server_onupdate is not None:
            if isinstance(self.server_onupdate, FetchedValue):
                # NOTE(review): this appends self.server_default, not
                # self.server_onupdate -- a FetchedValue passed as
                # server_onupdate is silently dropped.  Looks like a bug;
                # confirm intended behavior before relying on it.
                args.append(self.server_default)
            else:
                args.append(DefaultClause(self.server_onupdate,
                                            for_update=True))
        self._init_items(*args)

        if not self.foreign_keys and no_type:
            raise exc.ArgumentError("'type' is required on Column objects "
                                        "which have no foreign keys.")
        util.set_creation_order(self)

        if 'info' in kwargs:
            self.info = kwargs.pop('info')

        if kwargs:
            raise exc.ArgumentError(
                "Unknown arguments passed to Column: " + repr(kwargs.keys()))

    def __str__(self):
        if self.name is None:
            return "(no name)"
        elif self.table is not None:
            if self.table.named_with_column:
                return (self.table.description + "." + self.description)
            else:
                return self.description
        else:
            return self.description

    def references(self, column):
        """Return True if this Column references the given column via foreign key."""
        for fk in self.foreign_keys:
            if fk.references(column.table):
                return True
        else:
            return False

    def append_foreign_key(self, fk):
        """Attach a ``ForeignKey`` to this column."""
        fk._set_parent(self)

    def __repr__(self):
        kwarg = []
        if self.key != self.name:
            kwarg.append('key')
        if self.primary_key:
            kwarg.append('primary_key')
        if not self.nullable:
            kwarg.append('nullable')
        if self.onupdate:
            kwarg.append('onupdate')
        if self.default:
            kwarg.append('default')
        if self.server_default:
            kwarg.append('server_default')
        return "Column(%s)" % ', '.join(
            [repr(self.name)] + [repr(self.type)] +
            [repr(x) for x in self.foreign_keys if x is not None] +
            [repr(x) for x in self.constraints] +
            [(self.table is not None and "table=<%s>" % self.table.description or "")] +
            ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])

    def _set_parent(self, table):
        """Attach this Column to the given Table, replacing any existing
        column of the same key and wiring up index/unique shortcuts."""
        if self.name is None:
            raise exc.ArgumentError(
                "Column must be constructed with a name or assign .name "
                "before adding to a Table.")
        if self.key is None:
            self.key = self.name
        if getattr(self, 'table', None) is not None:
            raise exc.ArgumentError("this Column already has a table!")

        if self.key in table._columns:
            # a column of the same key is being replaced; detach its
            # foreign keys from the table first.
            col = table._columns.get(self.key)
            # NOTE(review): col.foreign_keys is mutated while being
            # iterated, which can raise or skip entries when a column has
            # more than one foreign key -- verify.
            for fk in col.foreign_keys:
                col.foreign_keys.remove(fk)
                table.foreign_keys.remove(fk)
                table.constraints.remove(fk.constraint)

        table._columns.replace(self)

        if self.primary_key:
            table.primary_key._replace(self)
        elif self.key in table.primary_key:
            raise exc.ArgumentError(
                "Trying to redefine primary-key column '%s' as a "
                "non-primary-key column on table '%s'" % (
                self.key, table.fullname))
        self.table = table

        if self.index:
            if isinstance(self.index, basestring):
                raise exc.ArgumentError(
                    "The 'index' keyword argument on Column is boolean only. "
                    "To create indexes with a specific name, create an "
                    "explicit Index object external to the Table.")
            Index('ix_%s' % self._label, self, unique=self.unique)
        elif self.unique:
            if isinstance(self.unique, basestring):
                raise exc.ArgumentError(
                    "The 'unique' keyword argument on Column is boolean only. "
                    "To create unique constraints or indexes with a specific "
                    "name, append an explicit UniqueConstraint to the Table's "
                    "list of elements, or create an explicit Index object "
                    "external to the Table.")
            table.append_constraint(UniqueConstraint(self.key))

        # fire deferred callables registered via _on_table_attach()
        for fn in self._table_events:
            fn(table, self)
        del self._table_events

    def _on_table_attach(self, fn):
        # Invoke fn(table, column) now if already attached, else defer
        # until _set_parent() runs.
        if self.table is not None:
            fn(self.table, self)
        else:
            self._table_events.add(fn)

    def copy(self, **kw):
        """Create a copy of this ``Column``, uninitialized.

        This is used in ``Table.tometadata``.

        """
        # Constraint objects plus non-constraint-bound ForeignKey objects
        args = \
            [c.copy(**kw) for c in self.constraints] + \
            [c.copy(**kw) for c in self.foreign_keys if not c.constraint]

        c = Column(
                name=self.name,
                type_=self.type,
                key = self.key,
                primary_key = self.primary_key,
                nullable = self.nullable,
                quote=self.quote,
                index=self.index,
                autoincrement=self.autoincrement,
                default=self.default,
                server_default=self.server_default,
                onupdate=self.onupdate,
                server_onupdate=self.server_onupdate,
                *args
                )
        # carry over any deferred attach callbacks
        if hasattr(self, '_table_events'):
            c._table_events = list(self._table_events)
        return c

    def _make_proxy(self, selectable, name=None):
        """Create a *proxy* for this column.

        This is a copy of this ``Column`` referenced by a different parent
        (such as an alias or select statement).  The column should
        be used only in select scenarios, as its full DDL/default
        information is not transferred.

        """
        fk = [ForeignKey(f.column) for f in self.foreign_keys]
        c = self._constructor(
            name or self.name,
            self.type,
            key = name or self.key,
            primary_key = self.primary_key,
            nullable = self.nullable,
            quote=self.quote, _proxies=[self], *fk)
        c.table = selectable
        selectable.columns.add(c)
        if self.primary_key:
            selectable.primary_key.add(c)
        # fire any deferred attach callbacks against the new parent
        for fn in c._table_events:
            fn(selectable, c)
        del c._table_events
        return c

    def get_children(self, schema_visitor=False, **kwargs):
        if schema_visitor:
            return [x for x in (self.default, self.onupdate) if x is not None] + \
                list(self.foreign_keys) + list(self.constraints)
        else:
            return expression.ColumnClause.get_children(self, **kwargs)
class ForeignKey(SchemaItem):
    """Defines a dependency between two columns.

    ``ForeignKey`` is specified as an argument to a :class:`Column` object,
    e.g.::

        t = Table("remote_table", metadata,
            Column("remote_id", ForeignKey("main_table.id"))
        )

    Note that ``ForeignKey`` is only a marker object that defines
    a dependency between two columns.  The actual constraint
    is in all cases represented by the :class:`ForeignKeyConstraint`
    object.  This object will be generated automatically when
    a ``ForeignKey`` is associated with a :class:`Column` which
    in turn is associated with a :class:`Table`.  Conversely,
    when :class:`ForeignKeyConstraint` is applied to a :class:`Table`,
    ``ForeignKey`` markers are automatically generated to be
    present on each associated :class:`Column`, which are also
    associated with the constraint object.

    Note that you cannot define a "composite" foreign key constraint,
    that is a constraint between a grouping of multiple parent/child
    columns, using ``ForeignKey`` objects.  To define this grouping,
    the :class:`ForeignKeyConstraint` object must be used, and applied
    to the :class:`Table`.  The associated ``ForeignKey`` objects
    are created automatically.

    The ``ForeignKey`` objects associated with an individual
    :class:`Column` object are available in the `foreign_keys` collection
    of that column.

    Further examples of foreign key configuration are in
    :ref:`metadata_foreignkeys`.
    """

    __visit_name__ = 'foreign_key'

    def __init__(self, column, _constraint=None, use_alter=False, name=None,
                 onupdate=None, ondelete=None, deferrable=None,
                 initially=None, link_to_name=False):
        """
        Construct a column-level FOREIGN KEY.

        The :class:`ForeignKey` object when constructed generates a
        :class:`ForeignKeyConstraint` which is associated with the parent
        :class:`Table` object's collection of constraints.

        :param column: A single target column for the key relationship. A
            :class:`Column` object or a column name as a string:
            ``tablename.columnkey`` or ``schema.tablename.columnkey``.
            ``columnkey`` is the ``key`` which has been assigned to the column
            (defaults to the column name itself), unless ``link_to_name`` is
            ``True`` in which case the rendered name of the column is used.

        :param name: Optional string. An in-database name for the key if
            `constraint` is not provided.

        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
            issuing DDL for this constraint. Typical values include CASCADE,
            DELETE and RESTRICT.

        :param ondelete: Optional string. If set, emit ON DELETE <value> when
            issuing DDL for this constraint. Typical values include CASCADE,
            DELETE and RESTRICT.

        :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
            DEFERRABLE when issuing DDL for this constraint.

        :param initially: Optional string. If set, emit INITIALLY <value> when
            issuing DDL for this constraint.

        :param link_to_name: if True, the string name given in ``column`` is
            the rendered name of the referenced column, not its locally
            assigned ``key``.

        :param use_alter: passed to the underlying
            :class:`ForeignKeyConstraint` to indicate the constraint should be
            generated/dropped externally from the CREATE TABLE/ DROP TABLE
            statement. See that classes' constructor for details.
        """
        self._colspec = column

        # the linked ForeignKeyConstraint.
        # ForeignKey will create this when parent Column
        # is attached to a Table, *or* ForeignKeyConstraint
        # object passes itself in when creating ForeignKey
        # markers.
        self.constraint = _constraint

        self.use_alter = use_alter
        self.name = name
        self.onupdate = onupdate
        self.ondelete = ondelete
        self.deferrable = deferrable
        self.initially = initially
        self.link_to_name = link_to_name

    def __repr__(self):
        return "ForeignKey(%r)" % self._get_colspec()

    def copy(self, schema=None):
        """Produce a copy of this ForeignKey object."""
        return ForeignKey(
            self._get_colspec(schema=schema),
            use_alter=self.use_alter,
            name=self.name,
            onupdate=self.onupdate,
            ondelete=self.ondelete,
            deferrable=self.deferrable,
            initially=self.initially,
            link_to_name=self.link_to_name
        )

    def _get_colspec(self, schema=None):
        # Render the remote side as a dotted string.  With an explicit
        # ``schema`` the target is re-qualified; otherwise a plain string
        # spec is returned verbatim and Column-like objects are rendered
        # as "tablename.columnkey".
        if schema:
            return schema + "." + self.column.table.name + "." + self.column.key
        elif isinstance(self._colspec, basestring):
            return self._colspec
        elif hasattr(self._colspec, '__clause_element__'):
            _column = self._colspec.__clause_element__()
        else:
            _column = self._colspec

        return "%s.%s" % (_column.table.fullname, _column.key)

    target_fullname = property(_get_colspec)

    def references(self, table):
        """Return True if the given table is referenced by this ForeignKey."""
        return table.corresponding_column(self.column) is not None

    def get_referent(self, table):
        """Return the column in the given table referenced by this ForeignKey.

        Returns None if this ``ForeignKey`` does not reference the given
        table.
        """
        return table.corresponding_column(self.column)

    @util.memoized_property
    def column(self):
        # ForeignKey inits its remote column as late as possible, so tables
        # can be defined without dependencies
        if isinstance(self._colspec, basestring):
            # locate the parent table this foreign key is attached to.  we
            # use the "original" column which our parent column represents
            # (its a list of columns/other ColumnElements if the parent
            # table is a UNION)
            for c in self.parent.base_columns:
                if isinstance(c, Column):
                    parenttable = c.table
                    break
            else:
                raise exc.ArgumentError(
                    "Parent column '%s' does not descend from a "
                    "table-attached Column" % str(self.parent))

            m = self._colspec.split('.')
            # NOTE(review): str.split() never returns None, so this branch
            # is dead code — likely a leftover from an earlier regex-based
            # implementation.
            if m is None:
                raise exc.ArgumentError(
                    "Invalid foreign key column specification: %s" %
                    self._colspec)

            # A FK between column 'bar' and table 'foo' can be
            # specified as 'foo', 'foo.bar', 'dbo.foo.bar',
            # 'otherdb.dbo.foo.bar'. Once we have the column name and
            # the table name, treat everything else as the schema
            # name. Some databases (e.g. Sybase) support
            # inter-database foreign keys. See tickets#1341 and --
            # indirectly related -- Ticket #594. This assumes that '.'
            # will never appear *within* any component of the FK.
            (schema, tname, colname) = (None, None, None)
            if (len(m) == 1):
                tname = m.pop()
            else:
                colname = m.pop()
                tname = m.pop()
            if (len(m) > 0):
                schema = '.'.join(m)

            if _get_table_key(tname, schema) not in parenttable.metadata:
                raise exc.NoReferencedTableError(
                    "Could not find table '%s' with which to generate a "
                    "foreign key" % tname)
            table = Table(tname, parenttable.metadata,
                          mustexist=True, schema=schema)

            _column = None
            if colname is None:
                # colname is None in the case that ForeignKey argument
                # was specified as table name only, in which case we
                # match the column name to the same column on the
                # parent.
                # ``key`` here is the parent Column itself; it is only
                # interpolated into the error message below.
                key = self.parent
                _column = table.c.get(self.parent.key, None)
            elif self.link_to_name:
                key = colname
                # match against the rendered .name rather than the .key.
                # NOTE(review): no ``break`` after a match — the last
                # matching column would win, though names are unique
                # within a table in practice.
                for c in table.c:
                    if c.name == colname:
                        _column = c
            else:
                key = colname
                _column = table.c.get(colname, None)

            if _column is None:
                raise exc.NoReferencedColumnError(
                    "Could not create ForeignKey '%s' on table '%s': "
                    "table '%s' has no column named '%s'" % (
                    self._colspec, parenttable.name, table.name, key))

        elif hasattr(self._colspec, '__clause_element__'):
            _column = self._colspec.__clause_element__()
        else:
            _column = self._colspec

        # propagate TypeEngine to parent if it didn't have one
        if isinstance(self.parent.type, types.NullType):
            self.parent.type = _column.type
        return _column

    def _set_parent(self, column):
        if hasattr(self, 'parent'):
            if self.parent is column:
                # already attached to this column - no-op
                return
            raise exc.InvalidRequestError("This ForeignKey already has a parent !")
        self.parent = column

        self.parent.foreign_keys.add(self)
        self.parent._on_table_attach(self._set_table)

    def _set_table(self, table, column):
        # standalone ForeignKey - create ForeignKeyConstraint
        # on the hosting Table when attached to the Table.
        if self.constraint is None and isinstance(table, Table):
            self.constraint = ForeignKeyConstraint(
                [], [], use_alter=self.use_alter, name=self.name,
                onupdate=self.onupdate, ondelete=self.ondelete,
                deferrable=self.deferrable, initially=self.initially,
                )
            self.constraint._elements[self.parent] = self
            self.constraint._set_parent(table)
        table.foreign_keys.add(self)
class DefaultGenerator(SchemaItem):
    """Base class for column *default* values."""

    __visit_name__ = 'default_generator'

    # overridden by Sequence
    is_sequence = False

    def __init__(self, for_update=False):
        # when True, this default fires on UPDATE rather than INSERT
        self.for_update = for_update

    def _set_parent(self, column):
        # Attach this generator as the column's ON UPDATE handler or
        # its INSERT default, depending on ``for_update``.
        self.column = column
        slot = 'onupdate' if self.for_update else 'default'
        setattr(self.column, slot, self)

    def execute(self, bind=None, **kwargs):
        """Execute this default against the given (or implicit) bind."""
        if bind is None:
            bind = _bind_or_error(self)
        return bind._execute_default(self, **kwargs)

    @property
    def bind(self):
        """Return the connectable associated with this default."""
        col = getattr(self, 'column', None)
        if col is None:
            return None
        return col.table.bind

    def __repr__(self):
        return "DefaultGenerator()"
class ColumnDefault(DefaultGenerator):
    """A plain default value on a column.

    This could correspond to a constant, a callable function, or a SQL
    clause.
    """

    def __init__(self, arg, **kwargs):
        super(ColumnDefault, self).__init__(**kwargs)
        # server-side defaults belong on Column.server_default, not here
        if isinstance(arg, FetchedValue):
            raise exc.ArgumentError(
                "ColumnDefault may not be a server-side default type.")
        if util.callable(arg):
            arg = self._maybe_wrap_callable(arg)
        self.arg = arg

    @util.memoized_property
    def is_callable(self):
        # True when the default is produced by invoking a Python callable.
        return util.callable(self.arg)

    @util.memoized_property
    def is_clause_element(self):
        # True when the default is a SQL expression construct.
        return isinstance(self.arg, expression.ClauseElement)

    @util.memoized_property
    def is_scalar(self):
        # A plain constant: neither callable, SQL expression, nor sequence.
        return not self.is_callable and not self.is_clause_element and not self.is_sequence

    def _maybe_wrap_callable(self, fn):
        """Backward compat: Wrap callables that don't accept a context."""
        # Pick the object whose signature we can inspect: plain function,
        # class constructor, or __call__ of an arbitrary callable.
        if inspect.isfunction(fn):
            inspectable = fn
        elif inspect.isclass(fn):
            inspectable = fn.__init__
        elif hasattr(fn, '__call__'):
            inspectable = fn.__call__
        else:
            # probably not inspectable, try anyways.
            inspectable = fn
        try:
            # Py2-era API; raises TypeError for builtins/C callables.
            argspec = inspect.getargspec(inspectable)
        except TypeError:
            return lambda ctx: fn()

        positionals = len(argspec[0])

        # Py3K compat - no unbound methods
        if inspect.ismethod(inspectable) or inspect.isclass(fn):
            positionals -= 1

        if positionals == 0:
            # zero-arg callable: wrap so it can be invoked with a context
            return lambda ctx: fn()

        # and/or idiom: number of defaulted arguments, or 0 when the
        # defaults tuple is None (getargspec index 3 = defaults).
        defaulted = argspec[3] is not None and len(argspec[3]) or 0

        # at most one *required* positional (the execution context) allowed
        if positionals - defaulted > 1:
            raise exc.ArgumentError(
                "ColumnDefault Python function takes zero or one "
                "positional arguments")
        return fn

    def _visit_name(self):
        # visitor dispatch name depends on INSERT vs UPDATE role
        if self.for_update:
            return "column_onupdate"
        else:
            return "column_default"
    __visit_name__ = property(_visit_name)

    def __repr__(self):
        return "ColumnDefault(%r)" % self.arg
class Sequence(DefaultGenerator):
    """Represents a named database sequence."""

    __visit_name__ = 'sequence'

    is_sequence = True

    def __init__(self, name, start=None, increment=None, schema=None,
                 optional=False, quote=None, metadata=None, for_update=False):
        """Construct a Sequence.

        ``name`` is the in-database name of the sequence; the remaining
        options are stored as-is for consumption by the dialect / DDL
        compiler.  ``for_update`` is forwarded to the
        :class:`DefaultGenerator` base.
        """
        super(Sequence, self).__init__(for_update=for_update)
        self.name = name
        self.start = start
        self.increment = increment
        self.optional = optional
        self.quote = quote
        self.schema = schema
        self.metadata = metadata

    @util.memoized_property
    def is_callable(self):
        # a sequence is never a plain Python callable default
        return False

    @util.memoized_property
    def is_clause_element(self):
        # nor is it a SQL expression default
        return False

    def __repr__(self):
        details = [repr(self.name)]
        details.extend("%s=%s" % (attr, repr(getattr(self, attr)))
                       for attr in ('start', 'increment', 'optional'))
        return "Sequence(%s)" % ', '.join(details)

    def _set_parent(self, column):
        super(Sequence, self)._set_parent(column)
        column._on_table_attach(self._set_table)

    def _set_table(self, table, column):
        # adopt the metadata of the table our parent column joined
        self.metadata = table.metadata

    @property
    def bind(self):
        if self.metadata:
            return self.metadata.bind
        return None

    def create(self, bind=None, checkfirst=True):
        """Creates this sequence in the database."""
        if bind is None:
            bind = _bind_or_error(self)
        bind.create(self, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=True):
        """Drops this sequence from the database."""
        if bind is None:
            bind = _bind_or_error(self)
        bind.drop(self, checkfirst=checkfirst)
class FetchedValue(object):
    """A default that takes effect on the database side."""

    def __init__(self, for_update=False):
        # when True, applies on UPDATE rather than INSERT
        self.for_update = for_update

    def _set_parent(self, column):
        # Register on the column as a server-side default, choosing the
        # UPDATE or INSERT slot based on ``for_update``.
        self.column = column
        slot = 'server_onupdate' if self.for_update else 'server_default'
        setattr(self.column, slot, self)

    def __repr__(self):
        return 'FetchedValue(for_update=%r)' % self.for_update
class DefaultClause(FetchedValue):
    """A DDL-specified DEFAULT column value."""

    def __init__(self, arg, for_update=False):
        # validate up front: only strings and SQL expression constructs
        # are accepted as a DDL-level default
        _accepted = (basestring,
                     expression.ClauseElement,
                     expression._TextClause)
        util.assert_arg_type(arg, _accepted, 'arg')
        super(DefaultClause, self).__init__(for_update)
        self.arg = arg

    def __repr__(self):
        return "DefaultClause(%r, for_update=%r)" % (self.arg, self.for_update)
class PassiveDefault(DefaultClause):
    """Deprecated alias of :class:`DefaultClause`.

    Emits a deprecation warning on construction, then behaves exactly
    like :class:`DefaultClause`.
    """
    def __init__(self, *arg, **kw):
        util.warn_deprecated("PassiveDefault is deprecated. Use DefaultClause.")
        super(PassiveDefault, self).__init__(*arg, **kw)
class Constraint(SchemaItem):
    """A table-level SQL constraint."""

    __visit_name__ = 'constraint'

    def __init__(self, name=None, deferrable=None, initially=None,
                 _create_rule=None):
        """Create a SQL constraint.

        name
          Optional, the in-database name of this ``Constraint``.

        deferrable
          Optional bool.  If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        initially
          Optional string.  If set, emit INITIALLY <value> when issuing DDL
          for this constraint.

        _create_rule
          a callable which is passed the DDLCompiler object during
          compilation.  Returns True or False to signal inline generation of
          this Constraint.

          The AddConstraint and DropConstraint DDL constructs provide
          DDLElement's more comprehensive "conditional DDL" approach that is
          passed a database connection when DDL is being issued.  _create_rule
          is instead called during any CREATE TABLE compilation, where there
          may not be any transaction/connection in progress.  However, it
          allows conditional compilation of the constraint even for backends
          which do not support addition of constraints through ALTER TABLE,
          which currently includes SQLite.

          _create_rule is used by some types to create constraints.
          Currently, its call signature is subject to change at any time.
        """
        self.name = name
        self.deferrable = deferrable
        self.initially = initially
        self._create_rule = _create_rule

    @property
    def table(self):
        """The Table this constraint is attached to; raises if unattached."""
        parent = getattr(self, 'parent', None)
        if isinstance(parent, Table):
            return parent
        raise exc.InvalidRequestError("This constraint is not bound to a table. Did you mean to call table.add_constraint(constraint) ?")

    def _set_parent(self, parent):
        self.parent = parent
        parent.constraints.add(self)

    def copy(self, **kw):
        # subclasses implement their own copy semantics
        raise NotImplementedError()
class ColumnCollectionConstraint(Constraint):
    """A constraint that proxies a ColumnCollection."""

    def __init__(self, *columns, **kw):
        """Construct the constraint over a sequence of column names or
        Column objects.

        Keyword arguments (``name``, ``deferrable``, ``initially``) are
        forwarded to :class:`Constraint`.
        """
        super(ColumnCollectionConstraint, self).__init__(**kw)
        self.columns = expression.ColumnCollection()
        pending = [_to_schema_column_or_string(c) for c in columns]
        self._pending_colargs = pending
        # if we were handed real, table-attached Columns, attach to that
        # table immediately; string names wait for _set_parent()
        if pending:
            first = pending[0]
            if isinstance(first, Column) and first.table is not None:
                self._set_parent(first.table)

    def _set_parent(self, table):
        super(ColumnCollectionConstraint, self)._set_parent(table)
        for col in self._pending_colargs:
            # resolve string names against the table's columns
            if isinstance(col, basestring):
                col = table.c[col]
            self.columns.add(col)

    def __contains__(self, x):
        return x in self.columns

    def copy(self, **kw):
        params = dict(name=self.name,
                      deferrable=self.deferrable,
                      initially=self.initially)
        return self.__class__(*self.columns.keys(), **params)

    def contains_column(self, col):
        return self.columns.contains_column(col)

    def __iter__(self):
        return iter(self.columns)

    def __len__(self):
        return len(self.columns)
class CheckConstraint(Constraint):
    """A table- or column-level CHECK constraint.

    Can be included in the definition of a Table or Column.
    """

    def __init__(self, sqltext, name=None, deferrable=None,
                 initially=None, table=None, _create_rule=None):
        """Construct a CHECK constraint.

        sqltext
          A string containing the constraint definition, which will be used
          verbatim, or a SQL expression construct.

        name
          Optional, the in-database name of the constraint.

        deferrable
          Optional bool.  If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        initially
          Optional string.  If set, emit INITIALLY <value> when issuing DDL
          for this constraint.
        """
        super(CheckConstraint, self).__init__(name, deferrable, initially, _create_rule)
        self.sqltext = expression._literal_as_text(sqltext)
        if table is not None:
            self._set_parent(table)

    def __visit_name__(self):
        # rendered differently depending on whether the constraint is
        # attached at the table level or inline on a column
        if isinstance(self.parent, Table):
            return "check_constraint"
        else:
            return "column_check_constraint"
    __visit_name__ = property(__visit_name__)

    def copy(self, **kw):
        # FIX: propagate all constraint-level options.  Previously only
        # ``name`` was carried over, silently dropping deferrable,
        # initially and _create_rule on copy.
        return CheckConstraint(self.sqltext,
                               name=self.name,
                               initially=self.initially,
                               deferrable=self.deferrable,
                               _create_rule=self._create_rule)
class ForeignKeyConstraint(Constraint):
    """A table-level FOREIGN KEY constraint.

    Defines a single column or composite FOREIGN KEY ... REFERENCES
    constraint. For a no-frills, single column foreign key, adding a
    :class:`ForeignKey` to the definition of a :class:`Column` is a shorthand
    equivalent for an unnamed, single column :class:`ForeignKeyConstraint`.

    Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
    """

    __visit_name__ = 'foreign_key_constraint'

    def __init__(self, columns, refcolumns, name=None, onupdate=None,
                 ondelete=None, deferrable=None, initially=None,
                 use_alter=False, link_to_name=False, table=None):
        """Construct a composite-capable FOREIGN KEY.

        :param columns: A sequence of local column names. The named columns
          must be defined and present in the parent Table. The names should
          match the ``key`` given to each column (defaults to the name) unless
          ``link_to_name`` is True.

        :param refcolumns: A sequence of foreign column names or Column
          objects. The columns must all be located within the same Table.

        :param name: Optional, the in-database name of the key.

        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          DELETE and RESTRICT.

        :param ondelete: Optional string. If set, emit ON DELETE <value> when
          issuing DDL for this constraint. Typical values include CASCADE,
          DELETE and RESTRICT.

        :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
          DEFERRABLE when issuing DDL for this constraint.

        :param initially: Optional string. If set, emit INITIALLY <value> when
          issuing DDL for this constraint.

        :param link_to_name: if True, the string name given in ``column`` is
          the rendered name of the referenced column, not its locally assigned
          ``key``.

        :param use_alter: If True, do not emit the DDL for this constraint as
          part of the CREATE TABLE definition. Instead, generate it via an
          ALTER TABLE statement issued after the full collection of tables
          have been created, and drop it via an ALTER TABLE statement before
          the full collection of tables are dropped. This is shorthand for the
          usage of :class:`AddConstraint` and :class:`DropConstraint` applied
          as "after-create" and "before-drop" events on the MetaData object.
          This is normally used to generate/drop constraints on objects that
          are mutually dependent on each other.
        """
        super(ForeignKeyConstraint, self).__init__(name, deferrable, initially)

        self.onupdate = onupdate
        self.ondelete = ondelete
        self.link_to_name = link_to_name
        # use_alter emits the constraint by name via ALTER TABLE, so a
        # name is mandatory in that case
        if self.name is None and use_alter:
            raise exc.ArgumentError("Alterable Constraint requires a name")
        self.use_alter = use_alter

        # maps local column (name or Column) -> ForeignKey marker,
        # preserving declaration order
        self._elements = util.OrderedDict()

        # standalone ForeignKeyConstraint - create
        # associated ForeignKey objects which will be applied to hosted
        # Column objects (in col.foreign_keys), either now or when attached
        # to the Table for string-specified names
        for col, refcol in zip(columns, refcolumns):
            self._elements[col] = ForeignKey(
                refcol,
                _constraint=self,
                name=self.name,
                onupdate=self.onupdate,
                ondelete=self.ondelete,
                use_alter=self.use_alter,
                link_to_name=self.link_to_name
            )

        if table is not None:
            self._set_parent(table)

    @property
    def columns(self):
        # local-side columns (or their string names), in declaration order
        return self._elements.keys()

    @property
    def elements(self):
        # the ForeignKey markers, parallel to ``columns``
        return self._elements.values()

    def _set_parent(self, table):
        super(ForeignKeyConstraint, self)._set_parent(table)
        for col, fk in self._elements.iteritems():
            # string-specified column names now get
            # resolved to Column objects
            if isinstance(col, basestring):
                col = table.c[col]
            fk._set_parent(col)

        if self.use_alter:
            # defer emission to ALTER TABLE around the bulk create/drop,
            # but only on dialects that support ALTER and only when this
            # table participates in the operation
            def supports_alter(ddl, event, schema_item, bind, **kw):
                return table in set(kw['tables']) and bind.dialect.supports_alter
            AddConstraint(self, on=supports_alter).execute_at('after-create', table.metadata)
            DropConstraint(self, on=supports_alter).execute_at('before-drop', table.metadata)

    def copy(self, **kw):
        return ForeignKeyConstraint(
            [x.parent.name for x in self._elements.values()],
            [x._get_colspec(**kw) for x in self._elements.values()],
            name=self.name,
            onupdate=self.onupdate,
            ondelete=self.ondelete,
            use_alter=self.use_alter,
            deferrable=self.deferrable,
            initially=self.initially,
            link_to_name=self.link_to_name
        )
class PrimaryKeyConstraint(ColumnCollectionConstraint):
    """A table-level PRIMARY KEY constraint.

    Defines a single column or composite PRIMARY KEY constraint.  For a
    no-frills primary key, adding ``primary_key=True`` to one or more
    ``Column`` definitions is a shorthand equivalent for an unnamed
    single- or multiple-column PrimaryKeyConstraint.
    """

    __visit_name__ = 'primary_key_constraint'

    def _set_parent(self, table):
        super(PrimaryKeyConstraint, self)._set_parent(table)
        # register ourselves as the table's primary key
        table._set_primary_key(self)

    def _replace(self, col):
        # swap an existing member column for a replacement
        self.columns.replace(col)
class UniqueConstraint(ColumnCollectionConstraint):
    """A table-level UNIQUE constraint.

    Defines a single column or composite UNIQUE constraint.  For a
    no-frills, single column constraint, adding ``unique=True`` to the
    ``Column`` definition is a shorthand equivalent for an unnamed,
    single column UniqueConstraint.  All behavior is inherited from
    :class:`ColumnCollectionConstraint`; only the visitor name differs.
    """

    __visit_name__ = 'unique_constraint'
class Index(SchemaItem):
    """A table-level INDEX.

    Defines a composite (one or more column) INDEX.  For a no-frills,
    single column index, adding ``index=True`` to the ``Column``
    definition is a shorthand equivalent for an unnamed, single column
    Index.
    """

    __visit_name__ = 'index'

    def __init__(self, name, *columns, **kwargs):
        """Construct an index object.

        ``name`` is the name of the index; ``columns`` are the columns to
        include, all of which must belong to the same table.  Recognized
        keyword arguments: ``unique`` (default False) creates a unique
        index; ``postgresql_where`` (default None) creates a partial index
        when using PostgreSQL.  Other keyword arguments are retained for
        dialect use.
        """
        self.name = name
        self.columns = expression.ColumnCollection()
        self.table = None
        self.unique = kwargs.pop('unique', False)
        self.kwargs = kwargs

        for col in columns:
            col = _to_schema_column(col)
            if self.table is None:
                # bind to the first column's table
                self._set_parent(col.table)
            elif col.table != self.table:
                # all columns must be from the same table
                raise exc.ArgumentError(
                    "All index columns must be from same table. "
                    "%s is from %s not %s" % (col, col.table, self.table))
            self.columns.add(col)

    def _set_parent(self, table):
        self.table = table
        table.indexes.add(self)

    @property
    def bind(self):
        """Return the connectable associated with this Index."""
        return self.table.bind

    def create(self, bind=None):
        if bind is None:
            bind = _bind_or_error(self)
        bind.create(self)
        return self

    def drop(self, bind=None):
        if bind is None:
            bind = _bind_or_error(self)
        bind.drop(self)

    def __repr__(self):
        rendered_cols = ', '.join(repr(c) for c in self.columns)
        unique_part = ', unique=True' if self.unique else ''
        return 'Index("%s", %s%s)' % (self.name, rendered_cols, unique_part)
class MetaData(SchemaItem):
    """A collection of Tables and their associated schema constructs.

    Holds a collection of Tables and an optional binding to an ``Engine`` or
    ``Connection``.  If bound, the :class:`~sqlalchemy.schema.Table` objects
    in the collection and their columns may participate in implicit SQL
    execution.

    The `Table` objects themselves are stored in the `metadata.tables`
    dictionary.

    The ``bind`` property may be assigned to dynamically.  A common pattern is
    to start unbound and then bind later when an engine is available::

      metadata = MetaData()
      # define tables
      Table('mytable', metadata, ...)
      # connect to an engine later, perhaps after loading a URL from a
      # configuration file
      metadata.bind = an_engine

    MetaData is a thread-safe object after tables have been explicitly defined
    or loaded via reflection.

    .. index::
      single: thread safety; MetaData
    """

    __visit_name__ = 'metadata'

    # DDL lifecycle events for which listeners may be registered
    ddl_events = ('before-create', 'after-create', 'before-drop', 'after-drop')

    def __init__(self, bind=None, reflect=False):
        """Create a new MetaData object.

        bind
          An Engine or Connection to bind to.  May also be a string or URL
          instance, these are passed to create_engine() and this MetaData will
          be bound to the resulting engine.

        reflect
          Optional, automatically load all tables from the bound database.
          Defaults to False. ``bind`` is required when this option is set.
          For finer control over loaded tables, use the ``reflect`` method of
          ``MetaData``.
        """
        self.tables = {}
        # assignment goes through the ``bind`` property setter below, so a
        # string/URL is converted to an Engine here
        self.bind = bind
        self.metadata = self
        self.ddl_listeners = util.defaultdict(list)
        if reflect:
            if not bind:
                raise exc.ArgumentError(
                    "A bind must be supplied in conjunction with reflect=True")
            self.reflect()

    def __repr__(self):
        return 'MetaData(%r)' % self.bind

    def __contains__(self, table_or_key):
        # accepts a Table object or its string key
        if not isinstance(table_or_key, basestring):
            table_or_key = table_or_key.key
        return table_or_key in self.tables

    def __getstate__(self):
        # only the tables dict is pickled; the bind is not portable
        return {'tables': self.tables}

    def __setstate__(self, state):
        self.tables = state['tables']
        self._bind = None

    def is_bound(self):
        """True if this MetaData is bound to an Engine or Connection."""
        return self._bind is not None

    def bind(self):
        """An Engine or Connection to which this MetaData is bound.

        This property may be assigned an ``Engine`` or ``Connection``, or
        assigned a string or URL to automatically create a basic ``Engine``
        for this bind with ``create_engine()``.
        """
        return self._bind

    def _bind_to(self, bind):
        """Bind this MetaData to an Engine, Connection, string or URL."""
        # URL is imported lazily to avoid a circular import at module load
        global URL
        if URL is None:
            from sqlalchemy.engine.url import URL
        if isinstance(bind, (basestring, URL)):
            from sqlalchemy import create_engine
            self._bind = create_engine(bind)
        else:
            self._bind = bind

    # getter/setter pair wired up as a single property; the plain
    # functions above become inaccessible under these names
    bind = property(bind, _bind_to)

    def clear(self):
        """Clear all Table objects from this MetaData."""
        # TODO: why have clear()/remove() but not all
        # other accessors/mutators for the tables dict ?
        self.tables.clear()

    def remove(self, table):
        """Remove the given Table object from this MetaData."""
        # TODO: scan all other tables and remove FK _column
        del self.tables[table.key]

    @property
    def sorted_tables(self):
        """Returns a list of ``Table`` objects sorted in order of
        dependency.
        """
        from sqlalchemy.sql.util import sort_tables
        return sort_tables(self.tables.itervalues())

    def reflect(self, bind=None, schema=None, only=None):
        """Load all available table definitions from the database.

        Automatically creates ``Table`` entries in this ``MetaData`` for any
        table available in the database but not yet present in the
        ``MetaData``.  May be called multiple times to pick up tables recently
        added to the database, however no special action is taken if a table
        in this ``MetaData`` no longer exists in the database.

        bind
          A :class:`~sqlalchemy.engine.base.Connectable` used to access the
          database; if None, uses the existing bind on this ``MetaData``, if
          any.

        schema
          Optional, query and reflect tables from an alternate schema.

        only
          Optional.  Load only a sub-set of available named tables.  May be
          specified as a sequence of names or a callable.

          If a sequence of names is provided, only those tables will be
          reflected.  An error is raised if a table is requested but not
          available.  Named tables already present in this ``MetaData`` are
          ignored.

          If a callable is provided, it will be used as a boolean predicate to
          filter the list of potential table names.  The callable is called
          with a table name and this ``MetaData`` instance as positional
          arguments and should return a true value for any table to reflect.
        """
        reflect_opts = {'autoload': True}
        if bind is None:
            bind = _bind_or_error(self)
            conn = None
        else:
            reflect_opts['autoload_with'] = bind
            conn = bind.contextual_connect()

        if schema is not None:
            reflect_opts['schema'] = schema

        available = util.OrderedSet(bind.engine.table_names(schema,
                                                            connection=conn))
        current = set(self.tables.iterkeys())

        if only is None:
            load = [name for name in available if name not in current]
        elif util.callable(only):
            load = [name for name in available
                    if name not in current and only(name, self)]
        else:
            # explicit name list: every requested name must exist
            missing = [name for name in only if name not in available]
            if missing:
                s = schema and (" schema '%s'" % schema) or ''
                raise exc.InvalidRequestError(
                    'Could not reflect: requested table(s) not available '
                    'in %s%s: (%s)' % (bind.engine.url, s, ', '.join(missing)))
            load = [name for name in only if name not in current]

        for name in load:
            Table(name, self, **reflect_opts)

    def append_ddl_listener(self, event, listener):
        """Append a DDL event listener to this ``MetaData``.

        The ``listener`` callable will be triggered when this ``MetaData`` is
        involved in DDL creates or drops, and will be invoked either before
        all Table-related actions or after.

        Arguments are:

        event
          One of ``MetaData.ddl_events``; 'before-create', 'after-create',
          'before-drop' or 'after-drop'.

        listener
          A callable, invoked with three positional arguments:

          event
            The event currently being handled

          target
            The ``MetaData`` object being operated upon

          bind
            The ``Connection`` being used for DDL execution.

        Listeners are added to the MetaData's ``ddl_listeners`` attribute.

        Note: MetaData listeners are invoked even when ``Tables`` are created
        in isolation.  This may change in a future release. I.e.::

          # triggers all MetaData and Table listeners:
          metadata.create_all()

          # triggers MetaData listeners too:
          some.table.create()
        """
        if event not in self.ddl_events:
            raise LookupError(event)
        self.ddl_listeners[event].append(listener)

    def create_all(self, bind=None, tables=None, checkfirst=True):
        """Create all tables stored in this metadata.

        Conditional by default, will not attempt to recreate tables already
        present in the target database.

        bind
          A :class:`~sqlalchemy.engine.base.Connectable` used to access the
          database; if None, uses the existing bind on this ``MetaData``, if
          any.

        tables
          Optional list of ``Table`` objects, which is a subset of the total
          tables in the ``MetaData`` (others are ignored).

        checkfirst
          Defaults to True, don't issue CREATEs for tables already present
          in the target database.
        """
        if bind is None:
            bind = _bind_or_error(self)
        bind.create(self, checkfirst=checkfirst, tables=tables)

    def drop_all(self, bind=None, tables=None, checkfirst=True):
        """Drop all tables stored in this metadata.

        Conditional by default, will not attempt to drop tables not present in
        the target database.

        bind
          A :class:`~sqlalchemy.engine.base.Connectable` used to access the
          database; if None, uses the existing bind on this ``MetaData``, if
          any.

        tables
          Optional list of ``Table`` objects, which is a subset of the
          total tables in the ``MetaData`` (others are ignored).

        checkfirst
          Defaults to True, only issue DROPs for tables confirmed to be
          present in the target database.
        """
        if bind is None:
            bind = _bind_or_error(self)
        bind.drop(self, checkfirst=checkfirst, tables=tables)
class ThreadLocalMetaData(MetaData):
    """A MetaData variant that presents a different ``bind`` in every thread.

    Makes the ``bind`` property of the MetaData a thread-local value, allowing
    this collection of tables to be bound to different ``Engine``
    implementations or connections in each thread.

    The ThreadLocalMetaData starts off bound to None in each thread.  Binds
    must be made explicitly by assigning to the ``bind`` property or using
    ``connect()``.  You can also re-bind dynamically multiple times per
    thread, just like a regular ``MetaData``.
    """

    __visit_name__ = 'metadata'

    def __init__(self):
        """Construct a ThreadLocalMetaData."""
        # per-thread storage for the current engine
        self.context = util.threading.local()
        # cache of engines created/seen across all threads, keyed by the
        # value originally assigned to ``bind``
        self.__engines = {}
        super(ThreadLocalMetaData, self).__init__()

    def bind(self):
        """The bound Engine or Connection for this thread.

        This property may be assigned an Engine or Connection, or assigned a
        string or URL to automatically create a basic Engine for this bind
        with ``create_engine()``."""
        return getattr(self.context, '_engine', None)

    def _bind_to(self, bind):
        """Bind to a Connectable in the caller's thread."""
        # URL is imported lazily to avoid a circular import at module load
        global URL
        if URL is None:
            from sqlalchemy.engine.url import URL
        if isinstance(bind, (basestring, URL)):
            try:
                # reuse an engine previously created for this URL/string
                self.context._engine = self.__engines[bind]
            except KeyError:
                from sqlalchemy import create_engine
                e = create_engine(bind)
                self.__engines[bind] = e
                self.context._engine = e
        else:
            # TODO: this is squirrely.  we shouldnt have to hold onto engines
            # in a case like this
            if bind not in self.__engines:
                self.__engines[bind] = bind
            self.context._engine = bind

    # getter/setter pair wired up as a single thread-aware property
    bind = property(bind, _bind_to)

    def is_bound(self):
        """True if there is a bind for this thread."""
        return (hasattr(self.context, '_engine') and
                self.context._engine is not None)

    def dispose(self):
        """Dispose all bound engines, in all thread contexts."""
        for e in self.__engines.itervalues():
            # plain Connectables placed in the cache may lack dispose()
            if hasattr(e, 'dispose'):
                e.dispose()
class SchemaVisitor(visitors.ClauseVisitor):
    """Define the visiting for ``SchemaItem`` objects."""

    # mark traversals from this visitor as schema-level
    __traverse_options__ = {'schema_visitor': True}
class DDLElement(expression.Executable, expression.ClauseElement):
    """Base class for DDL expression constructs."""
    # DDL statements autocommit; union() keeps the inherited options intact.
    _execution_options = expression.Executable.\
                            _execution_options.union({'autocommit':True})
    # Schema item this DDL is rendered "against" (set by against()).
    target = None
    # Optional filter: dialect name, collection of names, or callable
    # predicate -- evaluated in _should_execute().
    on = None
    def execute(self, bind=None, target=None):
        """Execute this DDL immediately.
        Executes the DDL statement in isolation using the supplied
        :class:`~sqlalchemy.engine.base.Connectable` or :class:`~sqlalchemy.engine.base.Connectable` assigned to the ``.bind`` property,
        if not supplied. If the DDL has a conditional ``on`` criteria, it
        will be invoked with None as the event.
        bind
          Optional, an ``Engine`` or ``Connection``. If not supplied, a
          valid :class:`~sqlalchemy.engine.base.Connectable` must be present in the ``.bind`` property.
        target
          Optional, defaults to None. The target SchemaItem for the
          execute call. Will be passed to the ``on`` callable if any,
          and may also provide string expansion data for the
          statement. See ``execute_at`` for more information.
        """
        if bind is None:
            bind = _bind_or_error(self)
        # Explicit execution: the ``on`` criteria receives event=None.
        if self._should_execute(None, target, bind):
            return bind.execute(self.against(target))
        else:
            bind.engine.logger.info("DDL execution skipped, criteria not met.")
    def execute_at(self, event, target):
        """Link execution of this DDL to the DDL lifecycle of a SchemaItem.
        Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance, executing
        it when that schema item is created or dropped. The DDL statement
        will be executed using the same Connection and transactional context
        as the Table create/drop itself. The ``.bind`` property of this
        statement is ignored.
        event
          One of the events defined in the schema item's ``.ddl_events``;
          e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop'
        target
          The Table or MetaData instance for which this DDLElement will
          be associated with.
        A DDLElement instance can be linked to any number of schema items.
        ``execute_at`` builds on the ``append_ddl_listener`` interface of
        MetaDta and Table objects.
        Caveat: Creating or dropping a Table in isolation will also trigger
        any DDL set to ``execute_at`` that Table's MetaData. This may change
        in a future release.
        """
        # Validate the target supports DDL events before registering.
        if not hasattr(target, 'ddl_listeners'):
            raise exc.ArgumentError(
                "%s does not support DDL events" % type(target).__name__)
        if event not in target.ddl_events:
            raise exc.ArgumentError(
                "Unknown event, expected one of (%s), got '%r'" %
                (', '.join(target.ddl_events), event))
        target.ddl_listeners[event].append(self)
        # Returned for chaining.
        return self
    @expression._generative
    def against(self, target):
        """Return a copy of this DDL against a specific schema item."""
        # NOTE(review): @_generative presumably invokes _generate() so this
        # mutates a copy, not self -- confirm against the decorator.
        self.target = target
    def __call__(self, event, target, bind, **kw):
        """Execute the DDL as a ddl_listener."""
        if self._should_execute(event, target, bind, **kw):
            return bind.execute(self.against(target))
    def _check_ddl_on(self, on):
        # Type validation only; the actual filtering happens in
        # _should_execute().
        if (on is not None and
            (not isinstance(on, (basestring, tuple, list, set)) and not util.callable(on))):
            raise exc.ArgumentError(
                "Expected the name of a database dialect, a tuple of names, or a callable for "
                "'on' criteria, got type '%s'." % type(on).__name__)
    def _should_execute(self, event, target, bind, **kw):
        # Dispatch on the shape of ``on``: absent, dialect name, name
        # collection, or callable predicate.
        if self.on is None:
            return True
        elif isinstance(self.on, basestring):
            return self.on == bind.engine.name
        elif isinstance(self.on, (tuple, list, set)):
            return bind.engine.name in self.on
        else:
            return self.on(self, event, target, bind, **kw)
    def bind(self):
        if self._bind:
            return self._bind
    def _set_bind(self, bind):
        self._bind = bind
    # Read/write property over the two accessors above.
    bind = property(bind, _set_bind)
    def _generate(self):
        # Shallow copy used by the @_generative machinery.
        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        return s
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a Dialect."""
        return dialect.ddl_compiler(dialect, self, **kw)
class DDL(DDLElement):
    """A literal DDL statement.
    Specifies literal SQL DDL to be executed by the database. DDL objects can
    be attached to ``Tables`` or ``MetaData`` instances, conditionally
    executing SQL as part of the DDL lifecycle of those schema items. Basic
    templating support allows a single DDL instance to handle repetitive tasks
    for multiple tables.
    Examples::
      tbl = Table('users', metadata, Column('uid', Integer)) # ...
      DDL('DROP TRIGGER users_trigger').execute_at('before-create', tbl)
      spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE', on='somedb')
      spow.execute_at('after-create', tbl)
      drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
      connection.execute(drop_spow)
    When operating on Table events, the following ``statement``
    string substitions are available::
      %(table)s  - the Table name, with any required quoting applied
      %(schema)s - the schema name, with any required quoting applied
      %(fullname)s - the Table name including schema, quoted if needed
    The DDL's ``context``, if any, will be combined with the standard
    substutions noted above.  Keys present in the context will override
    the standard substitutions.
    """
    __visit_name__ = "ddl"
    def __init__(self, statement, on=None, context=None, bind=None):
        """Create a DDL statement.
        statement
          A string or unicode string to be executed.  Statements will be
          processed with Python's string formatting operator.  See the
          ``context`` argument and the ``execute_at`` method.
          A literal '%' in a statement must be escaped as '%%'.
          SQL bind parameters are not available in DDL statements.
        on
          Optional filtering criteria.  May be a string, tuple or a callable
          predicate.  If a string, it will be compared to the name of the
          executing database dialect::
            DDL('something', on='postgresql')
          If a tuple, specifies multiple dialect names::
            DDL('something', on=('postgresql', 'mysql'))
          If a callable, it will be invoked with four positional arguments
          as well as optional keyword arguments:
            ddl
              This DDL element.
            event
              The name of the event that has triggered this DDL, such as
              'after-create' Will be None if the DDL is executed explicitly.
            target
              The ``Table`` or ``MetaData`` object which is the target of
              this event. May be None if the DDL is executed explicitly.
            connection
              The ``Connection`` being used for DDL execution
            \**kw
              Keyword arguments which may be sent include:
                tables - a list of Table objects which are to be created/
                dropped within a MetaData.create_all() or drop_all() method
                call.
          If the callable returns a true value, the DDL statement will be
          executed.
        context
          Optional dictionary, defaults to None.  These values will be
          available for use in string substitutions on the DDL statement.
        bind
          Optional. A :class:`~sqlalchemy.engine.base.Connectable`, used by default when ``execute()``
          is invoked without a bind argument.
        """
        if not isinstance(statement, basestring):
            raise exc.ArgumentError(
                "Expected a string or unicode SQL statement, got '%r'" %
                statement)
        self.statement = statement
        self.context = context or {}
        # Validates the *type* of ``on`` only; evaluation is deferred to
        # DDLElement._should_execute().
        self._check_ddl_on(on)
        self.on = on
        self._bind = bind
    def __repr__(self):
        # e.g. <DDL@140...; 'DROP TRIGGER ...', on='postgresql'>; only set
        # attributes are shown.
        return '<%s@%s; %s>' % (
            type(self).__name__, id(self),
            ', '.join([repr(self.statement)] +
                      ['%s=%r' % (key, getattr(self, key))
                       for key in ('on', 'context')
                       if getattr(self, key)]))
def _to_schema_column(element):
    """Coerce *element* to a Column, unwrapping ``__clause_element__``.

    Raises ArgumentError when the result is not a Column.
    """
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    if isinstance(element, Column):
        return element
    raise exc.ArgumentError("schema.Column object expected")
def _to_schema_column_or_string(element):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return element
class _CreateDropBase(DDLElement):
    """Base class for DDL constucts that represent CREATE and DROP or equivalents.
    The common theme of _CreateDropBase is a single
    ``element`` attribute which refers to the element
    to be created or dropped.
    """
    def __init__(self, element, on=None, bind=None):
        # The schema item (Table, Sequence, Index, Constraint, ...) this
        # CREATE/DROP statement targets.
        self.element = element
        self._check_ddl_on(on)
        self.on = on
        self.bind = bind
    def _create_rule_disable(self, compiler):
        """Allow disable of _create_rule using a callable.
        Pass to _create_rule using
        util.portable_instancemethod(self._create_rule_disable)
        to retain serializability.
        """
        # Always False: suppresses inline rendering of the element.
        return False
# Thin declarative CREATE/DROP constructs: each only sets ``__visit_name__``,
# which selects the corresponding method on the dialect's DDL compiler.
class CreateTable(_CreateDropBase):
    """Represent a CREATE TABLE statement."""
    __visit_name__ = "create_table"
class DropTable(_CreateDropBase):
    """Represent a DROP TABLE statement."""
    __visit_name__ = "drop_table"
class CreateSequence(_CreateDropBase):
    """Represent a CREATE SEQUENCE statement."""
    __visit_name__ = "create_sequence"
class DropSequence(_CreateDropBase):
    """Represent a DROP SEQUENCE statement."""
    __visit_name__ = "drop_sequence"
class CreateIndex(_CreateDropBase):
    """Represent a CREATE INDEX statement."""
    __visit_name__ = "create_index"
class DropIndex(_CreateDropBase):
    """Represent a DROP INDEX statement."""
    __visit_name__ = "drop_index"
class AddConstraint(_CreateDropBase):
    """Represent an ALTER TABLE ADD CONSTRAINT statement."""
    __visit_name__ = "add_constraint"
    def __init__(self, element, *args, **kw):
        super(AddConstraint, self).__init__(element, *args, **kw)
        # Attach a rule that always returns False (see
        # _create_rule_disable), presumably so the constraint is not also
        # rendered inline with the table DDL -- confirm with the compiler.
        element._create_rule = util.portable_instancemethod(self._create_rule_disable)
class DropConstraint(_CreateDropBase):
    """Represent an ALTER TABLE DROP CONSTRAINT statement."""
    __visit_name__ = "drop_constraint"
    def __init__(self, element, cascade=False, **kw):
        # cascade: when True the compiler may emit CASCADE semantics.
        self.cascade = cascade
        super(DropConstraint, self).__init__(element, **kw)
        # Same inline-rendering suppression as AddConstraint.
        element._create_rule = util.portable_instancemethod(self._create_rule_disable)
def _bind_or_error(schemaitem, msg=None):
    """Return *schemaitem*'s bind, or raise UnboundExecutionError.

    When no Engine/Connection is associated, builds a descriptive error
    message (unless *msg* is supplied) and raises.
    """
    bind = schemaitem.bind
    if bind:
        return bind
    # No bind: describe the item and which attribute the user should set.
    name = schemaitem.__class__.__name__
    label = getattr(schemaitem, 'fullname',
                    getattr(schemaitem, 'name', None))
    item = '%s %r' % (name, label) if label else name
    if isinstance(schemaitem, (MetaData, DDL)):
        bindable = "the %s's .bind" % name
    else:
        bindable = "this %s's .metadata.bind" % name
    if msg is None:
        msg = ('The %s is not bound to an Engine or Connection. '
               'Execution can not proceed without a database to execute '
               'against. Either execute with an explicit connection or '
               'assign %s to enable implicit execution.') % (item, bindable)
    raise exc.UnboundExecutionError(msg)
| [
"gterranova@GTERRANOVA.9ren.org"
] | gterranova@GTERRANOVA.9ren.org |
af6d319381a3634ed79b1ce5fbc909e292583f02 | 77ce46ae4198108326e7606f4f4d0bb7d59c6555 | /rpcenable/async.py | 00a6c44db3df31e5888f2a2ab600366c592b5d6e | [] | no_license | mtrdesign/django-rpcenable | 5ca87287ccef99ef7c8f6221a48ab8daf55b9228 | 7e8836699f5cc7c140a154946246f4ab2403ad39 | refs/heads/master | 2020-09-22T02:16:37.600130 | 2013-12-09T18:12:09 | 2013-12-09T18:12:09 | 6,478,013 | 2 | 0 | null | 2013-12-09T13:09:12 | 2012-10-31T17:38:02 | Python | UTF-8 | Python | false | false | 784 | py | import atexit
import Queue
import threading
import functools
from django.core.mail import mail_admins
def _worker():
    """Daemon-thread loop: pull (func, args, kwargs) jobs off the module
    queue and run them, emailing admins on any failure."""
    while True:
        func, args, kwargs = _queue.get()
        try:
            func(*args, **kwargs)
        except:
            # Bare except on purpose: the worker must survive any job
            # failure; the traceback is mailed to the Django admins.
            import traceback
            details = traceback.format_exc()
            mail_admins('Background process exception', details)
        finally:
            _queue.task_done() # so we can join at exit
def postpone(f):
    """Decorator: run ``f`` asynchronously on the background worker thread.

    The wrapped call enqueues the invocation and returns None immediately;
    the original return value of ``f`` is discarded.
    """
    @functools.wraps(f)
    def wrapper (*args, **kwargs):
        _queue.put((f, args, kwargs))
    return wrapper
return wrapper
# Module-level job queue and single daemon worker thread, started at import
# time (Python 2 ``Queue`` module).
_queue = Queue.Queue()
_thread = threading.Thread(target=_worker)
_thread.daemon = True
_thread.start()
def _cleanup():
    """Block interpreter shutdown until all queued jobs have finished."""
    _queue.join() # so we don't exit too soon
atexit.register(_cleanup)
"tie@TTOP.(none)"
] | tie@TTOP.(none) |
915bb39fd2f17f7a5a22a7b3fbb3525926ae7f63 | e7f8acff7948cf6618043ec7b998867b7c6ad831 | /python/pythonday5/pythonday5_5_文件操作.py | b822ba838138be00185e46990a9ce22e8e4143ea | [] | no_license | z778107203/pengzihao1999.github.io | 9b97ecefba6557f1f815a5f17da33118ddf78b37 | d683ba31772fb3a42085cb44ee0601637bcdd84e | refs/heads/master | 2022-11-13T10:44:00.988950 | 2019-10-08T15:29:04 | 2019-10-08T15:29:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """
r :以只读方式打开文件,文件的指针将会放在文件的开头,如果文件不存在抛出异常
w :以只写方式打开文件,如果文件存在会被覆盖,如果文件不存在,创建新文件
a :以追加方式打开文件,如果文件存在,文件指针将会放在文件的结尾,如果不存在,创建新文件进行写入
r+:以读写方式打开文件,文件的指针会放在文件的开头,如果文件不存在,抛出异常
w+:以读写方式打开文件,如果文件存在会被覆盖,如果文件不存在,创建新文件
a+:以读写方式打开文件,如果文件已经存在,文件指针将会放在文件的结尾,如果文件不存在,创建新文件进行写入
"""
# Read and print the whole file.  Using a context manager guarantees the
# handle is closed even if read() raises (the original open()/close() pair
# leaked the handle on error).  Note: "file" shadows the Py2 builtin; kept
# local to the with-block here.
with open("wenjian") as f:
    text = f.read()
print(text)
| [
"916811138@qq.com"
] | 916811138@qq.com |
9eb70544daa471de18bc1c32603379098b98bf4d | 1d6fbc4eecad619a889682ee026082e46e501475 | /Control.py | 9994cc22acad8a03307af8c85b14214a2f01028b | [] | no_license | matheus-osorio/project_opener | 740ddf40f2a9a03b743081d6dc9c083f25bcdb07 | ceb6a97a58c255c4f8e4cc085410510091de94f3 | refs/heads/main | 2023-04-18T00:31:27.435748 | 2021-05-09T19:00:32 | 2021-05-09T19:00:32 | 361,580,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py | from configs.Organizer import Organizer
class Control:
    """Dispatch a parsed command object to the matching Organizer action.

    ``obj['starter']`` names the method to invoke; ``obj['params']`` carries
    its arguments.  ``__getitem__`` turns attribute lookup into indexing so
    ``self[name]`` resolves the handler.
    """

    def run(self, obj, path):
        """Create the Organizer for *path*, dispatch, and print the result."""
        action = obj['starter']
        self.Organizer = Organizer(path)
        response = self[action](obj)
        txt = f'''
        RESPONSE TYPE: {response['type']}
        --------------
        RESPONSE MESSAGE: {response['text']}
        '''
        print(txt)

    def new(self, obj):
        # Forward all params as keyword arguments.
        return self.Organizer.new_project(**obj['params'])

    def edit(self, obj):
        # Pop the hash out of params (mutates obj['params'] in place, as the
        # original did) and pass the remainder as the edit payload.
        params = obj['params']
        proj_hash = params['hash']
        del params['hash']
        return self.Organizer.edit_project(proj_hash, obj['params'])

    def list(self, obj):
        return self.Organizer.list_projects()

    def delete(self, obj):
        return self.Organizer.delete_project(obj['params']['hash'])

    def define(self, obj):
        params = obj['params']
        return self.Organizer.define_variable(params['name'][0], params['content'])

    def change_hash_length(self, obj):
        # The content must parse as an integer.
        try:
            length = int(obj['params']['content'])
        except ValueError:
            raise Exception('With this function type must be Integer')
        except:
            raise Exception('Something went wrong...')
        return self.Organizer.change_sys_variable('hash_length', length)

    def change_standart_folder(self, obj):
        return self.Organizer.change_sys_variable('standart_folder', obj['params']['content'])

    def change_standart_editor(self, obj):
        return self.Organizer.change_sys_variable('standart_editor', obj['params']['content'])

    def change_replacement_policy(self, obj):
        policy = obj['params']['content']
        if policy not in ('replace', 'ignore'):
            raise Exception('content must be either "replace" or "ignore".')
        return self.Organizer.change_sys_variable('duplicate_policy', policy)

    def open(self, obj):
        return self.Organizer.open_project(obj['params']['name'][0])

    def __getitem__(self, attr):
        # Allows self['new'] style dynamic dispatch in run().
        return self.__getattribute__(attr)
| [
"matheuscerqueir@gmail.com"
] | matheuscerqueir@gmail.com |
fabdb59965535befc9dac9f98e452ca2667188c8 | 3331bef496806453a89f469d0a93b71d42f4d0d9 | /clase9.py | f7d374d056bf1741423d19a3e59ce08bd36d63f2 | [] | no_license | katherinelasluisa/Clase9 | 56ebda90ef2b63cec07f362851c145a91b3328f5 | e353edb3c960b52ffb9ed110a3d3f61363381bb8 | refs/heads/master | 2021-01-09T20:40:06.425243 | 2016-06-16T14:54:28 | 2016-06-16T14:54:28 | 61,290,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #trabajo en clase segundo emi
import time
print (time.localtime())
#time.struct_time(tm_year=2020, tm_mon=2, tm_mday=23, tm_hour=22, tm_min=18, tm_sec=39, tm_wday=0, tm_yday=73, tm_isdst=0)
t=time.localtime()
year=t[1]
month= t[2]
day= t[3]
wday=t[4]
min=t[5]
print (year)
print (month) | [
"katherinelasluisa7@gmail.com"
] | katherinelasluisa7@gmail.com |
37a4bed3bf5ad368c0622bb623e70c8852cd6ba3 | c0239d75a8199ec84ad683f945c21785c1b59386 | /dingtalk/api/rest/CorpDingTaskCreateRequest.py | ebe77db44bea52c850f1888fb9ce57aede6aae7f | [] | no_license | luss613/oauth_dingtalk | 9f253a75ce914c577dbabfb84e97fd883e80e04b | 1e2554642d2b16c642a031670d08efa4a74e8252 | refs/heads/master | 2023-04-23T01:16:33.450821 | 2020-06-18T08:22:57 | 2020-06-18T08:22:57 | 264,966,287 | 1 | 1 | null | 2020-06-18T08:31:24 | 2020-05-18T14:33:25 | Python | UTF-8 | Python | false | false | 332 | py | '''
Created by auto_sdk on 2018.07.25
'''
from dingtalk.api.base import RestApi
class CorpDingTaskCreateRequest(RestApi):
	"""Auto-generated request wrapper for the DingTalk
	``dingtalk.corp.ding.task.create`` REST endpoint (POST)."""
	def __init__(self,url=None):
		RestApi.__init__(self,url)
		# Task payload value object; the caller populates this before
		# executing the request.
		self.task_send_v_o = None
	def getHttpMethod(self):
		return 'POST'
	def getapiname(self):
		return 'dingtalk.corp.ding.task.create'
| [
"paul.lu@belstar.com.cn"
] | paul.lu@belstar.com.cn |
62f564b7edaf6eb32c0ccc8850acdb734cae478b | 487d6bbdd801d37478734ba5a96a670abada4021 | /MyWagtailWebsite/resume/migrations/0001_initial.py | 47d5a3c05ca09cca7ee3ab1e97374da7e3619feb | [] | no_license | Munnu/PersonalWebsite-Wagtail | 5414218f2b24933a462ac95dbc40de9fd449d772 | 950cf8d43e0b68c7f4445100c94ca67f738b477f | refs/heads/master | 2022-12-23T08:05:05.421092 | 2018-09-21T06:43:15 | 2018-09-21T06:43:15 | 84,692,087 | 0 | 0 | null | 2022-12-07T23:48:00 | 2017-03-12T01:41:09 | Python | UTF-8 | Python | false | false | 892 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-20 07:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.1: creates the ``Resume`` page model as a
    # multi-table-inheritance child of wagtailcore.Page with one rich-text
    # ``body`` field.  Do not hand-edit applied migrations.
    initial = True
    dependencies = [
        ('wagtailcore', '0033_remove_golive_expiry_help_text'),
    ]
    operations = [
        migrations.CreateModel(
            name='Resume',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
    ]
| [
"moniqueblake4@gmail.com"
] | moniqueblake4@gmail.com |
2752abedd7b48de52b2fddab35c8f2b31dad7226 | 3cb4c484912f540632edd5d0446df1867a68ce62 | /src/pytorch_impl/cnn_pdtb_arg_multiclass_jl.py | 6ead21d65aaf3bf957a0adf344db0e9af4c9b6cb | [
"MIT"
] | permissive | siddharthvaria/WordPair-CNN | a91307c35d8d163299b2a09dbe971e69873ef866 | d54cef994e49615e6bf9d911c11cb5992862bcce | refs/heads/master | 2022-03-25T07:51:47.634344 | 2019-08-10T04:10:18 | 2019-08-10T04:10:18 | 197,980,051 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,457 | py | import argparse
import time
import math
import os
import pickle
import numpy as np
np.random.seed(57697)
from sklearn.metrics import classification_report
from sklearn.utils import shuffle
from pytorch_impl.cnn_pdtb_classifier_utils_pytorch import get_W, post_process, print_params, get_time_stamp
from pytorch_impl.cnn_pdtb_classifier_utils_pytorch import get_class_weights, get_class_names, get_binary_class_index
from pytorch_impl.cnn_pdtb_classifier_utils_pytorch import load_data_for_mtl, compute_metrices
import torch
torch.manual_seed(57697)
import torch.nn as nn
import torch.nn.functional as F
class LossCompute:
    """Compute the classification loss and, in training mode, apply one
    optimizer step.

    clf_criterion: loss module, e.g. nn.CrossEntropyLoss.
    opt: optional optimizer; when given, step()/zero_grad() run per call.
    """

    def __init__(self, clf_criterion, opt = None):
        self.clf_criterion = clf_criterion
        self.opt = opt

    def __call__(self, Y, clf_logits, only_return_losses = False):
        # Per-batch classification loss.
        losses = self.clf_criterion(clf_logits, Y)
        # Validation/test path: just report the loss tensor.
        if only_return_losses:
            return losses
        # Training path: backprop, then (optionally) step the optimizer.
        total = losses.sum()
        total.backward()
        optimizer = self.opt
        if optimizer is not None:
            optimizer.step()
            optimizer.zero_grad()
        return total.item()
class PDTB_Classifier(nn.Module):
    """CNN over the two discourse arguments (word + POS embeddings), a gated
    dense layer, and separate implicit/explicit softmax heads whose logits
    are mixed per example by the ``is_imp`` flag."""
    def __init__(self, args):
        super(PDTB_Classifier, self).__init__()
        self.args = args
        # word embedding layer
        self.word_embed = nn.Embedding(args.W.shape[0], args.W.shape[1])
        self.word_embed.weight.data.copy_(torch.from_numpy(args.W))
        if args.emb_static:
            self.word_embed.weight.requires_grad = False
        # pos embedding layer (one-hot identity matrix, always frozen)
        self.pos_embed = nn.Embedding(args.P.shape[0], args.P.shape[1])
        self.pos_embed.weight.data.copy_(torch.from_numpy(args.P))
        self.pos_embed.weight.requires_grad = False
        emb_dim = args.W.shape[1] + args.P.shape[1]
        # convolutional layers for arg: one Conv2d per filter height in
        # args.fsz_arg, each spanning the full embedding width.
        self.conv_layers_arg = nn.ModuleList([nn.Conv2d(1, args.nfmaps_arg, (K, emb_dim), stride = (1, 1)) for K in args.fsz_arg])
        # initialize conv_layers_arg
        for conv_layer in self.conv_layers_arg:
            # nn.init.xavier_uniform_(conv_layer.weight, gain = nn.init.calculate_gain('relu'))
            nn.init.xavier_uniform(conv_layer.weight, gain = nn.init.calculate_gain('relu'))
            conv_layer.bias.data.fill_(0)
            # nn.init.zeros_(conv_layer.bias)
#
#         # dense layers for arg
#         dense_layers_arg = []
#         for i, D in enumerate(args.dsz_arg):
#             if i == 0:
#                 dense_layers_arg.append(nn.Linear(len(args.fsz_arg) * args.nfmaps_arg, D))
#             else:
#                 dense_layers_arg.append(nn.Linear(args.dsz_arg[i - 1], D))
#
#         self.dense_layers_arg = nn.ModuleList(dense_layers_arg)
#
#         # initialize dense_layers_arg
#         for dense_layer in self.dense_layers_arg:
#             nn.init.xavier_uniform_(dense_layer.weight, gain = nn.init.calculate_gain('relu'))
#             dense_layer.bias.data.fill_(0)
#             # nn.init.zeros_(dense_layer.bias)
        # define gate1: "capacity" branch (ReLU) and sigmoid gate branch,
        # both fed by the concatenated left+right pooled features.
        self.dense_arg_cap = nn.Linear(2 * len(args.fsz_arg) * args.nfmaps_arg, args.gate_units_arg)
        # nn.init.xavier_uniform_(self.dense_arg_cap.weight, gain = nn.init.calculate_gain('relu'))
        nn.init.xavier_uniform(self.dense_arg_cap.weight, gain = nn.init.calculate_gain('relu'))
        self.dense_arg_cap.bias.data.fill_(0)
        self.dense_arg_gate = nn.Linear(2 * len(args.fsz_arg) * args.nfmaps_arg, args.gate_units_arg)
        # nn.init.xavier_uniform_(self.dense_arg_gate.weight, gain = 1)
        nn.init.xavier_uniform(self.dense_arg_gate.weight, gain = 1)
        self.dense_arg_gate.bias.data.fill_(0)
        # classification layer for imp (implicit relations)
        self.clf_layer_imp = nn.Linear(args.gate_units_arg, args.nclasses)
        # nn.init.xavier_uniform_(self.clf_layer_imp.weight, gain = 1)
        nn.init.xavier_uniform(self.clf_layer_imp.weight, gain = 1)
        self.clf_layer_imp.bias.data.fill_(0)
        # classification layer for exp (explicit relations)
        self.clf_layer_exp = nn.Linear(args.gate_units_arg, args.nclasses)
        # nn.init.xavier_uniform_(self.clf_layer_exp.weight, gain = 1)
        nn.init.xavier_uniform(self.clf_layer_exp.weight, gain = 1)
        self.clf_layer_exp.bias.data.fill_(0)
    def forward(self, X):
        """X = (left words, left POS, right words, right POS, is_imp).
        Returns (batch, nclasses) logits; each example uses the implicit or
        explicit head according to its ``is_imp`` flag."""
        X_larg, X_lpos, X_rarg, X_rpos, is_imp = X
        h_arg_vecs = []
        # Encode each argument side (left, right) independently.
        for x in [(X_larg, X_lpos), (X_rarg, X_rpos)]:
            x_w, x_p = x
            x_w = self.word_embed(x_w) # (batch_size, seq_len, dim)
            x_p = self.pos_embed(x_p)
            x_w_p = torch.cat([x_w, x_p], 2)
            x_w_p = x_w_p.unsqueeze(1) # (batch_size, 1, seq_len, dim)
            x_convs = [F.relu(conv_layer(x_w_p)).squeeze(3) for conv_layer in self.conv_layers_arg]
            # At this point x_convs is [(batch_size, nfmaps_arg, seq_len_new), ...]*len(fsz_arg)
            x_max_pools = [F.max_pool1d(xi, xi.size(2)).squeeze(2) for xi in x_convs] # [(batch_size, nfmaps_arg), ...]*len(fsz_arg)
            x_max_pool = torch.cat(x_max_pools, 1)
            x_max_pool = nn.Dropout(self.args.dropout_p)(x_max_pool)
            h_arg_vecs.append(x_max_pool)
        h_arg_vec = torch.cat(h_arg_vecs, 1)
        # Gated combination: ReLU capacity * sigmoid gate.
        h_arg_cap = F.relu(self.dense_arg_cap(h_arg_vec))
        h_arg_gate = torch.sigmoid(self.dense_arg_gate(h_arg_vec))
        h_clf_in = h_arg_cap * h_arg_gate
        h_out_imp = self.clf_layer_imp(nn.Dropout(self.args.dropout_p)(h_clf_in))
        h_out_exp = self.clf_layer_exp(nn.Dropout(self.args.dropout_p)(h_clf_in))
        is_exp = 1 - is_imp
        # TODO: cast is_imp and is_exp to float tensor
        # Per-example convex selection of the imp/exp head logits.
        h_out = is_imp.unsqueeze(1).expand_as(h_out_imp).float() * h_out_imp + is_exp.unsqueeze(1).expand_as(h_out_exp).float() * h_out_exp
        return h_out
def iter_data(datas, batch_size = 200):
    """Yield mini-batches over the parallel sequences in *datas*.

    With a single sequence each batch is a plain slice; with several, a list
    of aligned slices.  The final batch may be shorter than *batch_size*.
    """
    # Round the length up to a whole number of batches.
    total = int(math.ceil(float(len(datas[0])) / batch_size)) * batch_size
    single = len(datas) == 1
    for start in range(0, total, batch_size):
        end = start + batch_size
        batch = [d[start:end] for d in datas]
        yield batch[0] if single else batch
def train(train_set, val_set, args, run_id):
    """Train PDTB_Classifier with early stopping on the validation set.

    Saves the best model weights to ``<output_dir>/best_params_<run_id>``.
    Validation is scored separately on implicit vs explicit relations; the
    convergence metric (args.conv_metric) is computed on the implicit part.
    NOTE(review): requires CUDA -- tensors/model are moved with .cuda().
    """
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = torch.device("cpu")
    # Training labels arrive as multilabel lists; keep only the first label.
    train_y = [train_set[-1][ii][0] for ii in range(len(train_set[-1]))]
    # At this point, train set labels will be a list where for each example, we have only one label
    train_set = train_set[:-1]
    train_set.append(train_y)
    # Split validation indices into implicit / explicit subsets (is_imp flag
    # is the second-to-last column of val_set).
    imp_indices_val = np.where(val_set[-2] == 1)[0]
    exp_indices_val = np.where(val_set[-2] == 0)[0]
    y_val_imp = val_set[-1][imp_indices_val]
    y_val_exp = val_set[-1][exp_indices_val]
    val_set = val_set[:-1]
    # model
    clf = PDTB_Classifier(args)
    # clf = clf.to(device)
    clf = clf.cuda()
    # class_weights = torch.from_numpy(np.asarray(args.class_weights, dtype = 'float32')).to(device)
    class_weights = torch.from_numpy(np.asarray(args.class_weights, dtype = 'float32')).cuda()
    # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
    criterion = nn.CrossEntropyLoss(weight = class_weights)
    print('List of trainable parameters: ')
    for name, param in clf.named_parameters():
    # for name, param in clf.parameters():
        if param.requires_grad:
            print(name)
    model_opt = torch.optim.Adam(filter(lambda p: p.requires_grad, clf.parameters()),
                           lr = args.lr,
                           weight_decay = args.l2_weight)
    compute_loss_fct = LossCompute(criterion, model_opt)
    n_epochs = 0
    # Early stopping: f1/acc are maximized, anything else minimized.
    best_val_perf = None
    if args.conv_metric in ['f1' , 'acc']:
        best_val_perf = 0
    else:
        best_val_perf = np.inf
    patience = args.patience
    for i in range(args.n_epochs):
        n_epochs += 1
        print("running epoch", i)
        # actual training starts here
        start_time = time.time()
        tr_loss = 0
        batch_count = 0
        for _train_set in iter_data(shuffle(*train_set, random_state = np.random),
                                    batch_size = args.batch_size):
            clf.train()
            # _train_set = [torch.tensor(data, dtype = torch.long).to(device) for data in _train_set]
            _train_set = [torch.LongTensor(data).cuda() for data in _train_set]
            y_tr = _train_set[-1]
            _train_set = _train_set[:-1]
            clf_logits = clf(_train_set)
            tr_loss += compute_loss_fct(y_tr, clf_logits)
            batch_count += 1
        tr_loss /= batch_count
        print('epoch: %i, training time: %.2f secs, train loss: %.4f' % (n_epochs, time.time() - start_time, tr_loss))
        # Validation pass: collect raw logits without gradients.
        logits = []
        with torch.no_grad():
            clf.eval()
            for _val_set in iter_data(val_set, batch_size = args.batch_size):
                # _val_set = [torch.tensor(data, dtype = torch.long).to(device) for data in _val_set]
                _val_set = [torch.LongTensor(data).cuda() for data in _val_set]
                clf_logits = clf(_val_set)
                logits.append(clf_logits.to("cpu").numpy())
        logits = np.concatenate(logits, 0)
        logits_imp = logits[imp_indices_val]
        logits_exp = logits[exp_indices_val]
        # At present the logits are not probabilities so will only work for multiclass clf
        _, y_true_val_imp, y_pred_val_imp = post_process(-1, logits_imp, y_val_imp, args.multilabel, args.binarize, args.binary_class)
        val_acc_imp, val_macro_f1_imp, val_micro_f1_imp = compute_metrices(y_true_val_imp,
                                                                           y_pred_val_imp,
                                                                           binary_class = args.binary_class,
                                                                           binarize = args.binarize)
        _, y_true_val_exp, y_pred_val_exp = post_process(-1, logits_exp, y_val_exp, args.multilabel, args.binarize, args.binary_class)
        val_acc_exp, val_macro_f1_exp, val_micro_f1_exp = compute_metrices(y_true_val_exp,
                                                                           y_pred_val_exp,
                                                                           binary_class = args.binary_class,
                                                                           binarize = args.binarize)
        print('Imp val acc: %.2f, Imp val micro f1: %.2f, Imp val macro f1: %.2f' % (val_acc_imp * 100.0, val_micro_f1_imp * 100.0, val_macro_f1_imp * 100.0))
        print('Exp val acc: %.2f, Exp val micro f1: %.2f, Exp val macro f1: %.2f' % (val_acc_exp * 100.0, val_micro_f1_exp * 100.0, val_macro_f1_exp * 100.0))
        if args.conv_metric == 'f1':
            val_perf = val_macro_f1_imp
        elif args.conv_metric == 'acc':
            val_perf = val_acc_imp
        # Checkpoint on improvement, otherwise burn patience.
        if (args.conv_metric in ['f1', 'acc'] and val_perf > best_val_perf):
            best_val_perf = val_perf
            patience = args.patience
            print('Saving the best model . . .')
            path = os.path.join(args.output_dir, 'best_params_{0}'.format(run_id))
            torch.save(clf.state_dict(), path)
        else:
            patience -= 1
        if patience <= 0:
            print ('Early stopping . . .')
            break
def test(test_set, args, run_id):
    """Evaluate the saved best model for *run_id* on the test set.

    Reports implicit and explicit metrics separately, dumps the prediction
    pickles, prints classification reports, and returns
    (imp macro-F1, imp acc, exp macro-F1, exp acc).
    """
    # Split test indices into implicit / explicit subsets.
    imp_indices_test = np.where(test_set[-2] == 1)[0]
    exp_indices_test = np.where(test_set[-2] == 0)[0]
    y_test_imp = test_set[-1][imp_indices_test]
    y_test_exp = test_set[-1][exp_indices_test]
    test_set = test_set[:-1]
    # model: rebuild and load the checkpoint written by train().
    clf = PDTB_Classifier(args)
    clf.load_state_dict(torch.load(os.path.join(args.output_dir, 'best_params_{0}'.format(run_id))))
    # clf = clf.to(device)
    clf = clf.cuda()
    logits = []
    with torch.no_grad():
        clf.eval()
        for _test_set in iter_data(test_set, batch_size = args.batch_size):
            # _test_set = [torch.tensor(data, dtype = torch.long).to(device) for data in _test_set]
            _test_set = [torch.LongTensor(data).cuda() for data in _test_set]
            clf_logits = clf(_test_set)
            logits.append(clf_logits.to("cpu").numpy())
    logits = np.concatenate(logits, 0)
    logits_imp = logits[imp_indices_test]
    logits_exp = logits[exp_indices_test]
    # At present the logits are not probabilities so will only work for multiclass clf
    _, y_true_test_imp, y_pred_test_imp = post_process(-1, logits_imp, y_test_imp, args.multilabel, args.binarize, args.binary_class)
    test_acc_imp, test_macro_f1_imp, test_micro_f1_imp = compute_metrices(y_true_test_imp,
                                                                          y_pred_test_imp,
                                                                          binary_class = args.binary_class,
                                                                          binarize = args.binarize)
    _, y_true_test_exp, y_pred_test_exp = post_process(-1, logits_exp, y_test_exp, args.multilabel, args.binarize, args.binary_class)
    test_acc_exp, test_macro_f1_exp, test_micro_f1_exp = compute_metrices(y_true_test_exp,
                                                                          y_pred_test_exp,
                                                                          binary_class = args.binary_class,
                                                                          binarize = args.binarize)
    print('Imp test acc: %.2f, Imp test micro f1: %.2f, Imp test macro f1: %.2f' % (test_acc_imp * 100.0, test_micro_f1_imp * 100.0, test_macro_f1_imp * 100.0))
    print('Exp test acc: %.2f, Exp test micro f1: %.2f, Exp test macro f1: %.2f' % (test_acc_exp * 100.0, test_micro_f1_exp * 100.0, test_macro_f1_exp * 100.0))
    # Persist raw predictions for later analysis.
    pickle.dump([y_true_test_imp, y_pred_test_imp, _ , _, args.class_names], open(os.path.join(args.output_dir, 'best_prediction_imp_' + args.timestamp + '_' + str(run_id) + '.p'), 'wb'))
    pickle.dump([y_true_test_exp, y_pred_test_exp, _ , _, args.class_names], open(os.path.join(args.output_dir, 'best_prediction_exp_' + args.timestamp + '_' + str(run_id) + '.p'), 'wb'))
    print('############################### IMP ###############################')
    print(classification_report(y_true_test_imp, y_pred_test_imp, target_names = args.class_names, labels = range(len(args.class_names)), digits = 4))
    print('############################### EXP ###############################')
    print(classification_report(y_true_test_exp, y_pred_test_exp, target_names = args.class_names, labels = range(len(args.class_names)), digits = 4))
    return test_macro_f1_imp, test_acc_imp, test_macro_f1_exp, test_acc_exp
def main():
    """Load the PDTB data, configure hyperparameters on ``args``, run five
    train/test rounds, and print the averaged implicit/explicit scores."""
    ts = get_time_stamp(args.input_file)
    # Convolution filter heights over the argument word sequences.
    filter_hs_arg = [2, 3, 4, 5]
    train_data, val_data, test_data, class_dict, examples_per_class = load_data_for_mtl(args.input_file, filter_hs_arg[-1], 2)
    # [X_larg, X_rarg, X_lpos, X_rpos, X_lner, X_rner, X_wp, X_wp_rev, X_wp_pos, X_wp_rev_pos, X_wp_ner, X_wp_rev_ner, is_imp, y]
    # Only the argument word/POS streams and the is_imp flag are used here;
    # the NER and word-pair streams are discarded.
    X_tr_larg, X_tr_rarg, X_tr_lpos, X_tr_rpos, _, _, _, _, _, _, _, _, is_imp_tr, Y_tr_all = train_data
    X_val_larg, X_val_rarg, X_val_lpos, X_val_rpos, _, _, _, _, _, _, _, _, is_imp_val, Y_val_all = val_data
    X_te_larg, X_te_rarg, X_te_lpos, X_te_rpos, _, _, _, _, _, _, _, _, is_imp_te, Y_te_all = test_data
    train_set = [X_tr_larg, X_tr_lpos, X_tr_rarg, X_tr_rpos, is_imp_tr, Y_tr_all]
    val_set = [X_val_larg, X_val_lpos, X_val_rarg, X_val_rpos, is_imp_val, Y_val_all]
    test_set = [X_te_larg, X_te_lpos, X_te_rarg, X_te_rpos, is_imp_te, Y_te_all]
    # Hyperparameters are attached to the (module-level) args namespace.
    args.emb_static = True
    args.reg_emb = False # should regularize embeddings or not. Obviously when emb_static is True, reg_emb will be false
    args.fsz_arg = filter_hs_arg
    args.nfmaps_arg = 50
    args.dsz_arg = []
    args.gate_units_arg = 300
    args.nclasses = len(class_dict)
    args.binarize = True if len(class_dict) == 2 else False
    args.binary_class = get_binary_class_index(class_dict)
    args.dropout_p = 0.5
    args.l2_weight = 1e-4
    args.batch_size = 200
    args.class_names = get_class_names(class_dict)
    args.n_epochs = 30
    args.patience = 5
    args.timestamp = ts
    print_params(args)
    # Pre-trained word vectors and a one-hot POS embedding matrix.
    word2idx, pos2idx, _ = pickle.load(open(args.encoder_file, "rb"))
    args.W = get_W(args.w2v_file, word2idx)
    args.P = np.identity(len(pos2idx))
    args.class_weights = get_class_weights(class_dict, examples_per_class)
    print('Class weights:')
    print(args.class_weights)
    f_scores_imp = []
    accuracies_imp = []
    f_scores_exp = []
    accuracies_exp = []
    # Five independent runs; report averages over the runs.
    runs = range(0, 5)
    for run_id in runs:
        print ('Run: ', run_id)
        train(train_set, val_set, args, run_id)
        f1_imp, acc_imp, f1_exp, acc_exp = test(test_set, args, run_id)
        f_scores_imp.append(f1_imp)
        accuracies_imp.append(acc_imp)
        f_scores_exp.append(f1_exp)
        accuracies_exp.append(acc_exp)
    print('avg f1 (imp): %s' % (str(np.mean(f_scores_imp))))
    print('avg acc (imp): %s' % (str(np.mean(accuracies_imp))))
    print('avg f1 (exp): %s' % (str(np.mean(f_scores_exp))))
    print('avg acc (exp): %s' % (str(np.mean(accuracies_exp))))
if __name__ == "__main__":
    # CLI entry point; the parsed ``args`` namespace is read as a global by
    # main()/train()/test().
    parser = argparse.ArgumentParser(description = '')
    parser.add_argument('input_file', help = 'pickled input file generated using \'preprocess_pdtb_relations.py\'')
    parser.add_argument('encoder_file', default = None, type = str, help = 'WordEncoder file')
    parser.add_argument('w2v_file', type = str, default = None, help = 'GoogleNews-vectors-negative300.bin') # GoogleNews-vectors-negative300.bin
    parser.add_argument('output_dir', help = 'directory where you want to save the best model and predictions file')
    parser.add_argument('--conv_metric', default = 'f1', type = str, help = '\'f1\', \'acc\'')
    parser.add_argument('--multilabel', default = True, type = bool, help = 'If True, multilabel evaluation will be done on val and test sets')
    parser.add_argument('--trained_weights_file', default = None, type = str, help = 'file containing the trained model weights')
    parser.add_argument('--opt', default = 'adam', type = str, help = 'opt to use')
    parser.add_argument('--lr', default = 0.0005, type = float, help = 'learning rate to use')
    args = parser.parse_args()
    main()
| [
"varia.siddharth@gmail.com"
] | varia.siddharth@gmail.com |
6eeced6d1506a1def659d8582180f495fff68a7f | 50402cc4388dfee3a9dbe9e121ef217759ebdba8 | /etc/MOPSO-ZDT2/ZDT2-1.py | d0f2faf6d992bb8b09ed659299c095a99a98486a | [] | no_license | dqyi11/SVNBackup | bd46a69ec55e3a4f981a9bca4c8340944d8d5886 | 9ad38e38453ef8539011cf4d9a9c0a363e668759 | refs/heads/master | 2020-03-26T12:15:01.155873 | 2015-12-10T01:11:36 | 2015-12-10T01:11:36 | 144,883,382 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | '''
Created on Jan 26, 2014
@author: daqing_yi
'''
if __name__ == '__main__':
    from PerformanceAnalyzer import *;
    import sys;

    trial_time = 30

    figFolder = sys.path[0] + "\\zdt2"
    caseName = "ZDT2"

    # One (filename suffix, metric label) pair per performance measure.
    # The original ran four copy/paste analyzer passes differing only in
    # these two strings; each pass still runs genData -> plot -> dump in
    # the same order as before (Div, AD, Spread, Efficiency).
    metrics = [("Div", "Diversity"),
               ("AD", "Distance"),
               ("Spread", "Spread"),
               ("Efficiency", "Efficiency")]

    for suffix, label in metrics:
        # One result file per trial, e.g. "ZDT2-0--Div.txt".
        fileList = ["ZDT2-" + str(tt) + "--" + suffix + ".txt"
                    for tt in range(trial_time)]
        analyzer = PerformanceAnalyzer(fileList, figFolder, label, 10)
        analyzer.genData()
        analyzer.plot(caseName)
        analyzer.dump(caseName)
"walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39"
] | walter@e224401c-0ce2-47f2-81f6-2da1fe30fd39 |
902b03e937d195c22fcfeec107c8ea365f4015eb | 842a7c01a33270766fd382c787b0616066522694 | /LinkedList/Leco1290.py | 95104cceacde8f384e3a45df4bf145b99b3a911a | [] | no_license | BubbleMa123/Leetcode | 7d1fc17e6ec8d716ced7accefb86b5805b088081 | 79d4f33034210764d4dbcb51484732a801fd639f | refs/heads/main | 2023-04-02T05:36:32.856999 | 2021-04-08T10:50:15 | 2021-04-08T10:50:15 | 355,212,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | # Definition for singly-linked list.
class ListNode:
    """Node of a singly linked list (LeetCode scaffolding)."""
    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node; None marks the end of the list
# class Solution:
# def getDecimalValue(self, head: ListNode) -> int:
# nums = []
# while head:
# nums.append(head.val)
# head = head.next
# num = 0
# for i in range(len(nums) - 1, -1, -1):
# num += nums[i] * 2 ** (len(nums) - 1 - i)
# return num
class Solution:
    def getDecimalValue(self, head: ListNode) -> int:
        """Read the list's node values as binary digits (most significant
        first) and return the decimal value they encode."""
        value = 0
        node = head
        while node:
            # Shift the accumulated number one binary digit and add the bit.
            value = value * 2 + node.val
            node = node.next
        return value
| [
"76720145+BubbleMa123@users.noreply.github.com"
] | 76720145+BubbleMa123@users.noreply.github.com |
fd39ae92816f5fe6a089e0016756b334239d8254 | 8b8945ef99886034fe6bda7fc34a68252ef7997b | /webapp/api/resources/podcast_resources.py | 06741de832c0a9f049ab3bd8a1003e4252a6ac7b | [] | no_license | asm-products/podato-web | 90bc3463ee4581d97a7863522ac9f308dfb82d1d | e4693c232a25fa4003a2cc8de17327b9fca2fd2a | refs/heads/master | 2021-01-20T23:31:53.813211 | 2015-07-13T21:06:36 | 2015-07-13T21:06:36 | 29,367,441 | 0 | 3 | null | 2015-04-08T21:35:52 | 2015-01-16T20:57:44 | CSS | UTF-8 | Python | false | false | 2,350 | py | import urllib
from flask import abort
from flask_restplus import Resource
from flask_restplus import fields
from flask_restplus import abort
from webapp.utils import AttributeHider
from webapp.api.oauth import oauth
from webapp.api.oauth import AuthorizationRequired
from webapp.api.blueprint import api
from webapp.api.representations import podcast_full_fields, podcast_fields
from webapp.podcasts import Podcast
ns = api.namespace("podcasts")
@ns.route("/<path:podcastId>", endpoint="podcast")
@api.doc(params={"podcastId":"A podcast's id (the same as its URL. If the API returns a podcast with a different URL, it means the podcast has moved."})
class PodcastResource(Resource):
    """Resource that represents a single podcast."""

    @api.marshal_with(podcast_full_fields)
    @api.doc(id="getPodcast")
    def get(self, podcastId):
        """Get a podcast by id (the id is the podcast's percent-encoded URL)."""
        podcastId = urllib.unquote(podcastId)
        podcast = Podcast.get_by_url(podcastId)
        # BUG FIX: use identity comparison for None (`is None`) instead of the
        # original `== None`, which invokes __eq__ and is non-idiomatic.
        if podcast is None:
            abort(404, message="Podcast not found: %s" % podcastId)
        return podcast
# Query-string parser for GET /podcasts/: sort key, metadata filters, and
# pagination controls. All arguments are read from the URL query string.
queryParser = api.parser()
queryParser.add_argument(name="order", required=False, location="args", default="subscriptions")
queryParser.add_argument(name="category", required=False, location="args")
queryParser.add_argument(name="author", required=False, location="args")
queryParser.add_argument(name="language", required=False, location="args")
# Pagination: 1-based page index and page size.
queryParser.add_argument(name="page", default=1, type=int)
queryParser.add_argument(name="per_page", default=30, type=int)
@ns.route("/")
class PodcastQueryResource(Resource):
    """Resource representing the collection of all podcasts."""
    @api.marshal_with(podcast_fields, as_list=True)
    @api.doc(id="query", parser=queryParser)
    def get(self):
        """Query for podcasts."""
        args = queryParser.parse_args()
        query = Podcast.objects
        # Each filter below is applied only when the corresponding query-string
        # argument was supplied; they compose into a single database query.
        if args.get("order"):
            query = query.order_by(args.get('order'))
        if args.get("category"):
            query = query.filter(categories=args.get("category"))
        if args.get("author"):
            query = query.filter(author=args.get("author"))
        if args.get("language"):
            query = query.filter(language=args.get("language"))
        # Return only the items of the requested page (pagination metadata is
        # discarded; the marshalled response is a plain list).
        return query.paginate(page=args["page"], per_page=args["per_page"]).items
| [
"frederikcreemers@gmail.com"
] | frederikcreemers@gmail.com |
7e504807ecaf06d5ef595d1ea58a05ab471ee2b1 | a90547c558f666b8f6717d735b5cd89552e5fc20 | /animal.py | 3858b40150c65651677991c69535a52a34333eca | [] | no_license | Maniss-ai/PythonGame | 8400eaa3bfdc25983141418b0b03b7299e2f8649 | faf76a7f06b60e37357811cdc26d5c0aff681fd8 | refs/heads/main | 2023-01-09T16:33:56.355887 | 2020-11-05T11:58:01 | 2020-11-05T11:58:01 | 310,284,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | import os
import random
import game_config as gc
from pygame import image, transform
# How many tiles currently use each animal asset (each asset appears on at
# most two tiles, forming the matching pairs).
animal_count = {asset: 0 for asset in gc.ASSET_FILES}


def availabe_animals():
    """Return the asset names that are used on fewer than two tiles so far."""
    return [name for name, used in animal_count.items() if used < 2]
class Animal:
    """One tile on the board, backed by a randomly chosen animal image."""

    def __init__(self, index):
        # Flat tile index and its (row, col) position on the square grid.
        self.index = index
        self.row = index // gc.NUM_TILES_SIDE
        self.col = index % gc.NUM_TILES_SIDE
        # Pick an animal that is not yet on two tiles and record the usage
        # in the module-level counter.
        self.name = random.choice(availabe_animals())
        animal_count[self.name] += 1
        self.image_path = os.path.join(gc.ASSET_DIR, self.name)
        self.image = image.load(self.image_path)
        # Scale the sprite to fit inside the tile, leaving MARGIN on each side.
        self.image = transform.scale(self.image, (gc.IMAGE_SIZE - 2*gc.MARGIN, gc.IMAGE_SIZE - 2*gc.MARGIN))
        # Same-sized grey surface used to cover the tile while it is hidden.
        self.box = self.image.copy()
        self.box.fill((200, 200, 200))
        # Presumably set to True once the tile is matched so rendering/game
        # logic can skip it — TODO confirm against the game loop (not visible
        # in this file).
        self.skip = False
| [
"noreply@github.com"
] | noreply@github.com |
b0a4b9177ca36a73a671df684292e3633f5ff0f4 | 24e53f24cbc457cb62555de1f319d279001d8539 | /9.- Clases/python_module_of_the_week.py | dca9d982871f89a1c41427e6fbe0a3aa496d864c | [] | no_license | javierramon23/Python-Crash-Course | c93d4726377ffa96310a0b10490c7f308fb0a7aa | 07fe05f149437d9fdfa9de9dbb1633835a5b5f92 | refs/heads/master | 2021-06-19T15:43:00.232933 | 2019-11-21T20:04:33 | 2019-11-21T20:04:33 | 196,716,791 | 0 | 0 | null | 2021-06-10T21:51:44 | 2019-07-13T12:02:41 | Python | UTF-8 | Python | false | false | 26 | py | '''
http://pymotw.com
''' | [
"javierramon@outlook.com"
] | javierramon@outlook.com |
47e4f83b3f35277e25d4271ecbd28e12c9bdb8d5 | 743f0944edbc7a5e148443422a0c9eac96ec5ea5 | /telaPrincipal/migrations/0017_auto_20190609_2049.py | 44fde0fcb603708373dab5eb8b234a967e097d9f | [] | no_license | phsd/SGV-Prot-tipo | 6db4a4484ef4180d474133d98034303723793ea8 | 9e4fbf97a0452ece58f56e025020219ab0efc280 | refs/heads/master | 2020-04-30T12:31:32.928019 | 2019-08-03T00:01:23 | 2019-08-03T00:01:23 | 176,828,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | # Generated by Django 2.1.7 on 2019-06-09 20:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make HSMTarefasGerais.id_hsmemprocesso an
    optional foreign key that is set to NULL when the referenced
    HSMEmProcesso row is deleted."""

    dependencies = [
        ('telaPrincipal', '0016_auto_20190609_2034'),
    ]

    operations = [
        migrations.AlterField(
            model_name='hsmtarefasgerais',
            name='id_hsmemprocesso',
            # blank/null make the FK optional; SET_NULL avoids cascading deletes.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='telaPrincipal.HSMEmProcesso'),
        ),
    ]
"paulohenrique@live.com.ph"
] | paulohenrique@live.com.ph |
1a6e780d70010693616f7c54d692cdab2c4dfb90 | 24ce5a9988aa0b1b6b57746a93a8fe2b7c7554bc | /scrna_pipeline/nebula/smart-seq2/scripts/04-assign_reads.py | 39c9bdc1f8849337a99b2c8ff3ae385427ca4d73 | [
"MIT"
] | permissive | JarningGau/NGS_pipeline | 7394a3ecc3158fa396c0ff3ed363084e910742d0 | 0e9012f4e68bbc8ce2b1c2f1df5af012e68e0ba2 | refs/heads/master | 2020-07-07T13:35:45.357610 | 2019-09-06T11:35:35 | 2019-09-06T11:35:35 | 203,363,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,582 | py | from parallel import *
from utils import *
import sys
home = sys.argv[1]
project = sys.argv[2]
gene_gtf = sys.argv[3]
TE_gtf = sys.argv[4]
threads = int(sys.argv[5]) # cpu per cmd
batch_size = int(sys.argv[6])
def reads_assign_on_gene(path, summary, bam_in, experiment):
    """Build the shell commands that (1) assign reads in `bam_in` to genes with
    featureCounts, (2) rename the annotated BAM, then (3) sort and (4) index it
    with samtools. Returns the commands as a tuple of strings; nothing is
    executed here. Reads the module-level `gene_gtf` path and `threads` count.
    """
    cmd0 = 'echo "processing %s"\ncd %s' % (experiment, path)
    cmd1 = '''time featureCounts \
    -a %s \
    --extraAttributes gene_id,gene_name,gene_type \
    -T %s \
    -o %s \
    -R BAM %s''' % (gene_gtf, threads, summary, bam_in)
    cmd2 = "mv %s.featureCounts.bam %s.gene_assigned.bam" % (bam_in, experiment)
    cmd3 = "time samtools sort -@ 24 %s.gene_assigned.bam -o %s.gene_assigned.sorted.bam" % (
        experiment, experiment)
    cmd4 = "time samtools index %s.gene_assigned.sorted.bam" % (experiment)
    return (cmd0, cmd1, cmd2, cmd3, cmd4)
def reads_assign_on_TE(path, summary, bam_in, experiment):
    """Build the shell commands that assign reads in `bam_in` to transposable
    elements with featureCounts, rename the annotated BAM, then sort and index
    it with samtools. Returns the commands as a tuple of strings; nothing is
    executed here. Reads the module-level `TE_gtf` path and `threads` count.
    """
    cmd0 = "cd %s" % path
    cmd1 = '''time featureCounts \
    -a %s \
    --extraAttributes gene_id,family_id,class_id \
    -T %s \
    -o %s \
    -R BAM %s''' % (TE_gtf, threads, summary, bam_in)
    cmd2 = "mv %s.featureCounts.bam %s.TE_assigned.bam" % (bam_in, experiment)
    cmd3 = "time samtools sort -@ 24 %s.TE_assigned.bam -o %s.TE_assigned.sorted.bam" % (
        experiment, experiment)
    cmd4 = "time samtools index %s.TE_assigned.sorted.bam" % (experiment)
    return (cmd0, cmd1, cmd2, cmd3, cmd4)
def main():
    """Generate one PBS batch script per batch of assignment commands.

    For every experiment under the project, build the gene- and TE-assignment
    command tuples, split them into batches of `batch_size`, and write each
    batch to `04-assignment-<k>.pbs` under `home` for cluster submission.
    """
    cmds = []
    for experiment in get_experiment(home, project):
        path = os.path.join(home, "data", project, experiment)
        bam_in = "%sAligned.sortedByCoord.out.bam" % experiment
        summary_gene = "%s.gene_assigned" % experiment
        summary_te = "%s.TE_assigned" % experiment
        # Each experiment contributes two command tuples: genes, then TEs.
        cmd_tuple = reads_assign_on_gene(path, summary_gene, bam_in, experiment)
        cmds.append(cmd_tuple)
        cmd_tuple = reads_assign_on_TE(path, summary_te, bam_in, experiment)
        cmds.append(cmd_tuple)
    # Split the flat command list into per-script batches.
    cmd_list = make_parallel(cmds, batch_size)
    for i in range(len(cmd_list)):
        fo = open(os.path.join(home, "04-assignment-%s.pbs" %(i+1)), 'w')
        # PBS header: job name, log/err files, queue, and resource requests.
        fo.write('''#!/bin/sh
#PBS -N preprocess_s4-b%s
#PBS -o preprocess_s4-b%s.log
#PBS -e preprocess_s4-b%s.err
#PBS -q middle
#PBS -l nodes=1:ppn=%s
#PBS -l mem=10G
module load samtools
module load Anaconda3
cd %s
echo "step4 Reads assignment, processing batch-%s"\n
''' % (i+1, i+1, i+1, threads, home, i+1))
        # Append every command of every tuple in this batch, blank-line
        # separated between tuples for readability of the generated script.
        for cmd_tuple in cmd_list[i]:
            for cmd in cmd_tuple:
                fo.write(cmd+"\n")
            fo.write("\n\n")
        fo.close()
if __name__ == '__main__':
main() | [
"jarninggau@gmail.com"
] | jarninggau@gmail.com |
7e0523f23ad99067226163240acb0bb0bd3c3b82 | a2c9dec29f36df1ea36121bc36bd43b82b247f1f | /cs231n/rnn_layers.py | aff15b5b7fe6a70f5da094313ae9da5c24367419 | [] | no_license | FengYen-Chang/cs231-assignment-3 | 9476f673ae1ff6ac825bad53d3327d754eac77ac | a45f55a16236618d296c128d62c41ea435537bbb | refs/heads/master | 2020-04-17T08:43:18.241622 | 2019-01-18T15:28:34 | 2019-01-18T15:28:34 | 166,423,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,046 | py | import numpy as np
"""
This file defines layer types that are commonly used for recurrent neural
networks.
"""
def rnn_step_forward(x, prev_h, Wx, Wh, b):
    """
    Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
    activation function.

    The input data has dimension D, the hidden state has dimension H, and we use
    a minibatch size of N.

    Inputs:
    - x: Input data for this timestep, of shape (N, D).
    - prev_h: Hidden state from previous timestep, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns a tuple of:
    - next_h: Next hidden state, of shape (N, H)
    - cache: Tuple of values needed for the backward pass.
    """
    # Affine combination of recurrent and input paths, squashed by tanh:
    #   h_t = tanh(h_{t-1} Wh + b + x Wx)
    affine = (prev_h.dot(Wh) + b) + x.dot(Wx)
    next_h = np.tanh(affine)
    # Everything the backward pass needs to form local gradients.
    cache = (x, prev_h, Wx, Wh, b, next_h)
    return next_h, cache
def rnn_step_backward(dnext_h, cache):
    """
    Backward pass for a single timestep of a vanilla RNN.

    Inputs:
    - dnext_h: Gradient of loss with respect to next hidden state
    - cache: Cache object from the forward pass

    Returns a tuple of:
    - dx: Gradients of input data, of shape (N, D)
    - dprev_h: Gradients of previous hidden state, of shape (N, H)
    - dWx: Gradients of input-to-hidden weights, of shape (D, H)
    - dWh: Gradients of hidden-to-hidden weights, of shape (H, H)
    - db: Gradients of bias vector, of shape (H,)
    """
    x, prev_h, Wx, Wh, b, next_h = cache
    # Backprop through tanh using its output: d/da tanh(a) = 1 - tanh(a)^2.
    da = (1 - next_h * next_h) * dnext_h
    # The pre-activation was a = x Wx + prev_h Wh + b, so each gradient is a
    # plain matrix product against the corresponding forward input.
    dx = da.dot(Wx.T)
    dprev_h = da.dot(Wh.T)
    dWx = x.T.dot(da)
    dWh = prev_h.T.dot(da)
    # Summing without keepdims yields the documented (H,) shape directly
    # (the original summed with keepdims=True and then reshaped).
    db = np.sum(da, axis=0)
    return dx, dprev_h, dWx, dWh, db
def rnn_forward(x, h0, Wx, Wh, b):
    """
    Run a vanilla RNN forward on an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The RNN uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the RNN forward, we return the hidden states for all timesteps.

    Inputs:
    - x: Input data for the entire timeseries, of shape (N, T, D).
    - h0: Initial hidden state, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns a tuple of:
    - h: Hidden states for the entire timeseries, of shape (N, T, H).
    - cache: Values needed in the backward pass
    """
    N, T, D = x.shape
    H = Wh.shape[0]
    # Time-major buffer (T, N, H) so each step writes one contiguous slice.
    hs = np.zeros((T, N, H))
    prev_h = h0
    for t in range(T):
        # One vanilla-RNN step: h_t = tanh(h_{t-1} Wh + b + x_t Wx).
        prev_h = np.tanh((prev_h.dot(Wh) + b) + x[:, t, :].dot(Wx))
        hs[t] = prev_h
    # Back to batch-major (N, T, H) for the caller.
    h = hs.swapaxes(0, 1)
    cache = (x, h0, Wx, Wh, b, h)
    return h, cache
def rnn_backward(dh, cache):
    """
    Compute the backward pass for a vanilla RNN over an entire sequence of data.

    Inputs:
    - dh: Upstream gradients of all hidden states, of shape (N, T, H)

    Returns a tuple of:
    - dx: Gradient of inputs, of shape (N, T, D)
    - dh0: Gradient of initial hidden state, of shape (N, H)
    - dWx: Gradient of input-to-hidden weights, of shape (D, H)
    - dWh: Gradient of hidden-to-hidden weights, of shape (H, H)
    - db: Gradient of biases, of shape (H,)
    """
    x, h0, Wx, Wh, b, h = cache
    # Time-major views so timestep t lives at index t along axis 0.
    # (The original unpacked x.shape as "N, T, H", mislabeling D as H; only T
    # is actually needed here.)
    x_tm = x.swapaxes(0, 1)    # (T, N, D)
    dh_tm = dh.swapaxes(0, 1)  # (T, N, H)
    h_tm = h.swapaxes(0, 1)    # (T, N, H)
    T = x.shape[1]

    dx = np.zeros(x_tm.shape)
    dWx = np.zeros(Wx.shape)
    dWh = np.zeros(Wh.shape)
    db = np.zeros(b.shape)
    # Gradient carried backward through the hidden state; zero beyond the
    # last timestep.
    dcarry = np.zeros(h0.shape)

    for t in reversed(range(T)):
        # Rebuild the per-step cache: the previous hidden state is h0 for the
        # first timestep and the stored state h_{t-1} otherwise.
        prev_h = h0 if t == 0 else h_tm[t - 1]
        step_cache = (x_tm[t], prev_h, Wx, Wh, b, h_tm[t])
        # Upstream gradient at step t plus the gradient carried from step t+1.
        dx_t, dcarry, dWx_t, dWh_t, db_t = rnn_step_backward(dh_tm[t] + dcarry, step_cache)
        dx[t] = dx_t
        dWx += dWx_t
        dWh += dWh_t
        db += db_t

    # After the t == 0 step, the carried gradient is exactly dL/dh0.
    dh0 = dcarry
    dx = dx.swapaxes(0, 1)
    return dx, dh0, dWx, dWh, db
def word_embedding_forward(x, W):
    """
    Forward pass for word embeddings. We operate on minibatches of size N where
    each sequence has length T. We assume a vocabulary of V words, assigning each
    to a vector of dimension D.

    Inputs:
    - x: Integer array of shape (N, T) giving indices of words. Each element idx
      of x must be in the range 0 <= idx < V.
    - W: Weight matrix of shape (V, D) giving word vectors for all words.

    Returns a tuple of:
    - out: Array of shape (N, T, D) giving word vectors for all input words.
    - cache: Values needed for the backward pass
    """
    # Integer (fancy) indexing gathers W[x[n, t]] for every position at once,
    # replacing the original per-sample Python loop with one vectorized op.
    out = W[x]
    cache = (x, W)
    return out, cache
def word_embedding_backward(dout, cache):
    """
    Backward pass for word embeddings. We cannot back-propagate into the words
    since they are integers, so we only return gradient for the word embedding
    matrix.

    Inputs:
    - dout: Upstream gradients of shape (N, T, D)
    - cache: Values from the forward pass

    Returns:
    - dW: Gradient of word embedding matrix, of shape (V, D).
    """
    x, W = cache
    dW = np.zeros(W.shape)
    # Scatter-add every position's upstream gradient into the row of dW for
    # its word index. np.add.at is unbuffered, so repeated indices accumulate
    # correctly (plain dW[x] += dout would drop duplicate contributions).
    # This replaces the original per-sample Python loop with one call.
    np.add.at(dW, x, dout)
    return dW
def sigmoid(x):
    """
    A numerically stable version of the logistic sigmoid function.
    """
    # Split the input by sign so we only ever exponentiate non-positive
    # values, avoiding overflow in exp() for large-magnitude inputs:
    #   x >= 0: 1 / (1 + exp(-x))
    #   x <  0: exp(x) / (1 + exp(x))
    pos_mask = (x >= 0)
    neg_mask = (x < 0)
    # z holds exp(-|x|) elementwise, which always lies in (0, 1].
    z = np.zeros_like(x)
    z[pos_mask] = np.exp(-x[pos_mask])
    z[neg_mask] = np.exp(x[neg_mask])
    # Numerator: 1 where x >= 0, exp(x) where x < 0.
    top = np.ones_like(x)
    top[neg_mask] = z[neg_mask]
    return top / (1 + z)
def lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b):
    """
    Forward pass for a single timestep of an LSTM.

    The input data has dimension D, the hidden state has dimension H, and we use
    a minibatch size of N.

    Inputs:
    - x: Input data, of shape (N, D)
    - prev_h: Previous hidden state, of shape (N, H)
    - prev_c: previous cell state, of shape (N, H)
    - Wx: Input-to-hidden weights, of shape (D, 4H)
    - Wh: Hidden-to-hidden weights, of shape (H, 4H)
    - b: Biases, of shape (4H,)

    Returns a tuple of:
    - next_h: Next hidden state, of shape (N, H)
    - next_c: Next cell state, of shape (N, H)
    - cache: Tuple of values needed for backward pass.
    """
    H = prev_h.shape[1]
    # One affine transform yields all four gate pre-activations at once,
    # laid out as [input | forget | output | block-input] along axis 1.
    a = x.dot(Wx) + prev_h.dot(Wh) + b
    i = sigmoid(a[:, :H])            # input gate
    f = sigmoid(a[:, H:2 * H])       # forget gate
    o = sigmoid(a[:, 2 * H:3 * H])   # output gate
    g = np.tanh(a[:, 3 * H:])        # candidate cell values
    # Cell update: forget part of the old cell, write the gated candidate.
    next_c = f * prev_c + i * g
    next_h = o * np.tanh(next_c)
    cache = (x, prev_h, prev_c, i, f, o, g, next_c, Wx, Wh, b)
    return next_h, next_c, cache
def lstm_step_backward(dnext_h, dnext_c, cache):
    """
    Backward pass for a single timestep of an LSTM.

    Inputs:
    - dnext_h: Gradients of next hidden state, of shape (N, H)
    - dnext_c: Gradients of next cell state, of shape (N, H)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient of input data, of shape (N, D)
    - dprev_h: Gradient of previous hidden state, of shape (N, H)
    - dprev_c: Gradient of previous cell state, of shape (N, H)
    - dWx: Gradient of input-to-hidden weights, of shape (D, 4H)
    - dWh: Gradient of hidden-to-hidden weights, of shape (H, 4H)
    - db: Gradient of biases, of shape (4H,)
    """
    x, prev_h, prev_c, i, f, o, g, next_c, Wx, Wh, b = cache
    tanh_c = np.tanh(next_c)

    # h = o * tanh(c): backprop into the output gate and into the cell state
    # (the cell receives gradient both from dnext_c and through next_h).
    do = dnext_h * tanh_c
    dc = dnext_c + (1 - tanh_c * tanh_c) * o * dnext_h

    # c = f * prev_c + i * g
    df = dc * prev_c
    dprev_c = dc * f
    di = dc * g
    dg = dc * i

    # Backprop through the gate nonlinearities (sigmoid for i/f/o, tanh for
    # g), expressed via the forward-pass outputs.
    da_i = i * (1 - i) * di
    da_f = f * (1 - f) * df
    da_o = o * (1 - o) * do
    da_g = (1 - g * g) * dg

    # Reassemble the (N, 4H) pre-activation gradient in the forward pass's
    # gate order: [i | f | o | g].
    da = np.concatenate((da_i, da_f, da_o, da_g), axis=1)

    dx = da.dot(Wx.T)
    dWx = x.T.dot(da)
    dprev_h = da.dot(Wh.T)
    dWh = prev_h.T.dot(da)
    # BUG FIX: the original summed with keepdims=True and returned db with
    # shape (1, 4H); the documented contract (and lstm_backward's final
    # reshape) expect (4H,). Summing over the batch axis gives that directly.
    db = np.sum(da, axis=0)

    return dx, dprev_h, dprev_c, dWx, dWh, db
def lstm_forward(x, h0, Wx, Wh, b):
    """
    Forward pass for an LSTM over an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the LSTM forward, we return the hidden states for all timesteps.

    Note that the initial cell state is passed as input, but the initial cell
    state is set to zero. Also note that the cell state is not returned; it is
    an internal variable to the LSTM and is not accessed from outside.

    Inputs:
    - x: Input data of shape (N, T, D)
    - h0: Initial hidden state of shape (N, H)
    - Wx: Weights for input-to-hidden connections, of shape (D, 4H)
    - Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
    - b: Biases of shape (4H,)

    Returns a tuple of:
    - h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
    - cache: Values needed for the backward pass.
    """
    h, cache = None, None
    N, T, D = x.shape
    _, H = h0.shape
    # One per-step cache entry; lstm_backward indexes this list by timestep.
    cache = [None] * T
    # Time-major view (T, N, D) so step i is the contiguous slice x_swap[i].
    x_swap = x.swapaxes(0, 1)
    prev_h = h0
    # The initial cell state is defined to be zero (see docstring).
    prev_c = np.zeros(h0.shape)
    h = np.zeros(N * T * H).reshape(T, N, H)
    for i in range(T) :
        _x = x_swap[i, :, :]
        # Advance hidden and cell state by one timestep, recording the cache.
        prev_h, prev_c, cache[i] = lstm_step_forward(_x, prev_h, prev_c, Wx, Wh, b)
        h[i, :, :] = prev_h
    # Convert the collected states back to batch-major (N, T, H).
    h = h.swapaxes(0, 1)
    return h, cache
def lstm_backward(dh, cache):
    """
    Backward pass for an LSTM over an entire sequence of data.]

    Inputs:
    - dh: Upstream gradients of hidden states, of shape (N, T, H)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient of input data of shape (N, T, D)
    - dh0: Gradient of initial hidden state of shape (N, H)
    - dWx: Gradient of input-to-hidden weight matrix of shape (D, 4H)
    - dWh: Gradient of hidden-to-hidden weight matrix of shape (H, 4H)
    - db: Gradient of biases, of shape (4H,)
    """
    dx, dh0, dWx, dWh, db = None, None, None, None, None
    N, T, H = dh.shape
    # dnext_c stays all-zero: no gradient flows into the final cell state
    # from outside; the carried cell gradient is threaded via _dprev_c below.
    dnext_c = np.zeros(N * H).reshape(N, H)
    # Recover D from the x stored in the first per-step cache entry.
    x, _, _, _, _, _, _, _, _, _, _ = cache[0]
    _, D = x.shape
    # Time-major accumulator for dx; swapped back to (N, T, D) at the end.
    dx = np.zeros(N * T * D).reshape(T, N, D)
    dh_swap = dh.swapaxes(0, 1)
    # Hidden/cell gradients carried backward from the later timestep.
    _dprev_h = np.zeros(N * H).reshape(N, H)
    _dprev_c = np.zeros(N * H).reshape(N, H)
    dWx = np.zeros(4 * D * H).reshape(D, 4 * H)
    dWh = np.zeros(4 * H * H).reshape(H, 4 * H)
    # Accumulated as (1, 4H) so broadcasting accepts either a (4H,) or a
    # (1, 4H) per-step db; flattened to (4H,) before returning.
    db = np.zeros(4 * H).reshape(1, 4 * H)
    # Walk timesteps in reverse: index (T - 1 - i) is the current step; the
    # upstream gradient is added to the carried hidden-state gradient.
    for i in range(T) :
        dnext_h = dh_swap[(T - 1 - i), :, :]
        _dx, _dprev_h, _dprev_c, _dWx, _dWh, _db = lstm_step_backward(dnext_h + _dprev_h, dnext_c + _dprev_c, cache[T - 1 - i])
        dx[(T - 1 - i), :, :] = _dx
        dWx += _dWx
        dWh += _dWh
        db += _db
    db = db.reshape(4 * H)
    dx = dx.swapaxes(0, 1)
    # After the first timestep's backward step, the carried hidden gradient
    # is exactly dL/dh0.
    dh0 = _dprev_h
    return dx, dh0, dWx, dWh, db
def temporal_affine_forward(x, w, b):
    """
    Forward pass for a temporal affine layer. The input is a set of D-dimensional
    vectors arranged into a minibatch of N timeseries, each of length T. We use
    an affine function to transform each of those vectors into a new vector of
    dimension M.

    Inputs:
    - x: Input data of shape (N, T, D)
    - w: Weights of shape (D, M)
    - b: Biases of shape (M,)

    Returns a tuple of:
    - out: Output data of shape (N, T, M)
    - cache: Values needed for the backward pass
    """
    N, T, D = x.shape
    M = b.shape[0]
    # Collapse the batch and time axes, apply one large matrix product,
    # restore the (N, T, M) layout, then broadcast-add the bias.
    flat = x.reshape(N * T, D)
    out = flat.dot(w).reshape(N, T, M) + b
    cache = x, w, b, out
    return out, cache
def temporal_affine_backward(dout, cache):
    """
    Backward pass for temporal affine layer.

    Input:
    - dout: Upstream gradients of shape (N, T, M)
    - cache: Values from forward pass

    Returns a tuple of:
    - dx: Gradient of input, of shape (N, T, D)
    - dw: Gradient of weights, of shape (D, M)
    - db: Gradient of biases, of shape (M,)
    """
    x, w, b, out = cache
    N, T, D = x.shape
    M = b.shape[0]
    # Standard affine gradients computed on the flattened (N*T, ...) views.
    dout_flat = dout.reshape(N * T, M)
    x_flat = x.reshape(N * T, D)
    dx = dout_flat.dot(w.T).reshape(N, T, D)
    dw = x_flat.T.dot(dout_flat)
    db = dout.sum(axis=(0, 1))
    return dx, dw, db
def temporal_softmax_loss(x, y, mask, verbose=False):
    """
    A temporal version of softmax loss for use in RNNs. We assume that we are
    making predictions over a vocabulary of size V for each timestep of a
    timeseries of length T, over a minibatch of size N. The input x gives scores
    for all vocabulary elements at all timesteps, and y gives the indices of the
    ground-truth element at each timestep. We use a cross-entropy loss at each
    timestep, summing the loss over all timesteps and averaging across the
    minibatch.

    As an additional complication, we may want to ignore the model output at some
    timesteps, since sequences of different length may have been combined into a
    minibatch and padded with NULL tokens. The optional mask argument tells us
    which elements should contribute to the loss.

    Inputs:
    - x: Input scores, of shape (N, T, V)
    - y: Ground-truth indices, of shape (N, T) where each element is in the range
         0 <= y[i, t] < V
    - mask: Boolean array of shape (N, T) where mask[i, t] tells whether or not
      the scores at x[i, t] should contribute to the loss.

    Returns a tuple of:
    - loss: Scalar giving loss
    - dx: Gradient of loss with respect to scores x.
    """
    N, T, V = x.shape

    # Flatten batch and time so each of the N*T positions is one softmax row.
    x_flat = x.reshape(N * T, V)
    y_flat = y.reshape(N * T)
    mask_flat = mask.reshape(N * T)

    # Numerically stable softmax: shift logits by their row max before exp.
    probs = np.exp(x_flat - np.max(x_flat, axis=1, keepdims=True))
    probs /= np.sum(probs, axis=1, keepdims=True)
    # Masked cross-entropy on the ground-truth entries, averaged over N only
    # (the sum over timesteps is deliberately kept).
    loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N

    # Softmax + cross-entropy gradient: probs with 1 subtracted at the target
    # index, scaled by 1/N and zeroed wherever the mask is False.
    dx_flat = probs.copy()
    dx_flat[np.arange(N * T), y_flat] -= 1
    dx_flat /= N
    dx_flat *= mask_flat[:, None]

    if verbose: print ('dx_flat: ', dx_flat.shape)

    dx = dx_flat.reshape(N, T, V)

    return loss, dx
| [
"noreply@github.com"
] | noreply@github.com |
7ce4e19ebdd1b09d6936f8c00c013e502d9af991 | 980cb4ee01049f2febb2a5b05f34c93dd8cf4cfa | /repo/guruh_repo.py | fd3e5ad0c022eb3c8363e0025c6336dea95c6f96 | [] | no_license | Fantastik-uchlik/talaba | 2e5745323f098ffe253651157a7d1022fd7eeb87 | c0dbbc10d72ec3ad71abd87e249ab3915fb3c519 | refs/heads/master | 2023-05-05T16:30:31.338612 | 2021-05-29T01:59:36 | 2021-05-29T01:59:36 | 371,858,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,234 | py | from db.db import conn
from model.guruh import Guruh
class GuruhRepo:
    """Repository of CRUD helpers for the `guruh` (group) table."""

    # Shared cursor for all instances; the connection lives in db.db.
    cur = conn.cursor()

    def getAll(self):
        """Return every group as a list of Guruh models."""
        sql = "SELECT id, nom, yunalish, yil FROM guruh;"
        self.cur.execute(sql)
        rows = self.cur.fetchall()
        return list(map(Guruh, rows))

    def getById(self, id):
        """Return the group with the given primary key as a Guruh model."""
        sql = "SELECT id, nom, yunalish, yil FROM guruh WHERE id = %s;"
        self.cur.execute(sql, [id])
        # BUG FIX: DB-API cursors have no fetch(); the original called
        # self.cur.fetch(), which raised AttributeError on every call.
        return Guruh(self.cur.fetchone())

    def getByYunalish(self, yunalish_id):
        """Return all groups of the given direction (yunalish), or False if
        the query fails."""
        sql = "SELECT id, nom, yunalish, yil FROM guruh WHERE yunalish = %s;"
        try:
            self.cur.execute(sql, [yunalish_id])
            rows = self.cur.fetchall()
            return list(map(Guruh, rows))
        except Exception:
            # Narrowed from the original bare `except:`; still preserves the
            # best-effort contract of signalling failure with False.
            return False

    def add(self, g):
        """Insert a new group (the id is assigned by the database)."""
        sql = "INSERT INTO guruh(nom, yunalish, yil) VALUES(%s, %s, %s)"
        self.cur.execute(sql, [g.nom, g.yunalish, g.yil])

    def update(self, g):
        """Update an existing group identified by g.id; returns True."""
        sql = "UPDATE public.guruh SET nom = %s, yunalish = %s, yil = %s WHERE id = %s"
        self.cur.execute(sql, [g.nom, g.yunalish, g.yil, g.id])
        return True

    def deleteById(self, g):
        """Delete the group with the given primary key (g is an id value,
        despite the parameter name)."""
        sql = "DELETE FROM guruh WHERE id = %s"
        self.cur.execute(sql, [g])
"0177_88@mail.ru"
] | 0177_88@mail.ru |
79e39282fe18e3659d7f76f56c3f2ae8ce5dc408 | d62f1c0bd9c35cd8ae681d7465e749d63bb59d4e | /Week1/Codingbat/List-1/same_first_last.py | 43d30b5ee7aa5c509d24f23881f34fe800bd4642 | [] | no_license | Yeldan/BFDjango | 0134a57ec523b08e4ca139ec11c384eeefec6caa | a390e08b8711613040a972e30a25b4035ff58e37 | refs/heads/master | 2020-03-27T15:49:53.859506 | 2018-11-25T22:33:38 | 2018-11-25T22:33:38 | 146,742,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | def same_first_last(nums):
if len(nums) >= 1 and nums[0] == nums[len(nums)-1]:
return True
return False | [
"noreply@github.com"
] | noreply@github.com |
df9f8cafcbb7e3588ead8c12b10dddb6bd3e53d7 | 60c182223370ba1c0358a872d27737bb5b74e5f9 | /scraper/hitad_scraper/items.py | b90644894feab9911d2eb467b3d105998b4f016a | [] | no_license | IsuraManchanayake/CS4642-IR-HitAdHouseAds | 84f26be6ee0d324bbf882af1700e00e7e7c230ed | 1cc42bc2f298489a1edacab5560ee030be19d47d | refs/heads/master | 2020-03-21T00:50:58.625284 | 2018-07-10T14:59:39 | 2018-07-10T14:59:39 | 137,912,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class HitadScraperItem(scrapy.Item):
    """Item container for data scraped by the hitad spider.

    No fields are declared yet; add them as class attributes, e.g.::

        name = scrapy.Field()
    """
| [
"isura.14@cse.mrt.ac.lk"
] | isura.14@cse.mrt.ac.lk |
97a31e802cc8fa3647b21b3d296d581a4b955343 | 4d0e38ecdea3f8f7a627f620899ec5b4989fdd1c | /system/draft/TestPropertyBasedSuite.py | 0427213b95c7561cbc5c4d1607baee131e43afcb | [
"Apache-2.0"
] | permissive | VladimirWork/indy-test-automation | d07361e014da053da39c3a0b29b300ee6b4cee40 | 11838b4061f70275058c4642f7eb755c1c98c367 | refs/heads/master | 2023-03-15T21:53:34.850695 | 2019-07-10T14:33:32 | 2019-07-10T14:33:32 | 154,509,204 | 3 | 0 | Apache-2.0 | 2023-03-05T06:40:28 | 2018-10-24T13:48:45 | Python | UTF-8 | Python | false | false | 11,821 | py | import pytest
from system.utils import *
from hypothesis import settings, given, strategies, Phase, Verbosity
from hypothesis.strategies import composite
from string import printable, ascii_letters
import hashlib
import copy
import os
import sys
@composite
def strategy_for_req_data(draw):
    """Build one (reqId, type, data) triple for a randomized ledger request.

    reqId is a positive integer below 999999999999999, type skips the txn
    types handled elsewhere (6, 7, 119, 20001), and data is a small dict of
    printable text that may nest further dicts a few levels deep.
    """
    key_text = strategies.text(printable, min_size=1)
    flat_dicts = strategies.dictionaries(
        key_text, strategies.text(printable, min_size=1), min_size=1, max_size=5
    )
    nested_dicts = strategies.recursive(
        flat_dicts,
        lambda inner: strategies.dictionaries(key_text, inner, min_size=1, max_size=3),
    )
    req_id = draw(strategies.integers().filter(lambda x: 0 < x < 999999999999999))
    req_type = draw(strategies.integers().filter(lambda x: x not in [6, 7, 119, 20001]))
    payload = draw(nested_dicts)
    return req_id, req_type, payload
@pytest.mark.usefixtures('docker_setup_and_teardown')
class TestPropertyBasedSuite:
    """Property-based (hypothesis) fuzz tests for Indy ledger requests.

    The fixtures used below (pool_handler, wallet_handler,
    get_default_trustee, docker_setup_and_teardown) are presumably provided
    through the star import from system.utils — TODO confirm.
    """
    @pytest.mark.skip('example')
    @settings(deadline=None, max_examples=100)
    @given(var_bin=strategies.binary(5, 25).filter(lambda x: x != b'\x00\x00\x00\x00\x00'),  # <<< filter
           var_char=strategies.characters('S').filter(lambda x: x not in ['@', '#', '$']),  # <<< filter
           var_text=strategies.text(ascii_letters, min_size=10, max_size=10).map(lambda x: x.lower()),  # <<< map
           var_rec=strategies.recursive(strategies.integers() | strategies.floats(),
                                        lambda children:
                                        strategies.lists(children, min_size=3) | strategies.dictionaries(
                                            strategies.text(printable), children, min_size=3),
                                        max_leaves=10),
           var_dt_lists=
           strategies.integers(1, 5).flatmap(lambda x: strategies.lists(strategies.datetimes(), x, x)))  # <<< flatmap
    @pytest.mark.asyncio
    async def test_case_strategies(self, var_bin, var_char, var_text, var_rec, var_dt_lists):
        """Demonstration of hypothesis strategy combinators; skipped by default."""
        print()
        print(var_bin)
        print(var_char)
        print(var_text)
        print(var_rec)
        print(var_dt_lists)
        print('-'*25)
    @settings(deadline=None, max_examples=1000, verbosity=Verbosity.verbose)
    @given(reqid=strategies.integers(min_value=1, max_value=999999999999999),
           dest=strategies.text(ascii_letters, min_size=16, max_size=16),
           # verkey=strategies.text(ascii_letters, min_size=32, max_size=32),
           alias=strategies.text(min_size=1, max_size=10000))
    @pytest.mark.asyncio
    async def test_case_nym(self, pool_handler, wallet_handler, get_default_trustee, reqid, dest, alias):
        """Fuzz a NYM (type '1') request with random dest/role/alias; expects REPLY."""
        trustee_did, trustee_vk = get_default_trustee
        roles = ['0', '2', '101', '201']
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': trustee_did,
            'operation': {
                'type': '1',
                'dest': base58.b58encode(dest).decode(),
                # 'verkey': base58.b58encode(verkey).decode(),
                'role': random.choice(roles),
                'alias': alias
            }
        }
        res = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, json.dumps(req))
        )
        print(req)
        print(res)
        assert res['op'] == 'REPLY'
    @settings(deadline=None, max_examples=250)
    @given(reqid=strategies.integers(min_value=1, max_value=999999999999999),
           xhash=strategies.text().map(lambda x: hashlib.sha256(x.encode()).hexdigest()),
           key=strategies.text(printable),
           value=strategies.text(printable),
           enc=strategies.text(min_size=1))
    @pytest.mark.asyncio
    async def test_case_attrib(self, pool_handler, wallet_handler, get_default_trustee, reqid, xhash, key, value, enc):
        """Fuzz ATTRIB (type '100') in all three variants: hash, raw and enc."""
        trustee_did, trustee_vk = get_default_trustee
        target_did, target_vk = await did.create_and_store_my_did(wallet_handler, '{}')
        res = await send_nym(pool_handler, wallet_handler, trustee_did, target_did, target_vk)
        assert res['op'] == 'REPLY'
        req_base = {
            'protocolVersion': 2,
            'identifier': target_did,
            'operation': {
                'type': '100',
                'dest': target_did
            }
        }
        req1 = copy.deepcopy(req_base)
        req1['reqId'] = reqid + 1
        req1['operation']['hash'] = xhash
        res1 = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, target_did, json.dumps(req1))
        )
        print(req1)
        print(res1)
        assert res1['op'] == 'REPLY'
        req2 = copy.deepcopy(req_base)
        req2['reqId'] = reqid + 2
        req2['operation']['raw'] = json.dumps({key: value})
        res2 = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, target_did, json.dumps(req2))
        )
        print(req2)
        print(res2)
        assert res2['op'] == 'REPLY'
        req3 = copy.deepcopy(req_base)
        req3['reqId'] = reqid + 3
        req3['operation']['enc'] = enc
        res3 = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, target_did, json.dumps(req3))
        )
        print(req3)
        print(res3)
        assert res3['op'] == 'REPLY'
    @settings(deadline=None, max_examples=250)
    @given(reqid=strategies.integers(min_value=1, max_value=999999999999999),
           version=strategies.floats(min_value=0.1, max_value=999.999),
           name=strategies.text(min_size=1),
           attrs=strategies.lists(strategies.text(min_size=1), min_size=1, max_size=125))
    @pytest.mark.asyncio
    async def test_case_schema(self, pool_handler, wallet_handler, get_default_trustee, reqid, version, name, attrs):
        """Fuzz SCHEMA (type '101') with random version/name/attribute names."""
        trustee_did, trustee_vk = get_default_trustee
        creator_did, creator_vk = await did.create_and_store_my_did(wallet_handler, '{}')
        res = await send_nym(pool_handler, wallet_handler, trustee_did, creator_did, creator_vk, None, 'TRUSTEE')
        assert res['op'] == 'REPLY'
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': creator_did,
            'operation': {
                'type': '101',
                'data': {
                    'version': str(version),
                    'name': name,
                    'attr_names': attrs
                }
            }
        }
        res = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, creator_did, json.dumps(req))
        )
        print(req)
        print(res)
        assert res['op'] == 'REPLY'
    @settings(deadline=None, max_examples=250, verbosity=Verbosity.verbose)
    @given(reqid=strategies.integers(min_value=1, max_value=999999999999999),
           tag=strategies.text(printable, min_size=1),
           primary=strategies.recursive(
               strategies.dictionaries(
                   strategies.text(printable, min_size=1), strategies.text(printable, min_size=1),
                   min_size=1, max_size=3),
               lambda x: strategies.dictionaries(strategies.text(printable, min_size=1), x, min_size=1, max_size=3)
           ))
    @pytest.mark.asyncio
    async def test_case_cred_def(self, pool_handler, wallet_handler, get_default_trustee,
                                 reqid, tag, primary):
        """Fuzz CRED_DEF (type '102') referencing a freshly-written schema."""
        trustee_did, trustee_vk = get_default_trustee
        creator_did, creator_vk = await did.create_and_store_my_did(wallet_handler, '{}')
        res = await send_nym(pool_handler, wallet_handler, trustee_did, creator_did, creator_vk, None, 'TRUSTEE')
        assert res['op'] == 'REPLY'
        schema_id, res = await send_schema\
            (pool_handler, wallet_handler, creator_did, random_string(10), '1.0', json.dumps(['attribute']))
        assert res['op'] == 'REPLY'
        await asyncio.sleep(1)
        res = await get_schema(pool_handler, wallet_handler, creator_did, schema_id)
        schema_id, schema_json = await ledger.parse_get_schema_response(json.dumps(res))
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': creator_did,
            'operation': {
                'type': '102',
                'ref': json.loads(schema_json)['seqNo'],
                'signature_type': 'CL',
                'tag': tag,
                'data': {
                    'primary': primary
                }
            }
        }
        res = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, creator_did, json.dumps(req))
        )
        print(res)
        assert res['op'] == 'REPLY'
    @settings(deadline=None, max_examples=10000, verbosity=Verbosity.verbose)
    @given(reqid=strategies.integers(min_value=1, max_value=999999999999999),
           # TODO fine-tune operation structure
           operation=strategies.recursive(strategies.dictionaries(
               strategies.text(printable, min_size=1), strategies.text(printable, min_size=1),
               min_size=1, max_size=5),
               lambda x: strategies.dictionaries(strategies.text(printable, min_size=1), x, min_size=1, max_size=3)))
    @pytest.mark.asyncio
    async def test_case_random_req_op(self, pool_handler, wallet_handler, get_default_trustee, reqid, operation):
        """Random garbage in 'operation' must be rejected client-side (IndyError)."""
        trustee_did, trustee_vk = get_default_trustee
        req = {
            'protocolVersion': 2,
            'reqId': reqid,
            'identifier': trustee_did,
            'operation': operation
        }
        # client-side validation
        with pytest.raises(IndyError):
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, json.dumps(req))
    @settings(deadline=None, max_examples=10000, verbosity=Verbosity.verbose)
    # @given(reqid=strategies.integers(min_value=1, max_value=999999999999999),
    #        _type=strategies.integers().filter(lambda x: x not in [6, 7, 119, 20001]),
    #        # TODO fine-tune data structure
    #        data=strategies.recursive(strategies.dictionaries(
    #            strategies.text(printable, min_size=1), strategies.text(printable, min_size=1),
    #            min_size=1, max_size=5),
    #            lambda x: strategies.dictionaries(strategies.text(printable, min_size=1), x, min_size=1, max_size=3)))
    @given(values=strategy_for_req_data())
    @pytest.mark.asyncio
    async def test_case_random_req_data(
            self, pool_handler, wallet_handler, get_default_trustee, values
    ):
        """Well-formed requests with a random unknown type must be REQNACKed by nodes."""
        trustee_did, trustee_vk = get_default_trustee
        req = {
            'protocolVersion': 2,
            'reqId': values[0],
            'identifier': trustee_did,
            'operation': {
                'type': str(values[1]),
                'data': values[2]
            }
        }
        print(req)
        res = json.loads(
            await ledger.sign_and_submit_request(pool_handler, wallet_handler, trustee_did, json.dumps(req))
        )
        print(res)
        # server-side static validation
        try:
            assert res['op'] == 'REQNACK'
        except KeyError:
            # Some responses come back keyed per node; every node must REQNACK.
            res = {k: json.loads(v) for k, v in res.items()}
            assert all([v['op'] == 'REQNACK' for k, v in res.items()])
| [
"vladimir.shishkin@dsr-corporation.com"
] | vladimir.shishkin@dsr-corporation.com |
7707d072af3f7970dea14838853c5f34c3fd18f8 | 886308689f87c162bef933d74ff213005733506b | /ExpPhysFinalProyect/ExtraCode/madgraph_cards.py | 2fdf50d0738b459c18f14cb01878dc4aecbde560 | [] | no_license | sfuenzal/F-sicaExperimental | 3304bbdf510248244d60fcc0507fb95313b2dd84 | 31667d48a7ef97e9ae4d26d0dadda364cb371cf8 | refs/heads/master | 2023-07-13T06:49:15.860430 | 2021-08-12T18:25:10 | 2021-08-12T18:25:10 | 372,877,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,113 | py | class MadgraphCardsGenerator_ttbar(object):
    def __init__(self, number_initializer):
        # Run index used to build unique output directory / result paths.
        self.number = number_initializer
    def Generator(self):
        """Return the MadGraph/MadEvent command lines for a semileptonic
        p p > t t~ run (7+7 TeV, 50k events), writing results under
        ttbar_events/ttbar<number>."""
        aux = ["set group_subprocesses Auto"
        ,"set ignore_six_quark_processes False"
        ,"set gauge unitary"
        ,"set complex_mass_scheme False"
        ,"import model sm"
        ,"define p = g u c d s u~ c~ d~ s~"
        ,"define j = g u c d s u~ c~ d~ s~"
        ,"define l+ = e+ mu+"
        ,"define l- = e- mu-"
        ,"define vl = ve vm vt"
        ,"define vl~ = ve~ vm~ vt~"
        ,"generate p p > t t~ > l- vl~ b b~ j j"
        ,"add process p p > t t~ > l+ vl b b~ j j"
        ,"output /data/atlas/dbetalhc/exphys/ttbar_events/ttbar" + str(self.number)
        ,"launch -i"
        ,"multi_run 1"
        ,"pythia=ON"
        ,"pgs=OFF"
        ,"delphes=ON"
        ,"set run_card ptj 40"
        ,"set cut_decays True"
        ,"set ptb 40"
        ,"set ptl 40"
        ,"set ebeam1 7000"
        ,"set ebeam2 7000"
        ,"set nevents 50000"
        ,"print_results --path=/data/atlas/dbetalhc/exphys/ttbar_events/ttbar" + str(self.number) + "/cs.txt --format=short"]
        return aux
class MadgraphCardsGenerator_tW(object):
    """Builds the MadGraph command card for single-top + W (tW) production.

    NOTE(review): near-duplicate of MadgraphCardsGenerator_ttbar/_WW; only the
    process lines, proton definition, event count and paths differ.
    """
    def __init__(self, number_initializer):
        # Run index used to build unique output directory / result paths.
        self.number = number_initializer
    def Generator(self):
        """Return the command lines for p p > t w- / t~ w+ (10k events),
        writing results under tW_events/tW<number>."""
        aux = ["set group_subprocesses Auto"
        ,"set ignore_six_quark_processes False"
        ,"set gauge unitary"
        ,"set complex_mass_scheme False"
        ,"import model sm"
        ,"define p = g u c d s b u~ c~ d~ s~ b~"
        ,"define j = g u c d s u~ c~ d~ s~"
        ,"define l+ = e+ mu+"
        ,"define l- = e- mu-"
        ,"define vl = ve vm vt"
        ,"define vl~ = ve~ vm~ vt~"
        ,"generate p p > t w-"
        ,"add process p p > t~ w+"
        ,"output /data/atlas/dbetalhc/exphys/tW_events/tW" + str(self.number)
        ,"launch -i"
        ,"multi_run 1"
        ,"pythia=ON"
        ,"pgs=OFF"
        ,"delphes=ON"
        ,"set run_card ptj 40"
        ,"set cut_decays True"
        ,"set ptb 40"
        ,"set ptl 40"
        ,"set ebeam1 7000"
        ,"set ebeam2 7000"
        ,"set nevents 10000"
        ,"print_results --path=/data/atlas/dbetalhc/exphys/tW_events/tW" + str(self.number) + "/cs.txt --format=short"]
        return aux
class MadgraphCardsGenerator_WW(object):
    """Builds the MadGraph command card for W-pair (WW) production.

    NOTE(review): unlike the ttbar/tW generators, this card uses
    "set ptj 40" (no "run_card" qualifier) and has no "set cut_decays" —
    confirm whether that difference is intentional.
    """
    def __init__(self, number_initializer):
        # Run index used to build unique output directory / result paths.
        self.number = number_initializer
    def Generator(self):
        """Return the command lines for p p > w+ w- (50k events), writing
        results under WW_events/WW<number>."""
        aux = ["set group_subprocesses Auto"
        ,"set ignore_six_quark_processes False"
        ,"set gauge unitary"
        ,"set complex_mass_scheme False"
        ,"import model sm"
        ,"define p = g u c d s u~ c~ d~ s~"
        ,"define j = g u c d s u~ c~ d~ s~"
        ,"define l+ = e+ mu+"
        ,"define l- = e- mu-"
        ,"define vl = ve vm vt"
        ,"define vl~ = ve~ vm~ vt~"
        ,"generate p p > w+ w-"
        ,"output /data/atlas/dbetalhc/exphys/WW_events/WW" + str(self.number)
        ,"launch -i"
        ,"multi_run 1"
        ,"pythia=ON"
        ,"pgs=OFF"
        ,"delphes=ON"
        ,"set ptj 40"
        ,"set ptb 40"
        ,"set ptl 40"
        ,"set ebeam1 7000"
        ,"set ebeam2 7000"
        ,"set nevents 50000"
        ,"print_results --path=/data/atlas/dbetalhc/exphys/WW_events/WW" + str(self.number) + "/cs.txt --format=short"]
        return aux
# Write one MadGraph card file per process type (ttbar / tW / WW) for each of
# the 11 run indices 0..10; runs at import time (no __main__ guard).
for i in range(0, 11):
    obj = [MadgraphCardsGenerator_ttbar(i), MadgraphCardsGenerator_tW(i), MadgraphCardsGenerator_WW(i)]
    paths = ["/user/e/exphys02/F-sicaExperimental/ExpPhysFinalProyect/CharginoPairProduction/ttbar_cards/" + "ttbar" + str(i) + ".dat"
    ,"/user/e/exphys02/F-sicaExperimental/ExpPhysFinalProyect/CharginoPairProduction/tW_cards/" + "tW" + str(i) + ".dat"
    ,"/user/e/exphys02/F-sicaExperimental/ExpPhysFinalProyect/CharginoPairProduction/WW_cards/" + "WW" + str(i) + ".dat"]
    for j in range(len(obj)):
        textfile = open(paths[j], "w")
        # NOTE(review): Generator() rebuilds the whole command list on every
        # iteration (twice per line); hoisting it into a local would be cheaper.
        for k in range(len(obj[j].Generator())):
            textfile.write(obj[j].Generator()[k] + "\n")
        textfile.close()
| [
"sebastian.fuenzalida.garrido@gmail.com"
] | sebastian.fuenzalida.garrido@gmail.com |
8fb52d1f5f6352d64b9f99747a87aaf4c438bd26 | 857892283054b9ce416404ca2bb58aea841e7bd4 | /django_async_test/tests/test_testcase.py | 45951fa1b6fde426d8117532ed184d3b22a881e0 | [
"MIT"
] | permissive | alexhayes/django-async-test | 1effa10da547c34d0f921db623c3a75129d01920 | 0b624b16e6146a1ea9a5dd92c38cc07cdf3b3119 | refs/heads/master | 2023-01-12T05:00:26.027052 | 2016-05-09T21:09:23 | 2016-05-09T21:09:23 | 48,104,428 | 8 | 0 | MIT | 2022-12-26T20:23:37 | 2015-12-16T10:44:25 | Python | UTF-8 | Python | false | false | 1,974 | py | # -*- coding: utf-8 -*-
"""
django_async_test.tests.testcase
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :py:class:`django_async_test.TestCase`.
"""
import unittest
from django.test import TestCase
from django_async_test.tests.testapp.models import ModelWithBasicField
class TestCaseTestCase(TestCase):
    """Runs django_async_test's DummyTestCase methods inside a child test
    suite and checks both that they pass and that DB transactions roll back
    between consecutive runs."""
    def assertTests(self, tests):
        """Run *tests* in a fresh unittest suite; print any tracebacks, then
        fail unless every test passed without errors or failures."""
        suite = unittest.TestSuite()
        suite.addTests(tests)
        result = unittest.TestResult()
        suite.run(result)
        # Surface tracebacks (errors first, then failures) so a red run is
        # diagnosable from the console.  Renamed the loop variable from
        # `traceback`, which shadowed the stdlib module name.
        for _testcase, tb in result.errors + result.failures:
            print(tb)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(len(result.failures), 0)
    def test_transaction_support(self):
        """
        Test transaction support of :py:class:`django_async_test.TestCase`.
        Running the same dummy test twice must leave no rows behind.
        """
        from django_async_test.tests.testapp.util import DummyTestCase
        self.assertTests([
            DummyTestCase('test_transaction_support'),
            DummyTestCase('test_transaction_support')]
        )
        self.assertEqual(ModelWithBasicField.objects.count(), 0)
    def test_coroutine(self):
        """
        Test coroutine support of :py:class:`django_async_test.TestCase`.
        """
        from django_async_test.tests.testapp.util import DummyTestCase
        self.assertTests([DummyTestCase('test_coroutine')])
    def test_transactional_coroutine(self):
        """
        Test transactional coroutine support of :py:class:`django_async_test.TestCase`.
        """
        from django_async_test.tests.testapp.util import DummyTestCase
        self.assertTests([
            DummyTestCase('test_transactional_coroutine'),
            DummyTestCase('test_transactional_coroutine')]
        )
        self.assertEqual(ModelWithBasicField.objects.count(), 0)
| [
"alex@alution.com"
] | alex@alution.com |
a636b852ab2787fc83d56af992caeacc384175e2 | 4df6f4f63fe7275ea71bf7fa8268a2c7ab9ed85d | /Heaps/kth_smallest.py | ab03d0fdfc6cbbfc73e96b6c727cc34a46621b97 | [] | no_license | ambuj991/Data-structure-and-algorithms | 7d328cdc7018ac505427e3c0246036abb086e504 | aaac4e0ce8d7f40e9fc70f2aa77c3d6967384957 | refs/heads/main | 2023-07-09T07:46:08.537322 | 2021-08-14T13:40:14 | 2021-08-14T13:40:14 | 396,015,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py |
import heapq
def kthSmallest(iterable, k):
smallest = []
heapq.heapify(smallest)
for value in iterable:
if (len(smallest) < k):
heapq.heappush(smallest, -value)
else:
heapq.heappushpop(smallest, -value)
if (len(smallest) < k):
return None
return -smallest[0]
arr = list(map(int, input().split()))
print(kthSmallest(arr,3))
| [
"noreply@github.com"
] | noreply@github.com |
e6537f2dd2577dc7cd6a0d0a7a8d6f98ce698029 | fb31988d2281ec31aa8dcb9aaeef5e4479a277fb | /Chapter 8/challenge8_4class.py | a2ca4a190f55141bc39720b747a26ba91f623a29 | [] | no_license | mwrouse/Python | 6bccf8a9058b640b3a0ddfc0d1f4a513d7d7c66e | 913a0cb8a1aaef061351ac37f85830f9cc462491 | refs/heads/master | 2020-06-04T01:16:19.147486 | 2014-12-22T14:41:02 | 2014-12-22T14:41:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | """
Program: challenge8_4class.py
Author: Michael Rouse
Date: 12/12/13
Description: Class object for challenge8_4.py
"""
class Critter(object):
"""A virtual pet"""
def __init__(self, name, hunger=0, boredom=0):
self.name = name
self.hunger = hunger
self.boredom = boredom
def __pass_time(self):
self.hunger += 1
self.boredom += 1
def __str__(self):
return ""
@property
def mood(self):
unhappiness = self.hunger + self.boredom
if unhappiness < 5:
m = "happy"
elif 5 <= unhappiness <= 10:
m = "okay"
elif 11 <= unhappiness <= 15:
m = "frustrated"
else:
m = "mad"
return m
def talk(self):
print(self.name + " is " + self.mood + ".")
self.__pass_time()
def eat(self, food=4):
print("Brruppp! " + self.name + " has been feed.")
self.hunger -= food
if self.hunger < 0:
self.hunger = 0
self.__pass_time()
def play(self, fun=4):
print("Wheee! " + self.name + " had lots of fun.")
self.boredom -= fun
if self.boredom < 0:
self.boredom = 0
self.__pass_time()
| [
"michael@michaelrouse.net"
] | michael@michaelrouse.net |
63c2932c0cec8f0d027cd3a620d55a0b87a292f6 | ea09d6e9878f209cad2038bbe69e91f8fecab9b7 | /fs20/pcs.py | a4f8faec0e4468ca61a7797f080b293414db1362 | [
"MIT"
] | permissive | dprokscha/pyfs20 | 7505e59d61face401a97d4062d1488fa1a6f540d | 4749cf3e9a6f5922ac72aef5ee1432ed444479f1 | refs/heads/master | 2021-01-01T19:56:10.383619 | 2013-07-01T15:41:13 | 2013-07-01T15:41:13 | 9,840,976 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,518 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Daniel Prokscha
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from array import array
import usb
# USB device ID of FS20 PCS.
ID_PRODUCT = 0xe015
ID_VENDOR = 0x18ef
# I/O endpoints.
ENDPOINT_READ = 0x81  # IN endpoint (device -> host); used by device.read()
ENDPOINT_WRITE = 0x01  # OUT endpoint (host -> device); used by device.write()
# Possible data frames (raw byte-string prefixes sent verbatim to the device).
DATAFRAME_SEND_ONCE = '\x01\x06\xf1'
DATAFRAME_SEND_MULTIPLE = '\x01\x07\xf2'
DATAFRAME_STOP_MULTIPLE_SENDING = '\x01\x01\xf3'
DATAFRAME_VERSION = '\x01\x01\xf0'
# Possible response codes (status byte at offset 3 of a device response).
RESPONSE_DATAFRAME_UNKNOWN = 0x02
RESPONSE_DATAFRAME_MISMATCH = 0x03
RESPONSE_FIRMWARE_REQUEST_OK = 0x01
RESPONSE_OK = 0x00
RESPONSE_STOP_MULTIPLE_SENDING_OK = 0x04
RESPONSE_STOP_MULTIPLE_SENDING_NOT_SENT = 0x05
class PCS:
    """
    Handles I/O of the FS20 PCS USB transmitter (via pyusb).
    """
    def _get_device(self):
        """
        Returns FS20 PCS device instance.
        Returns:
            >>> self._get_device()
            <usb.core.Device object>
        Raises:
            DeviceNotFound: If FS20 PCS is not connected or can't be found.
        """
        device = usb.core.find(idVendor=ID_VENDOR,
                               idProduct=ID_PRODUCT)
        if device is None:
            raise DeviceNotFound('FS20 PCS not found.')
        # Set configuration if there is no active one.
        try:
            device.get_active_configuration()
        except Exception:
            device.set_configuration()
        # Force I/O if device seems to be busy.
        try:
            device.detach_kernel_driver(0)
        except Exception:
            pass  # best effort: no kernel driver attached, or not supported
        return device
    def _get_raw_address(self, address):
        """
        Returns a raw address.
        Args:
            address: Byte string which represents a fully qualified address.
        Returns:
            >>> self._get_raw_address('\xff\xff\xff')
            '\xff\xff\xff'
        Raises:
            InvalidInput: If more or less than 3 bytes given.
        """
        if 3 == len(address):
            return address
        raise InvalidInput('Invalid address given (3 bytes expected).')
    def _get_raw_command(self, command):
        """
        Returns a raw command, which is always two bytes long
        (a one-byte command is padded with a trailing NUL).
        Args:
            command: Byte string which represents a fully qualified command.
        Returns:
            >>> self._get_raw_address('\x00\x00')
            '\x00\x00'
            >>> self._get_raw_address('\x01')
            '\x01\x00'
        Raises:
            InvalidInput: If more than two or less than one bytes given.
        """
        if 2 == len(command):
            return command
        elif 1 == len(command):
            return command + '\x00'
        raise InvalidInput('Invalid command given (1-2 bytes expected).')
    def _get_raw_interval(self, interval):
        """
        Returns a raw interval.
        Args:
            interval: Integer value which represents an interval.
        Returns:
            >>> self._get_raw_interval(15)
            '\x0f'
        Raises:
            InvalidInput: If the given interval is not between 1 and 255.
        """
        if 1 <= int(interval) <= 255:
            return chr(int(interval))
        raise InvalidInput('Invalid interval given (1-255 expected).')
    def _get_response(self):
        """
        Returns the response of FS20 PCS (after sending commands).
        Returns:
            A two-element slice [status, payload] of the raw response, e.g.
            >>> self._get_response()
            array('B', [0, 0])
        Raises:
            DeviceDataframeUnknown: If an unknown data frame was sent to FS20 PCS.
            DeviceDataframeMismatch: If FS20 PCS can't handle the sent data frame.
            DeviceInvalidResponse: If FS20 PCS returns an invalid response
                (including no response at all, e.g. on a read timeout).
        """
        try:
            response = self._get_device().read(ENDPOINT_READ, 5, timeout=500)
        except Exception:
            response = ''  # read failed/timed out; header check below fails -> DeviceInvalidResponse
        if response[0:3] == array('B', [0x02, 0x03, 0xa0]):
            if response[3] in [RESPONSE_STOP_MULTIPLE_SENDING_OK,
                               RESPONSE_STOP_MULTIPLE_SENDING_NOT_SENT,
                               RESPONSE_FIRMWARE_REQUEST_OK,
                               RESPONSE_OK]:
                return response[3:5]
            elif RESPONSE_DATAFRAME_UNKNOWN == response[3]:
                raise DeviceDataframeUnknown('Unknown data frame sent to device.')
            elif RESPONSE_DATAFRAME_MISMATCH == response[3]:
                raise DeviceDataframeMismatch('Device can not handle data frame.')
        raise DeviceInvalidResponse('Invalid response from device.')
    def _write(self, dataframe, with_response=True):
        """
        Writes the given data frame to FS20 PCS.
        Args:
            dataframe: Byte string which represents a fully qualified data frame.
            with_response: Boolean value whether to get a response from the sent command.
        Returns:
            Depends on the given data frame.  With with_response=False no
            device read happens and a synthetic array('B', [RESPONSE_OK, 0])
            is returned instead.
        """
        self._get_device().write(ENDPOINT_WRITE, dataframe)
        if with_response:
            return self._get_response()
        return array('B', [RESPONSE_OK, 0])
    def get_version(self):
        """
        Returns the firmware version of FS20 PCS.
        Returns:
            >>> self.get_version()
            'v1.7'
        """
        version = str(self._write(DATAFRAME_VERSION)[1])
        return 'v%s.%s' % (version[0], version[1])
    def send_multiple(self, address, command, time='\x00', interval=1):
        """
        Sends the given command multiple times for the given address.
        Args:
            address: Byte string which represents a fully qualified address.
            command: Byte string which represents a fully qualified command.
            time: Byte string which represents a fully qualified time.
            interval: Interval between 1 and 255 how often the command should be sent.
        Returns:
            The integer status byte.  No device response is read here
            (with_response=False), so this is always RESPONSE_OK, e.g.
            >>> self.send_multiple('\x00\x00\x00', '\x10', interval=10)
            0
        """
        return self._write( DATAFRAME_SEND_MULTIPLE
                          + self._get_raw_address(address)
                          + self._get_raw_command(command + time)
                          + self._get_raw_interval(interval)
                          , False
                          )[0]
    def send_once(self, address, command, time='\x00'):
        """
        Sends the given command once for the given address.
        Args:
            address: Byte string which represents a fully qualified address.
            command: Byte string which represents a fully qualified command.
            time: Byte string which represents a fully qualified time.
        Returns:
            The integer status byte from the device response, e.g.
            >>> self.send_once('\x00\x00\x00', '\x10')
            0
        """
        return self._write( DATAFRAME_SEND_ONCE
                          + self._get_raw_address(address)
                          + self._get_raw_command(command + time)
                          )[0]
    def stop_multiple_sending(self):
        """
        Stops instantly the multiple sending of a command.
        Returns:
            The integer status byte (RESPONSE_STOP_MULTIPLE_SENDING_OK or
            RESPONSE_STOP_MULTIPLE_SENDING_NOT_SENT), e.g.
            >>> self.stop_multiple_sending()
            4
        """
        return self._write(DATAFRAME_STOP_MULTIPLE_SENDING)[0]
# Module exceptions.
class DeviceDataframeMismatch(Exception):
    """Device answered RESPONSE_DATAFRAME_MISMATCH: it cannot handle the sent frame."""
    pass
class DeviceDataframeUnknown(Exception):
    """Device answered RESPONSE_DATAFRAME_UNKNOWN: the sent frame type is unknown."""
    pass
class DeviceInvalidResponse(Exception):
    """The device returned no data (e.g. read timeout) or a malformed response."""
    pass
class DeviceNotFound(Exception):
    """No USB device with the expected FS20 PCS vendor/product id was found."""
    pass
class InvalidInput(Exception):
    """An address, command or interval argument had the wrong length or range."""
    pass | [
"dp.online@googlemail.com"
] | dp.online@googlemail.com |
eabfec2e4c0257175b2f88f159573dc90713903f | faaad3f79c5409ba87c32648562097a611884800 | /app/app/migrations/0008_auto__add_field_partner_enabled.py | a3d5859a39c05f0df938dd399d231cd774ed6a0c | [] | no_license | ahguerilla/movements | d320cf4e59549f9aebb9c534ce4ae9c468189915 | a2065b65ff96391571390d4d44744566b5f298ac | refs/heads/master | 2020-12-29T02:32:05.568280 | 2018-05-11T16:22:00 | 2018-05-11T16:22:00 | 55,590,490 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,276 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the boolean 'enabled' column (default True) to app_partner."""
        # Adding field 'Partner.enabled'
        db.add_column(u'app_partner', 'enabled',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse: drop the 'enabled' column from app_partner."""
        # Deleting field 'Partner.enabled'
        db.delete_column(u'app_partner', 'enabled')
models = {
u'app.menuextension': {
'Meta': {'object_name': 'MenuExtension'},
'extended_object': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.Page']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_extension': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'draft_extension'", 'unique': 'True', 'null': 'True', 'to': u"orm['app.MenuExtension']"}),
'show_on_footer_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_on_top_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'app.newslettersignups': {
'Meta': {'ordering': "('-registered_date',)", 'object_name': 'NewsletterSignups'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'registered_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'app.notificationping': {
'Meta': {'object_name': 'NotificationPing'},
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_email_to': ('django.db.models.fields.EmailField', [], {'max_length': '75'})
},
u'app.partner': {
'Meta': {'object_name': 'Partner'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'app.safevpnlink': {
'Meta': {'object_name': 'SafeVPNLink', '_ormbases': ['cms.CMSPlugin']},
'base_url': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'link_text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['app'] | [
"bazerk@gmail.com"
] | bazerk@gmail.com |
48dbfd7b8a9ff2b153aa35b2ec8208aca946ed0f | 851c1b7b5cabc660d1dac448fb61746d1f66237e | /week02/week02-03-succ.py | 2ea9219e796488a14f5f81f48858978165312898 | [] | no_license | ntut0002/coding_365 | 0c89edf3d193380c64015800e6a646553a9047be | d5b0b01333943cc611fb1c5ea9713cf6ffcf246f | refs/heads/master | 2020-03-24T03:17:03.127353 | 2018-08-11T04:21:03 | 2018-08-11T04:21:03 | 142,412,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | type1or2 = int(input())
# `type1or2` (read on the previous input line) selects the figure style;
# `a` is the figure size (rows drawn = a//2 + 1 on the way up).
a = int(input())
# Mode 1: ascending rows "1", "12", ... then descending back down.
if type1or2 == 1 :
    for j in range(1, (a//2+1)+1 ):
        for i in range(1,j+1):
            print(i,end='')
        print()
    for j in range((a//2), 0, -1):
        for i in range(1,j+1):
            print(i,end='')
        print()
######type2
# Mode 2: dot-padded right-aligned rows of descending digits "j...21".
L = []
if type1or2 == 2 :
    for j in range(1, (a//2+1)+1):
        for i in range(j, 0, -1):
            L.append(i)
        L2 = list(map(str,L))
        L3 = ''.join(L2)
        print('%s%s'%(((a//2+1)-j)*'.',L3))
        L = []
    # NOTE(review): unlike the ascending loop above, this descending loop
    # never resets L between rows, so digits accumulate across iterations —
    # verify this asymmetric output is intended.
    for j in range((a//2), 0, -1):
        for i in range(j, 0, -1):
            L.append(i)
        L2 = list(map(str,L))
        L3 = ''.join(L2)
        print('%s%s'%(((a//2+1)-j)*'.',L3))
L = [] | [
"ntut0002@gmail.com"
] | ntut0002@gmail.com |
2151cceac149e0509db788b0da44d68c4d1cd4cb | 3e24611b7315b5ad588b2128570f1341b9c968e8 | /Pseudo_Finder.py | 2d5054ccbc1b1928f339f8fd026680b8d0102af6 | [
"BSD-2-Clause"
] | permissive | bioCKO/lpp_Script | dc327be88c7d12243e25557f7da68d963917aa90 | 0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2 | refs/heads/master | 2022-02-27T12:35:05.979231 | 2019-08-27T05:56:33 | 2019-08-27T05:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,854 | py | #!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2015/10/19
"""
from lpp import *
import os
from optparse import OptionParser
def check_path( path ):
if not os.path.exists(path):
os.makedirs( path )
return os.path.abspath(path)+'/'
def GBLASTA( protein,assemblyresult,output ):
#os.system("""makeblastdb -in %s -title Assem -parse_seqids -out Assem -dbtype nucl"""%(assemblyresult))
COMMAND = open("gblasta_run.bat",'w')
RAW = fasta_check(open(protein,'rU'))
i=0
for t,s in RAW:
i+=1
COMMAND.write("""
genblast -P blast -q $input -t %s -o $output
"""%(assemblyresult))
os.system("""
Genblast_Run.py -i %s -s %s -c %s -o %s
"""%(
protein,COMMAND.name, i,output
)
)
def ParseGblasta(gbaresult,genewiseruncommand):
COMMAND = open(genewiseruncommand,'w')
cache_path = check_path("CACHE/")
i=0
data_cache_hash = {}
GBA = block_reading(open(gbaresult,'rU'), re.escape("//******************END*******************//") )
i=0
for e_b in GBA:
i+=1
k=0
gb_block = re.split("\n\n+", e_b)
if "for query:" not in e_b:
continue
proteinid = re.search("for query\:\s+(\S+)", e_b).group(1)
for align in gb_block[1:]:
if "gene cover" not in align:
continue
aligndata = re.search("cover\:\d+\((\S+)\%\)\|score:([^\|]+)", align)
perc = float(aligndata.group(1))
score = float(aligndata.group(2))
if perc >=80:
i+=1
if i not in data_cache_hash:
PRO= open(cache_path+'%s.pep'%(i),'w')
PRO.write(proteinseqHash[proteinid])
data_cache_hash[i] = [PRO.name]
k+=1
NUC = open(cache_path+'%s_%s.nuc'%(i,k),'w')
align_detail = align.split("\n")[0]
align_detail_list = align_detail.split("|")
subject_detail = align_detail_list[1]
scaffold_name = subject_detail.split(":")[0]
direct = align_detail_list[2]
scaffoldStart,scaffoldEND = subject_detail.split(":")[1].split("..")
scaffoldStart=int(scaffoldStart)
scaffoldEND = int(scaffoldEND)
if scaffoldStart<10000:
scaffoldStart = 0
else:
scaffoldStart =scaffoldStart -10000
scaffoldEND = scaffoldEND+10000
NUC.write(">"+scaffold_name+"__%s\n"%(scaffoldStart)+assemblyseqHash[scaffold_name][scaffoldStart:scaffoldEND]+'\n')
commandline = """Genewise_Psuedeo.py -p %s -n %s -o %s.result.gff"""%(PRO.name,NUC.name,i)
if direct =="-":
commandline += " -d"
COMMAND.write(commandline+'\n')
COMMAND.close()
os.system( "cat %s | parallel -j 64"%(COMMAND.name) )
os.system( "cat *.result.gff > %s"%(output) )
os.system(" rm *.result.gff")
#os.system("cat %s| parallel -j %s >genewise.out")
if __name__=='__main__':
usage = '''usage: python2.7 %prog [options] Kmer
Kmer is a list of K value you want,e.g [ 1, 2, 3, 4 ]'''
parser = OptionParser(usage =usage )
parser.add_option("-c", "--CPU", action="store",
dest="cpu",
type='int',
default = 60,
help="CPU number for each thread")
parser.add_option("-p", "--pro", action="store",
dest="protein",
help="protein sequence!!")
parser.add_option("-a", "--assembly", action="store",
dest="assembly",
help="Assemblied Genome!!")
parser.add_option("-o", "--out", action="store",
dest="output",
default = 'genewise.out',
help="The output file you want!!")
(options, args) = parser.parse_args()
cpu = options.cpu
protein = options.protein
assembly = options.assembly
output = options.output
assemblyseqHash = {}
for t,s in fasta_check(open(assembly,'rU')):
t = t.split()[0][1:]
s = re.sub("\s+",'',s)
assemblyseqHash[t]=s
proteinseqHash = {}
for t,s in fasta_check(open(protein,'rU')):
proteinseqHash[t.split()[0][1:]] = t+s
GBLASTA(protein, assembly,"geneblasta.out")
ParseGblasta("geneblasta.out", "genewise.command")
os.remove("genewise.command")
os.system("rm CACHE -rf")
os.system("rm cache -rf")
os.system( "rm *.xml")
| [
"409511038@qq.com"
] | 409511038@qq.com |
e1d5e4dea12c8224a49304457ec39b4f8a764fc5 | ba2da5470dd9f78907880e51fbe029ea784afe44 | /smh_microbit/orientation/pc/visualization.py | decc8983cb2d4be3a2ad61b7284b7cd447833bc8 | [] | no_license | samuelmh/microbit_tests | a678847ab4a8d0805118b826c6ee45cc6f5b629d | cbf36c08f1bcef9659aec221e40679623005bcdd | refs/heads/master | 2020-04-03T17:29:56.362199 | 2018-01-26T08:20:47 | 2018-01-26T08:20:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | # -*- coding: utf-8 -*-
"""
"""
import sys
import vispy
vispy.use(app='pyglet', gl=None)
from vispy import app, gloo
from vispy.visuals import CubeVisual, transforms
from vispy.color import Color
from utils.orientation import Orientation
from utils.USB_data import USBData
class Canvas(app.Canvas):
def __init__(self, connection, orientation):
self.con = connection
self.orientation = orientation
app.Canvas.__init__(self, 'Cube', keys='interactive', size=(400, 400))
self.cube = CubeVisual((7.0, 4.0, 0.3), color=Color(color='grey', alpha=0.1, clip=False), edge_color="black")
# Create a TransformSystem that will tell the visual how to draw
self.cube_transform = transforms.MatrixTransform()
self.cube.transform = self.cube_transform
self._timer = app.Timer('0.05', connect=self.on_timer, start=True)
self.show()
def on_close(self, event):
self.con.close()
def on_resize(self, event):
# Set canvas viewport and reconfigure visual transforms to match.
vp = (0, 0, self.physical_size[0], self.physical_size[1])
self.context.set_viewport(*vp)
self.cube.transforms.configure(canvas=self, viewport=vp)
def on_draw(self, event):
gloo.set_viewport(0, 0, *self.physical_size)
gloo.clear('white', depth=True)
self.cube.draw()
def on_timer(self, event):
data = connection.get_data()
if data:
roll, pitch, yaw = orientation.data2roll_pitch_yaw(data)
# print("{}\t{}\t{}".format( *map(round,(roll,pitch,yaw))))
self.cube_transform.reset()
self.cube_transform.rotate(pitch, (1, 0, 0)) # Pitch
self.cube_transform.rotate(roll, (0, 1, 0)) # Roll
self.cube_transform.rotate(yaw, (0, 0, 1)) # Yaw
self.cube_transform.scale((20, 20, 0.001))
self.cube_transform.translate((200, 200))
self.update()
if __name__ == '__main__':
connection = USBData(port="/dev/ttyACM0", baudrate=115200)
connection.start()
print("Calibration\nSet your device on the origin and press ENTER.\n")
input("Waiting...")
c_roll, c_pitch, c_yaw = Orientation._data2roll_pitch_yaw(connection.get_data())
orientation = Orientation(c_roll, c_pitch, c_yaw)
win = Canvas(connection, orientation)
win.show()
if sys.flags.interactive != 1:
win.app.run()
| [
"samuel.munoz@beeva.com"
] | samuel.munoz@beeva.com |
aa1712a6023130a39815c00d100059139f19ba7f | 975aebf5e7200da9d053838f483811a685fe33b1 | /atm.py | 0f9619ea19707630a88bb3784fbc3cbc9fa73bf0 | [] | no_license | Aaditya123-apple/Project-100 | 126df1ce1d05b1c67d9f767de064bce0ae267635 | 41f5b80251a14e0aac44107c512d36ba671c3358 | refs/heads/main | 2023-06-30T11:48:40.167339 | 2021-08-05T10:25:09 | 2021-08-05T10:25:09 | 392,999,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | class ATM ():
def __init__(self, CardNumber, PIN):
self.CardNumber=CardNumber
self.PIN=PIN
def Withdrawal(self, Amount):
new_Amount=1000000 - Amount
print('You have withdrawn:'+str(Amount))
def BalanceEnquiry(self):
print('You have 1 million dollars in your account')
def main():
CardNumber = input("insert your card number:- ")
PIN = input("enter your pin number:- ")
new_user = ATM(CardNumber ,PIN)
print("Choose your activity ")
print("1.BalanceEnquriy 2.withdrawl")
activity = int(input("Enter activity number :- "))
if (activity == 1):
new_user.BalanceEnquiry()
elif (activity == 2):
amount = int(input("Enter the amount:- "))
new_user.Withdrawal(amount)
else:
print("Enter a valid number")
main() | [
"noreply@github.com"
] | noreply@github.com |
6f284c9f2339a918f68fc9ce60c1c169f2530239 | 1b962b7963ab8e459d14b8211fce9ee81b1f3918 | /Dictionaries And HashMaps/Count Triplets.py | e887124ed7b1d48a6a4a6ed1b171d6b54f676755 | [] | no_license | NipunaMadhushan/HackerRank-Interview-Preparation-Kit | f056c2665bb289a55a524b51e81455703c575153 | aed33934b0bd95bae11d6aa886c7333e68e18db5 | refs/heads/main | 2023-04-08T04:38:06.152719 | 2021-04-16T17:28:51 | 2021-04-16T17:28:51 | 358,671,397 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | #!/bin/python3
import math
import os
import random
import re
import sys
def countTriplets(arr, r):
count = 0
dict = {}
dictPairs = {}
for i in reversed(arr):
if i*r in dictPairs:
count += dictPairs[i*r]
if i*r in dict:
dictPairs[i] = dictPairs.get(i, 0) + dict[i*r]
dict[i] = dict.get(i, 0) + 1
return count
if __name__ == '__main__':
n, r = map(int, input().strip().split())
arr = list(map(int, input().strip().split()))
ans = countTriplets(arr, r)
print(ans)
| [
"noreply@github.com"
] | noreply@github.com |
2b785be5fd03980dc2b425e1e071d73b31c13bce | 77ee4cd6a10eabd9ed4e40353eca81090f693012 | /Chapter 6 - Program 6-18.py | 8b0fd40d71d53240669a5b381298ef107536f367 | [] | no_license | ScottSko/Python---Pearson---Third-Edition---Chapter-6 | 7fd54353e4eb4807dd089d77ed65a8c454a21f78 | b9174127dc051a8f2de9efd25ee7e9df613b25f0 | refs/heads/master | 2021-01-16T18:32:18.385119 | 2017-08-12T03:33:52 | 2017-08-12T03:33:52 | 100,086,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | import os
def main():
found = False
search = input("Enter a description to search for: ")
#new_qty = int(input("Enter the new quantity: "))
friend_file = open("friends.txt", 'r')
temp_file = open("temp.txt", 'w')
descr = friend_file.readline()
while descr != '':
#qty = float(friend_file.readline())
descr = descr.rstrip('\n')
if descr == search:
new_descr = input("What would you like the new name to be? ")
temp_file.write(new_descr + '\n')
#temp_file.write(str(new_qty + '\n'))
found = True
else:
temp_file.write(descr + '\n')
#temp_file.write(str(new_qty + '\n'))
descr = friend_file.readline()
friend_file.close()
temp_file.close()
os.remove('friends.txt')
os.rename("temp.txt", 'friends.txt')
if found:
print("The file has been updated.")
else:
print("That item was not found in the file.")
main() | [
"noreply@github.com"
] | noreply@github.com |
89e1b8099d3c6daef52ccd5790833d74358328b6 | 8d0be23ba5b1542787239eef664af470abc50ea4 | /posts/migrations/0001_initial.py | d111d2b7f4d6a29e2427d3853b389139494a16ac | [] | no_license | NataliiaSubotyshyna/mb | c9057ad208effbdaab480b9e35a2fcdd768264d7 | de02ce0931a0e535abb9e5d71fd126bb9334f861 | refs/heads/master | 2023-04-15T07:31:46.214771 | 2021-04-26T14:26:32 | 2021-04-26T14:26:32 | 361,772,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | # Generated by Django 3.2 on 2021-04-25 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
],
),
]
| [
"natasubotyshyna@gmail.com"
] | natasubotyshyna@gmail.com |
f5461736c28ce7d1d94133cafcb5474ecda9b94b | 7c90d8a253d676f8ab74142eec47f0716c716696 | /scripts/g_to_cterm.py | e1a79a4210274e6933c9b5d04e2cd21fc857f1fb | [] | no_license | queyenth/dotfiles | 6866afec9e761ebc60cfb18d920b9a94c4b7d6ea | c92b200b3f511952efc522ae658d5e4c8825474d | refs/heads/master | 2023-04-01T01:02:50.709996 | 2023-03-15T09:24:39 | 2023-03-15T09:24:39 | 23,884,426 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | import sys
from grapefruit import Color
from x256 import x256
def html2xterm256(color):
r, g, b = Color.HtmlToRgb(color)
r = int(r*255)
g = int(g*255)
b = int(b*255)
return x256.from_rgb(r, g, b)
color = input()
while color != "exit":
print(html2xterm256(color))
color = input()
| [
"queyenth@gmail.com"
] | queyenth@gmail.com |
75909f671b2af23833756e70594ad8826be0566b | 38fca823e622432d133c6bbf3a10fcf9868dc0b6 | /apps/customers/apps.py | db4a5bceb5549a6c1fd188ada0df9f9bc611c4fa | [] | no_license | canionlabs/MESBack | d608d76cc8a0eb8e6639e714a1fb89d1812cdeb6 | 69c94e8a41644a34782f0174b1caa21f8b17167a | refs/heads/master | 2022-01-23T14:14:12.526381 | 2020-02-13T22:28:50 | 2020-02-13T22:28:50 | 151,326,773 | 0 | 0 | null | 2022-01-21T20:13:55 | 2018-10-02T21:37:32 | Python | UTF-8 | Python | false | false | 128 | py | from django.apps import AppConfig
class CustomersConfig(AppConfig):
name = 'apps.customers'
verbose_name = 'Clientes'
| [
"caiovictor31@live.com"
] | caiovictor31@live.com |
d7d23c964b115545b69fb22184a1ca63df24a097 | bf52e1bb25945cc9d4c1eccaaa57dbb21fb557c3 | /Labs/Lab2.py | ae14ab4b0ce4c3f9a2b5b1cc93f94c7c7096017a | [] | no_license | akaHEPTA/IntroToAlgorithm | 6431ce42f151dace11f66e3844b86492c1a010ba | d09e1b111d6467d6adeac6db4c039309dc507590 | refs/heads/master | 2022-12-23T01:23:56.810110 | 2020-09-26T08:07:58 | 2020-09-26T08:07:58 | 211,411,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,776 | py | """
Basic python list problems -- no loops.
"""
def first_last6(nums):
"""
Given a list of ints, return True if 6 appears as either the first or last element in the list.
The list will be length 1 or more.
"""
if len(nums) >= 1: # Check the length of list is enough
if nums[0] == 6 or nums[-1] == 6: # Compare the first or last element is 6
return True
return False
def same_first_last(nums):
"""
Given a list of ints, return True if the list is length 1 or more, and the first element
and the last element are equal.
"""
if len(nums) >= 1: # Check the length of list is enough
if nums[0] == nums[-1]: # And check the first and the last elements are the same
return True
return False
def common_end(a, b):
"""
Given 2 lists of ints, a and b, return True if they have the same first element or they have the same last element.
Both lists will be length 1 or more.
"""
if len(a) >= 1 and len(b) >= 1: # Check the length of lists
if a[0] == b[0] or a[-1] == b[-1]: # First or last element match
return True
return False
def sum3(nums):
"""
Given a list of ints length 3, return the sum of all the elements.
"""
return nums[0] + nums[1] + nums[2] # using loop is not allowed
def rotate_left3(nums):
"""
Given a list of ints length 3, return a list with the elements "rotated left" so {1, 2, 3} yields {2, 3, 1}.
"""
nums.append(nums.pop(0))
return nums
def reverse3(nums):
"""
Given a list of ints length 3, return a new list with the elements in reverse order,
so {1, 2, 3} becomes {3, 2, 1}.
"""
temp = nums[0]
nums[0] = nums[2]
nums[2] = temp
return nums
def max_ends3(nums):
"""
Given a list of ints length 3, figure out which is larger, the first or last element in the list,
and set all the other elements to be that value. Return the changed list.
"""
if nums[0] > nums[2]:
return [nums[0], nums[0], nums[0]]
elif nums[2] > nums[0]:
return [nums[2], nums[2], nums[2]]
else: # else means the fist and the last elements' value is same
return [nums[0], nums[0], nums[0]]
def make_ends(nums):
"""
Given a list of ints, return a new list length 2 containing the first and last elements from the original list.
The original list will be length 1 or more.
"""
if len(nums) >= 1:
return [nums[0], nums[-1]]
else:
return "The list length is not enough."
def has23(nums):
"""
Given an int list length 2, return True if it contains a 2 or a 3.
"""
if nums[0] == 2 or nums[1] == 2 or nums[0] == 3 or nums[1] == 3:
return True
return False
| [
"abelsteiger@gmail.com"
] | abelsteiger@gmail.com |
59926ccc37b0d67ba5ff8f1b19411c38efcf66ff | c548ea08b1502a75bd28614e8db32a12587f585c | /TP2/my_code/zDataManager.py | d352f4a494c593b97ead2a972c6162a52d1e155c | [] | no_license | eric-aubinais/info232 | f2fc0658d1f10a633853a780882fbffc22f1be0f | 91b2d2eb7be0cbe3b5c7bd4d75f27b6dce9bfcfc | refs/heads/master | 2020-04-19T17:42:59.229564 | 2019-02-08T15:48:26 | 2019-02-08T15:48:26 | 168,342,926 | 0 | 0 | null | 2019-01-30T12:55:24 | 2019-01-30T12:55:23 | null | UTF-8 | Python | false | false | 8,598 | py | """
Created on Sat Mar 11 08:04:23 2017
Last revised: Feb 2, 2019
@author: isabelleguyon
This is an example of program that reads data and has a few display methods.
Add more views of the data getting inspired by previous lessons:
Histograms of single variables
Data matrix heat map
Correlation matric heat map
Add methods of exploratory data analysis and visualization:
PCA or tSNE
two-way hierachical clustering (combine with heat maps)
The same class could be used to visualize prediction results, by replacing X by
the predicted values (the end of the transformation chain):
For regression, you can
plot Y as a function of X.
plot the residual a function of X.
For classification, you can
show the histograms of X for each Y value.
show ROC curves.
For both: provide a table of scores and error bars.
"""
# Add the sample code in the path
mypath = "../ingestion_program"
from sys import argv, path
from os.path import abspath
import os
path.append(abspath(mypath))
# Graphic routines
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)] # Red, lime, blue
cm = LinearSegmentedColormap.from_list('rgb', colors, N=3)
# Data types
import pandas as pd
import numpy as np
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=DeprecationWarning)
# Converter class
import data_converter
# Mother class
import data_manager
# Typical score
from sklearn.metrics import accuracy_score
class DataManager(data_manager.DataManager):
'''This class reads and displays data.
With class inheritance, we do not need to redefine the constructor,
unless we want to add or change some data members.
'''
def __init__(self, basename="", input_dir=""):
''' New contructor.'''
super(DataManager, self).__init__(basename, input_dir)
# We added new members:
self.feat_name = self.loadName (os.path.join(self.input_dir, basename + '_feat.name'))
self.label_name = self.loadName (os.path.join(self.input_dir, basename + '_label.name'))
def loadName (self, filename, verbose=False):
''' Get the variable name'''
if verbose: print("========= Reading " + filename)
name_list = []
if os.path.isfile(filename):
name_list = data_converter.file_to_array (filename, verbose=False)
else:
n=self.info['feat_num']
name_list = [self.info['feat_name']]*n
name_list = np.array(name_list).ravel()
return name_list
def __str__(self):
val = "DataManager : " + self.basename + "\ninfo:\n"
for item in self.info:
val = val + "\t" + item + " = " + str(self.info[item]) + "\n"
val = val + "data:\n"
val = val + "\tX_train = array" + str(self.data['X_train'].shape) + "\n"
val = val + "\tY_train = array" + str(self.data['Y_train'].shape) + "\n"
val = val + "\tX_valid = array" + str(self.data['X_valid'].shape) + "\n"
val = val + "\tY_valid = array" + str(self.data['Y_valid'].shape) + "\n"
val = val + "\tX_test = array" + str(self.data['X_test'].shape) + "\n"
val = val + "\tY_test = array" + str(self.data['Y_test'].shape) + "\n"
val = val + "feat_type:\tarray" + str(self.feat_type.shape) + "\n"
val = val + "feat_idx:\tarray" + str(self.feat_idx.shape) + "\n"
# These 2 lines are new:
val = val + "feat_name:\tarray" + str(self.feat_name.shape) + "\n"
val = val + "label_name:\tarray" + str(self.label_name.shape) + "\n"
return val
def toDF(self, set_name):
''' Change a given data subset to a data Panda's frame.
set_name is 'train', 'valid' or 'test'.'''
DF = pd.DataFrame(self.data['X_'+set_name])
# For training examples, we can add the target values as
# a last column: this is convenient to use seaborn
# Look at http://seaborn.pydata.org/tutorial/axis_grids.html for other ideas
if set_name == 'train':
Y = self.data['Y_train']
DF = DF.assign(target=Y)
# We modified the constructor to add self.feat_name, so we can also:
# 1) Add a header to the data frame
DF.columns=np.append(self.feat_name, 'target')
# 2) Replace the numeric categories by the class labels
DF = DF.replace({'target': dict(zip(np.arange(len(self.label_name)), self.label_name))})
return DF
##### HERE YOU CAN IMPLEMENT YOUR OWN METHODS #####
def DataStats(self, set_name):
''' Display simple data statistics.'''
DF = self.toDF(set_name)
return DF.describe() # Return something better
def DataHist(self, set_name):
''' Show histograms.'''
DF = self.toDF(set_name)
return DF.hist(figsize = (10,10), bins = 50, layout = (3,2)) # Return something better
def ShowScatter(self, set_name):
''' Show scatter plots.'''
DF = self.toDF(set_name)
if set_name == 'train':
return sns.pairplot(DF, hue = "target") # Return something better
else:
return sns.pairplot(DF) # Return something better
def ShowSomethingElse(self):
''' Surprise me.'''
# For your project proposal, provide
# a sketch with what you intend to do written in English (or French) is OK.
pass
##### END OF YOUR OWN METHODS ######################
def ClfScatter(self, clf, dim1=0, dim2=1, title=''):
'''(self, clf, dim1=0, dim2=1, title='')
Split the training data into 1/2 for training and 1/2 for testing.
Display decision function and training or test examples.
clf: a classifier with at least a fit and a predict method
like a sckit-learn classifier.
dim1 and dim2: chosen features.
title: Figure title.
Returns: Test accuracy.
'''
X = self.data['X_train']
Y = self.data['Y_train']
F = self.feat_name
# Split the data
ntr=round(X.shape[0]/2)
nte=X.shape[0]-ntr
Xtr = X[0:ntr, (dim1,dim2)]
Ytr = Y[0:ntr]
Xte = X[ntr+1:ntr+nte, (dim1,dim2)]
Yte = Y[ntr+1:ntr+nte]
# Fit model in chosen dimensions
clf.fit(Xtr, Ytr)
# Compute the training score
Yhat_tr = clf.predict(Xtr)
training_accuracy = accuracy_score(Ytr, Yhat_tr)
# Compute the test score
Yhat_te = clf.predict(Xte)
test_accuracy = accuracy_score(Yte, Yhat_te)
# Define a mesh
x_min, x_max = Xtr[:, 0].min() - 1, Xtr[:, 0].max() + 1
y_min, y_max = Xtr[:, 1].min() - 1, Xtr[:, 1].max() + 1
h = 0.1 # step
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Xgene = np.c_[xx.ravel(), yy.ravel()]
# Make your predictions on all mesh grid points (test points)
Yhat = clf.predict(Xgene)
# Make contour plot for all points in mesh
Yhat = Yhat.reshape(xx.shape)
plt.subplot(1, 2, 1)
plt.contourf(xx, yy, Yhat, cmap=plt.cm.Paired)
# Overlay scatter plot of training examples
plt.scatter(Xtr[:, 0], Xtr[:, 1], c=Ytr, cmap=cm)
plt.title('{}: training accuracy = {:5.2f}'.format(title, training_accuracy))
plt.xlabel(F[dim1])
plt.ylabel(F[dim2])
plt.subplot(1, 2, 2)
plt.contourf(xx, yy, Yhat, cmap=plt.cm.Paired)
# Overlay scatter plot of test examples
plt.scatter(Xte[:, 0], Xte[:, 1], c=Yte, cmap=cm)
plt.title('{}: test accuracy = {:5.2f}'.format(title, test_accuracy))
plt.xlabel(F[dim1])
plt.ylabel(F[dim2])
plt.subplots_adjust(left = 0, right = 1.5, bottom=0, top = 1, wspace=0.2)
plt.show()
return test_accuracy
if __name__=="__main__":
# You can use this to run this file as a script and test the DataManager
if len(argv)==1: # Use the default input and output directories if no arguments are provided
input_dir = "../public_data"
output_dir = "../results"
else:
input_dir = argv[1]
output_dir = argv[2];
print("Using input_dir: " + input_dir)
print("Using output_dir: " + output_dir)
basename = 'Iris'
D = DataManager(basename, input_dir)
print(D)
D.DataStats('train') | [
"eric.aubinais@u-psud.fr"
] | eric.aubinais@u-psud.fr |
f4d3517302e8a96756017053b7ebb012998b06ba | 3dec63c2fdd0a83ecd7de937ae8a2f93f106bb75 | /projects/env/lib/python3.7/sre_parse.py | db645a3f554bd1637d5cb8b09782db2a223e404a | [] | no_license | rudresh04thakur/7_30_GBT_PY | 8c1b92988f2b686992da30103698ce2b3b6134a9 | 5355209168a80d51c37177b58eb9d3f395317dc6 | refs/heads/master | 2020-04-15T20:10:53.772642 | 2019-05-19T05:56:49 | 2019-05-19T05:56:49 | 164,981,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | /home/rudresh/anaconda3/lib/python3.7/sre_parse.py | [
"rudresh04thakur@gmail.com"
] | rudresh04thakur@gmail.com |
fe01b55d1a445146aff5fa8d9d11d9ab0fec0471 | 20f09b3c837631cff9d2e6a6838c0ad5f356cf0b | /MDA/manage.py | 2f84d17bce325ebb5d6c8c341010b9fa9a490e20 | [] | no_license | Abhi5123/intern33 | 00b5f350b7644b1ce3b269b78bf453a90b79032b | 7e16f134ea4687a157c6f85705aaf53a8622fb6e | refs/heads/master | 2022-11-27T08:35:00.285031 | 2020-08-04T16:38:13 | 2020-08-04T16:38:13 | 285,040,203 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MDA.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"62535575+Abhi-hue5123@users.noreply.github.com"
] | 62535575+Abhi-hue5123@users.noreply.github.com |
e929e0f5b1486118a552b82e73fcafa17f507211 | 08c1e1eac940a6cc17c7d4360108941e6d126ce0 | /UnitTest_addevent.py | 79bd204fbc047d010447231b20b29a39e1fbf725 | [] | no_license | anushamanda/8220assign4waves-tests | 231c0ec190b174fd5fd0735c1d2975e57ae8e6df | 1ec0ade1eb3e5d7ce1c69a05850cab2b72d2bc75 | refs/heads/master | 2022-05-24T18:27:57.693544 | 2020-05-01T05:43:02 | 2020-05-01T05:43:02 | 260,388,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
class waves_Test9(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome()
def test_addevent(self):
driver = self.driver
driver.maximize_window()
driver.get("https://wavesfit.herokuapp.com/")
elem = driver.find_element_by_xpath('//*[@id="myNavbar"]/ul/li[2]/a/b').click()
#time.sleep(0.5)
elem = driver.find_element_by_id("id_username")
elem.send_keys("SamRuth")
elem = driver.find_element_by_id("id_password")
elem.send_keys("sam1")
elem = driver.find_element_by_xpath('//*[@id="app-layout"]/div/div/div/div/div/div/div/div[2]/div/form/p[3]/input').click()
time.sleep(2)
elem = driver.find_element_by_xpath('//*[@id="app-layout"]/nav/div/div[1]/p/a[3]/b').click()
elem = driver.find_element_by_xpath('//*[@id="listings"]/div/div/div[2]/table/tbody/tr[1]/td[3]/a/span').click()
time.sleep(2)
elem = driver.find_element_by_id("id_event_name")
elem.send_keys("Weight lifting")
elem = driver.find_element_by_id("id_trainer_name")
elem.send_keys("sri vidya")
elem = driver.find_element_by_id("id_branch")
elem.send_keys("72nd st omaha, NE waves branch1")
elem = driver.find_element_by_id("id_description")
elem.send_keys("power lifting is a great muscle building session")
elem = driver.find_element_by_xpath('//*[@id="app-layout"]/div/div/div/form/button').click()
try:
time.sleep(2)
elem = driver.find_element_by_xpath('//*[@id="listings"]/div/div/div[1]/h2')
assert True
except NoSuchElementException:
self.fail("Login Failed")
assert False
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main() | [
"amanda@unomaha.edu"
] | amanda@unomaha.edu |
bab6d662deb64c019535c2f9d445ba7d16dddda6 | 36a3b47a9a22a11e2e83b34d295c33b59c8150bf | /dogehouse/client.py | 1f0a87b52cd02bc68018c478fd3b6157aa68aad7 | [
"MIT"
] | permissive | IdlyBond/dogehouse.py | a8007ab8952755f88e490194613242b428d5475f | 176d8714693de3332b45247da77335a4c707c1a3 | refs/heads/main | 2023-04-04T13:10:21.543626 | 2021-03-25T16:02:27 | 2021-03-25T16:02:27 | 351,272,314 | 0 | 0 | MIT | 2021-03-25T01:23:49 | 2021-03-25T01:23:49 | null | UTF-8 | Python | false | false | 20,279 | py | # -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2021 Arthur
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import websockets
from uuid import uuid4
from json import loads, dumps
from inspect import signature
from logging import info, debug
from typing import Awaitable, List, Union
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
from .utils import Repr
from .entities import User, Room, UserPreview, Message, BaseUser
from .config import apiUrl, heartbeatInterval, topPublicRoomsInterval
from .exceptions import NoConnectionException, InvalidAccessToken, InvalidSize, NotEnoughArguments, CommandNotFound
listeners = {}
commands = {}
def event(func: Awaitable):
"""
Create an event listener for dogehouse.
Example:
class Client(dogehouse.DogeClient):
@dogehouse.event
async def on_ready(self):
print(f"Logged in as {self.user.username}")
if __name__ == "__main__":
Client("token", "refresh_token").run()
"""
listeners[func.__name__.lower()] = [func, False]
return func
def command(name: str = None):
"""
Create a new command for dogehouse.
Example:
class Client(dogehouse.DogeClient):
@dogehouse.command
async def hello(self, ctx):
await self.send(f"Hello {ctx.author.mention}")
if __name__ == "__main__":
Client("token", "refresh_token").run()
"""
def wrapper(func: Awaitable):
commands[(name if name else func.__name__).lower()] = [func, False]
return func
return wrapper
class DogeClient(Repr):
    """Represents your Dogehouse client."""

    def __init__(self, token: str, refresh_token: str, *, room: str = None, muted: bool = False, reconnect_voice: bool = False, prefix: Union[str, List[str]] = "!"):
        """
        Initialize your Dogehouse client

        Args:
            token (str): Your super secret client token.
            refresh_token (str): Your super secret client refresh token.
            room (int, optional): The room your client should join. Defaults to None.
            muted (bool, optional): Wether or not the client should be muted. Defaults to False.
            reconnect_voice (bool, optional): When the client disconnects from the voice server, should it try to reconnect. Defaults to False.
            prefix (List of strings or a string): The bot prefix.
        """
        self.user = None
        self.room = room
        self.rooms = []
        self.prefix = prefix
        self.__token = token
        self.__refresh_token = refresh_token
        self.__socket = None
        self.__active = False
        self.__muted = muted
        self.__reconnect_voice = reconnect_voice
        # Module-level registries (the @dogehouse.command decorator writes
        # into `commands`; presumably `listeners` is filled the same way).
        self.__listeners = listeners
        # Maps fetch-id -> opcode so "fetch_done" replies can be routed.
        self.__fetches = {}
        self.__commands = commands

    async def __fetch(self, op: str, data: dict):
        # Send `op` tagged with a unique fetch id and remember which op that
        # id belongs to; the reply arrives later as a "fetch_done" event.
        fetch = str(uuid4())
        await self.__send(op, data, fetch_id=fetch)
        self.__fetches[fetch] = op

    async def __send(self, opcode: str, data: dict, *, fetch_id: str = None):
        """Internal websocket sender method."""
        raw_data = dict(op=opcode, d=data)
        if fetch_id:
            raw_data["fetchId"] = fetch_id
        await self.__socket.send(dumps(raw_data))

    async def __main(self, loop):
        """This instance handles the websocket connections."""
        async def event_loop():
            # Receive loop: dispatches every incoming websocket payload to
            # the registered listeners/commands until self.__active is False.

            async def execute_listener(listener: str, *args):
                # Registry value is [callable, registered_via_instance_flag];
                # instance-registered listeners do not receive the client.
                listener = self.__listeners.get(listener.lower())
                if listener:
                    asyncio.ensure_future(listener[0](*args) if listener[1] else
                                          listener[0](self, *args))

            async def execute_command(command_name: str, ctx: Message, *args):
                command = self.__commands.get(command_name.lower())
                if command:
                    arguments = []
                    params = {}
                    parameters = list(signature(command[0]).parameters.items())
                    if not command[1]:
                        # Module-level command: first parameter is the client.
                        arguments.append(self)
                        parameters.pop(0)
                    if parameters:
                        # Next parameter receives the triggering message (ctx).
                        arguments.append(ctx)
                        parameters.pop(0)
                    # Map the remaining message words onto the parameters; a
                    # keyword-only parameter swallows the rest of the message.
                    for idx, (key, param) in enumerate(parameters):
                        value = args[idx]
                        if param.kind == param.KEYWORD_ONLY:
                            value = " ".join(args[idx::])
                        params[key] = value
                    try:
                        asyncio.ensure_future(command[0](*arguments, **params))
                    except TypeError:
                        raise NotEnoughArguments(
                            f"Not enough arguments were provided in command `{command_name}`.")
                else:
                    raise CommandNotFound(
                        f"The requested command `{command_name}` does not exist.")

            info("Dogehouse: Starting event listener loop")
            while self.__active:
                res = loads(await self.__socket.recv())
                # Some payloads are bare strings; dict payloads carry "op".
                op = res if isinstance(res, str) else res.get("op")
                if op == "auth-good":
                    info("Dogehouse: Received client ready")
                    self.user = User.from_dict(res["d"]["user"])
                    await execute_listener("on_ready")
                elif op == "new-tokens":
                    info("Dogehouse: Received new authorization tokens")
                    self.__token = res["d"]["accessToken"]
                    self.__refresh_token = res["d"]["refreshToken"]
                elif op == "fetch_done":
                    # Route the reply back to the op recorded by __fetch().
                    fetch = self.__fetches.get(res.get("fetchId"), False)
                    if fetch:
                        del self.__fetches[res.get("fetchId")]
                        if fetch == "get_top_public_rooms":
                            info("Dogehouse: Received new rooms")
                            self.rooms = list(
                                map(Room.from_dict, res["d"]["rooms"]))
                            await execute_listener("on_rooms_fetch")
                        elif fetch == "create_room":
                            info("Dogehouse: Created new room")
                            self.room = Room.from_dict(res["d"]["room"])
                elif op == "you-joined-as-speaker":
                    await execute_listener("on_room_join", True)
                elif op == "join_room_done":
                    self.room = Room.from_dict(res["d"]["room"])
                    await execute_listener("on_room_join", False)
                elif op == "new_user_join_room":
                    await execute_listener("on_user_join", User.from_dict(res["d"]["user"]))
                elif op == "user_left_room":
                    await execute_listener("on_user_leave", res["d"]["userId"])
                elif op == "new_chat_msg":
                    msg = Message.from_dict(res["d"]["msg"])
                    await execute_listener("on_message", msg)
                    # NOTE(review): `return` (not `continue`) terminates the
                    # whole event loop when the bot sees its own message —
                    # confirm this is intentional.
                    if msg.author.id == self.user.id:
                        return
                    try:
                        async def handle_command(prefix: str):
                            # Returns True when the message matched `prefix`
                            # and a command was dispatched.
                            if msg.content.startswith(prefix) and len(msg.content) > len(prefix) + 1:
                                splitted = msg.content[len(prefix)::].split(" ")
                                await execute_command(splitted[0], msg, *splitted[1::])
                                return True
                            return False

                        # Normalise self.prefix to a list of prefixes.
                        prefixes = []
                        if isinstance(self.prefix, str):
                            prefixes.append(self.prefix)
                        else:
                            prefixes = self.prefix

                        for prefix in prefixes:
                            if await handle_command(prefix):
                                break
                    except Exception as e:
                        await execute_listener("on_error", e)
                elif op == "message_deleted":
                    await execute_listener("on_message_delete", res["d"]["deleterId"], res["d"]["messageId"])
                elif op == "speaker_removed":
                    await execute_listener("on_speaker_delete", res["d"]["userId"], res["d"]["roomId"], res["d"]["muteMap"], res["d"]["raiseHandMap"])
                elif op == "chat_user_banned":
                    await execute_listener("on_user_ban", res["d"]["userId"])
                elif op == "hand_raised":
                    await execute_listener("on_speaker_request", res["d"]["userId"], res["d"]["roomId"])

        async def heartbeat():
            # Keep-alive: the server expects a "ping" every heartbeatInterval.
            debug("Dogehouse: Starting heartbeat")
            while self.__active:
                await self.__socket.send("ping")
                await asyncio.sleep(heartbeatInterval)

        async def get_top_rooms_loop():
            # Periodically refresh self.rooms until the client joins a room.
            debug("Dogehouse: Starting to get all rooms")
            while self.__active and not self.room:
                await self.get_top_public_rooms()
                await asyncio.sleep(topPublicRoomsInterval)

        try:
            info("Dogehouse: Connecting with Dogehouse websocket")
            async with websockets.connect(apiUrl) as ws:
                info("Dogehouse: Websocket connection established successfully")
                self.__active = True
                self.__socket = ws
                info("Dogehouse: Attemting to authenticate")
                await self.__send('auth', {
                    "accessToken": self.__token,
                    "refreshToken": self.__refresh_token,
                    "reconnectToVoice": self.__reconnect_voice,
                    "muted": self.__muted,
                    "currentRoomId": self.room,
                    "platform": "dogehouse.py"
                })
                info("Dogehouse: Successfully authenticated")
                event_loop_task = loop.create_task(event_loop())
                get_top_rooms_task = loop.create_task(get_top_rooms_loop())
                await heartbeat()
                # NOTE(review): Task objects are not callable — these two
                # lines would raise TypeError if ever reached (heartbeat only
                # returns once __active is False). Confirm intent; awaiting
                # the tasks themselves (no parentheses) looks intended.
                await event_loop_task()
                await get_top_rooms_task()
        except ConnectionClosedOK:
            info("Dogehouse: Websocket connection closed peacefully")
            self.__active = False
        except ConnectionClosedError as e:
            # 4004 is the server's "invalid auth" close code here.
            if (e.code == 4004):
                raise InvalidAccessToken()

    def run(self):
        """Establishes a connection to the websocket servers."""
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.__main(loop))
        loop.close()

    async def close(self):
        """
        Closes the established connection.

        Raises:
            NoConnectionException: No connection has been established yet. Aka got nothing to close.
        """
        if not isinstance(self.__socket, websockets.WebSocketClientProtocol):
            raise NoConnectionException()
        # Flipping the flag lets the heartbeat/event loops drain naturally.
        self.__active = False

    def listener(self, name: str = None):
        """
        Create an event listener for dogehouse.

        Args:
            name (str, optional): The name of the event. Defaults to the function name.

        Example:
            client = dogehouse.DogeClient("token", "refresh_token")

            @client.listener()
            async def on_ready():
                print(f"Logged in as {self.user.username}")

            client.run()

            # Or:
            client = dogehouse.DogeClient("token", "refresh_token")

            @client.listener(name="on_ready")
            async def bot_has_started():
                print(f"Logged in as {self.user.username}")

            client.run()
        """
        def decorator(func: Awaitable):
            # True flag: instance-registered, so the client is NOT injected.
            self.__listeners[(name if name else func.__name__).lower()] = [
                func, True]
            return func
        return decorator

    def command(self, name: str = None):
        """
        Create an command for dogehouse.

        Args:
            name (str, optional): The name of the command. Defaults to the function name.

        Example:
            client = dogehouse.DogeClient("token", "refresh_token")

            @client.command()
            async def hello(ctx):
                await client.send(f"Hello {ctx.author.mention}")

            client.run()

            # Or:
            client = dogehouse.DogeClient("token", "refresh_token")

            @client.listener(name="hello")
            async def hello_command(ctx):
                await client.send(f"Hello {ctx.author.mention}")

            client.run()
        """
        def decorator(func: Awaitable):
            self.__commands[(name if name else func.__name__).lower()] = [
                func, True]
            return func
        return decorator

    async def get_top_public_rooms(self, *, cursor=0) -> None:
        """
        Manually send a request to update the client rooms property.
        This method gets triggered every X seconds. (Stated in dogehouse.config.topPublicRoomsInterval)

        Args:
            # TODO: Add cursor description
            cursor (int, optional): [description]. Defaults to 0.
        """
        await self.__fetch("get_top_public_rooms", dict(cursor=cursor))

    async def create_room(self, name: str, description: str = "", *, public=True) -> None:
        """
        Creates a room, when the room is created a request will be sent to join the room.
        When the client joins the room the `on_room_join` event will be triggered.

        Args:
            name (str): The name for room.
            description (str): The description for the room.
            public (bool, optional): Wether or not the room should be publicly visible. Defaults to True.
        """
        if 2 <= len(name) <= 60:
            return await self.__fetch("create_room", dict(name=name, description=description, privacy="public" if public else "private"))
        raise InvalidSize(
            "The `name` property length should be 2-60 characters long.")

    async def join_room(self, id: str) -> None:
        """
        Send a request to join a room as a listener.

        Args:
            id (str): The ID of the room you want to join.
        """
        await self.__send("join_room", dict(roomId=id))

    async def send(self, message: str, *, whisper: List[str] = []) -> None:
        """
        Send a message to the current room.

        Args:
            message (str): The message that should be sent.
            whisper (List[str], optional): A collection of user id's who should only see the message. Defaults to [].

        Raises:
            NoConnectionException: Gets thrown when the client hasn't joined a room yet.
        """
        # NOTE(review): mutable default argument for `whisper` — harmless
        # here only because it is never mutated.
        if not self.room:
            raise NoConnectionException("No room has been joined yet!")

        def parse_message():
            # Tokenise the message the way the chat protocol expects:
            # "@name" -> mention, "http..." -> link, ":x:" -> emote, else text.
            tokens = []
            for token in message.split(" "):
                t, v = "text", token
                if v.startswith("@") and len(v) >= 3:
                    t = "mention"
                    v = v[1:]
                elif v.startswith("http") and len(v) >= 8:
                    t = "link"
                elif v.startswith(":") and v.endswith(":") and len(v) >= 3:
                    t = "emote"
                    v = v[1:-1]
                tokens.append(dict(t=t, v=v))
            return tokens

        await self.__send("send_room_chat_msg", dict(whisperedTo=whisper, tokens=parse_message()))

    async def ask_to_speak(self):
        """
        Request in the current room to speak.

        Raises:
            NoConnectionException: Gets raised when no room has been joined yet.
        """
        if not self.room:
            raise NoConnectionException("No room has been joined yet.")
        await self.__send("ask_to_speak", {})

    async def make_mod(self, user: Union[User, BaseUser, UserPreview]):
        """
        Make a user in the room moderator.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user which should be promoted to room moderator.
        """
        await self.__send("change_mod_status", dict(userId=user.id, value=True))

    async def unmod(self, user: Union[User, BaseUser, UserPreview]):
        """
        Remove a user their room moderator permissions.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user from which his permissions should be taken.
        """
        await self.__send("change_mod_status", dict(userId=user.id, value=False))

    async def make_admin(self, user: Union[User, BaseUser, UserPreview]):
        """
        Make a user the room administrator/owner.
        NOTE: This action is irreversable.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user which should be promoted to room admin.
        """
        await self.__send("change_room_creator", dict(userId=user.id))

    async def set_listener(self, user: Union[User, BaseUser, UserPreview] = None):
        """
        Force a user to be a listener.

        Args:
            user (Union[User, BaseUser, UserPreview], optional): The user which should become a Listener. Defaults to the client.
        """
        if not user:
            user = self.user
        await self.__send("set_listener", dict(userId=user.id))

    async def ban_chat(self, user: Union[User, BaseUser, UserPreview]):
        """
        Ban a user from speaking in the room.
        NOTE: This action can not be undone.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user from which their chat permissions should be taken.
        """
        await self.__send("ban_from_room_chat", dict(userId=user.id))

    async def ban(self, user: Union[User, BaseUser, UserPreview]):
        """
        Bans a user from a room.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user who should be banned.
        """
        await self.__send("block_from_room", dict(userId=user.id))

    async def unban(self, user: Union[User, BaseUser, UserPreview]):
        """
        Unban a user from the room.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user who should be unbanned.
        """
        # NOTE(review): passes a raw uuid4() (not str) as fetch_id and never
        # registers it in self.__fetches, unlike __fetch() — confirm intent.
        await self.__send("unban_from_room", dict(userId=user.id), fetch_id=uuid4())

    async def add_speaker(self, user: Union[User, BaseUser, UserPreview]):
        """
        Accept a speaker request from a user.

        Args:
            user (Union[User, BaseUser, UserPreview]): The user who will has to be accepted.
        """
        await self.__send("add_speaker", dict(userId=user.id))

    async def delete_message(self, id: str, user_id: str):
        """
        Deletes a message that has been sent by a user.

        Args:
            id (str): The id of the message that should be removed.
            user_id (str): The author of that message.
        """
        await self.__send("delete_room_chat_message", dict(messageId=id, userId=user_id))
| [
"mail@arthurdw.com"
] | mail@arthurdw.com |
7b9e2349bb54dccbcdc4d761591f751e7832397f | 9197c17669f1fb0468e75c53599ea371be31ad0c | /归并排序优化版.py | 5103cc6643ecac25e2e7f9f53900d58ef0b6d178 | [] | no_license | chenglu66/sort | 31fdd6e4152874dbb533b75265ac9a4bcd9994b6 | cafeee61dca6c722cb23836cabfd6e573a21a4ed | refs/heads/master | 2021-01-20T17:14:59.387453 | 2017-06-29T15:10:05 | 2017-06-29T15:10:05 | 95,741,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | # -*- coding: utf-8 -*-
"""
Created on Fri May 5 21:25:52 2017
@author: Lenovo-Y430p
"""
# In-place recursive merge sort (top-down).
def mergesorted(a, p, r):
    """Recursively merge-sort a[p..r] (inclusive bounds) in place."""
    if p < r:
        mid = (p + r) // 2
        mergesorted(a, p, mid)
        mergesorted(a, mid + 1, r)
        merge(a, p, mid, r)


def merge(a, p, q, r):
    """Merge the sorted runs a[p..q] and a[q+1..r] back into a[p..r].

    Uses an infinite sentinel at the end of each run so neither run has to
    be checked for exhaustion.  (The original used the magic number 88888,
    which silently produced wrong results for inputs containing values
    >= 88888; float('inf') works for any comparable numbers.)
    """
    left = a[p:q + 1]
    right = a[q + 1:r + 1]
    left.append(float('inf'))    # sentinel: never chosen over real data
    right.append(float('inf'))
    i = 0
    j = 0
    for k in range(p, r + 1):
        if left[i] <= right[j]:
            a[k] = left[i]
            i += 1
        else:
            a[k] = right[j]
            j += 1
def main():
    """Demo: sort one sample list with both merge-sort variants and print."""
    sample = [2, 4, 3, 5, 6, 6, 7, 7, 7, 8, 8, 9, 44, 56, 65]
    last = len(sample) - 1
    copied_sorted = merge1(sample, 0, last)
    print(sample)          # still in original order: merge1 returns a copy
    mergesorted(sample, 0, last)
    print(copied_sorted)   # sorted copy produced by merge1
    print(sample)          # now sorted in place by mergesorted
# Copying recursive merge sort: returns a new sorted list.
def merge1(a, left, right):
    """Return a sorted copy of a[left..right] (inclusive); `a` is untouched.

    Returns None when left > right (empty range), matching the original.
    """
    if left == right:
        return [a[left]]
    if left < right:
        mid = (left + right) // 2
        A = merge1(a, left, mid)
        B = merge1(a, mid + 1, right)
        return sort(A, B)


def sort(A, B):
    """Merge two sorted lists into a new sorted list.

    Rewritten as a plain two-pointer merge.  The original appended the
    magic sentinel 88888 to both inputs, which (a) mutated the caller's
    lists and (b) produced wrong results for values >= 88888.
    """
    merged = []
    i = 0
    j = 0
    while i < len(A) and j < len(B):
        if A[i] <= B[j]:
            merged.append(A[i])
            i += 1
        else:
            merged.append(B[j])
            j += 1
    # At most one of these has elements left.
    merged.extend(A[i:])
    merged.extend(B[j:])
    return merged
# Script entry point: run the merge-sort demo.
if __name__=='__main__':
    main()
| [
"noreply@github.com"
] | noreply@github.com |
ae62814db0ca42c1055a4b10e13fc44e5eef0756 | 81eccf97d0dcbadb5903213fb535abdc5ff35bcf | /lib/func_mAP_v2.py | 63068d0c1fb5e8eb490f39ada6190a96dd22f6d8 | [] | no_license | zealot5209/FPN_SeNet50 | 6c9d8caedfdcd21e3422a9241bbd3070a933b3f0 | 15e996d55660dfdf7f7b1d104eeb98aade55c50e | refs/heads/master | 2020-03-10T12:55:26.296054 | 2018-04-27T03:49:54 | 2018-04-27T03:49:54 | 129,388,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,047 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import os.path as osp
import sys
import shutil
import glob
import cPickle
from xml.dom.minidom import parse
import xml.dom.minidom
import numpy as np
import BBoxXmlTool as bxt
import xml.etree.ElementTree as ET
# =======================================
# check_neg(gtxmlPath,resultxmlPath):
# add_neg(gtxmlPath,resultxmlPath):
# cleandata(xmlpath):
# savetxt_extractxml(objcls,resultxmlPath):
# parse_rec(filename):
# voc_ap(rec, prec, use_07_metric=False):
# voc_eval( ):该函数主要实现单一类别的AP计算
# do_mAP_eval(setNeg, gtxmlPath, resultxmlPath):
# =======================================
# =======================================
def check_neg(gtxmlPath, resultxmlPath):
    """Keep only result XMLs that also have a ground-truth counterpart.

    Copies every file name present in BOTH directories into
    <result_parent>/resxmlWithoutNeg, and writes the common file stems
    (extension stripped) to <gt_parent>/imglist.txt.

    Returns:
        (imglistPath, gtxmlPath, resultxmlPath) -- resultxmlPath is
        re-pointed at the filtered resxmlWithoutNeg directory.
    """
    afiles = []
    bfiles = []
    for root, dirs, files in os.walk(gtxmlPath):
        print gtxmlPath, 'All files numbers:', len(files)
        for f in files:
            afiles.append(root + f)
    for root, dirs, files in os.walk(resultxmlPath):
        print resultxmlPath, 'All files numbers:', len(files)
        for f in files:
            bfiles.append(root + f)
    # Strip the gtxmlPath prefix so only path-relative names get compared.
    gtxmlPathlen = len(gtxmlPath)
    aafiles = []
    for f in afiles:
        aafiles.append(f[gtxmlPathlen:])
    # Strip the resultxmlPath prefix likewise.
    resultxmlPathlen = len(resultxmlPath)
    bbfiles = []
    for f in bfiles:
        bbfiles.append(f[resultxmlPathlen:])
    afiles = aafiles
    bfiles = bbfiles
    setA = set(afiles)
    setB = set(bfiles)
    # Names present in exactly one of the two directories (reported only).
    onlyFiles = setA ^ setB
    aonlyFiles = []
    bonlyFiles = []
    for of in onlyFiles:
        if of in afiles:
            aonlyFiles.append(of)
        elif of in bfiles:
            bonlyFiles.append(of)
    print gtxmlPath, 'only files numbers:', len(aonlyFiles)
    print resultxmlPath, 'only files numbers:', len(bonlyFiles)
    xmlNegPath = os.path.dirname(resultxmlPath) + "/resxmlWithoutNeg"
    if not (osp.exists(xmlNegPath)):
        os.mkdir(xmlNegPath)
    # Copy every result file that exists in both dirs into the filtered dir.
    bothfiles = setA & setB
    for line2 in bothfiles:
        linetmp = line2.strip()
        oldname = resultxmlPath + "/" + linetmp
        newname = xmlNegPath + "/" + linetmp
        shutil.copyfile(oldname, newname)
    # Write the common stems (name before the first '.') to imglist.txt.
    tmpdir = os.path.dirname(gtxmlPath)
    imglistPath = tmpdir + '/imglist' + '.txt'
    imglistfile = open(imglistPath, 'w+')
    if (len(bothfiles) > 0):
        for xmlname in bothfiles:
            tmp1 = xmlname.split(".")
            tmp2 = tmp1[0]
            imglistfile.write(tmp2)
            imglistfile.write('\n')
    imglistfile.close()
    resultxmlPath = xmlNegPath
    return imglistPath, gtxmlPath, resultxmlPath
    # return imglistPath
# ======================================
def add_neg(gtxmlPath, resultxmlPath):
    """Pad the ground truth with empty (negative) annotations.

    For every result XML that has no ground-truth counterpart, an empty
    annotation is generated in <gt_parent>/gtxmlwithNeg; the existing GT
    files are copied there unchanged, and all result file stems are
    written to <gt_parent>/imglist.txt.

    Returns:
        (imglistPath, gtxmlPath, resultxmlPath) -- gtxmlPath is re-pointed
        at the padded gtxmlwithNeg directory.
    """
    afiles = []
    bfiles = []
    for root, dirs, files in os.walk(gtxmlPath):
        print gtxmlPath, 'All files numbers:', len(files)
        for f in files:
            afiles.append(root + f)
    for root, dirs, files in os.walk(resultxmlPath):
        print resultxmlPath, 'All files numbers:', len(files)
        for f in files:
            bfiles.append(root + f)
    # Reduce both listings to path-relative names before the set compare.
    gtxmlPathlen = len(gtxmlPath)
    aafiles = []
    for f in afiles:
        aafiles.append(f[gtxmlPathlen:])
    resultxmlPathlen = len(resultxmlPath)
    bbfiles = []
    for f in bfiles:
        bbfiles.append(f[resultxmlPathlen:])
    afiles = aafiles
    bfiles = bbfiles
    setA = set(afiles)
    setB = set(bfiles)
    # Files only in the results dir need an empty ground-truth annotation.
    bonlyFiles = setB - setA
    xmlNegPath = os.path.dirname(gtxmlPath) + "/gtxmlwithNeg"
    if not (os.path.exists(xmlNegPath)):
        os.mkdir(xmlNegPath)
    for line in bonlyFiles:
        # IMGBBox(line) builds an (empty) annotation for the image;
        # NOTE(review): original author was unsure of IMGBBox's semantics.
        XMLBBox = bxt.IMGBBox(line)
        xml_save_path = os.path.join(xmlNegPath, os.path.splitext(line)[0] + '.xml')
        XMLBBox.saveXML(save_path=xml_save_path)
    # Copy the real ground-truth files alongside the generated negatives.
    for line2 in afiles:
        linetmp = line2.strip()
        oldname = gtxmlPath + "/" + linetmp
        newname = xmlNegPath + "/" + linetmp
        shutil.copyfile(oldname, newname)
    # Write all result stems (name before the first '.') to imglist.txt.
    imglistPath = os.path.dirname(gtxmlPath) + '/imglist' + '.txt'
    imglistfile = open(imglistPath, 'w+')
    if (len(bfiles) > 0):
        for xmlname in bfiles:
            tmp1 = xmlname.split(".")
            tmp2 = tmp1[0]
            imglistfile.write(tmp2)
            imglistfile.write('\n')
    imglistfile.close()
    gtxmlPath = xmlNegPath
    return imglistPath, gtxmlPath, resultxmlPath
# ======================================
def cleandata(gtxmlpath):
    """Scan a directory of annotation XMLs and count boxes per class.

    Image width/height min/max/sums are computed but only kept in locals;
    boxes named 'portable_other' are renamed to 'portable_side' in memory
    (the XML files on disk are not rewritten here).

    Returns:
        dict: class name -> number of bounding boxes of that class.
    """
    xml_list = os.listdir(gtxmlpath)
    minw = 10000
    minh = 10000
    maxw = 0
    maxh = 0
    meanw = 0
    meanh = 0
    img_counter = 0
    bbox_counter = 0
    cls_dict = {}
    for idx, fname in enumerate(xml_list):
        xml_path = osp.join(gtxmlpath, fname)
        tmp_img = bxt.IMGBBox(xml_path=xml_path)
        minw = min(minw, tmp_img.width)
        minh = min(minh, tmp_img.height)
        maxw = max(maxw, tmp_img.width)
        maxh = max(maxh, tmp_img.height)
        meanw += tmp_img.width
        meanh += tmp_img.height
        bbox_num = len(tmp_img.bboxes)
        if bbox_num > 0:
            # Tally one count per box under its class name.
            for tmpidx, item in enumerate(tmp_img.bboxes):
                if cls_dict.has_key(item.name):
                    cls_dict[item.name] += 1
                else:
                    cls_dict[item.name] = 1
                if item.name == 'portable_other':
                    tmp_img.bboxes[tmpidx].name = 'portable_side'
            img_counter += 1
            bbox_counter += bbox_num
        else:
            print "no bbox: " + fname
    print "%s :" % (gtxmlpath)
    print "cls :", (cls_dict)
    return cls_dict
# ==========================================
def savetxt_extractxml(objcls, resultxmlPath):
    """Extract all detections of class `objcls` from the result XMLs.

    Writes one line per detection in VOC "comp" format --
    "<file> <score> <xmin> <ymin> <xmax> <ymax>" -- to
    <result_parent>/tmpcompTxt/<objcls>.txt.

    Returns:
        str: path of the generated text file (created only if the result
        directory exists).
    """
    outdirPath = os.path.dirname(resultxmlPath)
    outtmpPath = outdirPath + '/tmpcompTxt'
    if not (os.path.exists(outtmpPath)):
        os.mkdir(outtmpPath)
    outPath = outtmpPath + '/' + objcls + '.txt'
    dirPath = resultxmlPath
    if (os.path.exists(dirPath)):
        filenames = glob.glob(dirPath + '//*.xml')
        fileout = open(outPath, 'w+')
        for filename in filenames:
            # Parse each result annotation with minidom.
            DOMTree = xml.dom.minidom.parse(filename)
            collection = DOMTree.documentElement
            if collection.hasAttribute("annotation"):
                print "Root element : %s" % collection.getAttribute("annotation")
            # Every <object> element is one detection.
            objects = collection.getElementsByTagName("object")
            for object in objects:
                name = object.getElementsByTagName('name')[0]
                namestr = name.childNodes[0].data
                if namestr == objcls:
                    score = object.getElementsByTagName('score')[0]
                    scorestr = score.childNodes[0].data
                    bndbox = object.getElementsByTagName("bndbox")
                    bndbox_xmin = bndbox[0].getElementsByTagName('xmin')[0]
                    bndbox_ymin = bndbox[0].getElementsByTagName('ymin')[0]
                    bndbox_xmax = bndbox[0].getElementsByTagName('xmax')[0]
                    bndbox_ymax = bndbox[0].getElementsByTagName('ymax')[0]
                    bndbox_xminstr = bndbox_xmin.childNodes[0].data
                    bndbox_yminstr = bndbox_ymin.childNodes[0].data
                    bndbox_xmaxstr = bndbox_xmax.childNodes[0].data
                    bndbox_ymaxstr = bndbox_ymax.childNodes[0].data
                    filebasename = os.path.basename(filename)
                    temp = [filebasename, scorestr, bndbox_xminstr, bndbox_yminstr, bndbox_xmaxstr, bndbox_ymaxstr]
                    fileout.writelines(' '.join(temp))
                    fileout.write('\n')
        fileout.close()
    return outPath
# ====================================================
def parse_rec(filename):
    """Parse a PASCAL VOC annotation XML file.

    Returns a list of dicts, one per <object> element, each carrying the
    'name', 'pose', 'truncated', 'difficult' fields and the bounding box
    as [xmin, ymin, xmax, ymax] ints.
    """
    doc = ET.parse(filename)
    parsed = []
    for node in doc.findall('object'):
        box = node.find('bndbox')
        record = {
            'name': node.find('name').text,
            'pose': node.find('pose').text,
            'truncated': int(node.find('truncated').text),
            'difficult': int(node.find('difficult').text),
            'bbox': [int(box.find(tag).text)
                     for tag in ('xmin', 'ymin', 'xmax', 'ymax')],
        }
        parsed.append(record)
    return parsed
def voc_ap(rec, prec, use_07_metric=False):
    """ap = voc_ap(rec, prec, [use_07_metric])

    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the VOC 07 11 point method
    (default: False).
    """
    if use_07_metric:
        # Classic 11-point interpolation: average the best precision found
        # at recall >= t for t = 0.0, 0.1, ..., 1.0.
        ap = 0.
        for threshold in np.arange(0., 1.1, 0.1):
            mask = rec >= threshold
            best = np.max(prec[mask]) if np.sum(mask) > 0 else 0
            ap += best / 11.
        return ap
    # Area-under-PR-curve method: pad with sentinel endpoints, take the
    # running maximum of precision from the right (the "envelope"), then
    # sum rectangle areas wherever recall actually changes.
    padded_rec = np.concatenate(([0.], rec, [1.]))
    padded_prec = np.concatenate(([0.], prec, [0.]))
    envelope = np.maximum.accumulate(padded_prec[::-1])[::-1]
    change = np.where(padded_rec[1:] != padded_rec[:-1])[0]
    return np.sum((padded_rec[change + 1] - padded_rec[change]) * envelope[change + 1])
def voc_eval(detpath,
             annopath,
             imagesetfile,
             classname,
             cachedir,
             ovthresh=0.5,
             use_07_metric=False):
    """rec, prec, ap = voc_eval(detpath,
                                annopath,
                                imagesetfile,
                                classname,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the PASCAL VOC evaluation.

    detpath: Path to detections
        detpath.format(classname) should produce the detection results file.
    annopath: Path to annotations
        annopath.format(imagename) should be the xml annotations file.
    imagesetfile: Text file containing the list of images, one image per line.
    classname: Category name (duh)
    cachedir: Directory for caching the annotations
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)
    """
    # assumes detections are in detpath.format(classname)
    # assumes annotations are in annopath.format(imagename)
    # assumes imagesetfile is a text file with each line an image name
    # cachedir caches the annotations in a pickle file

    # -- Load (or build and cache) all ground-truth annotations -----------
    if not os.path.isdir(cachedir):
        os.mkdir(cachedir)
    cachefile = os.path.join(cachedir, 'annots.pkl')
    # read list of images
    with open(imagesetfile, 'r') as f:
        lines = f.readlines()
    imagenames = [x.strip() for x in lines]
    if not os.path.isfile(cachefile):
        # load annots
        recs = {}
        for i, imagename in enumerate(imagenames):
            recs[imagename] = parse_rec(annopath.format(imagename))
            if i % 100 == 0:
                print 'Reading annotation for {:d}/{:d}'.format(
                    i + 1, len(imagenames))
        # save
        print 'Saving cached annotations to {:s}'.format(cachefile)
        with open(cachefile, 'w') as f:
            cPickle.dump(recs, f)
    else:
        # load cached annotations
        with open(cachefile, 'r') as f:
            recs = cPickle.load(f)

    # -- Extract the ground-truth boxes of `classname` per image ----------
    class_recs = {}
    npos = 0  # number of non-difficult GT boxes (recall denominator)
    for imagename in imagenames:
        R = [obj for obj in recs[imagename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
        det = [False] * len(R)  # per-GT "already matched" flags
        npos = npos + sum(~difficult)
        class_recs[imagename] = {'bbox': bbox,
                                 'difficult': difficult,
                                 'det': det}

    # -- Read the detections and sort them by descending confidence -------
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]

    # -- Greedily mark each detection as true or false positive -----------
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        # image_ids carry an ".xml" suffix while class_recs keys do not,
        # hence the split('.') here.
        R = class_recs[(image_ids[d].split('.'))[0]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)
        if BBGT.size > 0:
            # compute IoU overlaps: intersection ...
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            # ... over union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        if ovmax > ovthresh:
            # "difficult" GT boxes neither count as hits nor misses.
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # GT box already matched by a higher-scoring detection.
                    fp[d] = 1.
        else:
            fp[d] = 1.

    # -- Accumulate precision/recall and compute AP -----------------------
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    if (npos == 0):
        npos = 1e-14  # avoid division by zero when there is no GT
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    return rec, prec, ap
# ===================================================
def do_mAP_eval(setNeg, gtxmlPath, resultxmlPath):
if setNeg == '0':
imagesetfile, gtxmlPath , resultxmlPath = check_neg(gtxmlPath, resultxmlPath)
else:
imagesetfile, gtxmlPath, resultxmlPath = add_neg(gtxmlPath, resultxmlPath)
annopath = gtxmlPath + '/{:s}.xml' # 此处方法
# cachedir = os.path.join(self._devkit_path, 'annotations_cache')
cachedir = os.path.join(os.path.dirname(gtxmlPath), 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
# use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
# self_classes = ('__background__', 'spray', 'hammer', 'knife')
classes_dict = cleandata(gtxmlPath)
classes_dict_ref = cleandata(resultxmlPath)
self_classes = classes_dict.keys()
self_classes.append('__background__')
# 此处需要调用cleandata脚本去求出classes矩阵,在类别矩阵需要添加“_background_”
# self_classes = cleanData(gtxmlPath,resultxmlPath)
dict_mAP = {}
for i, cls in enumerate(self_classes): # 此处需要调用classes
if cls == '__background__':
continue
# filename = self._get_voc_results_file_template().format(cls)
filename = savetxt_extractxml(cls, resultxmlPath)
clsFilePath = os.path.dirname(resultxmlPath) + '/tmpcompTxt' + '/' + cls + '.txt'
if osp.exists(clsFilePath)==False or len(open(filename).readlines())==0:
# rec = 0
# prec = 0
ap = 0
else:
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
dict_mAP[cls] = ap
print('Mean AP = {:.4f}'.format(np.mean(aps)))
# dict_mAP['Mean AP'] = np.mean(aps)
mAP_output = os.path.dirname(gtxmlPath) + '/mAP_output' + '.txt'
mAP_os = open(mAP_output, 'w+')
for key, value in dict_mAP.items():
mAP_os.write('AP for: ' + key + ':' + ('%.4f' % value))
mAP_os.write('\n')
mAP_os.write('Mean AP: ' + ('%.4f' % np.mean(aps)))
mAP_os.close()
print('--------------------------------------------------------------')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('--------------------------------------------------------------')
print('')
print('--------------------------------------------------------------')
if osp.exists(cachedir):
# os.removedirs(cachedir)
shutil.rmtree(cachedir)
# if osp.exists(os.path.dirname(resultxmlPath) + '/tmpcompTxt'):
# # os.removedirs(os.path.dirname(resultxmlPath) + '/tmpcompTxt')
# shutil.rmtree(os.path.dirname(resultxmlPath) + '/tmpcompTxt')
if setNeg == '0':
# os.removedirs(resultxmlPath)
shutil.rmtree(resultxmlPath)
# else:
# os.removedirs(gtxmlPath)
shutil.rmtree(gtxmlPath)
# =============================================
# Usage: python func_mAP_v2.py <setNeg> <gt_xml_dir> <result_xml_dir>
# (call do_mAP_eval(setNeg, gt_path, result_path) to print/write results)
if __name__ == '__main__':
    if len(sys.argv) == 4:
        setNeg = sys.argv[1]
        gtxmlPath = sys.argv[2]
        resultxmlPath = sys.argv[3]
        do_mAP_eval(setNeg, gtxmlPath, resultxmlPath)
        print("done!")
# ==============================================
| [
"noreply@github.com"
] | noreply@github.com |
207c707157fd441286ecf9952084a3c11def6be1 | 9c8fdfa389eaaf2df4c8ba0e3072d94671b5a622 | /0163. Missing Ranges.py | dbf13be4a24913568795bb380bbbac50fd487f69 | [] | no_license | aidardarmesh/leetcode2 | 41b64695afa850f9cc7847158abb6f2e8dc9abcd | 4cf03307c5caeccaa87ccce249322bd02397f489 | refs/heads/master | 2023-02-27T11:22:09.803298 | 2021-02-07T06:47:35 | 2021-02-07T06:47:35 | 264,491,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | from typing import *
class Solution:
    def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:
        """Return the missing ranges of [lower, upper] not covered by nums.

        Each gap of exactly one value is rendered as "v"; a longer gap as
        "a->b".  `nums` is assumed sorted and is not modified.
        """
        missing = []
        prev = lower - 1
        # Sentinel upper+1 lets the final gap be handled by the same logic.
        for cur in nums + [upper + 1]:
            gap = cur - prev
            if gap == 2:
                missing.append(str(prev + 1))
            elif gap > 2:
                missing.append(str(prev + 1) + '->' + str(cur - 1))
            prev = cur
        return missing
| [
"darmesh.aidar@gmail.com"
] | darmesh.aidar@gmail.com |
f572f19251815ab1c976e75b0a87211c5c643151 | 74686d2f8f66ac9f69f5f0240cfaa786738b0efd | /mysite/cinema/tst.py | 263b83b0a722cf95ff1775762cde074931e54d81 | [] | no_license | ColumbusCoders/Tamilsite | fc8c0051e2300f3aab4a7fcb28251ca4985d1215 | 9bbd251cabf6fcfde0027e06cb5a2f2a61f413c2 | refs/heads/master | 2020-03-28T11:40:38.901298 | 2018-09-11T23:34:37 | 2018-09-11T23:34:37 | 148,238,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
# Ad-hoc smoke test: parse the configured cinema feeds via the shared helpers.
import sys

# Make the project package importable when run from this directory.
# NOTE(review): hard-coded absolute path -- only works on the author's machine.
sys.path.append("/Users/saravananveeramani/Coding/pythonprj/mysite")
# print (sys.path)
import common as f1

# Example RSS feed URL; unused below -- f1.cinema_urls is what gets parsed.
arr = ["http://www.espncricinfo.com/rss/content/story/feeds/6.xml"]
t=f1.GetParseResults(f1.cinema_urls);
print t
| [
"43099277+ColumbusCoders@users.noreply.github.com"
] | 43099277+ColumbusCoders@users.noreply.github.com |
4f881705ead8533f4b36e3d548cee5f34fd1eb24 | ca3563857ea6cfa40125eeb64a582249f20b0e73 | /pages/base_page.py | aaf66bb97655a8efc603034df270c819b17765c6 | [] | no_license | l3sombre/final_project_stepik | 6ffb6c053aab1d7c5a8565e58198e801f3d0f460 | 80a6b903754371a8fc66fe006ee013a0b9143251 | refs/heads/main | 2023-02-16T18:51:41.577462 | 2021-01-17T16:27:05 | 2021-01-17T16:27:05 | 320,839,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,622 | py | from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from .locators import BasePageLocators
import math
class BasePage():
    """Common base for all page objects: navigation, presence checks, helpers."""

    def __init__(self, browser, url, timeout=4):
        self.browser = browser
        self.url = url
        # Implicit wait applies to every find_element call on this browser.
        self.browser.implicitly_wait(timeout)

    def go_to_login_page(self):
        """Click the login link in the page header."""
        link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
        link.click()

    def go_to_basket_page(self):
        """Open the basket via the header link."""
        link = self.browser.find_element(*BasePageLocators.BASKET_LINK)
        link.click()

    def is_disappeared(self, how, what, timeout=4):
        """Return True if the element disappears within `timeout` seconds."""
        try:
            # Poll every 1s, ignoring TimeoutException between polls.
            WebDriverWait(self.browser, timeout, 1, TimeoutException).\
                until_not(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return False
        return True

    def is_element_present(self, how, what):
        """Return True if the element can be found (honours the implicit wait)."""
        try:
            self.browser.find_element(how, what)
        except (NoSuchElementException):
            return False
        return True

    def is_not_element_present(self, how, what, timeout=4):
        """Return True if the element does NOT appear within `timeout` seconds."""
        try:
            WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
        except TimeoutException:
            return True
        return False

    def open(self):
        """Navigate the browser to this page's URL."""
        self.browser.get(self.url)

    def should_be_login_link(self):
        assert self.is_element_present(*BasePageLocators.LOGIN_LINK), \
            "Login link is not presented."

    def should_be_basket_link(self):
        assert self.is_element_present(*BasePageLocators.BASKET_LINK), \
            "Basket link is not presented."

    def should_be_authorized_user(self):
        assert self.is_element_present(*BasePageLocators.USER_ICON), \
            "User icon is not presented, probably unauthorised user"

    def solve_quiz_and_get_code(self):
        """Solve the math-quiz alert and print the returned code (Stepik task)."""
        WebDriverWait(self.browser, 3).until(EC.alert_is_present())
        alert = self.browser.switch_to.alert
        # The alert text has the operand as its third word.
        x = alert.text.split(" ")[2]
        answer = str(math.log(abs((12 * math.sin(float(x))))))
        alert.send_keys(answer)
        alert.accept()
        try:
            # A second alert carries the code on success.
            alert = self.browser.switch_to.alert
            alert_text = alert.text
            print(f"Your code: {alert_text}")
            alert.accept()
        except NoAlertPresentException:
            print("No second alert presented")
"karinhizhnyak@gmail.com"
] | karinhizhnyak@gmail.com |
9c9dbf9b2db26b8b664feaa06c4b5878f07a34c8 | 63ede8ffd8642847ef3e100218f2962ef6a4fe26 | /venv/Scripts/easy_install-3.7-script.py | 7e56a26623c52b43de8882e129e9eb548018385f | [] | no_license | Heytec/dashjuly | e406968ac111670d31ec3a7ca494b93438c76120 | add5d3c361cbcc552a99508919df1c4037fd6bb8 | refs/heads/master | 2020-06-29T22:48:16.282015 | 2019-08-05T11:57:04 | 2019-08-05T11:57:04 | 200,646,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | #!C:\Users\t\PycharmProjects\dash_graph\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"johnmuchirim@gmail.com"
] | johnmuchirim@gmail.com |
565722b55ed51c31b9185847c9602271b24781d0 | d17475288410d0f4df1c12dc7f487090c4a0731f | /Different_City_Time/Write_in_mongoDB.py | 8a7b022a5db0f243399cf01e5f088b53876fc4fc | [] | no_license | ExileSaber/Internship-training | e86ecea8dc41051d1ebaaf2733f0cf57fba1833c | b8e419663332566004100ede7b657549d8549b10 | refs/heads/master | 2020-07-03T20:01:16.443478 | 2019-08-13T03:07:34 | 2019-08-13T03:07:34 | 202,033,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | import pymongo
client = pymongo.MongoClient(host='localhost', port=27017)
def write_in_mongoDB(Time_list, keyword):
city = keyword + "房价时间分布"
db = client[city]
string = 'two_year'
collection = db[string]
flag_1 = 0
flag_2 = 0
for time in Time_list:
flag_1 = flag_1 + 1
if collection.insert_one(time):
flag_2 = flag_2 + 1
print('Time list saved to Mongo')
print('一共 ' + str(flag_1) + ' 条数据,存储成功数据条数为:' + str(flag_2))
| [
"ExileSaber@users.com"
] | ExileSaber@users.com |
7408c2daf47695470fc23b16881676211fd053f3 | 4fc3080a23a9eed7f6ac5d6f3996f9f388fb3e11 | /mysite/polls/views.py | becedb731343c57e3d8d6724d2ae486fcda7cdd8 | [] | no_license | wangbobby/PollsSystem | b249382152614039f3ea6ec75dc7ed29c8ca9b1c | d7ab8876b485f87ec81d18c334c1017b685b4a2e | refs/heads/master | 2020-03-24T05:06:54.173693 | 2018-07-26T17:52:55 | 2018-07-26T17:52:55 | 142,475,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.template import loader
from django.http import HttpResponse
from .models import Question
def index(request):
# print(request)
# print("Path: " + request.path)
# print(request.method)
# print(request.COOKIES)
# print(request.session)
# print(request.FILES)
# print(request.GET)
# print(request.POST)
# return HttpResponse("Hello, world. You're at the polls index")
"""
latest_question_list = Question.objects.order_by('-pub_date')[0:5]
# output = ', '.join([q.question_text for q in latest_question_list])
template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list
}
# return HttpResponse(output)
return HttpResponse(template.render(context, request))
"""
latest_question_list = Question.objects.order_by('-pub_date')[0:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context)
# def detail(request, question_id):
# return HttpResponse("You're looking at question %s." %question_id)
def detail(request, question_id):
print(request)
# return HttpResponse("You're looking at question %s." % question_id)
"""
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Qustion does not exist")
"""
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question})
def results(request, question_id):
print(request)
response = "You're looking at the results of question %s."
return HttpResponse(response %question_id)
def vote(request, question_id):
print(request)
return HttpResponse("You're voting on question %s." %question_id) | [
"wangyanyao@gmail.com"
] | wangyanyao@gmail.com |
aa45d1380fe3d4470534e60abe11b65beee60370 | 22ae4a4e1192ef95b3547b27bf5aa2d0dbf94da3 | /team_test.py | a3496980b16753d2e34dd3d3758afc42ba012cf1 | [] | no_license | jibryllbrink/SuperheroTeam | ae799d236d5c9faaaa5acb7e7eed7c041b8e1d2d | 3baa36133402a8004aa175bc584c8f0a7939f745 | refs/heads/master | 2020-10-02T08:26:31.382237 | 2019-12-13T11:52:16 | 2019-12-13T11:52:16 | 227,739,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,593 | py | import pytest
import io
import sys
import superheroes
import math
import random
# Helper Function
def capture_console_output(function_body):
# _io.StringIO object
string_io = io.StringIO()
sys.stdout = string_io
function_body()
sys.stdout = sys.__stdout__
return string_io.getvalue()
def create_armor():
armors = [
"Calculator",
"Laser Shield",
"Invisibility",
"SFPD Strike Force",
"Social Workers",
"Face Paint",
"Damaskus Shield",
"Bamboo Wall",
"Forced Projection",
"Thick Fog",
"Wall of Will",
"Wall of Walls",
"Obamacare",
"Thick Goo"]
name = armors[random.randint(0, len(armors) - 1)]
power = random.randint(23, 700000)
return superheroes.Armor(name, power)
def create_weapon():
weapons = [
"Antimatter Gun",
"Star Cannon",
"Black Hole Ram Jet",
"Laser Sword",
"Laser Cannon",
"Ion Accellerated Disc Drive",
"Superhuman Strength",
"Blinding Lights",
"Ferociousness",
"Speed of Hermes",
"Lightning Bolts"]
name = weapons[random.randint(0, len(weapons) - 1)]
power = random.randint(27, 700000)
return superheroes.Weapon(name, power)
def create_ability():
abilities = [
"Alien Attack",
"Science",
"Star Power",
"Immortality",
"Grandmas Cookies",
"Blinding Strength",
"Cute Kittens",
"Team Morale",
"Luck",
"Obsequious Destruction",
"The Kraken",
"The Fire of A Million Suns",
"Team Spirit",
"Canada"]
name = abilities[random.randint(0, len(abilities) - 1)]
power = random.randint(45, 700000)
return superheroes.Ability(name, power)
def build_hero(num_of_weapons=0, num_of_armor=0, num_of_abilities=0):
heroes = [
"Athena",
"Jodie Foster",
"Christina Aguilera",
"Gamora",
"Supergirl",
"Wonder Woman",
"Batgirl",
"Carmen Sandiego",
"Okoye",
"America Chavez",
"Cat Woman",
"White Canary",
"Nakia",
"Mera",
"Iris West",
"Quake",
"Wasp",
"Storm",
"Black Widow",
"San Luis Obispo",
"Ted Kennedy",
"San Francisco",
"Bananas"]
weapons = []
armors = []
for _ in range(num_of_weapons):
weapons.append(create_weapon())
for _ in range(num_of_armor):
armors.append(create_armor())
for _ in range(num_of_abilities):
weapons.append(create_ability())
name = random.choice(heroes)
hero = superheroes.Hero(name)
for item in weapons:
hero.add_ability(item)
for armor in armors:
hero.add_armor(armor)
return hero
def create_hero(max_strength=100, weapons=False, armors=False, health=False):
heroes = [
"Athena",
"Jodie Foster",
"Christina Aguilera",
"Gamora",
"Supergirl",
"Wonder Woman",
"Batgirl",
"Carmen Sandiego",
"Okoye",
"America Chavez",
"Cat Woman",
"White Canary",
"Nakia",
"Mera",
"Iris West",
"Quake",
"Wasp",
"Storm",
"Black Widow",
"San Luis Obispo",
"Ted Kennedy",
"San Francisco",
"Bananas"]
name = heroes[random.randint(0, len(heroes) - 1)]
if health:
power = health
else:
power = random.randint(3, 700000)
hero = superheroes.Hero(name, power)
if weapons and armors:
for weapon in weapons:
hero.add_ability(weapon)
for armor in armors:
hero.add_armor(armor)
if armors and not weapons:
for armor in armors:
hero.add_armor(armor)
return hero
def create_team(heroes=[]):
teams = [
"Orchids",
"Red",
"Blue",
"Cheese Steaks",
"Warriors",
"49ers",
"Marvel",
"DC",
"Rat Pack",
"The Little Red Riding Hoods",
"Team One",
"Generic Team",
"X-men",
"Team Two",
"Golden Champions",
"Vegan Protectors",
"The Cardinals",
"Winky Bears",
"Steelsmiths",
"Boilermakers",
"Nincompoops"]
name = teams[random.randint(0, len(teams) - 1)]
team = superheroes.Team(name)
if len(heroes) > 0:
for member in heroes:
team.add_hero(member)
return team
def create_set():
armor_pieces = random.randint(1, 300)
weapon_pieces = random.randint(1, 300)
ability_ct = random.randint(1, 300)
armors = []
abilities = []
for _ in range(0, armor_pieces):
armors.append(create_armor())
for _ in range(0, weapon_pieces):
abilities.append(create_weapon())
for _ in range(0, ability_ct):
abilities.append(create_ability())
hero_set = {'weapons': abilities, 'armors': armors}
return hero_set
# def test_armor():
# armor = superheroes.Hero("The Ring", 200)
# for _ in range(0, 500):
# defense = armor.defend()
# assert (defense <= 200 and defense >= 0)
def test_hero_default_health():
jodie = superheroes.Hero("Jodie Foster")
assert jodie.current_health == 100
def test_hero_init_new_health():
hero = superheroes.Hero("Jodie Foster", 600)
assert hero.current_health == 600
def test_hero_start_health():
hero = superheroes.Hero("Jodie Foster", 300)
assert hero.starting_health == 300
def test_hero_defense():
jodie = superheroes.Hero("Jodie Foster")
gauntlets = superheroes.Armor("Gauntlets", 30)
jodie.add_armor(gauntlets)
defense = jodie.defend(10)
assert defense >= 0 and defense <= 30
def test_hero_defense_mean_value():
athena = superheroes.Hero("Athena")
strength = random.randint(400, 30000)
big_strength = superheroes.Armor("Overwhelming Shield", strength)
athena.add_armor(big_strength)
calculated_mean = strength // 2
iterations = 8000
total_attack = 0
accepted_window = 400
for _ in range(iterations):
attack_value = athena.defend()
assert attack_value >= 0 and attack_value <= strength
total_attack += attack_value
actual_mean = total_attack / iterations
print("Max Allowed: {}".format(strength))
print("Defenses Tested: {}".format(iterations))
print("Mean -- calculated: {} | actual: {}".format(calculated_mean, actual_mean))
print(
"Acceptable deviation from mean: {} | Current deviation from mean: {}".format(
accepted_window, abs(
calculated_mean - actual_mean)))
print(
"Acceptable Min: {} | Acceptable Max: {}".format(
actual_mean -
accepted_window,
actual_mean +
accepted_window))
assert actual_mean <= calculated_mean + \
accepted_window and actual_mean >= calculated_mean - accepted_window
def test_hero_defense_standard_deviation():
willow_waffle = superheroes.Hero("Willow Waffle")
strength = random.randint(400, 30000)
willow = superheroes.Armor("Willowness", strength)
willow_waffle.add_armor(willow)
defenses = list()
total_defend = 0
number_tests = 100
for _ in range(number_tests):
defense = willow_waffle.defend()
defenses.append(defense)
total_defend += defense
mean = total_defend / number_tests
# Get Square Deviations
for index, value in enumerate(defenses):
defenses[index] = math.pow(value - mean, 2)
standard_dev = math.sqrt(sum(defenses) / len(defenses))
print("Hero Armor must block with random value.")
print("Standard Deviation Cannot be 0.")
assert standard_dev != 0.0
def test_dead_hero_defense():
hero = superheroes.Hero("Vlaad", 0)
garlic = superheroes.Armor("Garlic", 30000)
hero.add_ability(garlic)
assert hero.defend() == 0
def test_hero_equip_armor():
jodie = superheroes.Hero("Jodie Foster")
gauntlets = superheroes.Armor("Gauntlets", 30)
jodie.add_armor(gauntlets)
assert len(jodie.armors) == 1
assert jodie.armors[0].name == "Gauntlets"
def test_hero_defend_multi_armor():
jodie = superheroes.Hero("Jodie Foster")
gauntlets = superheroes.Armor("Gauntlets", 4000)
science = superheroes.Armor("Science", 9000)
jodie.add_armor(gauntlets)
jodie.add_armor(science)
defend = jodie.defend()
assert defend <= 13000 and defend >= 0
# Test Team
def test_team_attack():
team_one = superheroes.Team("One")
jodie = superheroes.Hero("Jodie Foster")
aliens = superheroes.Ability("Alien Friends", 10000)
jodie.add_ability(aliens)
team_one.add_hero(jodie)
team_two = superheroes.Team("Two")
athena = superheroes.Hero("Athena")
socks = superheroes.Armor("Socks", 10)
athena.add_armor(socks)
team_two.add_hero(athena)
assert team_two.heroes[0].current_health == 100
team_one.attack(team_two)
assert team_two.heroes[0].current_health <= 0
def test_team_attack_kills():
team_one = superheroes.Team("One")
jodie = superheroes.Hero("Jodie Foster")
aliens = superheroes.Ability("Alien Friends", 10000)
jodie.add_ability(aliens)
team_one.add_hero(jodie)
team_two = superheroes.Team("Two")
athena = superheroes.Hero("Athena")
socks = superheroes.Armor("Socks", 10)
athena.add_armor(socks)
team_two.add_hero(athena)
assert team_one.heroes[0].kills == 0
team_one.attack(team_two)
assert team_one.heroes[0].kills == 1
def test_team_attack_deaths():
team_one = superheroes.Team("One")
jodie = superheroes.Hero("Jodie Foster")
aliens = superheroes.Ability("Alien Friends", 10000)
jodie.add_ability(aliens)
team_one.add_hero(jodie)
team_two = superheroes.Team("Two")
athena = superheroes.Hero("Athena")
socks = superheroes.Armor("Socks", 10)
athena.add_armor(socks)
team_two.add_hero(athena)
assert team_two.heroes[0].deaths == 0
team_one.attack(team_two)
assert team_two.heroes[0].deaths == 1
def test_revive_heroes():
heroes = []
for _ in range(0, 20):
heroes.append(build_hero(4, 4, 4))
team_one = superheroes.Team("One")
for hero in heroes:
team_one.add_hero(hero)
for hero in team_one.heroes:
hero.current_health == 12
team_one.revive_heroes()
for hero in team_one.heroes:
assert hero.current_health == 100 | [
"jibryllbrink@icloud.com"
] | jibryllbrink@icloud.com |
a18589eb19e3dc70f7ea7d74014f35fe4416b413 | 78bbaf083b39cb72a35e23a4517a40a899f05ba0 | /documents/lambda/sop/change_concurrency_limit/2020-10-26/Tests/step_defs/test_change_concurrency_limit.py | 38936686717517e9139639a88c1103c95dc713e9 | [
"MIT-0"
] | permissive | esabitova/aws-digito-artifacts-gameday | c2babc284e68a00eca71d168a6a30bfdf293111c | 9aff43cc0551066ca78af06ad879556bcf04c5c6 | refs/heads/master | 2023-08-25T19:06:28.191717 | 2021-09-30T17:54:16 | 2021-09-30T17:54:16 | 406,305,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | # coding=utf-8
"""SSM automation document to increase Lambda memory size"""
from pytest_bdd import (
scenario
)
@scenario('../features/change_concurrency_limit.feature', 'Change Concurrency limit of Lambda Function')
def test_change_concurrency_limit():
"""Create AWS resources using CloudFormation template and execute SSM automation document."""
@scenario('../features/change_concurrency_limit.feature', 'Set Concurrency limit out of account limits')
def test_concurrency_limit_out_of_quota():
"""Create AWS resources using CloudFormation template and execute SSM automation document."""
| [
"semiond@amazon.com"
] | semiond@amazon.com |
0fd00087bbe6ec945db73332b6cad077f02cef83 | 2359121ebcebba9db2cee20b4e8f8261c5b5116b | /configs_pytorch/f113-f10_6_pt.py | 473e694402d9b971c6aaf8839943d7c3313f54aa | [] | no_license | EliasVansteenkiste/plnt | 79840bbc9f1518c6831705d5a363dcb3e2d2e5c2 | e15ea384fd0f798aabef04d036103fe7af3654e0 | refs/heads/master | 2021-01-20T00:34:37.275041 | 2017-07-20T18:03:08 | 2017-07-20T18:03:08 | 89,153,531 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,362 | py |
#copy of j25
import numpy as np
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.init
import torch.nn.functional as F
import math
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
p_transform = {'patch_size': (256, 256),
'channels': 4,
'n_labels': 17}
p_augmentation = {
'rot90_values': [0, 1, 2, 3],
'flip': [0, 1]
}
channel_zmuv_stats = {
'avg': [4970.55, 4245.35, 3064.64, 6360.08],
'std': [1785.79, 1576.31, 1661.19, 1841.09]}
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
x = np.array(x,dtype=np.float32)
x = data_transforms.channel_zmuv(x, img_stats=channel_zmuv_stats, no_channels=4)
x = data_transforms.random_lossless(x, p_augmentation, rng)
return x
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
x = np.array(x, dtype=np.float32)
x = data_transforms.channel_zmuv(x, img_stats=channel_zmuv_stats, no_channels=4)
return x
def label_prep_function(x):
#cut out the label
return x
# data iterators
batch_size = 32
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
folds = app.make_stratified_split(no_folds=10)
#for checking if folds are equal over multiple config files
for fold in folds:
print sum(fold)
train_ids = folds[1] + folds[2] + folds[3] + folds[4] + folds[5] + folds[0] + folds[7] + folds[8] + folds[9]
valid_ids = folds[6]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4] + folds[5] + folds[6] + folds[7] + folds[8] + folds[9]
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
test_ids = np.arange(40669)
test2_ids = np.arange(20522)
train_data_iterator = data_iterators.DataGenerator(dataset='train',
batch_size=chunk_size,
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=True, infinite=True)
feat_data_iterator = data_iterators.DataGenerator(dataset='train',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
valid_data_iterator = data_iterators.DataGenerator(dataset='train',
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test',
batch_size=chunk_size,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2',
batch_size=chunk_size,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
import tta
tta = tta.LosslessTTA(p_augmentation)
tta_test_data_iterator = data_iterators.TTADataGenerator(dataset='test',
tta = tta,
duplicate_label = False,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_test2_data_iterator = data_iterators.TTADataGenerator(dataset='test2',
tta = tta,
duplicate_label = False,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
tta_valid_data_iterator = data_iterators.TTADataGenerator(dataset='train',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
tta_train_data_iterator = data_iterators.TTADataGenerator(dataset='train',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
tta_all_data_iterator = data_iterators.TTADataGenerator(dataset='train',
tta = tta,
duplicate_label = True,
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=True, infinite=False)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 60
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(10 * nchunks_per_epoch)
learning_rate_schedule = {
0: 5e-2,
int(max_nchunks * 0.2): 2e-2,
int(max_nchunks * 0.4): 1e-2,
int(max_nchunks * 0.6): 3e-3,
int(max_nchunks * 0.8): 1e-3
}
# model
from collections import OrderedDict
class MyDenseNet(nn.Module):
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):
super(MyDenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(4, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
('norm0', nn.BatchNorm2d(num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock
num_features = num_init_features
self.blocks = []
final_num_features = 0
for i, num_layers in enumerate(block_config):
block = torchvision.models.densenet._DenseBlock(num_layers=num_layers, num_input_features=num_features,
bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module('denseblock%d' % (i + 1), block)
self.blocks.append(block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = torchvision.models.densenet._Transition(num_input_features=num_features, num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
self.classifier_drop = nn.Dropout(p=0.5)
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
def forward(self, x, feat=False):
features = self.features(x)
out = F.relu(features, inplace=True)
out = self.classifier_drop(out)
out = F.avg_pool2d(out, kernel_size=7).view(features.size(0), -1)
if feat:
return out
out = self.classifier(out)
return out
def my_densenet121(pretrained=False, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = MyDenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16))
if pretrained:
model.load_state_dict(torch.utils.model_zoo.load_url(torchvision.models.densenet.model_urls['densenet121']))
return model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.densenet = my_densenet121(pretrained=False)
self.densenet.apply(weight_init)
self.densenet.classifier = nn.Linear(self.densenet.classifier.in_features, p_transform["n_labels"])
self.densenet.classifier.weight.data.zero_()
def forward(self, x, feat=False):
if feat:
return self.densenet(x,feat)
else:
x = self.densenet(x)
return F.sigmoid(x)
def weight_init(m):
if isinstance(m,nn.Conv2d):
m.weight.data=nn.init.orthogonal(m.weight.data)
def build_model():
net = Net()
return namedtuple('Model', [ 'l_out'])( net )
# loss
class MultiLoss(torch.nn.modules.loss._Loss):
def __init__(self, weight):
super(MultiLoss, self).__init__()
self.weight = weight
def forward(self, input, target):
torch.nn.modules.loss._assert_no_grad(target)
weighted = (self.weight*target)*(input-target)**2 +(1-target)*(input-target)**2
return torch.mean(weighted)
def build_objective():
return MultiLoss(5.0)
def build_objective2():
return MultiLoss(1.0)
def score(gts, preds):
return app.f2_score_arr(gts, preds)
# updates
def build_updates(model, learning_rate):
return optim.SGD(model.parameters(), lr=learning_rate,momentum=0.9,weight_decay=0.0002)
| [
"frederic.godin@ugent.be"
] | frederic.godin@ugent.be |
6a4612a06a9e5bff435c71ff64c1536fa7cdd6fc | d25f2c9baac039664eb39843048ad02a1a6a1084 | /DANN_py3-master/main.py | 48fc26d6f68d8a7d6b3a2dbc2b1eef377b77c66c | [
"MIT"
] | permissive | Jinsung-Jeon/DomainAdaptation | 3c58ab5a95190a00fd9611c2656cd3a62ddc26f6 | b11d186b02ba206b7d20e550cc5d8ce65ede9348 | refs/heads/master | 2021-05-18T14:54:09.510102 | 2020-04-24T16:39:29 | 2020-04-24T16:39:29 | 251,289,059 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,748 | py | import random
import os
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from data_loader import GetLoader
from torchvision import datasets
from torchvision import transforms
from model import CNNModel
import numpy as np
from test import test
source_dataset_name = 'CIFAR10'
target_dataset_name = 'STL10'
source_image_root = os.path.join('dataset', source_dataset_name)
target_image_root = os.path.join('dataset', target_dataset_name)
model_root = 'models'
cuda = True
cudnn.benchmark = True
lr = 1e-3
batch_size = 128
image_size = 32
n_epoch = 50
manual_seed = random.randint(1, 10000)
random.seed(manual_seed)
torch.manual_seed(manual_seed)
# load data
img_transform_source = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2430, 0.2610))
])
img_transform_target = transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
transforms.Normalize((0.4467, 0.4398, 0.4066), (0.2603, 0.2565, 0.2712))
])
dataset_source = datasets.CIFAR10(
root='dataset',
train=True,
transform=img_transform_source,
download=True
)
from modify_cifar_stl import modify_cifar
modify_cifar(dataset_source)
dataloader_source = torch.utils.data.DataLoader(
dataset=dataset_source,
batch_size=batch_size,
shuffle=True,
num_workers=0)
train_list = os.path.join(target_image_root, 'svhn_train_labels.txt')
dataset_target = datasets.STL10(
root='dataset',
transform=img_transform_target,
download=True
)
from modify_cifar_stl import modify_stl
modify_stl(dataset_target)
dataloader_target = torch.utils.data.DataLoader(
dataset=dataset_target,
batch_size=batch_size,
shuffle=True,
num_workers=0)
# load model
my_net = CNNModel()
# setup optimizer
optimizer = optim.Adam(my_net.parameters(), lr=lr)
loss_class = torch.nn.NLLLoss()
loss_domain = torch.nn.NLLLoss()
if cuda:
my_net = my_net.cuda()
loss_class = loss_class.cuda()
loss_domain = loss_domain.cuda()
for p in my_net.parameters():
p.requires_grad = True
# training
for epoch in range(n_epoch):
len_dataloader = min(len(dataloader_source), len(dataloader_target))
data_source_iter = iter(dataloader_source)
data_target_iter = iter(dataloader_target)
i = 0
while i < len_dataloader:
p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader
#alpha = 2. / (1. + np.exp(-10 * p)) - 1
alpha = 1
# training model using source data
data_source = data_source_iter.next()
s_img, s_label = data_source
my_net.zero_grad()
batch_size = len(s_label)
input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)
class_label = torch.LongTensor(batch_size)
domain_label = torch.zeros(batch_size)
domain_label = domain_label.long()
if cuda:
s_img = s_img.cuda()
s_label = s_label.cuda()
input_img = input_img.cuda()
class_label = class_label.cuda()
domain_label = domain_label.cuda()
input_img.resize_as_(s_img).copy_(s_img)
class_label.resize_as_(s_label).copy_(s_label)
class_output, domain_output = my_net(input_data=input_img, alpha=alpha)
err_s_label = loss_class(class_output, class_label)
err_s_domain = loss_domain(domain_output, domain_label)
# training model using target data
data_target = data_target_iter.next()
t_img, _ = data_target
batch_size = len(t_img)
input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)
domain_label = torch.ones(batch_size)
domain_label = domain_label.long()
if cuda:
t_img = t_img.cuda()
input_img = input_img.cuda()
domain_label = domain_label.cuda()
input_img.resize_as_(t_img).copy_(t_img)
_, domain_output = my_net(input_data=input_img, alpha=alpha)
err_t_domain = loss_domain(domain_output, domain_label)
err = err_t_domain + err_s_domain + err_s_label
err.backward()
optimizer.step()
i += 1
print ('epoch: %d [iter: %d / all %d], err_s_label: %f, err_s_domain: %f, err_t_domain: %f' \
% (epoch, i, len_dataloader, err_s_label.data.cpu().numpy(),
err_s_domain.data.cpu().numpy(), err_t_domain.data.cpu().item()))
print('lets save')
torch.save(my_net, '{0}/cifar_stl_model_epoch_{1}.pth'.format(model_root, epoch))
test(source_dataset_name, epoch)
print('source done!')
test(target_dataset_name, epoch)
print('target done!')
print('done')
| [
"jjsjjs0902@naver.com"
] | jjsjjs0902@naver.com |
4a84f62d878637adbdc7231f34f39011cb2eb011 | 5563fc38a479bf31b158e22ad381bcc1ef6677df | /triangles.py | cac783538a7e501568406903122530725b621395 | [] | no_license | MonRes/tester_school_day5 | e6a1d84bc32342e0e03061208458581ac4357f59 | 985fdb344bf7009c4ba3cd50910ba6b9b9fa172e | refs/heads/master | 2020-03-19T05:13:38.891646 | 2018-06-03T14:30:07 | 2018-06-03T14:30:07 | 135,911,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | a = 2
b = 4
c = 4
if a>0 and b>0 and c>0:
if a + b > c and a + c > b and b + c > a:
print ("da się utworzyć trójkąt")
else:
print ("nie da się")
else:
print("nie da się")
#lub preferowana wersja
if a <= 0 or b <= 0 or c <= 0:
print ('nie da się utworzyć trójkąta - któras długość jest ujemna')
elif a + b > c and a + c > b and b + c > a:
print ('Da się utworzyć trójkąt')
else:
print ('nie da się utworzyć trójkąta')
#mozna z powtarzającego się warunku utworzyć zmienną np. length_negative = a <= 0 or b<= 0 c <= 0 | [
"Restek87@gmail.com"
] | Restek87@gmail.com |
06a768b10284ec7d0ca364d50ef7abfd9a2060ff | 358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e | /fonts/wonder16_8x16.py | ff96b7c5170caead9f8c94e725a350e50d913b60 | [
"MIT"
] | permissive | ccccmagicboy/st7735_mpy | d2de0046abd81978d5176dace45a40758377af82 | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | refs/heads/master | 2022-08-28T23:18:04.353733 | 2020-05-28T04:19:21 | 2020-05-28T04:19:21 | 254,869,035 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | """converted from ..\fonts\WONDER16__8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x3c\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x36\x36\x36\x36\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x6c\x6c\xfe\x6c\x6c\xfe\x6c\x6c\x6c\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x7c\xc6\xc0\x78\x3c\x06\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x62\x66\x0c\x18\x30\x66\xc6\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x38\x30\x76\x7e\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x0c\x0c\x0c\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\x38\xfe\x38\x6c\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xd6\xd6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x78\x18\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x06\x0c\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xcc\xfe\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x0c\x18\x30\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\x7e\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x0c\x0c\x00\x00\x0c\x0c\x0c\x18\x00\x00\x00'\
b'\x00\x00\x00\x0c\x18\x30\x60\xc0\x60\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\xfe\x00\xfe\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7e\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x60\x7c\x60\x60\x60\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x60\x60\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc0\xc0\xc0\xce\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\xd8\xd8\x70\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xcc\xd8\xf0\xf0\xd8\xcc\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xee\xee\xfe\xd6\xd6\xd6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xe6\xe6\xf6\xde\xce\xce\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x66\x7c\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xd6\x7c\x06\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc0\xc0\x70\x1c\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x5a\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x38\x6c\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\x60\x60\x60\x60\x60\x60\x60\x60\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x80\xc0\x60\x30\x18\x0c\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x7c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x7c\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff'\
b'\x00\x18\x18\x18\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xdc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x7c\xcc\xcc\xcc\xcc\xcc\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xfe\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x36\x30\x30\xfc\x30\x30\x30\x30\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xce\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\xe0\x60\x60\x7c\x66\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x0c\x00\x1c\x0c\x0c\x0c\x0c\x0c\xcc\xcc\x78\x00\x00'\
b'\x00\x00\xe0\x60\x60\x66\x66\x6c\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x6c\xfe\xd6\xd6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x7c\x60\x60\xf0\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\x7c\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x30\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xce\x76\x06\xc6\x7c\x00\x00'\
b'\x00\x00\x00\x00\x00\xfe\x86\x0c\x18\x30\x62\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x10\x38\x38\x6c\x6c\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| [
"cuiwei_cv@163.com"
] | cuiwei_cv@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.