blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f960cda94d1d083d78b03e0ba5ad898c559bd3a
|
afbaa5685bf737ec7d16fee2bab54ae13caf96f9
|
/geekbang/core/11.py
|
15eb00956ae41705b7004b30efad678165ef228f
|
[] |
no_license
|
ykdsg/myPython
|
9dcc9afe6f595e51b72257875d66ada1ba04bba6
|
77d2eaa2acb172664b632cc2720cef62dff8f235
|
refs/heads/master
| 2023-06-10T20:11:08.061075
| 2023-06-03T11:39:53
| 2023-06-03T11:39:53
| 10,655,956
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,487
|
py
|
class Document:
    """A book-like document holding a title, an author and a private context.

    Demonstrates the three method kinds: instance method, classmethod
    factory and staticmethod helper.
    """

    WELCOME_STR = 'Welcome! The context for this book is {}.'

    def __init__(self, title, author, context):
        print('init function called')
        self.title = title
        self.author = author
        self.__context = context

    @classmethod
    def create_empty_book(cls, title, author):
        """Alternate constructor: a document whose context is 'nothing'."""
        return cls(title=title, author=author, context='nothing')

    def get_context_length(self):
        """Return the number of characters in the private context."""
        return len(self.__context)

    @staticmethod
    def get_welcome(context):
        """Render the class-level welcome template with *context*."""
        return Document.WELCOME_STR.format(context)
# Demo: build a book via the classmethod factory, then exercise the
# instance method and the static helper.
empty_book = Document.create_empty_book("hello", "world")
print(empty_book.get_context_length())
print(empty_book.get_welcome('indeed nothing'))
from abc import ABCMeta, abstractmethod
class Entity(metaclass=ABCMeta):
    """Abstract interface: anything that exposes a gettable/settable title.

    Cannot be instantiated directly; subclasses must implement both
    abstract methods.
    """

    @abstractmethod
    def get_title(self):
        """Return the current title."""

    @abstractmethod
    def set_title(self, title):
        """Replace the current title."""
class Document(Entity):
    # Concrete Entity: stores the title as a plain instance attribute.
    # NOTE: shadows the earlier `Document` class defined above in this file.
    def get_title(self):
        """Return the stored title (AttributeError if never set)."""
        return self.title
    def set_title(self, title):
        """Store *title* on the instance."""
        self.title = title
document = Document()
document.set_title('Harry Potter')
print(document.get_title())
# This line raises TypeError on purpose: an abstract class cannot be instantiated.
entity = Entity()
|
[
"17173as@163.com"
] |
17173as@163.com
|
74360cfb5c22b03c17d47ddf3d113185fd4e08c0
|
10c865b3c0c88ad213cb728a98b1fcf132bb067f
|
/chapter_two/add.py
|
ae92b4daf55ef1fd126ebd2c1de55eac68387075
|
[] |
no_license
|
selcukcihan/crackingthecodinginterview.py
|
386820aa8493a2f11e31db703e8b281af2b1548a
|
a005818cae710d5a0448fa7efbe6376c4f5ae587
|
refs/heads/master
| 2021-01-21T13:21:16.910239
| 2018-01-20T20:46:57
| 2018-01-20T20:46:57
| 53,852,089
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
#! /usr/bin/env python
"""
You have two numbers represented by a linked list, where each node contains a single
digit. The digits are stored in reverse order, such that the 1's digit is at the head of
the list. Write a function that adds the two numbers and returns the sum as a linked list
EXAMPLE
Input: (3 -> 1 -> 5) + (5 -> 9 -> 2)
Output: 8 -> 0 -> 8
"""
from collections import deque
class Node:
    """One element of a singly linked list; the payload lives in *char*."""

    def __init__(self, char):
        # Payload value; the next pointer starts detached.
        self.char = char
        self.next = None
class LinkedList:
    """Minimal singly linked list with O(1) tail append."""

    def __init__(self):
        self.head = None
        self.tail = None

    def append(self, char):
        """Append a node holding *char*; returns self so calls can chain."""
        node = Node(char)
        if self.tail is None:
            # First element: head and tail are the same node.
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node
        return self

    def __str__(self):
        """Render the list head-to-tail as 'a --> b --> c'."""
        parts = []
        node = self.head
        while node is not None:
            parts.append(str(node.char))
            node = node.next
        return " --> ".join(parts)
def add(lst1, lst2):
    """Add two numbers stored as linked lists of digits, least-significant first.

    Returns a new LinkedList holding the digits of the sum, also
    least-significant first.

    Fixes vs. the original:
    - the leftover-digit branch was an `if`, so when one operand had two
      or more extra digits all but the first were silently dropped;
      it is now a `while` loop.
    - `carry = digit / 10` is now floor division (`//`), which is the
      intended integer carry on Python 3 as well as Python 2.
    """
    lst = LinkedList()
    p1 = lst1.head
    p2 = lst2.head
    carry = 0
    # Add digit pairs while both lists still have nodes.
    while p1 is not None and p2 is not None:
        digit = p1.char + p2.char + carry
        lst.append(digit % 10)
        carry = digit // 10
        p1 = p1.next
        p2 = p2.next
    # Consume whichever list still has digits left, propagating the carry.
    p = p1 if p1 is not None else p2
    while p is not None:
        digit = p.char + carry
        lst.append(digit % 10)
        carry = digit // 10
        p = p.next
    # A final carry becomes a new most-significant digit.
    if carry > 0:
        lst.append(carry)
    return lst
def test_run():
    # Demo (Python 2 print statements): lists store digits least-significant
    # first, so lst1 is 815 and lst2 is 17; both sums should show 832.
    lst1 = LinkedList()
    lst1.append(5).append(1).append(8)
    lst2 = LinkedList()
    lst2.append(7).append(1)
    print lst1
    print lst2
    lst = add(lst1, lst2)
    print lst
    lst = add(lst2, lst1)
    print lst
|
[
"SELCUKCI@CIHANS.intertech.denizbank.com"
] |
SELCUKCI@CIHANS.intertech.denizbank.com
|
5ede0c559bc0d229d11d19c4d07cc1890aedec27
|
09e7b3049d7ff5baa392864eee96c5ebf99655af
|
/cut_stop_process.py
|
7373bb2017ce441bd9b810b1e79e37fd8ea7c499
|
[] |
no_license
|
FelixLiu1996/Sentiment_Analysis
|
3c731abb2ed1f489fd0c4ee6a39da60dfe8ef1c7
|
fb8be20966e0748b3d99315cf2ee21a73fb19ac2
|
refs/heads/master
| 2020-06-18T12:20:03.199987
| 2019-07-13T13:57:01
| 2019-07-13T13:57:01
| 196,301,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,159
|
py
|
import numpy as np
import pandas as pd
import jieba
import jieba.analyse
import re
import codecs
import string
def clean_data(text):
    """Return only the Chinese characters and listed punctuation found in *text*.

    Everything else (Latin letters, digits, whitespace, other symbols)
    is discarded.
    """
    # Matches one CJK ideograph, or one punctuation mark from the fixed set.
    keep = re.compile("[\u4e00-\u9fa5]|[\(\)\《\》\——\;\,\。\“\”\<\>\!]")
    return ''.join(keep.findall(text))
def stopwordlist(filepath):
    """Load the stop-word list from *filepath* (one word per line, UTF-8).

    Hotel/brand names irrelevant to this study are appended afterwards.

    Fix: the original called open() without ever closing the file; a
    `with` block now releases the handle deterministically.
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        stopwords = [line.strip() for line in f]
    stopwords.extend(['大连', '酒店', '宾馆', '康莱德', '希尔德', '友谊', '希尔顿', '良运', '香格里拉', '维多利亚', '船舶丽湾'])
    return stopwords
def seg_sentence(sentence):
    """Tokenise *sentence* with jieba and drop stop words.

    :param sentence: raw text line to filter
    :return: surviving tokens joined by single spaces; a trailing space is
             kept (matches the original accumulation behaviour)
    """
    tokens = jieba.cut(sentence.strip())
    # Stop-word file lives next to the corpus files under file_path.
    stopwords = stopwordlist(file_path + '哈工大停用词表.txt')
    return ''.join(tok + " " for tok in tokens if tok not in stopwords and tok != '\t')
# Text segmentation
def sent2word(line):
    """Segment *line* into space-separated tokens, dropping tab tokens."""
    pieces = [w for w in jieba.cut(line, cut_all=False) if w != '\t']
    return ''.join(w + " " for w in pieces).strip()
file_path = "D:\PythonCodes\Python_Course_Project/"
# Clean, segment and stop-word-filter the positive samples, writing one
# processed line per input line to positive_cut.txt.
with open(file_path + "positive.txt", encoding='utf-8') as f:
    positive = f.readline()
    # NOTE(review): target is closed manually after the loop; an exception
    # mid-loop would leak the handle — a `with` block would be safer.
    target = open(file_path + 'positive_cut.txt', encoding='utf-8', mode='w')
    while positive:
        positive = clean_data(positive)
        positive = sent2word(positive)
        positive = seg_sentence(positive)
        target.writelines(positive + '\n')
        positive = f.readline()
    target.close()
# Same pipeline for the negative samples.
with open(file_path + 'negative.txt', encoding='utf-8') as f:
    negative = f.readline()
    target = open(file_path + 'negative_cut.txt', encoding='utf-8', mode='w')
    while negative:
        negative = clean_data(negative)
        negative = sent2word(negative)
        negative = seg_sentence(negative)
        target.writelines(negative + '\n')
        negative = f.readline()
    target.close()
# positive = pd.read_table(file_path + "positive.txt")
# negative = pd.read_table(file_path + "negative.txt", header=None, index_col=False)
# print(positive)
# print(negative[0])
# print(type(positive))
#
# # with open(file_path + "positive.txt", encoding='utf_8') as f:
# # positive = f.readline()
# # # print(positive)
# # print(positive)
# # positive = f.readline()
# # print(positive)
#
# def prepareData(sourceFile, targetFile):
# f = codecs.open(sourceFile, 'r', encoding='utf-8')
# target = codecs.open(targetFile, 'w', encoding='utf-8')
# print('open source file: ' + sourceFile)
# print('open target file: ' + targetFile)
#
# lineNum = 1
# line = f.readline()
# while line:
# print('---processing ', lineNum, ' article---')
# # line = clearTxt(line)
# # line = clear_data(line)
# seg_line = sent2word(line)
# target.writelines(seg_line + '\n')
# lineNum = lineNum + 1
# line = f.readline()
# print('well done.')
# f.close()
# target.close()
#
#
# # 清洗文本
# # def clearTxt(line):
# # if line != '':
# # line = line.strip()
# # intab = ""
# # outtab = ""
# # trantab = str.maketrans(intab, outtab)
# # pun_num = string.punctuation + string.digits
# # line = line.encode('utf-8')
# # line = line.translate(trantab, pun_num)
# #
# # line = line.decode("utf8")
# # # 去除文本中的英文和数字
# # # line = re.sub("[a-zA-Z0-9]", "", line)
# # # # 去除文本中的中文符号和英文符号
# # # line = re.sub("[\s+\.\!\/_,$%^*(+\"\';:“”.]+|[+——!,。??、~@#¥%……&*()]+".decode("utf8"), "", line)
# # line = clean_data(line)
# # return line
#
#
# # 文本切割
# def sent2word(line):
# segList = jieba.cut(line, cut_all=False)
# segSentence = ''
# for word in segList:
# if word != '\t':
# segSentence += word + " "
# return segSentence.strip()
#
#
# if __name__ == '__main__':
# sourceFile = file_path + 'negative.txt'
# targetFile = file_path + 'negative_cut.txt'
# sourceFile = clean_data(sourceFile)
# prepareData(sourceFile, targetFile)
#
# sourceFile = file_path + 'positive.txt'
# targetFile = file_path + 'positive_cut.txt'
# prepareData(sourceFile, targetFile)
# # file_path = "D:\PythonCodes\Python_Course_Project/"
# # with open(file_path + "positive.txt", encoding="utf_8") as f:
# # positive = f.read()
# #
# # seg =jieba.lcut(positive, cut_all=False)
# # print('/'.join(seg))
|
[
"1321842068@qq.com"
] |
1321842068@qq.com
|
52987e627ba68db3086386c61ae709502816ded6
|
d0b7d681550464492e290d11c2be0c43ba4dc7ca
|
/python/logger/logger.py
|
78c2387780e901414cdfcadac2a0b84179dd2efc
|
[] |
no_license
|
kuzovkov/scrapers
|
0af71a8d5628b604b8c1c9fcfa6c95f63a69976f
|
cae2d758f3a25bc53d401d5a4d46fda231c58445
|
refs/heads/master
| 2020-05-27T21:10:17.701161
| 2017-03-02T16:22:29
| 2017-03-02T16:22:29
| 83,604,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
#coding=utf-8
import time
import os
class Logger:
    """Append timestamped messages to a log file, or print them when no
    file is configured.

    NOTE: Python 2 only — uses `except Exception, e` and print statements.
    """
    # Default log file path; None means "print to stdout instead".
    logfile = None
    def __init__(self, logfile=None):
        if logfile is not None:
            self.logfile = logfile
    # Build the final log line: "<prefix> <asctime> <STATUS>: <message>\n"
    def _prep_message(self, msg, prefix, status):
        date = prefix + ' ' + time.asctime(time.localtime())
        return date + ' ' + status + ': ' + str(msg) + "\n"
    # Write an INFO message to the log file (or stdout when no file is set).
    # Returns False when the file cannot be written, None otherwise.
    def info(self, msg, prefix=''):
        status = '<INFO>'
        if self.logfile is not None:
            try:
                f = open(self.logfile, 'a')
                f.write(self._prep_message(msg, prefix, status))
                f.close()
            except Exception, e:
                print 'Logger error: ' + e.strerror
                return False
        else:
            print self._prep_message(msg, prefix, status)
    # Write an ERROR message to the log file (or stdout when no file is set).
    # Same structure as info(); only the status tag differs.
    def error(self, msg, prefix=''):
        status = '<ERROR>'
        if self.logfile is not None:
            try:
                f = open(self.logfile, 'a')
                f.write(self._prep_message(msg, prefix, status))
                f.close()
            except Exception, e:
                print 'Logger error: ' + e.strerror
                return False
        else:
            print self._prep_message(msg, prefix, status)
    # Delete the log file; returns False on failure, None otherwise.
    def clear(self):
        if self.logfile is not None:
            try:
                os.remove(self.logfile)
            except Exception, e:
                print 'Logger error: ' + e.strerror
                return False
|
[
"you@example.com"
] |
you@example.com
|
6500fd2acb8cb277b272726bf1526a92de251566
|
76ad79c64e3f7b0c334e39041490ae93dbc3eb26
|
/demo/backend/backend/urls.py
|
9834012be9c9252b81589a96a6359751aecf7ebf
|
[] |
no_license
|
akonepoint/transform-and-tell
|
21ec1978350c8ffaaff5971229091245a6e9e999
|
47f00da81d606db8df992674101842d797ab1fa1
|
refs/heads/master
| 2023-09-01T16:52:51.474528
| 2021-10-14T09:32:06
| 2021-10-14T09:32:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 797
|
py
|
"""backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Route the Django admin UI, and delegate everything under api/ to the
# `tat` app's URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('tat.urls')),
]
|
[
"alasdair.tran@gmail.com"
] |
alasdair.tran@gmail.com
|
fe8835b7ad87712f1dfb73b498ca7b3ad992d1f7
|
96288dad8f679376f49ccb52044341b9eec7a7e1
|
/bande_annonce.py
|
4776939838f5382f26cdbe8469afb82450a94988
|
[] |
no_license
|
louismarc-mercier/Manim
|
0bfe673b6eb9aa9c3388e68a3ec8ed2e6788970c
|
adcd36907ed030a551db8a7dac16ae020ef51eca
|
refs/heads/master
| 2023-07-06T21:10:46.102530
| 2021-08-13T13:37:38
| 2021-08-13T13:37:38
| 266,430,156
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,032
|
py
|
from manimlib.imports import *
from manim_utils.slide_template import BoxedTitle, GenChannelLogo, YoutubeLogo
from MyAnim.RevisionSecondaire.id_rem_french import MeasureDistance
class bandeAnnonce(Scene):
    """Trailer scene: fades in the channel logo, the trailer title/subtitle
    and a lower-left description block, then holds for five seconds."""
    def construct(self):
        # Channel logo and name (UP LEFT CORNER)
        # NOTE(review): `path` is assigned but never used below — confirm
        # whether it can be removed.
        path = r"C:/Users/Utilisateur/Desktop/CEGEP/Cours_maison/"
        path_2 = r"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/videos/logoCTM/images/"
        image = ImageMobject(path_2 + "CTM_boite")
        image.scale(1.4)
        # Title and subtitle of the trailer (UP RIGHT CORNER)
        trailer_title = TextMobject("Code tes maths").set_color("#0079C2")
        trailer_title.next_to(np.array([-5.5,3.25,0]))
        trailer_title.scale(1.75)
        trailer_subtitle = TextMobject(r"\sc{\textbf{Notions de base - Fonction d'impression}}")
        trailer_subtitle.set_color(WHITE)
        trailer_subtitle.next_to(trailer_title, DOWN, buff=0.2*LARGE_BUFF).align_to(trailer_title, LEFT)
        trailer_subtitle.scale(1)
        image.next_to(trailer_subtitle.get_center() + np.array([-5,-2,0]))
        # Lower-left description block: accent bar plus four text lines.
        left_rectangle = Rectangle(width=0.035, height=2, fill_opacity=1, color=PINK)
        left_rectangle.to_corner(DL)
        video_description = TextMobject(r"\sc{\textbf{Collège Maisonneuve}}").set_color(WHITE)
        departement = TextMobject(r"\sc{\textbf{Département de mathématiques}}").set_color(WHITE).scale(0.7)
        noms = TextMobject("Rosalie Bentz-Moffet, Melisande Fortin-Boisvert, Stéphanie Larocque et").scale(0.7)
        noms_suite = TextMobject("Louis-Marc Mercier").scale(0.7)
        video_description.next_to(left_rectangle.get_corner(UR) + np.array([0.05,-0.15,0]))
        departement.next_to(video_description, DOWN).align_to(video_description, LEFT)
        noms.next_to(departement, DOWN).align_to(departement, LEFT)
        noms_suite.next_to(noms, DOWN).align_to(noms, LEFT)
        video_description.scale(1)
        # Displaying everything
        self.play(FadeIn(trailer_title), FadeIn(trailer_subtitle))
        self.play(FadeIn(image))
        self.play(FadeIn(left_rectangle), FadeIn(video_description), FadeIn(departement), FadeIn(noms), FadeIn(noms_suite))
        self.wait(5)
class Credits(Scene):
    """End-credits scene: scrolls the credit lines upward, then fades out a
    thank-you message."""
    def wplay(self, *args, wait=1, run_time=1, rate_func=smooth):
        # play() followed by an optional wait; wait=0 skips the pause.
        self.play(*args, run_time=run_time, rate_func=rate_func)
        if wait != 0:
            self.wait(wait)
    def construct(self):
        credits = TextMobject("Crédits").set_color(YELLOW).scale(1.7)
        thanks = TextMobject("Merci d'avoir visionné le vidéo!!").set_color(ORANGE).scale(1.7)
        instructor = TexMobject(r"\text{Enseignant}", r"\text{Louis-Marc Mercier}")
        viewer = TexMobject(r"\text{Spectateur}", r"\text{You}")
        lines = [instructor, viewer]
        # Two-column layout: labels right-aligned at x=-0.5, values
        # left-aligned at x=0.5, both starting 8 units below frame.
        instructor[0].align_to([-0.5, 0, 0], RIGHT).shift(8 * DOWN)
        instructor[1].align_to([0.5, 0, 0], LEFT).shift(8 * DOWN)
        viewer[0].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[0], RIGHT)
        viewer[1].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[1], LEFT)
        credits.set_y(instructor.get_top()[1] + 2 * LARGE_BUFF)
        thanks.set_y(-14.5)
        def half_start(t):
            # this rate function is great for gradually starting into a `linear` rate
            # it goes from 0 to 0.5 in value, and from 0 to 1 in slope (speed)
            return 1 / 2 * t ** 2
        everything_no_thanks = VGroup(credits, *lines)
        # Scroll everything up: gentle start, then linear crawl.
        self.play(VGroup(*everything_no_thanks, thanks).shift, UP, rate_func=half_start)
        self.play(VGroup(*everything_no_thanks, thanks).shift, 14 * UP, rate_func=linear, run_time=14)
        self.play(everything_no_thanks.shift, 3 * UP, rate_func=linear, run_time=3)
        self.remove(*everything_no_thanks)
        self.wait(3)
        # all done :)
        self.wplay(FadeOut(thanks))
|
[
"noreply@github.com"
] |
louismarc-mercier.noreply@github.com
|
944e14c58823984857d813317dfa4880f3c39c97
|
d3f36cdca0315f3b943429e40a097823765f9d4f
|
/src/notification/migrations/0001_initial.py
|
fa2603c60859bb39eb0de7510c1cd941818cad47
|
[] |
no_license
|
camaratc/notify_server
|
320c5de4ef4be397dab1b3089788e076ef7ce081
|
0ae0464f73330e984f3132f3e90623851efd2fc2
|
refs/heads/master
| 2020-03-29T20:09:27.063128
| 2018-11-09T16:05:46
| 2018-11-09T16:05:46
| 150,298,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
# Generated by Django 2.1.1 on 2018-09-28 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the notification app: creates the
    Notification table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=45, verbose_name='Título')),
                ('message', models.TextField(max_length=2000, verbose_name='Mensagem')),
                ('author', models.CharField(max_length=100, verbose_name='Autor')),
                ('tag', models.IntegerField(choices=[(0, 'Erro'), (1, 'Aviso'), (2, 'Recomendação')], verbose_name='Categoria')),
                ('time_active', models.IntegerField(default=15, verbose_name='Tempo ativa')),
                # NOTE(review): the default is a *fixed* timestamp string baked
                # in when the migration was generated, not "now" at insert time
                # — presumably the model should use timezone.now; left as-is
                # because editing a generated migration changes schema history.
                ('creation_date', models.DateTimeField(default='2018-09-28T17:45:25.585681+00:00', editable=False, verbose_name='Data de Criação')),
                ('send_date', models.DateTimeField(blank=True, null=True, verbose_name='Data de Envio')),
            ],
            options={
                'verbose_name': 'Notificação',
                'verbose_name_plural': 'Notificações',
            },
        ),
    ]
|
[
"matheusrf96@gmail.com"
] |
matheusrf96@gmail.com
|
da4f39271d6fa04ca91ceae37e78340853405fd8
|
30278f2167c5d355c037df5bf9f94b6ff20a55c2
|
/hello.py
|
aedcf5074649853d0d166d75cb756fd7675c3f0b
|
[] |
no_license
|
ankitthakur007/discord-bot
|
29fe51ff350cd180f2d0b5af691f301f07d8548a
|
59c0bd5b144018958afb73e2b0616dab5e95977f
|
refs/heads/main
| 2023-05-06T13:14:29.409720
| 2021-05-29T16:12:07
| 2021-05-29T16:12:07
| 371,924,261
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 105
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 29 08:23:45 2021
@author: Ankit
"""
# Smoke-test script: prints a greeting to stdout.
print("Hello World!")
|
[
"thakubhai.007@gmail.com"
] |
thakubhai.007@gmail.com
|
3475e23eae2cada8acb2751313d2a5f4684ae72e
|
c3b32dc07fec5759c077f6e983d32a130b523faf
|
/MachineLearning/views.py
|
c62d67a9c7106e908283f20764507a78b40a437c
|
[] |
no_license
|
Winton1992/Visual-Analytics-of-Machine-Learning-Performance
|
c7bad8ccab779e8272b11445b62400ff1b7c5f48
|
b2d274c3649eaca2587ec74afb3c25d0dcd1d1ea
|
refs/heads/master
| 2020-03-14T02:33:44.259332
| 2018-04-28T13:38:13
| 2018-04-28T13:38:13
| 131,401,468
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,877
|
py
|
from django.shortcuts import render
from django.views.generic import View
from rest_framework.response import Response
from rest_framework.views import APIView
from ML.machine_learning import MLResult
# url 'MachineLearning/ML-result/'
class MachineLearningResultView(View):
def get(self, request, *args, **kwargs):
context = {
"title": 'machine-learning-result',
}
return render(request, "webpages/machine_learning/machine_learning_result.html", context)
def post(self, request, *args, **kwargs):
ml = MLResult()
ml.run(new_data=False)
context = {
"title": 'machine-learning-result',
}
return render(request, "webpages/machine_learning/machine_learning_result.html", context)
# url 'api/MachineLearning/accuracy'
class APIMachineLearningAccuracy(APIView):
def get(self, request, format=None, **kwargs):
from ML.preprocessor import PreProcessor
pp = PreProcessor()
pp.update_data()
mlr = MLResult()
data = mlr.results()
# data = {
# 'mlp': [
# {"combination": "part_year", "accuracy": 0.5},
# {"combination": "part_year,diameter", "accuracy": 0.5},
# {"combination": "failure_year", "accuracy": 0.1},
# {"combination": "material,diameter", "accuracy": 0.4},
# {"combination": "length", "accuracy": 0.2},
# {"combination": "failure_year,diameter", "accuracy": 0.3}
# ],
# 'cnn': [
# {"combination": "part_year", "accuracy": 0.2},
# {"combination": "part_year,diameter", "accuracy": 0.9},
# {"combination": "failure_year,diameter", "accuracy": 0.3}
# ],
# 'ann': [
# {"combination": "part_year", "accuracy": 0.3},
# {"combination": "part_year,diameter", "accuracy": 0.1}
# ],
# 'som': [
# {"combination": "part_year", "accuracy": 0.2},
# {"combination": "part_year,diameter", "accuracy": 0.6},
# {"combination": "material,diameter", "accuracy": 0.4},
# {"combination": "length", "accuracy": 0.2}
# ],
# 'knn': [
# {"combination": "part_year", "accuracy": 0.2},
# {"combination": "part_year,diameter", "accuracy": 0.3}
# ],
# }
# print(data)
return Response(data)
# from ML.machine_learning import MLResult # here are the ML results
# mlr = MLResult()
# results = mlr.results() # results of all models
#
# mdl = 'mlp' # name of model
# attrib_list = results["best_results"][mdl]["combination"] # input of model
# predictions = mlr.predictions(mdl, attrib_list) # predictions based on particular model
|
[
"weli0127@uni.sydney.edu.au"
] |
weli0127@uni.sydney.edu.au
|
fef81ea432b7a108e17ce262dea7f5e6e542ddbe
|
55cc304c3e98d998bf20d5deab72852a5149a88f
|
/pyente/client.py
|
5075b489a138a7b3e6405156df0e7b1ff566c0a7
|
[
"MIT"
] |
permissive
|
waschag-tvk/pyente
|
f05cc5226a7130de5b5c83a70124f7b4f0f9edac
|
1f41e6db6e883ac058da3f9de8619021cde9b973
|
refs/heads/master
| 2020-03-20T19:40:55.838333
| 2018-06-25T19:02:53
| 2018-06-25T19:02:53
| 137,648,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,721
|
py
|
import requests
class Client:
    """Thin HTTP client for the ente appointment REST API.

    Only builds endpoint URLs in the constructor; every network call
    happens lazily in the individual methods via `requests`.
    """

    def __init__(self, enteapi_url='http://localhost', ente_id=1):
        self.appointment_url = enteapi_url + '/enteapi/v1/appointment/'
        self.token_auth_url = enteapi_url + '/enteapi/token-auth/'
        self.last_used_for_each_machine_url = (
            self.appointment_url + 'last_used_for_each_machine/')
        self.ente_id = ente_id

    def get_available_appointments(self):
        """Return the decoded JSON list of available appointments."""
        return requests.get(self.appointment_url).json()

    def get_last_used_for_each_machine(self):
        '''see the latest actual users for each machine'''
        # NOTE(review): returns the raw Response (not .json()), unlike
        # get_available_appointments — kept as-is since callers may rely
        # on it; confirm whether .json() was intended.
        return requests.get(self.last_used_for_each_machine_url)

    def get_authenticated_session(self, username, password):
        """Obtain a JWT token for *username* and wrap it in an
        AuthenticatedSession bound to this client."""
        token = requests.post(
            self.token_auth_url,
            json={'username': username, 'password': password},
        ).json()['token']
        return AuthenticatedSession(self, token)

    def activate(self, token, machine):
        """Activate the single available appointment for *machine*.

        Raises RuntimeError when zero or multiple appointments match.
        """
        # Bug fix: the original called the bare name
        # get_available_appointments(), which raises NameError at runtime —
        # the method lives on self.
        appointment = list(filter(
            lambda a: a['machine'] == machine,
            self.get_available_appointments()))
        if len(appointment) != 1:
            raise RuntimeError(
                'Appointment for the machine not available or ambiguous!')
        appointment_pk = appointment[0]['pk']
        requests.post(
            self.appointment_url + '{:d}/activate/'.format(appointment_pk),
            json={"enteId": self.ente_id},
            headers={'Authorization': 'JWT ' + token})
class AuthenticatedSession:
    """A client paired with a JWT token; delegates activation calls."""

    def __init__(self, client, token):
        # Keep both so activate() can forward the stored token.
        self.client = client
        self.token = token

    def activate(self, machine):
        """Activate the appointment for *machine* using the stored token."""
        self.client.activate(self.token, machine)
|
[
"waschag@tvk.rwth-aachen.de"
] |
waschag@tvk.rwth-aachen.de
|
45cdab6f8d281fbbfc1f008c7c4f280c8cb1b340
|
4a451db0a46420131f9961fc6477377913ef6062
|
/Chapter7/utils/__init__.py
|
8f39ea525511b137966df29dde6a9fcf2a7cd649
|
[] |
no_license
|
fuurin/pytorch_advanced
|
252fe6c76ee312159f7413d0c69ec672836a3228
|
aeaa59352278adcba388435b8f39bdf5eb985e70
|
refs/heads/master
| 2022-12-17T00:09:12.888656
| 2020-02-23T17:26:49
| 2020-02-23T17:26:49
| 204,486,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
from .dataloader import *
from .transformer import *
|
[
"komatsu@dais.is.tohoku.ac.jp"
] |
komatsu@dais.is.tohoku.ac.jp
|
c2924fe046e0d04257a3b313c8ba72d98d9b8a8b
|
6fa28562089148f569240ecd0ee0010a776e5b3b
|
/Assigned Questions/dictionaries_02.py
|
78062cb8199e3782fa1bbc5044fa0eef9e6efb94
|
[] |
no_license
|
VinuthnaGangula/261714_Py_DailyCommits
|
d67c93f2de25ffa35fb6bcc4ad3142d7704b88f9
|
8a1185b7e4f186b14132a0ae64f52ef88978b8df
|
refs/heads/main
| 2023-04-20T20:07:58.140441
| 2021-04-29T03:03:49
| 2021-04-29T03:03:49
| 359,351,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
# To convert a list into nested dictionary of keys.
# Reads whitespace-separated values from stdin; e.g. "a b c" produces
# {'a': {'b': {'c': {}}}}.
list_ = list(input("Enter values of the list: ").split())
dict_ = {}
dict_temp = dict_  # keep a reference to the outermost dict for printing
for i in list_:
    dict_[i] = {}
    dict_ = dict_[i]  # descend into the dict just created
print(dict_temp)
|
[
"gvvlvinuthna2000@gmail.com"
] |
gvvlvinuthna2000@gmail.com
|
21f4f6b71376a4700a0c02b29912bc92efbf0bf9
|
cf71c03349937a3d9ec5f3d3ff65801368453b89
|
/setup.py
|
aed3db14d19f7d30ac1417f906574452aac9206e
|
[
"MIT"
] |
permissive
|
movermeyer/django-aggregate-if
|
c90b8ad61a336a0fdf20737291e7866aafec6ffb
|
588c1487bc88a8996d4ee9c2c9d50fa4a4484872
|
refs/heads/master
| 2021-03-19T17:29:32.839653
| 2014-11-20T11:11:06
| 2014-11-20T11:11:06
| 123,967,026
| 0
| 0
|
MIT
| 2018-03-05T19:19:24
| 2018-03-05T19:19:24
| null |
UTF-8
|
Python
| false
| false
| 1,161
|
py
|
# coding: utf-8
from setuptools import setup
import os
# Packaging metadata for the django-aggregate-if distribution (single
# module `aggregate_if`, long description pulled from README.rst).
setup(name='django-aggregate-if',
      version='0.5',
      description='Conditional aggregates for Django, just like the famous SumIf in Excel.',
      long_description=open(os.path.join(os.path.dirname(__file__), "README.rst")).read(),
      author="Henrique Bastos", author_email="henrique@bastos.net",
      license="MIT",
      py_modules=['aggregate_if'],
      install_requires=[
            'six>=1.6.1',
      ],
      zip_safe=False,
      platforms='any',
      include_package_data=True,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Topic :: Database',
          'Topic :: Software Development :: Libraries',
      ],
      url='http://github.com/henriquebastos/django-aggregate-if/',
)
|
[
"henrique@bastos.net"
] |
henrique@bastos.net
|
09e2968e9aa36cd2e97b1a57c7f01ca1a8163fa0
|
15e751d61495ff17b1a0234ebf58f3818a26726d
|
/SENT.py
|
39894d480a2aa905471570bb02b77ba0252b602e
|
[] |
no_license
|
deodeveloper/sentimentalAnalysis
|
f61f26dd6a0a7fb0acb01fdfd0f23f4fddcc5342
|
6cc2d05cbc4cda751c18abec41f2fb71704b7349
|
refs/heads/master
| 2021-03-27T12:51:05.919696
| 2017-08-01T02:15:07
| 2017-08-01T02:15:07
| 70,414,503
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,103
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 10:14:12 2016
@author: Satya
"""
from tweepy import OAuthHandler
import tweepy
import urllib.request
import json
from unidecode import unidecode
# SECURITY NOTE(review): live Twitter API credentials are hard-coded and
# committed to the repository — they should be revoked and loaded from the
# environment or a config file instead.
CKEY = "QdjZPGYPwd99r72qQfyZyZEcO"
CSECRET = "yUgsvYUSFtNQIEgLW1aY9DMJRRxYajxfYC2pg3RzFhR3rkcl5L"
ATOKEN = "174336590-kvtw1cqrwuH75LIMKqXZkKSeoU9BfEAB9QBnMIoI"
ATOKENSECRET = "KdnYcVNI0h6ny7i9ACzNw3I0h0hLSlqdjXTSDoYpgDXc5"
URL_SENTIMENT140 = "http://www.sentiment140.com/api/bulkClassifyJson"
# NOTE(review): `d` appears to be leftover placeholder data; it is only fed
# to urlencode in main() and never sent — verify it can be deleted.
d = dict(parameter1="value1", parameter2="value2")
COMPANYNAME = "AAPL"
# Maximum number of tweets to fetch per run.
LIMIT = 2500
LANGUAGE = 'es' # Sentiment140 API only support English or Spanish.
def parse_response(json_response):
    """Count negative and positive tweets in a Sentiment140 bulk response.

    Polarity 0 means negative, 4 means positive; anything else is ignored.
    Returns (negative_count, positive_count).
    """
    items = json_response["data"]
    negatives = sum(1 for item in items if int(item["polarity"]) == 0)
    positives = sum(1 for item in items if int(item["polarity"]) == 4)
    return negatives, positives
def main():
    """Fetch recent tweets mentioning COMPANYNAME, classify them via the
    Sentiment140 bulk API and print the positive/negative counts.

    Fixes vs. the original:
    - the final `print` lines were bare name references followed by
      no-op string literals (a Python 2->3 conversion artifact), so the
      counts were never printed; they are now real print() calls.
    - urlopen was given str(result): urlopen requires bytes, and the
      API expects a JSON body (matching the Content-Type header), so the
      payload is now json.dumps(...).encode("utf-8").
    - removed the unused `data = urlencode(d)` local (d is placeholder data).
    """
    auth = OAuthHandler(CKEY, CSECRET)
    auth.set_access_token(ATOKEN, ATOKENSECRET)
    api = tweepy.API(auth)
    tweets = []
    # Collect up to LIMIT recent tweets; strip double quotes and transliterate
    # to ASCII for the Sentiment140 payload.
    for tweet in tweepy.Cursor(api.search,
                               q=COMPANYNAME,
                               result_type='recent',
                               include_entities=True,
                               lang=LANGUAGE).items(LIMIT):
        aux = {"text": unidecode(tweet.text.replace('"', '')), "language": LANGUAGE, "query": COMPANYNAME,
               "id": tweet.id}
        tweets.append(aux)
    result = {"data": tweets}
    req = urllib.request.Request(URL_SENTIMENT140)
    req.add_header('Content-Type', 'application/json')
    response = urllib.request.urlopen(req, json.dumps(result).encode("utf-8"))
    json_response = json.loads(response.read())
    negative_tweets, positive_tweets = parse_response(json_response)
    print("Positive Tweets: " + str(positive_tweets))
    print("Negative Tweets: " + str(negative_tweets))


if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
deodeveloper.noreply@github.com
|
19ef15f108dcbe8ee9a8bd5e91031584e26407b4
|
96a50d47e6acf3c35f69cb9187145cf6adc048cf
|
/bin/get_oisst_data
|
31d3f3c7de6ab3a19eb32778279e6241618ddfbe
|
[] |
no_license
|
underwoo/oisst
|
edb84b9316c8e9ff0966e000089577e7ee5d3a20
|
16ddfc0905647b829bd8914e7d35a464a95d719d
|
refs/heads/master
| 2020-03-07T15:10:10.572231
| 2018-04-02T15:11:58
| 2018-04-02T15:11:58
| 127,547,316
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,561
|
#!/usr/bin/env python3
import urllib.request
import urllib.parse
import urllib.error
import re
import bs4
import datetime
import os
import sys
import errno
import logging
import logging.handlers
import configobj
class UrlData(object):
    def __init__(self, url):
        """Get a list of directories and files served from URL.

        The URL string passed in, must be a HTML served list of
        directories/files. On success, self.dirs and self.files each hold
        dicts with 'name', 'mdate' and 'size' keys; both remain empty
        lists when the URL cannot be read, is not text/html, or uses an
        unsupported scheme.
        """
        # Set dirs and files to be empty lists
        self.dirs = []
        self.files = []
        # Split the URL to get the scheme
        split_url = urllib.parse.urlsplit(url)
        if re.match('https?', split_url.scheme):
            # Deal with http(s)
            try:
                request = urllib.request.urlopen(url)
            except urllib.error.URLError as err:
                print("Unable to open URL \"{0}\" for parsing: {1}".format(url, err.reason))
                raise
            # Verify the URL is a "text/html" type
            if not re.match("text/html", request.getheader('Content-Type')):
                print("URL \"{0}\" does not return a text/html content type. Unable to parse.".format(url))
            else:
                # Get the directories and files
                try:
                    data = request.read()
                    # Soupify, parse the html
                    soup = bs4.BeautifulSoup(data, 'html.parser')
                    # The version of the web server used places all the files in a table. If the
                    # web server is updated, or if the format changes, the table parser below will need
                    # to be updated.
                    # The directories/files start at row 3
                    rows = soup.table.findAll('tr')[3:]
                    # Get the directory names and mod time. Directories have a name that ends with "/"
                    self.dirs = [{'name': row.findAll('td')[1].a.get_text(),
                                  'mdate': row.findAll('td')[2].get_text(),
                                  'size': row.findAll('td')[3].get_text()}
                                 for row in rows if len(row.findAll('td')) >= 4 and row.findAll('td')[1].a.get_text().endswith("/")]
                    # Get the file names, size and mod time
                    self.files = [{'name': row.findAll('td')[1].a.get_text(),
                                   'mdate': row.findAll('td')[2].get_text(),
                                   'size': row.findAll('td')[3].get_text()}
                                  for row in rows if len(row.findAll('td')) >= 4 and not row.findAll('td')[1].a.get_text().endswith("/")]
                except Exception:
                    # Fix: was a bare `except:`, which also swallowed
                    # SystemExit and KeyboardInterrupt.
                    print("Unable to parse HTML returned from {0}".format(url))
                    exit(1)
        else:
            print("Unable to parse URL scheme {0}. Not yet implemented".format(split_url.scheme))
def readConfig(configFile='~/.oisstrc'):
    """Read in the config file

    A dictionary will be returned containing the keys and values of
    the config file.
    The keys currently included in the config file are:
    scheme, hostURL, hostPath, outputDir, logDir
    """
    # Read in the config file.
    # TODO: Put this in a try with exception handeling: http://www.voidspace.org.uk/python/configobj.html#exceptions
    # TODO: Add validation to the config file.
    # NOTE(review): file_error=True makes ConfigObj raise IOError when the
    # file is missing rather than returning an empty config — callers must
    # handle that. The default path is not expanduser()-ed; verify callers
    # pass an absolute path.
    return configobj.ConfigObj(infile=configFile, file_error=True)
def init_logging(logDir, logLevel='DEBUG'):
    """Initialize the logging.

    The logging used in this program always logs to a file.  If run on
    a tty, log messages are also displayed on the console.

    The logDir must already exist and the user _must_ have permission
    to write to it; otherwise None is returned (callers treat None as
    "unable to set up logging").

    The log file created is "get_oisst.log".  It is rotated each week
    (on Sunday); rotated files keep the date appended as "YYYY-MM-DD",
    and at most 17 old files (~4 months) are retained.

    Returns the configured logging.Logger, or None on error.
    """
    # Check if logDir directory exists, and is a directory
    if not os.path.isdir(logDir):
        # Logger is not started, must write directly to stderr
        print("ERROR: The log directory '{0}' does not exist.\n"
              "ERROR: Please create and try again.".format(logDir), file=sys.stderr)
        return None
    # Check if the user has permission to write to the logDir
    if not os.access(logDir, os.W_OK):
        # Logger is not started, must write directly to stderr
        print("ERROR: Permissions on the log directory '{0}' do not allow the current user to write the log file.\n"
              "ERROR: Please correct the permissions and try again.".format(logDir), file=sys.stderr)
        return None
    # Set the logFile name
    logFile = os.path.join(logDir, 'get_oisst.log')
    # Get the logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logLevel)
    # Setup the rotating log file, rotate each Sunday (W6), delete
    # files more than 4 months old (17 weekly backups).
    logFileHandler = logging.handlers.TimedRotatingFileHandler(logFile,
                                                               when='W6',
                                                               backupCount=17)
    # Log file format
    logFileFormat = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
    logFileHandler.setFormatter(logFileFormat)
    # Add the handler to the logger
    logger.addHandler(logFileHandler)
    # Create the console logger if run in a tty.
    # BUG FIX: sys.stdin.fileno() raises (OSError/ValueError, or
    # io.UnsupportedOperation which subclasses both) when stdin has been
    # closed or replaced, e.g. under a daemon or a test runner.  Treat
    # any such failure as "not a tty" instead of crashing.
    try:
        on_tty = os.isatty(sys.stdin.fileno())
    except (OSError, ValueError, AttributeError):
        on_tty = False
    if on_tty:
        # Define a log handler for the console
        console = logging.StreamHandler()
        console.setLevel(logLevel)
        consoleFormat = logging.Formatter('%(levelname)-8s %(message)s')
        console.setFormatter(consoleFormat)
        # Add the console logger
        logger.addHandler(console)
    return logger
def getFile(url='', source='', target=''):
    """Download the file at url + source and place it in target.

    url is a string that contains the base url with scheme and optional
    path; url and source are combined to create the full download URL.

    If the target file already exists, its size and timestamp are
    compared with the remote file's Content-Length / Last-Modified
    headers.  The download is skipped when the sizes match and the
    remote file is not newer; otherwise the file is downloaded again.

    Returns True if the file was downloaded successfully or did not
    need to be downloaded; False on any download error or unsupported
    URL scheme.
    """
    # Get the logger
    logger = logging.getLogger(__name__)
    # Default return value
    myRet = False
    # Only http/https schemes are supported
    split_url = urllib.parse.urlsplit(url)
    if re.match('https?', split_url.scheme):
        # Default is to attempt the download.  doDownload is set to False
        # when the target turns out to be already up to date.
        doDownload = True
        # Build the full URL before the try block so it is always defined
        # for the error message in the except clause.
        full_url = urllib.parse.urljoin(url, source)
        # Open the URL to collect information about the remote file
        try:
            request = urllib.request.urlopen(full_url)
        except urllib.error.URLError as err:
            print("Unable to open URL \"{0}\" for parsing: {1}".format(full_url, err.reason))
            raise
        try:
            # Check if the target file exists
            if os.path.isfile(target):
                # Need to get the local file's size and ctime
                try:
                    target_size = os.path.getsize(target)
                    target_ctime = datetime.datetime.utcfromtimestamp(os.path.getctime(target))
                except OSError as err:
                    logger.warning("Unable to get the size or ctime of the target file \"{0}\".".format(target))
                    logger.warning("Retrying the download. ([{0}] {1})".format(err.errno, err.strerror))
                else:
                    # Need to get the remote size and ctime from the headers
                    try:
                        source_size = int(request.getheader("Content-Length"))
                        source_ctime = datetime.datetime.strptime(request.getheader("Last-Modified"), "%a, %d %b %Y %H:%M:%S %Z")
                    except Exception as err:
                        # BUG FIX: the original used "{1}" with a single
                        # format argument, which raised IndexError.
                        logger.warning("Unable to get the size or ctime of the source file \"{0}\".".format(full_url))
                        logger.warning("Retrying the download. ({0})".format(err))
                    else:
                        # BUG FIX: this comparison used to run even when the
                        # header lookup above failed, raising NameError on
                        # the undefined source_size/source_ctime.  Moved into
                        # the try/else so it only runs with known values.
                        # "Same" means: sizes match and the source is not
                        # newer than the target.
                        if source_size == target_size and source_ctime <= target_ctime:
                            logger.info("File \"{0}\" already retrieved.".format(full_url))
                            doDownload = False
                        else:
                            logger.warning("Target \"{0}\" exists, but does not match the source \"{1}\". Retrieving.".format(target, full_url))
                            logger.warning("Target size={0}, ctime={1}. Source size={2}, ctime={3}".format(target_size, target_ctime, source_size, source_ctime))
        finally:
            # BUG FIX: the header-probe connection was never closed.
            request.close()
        # Now do the download
        if doDownload:
            try:
                logger.info("Downloading file {0} to {1}.".format(full_url, target))
                urllib.request.urlretrieve(full_url, target)
            except urllib.error.URLError as err:
                logger.warning("Error while attempting to retrieve file \"{0}\". ({1})".format(full_url, err))
            except OSError as err:
                logger.warning("Unable to write target file \"{0}\". ([{1}] {2})".format(target, err.errno, err.strerror))
            else:
                myRet = True
        else:
            # BUG FIX: the docstring promises True when the file did not
            # need to be downloaded, but the original returned False here.
            myRet = True
    return myRet
def main():
    """Download the OISST data for use in GFDL's ECDA model."""
    # Read in the configuration file
    config = readConfig('oisst.conf')
    # Set the configurations into variables
    scheme_url = config['scheme']
    host_url = config['hostUrl']
    path_url = config['hostPath']
    rawDataDir = config['outputDir']
    # Initiate the logger; init_logging returns None on failure
    logger = init_logging(config['logDir'], config['logLevel'])
    if not isinstance(logger, logging.Logger):
        exit("Unable to setup logging")
    # path_url requires a final '/', or urllib.parse.urljoin will not
    # resolve relative names correctly later
    if not path_url.endswith("/"):
        path_url = path_url + "/"
    try:
        base_url = urllib.parse.urlunsplit((scheme_url, host_url, path_url, '', ''))
    except Exception as err:
        # BUG FIX: the original message referenced index {3} with only
        # three format arguments (IndexError), and read err.errno /
        # err.strerror from a generic Exception (AttributeError).
        logger.exception("Unable to create base full URL from configuration options: scheme={0}, hostUrl={1}, hostPath={2}".format(scheme_url, host_url, path_url))
        logger.exception("Got Exception: \"{0}\"".format(err))
        raise
    # Check if rawDataDir exists, if not create it
    try:
        os.makedirs(rawDataDir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # Need the data from this month and last month.  Last month is
    # estimated as 5 weeks ago so it is covered even early in a month.
    currentTime = datetime.datetime.now()
    lessFourWeeks = datetime.timedelta(weeks=-5)
    lastMonTime = currentTime + lessFourWeeks
    currentDateStr = "{:04d}{:02d}/".format(currentTime.year, currentTime.month)
    lastMonDateStr = "{:04d}{:02d}/".format(lastMonTime.year, lastMonTime.month)
    url_data_base = UrlData(base_url)
    # Download files only in the current month directory, and last month's directory
    dirsToUse = (d for d in url_data_base.dirs if d['name'] == currentDateStr or d['name'] == lastMonDateStr)
    for d in dirsToUse:
        # Create the output directory for this month's files
        try:
            # Store all files in a directory named after the year
            dirDate = datetime.datetime.strptime(d['name'], "%Y%m/")
            outDir = os.path.join(rawDataDir, dirDate.strftime('%Y'))
            os.makedirs(outDir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        dir_url = urllib.parse.urljoin(base_url, d['name'])
        my_url_data = UrlData(dir_url)
        # Download each file; getFile skips files already up to date
        for f in my_url_data.files:
            getFile(dir_url, f['name'], os.path.join(outDir, f['name']))
# Run the downloader when executed as a script.
if __name__ == '__main__':
    main()
|
[
"Seth.Underwood@noaa.gov"
] |
Seth.Underwood@noaa.gov
|
|
ce6a11fe641cb24056fb5e96e49a8af20a35752a
|
615d87a178d96678df4711db235e97932c8880b7
|
/fraud-detection.py
|
e515c2b0b3350f5810a90b3a640f89570e834d13
|
[] |
no_license
|
Radhika007/credit-card-fraud-detection
|
f7723aba1f532d21f187c8158b6422688f898ba9
|
7bd7e3402f3966ca15298dd6a90c946510f39457
|
refs/heads/master
| 2020-06-13T12:22:10.468394
| 2019-08-04T16:31:43
| 2019-08-04T16:31:43
| 194,651,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,060
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul  1 14:31:27 2019

@author: radhika

Credit-card fraud detection: fit a logistic-regression classifier on the
Kaggle credit-card dataset and report its accuracy and confusion matrix.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.metrics import confusion_matrix

# Load the transactions; column 'Class' is 1 for fraud, 0 otherwise
dataset = pd.read_csv('creditcard.csv')
X = dataset.iloc[:, 0:30].values
y = dataset['Class']

# Report the class balance of the dataset
frauds = dataset.loc[dataset['Class'] == 1]
non_frauds = dataset.loc[dataset['Class'] == 0]
print("We have ", len(frauds), "frauds and", len(non_frauds), "non-frauds")

# Hold out 20% of the rows for evaluation
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# Fit a weakly-regularized logistic-regression classifier
logistic = linear_model.LogisticRegression(C=1e5)
logistic.fit(X_train, y_train)
score = logistic.score(X_test, y_test)
print("Score: ", score)

# Predict on the held-out rows
y_pred = np.array(logistic.predict(X_test))

# Build the confusion matrix of true vs. predicted labels
cm = confusion_matrix(y_test, y_pred)
|
[
"noreply@github.com"
] |
Radhika007.noreply@github.com
|
b7dfc930e3d569213e48e5d836b04309dc32179e
|
675e47bd94633152a01dd0e9ee36158c9db99cdc
|
/card.py
|
49ff4ce276766fa52e0b846948985d4cf501bf27
|
[] |
no_license
|
qq456cvb/enumlator
|
d7722317b31d4981eb6f975d501c83e178102b81
|
99b1070ef6901079c18042c64eb64de97f9b07d9
|
refs/heads/master
| 2021-01-01T16:45:45.841206
| 2017-08-16T03:54:34
| 2017-08-16T03:54:34
| 97,910,048
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,353
|
py
|
from collections import Counter
import numpy as np
import itertools
import functools
def get_action_space():
    """Enumerate every playable Dou Dizhu move as a list of card ranks.

    Index 0 is the empty action (pass).  The generation order below is
    significant — other code indexes into this list by position — so the
    order of the sections must not be changed.
    """
    actions = [[]]
    # max_cards = 20
    # single cards (including both jokers)
    for card in Card.cards:
        actions.append([card])
    # pair (jokers have only one copy each, so they cannot be paired)
    for card in Card.cards:
        if card != '*' and card != '$':
            actions.append([card] * 2)
    # triple
    for card in Card.cards:
        if card != '*' and card != '$':
            actions.append([card] * 3)
    # 3 + 1 (triple with any single kicker)
    for main in Card.cards:
        if main != '*' and main != '$':
            for extra in Card.cards:
                if extra != main:
                    actions.append([main] * 3 + [extra])
    # 3 + 2 (triple with a pair kicker; jokers cannot form the pair)
    for main in Card.cards:
        if main != '*' and main != '$':
            for extra in Card.cards:
                if extra != main and extra != '*' and extra != '$':
                    actions.append([main] * 3 + [extra] * 2)
    # single sequence: at least 5 consecutive ranks, '2' and jokers excluded
    for start_v in range(Card.to_value('3'), Card.to_value('2')):
        for end_v in range(start_v + 5, Card.to_value('2')):
            seq = range(start_v, end_v)
            actions.append(Card.to_cards(seq))
    # double sequence: at least 3 consecutive pairs (hand limit 20 cards)
    for start_v in range(Card.to_value('3'), Card.to_value('2')):
        for end_v in range(start_v + 3, int(min(start_v + 20 / 2, Card.to_value('2')))):
            seq = range(start_v, end_v)
            actions.append(Card.to_cards(seq) * 2)
    # triple sequence: at least 2 consecutive triples
    for start_v in range(Card.to_value('3'), Card.to_value('2')):
        for end_v in range(start_v + 2, int(min(start_v + 20 / 3, Card.to_value('2')))):
            seq = range(start_v, end_v)
            actions.append(Card.to_cards(seq) * 3)
    # 3 + 1 sequence: triple sequence with one single kicker per triple
    # (kickers may not include both jokers together)
    for start_v in range(Card.to_value('3'), Card.to_value('2')):
        for end_v in range(start_v + 2, int(min(start_v + 20 / 4, Card.to_value('2')))):
            seq = range(start_v, end_v)
            main = Card.to_cards(seq)
            remains = [card for card in Card.cards if card not in main]
            for extra in list(itertools.combinations(remains, end_v - start_v)):
                if not ('*' in list(extra) and '$' in list(extra)):
                    actions.append(main * 3 + list(extra))
    # 3 + 2 sequence: triple sequence with one pair kicker per triple
    for start_v in range(Card.to_value('3'), Card.to_value('2')):
        for end_v in range(start_v + 2, int(min(start_v + 20 / 5, Card.to_value('2')))):
            seq = range(start_v, end_v)
            main = Card.to_cards(seq)
            remains = [card for card in Card.cards if card not in main and card not in ['*', '$']]
            for extra in list(itertools.combinations(remains, end_v - start_v)):
                actions.append(main * 3 + list(extra) * 2)
    # bomb (four of a kind)
    for card in Card.cards:
        if card != '*' and card != '$':
            actions.append([card] * 4)
    # bigbang (rocket: both jokers)
    actions.append(['*', '$'])
    # 4 + 1 + 1 (bomb with two single kickers, not both jokers)
    for main in Card.cards:
        if main != '*' and main != '$':
            remains = [card for card in Card.cards if card != main]
            for extra in list(itertools.combinations(remains, 2)):
                if not ('*' in list(extra) and '$' in list(extra)):
                    actions.append([main] * 4 + list(extra))
    return actions
class Card:
    """Static helpers for Dou Dizhu card ranks.

    Cards are ordered '3' (lowest) through '2', then the black joker '*'
    and the red joker '$'.  The class is never meaningfully instantiated;
    all state lives at class level and all helpers are static.
    """
    cards = ['3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A', '2', '*', '$']
    # One-hot layout: four slots per regular rank (one per copy of the
    # card), plus a single slot each for the two jokers (52 and 53).
    cards_to_onehot_idx = dict((x, i * 4) for (i, x) in enumerate(cards))
    cards_to_onehot_idx['*'] = 52
    cards_to_onehot_idx['$'] = 53
    # Rank value of each card ('3' -> 0 ... '$' -> 14) and the inverse map
    cards_to_value = dict(zip(cards, range(len(cards))))
    value_to_cards = dict((v, c) for (c, v) in cards_to_value.items())

    # BUG FIX: removed the stray no-op statement `cards.index('3')` that
    # ran at class-creation time, and dead commented-out code.

    def __init__(self):
        pass

    @staticmethod
    def to_onehot(cards):
        """Return a length-54 0/1 numpy vector encoding the multiset *cards*."""
        counts = Counter(cards)
        onehot = np.zeros(54)
        for x in cards:
            if x in ['*', '$']:
                onehot[Card.cards_to_onehot_idx[x]] = 1
            else:
                # Light up one slot per copy of this rank held
                subvec = np.zeros(4)
                subvec[:counts[x]] = 1
                onehot[Card.cards_to_onehot_idx[x]:Card.cards_to_onehot_idx[x] + 4] = subvec
        return onehot

    @staticmethod
    def to_value(card):
        """Return the rank value of a card, or the sum of values for a list/range."""
        if type(card) is list or type(card) is range:
            val = 0
            for c in card:
                val += Card.cards_to_value[c]
            return val
        else:
            return Card.cards_to_value[card]

    @staticmethod
    def to_cards(values):
        """Map a rank value back to its card, or a list/range of values to cards."""
        if type(values) is list or type(values) is range:
            cards = []
            for v in values:
                cards.append(Card.value_to_cards[v])
            return cards
        else:
            return Card.value_to_cards[values]
class CardGroup:
    """A playable combination of cards (single, pair, bomb, sequence, ...).

    Each group carries a type string, the list of card ranks it contains,
    and a comparison value used by bigger_than to rank groups.
    """
    def __init__(self, cards, t, val):
        # t: type string (e.g. 'single', 'bomb'); val: comparison value
        self.type = t
        self.cards = cards
        self.value = val

    def __len__(self):
        """Number of cards in the group."""
        return len(self.cards)

    def bigger_than(self, g):
        """Return True if this group beats group *g* under Dou Dizhu rules.

        A rocket ('bigbang') beats everything; a bomb beats any non-bomb
        and any smaller bomb; otherwise both groups must have the same
        type and length, and the higher value wins.
        """
        if g.type == 'bigbang':
            return False
        if g.type == 'bomb':
            if (self.type == 'bomb' and self.value > g.value) or self.type == 'bigbang':
                return True
            else:
                return False
        if (self.type == 'bomb' or self.type == 'bigbang') or \
            (self.type == g.type and len(self) == len(g) and self.value > g.value):
            return True
        else:
            return False

    @staticmethod
    def isvalid(cards):
        """Return True if *cards* forms exactly one playable group."""
        return CardGroup.folks(cards) == 1

    @staticmethod
    def to_cardgroup(cards):
        """Parse *cards* into the single CardGroup that uses every card.

        Raises Exception when no candidate group covers all the cards.
        """
        candidates = CardGroup.analyze(cards)
        for c in candidates:
            if len(c.cards) == len(cards):
                return c
        print("cards error!")
        print(cards)
        raise Exception("Invalid Cards!")

    @staticmethod
    def folks(cards):
        """Return the minimum number of groups needed to play out *cards*.

        Recursive search: composite group types branch the search; when
        only simple candidates remain, their count is the answer.
        """
        cand = CardGroup.analyze(cards)
        cnt = 10000
        # if not cards:
        #     return 0
        # for c in cand:
        #     remain = list(cards)
        #     for card in c.cards:
        #         remain.remove(card)
        #     if CardGroup.folks(remain) + 1 < cnt:
        #         cnt = CardGroup.folks(remain) + 1
        # return cnt
        spec = False
        for c in cand:
            if c.type == 'triple_seq' or c.type == 'triple+single' or \
                    c.type == 'triple+double' or c.type == 'quadric+singles' or \
                    c.type == 'quadric+doubles' or c.type == 'triple_seq+singles' or \
                    c.type == 'triple_seq+doubles' or c.type == 'single_seq' or \
                    c.type == 'double_seq':
                spec = True
                remain = list(cards)
                for card in c.cards:
                    remain.remove(card)
                if CardGroup.folks(remain) + 1 < cnt:
                    cnt = CardGroup.folks(remain) + 1
        if not spec:
            cnt = len(cand)
        return cnt

    @staticmethod
    def analyze(cards):
        """Enumerate every candidate CardGroup contained in *cards*.

        Returns the candidates sorted by an importance order (weakest
        types first, bombs/rocket last), each type sub-sorted by value.
        """
        cards = list(cards)
        candidates = []
        counts = Counter(cards)
        # Rocket: both jokers together
        if '*' in cards and '$' in cards:
            candidates.append((CardGroup(['*', '$'], 'bigbang', 10000)))
            cards.remove('*')
            cards.remove('$')
        quadrics = []
        # quadric (bomb); bomb cards are removed before further analysis
        for c in counts:
            if counts[c] == 4:
                quadrics.append(c)
                candidates.append(CardGroup([c] * 4, 'bomb', Card.to_value(c)))
                cards = list(filter(lambda a: a != c, cards))
        counts = Counter(cards)
        singles = [c for c in counts if counts[c] == 1]
        doubles = [c for c in counts if counts[c] == 2]
        triples = [c for c in counts if counts[c] == 3]
        singles.sort(key=lambda k: Card.cards_to_value[k])
        doubles.sort(key=lambda k: Card.cards_to_value[k])
        triples.sort(key=lambda k: Card.cards_to_value[k])
        # continuous single sequence (length >= 5, stops before '2')
        if len(singles) > 0:
            cnt = 1
            cand = [singles[0]]
            for i in range(1, len(singles)):
                if Card.to_value(singles[i]) >= Card.to_value('2'):
                    break
                if Card.to_value(singles[i]) == Card.to_value(cand[-1]) + 1:
                    cand.append(singles[i])
                    cnt += 1
                else:
                    if cnt >= 5:
                        candidates.append(CardGroup(cand, 'single_seq', Card.to_value(cand[-1])))
                        # for c in cand:
                        #     cards.remove(c)
                    cand = [singles[i]]
                    cnt = 1
            if cnt >= 5:
                candidates.append(CardGroup(cand, 'single_seq', Card.to_value(cand[-1])))
                # for c in cand:
                #     cards.remove(c)
        # continuous double sequence (length >= 3 pairs)
        if len(doubles) > 0:
            cnt = 1
            cand = [doubles[0]] * 2
            for i in range(1, len(doubles)):
                if Card.to_value(doubles[i]) >= Card.to_value('2'):
                    break
                if Card.to_value(doubles[i]) == Card.to_value(cand[-1]) + 1:
                    cand += [doubles[i]] * 2
                    cnt += 1
                else:
                    if cnt >= 3:
                        candidates.append(CardGroup(cand, 'double_seq', Card.to_value(cand[-1])))
                        # for c in cand:
                        #     if c in cards:
                        #         cards.remove(c)
                    cand = [doubles[i]] * 2
                    cnt = 1
            if cnt >= 3:
                candidates.append(CardGroup(cand, 'double_seq', Card.to_value(cand[-1])))
                # for c in cand:
                #     if c in cards:
                #         cards.remove(c)
        # continuous triple sequence (length >= 2 triples)
        if len(triples) > 0:
            cnt = 1
            cand = [triples[0]] * 3
            for i in range(1, len(triples)):
                if Card.to_value(triples[i]) >= Card.to_value('2'):
                    break
                if Card.to_value(triples[i]) == Card.to_value(cand[-1]) + 1:
                    cand += [triples[i]] * 3
                    cnt += 1
                else:
                    if cnt >= 2:
                        candidates.append(CardGroup(cand, 'triple_seq', Card.to_value(cand[-1])))
                        # for c in cand:
                        #     if c in cards:
                        #         cards.remove(c)
                    cand = [triples[i]] * 3
                    cnt = 1
            if cnt >= 2:
                candidates.append(CardGroup(cand, 'triple_seq', Card.to_value(cand[-1])))
                # for c in cand:
                #     if c in cards:
                #         cards.remove(c)
        for t in triples:
            candidates.append(CardGroup([t] * 3, 'triple', Card.to_value(t)))
        counts = Counter(cards)
        singles = [c for c in counts if counts[c] == 1]
        doubles = [c for c in counts if counts[c] == 2]
        # single
        for s in singles:
            candidates.append(CardGroup([s], 'single', Card.to_value(s)))
        # double
        for d in doubles:
            candidates.append(CardGroup([d] * 2, 'double', Card.to_value(d)))
        # 3 + 1, 3 + 2 (value encodes main*1000 + kicker)
        for c in triples:
            triple = [c] * 3
            for s in singles:
                if s not in triple:
                    candidates.append(CardGroup(triple + [s], 'triple+single',
                                                Card.to_value(c) * 1000 + Card.to_value(s)))
            for d in doubles:
                if d not in triple:
                    candidates.append(CardGroup(triple + [d] * 2, 'triple+double',
                                                Card.to_value(c) * 1000 + Card.to_value(d)))
        # 4 + 2 (bomb with two single or two pair kickers)
        for c in quadrics:
            for extra in list(itertools.combinations(singles, 2)):
                candidates.append(CardGroup([c] * 4 + list(extra), 'quadric+singles',
                                            Card.to_value(c) * 1000 + Card.to_value(list(extra))))
            for extra in list(itertools.combinations(doubles, 2)):
                candidates.append(CardGroup([c] * 4 + list(extra) * 2, 'quadric+doubles',
                                            Card.to_value(c) * 1000 + Card.to_value(list(extra))))
        # 3 * n + n, 3 * n + 2 * n (triple sequences with kickers)
        triple_seq = [c.cards for c in candidates if c.type == 'triple_seq']
        for cand in triple_seq:
            cnt = int(len(cand) / 3)
            for extra in list(itertools.combinations(singles, cnt)):
                candidates.append(
                    CardGroup(cand + list(extra), 'triple_seq+singles',
                              Card.to_value(cand[-1]) * 1000 + Card.to_value(list(extra))))
            for extra in list(itertools.combinations(doubles, cnt)):
                candidates.append(
                    CardGroup(cand + list(extra) * 2, 'triple_seq+doubles',
                              Card.to_value(cand[-1]) * 1000 + Card.to_value(list(extra))))
        # Sort by type importance, then by value within the same type
        importance = ['single', 'double', 'double_seq', 'single_seq', 'triple+single',
                      'triple+double', 'triple_seq+singles', 'triple_seq+doubles',
                      'triple_seq', 'triple', 'quadric+singles', 'quadric+doubles',
                      'bomb', 'bigbang']
        candidates.sort(key=functools.cmp_to_key(lambda x, y: importance.index(x.type) - importance.index(y.type)
                        if importance.index(x.type) != importance.index(y.type) else x.value - y.value))
        # for c in candidates:
        #     print c.cards
        return candidates
if __name__ == '__main__':
    # Smoke test: every non-empty action in the generated action space
    # must parse back into a valid CardGroup (to_cardgroup raises otherwise).
    pass
    # CardGroup.to_cardgroup(['6', '6', 'Q', 'Q', 'Q'])
    actions = get_action_space()
    for i in range(1, len(actions)):
        CardGroup.to_cardgroup(actions[i])
    # print(CardGroup.folks(['3', '4', '3', '4', '3', '4', '*', '$']))
    # CardGroup.to_cardgroup(['3', '4', '3', '4', '3', '4', '*', '$'])
    # print actions[561]
    # print CardGroup.folks(actions[561])
    # CardGroup.to_cardgroup(actions[i])
    # print Card.to_onehot(['3', '4', '4', '$'])
    # print len(actions)
    # print Card.to_cards(1)
    # CardGroup.analyze(['3', '3', '3', '4', '4', '4', '10', 'J', 'Q', 'A', 'A', '2', '2', '*', '$'])
|
[
"neilyou@qq.com"
] |
neilyou@qq.com
|
dccff8c20bf96ece6ddcaaf44cb871aab46ff061
|
e9e135908e901613a5bd9bcc27987305fccd5d71
|
/src/ploting.py
|
5ce97095c6eba9b19ee1f229f56d64f756bc3514
|
[] |
no_license
|
pythontz/IndabaX_Tanzania
|
6358966c3a411e77fdbcdbea54ba8b39a08d1c43
|
c32bce6b0dcf3e2080a52d978c3a3678adbfa3ea
|
refs/heads/master
| 2020-03-14T16:56:51.644867
| 2018-05-01T12:05:45
| 2018-05-01T12:05:45
| 131,708,457
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,511
|
py
|
import matplotlib
import matplotlib.dates as dates
import matplotlib as mpl
from matplotlib import rcParams
import pandas as pd
import numpy as np
from datetime import datetime, timezone
import matplotlib as mpl
import matplotlib.pyplot as plt
from cycler import cycler
# Format python plot with Latex
SPINE_COLOR = 'gray'  # color applied to the visible axis spines in format_axes
# Color cycle for plot lines: two hex colors followed by RGB tuples
# (channel values in 0-1); installed into 'axes.prop_cycle' by beatify.
dark_colors = ["#ba1a31", "#2d6dce",
               (0.8509803921568627, 0.37254901960784315, 0.00784313725490196),
               (0.4588235294117647, 0.4392156862745098, 0.7019607843137254),
               (0.9058823529411765, 0.1607843137254902, 0.5411764705882353),
               (0.4, 0.6509803921568628, 0.11764705882352941),
               (0.9019607843137255, 0.6705882352941176, 0.00784313725490196),
               (0.6509803921568628, 0.4627450980392157, 0.11372549019607843),
               (0.4, 0.4, 0.4)]
def beatify(fig_width=None, fig_height=None, columns=1):
    """Set up matplotlib's RC params for LaTeX plotting.

    Call this before plotting a figure.

    Parameters
    ----------
    fig_width : float, optional, inches
        Defaults to an IEEE column width for the given column count.
    fig_height : float, optional, inches
        Defaults to fig_width times the golden ratio, capped at 8 inches.
    columns : {1, 2}
        Number of journal columns the figure spans.
    """
    # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
    # Width and max height in inches for IEEE journals taken from
    # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
    assert(columns in [1,2])

    if fig_width is None:
        fig_width = 3.39 if columns==1 else 6.9 # width in inches

    if fig_height is None:
        golden_mean = (np.sqrt(5)-1.0)/2.0    # Aesthetic ratio
        fig_height = fig_width*golden_mean # height in inches

    MAX_HEIGHT_INCHES = 8.0
    if fig_height > MAX_HEIGHT_INCHES:
        # BUG FIX: the original concatenated str + float here, which raised
        # TypeError whenever this branch ran.
        print("WARNING: fig_height too large: {0} "
              "so will reduce to {1} inches.".format(fig_height, MAX_HEIGHT_INCHES))
        fig_height = MAX_HEIGHT_INCHES

    params = {#'backend': 'ps',
              #'text.latex.preamble': ['\\usepackage{gensymb}'],
              'axes.labelsize': 6, # fontsize for x and y labels (was 10)
              'axes.titlesize': 6,
              'font.size': 8, # was 10
              'font.family': "sans serif",
             # 'font.serif': ['Times', 'Palatino', 'New Century Schoolbook', 'Bookman', 'Computer Modern Roman'],
             # 'font.sans-serif' : ['Helvetica', 'Avant Garde', 'Computer Modern Sans serif'],
              'legend.fontsize': 8, # was 10
              'xtick.labelsize': 6,
              'ytick.labelsize': 6,
              'grid.linestyle':"-",
              'patch.edgecolor': 'none',
              'grid.linewidth': 0.1,
              'grid.color':"gray" ,
              'axes.facecolor': "white",
              'figure.dpi': 600,
              'lines.linewidth':0.75,
              'axes.prop_cycle':cycler('color', dark_colors),
              'figure.figsize': [fig_width,fig_height],
              }

    matplotlib.rcParams.update(params)
def format_axes(ax):
    """Style a matplotlib Axes: hide top/right spines, gray the rest,
    and put ticks only on the bottom/left axes.  Returns the same Axes."""
    # Hide the top and right spines entirely
    for spine in ['top', 'right']:
        ax.spines[spine].set_visible(False)

    # Color and thin the remaining spines
    for spine in ['left', 'bottom']:
        ax.spines[spine].set_color(SPINE_COLOR)
        ax.spines[spine].set_linewidth(.75)
        # NOTE(review): Spine.set_smart_bounds was removed in Matplotlib 3.2;
        # this call fails on modern Matplotlib — confirm the pinned version.
        ax.spines[spine].set_smart_bounds(False)

    # Ticks only on the bottom and left sides
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    for axis in [ax.xaxis, ax.yaxis]:
        axis.set_tick_params(direction='out', color=SPINE_COLOR)

   # matplotlib.pyplot.tight_layout()

    return ax
def get_datesstring(timestamps):
    """Convert Unix timestamps into 'YYYY-MM-DD HH:MM' strings.

    Note: datetime.fromtimestamp interprets each timestamp in the
    machine's local timezone.
    """
    formatted = []
    for stamp in timestamps:
        formatted.append(datetime.fromtimestamp(stamp).strftime('%Y-%m-%d %H:%M'))
    return formatted
|
[
"sambaiga@gmail.com"
] |
sambaiga@gmail.com
|
615bae9151deaabf675595e167e23f46558cbee5
|
08d7462edc78ae720350401bcbddbffcfe5b6d12
|
/music/migrations/0004_album_album_logo.py
|
cb712980507d0ae0a1b68473e79f73162bc44f15
|
[] |
no_license
|
praniel1/music
|
ff461613c1eacb35f65c7121f88c99d00a3e6dbf
|
fdcd825f09212e55d4fa82db3020f21414ed5308
|
refs/heads/master
| 2021-06-27T15:22:37.870587
| 2017-09-12T09:26:12
| 2017-09-12T09:26:12
| 103,249,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-08-23 20:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add Album.album_logo as a nullable FileField.

    Auto-generated by Django 1.11; follows migration 0003, which removed
    the same field.
    """

    dependencies = [
        ('music', '0003_remove_album_album_logo'),
    ]

    operations = [
        migrations.AddField(
            model_name='album',
            name='album_logo',
            # NOTE(review): upload_to=b'' is a bytes literal (a Python 2
            # artifact of the generator).  Historical migrations should not
            # be edited; confirm behavior before changing it.
            field=models.FileField(null=True, upload_to=b''),
        ),
    ]
|
[
"praniel1@Praniels-MacBook-Air.local"
] |
praniel1@Praniels-MacBook-Air.local
|
cfa60ce5fe3927c5de3c3798a1b99d5f3e6b65b2
|
458b1133df5b38a017f3a690a624a54f0f43fda7
|
/PaperExperiments/XHExp027/parameters.py
|
37d8e07fd2063ff93490e451e1c805acdf32f468
|
[
"MIT"
] |
permissive
|
stefan-c-kremer/TE_World2
|
9c7eca30ee6200d371183c5ba32b3345a4cc04ee
|
8e1fae218af8a1eabae776deecac62192c22e0ca
|
refs/heads/master
| 2020-12-18T14:31:00.639003
| 2020-02-04T15:55:49
| 2020-02-04T15:55:49
| 235,413,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,710
|
py
|
# parameters.py
"""
Exp 27 - {'Initial_genes': '5000', 'Host_mutation_rate': '0.30', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Triangle( pmax=0, pzero=3.0/3.0 )', 'Carrying_capacity': '30', 'TE_excision_rate': '0.5', 'Junk_BP': '14', 'Gene_Insertion_Distribution': 'Triangle( pzero=1.0/3.0, pmax=1 )', 'mutation_effect': '0.01', 'TE_death_rate': '0.0005'}
"""
from TEUtil import *

# Verbosity switches: each key enables one category of progress message
# printed by TESim.py while the simulation runs (search TESim.py for the
# keyword to see exactly what each prints).
output = {
    "SPLAT": False,
    "SPLAT FITNESS": False,
    "INITIALIZATION": False,
    "GENERATION": True,
    "HOST EXTINCTION": True,
    "TE EXTINCTION": True,
    "TRIAL NO": True,
    "GENE INIT": False,
    "TE INIT": False,
}

# Insertion-position distributions.
# Triangle(pmax, pzero) draws values between pmax and pzero with a
# triangular probability density (highest at pmax, lowest at pzero);
# swap the two arguments to flip the triangle.  Flat() would draw
# uniformly on [0, 1].
TE_Insertion_Distribution = Triangle(pmax=0, pzero=3.0 / 3.0)
Gene_Insertion_Distribution = Triangle(pzero=1.0 / 3.0, pmax=1)

# Element sizes in base pairs
Gene_length = 1000
TE_length = 1000

# Per-generation TE fates
TE_death_rate = 0.0005
TE_excision_rate = 0.5   # set to zero for retrotransposons

# Probability table over progeny counts.  For retrotransposons this is
# the number of new copies; for DNA transposons it additionally includes
# the original re-inserting.
TE_progeny = ProbabilityTable(0.15, 0, 0.55, 1, 0.30, 2)

Initial_genes = 5000

# Append_gene controls what the initializer does when it tries to place
# a gene inside another gene:
#   True  - append it at the end of the original gene instead
#           (use with small amounts of Junk_BP)
#   False - retry placement elsewhere (do not use with small Junk_BP)
Append_gene = True

Initial_TEs = 1

# Amount of non-coding sequence in the genome
MILLION = 1000000
Junk_BP = 14 * MILLION

# Host fitness and mutation model
Host_start_fitness = 1.0
Host_mutation_rate = 0.30
Host_mutation = ProbabilityTable(0.40, lambda fit: 0.0,
                                 0.30, lambda fit: fit - random.random() * 0.01,
                                 0.15, lambda fit: fit,
                                 0.15, lambda fit: fit + random.random() * 0.01
                                 )

# Fitness effect applied when a TE lands inside a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
                                    0.20, lambda fit: fit - random.random() * 0.01,
                                    0.30, lambda fit: fit,
                                    0.20, lambda fit: fit + random.random() * 0.01
                                    )

# Population model
Carrying_capacity = 30
Host_reproduction_rate = 1   # offspring per host
# propfit = this individual's proportion of the population's total fitness
Host_survival_rate = lambda propfit: min(Carrying_capacity * propfit, 0.95)

# Stopping conditions
Maximum_generations = 1500
Terminate_no_TEs = True   # end the simulation when no TEs remain

# seed = 0;
seed = None   # None seeds the random number generator "randomly"

save_frequency = 50   # how often (in generations) to checkpoint state
saved = None          # None starts fresh; a filename resumes that checkpoint
|
[
"stefan@kremer.ca"
] |
stefan@kremer.ca
|
d9ea40e86d880c00089429a874c50b32a1e44416
|
286d8c1ff92c49d51efda52ba281621e89cc0b64
|
/SemanticParse/ValidURI.py
|
75b1d5e774f6b06668a7e193eeabdc40690d36b5
|
[
"MIT"
] |
permissive
|
zointblackbriar/QuestionAnswering
|
e154d16fa84e1abd1946fe62ff343d4ec2d96ff5
|
319c3623ced22254d75c2918929a875090bd2bf5
|
refs/heads/master
| 2020-03-28T01:34:57.874688
| 2019-12-12T16:06:33
| 2019-12-12T16:06:33
| 147,515,150
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,572
|
py
|
# This is a library that reliably creates valid (parts of) IRIs from strings (spaces are turned into underscores, etc.).
# Copyright (c) 2015 Rinke Hoekstra (VU University Amsterdam/University of Amsterdam)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import urllib
import rfc3987
#This library is related to unicode
import sys
try:
import urlparse
except:
import urllib.parse as urlparse
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
def to_iri(iri):
    """
    Safely quotes an IRI in a way that is resilient to unicode and incorrect
    arguments (checks for RFC 3987 compliance and falls back to percent encoding)
    """
    # First decode the IRI if needed (python 2)
    # NOTE(review): str.decode only exists on Python 2 byte strings; on
    # Python 3 (or an already-unicode argument) this line raises. The
    # urllib.quote calls below are also Python-2-only — confirm the intended
    # runtime before reuse.
    logger.debug("Converting IRI to unicode")
    iri = iri.decode('utf-8')
    try:
        # If we can safely parse the URI, then we don't
        # need to do anything special here
        rfc3987.parse(iri, rule='IRI')
        logger.debug("This is already a valid IRI, doing nothing...")
        return iri
    except:
        # The URI is not valid, so we'll have to fix it.
        logger.debug("The IRI is not valid, proceeding to quote...")
        # First see whether we can actually parse it *as if* it is a URI
        parts = urlparse.urlsplit(iri)
        if not parts.scheme or not parts.netloc:
            # If there is no scheme (e.g. http) nor a net location (e.g.
            # example.com) then we cannot do anything
            logger.error("The argument you provided does not comply with "
                         "RFC 3987 and is not parseable as a IRI"
                         "(there is no scheme or no net location part)")
            logger.error(iri)
            raise Exception("The argument you provided does not comply with"
                            "RFC 3987 and is not parseable as a IRI"
                            "(there is no scheme or no net location part)")
        logger.debug("The IRI contains all necessary parts (scheme + net location)")
        quoted_parts = {}
        # We'll now convert the path, query and fragment parts of the URI
        # Get the 'anti-pattern' for the valid characters (see rfc3987 package)
        # This is roughly the ipchar pattern plus the '/' as we don't need to match
        # the entire path, but merely the individual characters
        no_invalid_characters = rfc3987.get_compiled_pattern("(?!%(iunreserved)s|%(pct_encoded)s|%(sub_delims)s|:|@|/)(.)")
        # Replace the invalid characters with an underscore (no need to roundtrip)
        quoted_parts['path'] = no_invalid_characters.sub(u'_', parts.path)
        if parts.fragment:
            quoted_parts['fragment'] = no_invalid_characters.sub(u'_', parts.fragment)
        if parts.query:
            # Queries are percent-encoded rather than underscored so that the
            # '&' and '=' separators keep their meaning.
            quoted_parts['query'] = urllib.quote(parts.query.encode('utf-8'),safe="&=")
        # Leave these untouched
        quoted_parts['scheme'] = parts.scheme
        quoted_parts['authority'] = parts.netloc
        # Extra check to make sure we now have a valid IRI
        quoted_iri = rfc3987.compose(**quoted_parts)
        try:
            rfc3987.parse(quoted_iri)
        except:
            # Unable to generate a valid quoted iri, using the straightforward
            # urllib percent quoting (but this is ugly!)
            logger.warning('Could not safely quote as IRI, falling back to '
                           'percent encoding')
            quoted_iri = urllib.quote(iri.encode('utf-8'))
        return quoted_iri
|
[
"freelancerzoint@gmail.com"
] |
freelancerzoint@gmail.com
|
7eb80ac47f3cc0c5b7cad625ed6e385a0477b357
|
22ef551e041279e62d969124d367e78e485ff1e9
|
/client.py
|
c5b9a97bf8a6a5eb71429763c31ef64e6038e892
|
[] |
no_license
|
dmmeteo/simple_blockchain
|
df9ebecb52691e0206de044724c671323a6af59d
|
a788f69c554f0986c0cfc20e6bf5ab994d6db216
|
refs/heads/master
| 2020-03-20T06:36:11.066102
| 2018-06-13T22:31:55
| 2018-06-13T22:31:55
| 137,254,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
#!/usr/bin/env python
from datetime import datetime
from blockchain import Block
def create_genesis_block():
    """Build block 0 of the chain, whose previous-hash field is the string "0"."""
    genesis = Block(0, datetime.now(), "My first block data", "0")
    return genesis
def next_block(last_block):
    """Derive the successor of *last_block*: next index, fresh timestamp,
    a canned data string, and the predecessor's hash as the link."""
    new_index = last_block.index + 1
    return Block(
        new_index,
        datetime.now(),
        "Hey! I'm block %s" % new_index,
        last_block.hash,
    )
def main():
    """Grow a chain of 20 blocks from the genesis block, announcing each one."""
    chain = [create_genesis_block()]
    tail = chain[0]
    for _ in range(20):
        tail = next_block(tail)
        chain.append(tail)
        print("Block #{} has been added to the blockchain!".format(tail.index))
        print("Hash: {}\n".format(tail.hash))
if __name__ == "__main__":
main()
|
[
"dmmeteo@gmail.com"
] |
dmmeteo@gmail.com
|
97e441009e1f49f86a5dde27dc42cdbb54ec11ed
|
fc31e1963f11d95e786be8c80a052dd7b2623f1b
|
/PythonUtils/FileWriter.py
|
b4be0c214c30d213ab2f17fffa4ce02c2e29a0b4
|
[] |
no_license
|
xbinglzh/PythonTools
|
a98b1bbd2178b82cdcecff9ac8ecca8147760d80
|
8d5d7fc18e0e82cc713fb6a8f3d1aa78cb15817f
|
refs/heads/master
| 2021-01-10T21:29:19.406044
| 2014-08-14T03:27:17
| 2014-08-14T03:27:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
'''
Created on Apr 26, 2011
'''
import struct
import array
import os
class FileWriter():
    """Write struct-packed binary primitives to a file, creating the
    destination directory on demand.

    Multi-byte values use native byte order unless the method name says
    otherwise (WriteBigInt is explicitly big-endian).
    """

    def WriteFileHead(self):
        """Emit the fixed file header: byte-order marker, model count, index."""
        # write byte order, but the Value of Java Big/Lit end order?
        # NOTE(review): the meaning of the marker value 4 is not established
        # here -- presumably agreed with the reader side; confirm.
        self.WriteByte(4)
        # num of Modle
        self.WriteBigInt(1)
        # write i
        self.WriteBigInt(1)

    def __Write(self, data):
        # Single funnel for every raw write.
        self.file.write(data)

    def WriteAny(self, i, typedef='f'):
        """Pack *i* with an arbitrary struct format code (default: float)."""
        self.__Write(struct.pack(typedef, i))

    def WriteByte(self, b):
        """Write one unsigned byte."""
        self.__Write(struct.pack('B', b))

    def WriteBigInt(self, i):
        """Write a 4-byte big-endian signed int."""
        self.__Write(struct.pack('>i', i))

    def WriteInt(self, i):
        """Write a native-order signed int."""
        self.__Write(struct.pack('i', i))

    def WriteShort(self, i):
        """Write a native-order signed short."""
        self.__Write(struct.pack('h', i))

    def WriteFloat(self, f):
        """Write a native-order 32-bit float."""
        self.__Write(struct.pack('f', f))

    def WriteLong(self, l):
        """Write a native-order signed long."""
        self.__Write(struct.pack('l', l))

    def WriteArray(self, array, typedef='f'):
        """Write every element of *array* with the given format code."""
        for value in array:
            self.WriteAny(value, typedef)

    def WriteArrayList(self, list, typedef='f'):
        """Write each array as a big-endian length prefix plus its elements,
        then a single -1 sentinel after the whole list."""
        for entry in list:
            self.WriteBigInt(len(entry))
            # BUG FIX: typedef was previously dropped here, so nested arrays
            # were always written as floats regardless of the caller's choice.
            self.WriteArray(entry, typedef)
        self.WriteBigInt(-1)

    def WriteList(self, l, typedef='h'):
        """Write every element of *l* with the given format code (default short)."""
        for value in l:
            self.WriteAny(value, typedef)

    def WriteString(self, us):
        """Write a native-order int length prefix followed by the ASCII bytes
        of *us*; non-ASCII characters are silently dropped."""
        # from unicode to string
        s = us.encode('ascii', 'ignore')
        self.WriteAny(len(s), 'i')
        self.__Write(struct.pack("%ds" % len(s), s))

    def Write3DVector(self, pValue):
        """Write the x, y, z components of *pValue* as three floats."""
        self.WriteFloat(pValue[0])
        self.WriteFloat(pValue[1])
        self.WriteFloat(pValue[2])

    def Write(self, data):
        """Write pre-packed bytes verbatim."""
        self.__Write(data)

    def close(self):
        """Close the underlying file handle."""
        self.file.close()

    def __init__(self, fileName):
        """Open *fileName* for binary writing, creating missing parent
        directories first."""
        out_dir = os.path.dirname(fileName)
        # `and` replaces the original bitwise `&` on booleans; also skip
        # makedirs entirely when the path has no directory component
        # (os.path.dirname returns "" for a bare file name).
        if out_dir and not os.path.isdir(out_dir):
            try:
                os.makedirs(out_dir)
            except OSError:
                # Lost a race with another writer or lacking permission;
                # let open() below surface any real problem.
                pass
        self.file = open(fileName, 'wb')
|
[
"xbinglzh@gmail.com"
] |
xbinglzh@gmail.com
|
c97ba71b53a1fe656b048d2baa597f096b14ee10
|
720126354851961926277c61f41318184c79fad2
|
/venv/lib/python3.8/site-packages/pm4py/algo/discovery/dfg/algorithm.py
|
9fa87ac3c59691cbf97fb6bf192a585897c90f62
|
[] |
no_license
|
yclei/co-co-eaa
|
c4057025e9b91e6af01d2765196df743bfe5985d
|
aeb6d42ab3ce4bc83b08a1ec9a4a3d6d3da380b4
|
refs/heads/main
| 2023-03-12T14:49:03.972865
| 2021-03-03T22:38:48
| 2021-03-03T22:38:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,350
|
py
|
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py import util as pmutil
from pm4py.algo.discovery.dfg.variants import native, performance, freq_triples
from pm4py.objects.conversion.log import converter as log_conversion
from pm4py.util import xes_constants as xes_util
from pm4py.util import exec_utils
from pm4py.algo.discovery.parameters import Parameters
from enum import Enum
import pkgutil
class Variants(Enum):
    # Available DFG discovery implementations. FREQUENCY and FREQUENCY_GREEDY
    # alias the native module; PERFORMANCE_GREEDY aliases performance, so
    # several enum members resolve to the same implementation.
    NATIVE = native
    FREQUENCY = native
    PERFORMANCE = performance
    FREQUENCY_GREEDY = native
    PERFORMANCE_GREEDY = performance
    FREQ_TRIPLES = freq_triples
DFG_NATIVE = Variants.NATIVE
DFG_FREQUENCY = Variants.FREQUENCY
DFG_PERFORMANCE = Variants.PERFORMANCE
DFG_FREQUENCY_GREEDY = Variants.FREQUENCY_GREEDY
DFG_PERFORMANCE_GREEDY = Variants.PERFORMANCE_GREEDY
FREQ_TRIPLES = Variants.FREQ_TRIPLES
DEFAULT_VARIANT = Variants.NATIVE
VERSIONS = {DFG_NATIVE, DFG_FREQUENCY, DFG_PERFORMANCE, DFG_FREQUENCY_GREEDY, DFG_PERFORMANCE_GREEDY, FREQ_TRIPLES}
def apply(log, parameters=None, variant=DEFAULT_VARIANT):
    """
    Calculates DFG graph (frequency or performance) starting from a log

    Parameters
    ----------
    log
        Log
    parameters
        Possible parameters passed to the algorithms:
            Parameters.AGGREGATION_MEASURE -> performance aggregation measure (min, max, mean, median)
            Parameters.ACTIVITY_KEY -> Attribute to use as activity
            Parameters.TIMESTAMP_KEY -> Attribute to use as timestamp
    variant
        Variant of the algorithm to use, possible values:
            - Variants.NATIVE
            - Variants.FREQUENCY
            - Variants.FREQUENCY_GREEDY
            - Variants.PERFORMANCE
            - Variants.PERFORMANCE_GREEDY
            - Variants.FREQ_TRIPLES

    Returns
    -------
    dfg
        DFG graph
    """
    if parameters is None:
        parameters = {}
    # Resolve the attribute/column names from the parameters, falling back
    # to the XES / pm4py defaults.
    activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters, xes_util.DEFAULT_NAME_KEY)
    start_timestamp_key = exec_utils.get_param_value(Parameters.START_TIMESTAMP_KEY, parameters, None)
    timestamp_key = exec_utils.get_param_value(Parameters.TIMESTAMP_KEY, parameters, xes_util.DEFAULT_TIMESTAMP_KEY)
    case_id_glue = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, pmutil.constants.CASE_CONCEPT_NAME)
    # Fast path: when pandas is installed and the log is already a DataFrame,
    # compute the DFG directly on the dataframe. FREQ_TRIPLES has no
    # dataframe implementation, so it falls through to the generic path.
    if pkgutil.find_loader("pandas"):
        import pandas
        from pm4py.algo.discovery.dfg.adapters.pandas import df_statistics
        from pm4py.objects.log.util import dataframe_utils
        if isinstance(log, pandas.core.frame.DataFrame) and not variant == Variants.FREQ_TRIPLES:
            log = dataframe_utils.convert_timestamp_columns_in_df(log, timest_columns=[
                timestamp_key])
            # Both measures are computed in one pass; return the one matching
            # the requested variant.
            dfg_frequency, dfg_performance = df_statistics.get_dfg_graph(log, measure="both",
                                                                         activity_key=activity_key,
                                                                         timestamp_key=timestamp_key,
                                                                         case_id_glue=case_id_glue,
                                                                         start_timestamp_key=start_timestamp_key)
            if variant in [Variants.PERFORMANCE, Variants.PERFORMANCE_GREEDY]:
                return dfg_performance
            else:
                return dfg_frequency
    # Generic path: convert whatever *log* is to an event log and dispatch to
    # the module behind the chosen variant.
    return exec_utils.get_variant(variant).apply(log_conversion.apply(log, parameters, log_conversion.TO_EVENT_LOG), parameters=parameters)
|
[
"javetter@mail.uni-mannheim.de"
] |
javetter@mail.uni-mannheim.de
|
4f63c83d1d501199e03ccb024912d8bedd00baaa
|
e1703dcec7bda83b397e5a3d0754dd6a9a296aa8
|
/setup.py
|
fe58a71760337d0d145264d94015a199ca0bda54
|
[
"MIT"
] |
permissive
|
moyogo/ttfautohint-py
|
03e28131d21a4add1b61e91ce3ed198722407fd2
|
3cd84b536a9060d53e6d326c957097fbdcb5633e
|
refs/heads/master
| 2021-05-11T18:29:39.304432
| 2019-04-10T15:21:26
| 2019-04-10T15:21:26
| 117,827,495
| 0
| 0
| null | 2018-01-17T11:30:54
| 2018-01-17T11:30:53
| null |
UTF-8
|
Python
| false
| false
| 4,498
|
py
|
from __future__ import print_function, absolute_import
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
from distutils.file_util import copy_file
from distutils.dir_util import mkpath
from distutils import log
import os
import sys
import subprocess
from io import open
cmdclass = {}
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
print("warning: wheel package is not installed", file=sys.stderr)
else:
class UniversalBdistWheel(bdist_wheel):
def get_tag(self):
return ('py2.py3', 'none',) + bdist_wheel.get_tag(self)[2:]
cmdclass['bdist_wheel'] = UniversalBdistWheel
class SharedLibrary(Extension):
if sys.platform == "darwin":
suffix = ".dylib"
elif sys.platform == "win32":
suffix = ".dll"
else:
suffix = ".so"
def __init__(self, name, cmd, cwd=".", output_dir=".", env=None):
Extension.__init__(self, name, sources=[])
self.cmd = cmd
self.cwd = os.path.normpath(cwd)
self.output_dir = os.path.normpath(output_dir)
self.env = env or dict(os.environ)
class SharedLibBuildExt(build_ext):
def get_ext_filename(self, ext_name):
for ext in self.extensions:
if isinstance(ext, SharedLibrary):
return os.path.join(*ext_name.split('.')) + ext.suffix
return build_ext.get_ext_filename(self, ext_name)
def build_extension(self, ext):
if not isinstance(ext, SharedLibrary):
build_ext.build_extension(self, ext)
return
log.info("running '%s'" % " ".join(ext.cmd))
if not self.dry_run:
rv = subprocess.Popen(ext.cmd,
cwd=ext.cwd,
env=ext.env,
shell=True).wait()
if rv != 0:
sys.exit(rv)
lib_name = ext.name.split(".")[-1] + ext.suffix
lib_fullpath = os.path.join(ext.output_dir, lib_name)
dest_path = self.get_ext_fullpath(ext.name)
mkpath(os.path.dirname(dest_path),
verbose=self.verbose, dry_run=self.dry_run)
copy_file(lib_fullpath, dest_path,
verbose=self.verbose, dry_run=self.dry_run)
cmdclass['build_ext'] = SharedLibBuildExt
env = dict(os.environ)
if sys.platform == "win32":
import struct
# select mingw32 or mingw64 toolchain depending on python architecture
bits = struct.calcsize("P") * 8
toolchain = "mingw%d" % bits
PATH = ";".join([
"C:\\msys64\\%s\\bin" % toolchain,
"C:\\msys64\\usr\\bin",
env["PATH"]
])
env.update(
PATH=PATH,
MSYSTEM=toolchain.upper(),
# this tells bash to keep the current working directory
CHERE_INVOKING="1",
)
# we need to run make from an msys2 login shell
cmd = ["bash", "-lc", "make"]
else:
cmd = ["make"]
libttfautohint = SharedLibrary("ttfautohint.libttfautohint",
cmd=cmd,
cwd="src/c",
env=env,
output_dir="build/local/lib")
with open("README.rst", "r", encoding="utf-8") as readme:
long_description = readme.read()
setup(
name="ttfautohint-py",
use_scm_version=True,
description=("Python wrapper for ttfautohint, "
"a free auto-hinter for TrueType fonts"),
long_description=long_description,
author="Cosimo Lupo",
author_email="cosimo@anthrotype.com",
url="https://github.com/fonttools/ttfautohint-py",
license="MIT",
platforms=["posix", "nt"],
package_dir={"": "src/python"},
packages=find_packages("src/python"),
ext_modules=[libttfautohint],
zip_safe=False,
cmdclass=cmdclass,
setup_requires=['setuptools_scm'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Text Processing :: Fonts",
"Topic :: Multimedia :: Graphics",
],
)
|
[
"cosimo@anthrotype.com"
] |
cosimo@anthrotype.com
|
0b7323b6ee686fa5b8c464e4598b9331e04093f1
|
96bcae976f8d748d75398484fd67441a3ed42452
|
/mimic/creat_akin_baseline.py
|
9c04803534d47f1403a41f65ececf751f43dfd6b
|
[] |
no_license
|
will4906/MimicAdmin
|
3a84ba1245522e11590a22ac7c480b32b59e567e
|
b97137529b731fe34125a9b2d34be7ac0211018e
|
refs/heads/master
| 2020-03-07T05:44:12.717935
| 2018-11-15T15:22:44
| 2018-11-15T15:22:44
| 127,304,281
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,641
|
py
|
# -*- coding: utf-8 -*-
"""
根据kdigo等评级标准计算肌酐的评级。
Created on 2018/7/15
@author: will4906
"""
import psycopg2 as psql
from decimal import Decimal
def akin(min_value, max_value):
    """Map a creatinine baseline/peak pair to a stage code 0-3.

    Returns None when the baseline is not positive (ratio undefined);
    otherwise stages by the absolute rise (max - min) and the fold change
    (max / min), highest stage first.
    """
    if min_value <= 0:
        return None
    rise = max_value - min_value
    fold = max_value / min_value
    if rise > Decimal(4) or fold >= Decimal(3):
        return 3
    if Decimal(2) < fold < Decimal(3):
        return 2
    if rise > Decimal(0.3) or Decimal(1.5) <= fold < Decimal(2):
        return 1
    return 0
if __name__ == '__main__':
    # Compute a formula-based creatinine baseline per admission and store the
    # resulting AKIN stage into a new column of TABLE_NAME.
    TABLE_NAME = 'head_project'
    STANDARD = 'akin'
    conn = psql.connect(database="tmimic", user="postgres", password="1234")
    cursor = conn.cursor()
    try:
        # Add the stage column; rolled back silently if it already exists.
        cursor.execute("ALTER TABLE {} ADD stage_{}_creat_by_base INT;".format(TABLE_NAME, STANDARD))
        conn.commit()
    except Exception as e:
        conn.rollback()
    # Reset any previous run's stages before recomputing.
    cursor.execute("UPDATE {} SET stage_{}_creat_by_base = NULL;".format(TABLE_NAME, STANDARD))
    conn.commit()
    cursor.execute("SELECT hadm_id, age, gender, ethnicity FROM {};".format(TABLE_NAME))
    patients = cursor.fetchall()
    for patient in patients:
        hadm_id = patient[0]
        age = patient[1]
        gender = int(patient[2])  # 0:female, 1: male
        ethnicity = int(patient[3])
        print(hadm_id, age, gender, ethnicity)
        # cursor.execute("SELECT MIN(valuenum) FROM {}_creatinine_chart WHERE hadm_id = {};".format(TABLE_NAME, hadm_id))
        # min_value = cursor.fetchone()[0]
        # Baseline is (75 * 1.73 + 1.154 * age + 0.203 * (0.742 if female) * (1.210 if black)) / 186
        temp1 = 0.742 if gender == 0 else 1
        temp2 = 1.210 if ethnicity == 2 else 1
        min_value = (Decimal(75) * Decimal(1.73) + Decimal(1.154) * age + Decimal(0.203) * Decimal(temp1) * Decimal(temp2)) / Decimal(186)
        # NOTE(review): this None check is dead -- min_value is always the
        # computed Decimal above; it is a leftover from the commented-out
        # MIN(valuenum) query.
        if min_value is None:
            continue
        cursor.execute("SELECT MAX(valuenum) FROM {}_creatinine_chart_2d WHERE hadm_id = {};".format(TABLE_NAME, hadm_id))
        max_value = cursor.fetchone()[0]
        if max_value is None:
            continue
        if STANDARD == 'akin':
            stage = akin(min_value, max_value)
        elif STANDARD == 'rifie':
            # NOTE(review): rifie() is not defined in this file; selecting
            # STANDARD = 'rifie' would raise NameError.
            stage = rifie(min_value, max_value)
        else:
            pass
        # print(hadm_id, 'kdigo:', kdigo_stage)
        print(hadm_id, STANDARD, stage)
        cursor.execute("UPDATE {} SET stage_{}_creat_by_base = {} WHERE hadm_id = {};".format(TABLE_NAME, STANDARD, stage, hadm_id))
        conn.commit()
|
[
"553105821@qq.com"
] |
553105821@qq.com
|
586469e5c63f6210dd5a83fc485d032cd5e9c5d3
|
23057669a8e495e87068bf70b0f1626860b3ef97
|
/django_food/wsgi.py
|
c4e7f565a33ddc66f8f859029d3fe661e9f8b1bb
|
[] |
no_license
|
AlexWattt/website
|
ad0ee44d11cb7b9ac67c7805f463fe2bae1fda10
|
3be06491d5c8135b6251de6ec306c0f70e29c012
|
refs/heads/master
| 2020-04-04T15:09:12.624740
| 2018-11-03T21:43:56
| 2018-11-03T21:43:56
| 156,026,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for django_food project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_food.settings')
application = get_wsgi_application()
|
[
"watt.alexand@uwlax.edu"
] |
watt.alexand@uwlax.edu
|
da957b8fd7190adfd4adbb7d99f8e3c6b4820d61
|
4cc285b0c585241ff4404087e6fbb901195639be
|
/NeuralNetworkNumbers/venv/Lib/site-packages/tensorflow/python/keras/preprocessing/text_dataset.py
|
45d7cc20aaabb64bb84de4fdc25aad9c11a751f0
|
[] |
no_license
|
strazhg/NeuralNetworksPython
|
815542f4ddbb86e918e657f783158f8c078de514
|
15038e44a5a6c342336c119cdd2abdeffd84b5b1
|
refs/heads/main
| 2023-04-16T18:51:29.602644
| 2021-04-27T14:46:55
| 2021-04-27T14:46:55
| 361,944,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:43bfb21607bacd094926ca654a9356031750b9c5f137c011e25577e9d0fa2a0f
size 8034
|
[
"golubstrazh@gmail.com"
] |
golubstrazh@gmail.com
|
7068fc6d23da133b6e906991971090faa7f25f05
|
afa3e6265437b3fe4d2c9a6c164525d333838976
|
/virtual/bin/pyreverse
|
b460059891d342b970678b536872d68210944f0a
|
[] |
no_license
|
Mantongash/rating
|
3602da4f5d32accd28b22fe2997d0976b298d3e7
|
9ee6f7ffd083e49d10672ecad374e87d6045d37f
|
refs/heads/master
| 2022-12-15T07:43:40.189766
| 2020-01-24T08:31:02
| 2020-01-24T08:31:02
| 235,352,051
| 0
| 3
| null | 2022-12-08T03:29:35
| 2020-01-21T13:43:46
|
Python
|
UTF-8
|
Python
| false
| false
| 277
|
#!/home/moringa/Desktop/Python/module_project/rating/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_pyreverse())
|
[
"you@example.com"
] |
you@example.com
|
|
1a6d072a76e2e868c8217fadce8efd8a4fd27547
|
ed764cb6f5bddf2990a489baac72f0c7187a2fad
|
/ex2.py
|
b1e778fb434ede08680717ca354bbf6c4d5a985c
|
[] |
no_license
|
chyngyzs1/ex2part2
|
08d77145c642af51e80ccd75a5741f031d38cc38
|
4eb4389882ddf110df08549644cf1c67c2605561
|
refs/heads/master
| 2020-06-03T08:55:05.944788
| 2019-06-12T06:59:35
| 2019-06-12T06:59:35
| 191,515,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
# Read a divisor (x) and a total (y) from stdin, then report the integer
# quotient ("Apple=") and remainder ("V korzinke=", Russian for "in the
# basket").
x = int(input())
y = int(input())
# divmod gives the exact integer quotient and remainder in one step; the
# original int(y / x) went through float division and could round wrongly
# for large integers.
a, b = divmod(y, x)
print("Apple=",(a))
print("V korzinke=",(b))
|
[
"0777755169ch@gmail.com"
] |
0777755169ch@gmail.com
|
217de8d2b5d16655952f7680349ac36f0e78bcf1
|
a26a4dc2cb9a7946b9b899ebd53a5e28e76311e2
|
/day03/try_except.py
|
69d02ec0593b827f1f79f3fbae6dd19c9e471209
|
[] |
no_license
|
qiulu0714/myedu-1904
|
8b62b5f38786708fcdf0907a8c5cebaf35277e1e
|
4dc86aa390fe139c72425643d93485bd139b7473
|
refs/heads/master
| 2020-05-29T19:33:00.288061
| 2019-07-01T04:37:22
| 2019-07-01T04:37:22
| 189,334,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 108
|
py
|
if __name__ == '__main__':
    # Demonstrate exception handling: 5/0 always raises ZeroDivisionError,
    # so only the except branch ever prints.
    try:
        a = 5 / 0
        print('true')
    except ZeroDivisionError:
        # Catch only the expected error; the original bare `except` would
        # also have swallowed unrelated failures.
        print('false')
|
[
"qiulu0714@163.com"
] |
qiulu0714@163.com
|
f4ff270e4ab2f4aec06f4b28c1b1f1415da4d374
|
7cd501291a2464c599fa7fd73d0599ca475bb32d
|
/alias_object.py
|
0806090475ca11b0721f377d8506ba7d51467f16
|
[] |
no_license
|
jcvaldez1/inband_controller
|
6a51f8946e15fef9806d966722d90b1c35c40501
|
f2dbdc593ce1b3126103beba937c7f2a6f14c0e5
|
refs/heads/master
| 2021-06-12T06:38:59.194810
| 2020-09-29T05:25:13
| 2020-09-29T05:25:13
| 195,508,262
| 0
| 0
| null | 2021-06-02T00:38:58
| 2019-07-06T06:54:50
|
Python
|
UTF-8
|
Python
| false
| false
| 856
|
py
|
'''
Alias object for storing info about a registered
entry of a cloned cloud service in the DDH
@ATTRIBUTES:
real_port : the real destination TCP port to be used
by the IoT device
fake_port : the TCP port of the DDH to be exposed for
network traffic corresponding to the value of
real_port
cloud_ip : the IPv4 address of the cloud service to be
cloned via a docker container in the DDH
'''
class Alias(object):
    """One registered entry of a cloned cloud service in the DDH.

    Construction requires the keyword arguments ``real_port``, ``fake_port``,
    ``cloud_ip`` and ``name``; a missing one raises KeyError, exactly as the
    original direct lookups did.
    """

    def __init__(self, *args, **kwargs):
        super(Alias, self).__init__()
        # Pull each required keyword in the same order as before, so the
        # first missing key reported is unchanged.
        for field in ("real_port", "fake_port", "cloud_ip", "name"):
            setattr(self, field, kwargs[field])
|
[
"jcvaldez1@up.edu.ph"
] |
jcvaldez1@up.edu.ph
|
aafc4e22015469a8570bde22216506ce32bc842d
|
5ecdfb08af2c29fc37824d5e1ebc0f77b4203423
|
/pagetag
|
9b20d1fb77bb30036d2b5f0fc93c22e21cc506b6
|
[] |
no_license
|
aakrosh/ldtools
|
3c139c031d12818ed0638c59069eba215f527d00
|
8de6b4f26601a7e08f022f3cf44d97a63ba7188d
|
refs/heads/master
| 2021-06-24T03:01:08.033238
| 2017-08-27T14:30:16
| 2017-08-27T14:30:16
| 101,557,866
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,337
|
#!/usr/bin/env python
"""
This accepts as input a file of the following format:
Site Sample Allele1 Allele2
for example:
000834 D001 G G
000834 D002 G G
000834 D003 G G
000834 D004 G G
000834 D005 N N
000834 E001 G G
000834 E002 G G
000834 E003 G G
000834 E004 G G
000834 E005 G G
000963 D001 T T
000963 D002 T T
000963 D003 T T
000963 D004 T T
000963 D005 N N
000963 E001 T T
000963 E002 N N
000963 E003 G T
000963 E004 G G
000963 E005 G T
and a rsquare threshold and outputs two files:
a) a file of input snps (one on each line). A SNP is identified by the "Site"
column in the input file
b) a file where each line has the following:
SNP list
where SNP is one of the SNPs and the "list" is a comma separated list of SNPs
that exceed the rsquare threshold with the first SNP.
"""
from sys import argv, stderr, exit
from getopt import getopt, GetoptError
__author__ = "Aakrosh Ratan"
__email__ = "ratan@bx.psu.edu"
# do we want the debug information to be printed?
debug_flag = False
# denote different combos of alleles in code
HOMC = str(1)
HOMR = str(2)
HETE = str(3)
OTHER = str(4)
indexcalculator = {(HOMC,HOMC) : 0,
(HOMC,HOMR) : 1,
(HOMC,HETE) : 2,
(HOMR,HOMC) : 3,
(HOMR,HOMR) : 4,
(HOMR,HETE) : 5,
(HETE,HOMC) : 6,
(HETE,HOMR) : 7,
(HETE,HETE) : 8}
def read_inputfile(filename, samples):
    """Parse a prettybase file into {site: {sample: (allele1, allele2)}}.

    Each line holds "site sample allele1 allele2".  When *samples* is a
    container, rows whose sample is not in it are skipped; pass None to keep
    every row.
    """
    genotypes = {}
    with open(filename, "r") as handle:
        for record in handle:
            site, sample, first, second = record.split()
            # if the user specified a list of samples, then only use those samples
            if samples is not None and sample not in samples:
                continue
            genotypes.setdefault(site, {})[sample] = (first, second)
    return genotypes
def annotate_locus(input, minorallelefrequency, snpsfile):
    """Encode each biallelic locus as a genotype-vector string.

    Returns {site: (genotype_vector, minor_allele_frequency)} for loci with
    exactly two alleles (after dropping the missing codes "N"/"X") whose MAF
    is not below *minorallelefrequency*.  Sites with more than two real
    alleles are written to *snpsfile* instead.  Python 2 only (`print >>`).
    """
    locus = {}
    for k,v in input.items():
        genotypes = [x for x in v.values()]
        # Flatten the per-sample allele pairs into one list of alleles.
        alleles = [y for x in genotypes for y in x]
        # NOTE(review): the first assignment is dead -- it is immediately
        # overwritten by the version that excludes the missing codes.
        alleleset = list(set(alleles))
        alleleset = list(set(alleles) - set(["N","X"]))
        if len(alleleset) == 2:
            genotypevec = ""
            # Count each allele to decide which is major and which is minor.
            num1 = len([x for x in alleles if x == alleleset[0]])
            num2 = len([x for x in alleles if x == alleleset[1]])
            if num1 > num2:
                major = alleleset[0]
                minor = alleleset[1]
                minorfreq = (num2 * 1.0)/(num1 + num2)
            else:
                major = alleleset[1]
                minor = alleleset[0]
                minorfreq = (num1 * 1.0)/(num1 + num2)
            # Drop loci below the requested minor-allele-frequency threshold.
            if minorfreq < minorallelefrequency: continue
            # One code character per sample: homozygous common/rare,
            # heterozygous, or anything else (missing data etc.).
            for gen in genotypes:
                if gen == (major,major):
                    genotypevec += HOMC
                elif gen == (minor,minor):
                    genotypevec += HOMR
                elif gen == (major, minor) or gen == (minor, major):
                    genotypevec += HETE
                else:
                    genotypevec += OTHER
            locus[k] = genotypevec,minorfreq
        elif len(alleleset) > 2:
            # More than two real alleles: report the site, do not encode it.
            print >> snpsfile, k
    return locus
def calculateLD(loci, rsqthreshold):
    """Compute pairwise r^2 linkage disequilibrium between all loci.

    *loci* maps site -> (genotype vector, MAF) as built by annotate_locus.
    Returns {"site1 site2": rsq} for pairs whose r^2 reaches *rsqthreshold*.
    The double-heterozygote haplotype frequency p11 is estimated with an
    iterative EM-style update capped at 100 iterations.
    """
    snps = list(loci)
    rsquare = {}
    for index,loc1 in enumerate(snps):
        for loc2 in snps[index + 1:]:
            # 3x3 contingency table of genotype-code pairs; samples coded
            # OTHER at either locus are skipped.
            matrix = [0]*9
            vec1 = loci[loc1][0]
            vec2 = loci[loc2][0]
            for gen in zip(vec1,vec2):
                if gen[0] == OTHER or gen[1] == OTHER: continue
                matrix[indexcalculator[gen]] += 1
            n = sum(matrix)
            # Haplotype counts that are resolvable without phase ambiguity;
            # only matrix[8] (double heterozygote) needs the EM estimate.
            # NOTE(review): x21 and x22 both reuse matrix[6] and matrix[5] --
            # this looks like a possible transcription slip; verify against
            # the LD estimator derivation before relying on these values.
            x11 = 2*matrix[0] + matrix[2] + matrix[6]
            x12 = 2*matrix[1] + matrix[2] + matrix[7]
            x21 = 2*matrix[3] + matrix[6] + matrix[5]
            x22 = 2*matrix[4] + matrix[6] + matrix[5]
            # Allele frequencies at each locus (floats via the * 1.0).
            p = (x11 + x12 + matrix[8] * 1.0) / (2 * n)
            q = (x11 + x21 + matrix[8] * 1.0) / (2 * n)
            # Start the iteration from linkage equilibrium.
            p11 = p * q
            oldp11 = p11
            range = 0.0  # NOTE(review): shadows the builtin `range`
            converged = False
            convergentcounter = 0
            if p11 > 0.0:
                while converged == False and convergentcounter < 100:
                    if (1.0 - p - q + p11) != 0.0 and oldp11 != 0.0:
                        num = matrix[8] * p11 * (1.0 - p - q + p11)
                        den = p11 * (1.0 - p - q + p11) + (p - p11)*(q - p11)
                        p11 = (x11 + (num/den))/(2.0*n)
                        # Converged when an update changes p11 by at most
                        # roughly 0.01%-0.1%.
                        range = p11/oldp11
                        if range >= 0.9999 and range <= 1.001:
                            converged = True
                        oldp11 = p11
                        convergentcounter += 1
                    else:
                        converged = True
            dvalue = 0.0
            if converged == True:
                # D = p11 - p*q; r^2 = D^2 / (p(1-p) q(1-q))
                dvalue = p11 - (p * q)
            if dvalue != 0.0:
                rsq = (dvalue**2)/(p*q*(1-p)*(1-q))
                if rsq >= rsqthreshold:
                    rsquare["%s %s" % (loc1,loc2)] = rsq
    return rsquare
def main(inputfile, snpsfile, neigborhoodfile, \
        rsquare, minorallelefrequency, samples):
    """Drive the pipeline: read genotypes, write candidate SNPs, then write
    each SNP's LD neighborhood.  Python 2 only (`print >>` syntax)."""
    # read the input file
    input = read_inputfile(inputfile, samples)
    print >> stderr, "Read %d locations" % len(input)
    # open the snpsfile to print
    file = open(snpsfile, "w")
    # annotate the inputs, remove the abnormal loci (which do not have 2 alleles
    # and add the major and minor allele to each loci
    loci = annotate_locus(input, minorallelefrequency, file)
    print >> stderr, "Read %d interesting locations" % len(loci)
    # print all the interesting loci as candidate snps
    for k in loci.keys(): print >> file, k
    file.close()
    print >> stderr, "Finished creating the snpsfile"
    # calculate the LD values and store it if it exceeds the threshold
    lds = calculateLD(loci, rsquare)
    print >> stderr, "Calculated all the LD values"
    # create a list of SNPs
    # Build symmetric adjacency lists: each SNP maps to its LD partners, and
    # (in parallel, same order) to the stringified r^2 values.
    snps = {}
    ldvals = {}
    for k,v in lds.items():
        s1,s2 = k.split()
        if s1 in snps: snps[s1].append(s2)
        else : snps[s1] = [s2]
        if s2 in snps: snps[s2].append(s1)
        else : snps[s2] = [s1]
        if s1 in ldvals: ldvals[s1].append(str(v))
        else : ldvals[s1] = [str(v)]
        if s2 in ldvals: ldvals[s2].append(str(v))
        else : ldvals[s2] = [str(v)]
    # print the snps to the output file
    file = open(neigborhoodfile, "w")
    for k,v in snps.items():
        ldv = ldvals[k]
        if debug_flag == True:
            # Debug mode appends the r^2 values as a third column.
            print >> file, "%s\t%s\t%s" % (k, ",".join(v), ",".join(ldv))
        else:
            print >> file, "%s\t%s" % (k, ",".join(v))
    file.close()
def read_list(filename):
    """Read one entry per line from *filename* into a dict mapping the
    stripped entry to 1 (used as a fast membership set)."""
    with open(filename, "r") as source:
        entries = {item.strip(): 1 for item in source}
    return entries
def usage():
    """Print the command-line help to stderr (Python 2 `print >>` syntax)."""
    f = stderr
    print >> f, "usage:"
    print >> f, "pagetag [options] input.txt snps.txt neighborhood.txt"
    print >> f, "where input.txt is the prettybase file"
    print >> f, "where snps.txt is the first output file with the snps"
    print >> f, "where neighborhood.txt is the output neighborhood file"
    print >> f, "where the options are:"
    print >> f, "-h,--help : print usage and quit"
    print >> f, "-d,--debug: print debug information"
    print >> f, "-r,--rsquare: the rsquare threshold (default : 0.64)"
    print >> f, "-f,--freq : the minimum MAF required (default: 0.0)"
    print >> f, "-s,--sample : a list of samples to be clustered"
if __name__ == "__main__":
    # Parse command-line options (Python 2 getopt/except syntax).
    try:
        opts, args = getopt(argv[1:], "hds:r:f:",\
                ["help", "debug", "rsquare=","freq=", "sample="])
    except GetoptError, err:
        print str(err)
        usage()
        exit(2)
    # Defaults: r^2 threshold 0.64, no MAF filter.
    # NOTE(review): samples defaults to {} -- read_inputfile only bypasses
    # filtering when samples is None, so with no -s option every sample is
    # skipped; presumably the default should be None. Confirm intent.
    rsquare = 0.64
    minorallelefrequency = 0.0
    samples = {}
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            exit()
        elif o in ("-d", "--debug"):
            debug_flag = True
        elif o in ("-r", "--rsquare"):
            rsquare = float(a)
        elif o in ("-f", "--freq"):
            minorallelefrequency = float(a)
        elif o in ("-s", "--sample"):
            samples = read_list(a)
        else:
            assert False, "unhandled option"
    # Validate thresholds before doing any work.
    if rsquare < 0.00 or rsquare > 1.00:
        print >> stderr, "input value of rsquare should be in [0.00, 1.00]"
        exit(3)
    if minorallelefrequency < 0.0 or minorallelefrequency > 0.5:
        print >> stderr, "input value of MAF should be (0.00,0.50]"
        exit(4)
    if len(args) != 3:
        usage()
        exit(5)
    main(args[0], args[1], args[2], rsquare, minorallelefrequency, samples)
|
[
"aakrosh@users.noreply.github.com"
] |
aakrosh@users.noreply.github.com
|
|
9af01fe4a309e859b8c255b666bf2d90c4634957
|
5f6aaf7f38f116d07e3f11124e9c88c63d33526a
|
/examples/example_genGeodes_walking_on_circle.py
|
06ac7002e21251629da16a8205c7b225dd2421b2
|
[] |
no_license
|
dhockaday/manapprox
|
aa690a5bd4a5d286fe73a68a09c3b6a8297021a0
|
c517a8ed48fc5924b98a849bf7830d8590fe3a88
|
refs/heads/main
| 2023-05-06T09:31:07.722747
| 2021-05-13T17:29:11
| 2021-05-13T17:29:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,805
|
py
|
# Example: walk along geodesics of a manifold approximation of a noisy circle.
import manapprox
import numpy as np
import matplotlib.pyplot as plt

#### Generate data
# Sample N points from an annulus of radii r-s .. r+s, uniform by area:
# the radius is drawn by inverting the CDF of the area measure.
r = 5
s = 2.5
N = 5000
#rs = r + np.random.rand(N)*s*2 - s # Not uniform distibution
theta = np.random.rand(N)*2*np.pi
A = 2/((r+s)**2 - (r-s)**2)
rs = np.sqrt(2*np.random.rand(N)/A + (r-s)**2)
data = np.array([rs*np.sin(theta), rs*np.cos(theta)])

#### running algorithm
# Fit a 1-dimensional manifold approximation with degree-2 local polynomials.
ma = manapprox.ManApprox(data)
ma.manifold_dim = 1
ma.poly_deg = 2
ma.sparse_factor = 10
#mmls.calculateSigma()
ma.sigma = 3.5
ma.createTree()
print(ma)

# Seed point off the circle; only delta_x is used below -- max_delta_y and
# move_with_polynomail_approx are leftover, unused knobs.
point = np.array([-1,-6])
delta_x = 1
max_delta_y = 3
projected_p = []
move_with_polynomail_approx = False
projected_p = ma.genGeodesDirection(point, 1, num_of_steps = 30, step_size_x = delta_x)

##############################
#### PLOT
##############################
c = plt.Circle((0,0),5, color ='b', lw = 1, fill=False)
plt.axis('off')
plt.gca().set_aspect('equal', adjustable='box')
plt.plot(data[0,:], data[1,:], '.y', markersize=2, alpha=0.16)
plt.plot([point[0]], [point[1]], '.r')
plt.gca().add_patch(c)
# Dotted red arrow: the initial step from the seed point to its projection.
plt.arrow(point[0], point[1], projected_p[0][0] - point[0], projected_p[0][1] - point[1],
          head_width=0.2, head_length=0.15, overhang=0.3, width=0.001, color="r",linestyle = (0,(1,3)),
          head_starts_at_zero = False, length_includes_head = True)
# Black arrows: successive geodesic steps, animated with a short pause each.
for i in range(1, len(projected_p)):
    plt.arrow(projected_p[i-1][0], projected_p[i-1][1], projected_p[i][0] - projected_p[i-1][0], projected_p[i][1] - projected_p[i-1][1],
              head_width=0.3, head_length=0.2, overhang=0.3, width=0.001, color="k",head_starts_at_zero = False, length_includes_head = True)
    #plt.plot([projected_p[i-1][0]], [projected_p[i-1][1]], '.g')
    #plt.title("iter: %d"%i)
    plt.draw()
    plt.pause(.05)
#plt.savefig("geoWalk_circle.svg", bbox_inches='tight')
plt.show()
print("")
|
[
"aizeny@gmail.com"
] |
aizeny@gmail.com
|
82708d6e1161090b366fdfa51646948600dba3bd
|
2b0ef0c0c2be71df849b6a0999991719c42c8533
|
/renote/settings.py
|
c2b1118f60d974dfadb265eb9c22ab205957dc2a
|
[] |
no_license
|
ShahroozD/renote
|
dcc52c92aaeda103fb9d97ce4c10fc23559c20bf
|
58be722a0b0abe3468a6da096cc8a590deebacb1
|
refs/heads/master
| 2020-03-28T21:02:24.705697
| 2018-10-09T09:58:09
| 2018-10-09T09:58:09
| 149,124,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
"""
Django settings for renote project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vfv=gxqkjx(0_68m1wyj^v0f=7h6v_=l6uf#1#t(0=ere@0r!o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'renote.pythonanywhere.com',
'127.0.0.1',
'192.168.1.9'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api',
'web',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'renote.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'renote.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'Asia/Tehran'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR + '/data/static/'
MEDIA_URL = '/img/'
MEDIA_ROOT = BASE_DIR + '/data/img/'
FONT_URL = '/fonts/'
FONT_ROOT = BASE_DIR + '/data/fonts/'
FILES_URL = '/files/'
FILES_ROOT = BASE_DIR + '/data/files/'
IMGUPLOAD_URL = 'upload'
IMGUPLOAD_ROOT = BASE_DIR + '/data/img/upload/'
|
[
"shahroozsanei@gmail.com"
] |
shahroozsanei@gmail.com
|
e87222fe39476bc74c1040fac6f5fd94962204c5
|
ea01ed735850bf61101b869b1df618d3c09c2aa3
|
/自动化测试/httprunner/testcenter-master/httprunner/response.py
|
b59feae6478f1abbf89c80fb1a842037fd7487d9
|
[] |
no_license
|
liuzhipeng17/python-common
|
867c49ac08719fabda371765d1f9e42f6dd289b9
|
fb44da203d4e3a8304d9fe6205e60c71d3a620d8
|
refs/heads/master
| 2021-09-27T10:39:45.178135
| 2018-11-08T01:49:33
| 2018-11-08T01:49:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,626
|
py
|
import logging
import re
from collections import OrderedDict
from httprunner import exception, utils
from requests.structures import CaseInsensitiveDict
# A field string containing a "(...)" capture group anywhere is treated as a
# regex extractor; anything else is a dot-delimited field query.
text_extractor_regexp_compile = re.compile(r".*\(.*\).*")
class ResponseObject(object):
    """Wraps a requests.Response and supports field extraction/validation.

    Fields can be extracted either with a regex (when the field string
    contains a capture group) or with a dot-delimited query such as
    "content.person.name.first_name".
    """

    def __init__(self, resp_obj):
        """ initialize with a requests.Response object
        @param (requests.Response instance) resp_obj
        """
        self.resp_obj = resp_obj
        self.resp_text = resp_obj.text
        self.resp_body = self.parsed_body()

    def parsed_body(self):
        """Return the body parsed as JSON, or the raw text if not JSON."""
        try:
            return self.resp_obj.json()
        except ValueError:
            return self.resp_text

    def parsed_dict(self):
        """Summarize the response as a plain dict (status, headers, body)."""
        return {
            'status_code': self.resp_obj.status_code,
            'headers': self.resp_obj.headers,
            'body': self.resp_body
        }

    def _extract_field_with_regex(self, field):
        """ extract field from response content with regex.
        requests.Response body could be json or html text.
        @param (str) field should only be regex string that matched r".*\(.*\).*"
        e.g.
        self.resp_text: "LB123abcRB789"
        field: "LB[\d]*(.*)RB[\d]*"
        return: abc
        """
        matched = re.search(field, self.resp_text)
        if not matched:
            err_msg = u"Extractor error: failed to extract data with regex!\n"
            err_msg += u"response body: {}\n".format(self.resp_text)
            err_msg += u"regex: {}\n".format(field)
            logging.error(err_msg)
            raise exception.ParamsError(err_msg)
        # Return the first (and only expected) capture group.
        return matched.group(1)

    def _extract_field_with_delimiter(self, field):
        """ response content could be json or html text.
        @param (str) field should be string joined by delimiter.
        e.g.
        "status_code"
        "content"
        "headers.content-type"
        "content.person.name.first_name"
        """
        try:
            # string.split(sep=None, maxsplit=-1) -> list of strings
            # e.g. "content.person.name" => ["content", "person.name"]
            try:
                top_query, sub_query = field.split('.', 1)
            except ValueError:
                top_query = field
                sub_query = None

            if top_query in ["body", "content", "text"]:
                top_query_content = self.parsed_body()
            else:
                # e.g. "status_code", "headers" -- attributes of the
                # underlying requests.Response object.
                top_query_content = getattr(self.resp_obj, top_query)

            if sub_query:
                if not isinstance(top_query_content, (dict, CaseInsensitiveDict, list)):
                    # Bug fix: this message was copy-pasted from the regex
                    # extractor and wrongly claimed a regex failure; this
                    # branch handles delimiter-separated field queries.
                    err_msg = u"Extractor error: failed to extract data with delimiter!\n"
                    err_msg += u"response: {}\n".format(self.parsed_dict())
                    err_msg += u"extract field: {}\n".format(field)
                    logging.error(err_msg)
                    raise exception.ParamsError(err_msg)
                # e.g. key: resp_headers_content_type, sub_query = "content-type"
                return utils.query_json(top_query_content, sub_query)
            else:
                # e.g. key: resp_status_code, resp_content
                return top_query_content

        except AttributeError:
            err_msg = u"Failed to extract value from response!\n"
            err_msg += u"response: {}\n".format(self.parsed_dict())
            err_msg += u"extract field: {}\n".format(field)
            logging.error(err_msg)
            raise exception.ParamsError(err_msg)

    def extract_field(self, field):
        """ extract value from requests.Response.
        """
        # Fields with a "(...)" capture group are regex extractors;
        # everything else is a dot-delimited query.
        if text_extractor_regexp_compile.match(field):
            return self._extract_field_with_regex(field)
        else:
            return self._extract_field_with_delimiter(field)

    def extract_response(self, extractors):
        """ extract value from requests.Response and store in OrderedDict.
        @param (list) extractors
            [
                {"resp_status_code": "status_code"},
                {"resp_headers_content_type": "headers.content-type"},
                {"resp_content": "content"},
                {"resp_content_person_first_name": "content.person.name.first_name"}
            ]
        @return (OrderDict) variable binds ordered dict
        """
        extracted_variables_mapping = OrderedDict()
        extract_binds_order_dict = utils.convert_to_order_dict(extractors)

        for key, field in extract_binds_order_dict.items():
            if not isinstance(field, utils.string_type):
                raise exception.ParamsError("invalid extractors in testcase!")
            extracted_variables_mapping[key] = self.extract_field(field)

        return extracted_variables_mapping

    def validate(self, validators, variables_mapping):
        """ Bind named validators to value within the context.
        @param (list) validators
            [
                {"check": "status_code", "comparator": "eq", "expect": 201},
                {"check": "resp_body_success", "comparator": "eq", "expect": True}
            ]
        @param (dict) variables_mapping
            {
                "resp_body_success": True
            }
        @return (list) content differences
            [
                {
                    "check": "status_code",
                    "comparator": "eq", "expect": 201, "value": 200
                }
            ]
        """
        for validator_dict in validators:
            check_item = validator_dict.get("check")
            if not check_item:
                raise exception.ParamsError("check item invalid: {}".format(check_item))

            # Accept both "expect" and the legacy "expected" spelling.
            if "expect" in validator_dict:
                expect_value = validator_dict.get("expect")
            elif "expected" in validator_dict:
                expect_value = validator_dict.get("expected")
            else:
                raise exception.ParamsError("expected value missed in testcase validator!")

            comparator = validator_dict.get("comparator", "eq")

            # Prefer previously extracted variables; fall back to extracting
            # the check item directly from the response.
            if check_item in variables_mapping:
                validator_dict["actual_value"] = variables_mapping[check_item]
            else:
                try:
                    validator_dict["actual_value"] = self.extract_field(check_item)
                # NOTE(review): extract_field raises exception.ParamsError,
                # not ParseResponseError -- confirm these share a base class,
                # otherwise this handler never fires.
                except exception.ParseResponseError:
                    raise exception.ParseResponseError("failed to extract check item in response!")

            utils.match_expected(
                validator_dict["actual_value"],
                expect_value,
                comparator,
                check_item
            )

        return True
|
[
"liucpliu@sina.cn"
] |
liucpliu@sina.cn
|
408d9462951750a661ccc93b1ed833bd2798d25c
|
066bb2e3183735be6e20798074c8e1ec592f1a39
|
/study/study_memory_other.py
|
a1580aa329ef9b0007973616e6a3ece79b8862b2
|
[
"MIT"
] |
permissive
|
ChristophRaab/rrslvq
|
3477dd3264e2de676beaebeeff320b4561dd9598
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
refs/heads/master
| 2021-07-25T13:53:30.595769
| 2021-07-09T13:50:12
| 2021-07-09T13:50:12
| 157,715,586
| 3
| 1
| null | 2019-04-29T11:56:58
| 2018-11-15T13:25:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,919
|
py
|
from __future__ import division
from joblib import Parallel, delayed
from reoccuring_drift_stream import ReoccuringDriftStream
from bix.classifiers.rslvq import RSLVQ
from skmultiflow.data.mixed_generator import MIXEDGenerator
from skmultiflow.evaluation.evaluate_prequential import EvaluatePrequential
from skmultiflow.lazy import KNN
from skmultiflow.meta.oza_bagging_adwin import OzaBaggingAdwin
from rrslvq import ReactiveRobustSoftLearningVectorQuantization
from reoccuring_drift_stream import ReoccuringDriftStream
from bix.classifiers.rrslvq import RRSLVQ
from skmultiflow.trees.hoeffding_adaptive_tree import HAT
from skmultiflow.lazy.sam_knn import SAMKNN
from skmultiflow.meta.adaptive_random_forests import AdaptiveRandomForest
from bix.evaluation.study import Study
from skmultiflow.data.concept_drift_stream import ConceptDriftStream
from skmultiflow.data.sea_generator import SEAGenerator
def init_classifiers():
    """Instantiate the stream classifiers compared in this study.

    Returns:
        (clfs, names): parallel lists of classifier instances and the string
        labels used for them in the evaluator output.
    """
    n_prototypes_per_class = 4
    sigma = 4
    rslvq = RSLVQ(prototypes_per_class=n_prototypes_per_class, sigma=sigma)
    # NOTE(review): the original referenced an undefined name `ARSLVQ`
    # (NameError at call time). RRSLVQ -- the imported adaptive/reactive
    # RSLVQ variant -- is used here instead; confirm this is the intended
    # class.
    arslvq = RRSLVQ(prototypes_per_class=n_prototypes_per_class, sigma=sigma, confidence=0.0001, window_size=300)
    oza = OzaBaggingAdwin(base_estimator=KNN())
    adf = AdaptiveRandomForest()
    samknn = SAMKNN()
    hat = HAT()

    clfs = [hat, rslvq, arslvq, adf, oza]
    # Bug fix: the first entry was the classifier object `hat` instead of the
    # label string "hat", which produced a non-string model name in the
    # evaluator output.
    names = ["hat", "rslvq", "arslvq", "adf", "oza"]
    return clfs, names
def evaluate(stream,metrics,study_size):
    """Run a prequential evaluation of all study classifiers on one stream.

    Results (per-batch metrics) are written to "<stream name>_memory_other.csv".
    """
    clfs,names = init_classifiers()
    stream.prepare_for_use()
    # Evaluate in batches of 10 samples, up to study_size samples total.
    evaluator = EvaluatePrequential(show_plot=False, batch_size=10, max_samples=study_size, metrics=metrics,
                                    output_file=stream.name+"_memory_other.csv")
    evaluator.evaluate(stream=stream, model=clfs, model_names=names)
# Shared study helper (provides the standard stream collections).
s = Study()
# Number of parallel evaluation jobs.
parallel =2
study_size = 50000 #100000
metrics = ['accuracy','model_size']

# Two MIXED generators with opposite labeling functions form the drift pair.
s1 = MIXEDGenerator(classification_function = 1, random_state= 112, balance_classes = False)
s2 = MIXEDGenerator(classification_function = 0, random_state= 112, balance_classes = False)

# Reoccurring drift between the two MIXED concepts.
mixed_ra = ReoccuringDriftStream(stream=s1, drift_stream=s2,random_state=None,alpha=90.0, position=2000,width=100,pause = 1000)
# Single abrupt drift halfway through the study.
mixed_a = ConceptDriftStream(stream=s1,
                             drift_stream=s2,
                             alpha=90.0,
                             random_state=None,
                             position=int(study_size/2),
                             width=1)

# SEA-based abrupt drift stream (constructed but not evaluated below).
sea_a = ConceptDriftStream(stream=SEAGenerator(random_state=112, noise_percentage=0.1),
                           drift_stream=SEAGenerator(random_state=112,
                                                     classification_function=2, noise_percentage=0.1),
                           alpha=90.0,
                           random_state=None,
                           position=int(study_size/2),
                           width=1)
# SEA-based reoccurring drift stream (constructed but not evaluated below).
sea_ra = ReoccuringDriftStream(stream=SEAGenerator(random_state=112, noise_percentage=0.1),
                               drift_stream=SEAGenerator(random_state=112,
                                                         classification_function=2, noise_percentage=0.1),
                               alpha=90.0,
                               random_state=None,
                               position=2000,
                               width=1)
# metrics = ["accuracy","model_size"]
#evaluate(stream,clfs,metrics,names,study_size)

streams = s.init_standard_streams() + s.init_reoccuring_standard_streams()
# NOTE(review): the line below immediately overwrites the list built above --
# only the two MIXED streams are actually evaluated.
streams = [mixed_a,mixed_ra]
# for stream in streams:
#     evaluate(stream,metrics,study_size)
# for stream in streams:
#     evaluate(stream,metrics,study_size)

# Fan the evaluations out over `parallel` worker processes.
Parallel(n_jobs=parallel,max_nbytes=None)(delayed(evaluate)(stream,metrics,study_size) for stream in streams)
# streams = s.init_real_world()
# Parallel(n_jobs=parallel,max_nbytes=None)(delayed(evaluate)(stream,metrics,study_size) for stream in streams)
# #
|
[
"christophraab@outlook.de"
] |
christophraab@outlook.de
|
71040b59ec54d10f08717a8d580862c2c9542046
|
740c72bce146cab71b74073dafd7aa29ad7697c6
|
/UserInput.py
|
0c7821bc546c3dd8afb6d5e48dcd7f0301a794f1
|
[] |
no_license
|
mlumley/Tic-Tac-Toe
|
283cc3354b68812a68cb26aa0dd8afd7ce31879c
|
33e545c0ff10aef110111a9f5b88e9595442bf52
|
refs/heads/master
| 2020-03-26T16:01:09.550098
| 2018-08-27T02:15:55
| 2018-08-27T02:15:55
| 145,077,972
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
from Coordinate import Coordinate
from Error import InvalidCoordinateLengthError
class UserInput():
    """
    Responsible for getting and parsing user input
    """

    def processUserInput(self, player):
        """Prompt *player* for a move.

        Returns a Coordinate for the chosen cell, exits the game if the
        player enters 'q', and raises InvalidCoordinateLengthError for
        anything that is not an "x,y" pair.
        """
        prompt = (
            "Player " + player.id + " enter a coord x,y"
            " to place your " + player.symbol + " or"
            " enter 'q' to give up: "
        )
        action = input(prompt)

        if action == 'q':
            self.exitGame()

        parts = action.split(',')
        if len(parts) != 2:
            raise InvalidCoordinateLengthError(
                (
                    "Error: Invalid coordinate length. "
                    "Coordinates should be of the format x,y"
                )
            )
        return self.listToCoordinate(parts)

    def exitGame(self):
        """Announce the exit and terminate the process."""
        print("Exiting")
        exit()

    def listToCoordinate(self, lst):
        """Turn a two-element [x, y] list into a 0-based Coordinate."""
        coordinate = Coordinate(lst[0], lst[1])
        coordinate.subtract(1)  # player input is 1-based; the board is 0-based
        return coordinate
|
[
"mlumley@student.unimelb.edu.au"
] |
mlumley@student.unimelb.edu.au
|
86996034a9c8ff01cda3320cbb2c7282133e7e9b
|
b7c8e8f3715f6079643c74654e808d40500bd999
|
/solution-011.py
|
84847a8d9fdc681acaa7c0358767c02a175ba8e2
|
[] |
no_license
|
ManaswitaDatta/Advanced-Algorithm-Lab-Assignment
|
02e4749d23af7f512142d853d50b63adf95ad6e1
|
e08d7104e30e66f98fd16e1c2fc6eca62339d4b2
|
refs/heads/main
| 2023-02-19T13:04:38.189300
| 2021-01-08T09:35:20
| 2021-01-08T09:35:20
| 315,086,740
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
import sys
def distribute(arr, n):
    """Split the n sorted packet sizes in *arr* into two piles of near-equal
    total weight and print the two totals, smaller first."""
    if n == 0:
        print(str(0) + " " + str(0))
        return
    if n == 1:
        print(str(0) + " " + str(arr[0]))
        return

    left = right = 0
    # Walk the sorted array from the heaviest end, two packets at a time,
    # always giving the heavier packet of the pair to the lighter pile.
    for i in range(n - 1, -1, -2):
        if right > left:
            right += arr[i - 1]
            left += arr[i]
        else:
            right += arr[i]
            left += arr[i - 1]
        if i - 3 < 0:
            break

    # An odd count leaves arr[0] unpaired; hand it to the lighter pile.
    if n % 2 == 1:
        if right > left:
            left += arr[0]
        else:
            right += arr[0]

    smaller, bigger = (left, right) if right > left else (right, left)
    print(str(smaller) + " " + str(bigger))
    return
def main():
    """Read a packet-size file named on the command line and print the split.

    The file's first line is the packet count n, followed by n packet sizes,
    one per line. Silently does nothing if no filename argument is given.
    """
    if len(sys.argv) != 2:
        return
    fname = sys.argv[1]
    # `with` guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on any exception before file.close()).
    with open(fname, 'r') as file:
        n = int(file.readline())  # total no of packets
        arr = [int(file.readline()) for _ in range(n)]
    arr.sort()
    distribute(arr, n)


if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
ManaswitaDatta.noreply@github.com
|
9bde2d5e18d76501364a23a40ed7699d0f07a06b
|
bbcd2c01151abc533fe1692c8dd1f5d5dad3fb11
|
/basic2/practice_4/cliff_dyna_q.py
|
153d99f089c9ae1ca9c52bc4b10adfc64105fd5b
|
[
"MIT"
] |
permissive
|
linklab/e_learning_rl
|
ded0ca2245c55735eb111e925b33f4a289d366d5
|
16c11c17dfb304959cb80912e29d0540e6ed6cd5
|
refs/heads/master
| 2023-08-15T09:41:33.969407
| 2021-10-05T11:48:49
| 2021-10-05T11:48:49
| 282,601,043
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,670
|
py
|
import os
import random
import numpy as np
import matplotlib.pyplot as plt
from basic.practice_1.cliff import CliffGridWorld
from basic.practice_1.gridworld import GridWorld
# Grid world height and width
GRID_HEIGHT = 4
GRID_WIDTH = 12
NUM_ACTIONS = 4

# Start state and terminal states
START_STATE = (3, 0)
TERMINAL_STATES = [(3, 11)]
CLIFF_STATES = [(3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (3, 10)]

# Maximum number of episodes
MAX_EPISODES = 50

# Discount factor
GAMMA = 0.95

# Exploration probability parameters
INITIAL_EPSILON = 0.1
FINAL_EPSILON = 0.01
LAST_SCHEDULED_EPISODES = 30

# Step size
ALPHA = 0.1

# Total number of runs (results are averaged over them)
TOTAL_RUNS = 25


def epsilon_scheduled(current_episode):
    """Linearly anneal epsilon from INITIAL_EPSILON down to FINAL_EPSILON
    over LAST_SCHEDULED_EPISODES episodes, then hold it at FINAL_EPSILON."""
    fraction = min(current_episode / LAST_SCHEDULED_EPISODES, 1.0)
    annealed = INITIAL_EPSILON + fraction * (FINAL_EPSILON - INITIAL_EPSILON)
    # FINAL_EPSILON < INITIAL_EPSILON, so `annealed` never exceeds
    # INITIAL_EPSILON; the clamp is kept for parity with the original.
    return min(annealed, INITIAL_EPSILON)
# Simple tabular world model used by Dyna-Q's planning phase.
class EnvModel:
    def __init__(self):
        # state -> {action: [reward, next_state]}
        self.model = dict()

    def store(self, state, action, reward, next_state):
        """Record one experienced transition (last write per pair wins)."""
        self.model.setdefault(state, dict())[action] = [reward, next_state]

    def sample(self):
        """Return a uniformly random previously stored transition."""
        state = random.choice(list(self.model.keys()))
        action = random.choice(list(self.model[state].keys()))
        reward, next_state = self.model[state][action]
        return state, action, reward, next_state
# Create an action-value table initialised with random values in [0, 1)
# for every non-terminal state; terminal states keep value 0.
def generate_initial_q_value(env):
    q_table = np.zeros((GRID_HEIGHT, GRID_WIDTH, env.NUM_ACTIONS))
    for row in range(GRID_HEIGHT):
        for col in range(GRID_WIDTH):
            if (row, col) in env.TERMINAL_STATES:
                continue
            for action in env.ACTIONS:
                q_table[row, col, action] = random.random()
    return q_table
# Build a uniform random policy over all non-terminal states: each of the
# four actions starts with the same selection probability (0.25).
def generate_initial_random_policy(env):
    policy = dict()
    for row in range(GRID_HEIGHT):
        for col in range(GRID_WIDTH):
            if (row, col) in env.TERMINAL_STATES:
                continue
            actions = list(env.ACTIONS)
            prob = [0.25 for _ in actions]
            policy[(row, col)] = (actions, prob)
    return policy
# Rebuild the epsilon-greedy action distribution for `state` from the
# current Q-values and the scheduled epsilon. Ties among greedy actions
# share the greedy probability mass equally.
def update_epsilon_greedy_policy(env, state, q_value, policy, current_episode):
    q_row = q_value[state[0], state[1], :]
    best_value = np.max(q_row)
    greedy_actions = [a for a, v in enumerate(q_row) if v == best_value]

    epsilon = epsilon_scheduled(current_episode)
    explore_prob = epsilon / env.NUM_ACTIONS
    greedy_prob = (1 - epsilon) / len(greedy_actions) + explore_prob

    actions = list(env.ACTIONS)
    action_probs = [
        greedy_prob if action in greedy_actions else explore_prob
        for action in actions
    ]
    policy[state] = (actions, action_probs)
# One episode of Dyna-Q learning.
# @q_table: action-value table, updated in place by this function
# @env_model: world model used for the planning (Q-planning) sweeps
# @env: the grid-world environment
def dyna_q(q_table, policy, env_model, env, episode, planning_repeat, step_size=ALPHA):
    """Run one Dyna-Q episode and return (steps taken, total reward)."""
    state = env.reset()
    steps = 0
    rewards = 0.0

    done = False
    while not done:
        # Count the time step
        steps += 1

        # Sample an action from the current stochastic policy
        actions, prob = policy[state]
        action = np.random.choice(actions, size=1, p=prob)[0]

        # Take the action in the real environment
        next_state, reward, done, _ = env.step(action)

        # Q-learning update from the real transition
        target = reward + GAMMA * np.max(q_table[next_state[0], next_state[1], :])
        q_table[state[0], state[1], action] += step_size * (target - q_table[state[0], state[1], action])

        # Store the experienced transition in the model (model building)
        env_model.store(state, action, reward, next_state)

        # Q-planning: repeatedly replay transitions sampled from the model
        for t in range(planning_repeat):
            state_, action_, reward_, next_state_ = env_model.sample()
            target = reward_ + GAMMA * np.max(q_table[next_state_[0], next_state_[1], :])
            q_table[state_[0], state_[1], action_] += step_size * (target - q_table[state_[0], state_[1], action_])

        # Refresh the epsilon-greedy policy for the state just left
        update_epsilon_greedy_policy(env, state, q_table, policy, episode)
        state = next_state
        rewards += reward

    return steps, rewards
def cliff_dyna_q(env):
    """Compare Dyna-Q with different planning-repeat counts on *env*.

    Averages executed-steps-per-episode over TOTAL_RUNS runs and saves the
    comparison plot to images/cliff_dyna_q.png.
    """
    planning_repeats = [0, 3, 30]
    performance_steps = np.zeros((len(planning_repeats), MAX_EPISODES))

    for run in range(TOTAL_RUNS):
        print("RUN: {0}".format(run))
        for i, planning_repeat in enumerate(planning_repeats):
            # Fresh action-value table per configuration and run
            q_table = generate_initial_q_value(env)

            # Environment model used by Dyna-Q's planning sweeps
            env_model = EnvModel()

            policy = generate_initial_random_policy(env)
            for episode in range(MAX_EPISODES):
                steps_, _ = dyna_q(q_table, policy, env_model, env, episode, planning_repeat)
                performance_steps[i, episode] += steps_

    # Average over the total number of runs
    performance_steps /= TOTAL_RUNS

    linestyles = ['-', '--', ':']
    for i in range(len(planning_repeats)):
        plt.plot(performance_steps[i, :], linestyle=linestyles[i], label='Number of planning steps: {0}'.format(planning_repeats[i]))
    plt.xlabel('Episode')
    plt.ylabel('Executed steps per episode')
    plt.legend()

    plt.savefig('images/cliff_dyna_q.png')
    plt.close()
def cliff_dyna_q_main():
    """Entry point: build the cliff grid world, render it once, and run the
    Dyna-Q planning-repeat comparison."""
    # Ensure the image output directory exists before any plot is saved
    if not os.path.exists('images/'):
        os.makedirs('images/')

    env = CliffGridWorld(
        height=GRID_HEIGHT,
        width=GRID_WIDTH,
        start_state=START_STATE,
        terminal_states=TERMINAL_STATES,
        transition_reward=-1.0,
        terminal_reward=-1.0,
        outward_reward=-1.0,
        # Stepping onto a cliff cell sends the agent back to the start state
        # with a -100 reward.
        cliff_states=[(s, START_STATE, -100.0) for s in CLIFF_STATES]
    )
    env.reset()
    env.render()

    cliff_dyna_q(env)


if __name__ == '__main__':
    cliff_dyna_q_main()
|
[
"yh21.han@gmail.com"
] |
yh21.han@gmail.com
|
889663a43b247ad2f4f68cf01e0b3fd4be843fc9
|
dab495a386298d92414c8586d4d7f6e860099f34
|
/Work_Wechat_GUI_App/utils/get_func_name.py
|
917a06de0c4f7538d18ed311c94681f0e2e9ea35
|
[] |
no_license
|
BlueZUJIUPUP/HogwartsSDE18
|
bc83884355e619554cdb2704e288e95600ca635f
|
8891c10e094b13d07cf4830855c37cebd07bd3d4
|
refs/heads/main
| 2023-07-17T13:07:17.445744
| 2021-08-30T14:45:00
| 2021-08-30T14:45:00
| 356,118,129
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# -*- coding: utf-8 -*-
# @File : get_func_name
# @Time : 2021/4/26 14:48
# @Author : BLUE_JUZIUPUP
import sys
def get_fanc_name(result):
    """Format *result* prefixed with the name of the calling function.

    Bug fix: the original used ``sys._getframe()`` with no depth argument,
    which always reports this helper's own name ('get_fanc_name') rather
    than the test function that called it; depth 1 selects the caller's
    frame, which is what a test-result log needs.
    """
    caller_name = sys._getframe(1).f_code.co_name
    return f"{caller_name}用例结果为:" + str(result)
|
[
"z1003033614@163.com"
] |
z1003033614@163.com
|
8446998f05eb5d902544da0f8d899fe616d0c230
|
d4daa41bc97e74a83c2a106b0a2f1bbda5222c53
|
/BlackJack.py
|
3e06f6608d26a92968585a38121450e45fe42492
|
[] |
no_license
|
Rafael-Leafar/Games
|
68d09c724a0153f5eb3aec1a50ec933623da5497
|
8710790e8e3e84caf834863b2752c151635bd7a8
|
refs/heads/main
| 2023-03-05T11:44:21.315210
| 2021-02-26T09:18:43
| 2021-02-26T09:18:43
| 342,517,935
| 0
| 0
| null | 2021-02-26T09:18:43
| 2021-02-26T08:57:55
|
Python
|
UTF-8
|
Python
| false
| false
| 5,942
|
py
|
#!/usr/bin/env python
# coding: utf-8
import random
# Card categories and blackjack values: face cards count 10, and an Ace
# counts 11 until Hand.adjust_for_ace demotes it to 1.
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two':2, 'Three':3, 'Four':4, 'Five':5, 'Six':6, 'Seven':7, 'Eight':8, 'Nine':9, 'Ten':10, 'Jack':10,
          'Queen':10, 'King':10, 'Ace':11}

# Global flag toggled by hit_or_stand() to end the player's turn.
playing = True
class Card:
    """A single playing card: one rank of one suit."""

    def __init__(self, suits, ranks):
        # Parameter names kept from the original interface: `suits` is this
        # card's suit, `ranks` is its rank.
        self.suits = suits
        self.ranks = ranks

    def __str__(self):
        return f"{self.ranks} of {self.suits}"
class Deck:
    """A 52-card deck, dealt from the front of the list."""

    def __init__(self):
        # One card for every (suit, rank) combination.
        self.deck = [Card(suit, rank) for suit in suits for rank in ranks]

    def __str__(self):
        # Report how many cards remain.
        return f'{len(self.deck)}'

    def shuffle(self):
        random.shuffle(self.deck)

    def deal(self):
        # Take the top card (front of the list).
        return self.deck.pop(0)
class Hand:
    """A blackjack hand: its cards, running value, and soft-ace count."""

    def __init__(self):
        self.cards = []  # cards held
        self.value = 0   # running blackjack value (aces counted as 11)
        self.aces = 0    # number of aces still counted as 11

    def add_card(self, card):
        """Add *card*, updating the hand value and the soft-ace count."""
        self.cards.append(card)
        self.value += values[card.ranks]
        if card.ranks == 'Ace':
            self.aces += 1

    def adjust_for_ace(self):
        """Demote soft aces (11 -> 1) one at a time while the hand busts.

        Bug fix: the original subtracted 10 on every call whenever
        aces > 0 without ever decrementing the ace count, so a single ace
        could be demoted repeatedly across calls, under-counting the hand.
        """
        while self.value > 21 and self.aces > 0:
            self.value -= 10
            self.aces -= 1
class Chips:
    """Tracks the player's bankroll and the current bet."""

    def __init__(self):
        self.total = 1000  # default bankroll; may be overwritten by user input
        self.bet = 0

    def win_bet(self):
        self.total = self.total + self.bet

    def lose_bet(self):
        self.total = self.total - self.bet
def take_bet(funds):
    """Prompt until the player enters a bet no larger than *funds*; return it.

    Fixes two defects in the original: a print/break after ``return bet``
    that could never execute (dead code), and an uncaught ValueError crash
    on non-numeric input.
    """
    while True:
        try:
            bet = int(input('Please place your bet: '))
        except ValueError:
            print("Please enter a whole number of chips.")
            continue
        if bet <= funds:
            return bet
        # Original (misspelled) message kept for output compatibility.
        print("You do not have enouth chips to bet!")
def hit(deck, hand):
    """Deal one card from *deck* into *hand*, demoting an ace on a bust."""
    dealt = deck.deal()
    hand.add_card(dealt)
    if hand.value > 21:
        hand.adjust_for_ace()
def hit_or_stand(deck,hand):
    """Repeatedly ask the player to hit ('H') or stand ('S').

    Hitting deals a card and shows it; standing clears the module-level
    `playing` flag (read by the main game loop) and returns.
    """
    global playing # to control an upcoming while loop
    answer = ""
    playing = True
    # Keep prompting while no valid choice was made yet, or while the
    # player keeps hitting; 'S' breaks out below.
    while answer not in ["H","S"] or answer == 'H':
        answer = input('Hit or Stand? H or S: ').upper()
        if answer == "H":
            hit(deck,hand)
            # Show the card that was just drawn.
            print(hand.cards[-1])
        elif answer == "S":
            playing = False
            break
        else:
            print('Sorry, you need to choose H or S.')
def show_some(player, dealer):
    """Print both hands while keeping the dealer's first card hidden."""
    print('\nPlayer cards: \n')
    for card in player.cards:
        print(f'{card}')
    print(f'Total Player value: {player.value}\n')

    print('Dealer cards: \n')
    print(f'***Hidden***')
    for card in dealer.cards[1:]:
        print(f'{card}')
    # Report the dealer total without the hidden card's contribution.
    visible_value = dealer.value - values[dealer.cards[0].ranks]
    print(f'Total Dealer value: {visible_value}')
def show_all(player, dealer):
    """Print both hands in full, including the dealer's hole card."""
    print('\nPlayer cards: \n')
    for card in player.cards:
        print(f'{card}')
    print(f'Total Player value: {player.value}\n')

    print('Dealer cards: \n')
    for card in dealer.cards:
        print(f'{card}')
    print(f'Total Dealer value: {dealer.value}\n')
def player_busts(player_hand, dealer_hand, chips):
    """Announce a player bust and deduct the bet from the bankroll."""
    print("Player BUST!")
    chips.lose_bet()
def player_wins(player_hand, dealer_hand, chips):
    """Announce a player win and pay the bet into the bankroll."""
    print("Player Wins!")
    chips.win_bet()
def dealer_busts(player_hand, dealer_hand, chips):
    """Announce a dealer bust; the player collects the bet."""
    print("Dealer BUST!")
    chips.win_bet()
def dealer_wins(player_hand, dealer_hand, chips):
    """Announce a dealer win; the player forfeits the bet."""
    print("Dealer Wins!")
    chips.lose_bet()
def push():
    """Announce a tie (push) between player and dealer."""
    print('Draw')
# ### And now on to the game!!
# Main game loop: one iteration per round; exits when the player declines
# another game. NOTE(review): indentation was lost in this copy -- the
# `else:` after the inner `while` is reconstructed as a while/else clause
# (runs only when the player did NOT bust/break); confirm against the
# original file.
while True:
    print('Welcome to Blackjack Game \n')
    # Fresh shuffled deck and two-card starting hands each round.
    newdeck = Deck()
    newdeck.shuffle()

    dealer_hand = Hand()
    player_hand = Hand()
    hit(newdeck,dealer_hand)
    hit(newdeck,player_hand)
    hit(newdeck,dealer_hand)
    hit(newdeck,player_hand)

    # Set up the Player's chips
    player_chips = Chips()
    player_chips.total = int(input('Player deposit your chips: '))

    # Prompt the Player for their bet
    player_chips.bet = take_bet(player_chips.total)

    # Show cards (but keep one dealer card hidden)
    show_some(player_hand,dealer_hand)

    playing = True
    while playing == True: # recall this variable from our hit_or_stand function

        # Prompt for Player to Hit or Stand
        hit_or_stand(newdeck,player_hand)

        # Show cards (but keep one dealer card hidden)
        show_some(player_hand,dealer_hand)

        # If player's hand exceeds 21, run player_busts() and break out of loop
        if player_hand.value > 21:
            player_busts(player_hand,dealer_hand,player_chips)
            break

    # If Player hasn't busted, play Dealer's hand until Dealer reaches 17
    else:
        while dealer_hand.value < 17:
            hit(newdeck,dealer_hand)

        # Show all cards
        show_all(player_hand,dealer_hand)

        # Run different winning scenarios
        if dealer_hand.value > 21:
            dealer_busts(player_hand,dealer_hand,player_chips)

        elif player_hand.value > dealer_hand.value:
            player_wins(player_hand,dealer_hand,player_chips)

        elif player_hand.value < dealer_hand.value:
            dealer_wins(player_hand,dealer_hand,player_chips)

        else:
            push()

    # Inform Player of their chips total
    print(f'Player your current chips are: {player_chips.total}\n')

    # Ask to play again. NOTE(review): the bare `True` statement is a no-op;
    # the loop continues either way unless the player answers anything but
    # 'Y', which breaks out.
    if input('Do you want to play again?"("Y/N")" ').upper() == 'Y':
        True
    else:
        break
|
[
"noreply@github.com"
] |
Rafael-Leafar.noreply@github.com
|
ea7a891dfd699c004f09d9b65a9d801890d1debf
|
090f60e856b653eb6e86c3f4b4b88a181adcde08
|
/For.py
|
e1b8bc22bc3cf48c3e703af2b91fa77506bc632a
|
[] |
no_license
|
MariaCuenca26/For
|
ade6b4133cf32da170add482e7cb4d14778a51e2
|
2d3d593d0e4cab3ef235a2341b952bab8493d779
|
refs/heads/main
| 2023-06-20T15:21:12.010930
| 2021-07-17T23:39:46
| 2021-07-17T23:39:46
| 387,055,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
class For:
    """Teaching example demonstrating a basic `for` loop over a range."""

    def __init__(self):
        self.numero = 0

    def usoFor(self):
        """Build several sample data structures, then print i=0 .. i=4."""
        nombre = "Fernanda"
        datos = ["Fernanda", 20, True]
        numeros = (2, 5, 6, 4, 1)
        docente = {"nombre": "Fernanda", "edad": 20, "fac": "faci"}
        listanotas = [(30, 40), (20, 40, 50), (50, 40)]
        listaalumnos = [{"nombre": "maria", "fianl": 70}, {"nombre": "Yessenia", "fianl": 60}, {"nombre": "Fabian", "fianl": 90}]
        # Only this loop produces output; the structures above are unused
        # sample data kept from the original lesson.
        for contador in range(5):
            print("i={}".format(contador))
# Demo driver.
bucle = For()
bucle.usoFor()
# Bug fix: the original executed `print(bucle.nombre)`, which raises
# AttributeError -- `nombre` is a local variable inside usoFor(), never an
# instance attribute. Print the attribute that actually exists instead.
# NOTE(review): confirm whether the lesson intended usoFor() to set
# `self.nombre`.
print(bucle.numero)
|
[
"noreply@github.com"
] |
MariaCuenca26.noreply@github.com
|
d099d0d65823fa6e12e26af366297eb3cbd3a094
|
a39a19301d3ac0e85ef5b340312fb2f750358577
|
/04_dqn_noisy_net.py
|
280e41f175e88b5a3a97dd6fbb36057a05941b25
|
[] |
no_license
|
Prakhar-FF13/Reinforcement-Learning-With-Python
|
3d3d81b80d6997532a73474f89ac64084efee37d
|
b8dec285e3f63472bd88edda34e5f71b81b56c5f
|
refs/heads/master
| 2020-05-02T21:37:34.480608
| 2019-03-31T10:02:19
| 2019-03-31T10:02:19
| 178,226,412
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,467
|
py
|
#!/usr/bin/env python3
import gym
import ptan
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from lib2 import dqn_model, common
class NoisyDQN(nn.Module):
    """DQN whose fully-connected head uses NoisyLinear layers so that
    exploration comes from learned parameter noise."""

    def __init__(self, input_shape, n_actions):
        super(NoisyDQN, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU(),
        )

        conv_out_size = self._get_conv_out(input_shape)
        # Keep direct references so noisy_layers_sigma_snr() can inspect them.
        self.noisy_layers = [
            dqn_model.NoisyLinear(conv_out_size, 512),
            dqn_model.NoisyLinear(512, n_actions),
        ]
        self.fc = nn.Sequential(
            self.noisy_layers[0],
            nn.ReLU(),
            self.noisy_layers[1],
        )

    def _get_conv_out(self, shape):
        # Probe the conv stack with a zero batch to learn its flattened size.
        probe = self.conv(torch.zeros(1, *shape))
        return int(np.prod(probe.size()))

    def forward(self, x):
        # Scale raw byte observations into [0, 1) before the conv stack.
        scaled = x.float() / 256
        flat = self.conv(scaled).view(scaled.size()[0], -1)
        return self.fc(flat)

    def noisy_layers_sigma_snr(self):
        """Signal-to-noise ratio (RMS weight / RMS sigma) per noisy layer."""
        snrs = []
        for layer in self.noisy_layers:
            signal = (layer.weight ** 2).mean().sqrt()
            noise = (layer.sigma_weight ** 2).mean().sqrt()
            snrs.append((signal / noise).item())
        return snrs
if __name__ == "__main__":
params = common.HYPERPARAMS['pong']
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action="store_true", help="Enable cuda")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
env = gym.make(params['env_name'])
env = ptan.common.wrappers.wrap_dqn(env)
writer = SummaryWriter(comment="-" + params['run_name'] + "-noisy-net")
net = NoisyDQN(env.observation_space.shape, env.action_space.n).to(device)
tgt_net = ptan.agent.TargetNet(net)
agent = ptan.agent.DQNAgent(net, ptan.actions.ArgmaxActionSelector(), device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=params['gamma'], steps_count=1)
buffer = ptan.experience.ExperienceReplayBuffer(exp_source, buffer_size=params['replay_size'])
optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])
frame_idx = 0
with common.RewardTracker(writer, params['stop_reward']) as reward_tracker:
while True:
frame_idx += 1
buffer.populate(1)
new_rewards = exp_source.pop_total_rewards()
if new_rewards:
if reward_tracker.reward(new_rewards[0], frame_idx):
break
if len(buffer) < params['replay_initial']:
continue
optimizer.zero_grad()
batch = buffer.sample(params['batch_size'])
loss_v = common.calc_loss_dqn(batch, net, tgt_net.target_model, gamma=params['gamma'], device=device)
loss_v.backward()
optimizer.step()
if frame_idx % params['target_net_sync'] == 0:
tgt_net.sync()
if frame_idx % 500 == 0:
for layer_idx, sigma_l2 in enumerate(net.noisy_layers_sigma_snr()):
writer.add_scalar("sigma_snr_layer_%d" % (layer_idx+1),
sigma_l2, frame_idx)
|
[
"prakhar.meerut@gmail.com"
] |
prakhar.meerut@gmail.com
|
02630f6afedcf58bbf977378dfb4cbe97d908c59
|
88bd86d4c889b786023ed3a1c244141aee603201
|
/tests/SpaceInvaders-DDQN/deep_Q.py
|
731e9c3f4d074a6367c7d81f2eac4d77e1a76855
|
[
"MIT"
] |
permissive
|
webclinic017/AI_Framework
|
ff19cb820aa31a1293aaacd444ea33dcce990a65
|
3889d69e4aa68067f29285b6cb6a07f4f3886636
|
refs/heads/main
| 2023-03-01T18:21:56.886524
| 2021-02-13T09:16:02
| 2021-02-13T09:16:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,455
|
py
|
import gym
import numpy as np
import random
import cv2
from replay_buffer import ReplayBuffer
from tensorflow.keras.models import load_model, Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
# List of hyper-parameters and constants
DECAY_RATE = 0.99        # discount factor for future rewards
BUFFER_SIZE = 40000      # replay-buffer capacity
MINIBATCH_SIZE = 64      # samples per training step
TOT_FRAME = 3000000      # total frames for the run
EPSILON_DECAY = 1000000  # frames over which epsilon anneals
MIN_OBSERVATION = 500    # warm-up frames before training
FINAL_EPSILON = 0.05     # exploration floor
INITIAL_EPSILON = 0.85   # starting exploration rate
NUM_ACTIONS = 6          # Space Invaders action-space size
TAU = 0.01               # target-network soft-update rate
# Number of frames to throw into network
NUM_FRAMES = 3
class DeepQ(object):
    """Deep Q-network (DQN) with a separate target network.

    Architecture follows the DeepMind DQN paper: three conv layers over a
    stack of NUM_FRAMES 84x84 grayscale frames, then two dense layers
    emitting one Q-value per action.
    """

    def __init__(self):
        self.construct_q_network()

    def _build_network(self):
        # Build one compiled copy of the DeepMind DQN architecture.
        model = Sequential()
        model.add(Conv2D(32, (8, 8), strides=4, input_shape=(84, 84, NUM_FRAMES)))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (4, 4), strides=2))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dense(NUM_ACTIONS))
        model.compile(loss='mse', optimizer=Adam(lr=0.00001))
        return model

    def construct_q_network(self):
        """Create the online and target networks with identical weights.

        BUG FIX: the original duplicated the layer list for the two
        networks, and one line mistakenly added an extra ReLU to
        ``self.model`` (after its final Dense layer, clamping Q-values to
        >= 0) while leaving the target network's Dense(512) with no ReLU.
        Building both nets from a single helper removes both the
        duplication and the bug.
        """
        self.model = self._build_network()
        self.target_model = self._build_network()
        self.target_model.set_weights(self.model.get_weights())
        print("Successfully constructed networks.")

    def predict_movement(self, data, epsilon):
        """Predict movement of game controler where is epsilon
        probability randomly move."""
        q_actions = self.model.predict(data.reshape(1, 84, 84, NUM_FRAMES), batch_size=1)
        opt_policy = np.argmax(q_actions)
        # Epsilon-greedy exploration: with probability epsilon take a
        # uniformly random action instead of the greedy one.
        if np.random.random() < epsilon:
            opt_policy = np.random.randint(0, NUM_ACTIONS)
        return opt_policy, q_actions[0, opt_policy]

    def train(self, s_batch, a_batch, r_batch, d_batch, s2_batch, observation_num):
        """Run one gradient step on a minibatch of transitions.

        Targets follow standard Q-learning: r + DECAY_RATE * max_a'
        Q_target(s', a') for non-terminal transitions, plain r otherwise.
        """
        batch_size = s_batch.shape[0]
        targets = np.zeros((batch_size, NUM_ACTIONS))
        for i in range(batch_size):
            targets[i] = self.model.predict(s_batch[i].reshape(1, 84, 84, NUM_FRAMES), batch_size=1)
            fut_action = self.target_model.predict(s2_batch[i].reshape(1, 84, 84, NUM_FRAMES), batch_size=1)
            targets[i, a_batch[i]] = r_batch[i]
            if not d_batch[i]:
                targets[i, a_batch[i]] += DECAY_RATE * np.max(fut_action)
        loss = self.model.train_on_batch(s_batch, targets)
        # Print the loss every 10 iterations.
        if observation_num % 10 == 0:
            print("We had a loss equal to ", loss)

    def save_network(self, path):
        # Saves model at specified path as h5 file
        self.model.save(path)
        print("Successfully saved network.")

    def load_network(self, path):
        # Loads the online network only; the target network keeps its old
        # weights until the next target_train() / set_weights call.
        self.model = load_model(path)
        print("Succesfully loaded network.")

    def target_train(self):
        """Soft update: theta_target <- TAU*theta + (1-TAU)*theta_target."""
        model_weights = self.model.get_weights()
        target_model_weights = self.target_model.get_weights()
        for i in range(len(model_weights)):
            target_model_weights[i] = TAU * model_weights[i] + (1 - TAU) * target_model_weights[i]
        self.target_model.set_weights(target_model_weights)
if __name__ == "__main__":
    # NOTE(review): SpaceInvader is not defined anywhere in this module --
    # running this block raises NameError.  Presumably the class lives in a
    # sibling module and the import is missing; confirm before use.
    print("Haven't finished implementing yet...'")
    space_invader = SpaceInvader()
    space_invader.load_network("saved.h5")
    # print space_invader.calculate_mean()
    # space_invader.simulate("deep_q_video", True)
    space_invader.train(TOT_FRAME)
|
[
"frederic.lambert@graphicstream.fr"
] |
frederic.lambert@graphicstream.fr
|
68a0d3ae1a45c2cfbdb711a4d8cb4ecdd082387a
|
c38b22c34357b9baaf50f56db1f22c34d399fe30
|
/test/test_google.py
|
cbaabd337570b78ab5eb0aafa31d6bf7444b25c3
|
[] |
no_license
|
oleolesya/PySelenium
|
054364165cbc795865c26146fc3e94878eae1a9e
|
368d33310cdbf9aaed6717127b7e575d05ebb77e
|
refs/heads/master
| 2021-01-25T07:40:43.731932
| 2017-06-13T05:59:32
| 2017-06-13T05:59:32
| 93,650,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
#from webdriver_manager.chrome import ChromeDriverManager
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
def test_google():
    """Smoke test: searching Google for 'webdriver' yields the expected localized title."""
    browser = webdriver.Chrome()
    browser.get("http://www.google.com")
    search_box = browser.find_element_by_name("q")
    search_box.send_keys("webdriver")
    browser.find_element_by_name("btnG").click()
    # Wait up to 3s for the results page title (Ukrainian locale).
    WebDriverWait(browser, 3).until(ec.title_is("webdriver - Пошук Google"))
    browser.quit()
|
[
"schmetterling4olesya@gmail.com"
] |
schmetterling4olesya@gmail.com
|
31f2e47e9d5792b8e4db1c39ee2cb78ee812e6fb
|
4050799f59f7478b2ab92d15163a46aac8d7d55f
|
/bin/python-config
|
2ad97d37fa624b17365ebe49c81e09f3ac954439
|
[] |
no_license
|
higorcoimbra/django_tutorial
|
cae97eea9305ae3779e00b4e183ac7f30161b272
|
f0e4e6f8277b9b58363f93bf1d9a3c1ee4ca0525
|
refs/heads/master
| 2021-01-19T02:16:11.398131
| 2016-12-12T17:16:07
| 2016-12-12T17:16:07
| 62,657,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,345
|
#!/home/higor/django_tutorial/bin/python
import sys
import getopt
import sysconfig
# Options supported by this python-config; newer interpreter versions add
# extra ones.  insert(-1, ...) keeps 'help' as the last entry.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']

if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Print a usage line listing every supported option and exit with *code*."""
    options = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], options))
    sys.exit(code)
# Parse long options only; any unknown flag or empty invocation shows usage.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()

if not opts:
    exit_with_usage()

pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var

opt_flags = [flag for (flag, val) in opts]

if '--help' in opt_flags:
    exit_with_usage(code=0)

# Emit the requested build-configuration values, one option at a time.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))

    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))

    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))

    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))

    elif opt == '--extension-suffix':
        # EXT_SUFFIX replaced the legacy 'SO' variable; fall back for old builds.
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)

    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)

    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
|
[
"higor1308@gmail.com"
] |
higor1308@gmail.com
|
|
1a1e24afacddac4b6572a81f04e03aa4a80cd94e
|
11d29d2a35740fd972622a7f3670bc83736ece21
|
/2021-02-09-Interview.py
|
6617ec754aa866d4d8fe8c805ef95ba395486276
|
[] |
no_license
|
GabrielSuzuki/Daily-Interview-Question
|
52f19a2f1c0fbfd068841408502429f9f0916741
|
56730f7e296b35403479a3904bd10978bd6ca36a
|
refs/heads/main
| 2023-03-05T20:00:29.350949
| 2021-02-22T17:38:53
| 2021-02-22T17:38:53
| 315,160,045
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
#Hi, here's your problem today. This problem was recently asked by Amazon:
#You are given an array of integers. Return the length of the longest consecutive elements sequence in the array.
#For example, the input array [100, 4, 200, 1, 3, 2] has the longest consecutive sequence 1, 2, 3, 4, and thus, you should return its length, 4.
def longest_consecutive(nums):
    """Return the length of the longest run of consecutive integers in *nums*.

    Uses the O(n) set-based scan: only values that start a run (value-1
    absent from the set) are extended, so every element is inspected a
    constant number of times.

    BUG FIX: the original nested-loop version never reset its running
    counter (``tempmax``) between starting points, so lengths from
    unrelated runs accumulated -- e.g. it returned 4 for [3, 1, 2].
    It only produced the right answer for the sample input by accident.

    Returns 0 for an empty input; duplicates are counted once.
    """
    values = set(nums)
    best = 0
    for value in values:
        if value - 1 in values:
            continue  # not the start of a run
        length = 1
        while value + length in values:
            length += 1
        best = max(best, length)
    return best
# Fill this in.
# Demo from the problem statement; expected output is 4 (the run 1,2,3,4).
print(longest_consecutive([100, 4, 200, 1, 3, 2]))
# 4
|
[
"60160196+GabrielSuzuki@users.noreply.github.com"
] |
60160196+GabrielSuzuki@users.noreply.github.com
|
ec41dec00883ddaab1e22e1752bf324a827f56ea
|
30d8df1db8a2c848a6f585cfdf2428d279b45c44
|
/main.py
|
4a43502a4ca36b45c68458cc578e2668626f6300
|
[
"MIT"
] |
permissive
|
BBernYY/GalgjeGame
|
fcfdab635523bcf297c92d6e0c7205d28f6c299c
|
a12b6d2f4cc41211eefdc88eaffdd70cb22975a1
|
refs/heads/main
| 2023-07-27T07:20:37.329864
| 2021-09-13T05:52:32
| 2021-09-13T05:52:32
| 387,531,802
| 0
| 0
|
MIT
| 2021-07-20T12:47:23
| 2021-07-19T16:41:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,276
|
py
|
import random
import frontend as f
import english_words as ew
def round_check(lives, no_letters, word, wordP):
    # Decide the outcome of one hangman round: death, win, or another guess.
    # Returns the updated game-state dict from getword(), or None when the
    # round ends (event() then clears the module-level `running` flag).
    # NOTE(review): death triggers while one life still remains
    # (lives == 1), not at zero -- confirm this is intended.
    if lives == 1:
        event("die", word)
    elif not "_" in wordP:
        event("win", word)
    else:
        # f.func (frontend module) prompts the player for the next letter.
        return getword(no_letters, word, wordP, f.func(lives, no_letters, wordP), lives)
def getword(no_letters, word, wordP, letter, lives):
    """Apply one guessed *letter* to the game state.

    A correct guess reveals every matching position in *wordP*; a wrong
    guess is appended to *no_letters* (mutated in place) and costs a life.
    Returns the updated state as a dict.
    """
    if letter in word:
        revealed = [
            letter if actual == letter else shown
            for actual, shown in zip(word, wordP)
        ]
        wordP = "".join(revealed)
    else:
        no_letters.append(letter)
        lives -= 1
    return {"no_letters": no_letters, "word": word, "wordP": wordP, "lives": lives}
def event(e_type, word):
    """Announce the round outcome ("die" or win) and stop the inner game loop."""
    global running
    if e_type != "die":
        print("GG! well played!")
    else:
        print("You died... Sadly...")
        print("The word was "+word)
    print("Starting next round\n\n")
    running = False
# Main game loop: each outer iteration starts a fresh round with a random
# word, full lives (11) and an all-blanks display string.
while True:
    d = {}
    d["no_letters"], d["word"], d["wordP"], d["lives"] = [], random.choice(["louise", "simone", "sytze", "koos"]), "", 11
    for i in range(len(d["word"])):
        d["wordP"] += "_"
    running = True
    # NOTE(review): when the round ends, round_check returns None, so d
    # becomes None here -- safe only because event() has already cleared
    # `running`, which exits this loop before d is dereferenced again.
    while running:
        d = round_check(d["lives"], d["no_letters"], d["word"], d["wordP"])
|
[
"66414852+BBernYY@users.noreply.github.com"
] |
66414852+BBernYY@users.noreply.github.com
|
fef7406b19678804c0d366120298d97f69c99661
|
db21596455baadb1c0fbbea4783271714a2a72b5
|
/57.First_Order_Derivative_Sobel.py
|
5531ce9beb92d9eccb0fb80757aa9540f11ef007
|
[] |
no_license
|
KSanjayReddy/OpenCvCourse
|
c7fb3f16b0c683d7e162be3781b9a226272a384b
|
67917fe6444f2ca89d1a78a618e51036b7c5158e
|
refs/heads/master
| 2021-06-28T03:56:50.403387
| 2020-12-30T01:41:47
| 2020-12-30T01:41:47
| 199,123,948
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 944
|
py
|
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Demonstrates that the Sobel-x kernel is the elementwise product of a 3x3
# Gaussian-like smoothing kernel and a Prewitt-x derivative kernel, then
# shows OpenCV's first-order Sobel derivatives on a sample image.
plt.rc('image', cmap = 'gray')

path = "Images/week4/truth.png"
#path = "Images/week4/sample.jpg"
img = cv2.imread(path, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# 3x3 smoothing kernel [[1,2,1],[2,4,2],[1,2,1]] (unnormalized).
gauss = np.array([1,2,1,2,4,2,1,2,1])
gauss = gauss.reshape(3,3)
print("Gauss : ")
print(gauss)

print("Perwitt :")
perwitt = np.array([-1,0,1,-1,0,1,-1,0,1])
perwitt = perwitt.reshape(3,3)
print(perwitt)

# Elementwise (Hadamard) product, NOT a convolution: yields the Sobel-x
# kernel [[-1,0,1],[-2,0,2],[-1,0,1]].
sobel = gauss * perwitt
print("Sobel")
print(sobel)

# Sobel opencv starts here
sobelx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
sobely = cv2.Sobel(img, cv2.CV_32F, 0, 1)
#The sobel X and Y have both positive and negative values,normalize them to 0-1
sobelx = cv2.normalize(sobelx, sobelx, 0, 1, cv2.NORM_MINMAX)
sobely = cv2.normalize(sobely, sobely, 0, 1, cv2.NORM_MINMAX)

cv2.imshow("img", img)
cv2.imshow("sobelX", sobelx)
cv2.imshow("sobelY", sobely)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"ksanjayreddy817@gmail.com"
] |
ksanjayreddy817@gmail.com
|
95bc33b5bb6cef458a63d1b4418119da1b9e9597
|
5094c5a93f17787855f1d0a380fe885c037e2526
|
/English/texts.py
|
e3268cf7f798b53550086825270bbb11585ec929
|
[] |
no_license
|
paradox70/WMarkProBot
|
e0c589123a8b46b28f9b5b891670200371a9d8cf
|
2832680f79c0a59fecae72442fc1be4a63a52ecc
|
refs/heads/master
| 2022-10-12T15:08:57.136256
| 2020-06-12T17:46:22
| 2020-06-12T17:46:22
| 268,281,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,309
|
py
|
RTL = ''
LTR = ''
LANGUAGE = 'Language'
USER = 'User'
SELECT_LANGUAGE = '🌐 Please choose your desired language:'
NEW_INVITATION_NOTIF = '👥 User {} started the bot with your dedicated link.\n' \
'Notification settings /notification'
START_AGAIN = 'No need to start again.\n' \
'In case you need help, read the bot manual.'
UNKNOWN_CANCEL = 'Operation successfully canceled.'
CANCEL_INVALID = 'There is nothing to cancel!'
TIPS_HEADER = '💎 Using this bot, you can easily watermark your photos, videos and gifs.\n\n'
START_TIPS = '✔️ <b><u>📜 Manual</u></b>:\n\n' \
'▪️ In [🛠 logo management] you can add up to 4 logos and customize them to your liking. \n\n' \
'▪️ when you send a file (image, video, gif) to the bot, ' \
'it will send the same file back to you with inline keyboard ' \
'and using those keyboard you can watermark that file.\n\n' \
'▪️ In case you want the bot to automatically watermark photos for you, you just need to enter ' \
'the [⚙️ settings] and adjust the watermark automating settings as you see fit.\n\n' \
'▪️ Every day you can watermark up to 3 photos and one video/gif for free, ' \
'but if you need to watermark more photos, videos and GIFs, you can ' \
'buy the premium subscription.\n\n' \
'▪️ Before paying for the premium subscription, you can check the premium features for free for 3 days ' \
'and make sure that it will meet your needs. ' \
'This gift includes a 30MB watermark quota.\n\n' \
'▪️ If you have bought the premium subscription you can watermark unlimited number of photos and ' \
'watermark videos and GIFs until your watermark quota is up\n\n' \
'▪️ Every time you watermark a video or GIF, you’re watermark quota will be reduced by the file size، ' \
'but watermarking photos does not follow the same rule and as long as your premium subscription holds, ' \
'you can watermark photos. .\n\n' \
'▫️ In case you have any trouble contact @WMarkProBotSupport for support.'
START_DEAR = 'Dear'
START_WELCOME = 'Welcome to the <b>Pro Watermark Bot</b>.'
TUT_HEADER = '📜 Where to begin?'
TUT1 = '1- First, click/tap on [🛠 logo management] button and add a new log and customize its settings to your liking.'
TUT2 = '2- Now, you just need to send the photo, video or gif that you want to watermark to the bot ' \
'and wait for the bot to respond. Then use the inline keyboard to watermark your file.'
TUT3 = '3- In case you want the bot to automatically watermark photos for you, you just need to enter ' \
'the [⚙️ settings] and adjust the watermark automating settings as you see fit.'
LANGUAGE_SET = '🌐 Your language was changed to {}.'
QUIT_ERROR = '✖️ This message was sent more than 48 hours ago. It cannot be deleted anymore. 🙄\n\n' \
'You can delete it yourself or leave it be.'
QUIT = '👌✖️'
LIMIT_REPLY_CHARACTER = '⚠️ The number of allowed characters is {max_length} ' \
'while you have entered {user_length} characters!'
SHORTEN_YOUR_TEXT = 'Please shorten your text and send it again:'
CANCEL = '{}/Cancel'.format(LTR)
CORRECT_COMMENT = 'Please adjust your text and resend it:'
PARAGRAPH_FORMAT = '⚠️ Please send your text in the form of a single line and don’t add a new line!'
SELECT_LOGO_POSITION = '👉 Please define the position of your logo:'
LOGO_SETTING = '🛠 <b>Adding and managing logos</b>:\n\n' \
'🔻 You can add and manage up to 4 different logos.\n' \
'🔻 In order to add a new logo click/tap on the [➕ New logo] button.\n' \
'🔻 To customize each logo, click/tap on that logo’s button.'
SEND_LOGO = '🖼 There are a few ways to add new logos and customize them:\n\n' \
'1- Send your desired text to the bot (e.g. @RobomizBot). ' \
'In this case, a default telegram logo will be created for you, ' \
'which you can then customize it using the tools at your disposal.\n\n' \
'2- Use image editing apps (e.g. Photoshop, PicsArt, etc) and make your own transparent logo ' \
'and then send it to the bot (‘Send as file’).\n\n' \
'3- In case your image includes a black or white background ' \
'and you want to remove that background, just send the logo file (‘Send as photo’) ' \
'and the bot will give you an option to remove the background.\n\n' \
'4- You can also send static telegram stickers as a logo.\n\n' \
'👉 <b>Please send your file/text according the above instructions:</b>'
LOGO_SPEC = '<b> 🔖 Logo specifications:</b>'
LOGO_TEXT = '✏️ Logo text: {}'
LOGO_NAME = '✏️ Logo name: {}'
LOGO_OPACITY = '💧 Logo opacity: {}'
LOGO_RATIO = '🔳Ratio of logo▪️ to image⬜ size: {}'
LEFT_RIGHT = '↔ Left and right margin: {}'
UP_DOWN = '↕ Top and bottom margin: {}'
CHANGE_LOGO_SPEC = '👉 To change each one of these specifications, choose the corresponding button.'
SUBMIT_TEXT_LOGO = '👉 Please send the logo text in the form of a single line, using no more than 50 characters:'
SUBMIT_NAME_LOGO = '👉 Please send a name for your logo in the form of a single line, using no more than 20 characters:'
NEW_LOGO_CREATED = '✔️ The new logo is ready.'
SUBMIT_LOGO_COLOR = '🎨 Please choose a color for the icon:'
SUBMIT_TEXT_COLOR = '🎨 Please choose a color for the text:'
SUBMIT_OPACITY = '💧 Please define the logo opacity with respect to the image in percentage:\n\n' \
'Hint: the higher you set this value, the clearer your logo will be. ' \
'And by lowering this value you will increase the transparency of the logo.'
SUBMIT_RATIO = '🔳 Please choose ratio of the logo to the image width :\n' \
'🔻 You can also type in a number between 0-100!'
ONLY_PHOTO = '👉 Now you need to send your logo.\n' \
'Please send your logo:'
LOGO_ACCEPTED = '✔️ Logo received.'
ONLY_RATIO = '👉 Now, you need to set the logo to image size ratio.\n' \
'Please type in a number between 0-100:'
SET_NEW_RATIO = '✔️ The size ratio is changed to {}%.'
SET_NEW_LOGO_NAME= '✔️ The logo name is set.'
INSERT_MARGIN = '👉 To increase the accuracy of logo-positioning ' \
'you can define the space between the logo and each side of your image.\n' \
'To adjust each one, choose the corresponding button:'
INSERT_MARGIN_W = '👉 Here you can define the space between the logo and the left or right edge of the image ' \
'(depending on its position) as a percentage of image width.\n' \
'If you want the logo to stick to the side, choose zero, otherwise set a larger value:'
INSERT_MARGIN_H = '👉 Here you can define the space between the logo and the upper or lower edge of the image ' \
'(depending on its position) as a percentage of image height.\n' \
'If you want the logo to stick to the top/bottom, choose zero. otherwise set a larger value:'
SET_FONT = '🎨 Please choose a font type:'
REMOVE_LOGO = 'Are you sure you want to remove the log?'
SELECT_LOGOTYPE = '👉 Please choose one of the provided designs:'
LOGO_REMOVED = '✅ Logo was removed!'
LOGO_REMOVED_TEMP = '✅ Logo is removed temporarily!\n\n' \
'To apply the changes, click/tap on the [✅ Finalize watermark] button.' # TODO
NO_LOGO_YET = '🔻 You have not set a logo yet. To add a new logo click/tap on [🛠 logo management] button.'
JUST_PRO_WATERMARK = '⚠️ Only Premium users can attach their logo on images that they send.'
SUBMIT_STROKE_COLOR = '🎨 Please pick the color of the text stroke:'
AUTO_RATIO = '✅ The size ratio is set to Auto!\n' \
'In Auto mode, the size ratio will be set according to the logo and image size.'
AUTO_STAT = 'Auto mode'
WHITE_TO_TRANSPARENT = '✅ White background was successfully removed.'
BLACK_TO_TRANSPARENT = '✅ Black background was successfully removed.'
LOGO_TEST_WATERMARKED = '✅ The photo was successfully watermarked.'
LOGO_WATERMARKED = 'The photo was watermarked.'
LOGO_WATERMARKED_TEMP = '✅ THe thumbnail was watermarked.\n\n' \
'👉 To apply the changes, click/tap on the [✅ Finalize watermark] button.' # TODO
ONLY_TEXT = '⚠️ Please send a text message:'
SPAM_LIMIT = '⚠️ In order to counter bot attacks and prevent the bot from slowing down, ' \
'users can not send more than 20 messages in a minute!'
PER_DAY_LIMIT = '⚠️ As a free subscriber, you can only watermark 3 images per day.\n' \
'To get rid of this limitation, please buy the premium subscription.\n' \
'{}/upgrade'.format(LTR)
VIDEO_PER_DAY_LIMIT = '⚠️ As a free subscriber, you can only watermark 1 video or GIF per day.\n' \
'To get rid of this limitation, please buy the premium subscription.\n' \
'{}/upgrade'.format(LTR)
# CONSISTENCY FIX: the sibling constants PER_DAY_LIMIT and
# VIDEO_PER_DAY_LIMIT prefix '/upgrade' with a '{}' placeholder for the
# LTR direction mark; this one had lost the placeholder, making
# .format(LTR) a no-op.  Restored -- rendered text is unchanged because
# LTR is currently ''.
FREE_LIMIT = '⚠️ As a free subscriber, you are not allowed to watermark video’s or gif files.\n' \
             'To get rid of this limitation, please buy the premium subscription.\n' \
             '{}/upgrade'.format(LTR)
BE_PATIENT = 'Please be patient 🙏'
WATERMARK_ALLOWED = 'Please send a photo, a video or a gif file:'
LOW_WATERMARK_VOLUME = '⚠️ The size of this video exceeds your watermark quota!\n' \
'To increase your watermark quota, click/tap on [💎 Premium Subscription] button.'
# JUST_PRO_WATERMARK_VIDEO = '⚠️ Watermarking videos and gif is limited to premium users.\n' \
# 'To get premium subscription, click/tap on [💎 Premium Subscription] button.'
LIMITED_WATERMARK_SIZE = '⚠️ Currently, the max file size of a video to be watermarked is 20MB!\n\n' \
'Watermarking larger files will be possible in the future by upgrading our servers.'
WATERMARK_PRO_VOLUME = 'Remaining watermark quota'
PRO_SUBSCRIPTION = 'Premium subscription validity date'
ACTIVE_TILL = 'Active till'
DEACTIVE_FROM = 'Deactive from'
DEACTIVE = 'Deactive'
NO_LANGUAGE = 'Unknown'
SETTINGS = 'Settings'
SELECT_LOGOS = '👉 In case you want your images to be watermarked automatically and instantly ' \
'You just need to pick a logo and define its default position.'
NO_LOGOS = '⚠️ You haven’t added any logos yet.\n' \
' 👉 Please add a logo form [logo management] section and try again.'
AUTO_LOGO_DEACTIVATED = '☑️ Automatic watermarking logo is deactivated.'
AUTO_LOGO_ACTIVATED = '✅ Automatic watermarking logo is activated!'
VIP_INSERTED = '#Renew subscription\n' \
'User: {} #u{}\n' \
'Days: {}\n' \
'Old date: {}, new date: {}\n' \
'Volume: {}\n' \
'Old volume: {} MB, new volume: {} MB'
PROMOTE_BY_ADMIN_HEADER = 'Dear {}:\n\n'
TIME_PROMOTION = '✅ Your premium subscription was renewed and is valid until {}. \n\n'
VOL_INCREASE = '✅ Your watermark quota was increased to {} MB. \n\n'
PROMOTE_BY_ADMIN_FOOTER = 'Thank you for your purchase 🙏😊'
END_OF_PROMOTION = 'Dear user {}:\n\n' \
'⚠️ Your premium subscription has come to an end.\n' \
'👉 To renew your subscription you just need to send /upgrade and act accordingly.\n\n' \
'Thank you 🙏😊'
SUBMIT_ICON_SIZE = '🎨 Please define the icon size:'
CURRENT_SUBSCRIPTION = 'Your current subscription: {}'
FREE = 'Free'
PREMIUM = 'Premium'
PREMIUM_NOTICE = '🔶 Things to keep in mind before buying a premium subscription'
PREMIUM_UNLIMITED_PHOTO = '🔸 A premium subscription enables you to watermark an unlimited number of images ' \
'before your subscription ends.'
PREMIUM_VIDEO_SIZE_LIMIT = '🔸 Currently, using the premium subscription the max video/gif size ' \
'you can watermark is 20MB.' \
' (Watermarking larger files will be possible in the future by upgrading our servers.)'
PREMIUM_FREE = '🔸 Using our free subscription plan you can watermark up to 3 photos and one video/GIF per day.'
PREMIUM_FREE_TRIAL = '🔸 Before attempting to buy the premium subscription ' \
'you can check out the bot’s performance and features using our 3-day free trial.'
PREMIUM_VOLUME = '🔸 Upon the end of your premium subscription, your watermark quota will not be lost ' \
'and after renewing your subscription it will be added to your purchased quota.'
PREMIUM_WATERMARK_VOLUME = '🔸 You can renew your subscription or increase your watermark quota at any time. ' \
'In the case, your purchased quota will be added to your current quota.'
PREMIUM_PLANS = '📜 Premium plans:'
PREMIUM_ONE = '➊ 📦 1 month subscription + 200MB watermark quota ---» ($ 4.99)'
PREMIUM_TREE = '➋ 📦 3 month subscription + 600MB watermark quota ---» ($ 11.99)'
PREMIUM_SIX = '➌ 📦 6 month subscription + 1200MB watermark quota ---» ($ 17.99)'
PREMIUM_TWELVE = '➍ 📦 12 month subscription + 2400MB watermark quota ---» ($ 23.99)'
EXTRA_VOLUME = '🔵 In case you need to increase your watermark quota you can purchase one of the below options:'
VOLUME1 = '➎ 🎞 200MB ---» (1.99 $)'
VOLUME2 = '➏ 🎞 500MB ---» (3.99 $)'
VOLUME3 = '➐ 🎞 1500MB ---» (9.99 $)'
VOLUME4 = '➑ 🎞 5000MB ---» (24.99 $)'
PAYMENT_DESCRIPTION = '🔻 Depending on your needs, choose one of the subscription plans ' \
'and complete your payment using one of the methods provided.'
PRO_ACTIVATED = '🔥🎁 Free 3-day premium subscription with 30MB watermark quota is activated for you:'
START = '📅 Start:'
END = '📅 End:'
PRO_ACTIVATE_BEFORE = '⚠️ Free premium subscription has already been activated for you!\n\n'
PAYMENT_METHODS = 'Please pay for your desired plan using one of the methods provided and then ' \
' send your ID along with your purchase details to our support team.\n' \
'Your ID: {}\n' \
'Support: @WMarkProBotSupport'
PAYMENT_HEADER = '🏦 Payment methods:'
PAYPAL = '💳 Pay with Paypal'
PAYPAL_DESCRIPTION = 'Pay through the link below.\n' \
'https://paypal.me/robomizbot'
CRYPTOCURRENCY = '💎 Pay using the cryptocurrencies'
CRYPTOCURRENCY_DESCRIPTION = 'Multiply the amount in dollar by the desired currency’s exchange rate ' \
'and transfer the result to the provided wallet address.'
EXCHANGE_RATE = 'Exchange rate: {}'
BITCOIN = 'Bitcoin'
ETHEREUM = 'Ethereum'
LITECOIN = 'Litecoin'
TETHER = 'Tether'
WALLET_ADDRESS = 'Wallet address: {}'
OTHER_CRYPTO = '🔻 To pay using other currencies contact our support team @WMarkProBotSupport.'
|
[
"noreply@github.com"
] |
paradox70.noreply@github.com
|
18ba892ab45521ae1b1b371ff1ae391f347566b3
|
c7c054ae527385c60233f0805cd365bddc185198
|
/src/model/stream_temporal/self_attention.py
|
208675d23d7bb07858016e561dd7305d09729f54
|
[] |
no_license
|
tlhar-hcmut/tkhar-service
|
8b97788205b9a53bf25aa1b8821dbe7095c743fc
|
748693f5b071dcb2c5a429b9a9bdd81c339e8874
|
refs/heads/master
| 2023-07-03T18:56:24.692810
| 2021-08-09T11:31:16
| 2021-08-09T11:31:16
| 382,856,390
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
import torch
from torch import nn
import torch.nn.functional as F
import math
class SelfAttention(nn.Module):
    """Single-head scaled dot-product self-attention with a padding mask.

    Args:
        len_feature_input_selfA: size of each input feature vector.
        len_feature_hidden_selfA: size of the projected key/query/value space.
        dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, len_feature_input_selfA, len_feature_hidden_selfA, dropout=0, **kwargs):
        super(SelfAttention, self).__init__(**kwargs)
        self.len_feature_hidden_selfA = len_feature_hidden_selfA
        # Learned key / query / value projections (no bias).
        self.W_k = nn.Linear(len_feature_input_selfA, len_feature_hidden_selfA, bias=False)
        self.W_q = nn.Linear(len_feature_input_selfA, len_feature_hidden_selfA, bias=False)
        self.W_v = nn.Linear(len_feature_input_selfA, len_feature_hidden_selfA, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, X, mask):
        """Compute masked self-attention.

        Args:
            X: input of shape (batch, seq, len_feature_input_selfA)
               -- rank fixed by the permute(0, 2, 1) below.
            mask: (batch, seq); nonzero entries mark valid positions.

        Returns:
            (attention, scores): context vectors of shape
            (batch, seq, len_feature_hidden_selfA) and the attention
            weights of shape (batch, seq, seq).
        """
        K = self.W_k(X)
        Q = self.W_q(X)
        V = self.W_v(X)
        # Raw attention logits, scaled by sqrt(d) as in scaled dot-product
        # attention.
        scores = torch.matmul(Q, K.permute(0, 2, 1)) / math.sqrt(self.len_feature_hidden_selfA)
        # Pairwise validity: position (i, j) is valid iff both i and j are.
        pair_mask = mask.float().unsqueeze(-1) * mask.float().unsqueeze(1)
        # BUG FIX: the mask must be applied BEFORE the softmax, with -inf,
        # so masked positions receive zero weight.  The original applied it
        # AFTER the softmax with a tiny constant (10e-10), so masked
        # positions still got probability mass and rows no longer summed
        # to 1.
        scores = scores.masked_fill(pair_mask == 0.0, float('-inf'))
        scores = F.softmax(scores, -1)
        # Zero out masked entries; this also replaces the NaNs that softmax
        # produces for fully-masked rows (all -inf inputs).
        scores = scores.masked_fill(pair_mask == 0.0, 0.0)
        scores = self.dropout(scores)
        # (batch, seq, len_feature_hidden_selfA)
        attention = torch.matmul(scores, V)
        return attention, scores
|
[
"khoidd@outlook.com"
] |
khoidd@outlook.com
|
713c986d6b9f1ef1cdad8fc0b7896d0b3c1a21db
|
e97044be1d4f7a64ae36e73536a1ea73c4f96b40
|
/05-functions/4.6-exercise.py
|
db23154efa9ae3c3535eb5e46eca0d757af2d0f7
|
[] |
no_license
|
saikatdas0790/ossu-python-for-everybody
|
d8f203cb49181965d43a635744c6b93a26f7c6a0
|
4691983af0558ebd0b178f4e7e6d79e37c02ca54
|
refs/heads/main
| 2023-02-16T11:51:20.240186
| 2021-01-15T13:53:18
| 2021-01-15T13:53:18
| 328,195,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
# 4.6 Write a program to prompt the user for hours and rate per hour using input to compute gross pay. Pay should be the normal rate for hours up to 40 and time-and-a-half for the hourly rate for all hours worked above 40 hours. Put the logic to do the computation of pay in a function called computepay() and use the function to do the computation. The function should return a value. Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75). You should use input to read a string and float() to convert the string to a number. Do not worry about error checking the user input unless you want to - you can assume the user types numbers properly. Do not name your variable sum or use the sum() function.
def computepay(hours, ratePerHour):
    """Gross pay: straight time up to 40 hours, time-and-a-half beyond."""
    regular = min(hours, 40)
    overtime = max(hours - 40, 0)
    return regular * ratePerHour + 1.5 * overtime * ratePerHour
# Read hours and hourly rate from the user, then print the gross pay.
hours = float(input("Enter hours: "))
ratePerHour = float(input("Enter rate per hour: "))
# NOTE(review): "% s" (with a space flag) is valid printf-style formatting
# and prints the same as "%s" here -- presumably a typo for "%s".
print("Pay % s" % computepay(hours, ratePerHour))
|
[
"saikatdas0790@gmail.com"
] |
saikatdas0790@gmail.com
|
34e6ed520139f454251dab9794f32b71f1aa5427
|
c26699c80e58f2bb89b18c1dd051084cb8f889ef
|
/exemple2.py
|
a5b6eef0cdcc9e483a45046e8cecc1308025ee35
|
[] |
no_license
|
CryptM/Phyton-Curse-exadel-exemple
|
0141863d2449c7a364242e91958212ac49830b53
|
c104cc17346b075b89e67dd6ff34e446082065f5
|
refs/heads/master
| 2021-01-20T20:14:46.188995
| 2016-07-15T08:25:52
| 2016-07-15T08:25:52
| 62,055,061
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
# Amortization table: 12 months of minimum payments on a credit card,
# compounding a fixed annual interest rate monthly.
balance = float(input('Enter outstanding balance on the credit card in $:\n'))
interest = float(input('Enter annual interest rate in %:\n')) / 100
minpay = float(input('Enter minimum monthly payment rate in %:\n')) / 100

# FIX: removed the dead manual counter (`x = 1` before the loop and
# `x += 1` inside it) -- the `for` statement rebinds x every iteration,
# so both were misleading no-ops.
tpay = 0  # running total of payments made
for x in range(1, 13):  # months 1..12
    ipay = round((interest / 12 * balance), 2)   # interest portion this month
    minpayt = round((minpay * balance), 2)       # minimum payment this month
    ppayd = round((minpayt - ipay), 2)           # principal portion this month
    balance = round((balance - ppayd), 2)
    print('Month:', x, '\nMinimum monthly payment:', minpayt, '\nInterest paid:', ipay, '\nPrincipal paid:', ppayd, '\nRemaining balance:', balance)
    tpay = round((tpay + minpayt), 2)
print('RESULTS\nTotal amount paid:', tpay, '\nRemaining balance:', balance)
|
[
"noreply@github.com"
] |
CryptM.noreply@github.com
|
429d78f14c7c81553c3df334eaa9608e3404e005
|
bf32a07fa820048888649d8238b69c58ef0526da
|
/bland/settings.py
|
c74708383db1264fb9ec149d2989a71ab1d2ac13
|
[] |
no_license
|
isebarn/blandcar
|
e3de5c6c8ecbea5d1bbe98b84fddd19df0aeb573
|
b8e8d5b4d6e1cf5f467db6df51fc92727ddf654b
|
refs/heads/master
| 2023-08-02T23:46:30.981650
| 2021-10-04T19:15:38
| 2021-10-04T19:15:38
| 406,508,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,074
|
py
|
# Scrapy settings for bland project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'bland'

SPIDER_MODULES = ['bland.spiders']
NEWSPIDER_MODULE = 'bland.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'bland (+http://www.yourdomain.com)'

# Obey robots.txt rules
# NOTE(review): robots.txt compliance is deliberately disabled here --
# confirm the target sites permit crawling before deploying this spider.
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'bland.middlewares.BlandSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'bland.middlewares.BlandDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'bland.pipelines.BlandPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"isebarn182@gmail.com"
] |
isebarn182@gmail.com
|
9b9c230693ad6cc15ec90c66e1ff92d1b31b70ee
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/Mailpile/2015/4/workers.py
|
138de34ac66eda21d4d632d1356d11929363fc9b
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005
| 2021-01-28T19:40:51
| 2021-01-28T19:40:51
| 306,497,459
| 1
| 1
| null | 2020-11-24T20:56:18
| 2020-10-23T01:18:07
| null |
UTF-8
|
Python
| false
| false
| 12,188
|
py
|
import threading
import traceback
import time
import mailpile.util
from mailpile.i18n import gettext as _
from mailpile.i18n import ngettext as _n
from mailpile.util import *
##[ Specialized threads ]######################################################
class Cron(threading.Thread):
    """
    An instance of this class represents a cron-like worker thread
    that manages and executes tasks in regular intervals
    """
    def __init__(self, name=None, session=None):
        """
        Initializes a new Cron instance.
        Note that the thread will not be started automatically, so
        you need to call start() manually.
        Keyword arguments:
        name -- The name of the Cron instance
        session -- Currently unused
        """
        threading.Thread.__init__(self)
        self.ALIVE = False
        # Daemonize under test so a hung cron never blocks test exit.
        self.daemon = mailpile.util.TESTING
        self.name = name
        self.session = session
        self.last_run = time.time()
        self.running = 'Idle'
        # name -> [name, interval_seconds, task_callable, last_run_ts]
        self.schedule = {}
        # Seconds between wake-ups; shrunk by __recalculateSleep().
        self.sleep = 10
        # This lock is used to synchronize
        self.lock = WorkerLock()
    def __str__(self):
        return '%s: %s (%ds)' % (threading.Thread.__str__(self),
                                 self.running, time.time() - self.last_run)
    def add_task(self, name, interval, task):
        """
        Add a task to the cron worker queue
        Keyword arguments:
        name -- The name of the task to add
        interval -- The interval (in seconds) of the task
        task -- A task function
        """
        with self.lock:
            self.schedule[name] = [name, interval, task, time.time()]
            self.sleep = 1
            self.__recalculateSleep()
    def __recalculateSleep(self):
        """
        Recalculate the maximum sleep delay.
        This shall be called from a lock zone only
        """
        # (Re)alculate how long we can sleep between tasks
        # (sleep min. 1 sec, max. 61 sec)
        # --> Calculate the GCD of the task intervals
        for i in range(2, 61):  # i = second
            # Check if any scheduled task intervals are != 0 mod i
            filteredTasks = [True for task in self.schedule.values()
                             if int(task[1]) % i != 0]
            # We can sleep for i seconds if i divides all intervals
            if (len(filteredTasks) == 0):
                self.sleep = i
    def cancel_task(self, name):
        """
        Cancel a task in the current Cron instance.
        If a task with the given name does not exist,
        ignore the request.
        Keyword arguments:
        name -- The name of the task to cancel
        """
        if name in self.schedule:
            with self.lock:
                del self.schedule[name]
                self.__recalculateSleep()
    def run(self):
        """
        Thread main function for a Cron instance.
        """
        # Main thread loop
        # NOTE(review): the whole loop runs inside session.config.index_check;
        # confirm holding that context for the thread's lifetime is intended.
        with self.session.config.index_check:
            self.ALIVE = True
            while self.ALIVE and not mailpile.util.QUITTING:
                tasksToBeExecuted = []  # Contains tuples (name, func)
                now = time.time()
                # Check if any of the task is (over)due
                with self.lock:
                    for task_spec in self.schedule.values():
                        name, interval, task, last = task_spec
                        if last + interval <= now:
                            tasksToBeExecuted.append((name, task))
                # Execute the tasks
                for name, task in tasksToBeExecuted:
                    # Set last_executed
                    # NOTE(review): schedule[name] is touched without self.lock
                    # here -- a concurrent cancel_task(name) would raise
                    # KeyError; confirm this race is acceptable.
                    self.schedule[name][3] = time.time()
                    try:
                        self.last_run = time.time()
                        self.running = name
                        task()
                    except Exception, e:
                        self.session.ui.error(('%s failed in %s: %s'
                                               ) % (name, self.name, e))
                    finally:
                        self.last_run = time.time()
                        self.running = 'Finished %s' % self.running
                # Some tasks take longer than others, so use the time before
                # executing tasks as reference for the delay
                sleepTime = self.sleep
                delay = time.time() - now + sleepTime
                # Sleep for max. 1 sec to react to the quit signal in time
                while delay > 0 and self.ALIVE:
                    # self.sleep might change during loop (if tasks are modified)
                    # In that case, just wake up and check if any tasks need
                    # to be executed
                    if self.sleep != sleepTime:
                        delay = 0
                    else:
                        # Sleep for max 1 second to check self.ALIVE
                        time.sleep(max(0, min(1, delay)))
                        delay -= 1
    def quit(self, session=None, join=True):
        """
        Send a signal to the current Cron instance
        to stop operation.
        Keyword arguments:
        join -- If this is True, this method will wait until
                the Cron thread exits.
        """
        self.ALIVE = False
        if join:
            try:
                self.join()
            except RuntimeError:
                pass
class Worker(threading.Thread):
def __init__(self, name, session, daemon=False):
threading.Thread.__init__(self)
self.daemon = mailpile.util.TESTING or daemon
self.name = name or 'Worker'
self.ALIVE = False
self.JOBS = []
self.JOBS_LATER = []
self.LOCK = threading.Condition(WorkerRLock())
self.last_run = time.time()
self.running = 'Idle'
self.pauses = 0
self.session = session
self.important = False
def __str__(self):
return ('%s: %s (%ds, jobs=%s, jobs_after=%s)'
% (threading.Thread.__str__(self),
self.running,
time.time() - self.last_run,
len(self.JOBS), len(self.JOBS_LATER)))
def add_task(self, session, name, task,
after=None, unique=False, first=False):
with self.LOCK:
if unique:
for s, n, t in self.JOBS:
if n == name:
return
if unique and after:
for ts, (s, n, t) in self.JOBS_LATER:
if n == name:
return
snt = (session, name, task)
if first:
self.JOBS[:0] = [snt]
elif after:
self.JOBS_LATER.append((after, snt))
else:
self.JOBS.append(snt)
self.LOCK.notify()
def add_unique_task(self, session, name, task, **kwargs):
return self.add_task(session, name, task, unique=True, **kwargs)
def do(self, session, name, task, unique=False):
if session and session.main:
# We run this in the foreground on the main interactive session,
# so CTRL-C has a chance to work.
try:
self.pause(session)
rv = task()
finally:
self.unpause(session)
else:
self.add_task(session, name, task, unique=unique)
if session:
rv = session.wait_for_task(name)
else:
rv = True
return rv
def _keep_running(self, **ignored_kwargs):
return (self.ALIVE and not mailpile.util.QUITTING)
def _failed(self, session, name, task, e):
self.session.ui.debug(traceback.format_exc())
self.session.ui.error(('%s failed in %s: %s'
) % (name, self.name, e))
if session:
session.report_task_failed(name)
def _play_nice_with_threads(self):
play_nice_with_threads()
def run(self):
self.ALIVE = True
while self._keep_running():
with self.LOCK:
while len(self.JOBS) < 1:
if not self._keep_running(locked=True):
return
self.LOCK.wait()
self._play_nice_with_threads()
with self.LOCK:
session, name, task = self.JOBS.pop(0)
if len(self.JOBS) < 0:
now = time.time()
self.JOBS.extend(snt for ts, snt
in self.JOBS_LATER if ts <= now)
self.JOBS_LATER = [(ts, snt) for ts, snt
in self.JOBS_LATER if ts > now]
try:
self.last_run = time.time()
self.running = name
if session:
session.ui.mark('Starting: %s' % name)
session.report_task_completed(name, task())
else:
task()
except (IOError, OSError), e:
self._failed(session, name, task, e)
time.sleep(1)
except Exception, e:
self._failed(session, name, task, e)
finally:
self.last_run = time.time()
self.running = 'Finished %s' % self.running
def pause(self, session):
with self.LOCK:
self.pauses += 1
first = (self.pauses == 1)
if first:
def pause_task():
session.report_task_completed('Pause', True)
session.wait_for_task('Unpause', quiet=True)
self.add_task(None, 'Pause', pause_task)
session.wait_for_task('Pause', quiet=True)
def unpause(self, session):
with self.LOCK:
self.pauses -= 1
if self.pauses == 0:
session.report_task_completed('Unpause', True)
def die_soon(self, session=None):
def die():
self.ALIVE = False
self.add_task(session, '%s shutdown' % self.name, die)
def quit(self, session=None, join=True):
self.die_soon(session=session)
if join:
try:
self.join()
except RuntimeError:
pass
class ImportantWorker(Worker):
    """A Worker that refuses to die while jobs remain and retries failures."""
    def _keep_running(self, _pass=1, locked=False):
        # This is a much more careful shutdown test, that refuses to
        # stop with jobs queued up and tries to compensate for potential
        # race conditions in our quitting code by waiting a bit and
        # then re-checking if it looks like it is time to die.
        # _pass -- 2 on the recursive re-check after the grace sleep.
        # locked -- True when the caller holds self.LOCK (we must release
        #           it before sleeping so producers can still enqueue).
        if len(self.JOBS) > 0:
            return True
        else:
            if _pass == 2:
                return Worker._keep_running(self)
            if self.ALIVE and not mailpile.util.QUITTING:
                return True
            else:
                if locked:
                    try:
                        self.LOCK.release()
                        time.sleep(1)
                    finally:
                        self.LOCK.acquire()
                else:
                    time.sleep(1)
                return self._keep_running(_pass=2, locked=locked)
    def _failed(self, session, name, task, e):
        # Important jobs! Re-queue if they fail, it might be transient
        Worker._failed(self, session, name, task, e)
        self.add_unique_task(session, name, task)
    def _play_nice_with_threads(self):
        # Our jobs are important, if we have too many we stop playing nice
        if len(self.JOBS) < 10:
            play_nice_with_threads()
class DumbWorker(Worker):
    """Synchronous Worker: runs every task immediately on the caller's
    thread (under the lock); the background loop is a no-op."""
    def add_task(self, session, name, task, unique=False):
        with self.LOCK:
            return task()
    def add_unique_task(self, session, name, task):
        return self.add_task(session, name, task)
    def do(self, session, name, task, unique=False):
        return self.add_task(session, name, task)
    def run(self):
        # Nothing to do: tasks were already executed in add_task().
        pass
# Run this module's doctests when executed directly; exit non-zero on failure.
if __name__ == "__main__":
    import doctest
    import sys
    result = doctest.testmod(optionflags=doctest.ELLIPSIS,
                             extraglobs={'junk': {}})
    print '%s' % (result, )
    if result.failed:
        sys.exit(1)
|
[
"rodrigosoaresilva@gmail.com"
] |
rodrigosoaresilva@gmail.com
|
1619261524bf979aaeaaa95a5d6a31fb3612b6ce
|
79fb5c5552645e3b2d4428a857ee46d448183eac
|
/train_mvter.py
|
f7f10d1b2e553eae44d7aaaf531cb541a94447fb
|
[] |
no_license
|
ChengChen2020/mvter
|
cac2e582880b1bb500080c8ecd437db9e4507882
|
aafe576b1a4d9b6233eec714e81763c26e84978e
|
refs/heads/main
| 2023-06-17T02:22:49.371659
| 2021-06-25T03:24:28
| 2021-06-25T03:24:28
| 372,432,718
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,503
|
py
|
import os
import shutil
import json
import argparse
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tools.mvDataset import mvterDataset
from tools.mvTrainer import mvterTrainer
from model.MVTER import MVTER
# Command-line configuration for the MVTER training run.
parser = argparse.ArgumentParser(description='Multi-View Transformation Equivariant Representations')
parser.add_argument("-name", "--name", type=str, help="name of the experiment", default="train_mvter_on_modelnet40")
parser.add_argument("-bs", "--batch_size", type=int, help="batch size", default=24)
parser.add_argument("-lr", type=float, help="learning rate", default=1e-3)
parser.add_argument("-momentum", type=float, help="momentum", default=0.9)
parser.add_argument("-weight_decay", type=float, help="weight decay", default=1e-4)
parser.add_argument("-step_size", type=float, help="decay lr every step epochs", default=10)
parser.add_argument("-gamma", type=float, help="lr decay factor", default=0.5)
parser.add_argument("-cnn_name", "--cnn_name", type=str, help="backbone cnn model name", default="googlenet")
parser.add_argument("-num_views", type=int, help="number of views", default=12)
parser.add_argument("-origin_path", type=str, default="rawdata/origin_12x")
parser.add_argument("-rotate_path", type=str, default="rawdata/rotate_12x")
parser.add_argument("-results_dir", type=str, help="path to cache (default: none)", default='')
parser.add_argument("-resume", type=str, default='')
parser.set_defaults(train=False)
def create_folder(log_dir):
    """Create *log_dir* for run summaries, wiping any previous contents."""
    if os.path.exists(log_dir):
        # A previous run left results here: warn, then start from scratch.
        print('WARNING: summary folder already exists!! It will be overwritten!!')
        shutil.rmtree(log_dir)
    os.mkdir(log_dir)
if __name__ == '__main__':
    args = parser.parse_args()
    # log_dir = args.name
    # create_folder(args.name)
    # Results go into a fresh, timestamped cache folder under the experiment name.
    if args.results_dir == '':
        args.results_dir = '/cache-' + datetime.now().strftime("%Y-%m-%d-%H-%M-%S-mvter")
    log_dir = args.name + args.results_dir
    create_folder(log_dir)
    # Persist the full run configuration for reproducibility.
    config_f = open(os.path.join(log_dir, 'config.json'), 'w')
    json.dump(vars(args), config_f)
    config_f.close()
    # NOTE(review): nclasses=33 although the default experiment name says
    # ModelNet40 -- confirm the intended dataset/class count.
    mvter = MVTER(m=args.num_views, nclasses=33, cnn_name=args.cnn_name)
    optimizer = torch.optim.SGD(mvter.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
    epoch_start = 1
    # Pre-computed per-sample rotation ground truth (dict stored in a .npy).
    rotate_patch = np.load('rotate_gd/rotate_patched.npy', allow_pickle=True).item()
    assert(len(rotate_patch) == 9449)
    train_dataset = mvterDataset(args.origin_path, args.rotate_path, rotate_patch, train=True)
    test_dataset = mvterDataset(args.origin_path, args.rotate_path, rotate_patch, train=False)
    train_iter = DataLoader(dataset=train_dataset,
                            batch_size=args.batch_size,
                            shuffle=True,
                            pin_memory=True,
                            drop_last=True)
    test_iter = DataLoader(dataset=test_dataset,
                           batch_size=args.batch_size,
                           shuffle=False,
                           pin_memory=True,
                           drop_last=True)
    # trainer
    trainer = mvterTrainer(log_dir, mvter, train_iter, test_iter, optimizer, scheduler, num_views=12, w=1.0)
    trainer.train(args.resume, epoch_start, 200)
|
[
"cc6858@nyu.edu"
] |
cc6858@nyu.edu
|
ebdfbced451f8967c73a5e01be1bc02195a54966
|
9fcb747d9ac1eea6162ff5ac083d221c6b38f680
|
/test.py
|
593e8b719763ffa3f15e58be86ac7024efcfc923
|
[] |
no_license
|
rockiecxh/learnpython
|
61c3a7f9ea033cbde72c6266e8554dd3529837af
|
9bde1c2c079991605a1c3695ae648351c7ffdfbc
|
refs/heads/master
| 2021-01-20T03:40:24.116147
| 2015-08-15T06:25:39
| 2015-08-15T06:25:39
| 37,505,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,336
|
py
|
#!/usr/bin/env python
#-*- encoding:UTF-8 -*-
import os, sys, gzip, time, optparse, itertools, datetime, heapq, operator
from operator import itemgetter
from heapq import *
from math import *
from collections import defaultdict, namedtuple
earth_radius=6378.137 * 1000 # Unit in meter
def rad(degrees):
    """Convert an angle from degrees to radians."""
    return degrees * pi / 180.0
# Great-circle distance between two coordinates (haversine formula).
def distance(lat1, lng1, lat2, lng2):
    """Return the distance in meters between two lat/lng points."""
    phi1 = rad(lat1)
    phi2 = rad(lat2)
    dphi = phi1 - phi2
    dlmb = rad(lng1) - rad(lng2)
    h = pow(sin(dphi / 2), 2) + cos(phi1) * cos(phi2) * pow(sin(dlmb / 2), 2)
    arc = 2 * asin(sqrt(h))
    return abs(arc * earth_radius)
# Smoke test: print the distance between two nearby coordinates (Python 2 print).
print distance(37.5077724, 127.0565523, 37.50753185, 127.0560124)
# convert a date string to a Unix timestamp (seconds since the epoch)
def datetime2timestamp(date, pattern):
    """Parse *date* with the strptime *pattern* and return an int Unix
    timestamp, interpreted in the local timezone (via time.mktime)."""
    parsed = time.strptime(date, pattern)
    return int(time.mktime(parsed))
# a function to define an enum-like class
def enum(*sequential, **named):
    """Build an enum-like class: enum('A', 'B', C=9) has A=0, B=1, C=9.

    BUGFIX: the original `dict((x, i) for i, x in enumerate(sequential), **named)`
    is a SyntaxError -- an unparenthesized generator expression must be the
    sole argument.  zip + range produces the same name->index mapping.
    """
    enums = dict(zip(sequential, range(len(sequential))), **named)
    return type('Enum', (), enums)
# define the enum
State = enum('STOP', 'RUN')
print(State.STOP)
# NOTE(review): State.STOP is a plain int attribute, so this prints False.
print(isinstance(State.STOP , State))
a=State.STOP
b=State.STOP
# Helper: True when the given value is the STOP state.
def isStop(a):
    return a == State.STOP
print(isStop(a))
# sample
now = "2015-07-24 09:53:21"
then = "2015-07-24 09:53:31"
# calculate the time diff
nowt = datetime2timestamp(now, "%Y-%m-%d %H:%M:%S")
thent = datetime2timestamp(then, "%Y-%m-%d %H:%M:%S")
print thent - nowt
# define a dict of list
d = defaultdict(list)
d['a'].append('a')
d['a'].append('b')
d['b'].append('b')
print "length of defaultdict is %s" % len(d)
# generate a list of STOP | RUN | STOP | RUN ...
states=[]
for x in range(5):
    states.append((x, State.RUN if x % 2 == 1 else State.STOP))
def args_to_tuple(lst):
    """Pair up whitespace-separated numbers: "1 2 3 4" -> [(1.0, 2.0), (3.0, 4.0)]."""
    nums = iter(map(float, lst.split()))
    return [(first, next(nums)) for first in nums]
print states, len(states)
# composite a (0,1),(2,3),(4,5) tuple list
offset=0
for x in zip(states[offset::2], states[offset+1::2]):
    print x
    print "%s, %s" % (x[0], x[1])
# get all combinations of (x, y, z) with offsets 0,1,2
tl=[(x,y,z) for x in states[0::2] for y in states[1::2] for z in states[2::2]]
for x in tl:
    print x
# get all the indexes of (0,2,4,6,...)
# NOTE: len(states)/2 relies on Python 2 integer division.
indexes=[x*2+offset for x in range(len(states)/2) if x*2+2 < len(states) - offset]
print "indexes: %s" % indexes
result=[]
for x in indexes:
    print "offset: %s,%s,%s" % (x, x+1, x+2)
    result.append((states[x], states[x+1], states[x+2]))
print result
for (x, y, z) in result:
    print "x:%s, y:%s, z:%s)" % (x, y, z)
tl=[(x,y,z) for x in states[0::2] for y in states[1::2] for z in states[2::2]]
for x in tl:
    #print x
    pass
# Parses one delimited invoice record.  The (line, text) tuple parameter
# is Python-2-only syntax (tuple parameter unpacking, removed in Python 3).
class Invoice(object):
    """docstring for Invoice"""
    def __init__(self, (line, text)):
        self.line=line
        #self.text=text
        # Split `text` on the module-level `delimiter` into the four fields.
        (
            self.invoiceNumber,
            self.workerId,
            self.lat,
            self.lng
        )=(part.strip() for part in text.split(delimiter))
    def key(self):
        return self.invoiceNumber
    def __str__(self):
        return "{invoiceNumber: %s, workerId: %s, lat: %s, lng: %s}" % (self.invoiceNumber, self.workerId, self.lat, self.lng)
# `delimiter` is defined after the class body but before instantiation,
# which is fine since __init__ reads it at call time.
delimiter='|'
invoice = Invoice(('1','|'.join( ['invoiceNumber','workerId','lat','lng'] )))
if invoice:
    print invoice
|
[
"rockiecxh@gmail.com"
] |
rockiecxh@gmail.com
|
744eac14054c0bec2159ee8243ccad896cd83d51
|
67d8173a716da10a7350213d98938aae9f2115ce
|
/LeetCode/LC_PY_ANSWERS/reconstruct-original-digits-from-english.py
|
60ac4b9e82b9b8cf1a1df6cd3c39427c092ff584
|
[
"MIT"
] |
permissive
|
jxie0755/Learning_Python
|
94490d41bdf93acf8396f843328e38b6da310b0f
|
143422321cbc3715ca08f6c3af8f960a55887ced
|
refs/heads/master
| 2021-11-02T22:47:35.790239
| 2021-09-26T04:26:23
| 2021-09-26T04:26:23
| 101,445,132
| 0
| 2
| null | 2019-02-19T15:48:44
| 2017-08-25T22:00:16
|
Python
|
UTF-8
|
Python
| false
| false
| 901
|
py
|
# Time: O(n)
# Space: O(1)
from collections import Counter
class Solution(object):
    def originalDigits(self, s):
        """
        :type s: str
        :rtype: str
        Reconstruct the sorted digit string whose spelled-out English
        words are an anagram of *s*.
        """
        # Letter tallies for each spelled-out digit word.
        digit_words = ["zero", "one", "two", "three",
                       "four", "five", "six", "seven",
                       "eight", "nine"]
        word_counts = [Counter(w) for w in digit_words]
        # Greedy order: even digits each contain a letter unique among all
        # words; the remaining digits become unambiguous afterwards.
        greedy_order = [0, 2, 4, 6, 8, 1, 3, 5, 7, 9]
        marker = ["z", "o", "w", "t", "u",
                  "f", "x", "s", "g", "n"]
        remaining = Counter(s)
        digits = []
        for d in greedy_order:
            while remaining[marker[d]] > 0:
                remaining -= word_counts[d]
                digits.append(d)
        return "".join(map(str, sorted(digits)))
|
[
"30805062+jxie0755@users.noreply.github.com"
] |
30805062+jxie0755@users.noreply.github.com
|
415b9ed00563831f2888b134fb2da5063812ed97
|
461c78e97658e4a0c89655ca36af3774f7c40457
|
/src/apps/posts/views.py
|
865eee02ba6eacf1a4977a1e751c47ad0807a28a
|
[] |
no_license
|
AlekseiChirkov/social-network
|
62bfb76467c1b3e7a156b6265f1f23cac0d0d140
|
3f33b13ffdf03f41cfdfbd2291d23ddcb8d7f2ed
|
refs/heads/master
| 2023-06-20T03:06:47.886321
| 2021-07-23T10:36:32
| 2021-07-23T10:36:32
| 386,913,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,048
|
py
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import CreateAPIView, ListAPIView, ListCreateAPIView
from rest_framework.permissions import IsAuthenticated, AllowAny
from apps.posts.models import Post, Like
from apps.posts.serializers import (
PostSerializer, LikeSerializer, LikeAnalyticsSerializer
)
from apps.posts.services import LikesAnalyticsService
class PostListCreateAPIView(ListCreateAPIView):
    """API endpoint that lists all posts and accepts new ones."""
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    permission_classes = (AllowAny, )

    def list(self, request, *args, **kwargs):
        """
        Return every post serialized, with HTTP 200.
        :param request: WSGIRequest - method and url
        :param args: other args
        :param kwargs: other kwargs
        :return: response with status and data
        """
        serialized = self.serializer_class(self.queryset.all(), many=True)
        return Response(serialized.data, status=status.HTTP_200_OK)

    def create(self, request, *args, **kwargs) -> Response:
        """
        Create a post owned by the requesting user.
        :param request: WSGIRequest - method and url
        :param args: other args
        :param kwargs: other kwargs
        :return: 201 with the post data, or 400 with validation errors
        """
        serialized = self.serializer_class(data=request.data)
        if not serialized.is_valid():
            return Response(serialized.errors, status=status.HTTP_400_BAD_REQUEST)
        serialized.save(creator=request.user)
        return Response(serialized.data, status=status.HTTP_201_CREATED)
class LikeCreateAPIView(CreateAPIView):
    """
    Class for post's likes
    """
    serializer_class = LikeSerializer
    permission_classes = (IsAuthenticated, )

    def create(self, request, *args, **kwargs) -> Response:
        """
        Method creates like for post
        :param request: WSGIRequest - method and url
        :param args: other args
        :param kwargs: other kwargs
        :return: 201 with the like data, 400 with errors otherwise
        """
        user = request.user
        try:
            post = Post.objects.get(pk=self.kwargs.get("pk"))
        except Post.DoesNotExist:
            return Response(
                {"error": "Post does not exist"},
                status=status.HTTP_400_BAD_REQUEST
            )
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            serializer.save(post=post, user=user)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # BUGFIX: invalid payloads previously fell through and returned None
        # (an HTTP 500); report validation errors as a 400 instead.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class LikeAnalyticsListAPIView(ListAPIView):
    """Read-only endpoint exposing like analytics grouped by date."""
    serializer_class = LikeAnalyticsSerializer
    permission_classes = (AllowAny, )

    def get_queryset(self) -> Like:
        """Delegate queryset construction to the analytics service."""
        analytics = LikesAnalyticsService.get_likes_distinct_by_date(self.request)
        return analytics
|
[
"tektonikboy98@gmail.com"
] |
tektonikboy98@gmail.com
|
839236f8a55b3ed0ff7e7f86ec49317b04dd94c7
|
ca15a53ec203c7110885b9a70719efab81d0d4cf
|
/fishfinder/search_planes
|
2a0d430adeef27896d49ea248133e4d50972fef8
|
[] |
no_license
|
wojonet/fishfinder
|
d6aa956f0c3bfe009575fa713c38a7e26b4e9c60
|
564b267f23864ce8db5114617174367057dbc11f
|
refs/heads/master
| 2023-05-23T02:43:37.807505
| 2020-07-27T17:08:18
| 2020-07-27T17:08:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
#!/usr/bin/env python
import os, sys
if __name__ == "__main__":
    # Setup environ: make the project importable and point Django at settings.
    sys.path.append(os.getcwd())
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fishfinder.settings")
    # Setup django (must happen before importing any app modules).
    import django
    django.setup()
    from planes.search import run
    run()
|
[
"sdstolworthy@gmail.com"
] |
sdstolworthy@gmail.com
|
|
b4265e0575a338b20fa06e18f133f38cad2bbe44
|
9ebf684c1fea42afdfdad01dfde12193b26390ef
|
/controller_15/src/punto1.py
|
c68a3065b0e875debaa1d65b6ce346ec8e0a9d96
|
[] |
no_license
|
NicolasRochaPacheco/robotica_15
|
b27a8debcacc53db053d95287b31ad6dec91d961
|
fc6e220bd821cd898c74b0f319f86b0d7b118dc1
|
refs/heads/master
| 2020-04-20T02:42:50.206992
| 2019-02-14T00:12:44
| 2019-02-14T00:12:44
| 168,578,545
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,079
|
py
|
#!/usr/bin/env python
#----------------------------------------------------------------------
#
# TAREA0, PUNTO1
# GRUPO 15
#
#----------------------------------------------------------------------
# Importamos la libreria de rospy
import rospy
# Importamos el hilo para leer los comandos del teclado.
from keyboard import Keyreader
# Importamos los mensajes y servicios de PacMan
from pacman.msg import actions
from pacman.msg import pacmanPos
from pacman.srv import mapService
#----------------------------------------------------------------------
# FUNCIONES
#----------------------------------------------------------------------
# Defiinimos la funcion de callback para la actualizacion
# del topico de la posicion del Pacman.
def posCallback(data):
    # Position updates are intentionally ignored for this exercise.
    pass
# Create the thread object that reads which key is being pressed.
key = Keyreader()
#----------------------------------------------------------------------
# VARIABLES / CONSTANTS
#----------------------------------------------------------------------
# Variable holding the player name
nombre = 'grupo_15'
# Variable to keep the node running
# NOTE(review): `running` is never read below.
running = True
#----------------------------------------------------------------------
# NODE LOGIC
#----------------------------------------------------------------------
# Initialize the node, create a publisher and a subscriber.
rospy.init_node('grupo15_punto1', anonymous=True)
pub = rospy.Publisher('pacmanActions0', actions, queue_size=10)
rospy.Subscriber('pacmanCoord0', pacmanPos, posCallback)
# Send the request to start the game.
mapRequestClient = rospy.ServiceProxy('pacman_world', mapService)
mapa = mapRequestClient(nombre)
# Send the command to control Pacman.
rate = rospy.Rate(60)
msg = actions()
print( 'Para terminar la ejecucion presione ESC y luego CTRL + C' )
# Launch the thread that checks which key is being pressed.
key.start()
# Main loop: publish the action mapped to the current key at 60 Hz.
while not rospy.is_shutdown():
    msg.action = key.getNumber()
    pub.publish(msg.action)
    rate.sleep()
|
[
"n.rocha11@uniandes.edu.co"
] |
n.rocha11@uniandes.edu.co
|
7ac6c567137e093e45add9034149f5ecf92582c1
|
619c472bb5f794e6407e6a9013cef1f3b9c43940
|
/1.7/Excersise_4.py
|
451f568058ea5a006361730d51c3d901b0f589f8
|
[] |
no_license
|
KFranciszek/pylove-training
|
273cb1fd317060a0246f09bd6840da55499a46d2
|
93fb3f00f5011b2b6bb6b890ab1a378987c21217
|
refs/heads/master
| 2021-05-02T09:18:29.769784
| 2018-01-24T22:47:11
| 2018-01-24T22:47:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 487
|
py
|
from flask import Flask, request
import requests
# Create the Flask application.
app = Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint: a trivial liveness greeting."""
    greeting = "Hello World!"
    return greeting
@app.route("/planet-details", methods=["GET"])
def get_planet_details():
    """Look up a Star Wars planet by the ``planet`` query parameter via the
    SWAPI search endpoint; performs a live HTTP request (needs network)."""
    planet = request.args.get("planet", "")
    resp = requests.get("https://swapi.co/api/planets/?search={}".format(planet)).json()["results"]
    if len(resp) == 0:
        return "Planet {} does not exist.".format(planet)
    else:
        # Return the first search match as a stringified dict.
        return str(resp[0])
# Start the development server (debug mode; not for production use).
app.run(debug=True)
|
[
"julia.anholcer@interia.pl"
] |
julia.anholcer@interia.pl
|
f283ed77fac1e26bf32a635310400b886d276a65
|
6ab67facf12280fedf7cc47c61ae91da0bcf7339
|
/service/yowsup/yowsup/layers/protocol_presence/protocolentities/test_presence_unavailable.py
|
3601798e10d6561e342a0900a112e11753688b59
|
[
"GPL-3.0-only",
"GPL-3.0-or-later",
"MIT"
] |
permissive
|
PuneethReddyHC/whatsapp-rest-webservice
|
2f035a08a506431c40b9ff0f333953b855f9c461
|
822dfc46b80e7a26eb553e5a10e723dda5a9f77d
|
refs/heads/master
| 2022-09-17T14:31:17.273339
| 2017-11-27T11:16:43
| 2017-11-27T11:16:43
| 278,612,537
| 0
| 1
|
MIT
| 2020-07-10T11:04:42
| 2020-07-10T11:04:41
| null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
from yowsup.layers.protocol_presence.protocolentities.presence_unavailable import UnavailablePresenceProtocolEntity
from yowsup.layers.protocol_presence.protocolentities.test_presence import PresenceProtocolEntityTest
class UnavailablePresenceProtocolEntityTest(PresenceProtocolEntityTest):
    """Runs the shared presence-entity tests against the 'unavailable' entity."""
    def setUp(self):
        """Point the inherited fixtures at UnavailablePresenceProtocolEntity."""
        super(UnavailablePresenceProtocolEntityTest, self).setUp()
        self.node.setAttribute("type", "unavailable")
        self.ProtocolEntity = UnavailablePresenceProtocolEntity
|
[
"svub@x900.svub.net"
] |
svub@x900.svub.net
|
890ab4cc1c5add53062976ab2352f72fba222ff5
|
4d327de5447519d3c00e6572f74362380783006f
|
/source/res/scripts/client/gui/shared/items_parameters/formatters.py
|
c49b23718f814eb432d7b084dc1b4016ac087bc6
|
[] |
no_license
|
XFreyaX/WorldOfTanks-Decompiled
|
706ac55d919b766aa89f90c97a75672bf2142611
|
5025466edd0dd3e5e50a6c60feb02ae793f6adac
|
refs/heads/master
| 2021-09-21T15:10:32.655452
| 2018-08-28T07:34:00
| 2018-08-28T07:34:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,329
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/shared/items_parameters/formatters.py
from itertools import chain
import BigWorld
from debug_utils import LOG_ERROR
from gui.Scaleform.genConsts.HANGAR_ALIASES import HANGAR_ALIASES
from gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS
from gui.Scaleform.locale.MENU import MENU
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.shared.formatters import text_styles
from gui.shared.items_parameters import RELATIVE_PARAMS
from gui.shared.items_parameters.comparator import PARAM_STATE
from gui.shared.items_parameters.params_helper import hasGroupPenalties, getCommonParam, PARAMS_GROUPS
from gui.shared.utils import AUTO_RELOAD_PROP_NAME
from items import vehicles, artefacts, getTypeOfCompactDescr, ITEM_TYPES
from web_stubs import i18n
MEASURE_UNITS = {'aimingTime': MENU.TANK_PARAMS_S,
'areaRadius': MENU.TANK_PARAMS_M,
'areaSquare': MENU.TANK_PARAMS_SQM,
'armor': MENU.TANK_PARAMS_FACEFRONTBOARDINMM,
'artDelayRange': MENU.TANK_PARAMS_S,
'avgDamageList': MENU.TANK_PARAMS_VAL,
'avgPiercingPower': MENU.TANK_PARAMS_MM,
'bombDamage': MENU.TANK_PARAMS_VAL,
'bombsNumberRange': MENU.TANK_PARAMS_CNT,
'chassisRotationSpeed': MENU.TANK_PARAMS_GPS,
'circularVisionRadius': MENU.TANK_PARAMS_M,
'clipFireRate': MENU.TANK_PARAMS_CLIPSEC,
'avgDamage': MENU.TANK_PARAMS_VAL,
'avgDamagePerMinute': MENU.TANK_PARAMS_VPM,
'fireStartingChance': MENU.TANK_PARAMS_PERCENT,
'maxHealth': MENU.TANK_PARAMS_VAL,
'flyDelayRange': MENU.TANK_PARAMS_S,
'enginePower': MENU.TANK_PARAMS_P,
'enginePowerPerTon': MENU.TANK_PARAMS_PT,
'explosionRadius': MENU.TANK_PARAMS_M,
'gunYawLimits': MENU.TANK_PARAMS_GRADS,
'hullArmor': MENU.TANK_PARAMS_FACEFRONTBOARDINMM,
'maxLoad': MENU.TANK_PARAMS_T,
'piercingPower': MENU.TANK_PARAMS_MM,
'pitchLimits': MENU.TANK_PARAMS_GRADS,
'radioDistance': MENU.TANK_PARAMS_M,
'reloadMagazineTime': MENU.TANK_PARAMS_S,
'reloadTime': MENU.TANK_PARAMS_SPM,
'reloadTimeSecs': MENU.TANK_PARAMS_S,
'rotationSpeed': MENU.TANK_PARAMS_GPS,
'shellReloadingTime': MENU.TANK_PARAMS_S,
'shotDispersionAngle': MENU.TANK_PARAMS_M,
'shotsNumberRange': MENU.TANK_PARAMS_CNT,
'shellsCount': MENU.TANK_PARAMS_CNT,
'speedLimits': MENU.TANK_PARAMS_MPH,
'turretArmor': MENU.TANK_PARAMS_FACEFRONTBOARDINMM,
'turretYawLimits': MENU.TANK_PARAMS_GRADS,
'vehicleWeight': MENU.TANK_PARAMS_T,
'weight': MENU.TANK_PARAMS_KG,
'caliber': MENU.TANK_PARAMS_MM,
'damage': MENU.TANK_PARAMS_VAL,
'gunRotationSpeed': MENU.TANK_PARAMS_GPS,
'turretRotationSpeed': MENU.TANK_PARAMS_GPS,
'invisibilityStillFactor': MENU.TANK_PARAMS_PERCENT,
'invisibilityMovingFactor': MENU.TANK_PARAMS_PERCENT,
'maxShotDistance': MENU.TANK_PARAMS_M,
'switchOnTime': MENU.TANK_PARAMS_S,
'switchOffTime': MENU.TANK_PARAMS_S,
'switchTime': MENU.TANK_PARAMS_S,
'stunMaxDuration': MENU.TANK_PARAMS_S,
'stunMinDuration': MENU.TANK_PARAMS_S,
'stunMaxDurationList': MENU.TANK_PARAMS_S,
'stunMinDurationList': MENU.TANK_PARAMS_S,
'cooldownSeconds': MENU.TANK_PARAMS_S,
AUTO_RELOAD_PROP_NAME: MENU.TANK_PARAMS_S}
COLORLESS_SCHEME = (text_styles.stats, text_styles.stats, text_styles.stats)
NO_BONUS_SIMPLIFIED_SCHEME = (text_styles.warning, text_styles.warning, text_styles.warning)
NO_BONUS_BASE_SCHEME = (text_styles.error, text_styles.stats, text_styles.stats)
SIMPLIFIED_SCHEME = (text_styles.critical, text_styles.warning, text_styles.statInfo)
BASE_SCHEME = (text_styles.error, text_styles.stats, text_styles.bonusAppliedText)
EXTRACTED_BONUS_SCHEME = (text_styles.error, text_styles.bonusAppliedText, text_styles.bonusAppliedText)
SITUATIONAL_SCHEME = (text_styles.critical, text_styles.warning, text_styles.bonusPreviewText)
VEHICLE_PARAMS = tuple(chain(*[ PARAMS_GROUPS[param] for param in RELATIVE_PARAMS ]))
ITEMS_PARAMS_LIST = {ITEM_TYPES.vehicleRadio: ('radioDistance', 'weight'),
ITEM_TYPES.vehicleChassis: ('maxLoad', 'rotationSpeed', 'weight'),
ITEM_TYPES.vehicleEngine: ('enginePower', 'fireStartingChance', 'weight'),
ITEM_TYPES.vehicleTurret: ('armor', 'rotationSpeed', 'circularVisionRadius', 'weight'),
ITEM_TYPES.vehicle: VEHICLE_PARAMS,
ITEM_TYPES.equipment: {artefacts.RageArtillery: ('damage', 'piercingPower', 'caliber', 'shotsNumberRange', 'areaRadius', 'artDelayRange'),
artefacts.RageBomber: ('bombDamage', 'piercingPower', 'bombsNumberRange', 'areaSquare', 'flyDelayRange')},
ITEM_TYPES.shell: ('caliber', 'avgPiercingPower', 'damage', 'stunMinDuration', 'stunMaxDuration', 'explosionRadius'),
ITEM_TYPES.optionalDevice: ('weight',),
ITEM_TYPES.vehicleGun: ('caliber',
'shellsCount',
'shellReloadingTime',
'reloadMagazineTime',
AUTO_RELOAD_PROP_NAME,
'reloadTime',
'avgPiercingPower',
'avgDamageList',
'stunMinDurationList',
'stunMaxDurationList',
'dispertionRadius',
'aimingTime',
'maxShotDistance',
'weight')}
_COUNT_OF_AUTO_RELOAD_SLOTS_TIMES_TO_SHOW_IN_INFO = 5
def measureUnitsForParameter(paramName):
    """Localized measurement-unit suffix for the given parameter name."""
    unitKey = MEASURE_UNITS[paramName]
    return i18n.makeString(unitKey)
def isRelativeParameter(paramName):
    # True for parameters in the RELATIVE_PARAMS summary list.
    return paramName in RELATIVE_PARAMS
def isRelativeParameterVisible(parameter):
    """Relative parameters are displayed only when their diff is non-zero."""
    if not isRelativeParameter(parameter.name):
        return False
    return isDiffEnoughToDisplay(parameter.state[1])
def isDiffEnoughToDisplay(value):
    """True when *value* truncates to a non-zero integer (a visible change)."""
    return int(value) != 0
def getParameterSmallIconPath(parameter):
    """Path to the small vehicle-parameter icon for *parameter*."""
    return '%s/%s.png' % (RES_ICONS.MAPS_ICONS_VEHPARAMS_SMALL, parameter)
def getParameterBigIconPath(parameter):
    """Path to the big vehicle-parameter icon for *parameter*."""
    return '%s/%s.png' % (RES_ICONS.MAPS_ICONS_VEHPARAMS_BIG, parameter)
def formatModuleParamName(paramName):
    """Render a module parameter label: styled name plus its measure unit."""
    out = text_styles.builder()
    out.addStyledText(text_styles.main, MENU.moduleinfo_params(paramName))
    out.addStyledText(text_styles.standard, MEASURE_UNITS.get(paramName, ''))
    return out.render()
def formatVehicleParamName(paramName, showMeasureUnit=True):
    # Relative (group) parameters get the middle-title style; plain parameters
    # are rendered main-styled with an optional measure unit appended.
    paramTitle = MENU.tank_params(paramName)
    if isRelativeParameter(paramName):
        return text_styles.middleTitle(paramTitle)
    textBuilder = text_styles.builder()
    textBuilder.addStyledText(text_styles.main, paramTitle)
    if showMeasureUnit:
        textBuilder.addStyledText(text_styles.standard, MEASURE_UNITS.get(paramName, ''))
    return textBuilder.render()
def getRelativeDiffParams(comparator):
    """Return the comparator's visibly-different relative parameters, ordered
    as they appear in RELATIVE_PARAMS.

    Fix: the previous implementation used ``sorted(cmp=...)`` with the
    built-in ``cmp`` — both removed in Python 3.  A ``key`` based on the
    parameter's index in RELATIVE_PARAMS yields the identical ordering.
    """
    relativeParams = [ p for p in comparator.getAllDifferentParams() if isRelativeParameterVisible(p) ]
    return sorted(relativeParams, key=lambda p: RELATIVE_PARAMS.index(p.name))
# Reusable formatting recipes: 'rounder' turns a single value into a string,
# 'separator' joins multi-value parameters (ranges, lists).
_niceFormat = {'rounder': BigWorld.wg_getNiceNumberFormat}
_niceRangeFormat = {'rounder': BigWorld.wg_getNiceNumberFormat,
 'separator': '-'}
_listFormat = {'rounder': lambda v: BigWorld.wg_getIntegralFormat(int(v)),
 'separator': '/'}
_niceListFormat = {'rounder': BigWorld.wg_getNiceNumberFormat,
 'separator': '/'}
_integralFormat = {'rounder': BigWorld.wg_getIntegralFormat}
_percentFormat = {'rounder': lambda v: '%d%%' % v}
def _autoReloadPreprocessor(reloadTimes, rowStates):
    """Normalize per-slot auto-reload times for display.

    Returns a ``(values, separator, states)`` triple consumed by
    formatParameter, or None when a slot carries inconsistent min/max times.
    When there are more slots than the UI shows, the list collapses to a
    (min, max) range joined with '-'; otherwise values are joined with '/'.
    """
    times = []
    states = []
    for idx, slotTime in enumerate(reloadTimes):
        # Plain numbers (or missing values) are taken as-is.
        if isinstance(slotTime, (float, int)) or slotTime is None:
            times.append(slotTime)
            if rowStates:
                states.append(rowStates[idx])
            continue
        if isinstance(slotTime, tuple):
            minSlotTime, maxSlotTime = slotTime
            if minSlotTime == maxSlotTime:
                # Degenerate range: keep the single value and its first state.
                times.append(minSlotTime)
                if rowStates:
                    states.append(rowStates[idx][0])
            else:
                # A real min/max spread per slot is unexpected for a single gun.
                LOG_ERROR('Different auto-reload times for same gun and slot')
                return
    if len(times) > _COUNT_OF_AUTO_RELOAD_SLOTS_TIMES_TO_SHOW_IN_INFO:
        if states:
            # Pick the states matching the min and max times so colouring stays correct.
            minTime, maxTime = min(times), max(times)
            minState, maxState = (None, None)
            for idx, time in enumerate(times):
                if time == minTime:
                    minState = states[idx]
                if time == maxTime:
                    maxState = states[idx]
            return ((min(times), max(times)), '-', (minState, maxState))
        return ((min(times), max(times)), '-', None)
    else:
        return (times, '/', states if states else None)
# Per-parameter formatting recipe; parameters absent here fall back to
# _listFormat (see formatParameter).  The auto-reload entry additionally
# carries a 'preprocessor' that reshapes the raw per-slot values.
FORMAT_SETTINGS = {'relativePower': _integralFormat,
 'damage': _niceRangeFormat,
 'piercingPower': _niceRangeFormat,
 'reloadTime': _niceRangeFormat,
 'reloadTimeSecs': _niceRangeFormat,
 'gunRotationSpeed': _niceFormat,
 'turretRotationSpeed': _niceFormat,
 'turretYawLimits': _niceListFormat,
 'gunYawLimits': _niceListFormat,
 'pitchLimits': _niceListFormat,
 'clipFireRate': _niceListFormat,
 'aimingTime': _niceRangeFormat,
 'shotDispersionAngle': _niceFormat,
 'avgDamagePerMinute': _niceFormat,
 'relativeArmor': _integralFormat,
 'avgDamage': _niceFormat,
 'maxHealth': _integralFormat,
 'hullArmor': _listFormat,
 'turretArmor': _listFormat,
 'relativeMobility': _integralFormat,
 'vehicleWeight': _niceListFormat,
 'weight': _niceRangeFormat,
 'enginePower': _integralFormat,
 'enginePowerPerTon': _niceFormat,
 'speedLimits': _niceListFormat,
 'chassisRotationSpeed': _niceFormat,
 'relativeVisibility': _integralFormat,
 'relativeCamouflage': _integralFormat,
 'circularVisionRadius': _niceFormat,
 'radioDistance': _niceFormat,
 'maxLoad': _niceFormat,
 'rotationSpeed': _niceFormat,
 'fireStartingChance': _percentFormat,
 'armor': _listFormat,
 'caliber': _niceFormat,
 'shotsNumberRange': _niceFormat,
 'areaRadius': _niceFormat,
 'artDelayRange': _niceFormat,
 'bombDamage': _niceRangeFormat,
 'bombsNumberRange': _niceFormat,
 'areaSquare': _niceFormat,
 'flyDelayRange': _niceFormat,
 'explosionRadius': _niceFormat,
 'shellsCount': _niceRangeFormat,
 'shellReloadingTime': _niceRangeFormat,
 'reloadMagazineTime': _niceRangeFormat,
 'avgPiercingPower': _listFormat,
 'avgDamageList': _listFormat,
 'dispertionRadius': _niceRangeFormat,
 'invisibilityStillFactor': _niceListFormat,
 'invisibilityMovingFactor': _niceListFormat,
 'switchOnTime': _niceFormat,
 'switchOffTime': _niceFormat,
 'switchTime': _niceListFormat,
 'stunMaxDuration': _niceFormat,
 'stunMinDuration': _niceFormat,
 'stunMaxDurationList': _niceListFormat,
 'stunMinDurationList': _niceListFormat,
 'cooldownSeconds': _niceFormat,
 AUTO_RELOAD_PROP_NAME: {'preprocessor': _autoReloadPreprocessor,
                         'rounder': lambda v: str(int(round(v)))}}
def _deltaWrapper(fn):
def wrapped(paramValue):
formattedValue = fn(paramValue)
if formattedValue == '0':
return '~0'
return '+%s' % formattedValue if isinstance(paramValue, (int, float)) and paramValue > 0 else formattedValue
return wrapped
def _getDeltaSettings():
    """Clone FORMAT_SETTINGS, wrapping every 'rounder' so values render as
    signed deltas (see _deltaWrapper).  Originals are left untouched."""
    # NOTE: local name 'detlaSettings' is a historical typo ("delta"); kept as-is.
    detlaSettings = {}
    # Python 2 API (dict.iteritems); this module targets a Python 2 runtime.
    for paramName, setting in FORMAT_SETTINGS.iteritems():
        settingCopy = setting.copy()
        rounder = settingCopy['rounder']
        settingCopy['rounder'] = _deltaWrapper(rounder)
        detlaSettings[paramName] = settingCopy
    return detlaSettings
# Formatting recipes used when rendering parameter *differences*.
DELTA_PARAMS_SETTING = _getDeltaSettings()
# Parameters whose values go through _cutDigits magnitude-aware rounding.
_SMART_ROUND_PARAMS = ('damage', 'piercingPower', 'bombDamage', 'shellsCount', 'shellReloadingTime', 'reloadMagazineTime', 'reloadTime', 'dispertionRadius', 'aimingTime', 'weight')
# Maps a parameter state to its index inside a 3-element colour scheme tuple.
_STATES_INDEX_IN_COLOR_MAP = {PARAM_STATE.WORSE: 0,
 PARAM_STATE.NORMAL: 1,
 PARAM_STATE.BETTER: 2}
def _colorize(paramStr, state, colorScheme):
    # Apply the colour style matching the state; NOT_APPLICABLE stays plain.
    stateType, _ = state
    if stateType == PARAM_STATE.NOT_APPLICABLE:
        return paramStr
    styler = colorScheme[_STATES_INDEX_IN_COLOR_MAP[stateType]]
    return styler(paramStr)
def colorizedFormatParameter(parameter, colorScheme):
    # Smart-rounded, colourized rendering of a single parameter.
    name, value, state = parameter.name, parameter.value, parameter.state
    return formatParameter(name, value, state, colorScheme)
def colorizedFullFormatParameter(parameter, colorScheme):
    # Same as colorizedFormatParameter but without smart rounding (full precision).
    name, value, state = parameter.name, parameter.value, parameter.state
    return formatParameter(name, value, state, colorScheme, allowSmartRound=False)
def simplifiedDeltaParameter(parameter, isSituational=False):
    """Render a parameter as '(±delta) value', or just the value when the
    (integer-truncated) delta is zero.  Situational deltas use the
    situational colour scheme instead of the simplified one."""
    mainFormatter = SIMPLIFIED_SCHEME[1]
    delta = int(parameter.state[1])
    paramStr = formatParameter(parameter.name, parameter.value)
    if delta:
        sign = '-' if delta < 0 else '+'
        scheme = SITUATIONAL_SCHEME if isSituational else SIMPLIFIED_SCHEME
        deltaStr = _colorize('%s%s' % (sign, abs(delta)), parameter.state, scheme)
        return '(%s) %s' % (deltaStr, mainFormatter(paramStr))
    return mainFormatter(paramStr)
def _applyFormat(value, state, settings, doSmartRound, colorScheme):
if doSmartRound:
value = _cutDigits(value)
if isinstance(value, str):
paramStr = value
elif value is None:
paramStr = '--'
else:
paramStr = settings['rounder'](value)
if state is not None and colorScheme is not None:
paramStr = _colorize(paramStr, state, colorScheme)
return paramStr
def formatParameter(parameterName, paramValue, parameterState=None, colorScheme=None, formatSettings=None, allowSmartRound=True, showZeroDiff=False):
    """Format a (possibly multi-valued) parameter to a display string.

    Returns None for missing values and (unless showZeroDiff) for scalar
    zeros.  List/tuple values are formatted element-wise and joined with the
    recipe's separator.
    """
    formatSettings = formatSettings or FORMAT_SETTINGS
    # Unknown parameters fall back to the generic integral '/'-joined format.
    settings = formatSettings.get(parameterName, _listFormat)
    doSmartRound = allowSmartRound and parameterName in _SMART_ROUND_PARAMS
    preprocessor = settings.get('preprocessor')
    if preprocessor:
        # NOTE(review): _autoReloadPreprocessor may return None on bad data,
        # which would raise TypeError on this unpack — confirm upstream guards.
        values, separator, parameterState = preprocessor(paramValue, parameterState)
    else:
        values = paramValue
        separator = None
    if values is None:
        return
    elif isinstance(values, (tuple, list)):
        if parameterState is None:
            parameterState = [None] * len(values)
        # Collapse a uniform smart-rounded list to a single value (hidden if <= 0).
        if doSmartRound and len(set(values)) == 1:
            if values[0] > 0:
                return _applyFormat(values[0], parameterState[0], settings, doSmartRound, colorScheme)
            return
        separator = separator or settings['separator']
        paramsList = [ _applyFormat(val, state, settings, doSmartRound, colorScheme) for val, state in zip(values, parameterState) ]
        return separator.join(paramsList)
    else:
        return None if not showZeroDiff and values == 0 else _applyFormat(values, parameterState, settings, doSmartRound, colorScheme)
def formatParameterDelta(pInfo, deltaScheme=None, formatSettings=None):
    # Format the parameter's diff with the signed-delta settings; None when
    # there is no diff to show.
    diff = pInfo.getParamDiff()
    if diff is None:
        return None
    scheme = deltaScheme or BASE_SCHEME
    settings = formatSettings or DELTA_PARAMS_SETTING
    return formatParameter(pInfo.name, diff, pInfo.state, scheme, settings, allowSmartRound=False, showZeroDiff=True)
def getFormattedParamsList(descriptor, parameters, excludeRelative=False):
    """Return [(paramName, formattedValue), ...] for the item described by
    *descriptor*, drawing raw values from the *parameters* mapping.

    Only truthy values that format to something non-empty are included;
    relative (group) parameters can be filtered out with excludeRelative.
    """
    if vehicles.isVehicleDescr(descriptor):
        compactDescr = descriptor.type.compactDescr
    else:
        compactDescr = descriptor.compactDescr
    itemTypeIdx = getTypeOfCompactDescr(compactDescr)
    if itemTypeIdx == ITEM_TYPES.equipment:
        # Equipment entries are keyed by the concrete artefact class.
        eqDescr = vehicles.getItemByCompactDescr(compactDescr)
        paramsList = ITEMS_PARAMS_LIST[itemTypeIdx].get(type(eqDescr), [])
    else:
        paramsList = ITEMS_PARAMS_LIST[itemTypeIdx]
    params = []
    for paramName in paramsList:
        if excludeRelative and isRelativeParameter(paramName):
            continue
        paramValue = parameters.get(paramName)
        # Falsy values (missing, zero, empty) are simply skipped.
        if paramValue:
            fmtValue = formatParameter(paramName, paramValue)
            if fmtValue:
                params.append((paramName, fmtValue))
    return params
def getBonusIcon(bonusId):
    """Resolve the tooltip bonus icon name for *bonusId*.

    Rammer-family and 'enhanced'-family ids (with a few named exceptions)
    share generic icons; everything else maps to the id with any '_class'
    suffix stripped.  Uses the idiomatic ``in`` test instead of
    ``str.find(...) >= 0``.
    """
    if 'Rammer' in bonusId and bonusId != 'deluxRammer':
        iconName = 'rammer'
    elif 'enhanced' in bonusId and bonusId not in ('enhancedAimDrives', 'enhancedAimDrivesBattleBooster'):
        iconName = 'enhancedSuspension'
    else:
        iconName = bonusId.split('_class')[0]
    return RES_ICONS.getParamsTooltipIcon('bonuses', iconName)
def getPenaltyIcon(penaltyId):
    # Penalty icons are looked up directly by their id in the penalties folder.
    iconPath = RES_ICONS.getParamsTooltipIcon('penalties', penaltyId)
    return iconPath
def packSituationalIcon(text, icon):
    # Glue the text and its icon together with a non-breaking markup separator.
    return '%s<nobr>%s' % (text, icon)
def getGroupPenaltyIcon(parameter, comparator):
    # Show the decrease icon when the parameter's group carries penalties.
    if hasGroupPenalties(parameter.name, comparator):
        return RES_ICONS.MAPS_ICONS_VEHPARAMS_ICON_DECREASE
    return ''
def getAllParametersTitles():
    """Build renderer data entries for every relative-parameter group followed
    by its member parameters.

    Fix: the old loop used ``enumerate`` but discarded the index; plain
    iteration is the idiomatic equivalent.
    """
    result = []
    for groupName in RELATIVE_PARAMS:
        # Group header row (simple/top renderer state), enabled.
        data = getCommonParam(HANGAR_ALIASES.VEH_PARAM_RENDERER_STATE_SIMPLE_TOP, groupName)
        data['titleText'] = formatVehicleParamName(groupName)
        data['isEnabled'] = True
        data['tooltip'] = TOOLTIPS_CONSTANTS.BASE_VEHICLE_PARAMETERS
        result.append(data)
        for paramName in PARAMS_GROUPS[groupName]:
            # Member row (advanced renderer state) with its small icon, disabled.
            data = getCommonParam(HANGAR_ALIASES.VEH_PARAM_RENDERER_STATE_ADVANCED, paramName)
            data['iconSource'] = getParameterSmallIconPath(paramName)
            data['titleText'] = formatVehicleParamName(paramName)
            data['isEnabled'] = False
            data['tooltip'] = TOOLTIPS_CONSTANTS.BASE_VEHICLE_PARAMETERS
            result.append(data)
    return result
def _cutDigits(value):
if abs(value) > 99:
return round(value)
return round(value, 1) if abs(value) > 9 else round(value, 2)
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
eb89bf03793cf76a9919dce7e81b162d23655a69
|
6bcd5c0b102459b307a057423bcd09707a2328e5
|
/python webserver/tools/server.py
|
08eacc7639512999b59a330fc5a82ee83fc57b3e
|
[
"MIT"
] |
permissive
|
emeric254/open-door
|
13f6cc0c177b5dc50a3cf9078f20802bb803535a
|
7c8844ec0164489f7e87233c4b3a7f28bc39e23a
|
refs/heads/master
| 2021-01-11T20:26:25.770078
| 2017-01-16T17:24:21
| 2017-01-16T17:24:21
| 79,115,394
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
# -*- coding: utf-8 -*-
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.netutil
import tornado.process
import tornado.web
def start_http(app: tornado.web.Application, http_port: int = 80):
    """Bind *http_port* and serve *app*, forking one worker process per CPU
    when the platform supports it.

    :param app: the Tornado application to serve
    :param http_port: TCP port to bind (default 80)
    """
    # Bind the listening socket(s) before forking so all workers share them.
    http_socket = tornado.netutil.bind_sockets(http_port)
    # Fork worker processes (0 == one per CPU core); fork_processes raises
    # AttributeError on platforms without os.fork (e.g. Windows).
    try:
        tornado.process.fork_processes(0)
    except KeyboardInterrupt:
        # Allow Ctrl-C during startup to shut the loop down cleanly.
        tornado.ioloop.IOLoop.current().stop()
    except AttributeError:
        # Fix: message previously read "Can\' fork" (missing the "t") and
        # wrongly said "thread" — fork_processes creates processes.
        print("Can't fork, continuing with only one (the main) process ...", file=sys.stderr)
    # Attach the shared sockets to an HTTP server in this (possibly child) process.
    print('Start an HTTP request handler on port : ' + str(http_port))
    tornado.httpserver.HTTPServer(app).add_sockets(http_socket)
    # Serve forever; KeyboardInterrupt exits "properly" by stopping the loop.
    try:
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.current().stop()
|
[
"emeric254@live.fr"
] |
emeric254@live.fr
|
8868845dc854c1f0bf450dd1fbf0dbdb1a27ef8a
|
a5a76c9fc62917fd125b2b8c1e1a05abf50cd3f6
|
/distributed_utils.py
|
e79791edb532b7084ffe3a288e6755c554ad8319
|
[
"Apache-2.0"
] |
permissive
|
CV-IP/ACAR-Net
|
cf8b1f4ce088a076fb85ed94dbe189c205a9f9a5
|
d684203eabda68b882c0b959b69e44a7bab1f247
|
refs/heads/master
| 2023-08-18T19:39:20.211051
| 2021-10-08T09:20:14
| 2021-10-08T09:20:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
import os
import torch
import torch.distributed as dist
def get_env_variable(variables, default=None):
    """Return the value of the first environment variable in *variables* that
    is set, or *default* when none of the candidate names exists."""
    env = os.environ
    for name in variables:
        if name in env:
            return env[name]
    return default
def init_distributed(local_rank, args):
    """Initialise torch.distributed for this worker and pin its CUDA device.

    Node count and node rank come from args when given, otherwise from
    scheduler environment variables (SLURM / MVAPICH2 / PMI).  Returns the
    global ``(rank, world_size)`` pair.
    """
    if args.nnodes is not None:
        n_nodes = args.nnodes
    else:
        # Fall back to the job scheduler's task count; default to 1 node.
        n_nodes = int(get_env_variable(['SLURM_NTASKS', 'MV2_COMM_WORLD_SIZE', 'PMI_SIZE'], default=1))
    if args.node_rank is not None:
        node_id = args.node_rank
    else:
        node_id = int(get_env_variable(['SLURM_PROCID', 'MV2_COMM_WORLD_RANK', 'PMI_RANK'], default=0))
    # The env:// init method reads the rendezvous address from these variables.
    os.environ['MASTER_PORT'] = str(args.master_port)
    os.environ['MASTER_ADDR'] = str(args.master_addr)
    world_size = n_nodes * args.nproc_per_node
    # Global rank = node offset plus this worker's local rank on the node.
    rank = node_id * args.nproc_per_node + local_rank
    dist.init_process_group(backend=args.backend, init_method='env://', world_size=world_size, rank=rank)
    print('[rank {:04d}]: distributed init: world_size={}, local_rank={}'.format(rank, world_size, local_rank), flush=True)
    # Round-robin local ranks over the visible GPUs on this node.
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(local_rank%num_gpus)
    return rank, world_size
|
[
"siyuchen@pku.edu.cn"
] |
siyuchen@pku.edu.cn
|
521403705ec4d0e89e9f08e3bcc2d940dfb65f75
|
7394b0ed53dc27c3ba909f1577c2a3082fe6a7d5
|
/portal/urls.py
|
824671c4599f7174128040010cc6dbf0a8fc7b2d
|
[] |
no_license
|
mohammed-yasim/abin-project
|
b5477b40e4de339a3b7d0eaf6ea1a2d34c37d65f
|
a35419f522c3ba7fbe7cb08d8431b8d1b5caec23
|
refs/heads/master
| 2023-05-03T11:03:00.083535
| 2021-05-20T07:07:23
| 2021-05-20T07:07:23
| 260,738,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
from django.urls import path,re_path
from django.conf.urls import url,include
from . import views as portal_views
#hello
# Route table for the portal app: landing page, dashboard, auth endpoints
# and a generic query dispatcher.
urlpatterns = [
    url(r'^$',portal_views.index,name="Poral Home"),  # NOTE(review): "Poral" looks like a typo for "Portal"; confirm no reverse()/template lookups rely on it before renaming
    url(r'dashboard',portal_views.dashboard),
    url(r'logout',portal_views.logout),
    path('auth',portal_views.auth),
    path('q/<str:query>',portal_views.queries),  # generic query endpoint keyed by the path segment
    path('save_profile',portal_views.profile_save),
    #path('test',portal_views.test)
]
|
[
"mohammedyasim.edkm@gmail.com"
] |
mohammedyasim.edkm@gmail.com
|
cdd5619854d4eaf90f9beb9c32d6785bb18ddff5
|
6e917ecc9247c9acd94fcf841e2ce5b0cadb2d27
|
/dp/552/lc_552_v1.py
|
996f7b4726c681b5535a1b21f5f81ee4dd8815a9
|
[] |
no_license
|
lection/leetcode-practice
|
4b25f7a9c0c7363a25e6e01e08b3566c4436d030
|
6d708e3cc2ee501b30e19a2a169abf1f86baffd2
|
refs/heads/master
| 2020-06-23T01:57:05.880049
| 2019-12-08T18:03:59
| 2019-12-08T18:03:59
| 198,466,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,784
|
py
|
"""
给定一个正整数 n,返回长度为 n 的所有可被视为可奖励的出勤记录的数量。 答案可能非常大,你只需返回结果mod 109 + 7的值。
学生出勤记录是只包含以下三个字符的字符串:
'A' : Absent,缺勤
'L' : Late,迟到
'P' : Present,到场
如果记录不包含多于一个'A'(缺勤)或超过两个连续的'L'(迟到),则该记录被视为可奖励的。
示例 1:
输入: n = 2
输出: 8
解释:
有8个长度为2的记录将被视为可奖励:
"PP" , "AP", "PA", "LP", "PL", "AL", "LA", "LL"
只有"AA"不会被视为可奖励,因为缺勤次数超过一次。
注意:n 的值不会超过100000。
============
先暴力一次 肯定不通过,不用提交,就练练手。
还有很大剪枝和调优的空间,但是也算了吧,没必要。
"""
# Answers are reported modulo 1e9+7, as required by the problem statement.
MOD_NUMBER = 10 ** 9 + 7


class Solution:

    def checkRecord(self, n: int) -> int:
        """Count length-*n* attendance records with at most one 'A' and no run
        of three 'L's, modulo MOD_NUMBER.

        Deliberate brute force over all 3**n strings (practice code, not meant
        to pass the judge — see the module docstring).
        """
        if not n:
            return 0

        def _is_rewardable(record):
            # Single scan tracking total absences and the current late streak.
            absences = 0
            late_streak = 0
            for mark in record:
                if mark == 'A':
                    absences += 1
                    late_streak = 0
                elif mark == 'L':
                    late_streak += 1
                else:
                    late_streak = 0
                if absences > 1 or late_streak > 2:
                    return 0
            print(record)
            return 1

        def _count(remaining, prefix):
            if remaining == 0:
                return _is_rewardable(prefix)
            total = 0
            # Same branch order as the original ('A', 'L', 'P') so the debug
            # prints come out in the same sequence.
            for mark in ('A', 'L', 'P'):
                total += _count(remaining - 1, prefix + [mark])
            return total % MOD_NUMBER
        return _count(n, [])
# Quick sanity check: prints True when the n=2 case matches the expected 8.
# Larger cases are left commented out because the brute force is O(3**n).
s = Solution()
# print(s.checkRecord(1) == 3)
print(s.checkRecord(2) == 8)
# print(s.checkRecord(3) == 19)
# print(s.checkRecord(4) == 43)
|
[
"lection.yu@gmail.com"
] |
lection.yu@gmail.com
|
7f8cf88695121fc872a232143de1c7e79e2dc13f
|
6f56da8db171d4a6c006b5d944437bf061069faf
|
/XCat.v.0.0.1/source/healpy/visufunc.py
|
a464d00cba5f2f241397a7b3a606a5a0c82da455
|
[] |
no_license
|
afarahi/XCat
|
16819bef7087e994907c413dd6331cdebde72ffb
|
498602eb7f61696d169f071185115345c68bcf86
|
refs/heads/master
| 2021-01-21T01:59:36.907059
| 2013-05-03T05:12:07
| 2013-05-03T05:12:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,120
|
py
|
#
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
import projaxes as PA
import pylab
import numpy as npy
import matplotlib
import matplotlib.colors as colors
import matplotlib.cbook as cbook
import pixelfunc
pi = npy.pi
# Degrees-to-radians conversion factor.
dtor = pi/180.
def mollview(map=None,fig=None,rot=None,coord=None,unit='',
             xsize=800,title='Mollweide view',nest=False,
             min=None,max=None,flip='astro',
             remove_dip=False,remove_mono=False,
             gal_cut=0,
             format='%g',cbar=True,cmap=None, notext=False,
             norm=None,hold=False,margins=None,sub=None):
    """Plot an healpix map (given as an array) in Mollweide projection.

    Input:
      - map : an ndarray containing the map
              if None, use map with inf value (white map), useful for
              overplotting
    Parameters:
      - fig: a figure number. Default: create a new figure
      - rot: rotation, either 1,2 or 3 angles describing the rotation
             Default: None
      - coord: either one of 'G', 'E' or 'C' to describe the coordinate
               system of the map, or a sequence of 2 of these to make
               rotation from the first to the second coordinate system.
               Default: None
      - unit: a text describing the unit. Default: ''
      - xsize: the size of the image. Default: 800
      - title: the title of the plot. Default: 'Mollweide view'
      - nest: if True, ordering scheme is NEST. Default: False (RING)
      - min: the minimum range value
      - max: the maximum range value
      - flip: 'astro' (default, east towards left, west towards right) or 'geo'
      - remove_dip: if True, remove the dipole+monopole
      - remove_mono: if True, remove the monopole
      - gal_cut: galactic cut for the dipole/monopole fit
      - format: the format of the scale. Default: '%g'
      - notext: if True, no text is printed around the map
      - norm: color normalization, hist= histogram equalized color mapping, log=
              logarithmic color mapping, default: None (linear color mapping)
      - hold: if True, replace the current Axes by a MollweideAxes.
              use this if you want to have multiple maps on the same
              figure. Default: False
      - sub: use a part of the current figure (same syntax as subplot).
             Default: None
      - margins: either None, or a sequence (left,bottom,right,top)
                 giving the margins on left,bottom,right and top
                 of the axes. Values are relative to figure (0-1).
                 Default: None
    """
    # Starting to draw : turn interactive off so nothing renders until the
    # axes are fully built; state is restored in the finally block.
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        if map is None:
            # No data: all-inf (white) map used as an overplotting background.
            map = npy.zeros(12)+npy.inf
            cbar=False
        # Choose the target figure and the axes extent (left,bottom,w,h).
        if not (hold or sub):
            f=pylab.figure(fig,figsize=(8.5,5.4))
            extent = (0.02,0.05,0.96,0.9)
        elif hold:
            # Replace the current axes in place.
            f=pylab.gcf()
            left,bottom,right,top = npy.array(f.gca().get_position()).ravel()
            extent = (left,bottom,right-left,top-bottom)
            f.delaxes(f.gca())
        else: # using subplot syntax
            f=pylab.gcf()
            if hasattr(sub,'__len__'):
                nrows, ncols, idx = sub
            else:
                # NOTE(review): relies on Python 2 integer division for an
                # int sub like 211; would need // under Python 3.
                nrows, ncols, idx = sub/100, (sub%100)/10, (sub%10)
            if idx < 1 or idx > ncols*nrows:
                raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
                                                                     ncols,
                                                                     idx))
            c,r = (idx-1)%ncols,(idx-1)/ncols
            if not margins:
                margins = (0.01,0.0,0.0,0.02)
            extent = (c*1./ncols+margins[0],
                      1.-(r+1)*1./nrows+margins[1],
                      1./ncols-margins[2]-margins[0],
                      1./nrows-margins[3]-margins[1])
            # NOTE(review): margins are applied a second time below; confirm
            # the double application is intentional.
            extent = (extent[0]+margins[0],
                      extent[1]+margins[1],
                      extent[2]-margins[2]-margins[0],
                      extent[3]-margins[3]-margins[1])
            #extent = (c*1./ncols, 1.-(r+1)*1./nrows,1./ncols,1./nrows)
        #f=pylab.figure(fig,figsize=(8.5,5.4))
        ax=PA.HpxMollweideAxes(f,extent,coord=coord,rot=rot,
                               format=format,flipconv=flip)
        f.add_axes(ax)
        if remove_dip:
            map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,
                                        nest=nest,copy=True,
                                        verbose=True)
        elif remove_mono:
            map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,
                                          copy=True,verbose=True)
        ax.projmap(map,nest=nest,xsize=xsize,coord=coord,vmin=min,vmax=max,
                   cmap=cmap,norm=norm)
        if cbar:
            # Explicit boundaries/values make the colorbar follow the image
            # normalization (hist-equalized or log scales included).
            im = ax.get_images()[0]
            b = im.norm.inverse(npy.linspace(0,1,im.cmap.N+1))
            v = npy.linspace(im.norm.vmin,im.norm.vmax,im.cmap.N)
            if matplotlib.__version__ >= '0.91.0':
                cb=f.colorbar(ax.get_images()[0],ax=ax,
                              orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.05,fraction=0.1,boundaries=b,values=v)
            else:
                # for older matplotlib versions, no ax kwarg
                cb=f.colorbar(ax.get_images()[0],orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.05,fraction=0.1,boundaries=b,values=v)
        ax.set_title(title)
        if not notext:
            # Coordinate-system label in the lower-right corner.
            ax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,
                    fontweight='bold',transform=ax.transAxes)
        if cbar:
            cb.ax.text(1.05,0.30,unit,fontsize=14,fontweight='bold',
                       transform=cb.ax.transAxes,ha='left',va='center')
        f.sca(ax)
    finally:
        # Restore the interactive state and flush the drawing.
        if wasinteractive:
            pylab.ion()
            pylab.draw()
            pylab.show()
def gnomview(map=None,fig=None,rot=None,coord=None,unit='',
             xsize=200,ysize=None,reso=1.5,degree=False,
             title='Gnomonic view',nest=False,remove_dip=False,
             remove_mono=False,gal_cut=0,
             min=None,max=None,flip='astro',
             format='%g',cbar=True,
             cmap=None, norm=None,
             hold=False,sub=None,margins=None,notext=False):
    """Plot an healpix map (given as an array) in Gnomonic projection.

    Input:
      - map : an ndarray containing the map.
              if None, use map with inf value (white map), useful for
              overplotting
    Parameters:
      - fig: a figure number. Default: create a new figure
      - rot: rotation, either 1,2 or 3 angles describing the rotation
             Default: None
      - coord: either one of 'G', 'E' or 'C' to describe the coordinate
               system of the map, or a sequence of 2 of these to make
               rotation from the first to the second coordinate system.
               Default: None
      - unit: a text describing the unit. Default: ''
      - xsize: the size of the image. Default: 200
      - ysize: the size of the image. Default: xsize
      - reso: resolution in arcmin if degree is False. Default: 1.5 arcmin
      - degree: if True, reso is in degree. Default: False
      - title: the title of the plot. Default: 'Mollweide view'
      - nest: if True, ordering scheme is NEST. Default: False (RING)
      - min: the minimum range value
      - max: the maximum range value
      - flip: 'astro' (default, east towards left, west towards right) or 'geo'
      - remove_dip: if True, remove the dipole+monopole
      - remove_mono: if True, remove the monopole
      - gal_cut: galactic cut for the dipole/monopole fit
      - format: the format of the scale. Default: '%.3g'
      - hold: if True, replace the current Axes by a MollweideAxes.
              use this if you want to have multiple maps on the same
              figure. Default: False
      - sub: use a part of the current figure (same syntax as subplot).
             Default: None
      - margins: either None, or a sequence (left,bottom,right,top)
                 giving the margins on left,bottom,right and top
                 of the axes. Values are relative to figure (0-1).
                 Default: None
      - notext: True: do not add resolution info text
                Default=False
    """
    # Starting to draw : turn interactive off; restored in the finally block.
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        if map is None:
            # No data: all-inf (white) map used as an overplotting background.
            map = npy.zeros(12)+npy.inf
            cbar=False
        # Choose the target figure and the axes extent (left,bottom,w,h).
        if not (hold or sub):
            f=pylab.figure(fig,figsize=(5.5,6))
            if not margins:
                margins = (0.075,0.05,0.075,0.05)
            extent = (0.0,0.0,1.0,1.0)
        elif hold:
            # Replace the current axes in place.
            f=pylab.gcf()
            left,bottom,right,top = npy.array(pylab.gca().get_position()).ravel()
            if not margins:
                margins = (0.0,0.0,0.0,0.0)
            extent = (left,bottom,right-left,top-bottom)
            f.delaxes(pylab.gca())
        else: # using subplot syntax
            f=pylab.gcf()
            if hasattr(sub,'__len__'):
                nrows, ncols, idx = sub
            else:
                # NOTE(review): Python 2 integer division assumed here.
                nrows, ncols, idx = sub/100, (sub%100)/10, (sub%10)
            if idx < 1 or idx > ncols*nrows:
                raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
                                                                     ncols,
                                                                     idx))
            c,r = (idx-1)%ncols,(idx-1)/ncols
            if not margins:
                margins = (0.01,0.0,0.0,0.02)
            extent = (c*1./ncols+margins[0],
                      1.-(r+1)*1./nrows+margins[1],
                      1./ncols-margins[2]-margins[0],
                      1./nrows-margins[3]-margins[1])
            # NOTE(review): margins applied a second time; confirm intentional.
            extent = (extent[0]+margins[0],
                      extent[1]+margins[1],
                      extent[2]-margins[2]-margins[0],
                      extent[3]-margins[3]-margins[1])
        #f=pylab.figure(fig,figsize=(5.5,6))
        ax=PA.HpxGnomonicAxes(f,extent,coord=coord,rot=rot,
                              format=format,flipconv=flip)
        f.add_axes(ax)
        if remove_dip:
            map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,nest=nest,copy=True)
        elif remove_mono:
            map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,copy=True)
        ax.projmap(map,nest=nest,coord=coord,vmin=min,vmax=max,
                   xsize=xsize,ysize=ysize,reso=reso,cmap=cmap,norm=norm)
        if cbar:
            if matplotlib.__version__ >= '0.91.0':
                cb=f.colorbar(ax.get_images()[0],ax=ax,
                              orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.08,fraction=0.1)
            else:
                # for older matplotlib versions, no ax kwarg
                cb=f.colorbar(ax.get_images()[0],orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.08,fraction=0.1)
        ax.set_title(title)
        if not notext:
            # Annotate resolution/size, coordinate system and map center.
            ax.text(-0.07,0.02,
                    "%g '/pix, %dx%d pix"%(ax.proj.arrayinfo['reso'],
                                           ax.proj.arrayinfo['xsize'],
                                           ax.proj.arrayinfo['ysize']),
                    fontsize=12,verticalalignment='bottom',
                    transform=ax.transAxes,rotation=90)
            ax.text(-0.07,0.6,ax.proj.coordsysstr,fontsize=14,
                    fontweight='bold',rotation=90,transform=ax.transAxes)
            lon,lat = npy.around(ax.proj.get_center(lonlat=True),ax._coordprec)
            ax.text(0.5,-0.03,'on (%g,%g)'%(lon,lat),
                    verticalalignment='center', horizontalalignment='center',
                    transform=ax.transAxes)
        if cbar:
            cb.ax.text(1.05,0.30,unit,fontsize=14,fontweight='bold',
                       transform=cb.ax.transAxes,ha='left',va='center')
        f.sca(ax)
    finally:
        # Restore the interactive state and flush the drawing.
        if wasinteractive:
            pylab.ion()
            pylab.draw()
            pylab.show()
def cartview(map=None,fig=None,rot=None,zat=None,coord=None,unit='',
             xsize=800,ysize=None,lonra=None,latra=None,
             title='Cartesian view',nest=False,remove_dip=False,
             remove_mono=False,gal_cut=0,
             min=None,max=None,flip='astro',
             format='%g',cbar=True,
             cmap=None, norm=None,aspect=None,
             hold=False,sub=None,margins=None,notext=False):
    """Plot an healpix map (given as an array) in Cartesian projection.

    Input:
      - map : an ndarray containing the map.
              if None, use map with inf value (white map), useful for
              overplotting
    Parameters:
      - fig: a figure number. Default: create a new figure
      - rot: rotation, either 1,2 or 3 angles describing the rotation
             Default: None
      - coord: either one of 'G', 'E' or 'C' to describe the coordinate
               system of the map, or a sequence of 2 of these to make
               rotation from the first to the second coordinate system.
               Default: None
      - unit: a text describing the unit. Default: ''
      - xsize: the size of the image. Default: 200
      - lonra: range in longitude. Default: [-180,180]
      - latra: range in latitude. Default: [-90,90]
      - title: the title of the plot. Default: 'Mollweide view'
      - nest: if True, ordering scheme is NEST. Default: False (RING)
      - min: the minimum range value
      - max: the maximum range value
      - flip: 'astro' (default, east towards left, west towards right) or 'geo'
      - remove_dip: if True, remove the dipole+monopole
      - remove_mono: if True, remove the monopole
      - gal_cut: galactic cut for the dipole/monopole fit
      - format: the format of the scale. Default: '%.3g'
      - hold: if True, replace the current Axes by a MollweideAxes.
              use this if you want to have multiple maps on the same
              figure. Default: False
      - sub: use a part of the current figure (same syntax as subplot).
             Default: None
      - margins: either None, or a sequence (left,bottom,right,top)
                 giving the margins on left,bottom,right and top
                 of the axes. Values are relative to figure (0-1).
                 Default: None
      - notext: True: do not add resolution info text
                Default=False
    """
    # Starting to draw : turn interactive off; restored in the finally block.
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        if map is None:
            # No data: all-inf (white) map used as an overplotting background.
            map = npy.zeros(12)+npy.inf
            cbar=False
        # Choose the target figure and the axes extent (left,bottom,w,h).
        if not (hold or sub):
            f=pylab.figure(fig,figsize=(8.5,5.4))
            if not margins:
                margins = (0.075,0.05,0.075,0.05)
            extent = (0.0,0.0,1.0,1.0)
        elif hold:
            # Replace the current axes in place.
            f=pylab.gcf()
            left,bottom,right,top = npy.array(pylab.gca().get_position()).ravel()
            if not margins:
                margins = (0.0,0.0,0.0,0.0)
            extent = (left,bottom,right-left,top-bottom)
            f.delaxes(pylab.gca())
        else: # using subplot syntax
            f=pylab.gcf()
            if hasattr(sub,'__len__'):
                nrows, ncols, idx = sub
            else:
                # NOTE(review): Python 2 integer division assumed here.
                nrows, ncols, idx = sub/100, (sub%100)/10, (sub%10)
            if idx < 1 or idx > ncols*nrows:
                raise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,
                                                                     ncols,
                                                                     idx))
            c,r = (idx-1)%ncols,(idx-1)/ncols
            if not margins:
                margins = (0.01,0.0,0.0,0.02)
            extent = (c*1./ncols+margins[0],
                      1.-(r+1)*1./nrows+margins[1],
                      1./ncols-margins[2]-margins[0],
                      1./nrows-margins[3]-margins[1])
            # NOTE(review): margins applied a second time; confirm intentional.
            extent = (extent[0]+margins[0],
                      extent[1]+margins[1],
                      extent[2]-margins[2]-margins[0],
                      extent[3]-margins[3]-margins[1])
        #f=pylab.figure(fig,figsize=(5.5,6))
        # zat is an alternative way of giving the rotation (zenith at lon/lat).
        if zat and rot:
            raise ValueError('Only give rot or zat, not both')
        if zat:
            rot = npy.array(zat,dtype=npy.float64)
            rot.resize(3)
            rot[1] -= 90
        ax=PA.HpxCartesianAxes(f,extent,coord=coord,rot=rot,
                               format=format,flipconv=flip)
        f.add_axes(ax)
        if remove_dip:
            map=pixelfunc.remove_dipole(map,gal_cut=gal_cut,nest=nest,copy=True)
        elif remove_mono:
            map=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,copy=True)
        ax.projmap(map,nest=nest,coord=coord,vmin=min,vmax=max,
                   xsize=xsize,ysize=ysize,lonra=lonra,latra=latra,
                   cmap=cmap,norm=norm,aspect=aspect)
        if cbar:
            if matplotlib.__version__ >= '0.91.0':
                cb=f.colorbar(ax.get_images()[0],ax=ax,
                              orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.08,fraction=0.1)
            else:
                # for older matplotlib versions, no ax kwarg
                cb=f.colorbar(ax.get_images()[0],orientation='horizontal',
                              shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),
                              pad=0.08,fraction=0.1)
        ax.set_title(title)
        if not notext:
            # Coordinate-system label along the left edge.
            ax.text(-0.07,0.6,ax.proj.coordsysstr,fontsize=14,
                    fontweight='bold',rotation=90,transform=ax.transAxes)
        if cbar:
            cb.ax.text(1.05,0.30,unit,fontsize=14,fontweight='bold',
                       transform=cb.ax.transAxes,ha='left',va='center')
        f.sca(ax)
    finally:
        # Restore the interactive state and flush the drawing.
        if wasinteractive:
            pylab.ion()
            pylab.draw()
            pylab.show()
def graticule(dpar=None,dmer=None,coord=None,local=None,**kwds):
    """Create a graticule, either on an existing mollweide map or not.

    Parameters:
      - dpar, dmer: interval in degrees between meridians and between parallels
      - coord: the coordinate system of the graticule (make rotation if needed,
               using coordinate system of the map if it is defined)
      - local: True if local graticule (no rotation is performed)
    Return:
      None
    """
    # Suspend interactive drawing while the axes are modified.
    wasinteractive = pylab.isinteractive()
    pylab.ioff()
    try:
        f = pylab.gcf()
        if len(f.get_axes()) == 0:
            # No axes yet: create a default Mollweide axes to draw on.
            ax=PA.HpxMollweideAxes(f,(0.02,0.05,0.96,0.9),coord=coord)
            f.add_axes(ax)
            ax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,
                    fontweight='bold',transform=ax.transAxes)
        # Draw the graticule on every spherical-projection axes in the figure.
        for ax in f.get_axes():
            if isinstance(ax,PA.SphericalProjAxes):
                ax.graticule(dpar=dpar,dmer=dmer,coord=coord,
                             local=local,**kwds)
    finally:
        # Restore the interactive state and flush the drawing.
        if wasinteractive:
            pylab.ion()
            pylab.draw()
            pylab.show()
graticule.__doc__ = PA.SphericalProjAxes.graticule.__doc__
# Remove the graticule from every spherical-projection axes of the current figure.
def delgraticules():
    was_interactive = pylab.isinteractive()
    pylab.ioff()  # suspend interactive redraws while axes are modified
    try:
        for axes in pylab.gcf().get_axes():
            if not isinstance(axes, PA.SphericalProjAxes):
                continue
            axes.delgraticules()
    finally:
        # Restore interactive mode and refresh the display if it was on before.
        if was_interactive:
            pylab.ion()
            pylab.draw()
            pylab.show()
delgraticules.__doc__ = PA.SphericalProjAxes.delgraticules.__doc__
# Forward a projplot call to every spherical-projection axes of the current figure.
def projplot(*args, **kwds):
    was_interactive = pylab.isinteractive()
    pylab.ioff()  # suspend interactive redraws while plotting
    result = None
    try:
        for axes in pylab.gcf().get_axes():
            if not isinstance(axes, PA.SphericalProjAxes):
                continue
            result = axes.projplot(*args, **kwds)
    finally:
        # Restore interactive mode and refresh the display if it was on before.
        if was_interactive:
            pylab.ion()
            pylab.draw()
            pylab.show()
    return result
projplot.__doc__ = PA.SphericalProjAxes.projplot.__doc__
# Forward a projscatter call to every spherical-projection axes of the current figure.
def projscatter(*args, **kwds):
    was_interactive = pylab.isinteractive()
    pylab.ioff()  # suspend interactive redraws while plotting
    result = None
    try:
        for axes in pylab.gcf().get_axes():
            if not isinstance(axes, PA.SphericalProjAxes):
                continue
            result = axes.projscatter(*args, **kwds)
    finally:
        # Restore interactive mode and refresh the display if it was on before.
        if was_interactive:
            pylab.ion()
            pylab.draw()
            pylab.show()
    return result
projscatter.__doc__ = PA.SphericalProjAxes.projscatter.__doc__
def projtext(*args, **kwds):
    """Forward a projtext call to every spherical-projection axes of the
    current figure; return the result of the last call (None if no such
    axes exist).  (Docstring replaced below by the axes method's own.)"""
    interactive = pylab.isinteractive()
    pylab.ioff()
    result = None
    try:
        for axes in pylab.gcf().get_axes():
            if isinstance(axes, PA.SphericalProjAxes):
                result = axes.projtext(*args, **kwds)
    finally:
        # Restore interactive mode and refresh the display if it was on.
        if interactive:
            pylab.ion()
            pylab.draw()
            pylab.show()
    return result
projtext.__doc__ = PA.SphericalProjAxes.projtext.__doc__
|
[
"aryaf66@gmail.com"
] |
aryaf66@gmail.com
|
bedd1a983e70eb679bf6b6b389f94cccbaf74e18
|
1a9549ae528485844f1f40938c0e76b6b98039ad
|
/PyLab/W143.py
|
6ec6a74f7e79309a8dd8422a57a6f25132a18475
|
[] |
no_license
|
junzhougriffith/test
|
55c584dd83c85952984597fb043bcbe8e7dfb5fc
|
4264cffc999a797edb3b308eceb2000158717d7e
|
refs/heads/master
| 2020-12-30T11:02:39.963987
| 2017-08-01T02:32:58
| 2017-08-01T02:32:58
| 98,840,061
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
# Problem: read two integers from the keyboard. Given the two inclusive
# ranges -8..-4 and 10..16, print True when the integers agree on range
# membership -- i.e. both fall inside one of these ranges, or both fall
# outside them. Print False otherwise.
# Constraint: the if / if-else statement must not be used.
#
# Example inputs/outputs:
#   10 50  -> False
#   20 5   -> True
#   100 20 -> True
#   -5 20  -> False
#   -5 12  -> True
#   -10 0  -> True
first = int(input())
second = int(input())
# Membership in either allowed range, expressed with chained comparisons.
first_in = -8 <= first <= -4 or 10 <= first <= 16
second_in = -8 <= second <= -4 or 10 <= second <= 16
# True exactly when both membership flags agree (both in, or both out).
print(first_in == second_in)
|
[
"jun.zhou@griffith.edu.au"
] |
jun.zhou@griffith.edu.au
|
8b1868de8d717f8748a3d707ae66b4199e840443
|
e7c6f77ede337fee8f2af1d77e2bbf24627d2ec7
|
/python/CreateVectorResources.py
|
05f2113cb954e6977e5b6003474d359a112d19b8
|
[
"Apache-2.0"
] |
permissive
|
blockspacer/DeepSea
|
3c2488e39bd1a85c8b02b7b53651ab6fb69153e9
|
960237e3fff5dd4a7fa32cb4057a4397319bc279
|
refs/heads/master
| 2021-05-24T18:19:57.018596
| 2020-04-06T03:01:34
| 2020-04-06T03:01:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,443
|
py
|
#!/usr/bin/env python
# Copyright 2018-2020 Aaron Barany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import json
import os
import shutil
import subprocess
import sys
import tempfile
import flatbuffers
from DeepSeaVectorDraw.FaceGroup import *
from DeepSeaVectorDraw.FileOrData import *
from DeepSeaVectorDraw.FileReference import *
from DeepSeaVectorDraw.Font import *
from DeepSeaVectorDraw.FontCacheSize import *
from DeepSeaVectorDraw.FontQuality import *
from DeepSeaVectorDraw.RawData import *
from DeepSeaVectorDraw.Resource import *
from DeepSeaVectorDraw.VectorResources import *
class VectorResources:
    """Class containing the information for vector resources to be saved in FlatBuffer format."""
    def __init__(self, cuttlefishTool = 'cuttlefish'):
        """
        Constructs this in the default state.

        cuttlefish is used to convert images into textures. By default it will check for
        "cuttlefish" on PATH, but the path may be passed into the constructor.
        """
        self.cuttlefish = cuttlefishTool
        # Sensible defaults so save() is well-defined even when load() was
        # given a description missing one of the sections (previously this
        # raised AttributeError in save()).
        self.basePath = None
        self.textures = []
        self.faceGroups = []
        self.fonts = []

    def load(self, contents, basePath = None):
        """
        Loads the information for the VectorResources from a map.

        basePath: The base path to load relative paths from, typically relative to the original
        file.

        Raises Exception when a font references an unknown face group or an
        unknown face within its face group.

        json format:
        {
            "textures":
            [
                {
                    "name": "<name used to reference the texture>" (optional, defaults to path
                        filename without extension),
                    "path": "<path to image>",
                    "format": "<texture format; see cuttlefish help for details>",
                    "type": "<texture channel type; see cuttlefish help for details>",
                    "srgb": <true|false> (optional, defaults to false),
                    "size": [<width>, <height>] (optional),
                    "quality": "<lowest|low|normal|high|highest>" (optional),
                    "container": "<pvr|dds|ktx>" (optional, defaults to pvr),
                    "embed": <true|false to embed in resource file> (optional, defaults to false)
                },
                ...
            ],
            "faceGroups":
            [
                {
                    "name": "<name used to reference the face group>",
                    "faces":
                    [
                        {
                            "name": "<name used to reference the font>" (optional),
                            "path": "<path to font file>"
                        },
                        ...
                    ]
                },
                ...
            ],
            "fonts":
            [
                {
                    "name": "<name used to reference the font>",
                    "faceGroup": "<faceGroup name">,
                    "faces":
                    [
                        "<font in faceGroup>",
                        ...
                    ],
                    "quality": "<low|medium|high|veryhigh>",
                    "cacheSize": "<small|large>" (optional, defaults to large),
                    "embed": <true|false to embed in resource file> (optional, defaults to false)
                }
            ]
        }
        """
        self.basePath = basePath
        if 'textures' in contents:
            self.textures = contents['textures']
        if 'faceGroups' in contents:
            self.faceGroups = contents['faceGroups']
        if 'fonts' in contents:
            self.fonts = contents['fonts']

        # Validate that each font references an existing face group and that
        # every face it lists exists within that group.
        faceGroupFaces = {}
        for faceGroup in self.faceGroups:
            name = faceGroup['name']
            faceGroupFaces[name] = {}
            for face in faceGroup['faces']:
                faceGroupFaces[name][face['name']] = face['path']
        for font in self.fonts:
            if font['faceGroup'] not in faceGroupFaces:
                raise Exception('Face group "' + font['faceGroup'] + '" not present.')
            faces = faceGroupFaces[font['faceGroup']]
            for face in font['faces']:
                if face not in faces:
                    raise Exception(
                        'Face "' + face + '" not in face group "' + font['faceGroup'] + '"')

    def loadJson(self, json, basePath = None):
        """Loads from a string containing json data. See load() for expected json format."""
        # Fix: the 'json' parameter (name kept for backward compatibility)
        # shadows the json module, so 'json.loads(json)' always raised
        # AttributeError on the str argument. Re-import the module locally
        # under an alias instead.
        import json as _json
        self.load(_json.loads(json), basePath)

    def loadStream(self, stream, basePath = None):
        """Loads from a stream containing json data. See load() for expected json format."""
        self.load(json.load(stream), basePath)

    def loadFile(self, jsonFile):
        """Loads from a json file. See load() for expected json format."""
        with open(jsonFile) as f:
            self.load(json.load(f), os.path.dirname(jsonFile))

    def save(self, outputPath, quiet = False, multithread = True):
        """
        Saves the vector resources.

        This will create a directory named "<filename w/o extension>_resources" in order to hold
        the textures and font files. When moving the resources around, the file and directory
        should stay together.

        outputPath: path of the FlatBuffer file to write.
        quiet: suppress cuttlefish output during texture conversion.
        multithread: allow cuttlefish to convert textures on multiple threads.
        """
        (root, filename) = os.path.split(outputPath)
        resourceDirName = os.path.splitext(filename)[0] + '_resources'
        resourceDir = os.path.join(root, resourceDirName)
        def createResourceDir():
            # Lazily create the companion resource directory on first use.
            if not os.path.exists(resourceDir):
                os.makedirs(resourceDir)
        def createResourceData(filePath, outputName, removeEmbedded = False):
            # Build either a FileReference (external file, when outputName is
            # set) or a RawData (embedded bytes) table; returns the FileOrData
            # enum value together with the table offset.
            if outputName:
                pathOffset = builder.CreateString(outputName.replace('\\', '/'))
                FileReferenceStart(builder)
                FileReferenceAddPath(builder, pathOffset)
                return FileOrData.FileReference, FileReferenceEnd(builder)
            else:
                with open(filePath, 'r+b') as dataFile:
                    dataOffset = builder.CreateByteVector(dataFile.read())
                if removeEmbedded:
                    os.remove(filePath)
                RawDataStart(builder)
                RawDataAddData(builder, dataOffset)
                return FileOrData.RawData, RawDataEnd(builder)

        builder = flatbuffers.Builder(0)

        # Convert each image to a texture with cuttlefish, either embedded in
        # the FlatBuffer or written next to it in the resource directory.
        textureOffsets = []
        with tempfile.NamedTemporaryFile() as tempFile:
            for texture in self.textures:
                path = texture['path']
                if 'name' in texture:
                    name = texture['name']
                else:
                    name = os.path.splitext(os.path.basename(path))[0]
                if 'container' in texture:
                    extension = '.' + texture['container']
                else:
                    extension = '.pvr'

                embed = 'embed' in texture and texture['embed']
                if embed:
                    textureOutputPath = tempFile.name + extension
                    outputName = None
                else:
                    createResourceDir()
                    outputName = os.path.join(resourceDirName, name + extension)
                    textureOutputPath = os.path.join(root, outputName)

                commandLine = [self.cuttlefish, '-i', os.path.join(self.basePath, path),
                    '-o', textureOutputPath, '-f', texture['format'], '-t', texture['type']]
                if quiet:
                    commandLine.append('-q')
                if multithread:
                    commandLine.append('-j')
                if 'srgb' in texture and texture['srgb']:
                    commandLine.append('--srgb')
                if 'size' in texture:
                    size = texture['size']
                    # Fix: command-line arguments must be strings; the json
                    # values are numbers.
                    commandLine.extend(['-r', str(size[0]), str(size[1])])
                if 'quality' in texture:
                    commandLine.extend(['-Q', texture['quality'].lower()])

                if not quiet:
                    print('Converting texture "' + path + '"...')
                    sys.stdout.flush()
                try:
                    subprocess.check_call(commandLine)
                    nameOffset = builder.CreateString(name)
                    dataType, dataOffset = createResourceData(textureOutputPath, outputName, True)
                except:
                    # Clean up a partially-written texture before re-raising.
                    if os.path.isfile(textureOutputPath):
                        os.remove(textureOutputPath)
                    raise

                ResourceStart(builder)
                ResourceAddName(builder, nameOffset)
                ResourceAddDataType(builder, dataType)
                ResourceAddData(builder, dataOffset)
                textureOffsets.append(ResourceEnd(builder))

        VectorResourcesStartTexturesVector(builder, len(textureOffsets))
        for offset in reversed(textureOffsets):
            builder.PrependUOffsetTRelative(offset)
        texturesOffset = builder.EndVector(len(textureOffsets))

        # Copy (or embed) the font face files, grouped by face group.
        faceGroupOffsets = []
        for faceGroup in self.faceGroups:
            nameOffset = builder.CreateString(faceGroup['name'])
            faces = faceGroup['faces']
            faceOffsets = []
            for face in faces:
                name = face['name']
                path = face['path']
                embed = 'embed' in face and face['embed']
                if embed:
                    outputName = None
                    fontOutputPath = os.path.join(self.basePath, path)
                else:
                    # Fix: preserve the face file's own extension. This
                    # previously used the leftover 'extension' variable from
                    # the texture loop, which produced the wrong value and a
                    # NameError when no textures were processed.
                    outputName = os.path.join(resourceDirName,
                        name + os.path.splitext(path)[1])
                    fontOutputPath = os.path.join(root, outputName)
                    createResourceDir()
                    shutil.copyfile(os.path.join(self.basePath, path), fontOutputPath)
                faceNameOffset = builder.CreateString(name)
                dataType, dataOffset = createResourceData(fontOutputPath, outputName)
                ResourceStart(builder)
                ResourceAddName(builder, faceNameOffset)
                ResourceAddDataType(builder, dataType)
                ResourceAddData(builder, dataOffset)
                faceOffsets.append(ResourceEnd(builder))

            FaceGroupStartFacesVector(builder, len(faceOffsets))
            for offset in reversed(faceOffsets):
                builder.PrependUOffsetTRelative(offset)
            facesOffset = builder.EndVector(len(faceOffsets))

            FaceGroupStart(builder)
            FaceGroupAddName(builder, nameOffset)
            FaceGroupAddFaces(builder, facesOffset)
            faceGroupOffsets.append(FaceGroupEnd(builder))

        VectorResourcesStartFaceGroupsVector(builder, len(faceGroupOffsets))
        for offset in reversed(faceGroupOffsets):
            builder.PrependUOffsetTRelative(offset)
        faceGroupsOffset = builder.EndVector(len(faceGroupOffsets))

        # Serialize the fonts, mapping the quality/cacheSize strings to the
        # FlatBuffer enum values.
        fontOffsets = []
        qualityValues = {'low': FontQuality.Low, 'medium': FontQuality.Medium, \
            'high': FontQuality.High, 'veryhigh': FontQuality.VeryHigh}
        cacheValues = {'small': FontCacheSize.Small, 'large': FontCacheSize.Large}
        for font in self.fonts:
            nameOffset = builder.CreateString(font['name'])
            faceGroupOffset = builder.CreateString(font['faceGroup'])
            faces = font['faces']
            faceOffsets = []
            for face in faces:
                faceOffsets.append(builder.CreateString(face))
            FontStartFacesVector(builder, len(faceOffsets))
            for offset in reversed(faceOffsets):
                builder.PrependUOffsetTRelative(offset)
            facesOffset = builder.EndVector(len(faceOffsets))

            FontStart(builder)
            FontAddName(builder, nameOffset)
            FontAddFaceGroup(builder, faceGroupOffset)
            FontAddFaces(builder, facesOffset)
            FontAddQuality(builder, qualityValues[font['quality'].lower()])
            if 'cacheSize' in font:
                cacheSize = cacheValues[font['cacheSize'].lower()]
            else:
                cacheSize = FontCacheSize.Large
            FontAddCacheSize(builder, cacheSize)
            fontOffsets.append(FontEnd(builder))

        VectorResourcesStartFontsVector(builder, len(fontOffsets))
        for offset in reversed(fontOffsets):
            builder.PrependUOffsetTRelative(offset)
        fontsOffset = builder.EndVector(len(fontOffsets))

        # Assemble the root table and write the binary file.
        VectorResourcesStart(builder)
        VectorResourcesAddTextures(builder, texturesOffset)
        VectorResourcesAddFaceGroups(builder, faceGroupsOffset)
        VectorResourcesAddFonts(builder, fontsOffset)
        builder.Finish(VectorResourcesEnd(builder))

        with open(outputPath, 'wb') as f:
            f.write(builder.Output())
if __name__ == '__main__':
    # Command-line entry point: read a json resource description and write
    # the FlatBuffer resource file (plus its companion resource directory).
    parser = argparse.ArgumentParser(description =
        'Create vector resources to be used by Deep Sea.')
    parser.add_argument('-i', '--input', required = True,
        help = 'input json description of the resources')
    parser.add_argument('-o', '--output', required = True,
        help = 'output file name, typically with the extension ".dsvr"')
    parser.add_argument('-c', '--cuttlefish', default = 'cuttlefish',
        help = 'path to the cuttlefish tool for texture conversion')
    parser.add_argument('-j', '--multithread', default = False, action = 'store_true',
        help = 'multithread texture conversion')

    args = parser.parse_args()
    resources = VectorResources(args.cuttlefish)
    resources.loadFile(args.input)
    # Fix: the -j/--multithread flag was parsed but never forwarded, so it
    # had no effect; pass it through to save().
    resources.save(args.output, multithread = args.multithread)
|
[
"akb825@gmail.com"
] |
akb825@gmail.com
|
76c7974719833f81282c2f4e3e9f1b7e83b3aa14
|
480ce26b01e99883dd4cce55fe1f90f997438b79
|
/backend/manage.py
|
f6b2fe3d35be0f446ae9a54113d3d9119db161b8
|
[] |
no_license
|
crowdbotics-apps/test-app-2234-dev-2488
|
57ed6ae2d60e14117bac4df70c6a9e4471aac7be
|
acbbb1192c0c8dfc983295211e366f9b0ca70195
|
refs/heads/master
| 2023-02-06T21:42:56.253812
| 2020-04-09T07:10:43
| 2020-04-09T07:10:43
| 254,288,533
| 0
| 0
| null | 2023-01-24T03:10:26
| 2020-04-09T06:22:41
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line administrative tasks for this project."""
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_app_2234_dev_2488.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint while chaining the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == "__main__":
    main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ffee62b035fd663cc4e39e36ce36e355f53b9cba
|
3933c37c63a66c5440baba22d008a11353cd3846
|
/app/modules/frontend/__init__.py
|
233d4e4dff2563beb1508aa0c5711286ca6f4766
|
[
"MIT"
] |
permissive
|
ivanklee86/hello-world
|
0290d1b7616d4c9f6f1ec8ab6efe43a32c5574aa
|
109764ddf990d806330c8083ad0bfdccce626be3
|
refs/heads/master
| 2023-03-01T13:08:37.127735
| 2021-10-06T20:25:45
| 2021-10-06T20:25:45
| 144,956,092
| 0
| 0
|
MIT
| 2023-02-15T19:53:27
| 2018-08-16T07:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 115
|
py
|
def init_app(app, **kwargs):
    """Register the frontend blueprint on the given application.

    :param app: application instance providing ``register_blueprint``
    :param kwargs: accepted but unused here — presumably kept so all modules'
        ``init_app`` hooks share one signature; confirm against callers.
    """
    # Imported inside the function rather than at module top — likely to
    # defer the import until the app exists (e.g. avoiding a circular
    # import); NOTE(review): confirm.
    from . import resources
    app.register_blueprint(resources.frontend_blueprint)
|
[
"ivanklee86@gmail.com"
] |
ivanklee86@gmail.com
|
88882f70604cdef53e37845543b4e88140621857
|
6047442241543e6ce358ebd6037d0d8a9a3a87b4
|
/hsapp/serializers.py
|
320e4561b0f73129985467d1df184240f13e7f25
|
[
"MIT"
] |
permissive
|
jkbm/esports
|
806e999cf4f6b1de47856616b0fedeaa2e87026e
|
5b62f13df39698a7d903dc32cc83a9e17642cd80
|
refs/heads/master
| 2022-12-14T11:11:09.955288
| 2017-11-05T21:07:54
| 2017-11-05T21:07:54
| 102,506,785
| 0
| 0
|
NOASSERTION
| 2022-11-22T01:52:25
| 2017-09-05T16:47:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
from rest_framework import serializers
from .models import Player, Tournament, Match
class PlayerSerializer(serializers.ModelSerializer):
    """Serializes Player instances, exposing only name and country."""
    class Meta:
        model = Player
        fields = ('name', 'country')
class TournamentSerializer(serializers.ModelSerializer):
    """Serializes Tournament instances.

    The winner is rendered as a hyperlink to the player detail view, and the
    participating players are rendered by name.
    """
    # Read-only hyperlink to the winning player's detail endpoint.
    winner = serializers.HyperlinkedRelatedField(
        read_only=True,
        view_name='hs:player_detail'
    )
    # Read-only list of participant names (slug is the player's name field).
    players = serializers.SlugRelatedField(
        many=True,
        read_only=True,
        slug_field='name')
    class Meta:
        model = Tournament
        fields = ('title', 'start_date', 'end_date', 'winner',
                  'players', 'groups', 'format', 'image')
class MatchSerializer(serializers.ModelSerializer):
    """Serializes Match instances, rendering both players and the winner by
    their name rather than by primary key."""
    player1 = serializers.SlugRelatedField(read_only=True, slug_field='name')
    player2 = serializers.SlugRelatedField(read_only=True, slug_field='name')
    winner = serializers.SlugRelatedField(read_only=True, slug_field='name')
    class Meta:
        model = Match
        fields = ('date', 'time', 'stage', 'format',
                  'player1', 'player2', 'winner', 'score')
|
[
"bigman33321@hotmail.com"
] |
bigman33321@hotmail.com
|
5247dbfa3c360228cdbc809dbed853bc2a5614e1
|
d2ee771480799cd89dad61de5b7bda9f772d656a
|
/XProject/camshift.py
|
f7f56156d57a26e006d16aa4c7fe8cd0ecd62be3
|
[] |
no_license
|
CuriosityCreations/OpencvProject
|
b20a767405ca90263d3fcc774bfd6eda532a9719
|
7b3af1442eb811dd32e19ca958f05cede38b209f
|
refs/heads/master
| 2020-05-21T20:30:15.627092
| 2016-09-28T04:36:18
| 2016-09-28T04:36:18
| 64,070,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
# CAMShift tracking demo: track a hard-coded region of interest through a
# video by back-projecting its hue histogram frame by frame.
import numpy as np
import cv2
cap = cv2.VideoCapture('inputcar.avi')
# take first frame of the video
ret,frame = cap.read()
# setup initial location of window
r,h,c,w = 250,90,400,125 # simply hardcoded the values
track_window = (c,r,w,h)
# set up the ROI for tracking
roi = frame[r:r+h, c:c+w]
# NOTE(review): this converts the *whole frame*, not the 'roi' slice above,
# despite the variable name — so the histogram below is built from the full
# (masked) frame; confirm whether 'roi' was intended here.
hsv_roi = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Mask out dark / desaturated pixels so they don't pollute the hue histogram.
mask = cv2.inRange(hsv_roi, np.array((0., 60.,32.)), np.array((180.,255.,255.)))
# Hue-only histogram (channel 0, 180 bins) used for back-projection.
roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
# Setup the termination criteria, either 10 iteration or move by atleast 1 pt
term_crit = ( cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1 )
while(1):
    ret ,frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Probability map of each pixel belonging to the tracked histogram.
        dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
        # apply meanshift to get the new location
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        # Draw it on image
        pts = cv2.boxPoints(ret)
        pts = np.int0(pts)
        img2 = cv2.polylines(frame,[pts],True, 255,2)
        cv2.imshow('img2',img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:
            # ESC pressed: stop tracking.
            break
        else:
            # NOTE(review): with no key pressed waitKey returns -1, so
            # k == 255 here and a file named chr(255)+".jpg" is written on
            # every frame — probably meant to run only on an actual keypress.
            cv2.imwrite(chr(k)+".jpg",img2)
    else:
        # End of video (or read failure).
        break
cv2.destroyAllWindows()
cap.release()
|
[
"noreply@github.com"
] |
CuriosityCreations.noreply@github.com
|
f2ab002d34be50716c362ee13e0453bb1d824bff
|
675a56c032f95eb6bc58f0c560ff6e5b21121753
|
/leetcode/book/2.4.2.py
|
5b73f1b32eca5b2163b48bcc0f07deba54d03d57
|
[] |
no_license
|
1290259791/Python
|
f5b0ab6f92088734429c27aa83ab2d69a1125bed
|
829cc71762c2a832aa2fa08d30da8f256e59d0f7
|
refs/heads/master
| 2020-03-16T19:54:17.562579
| 2018-05-10T18:07:35
| 2018-05-10T18:07:35
| 132,930,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 634
|
py
|
def MaxSum(array, n):
    """Maximum product of a contiguous subarray (dynamic programming).

    Despite the legacy name, this computes the maximum *product*. It tracks
    both the largest and the smallest product of a subarray ending at each
    position, because multiplying by a negative number can turn the current
    minimum into the new maximum:

        Max[i] = max(a[i], Max[i-1]*a[i], Min[i-1]*a[i])
        Min[i] = min(a[i], Max[i-1]*a[i], Min[i-1]*a[i])

    :param array: sequence of numbers
    :param n: number of leading elements of ``array`` to consider
    :return: the maximum product (also printed, preserving the original
        behavior); ``None`` when ``n <= 0`` (previously an IndexError)
    """
    if n <= 0:
        return None
    # Only the previous position's extrema are needed, so O(1) space
    # replaces the original O(n) helper arrays.
    best = array[0]   # largest product of a subarray ending here
    worst = array[0]  # smallest (most negative) product ending here
    value = best      # best product seen anywhere so far
    for i in range(1, n):
        candidates = (array[i], best * array[i], worst * array[i])
        best = max(candidates)
        worst = min(candidates)
        value = max(value, best)
    print(value)
    return value
List = [-2, -3, 8, -5, -2]
MaxSum(List, len(List))
|
[
"hubo@Mac.local"
] |
hubo@Mac.local
|
44dd2f1cb1beb44a55ed1c20e6910d3e5f9e83aa
|
f104c152e75cd791aed39755ab9144aab8773be6
|
/param_analysis.py
|
720c84d8f389dfbc4834ecec163fdc26dfc09bd4
|
[] |
no_license
|
vaibhav0195/AdhdAnalysis
|
b11984c8bb9ec876d8ba5a896976635bb7e9f91a
|
aa341533ec15a05ead6a9a8983fc5b3334b74ae9
|
refs/heads/master
| 2023-04-10T17:29:45.409513
| 2021-04-16T18:09:23
| 2021-04-16T18:09:23
| 352,808,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,529
|
py
|
seance_categories = 'nwords,Admiration/Awe_GALC,Amusement_GALC,Anger_GALC,Anxiety_GALC,Beingtouched_GALC,Boredom_GALC,Compassion_GALC,Contempt_GALC,Contentment_GALC,Desperation_GALC,Disappointment_GALC,Disgust_GALC,Dissatisfaction_GALC,Envy_GALC,Fear_GALC,Feelinglove_GALC,Gratitude_GALC,Guilt_GALC,Happiness_GALC,Hatred_GALC,Hope_GALC,Humility_GALC,Interest/Enthusiasm_GALC,Irritation_GALC,Jealousy_GALC,Joy_GALC,Longing_GALC,Lust_GALC,Pleasure/Enjoyment_GALC,Pride_GALC,Relaxation/Serenity_GALC,Relief_GALC,Sadness_GALC,Shame_GALC,Surprise_GALC,Tension/Stress_GALC,Positive_GALC,Negative_GALC,Anger_EmoLex,Anticipation_EmoLex,Disgust_EmoLex,Fear_EmoLex,Joy_EmoLex,Negative_EmoLex,Positive_EmoLex,Sadness_EmoLex,Surprise_EmoLex,Trust_EmoLex,Valence,Valence_nwords,Arousal,Arousal_nwords,Dominance,Dominance_nwords,pleasantness,attention,sensitivity,aptitude,polarity,vader_negative,vader_neutral,vader_positive,vader_compound,hu_liu_pos_perc,hu_liu_neg_perc,hu_liu_pos_nwords,hu_liu_neg_nwords,hu_liu_prop,Positiv_GI,Negativ_GI,Pstv_GI,Affil_GI,Ngtv_GI,Hostile_GI,Strong_GI,Power_GI,Weak_GI,Submit_GI,Active_GI,Passive_GI,Pleasur_GI,Pain_GI,Feel_GI,Arousal_GI,Emot_GI,Virtue_GI,Vice_GI,Ovrst_GI,Undrst_GI,Academ_GI,Doctrin_GI,Econ_2_GI,Exch_GI,Econ_GI,Exprsv_GI,Legal_GI,Milit_GI,Polit_2_GI,Polit_GI,Relig_GI,Role_GI,Coll_GI,Work_GI,Ritual_GI,Socrel_GI,Race_GI,Kin_2_GI,Male_GI,Female_GI,Nonadlt_GI,Hu_GI,Ani_GI,Place_GI,Social_GI,Region_GI,Route_GI,Aquatic_GI,Land_GI,Sky_GI,Object_GI,Tool_GI,Food_GI,Vehicle_GI,Bldgpt_GI,Comnobj_GI,Natobj_GI,Bodypt_GI,Comform_GI,Com_GI,Say_GI,Need_GI,Goal_GI,Try_GI,Means_GI,Persist_GI,Complet_GI,Fail_GI,Natrpro_GI,Begin_GI,Vary_GI,Increas_GI,Decreas_GI,Finish_GI,Stay_GI,Rise_GI,Exert_GI,Fetch_GI,Travel_GI,Fall_GI,Think_GI,Know_GI,Causal_GI,Ought_GI,Perceiv_GI,Compare_GI,Eval_2_GI,Eval_GI,Solve_GI,Abs_2_GI,Abs_GI,Quality_GI,Quan_GI,Numb_GI,Ord_GI,Card_GI,Freq_GI,Dist_GI,Time_2_GI,Time_GI,Space_GI,Pos_GI,Dim_GI,Rel_GI,Color_GI,Self_GI,Our_GI,You_GI,Name_GI,
Yes_GI,No_GI,Negate_GI,Intrj_GI,Iav_GI,Dav_GI,Sv_GI,Ipadj_GI,Indadj_GI,Powgain_Lasswell,Powloss_Lasswell,Powends_Lasswell,Powaren_Lasswell,Powcon_Lasswell,Powcoop_Lasswell,Powaupt_Lasswell,Powpt_Lasswell,Powdoct_Lasswell,Powauth_Lasswell,Powoth_Lasswell,Powtot_Lasswell,Rcethic_Lasswell,Rcrelig_Lasswell,Rcgain_Lasswell,Rcloss_Lasswell,Rcends_Lasswell,Rctot_Lasswell,Rspgain_Lasswell,Rsploss_Lasswell,Rspoth_Lasswell,Rsptot_Lasswell,Affgain_Lasswell,Affloss_Lasswell,Affpt_Lasswell,Affoth_Lasswell,Afftot_Lasswell,Wltpt_Lasswell,Wlttran_Lasswell,Wltoth_Lasswell,Wlttot_Lasswell,Wlbgain_Lasswell,Wlbloss_Lasswell,Wlbphys_Lasswell,Wlbpsyc_Lasswell,Wlbpt_Lasswell,Wlbtot_Lasswell,Enlgain_Lasswell,Enlloss_Lasswell,Enlends_Lasswell,Enlpt_Lasswell,Enloth_Lasswell,Enltot_Lasswell,Sklasth_Lasswell,Sklpt_Lasswell,Skloth_Lasswell,Skltot_Lasswell,Trngain_Lasswell,Trnloss_Lasswell,Tranlw_Lasswell,Meanslw_Lasswell,Endslw_Lasswell,Arenalw_Lasswell,Ptlw_Lasswell,Nation_Lasswell,Anomie_Lasswell,Negaff_Lasswell,Posaff_Lasswell,Surelw_Lasswell,If_Lasswell,Notlw_Lasswell,Timespc_Lasswell,formlw_Lasswell,negative_adjectives_component,social_order_component,action_component,positive_adjectives_component,joy_component,affect_friends_and_family_component,fear_and_digust_component,politeness_component,polarity_nouns_component,polarity_verbs_component,virtue_adverbs_component,positive_nouns_component,respect_component,trust_verbs_component,failure_component,well_being_component,economy_component,certainty_component,positive_verbs_component,objects_component'
# For all indicies from SEANCE
#lr_params = [-1.9676686305193278e-05,111.59295946802287,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-65.79307688033919,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,13.781383579030607,0.0,-7.098704168760668,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-38.168525175150094,0.0,0.044705597166679656,42.685203612163235,0.0,-18.680046630347785,-18.153378461604703,-20.895755108664005,0.0,14.545756950719944,0.1078524950491247,-1.3764158728118392,0.026864553615880384,0.0,-0.10809258393835157,-1.164900193699859,5.409293858410309,-2.4916577822806474,-3.3446229839506305,-5.884684056243014,-1.919821157462971,-7.604468161361336,0.4429908863239393,3.3727717338737504,-0.5403473460924434,0.34785147841333575,-1.38014538681266,8.766063836953325,1.146695903696704,-0.17068932685940727,-10.96653880963947,-2.5955435320053035,-26.085397105022853,-15.196696095281816,-3.9682924581372596,-5.210654059753168,-15.045636467750517,0.0,0.0,8.395554108775707,0.5200183988739043,-16.859559279055908,0.0,0.0,0.0,0.0,0.0,12.129202934648838,28.02361304021337,6.696582574299313,7.690199464862953,70.30507479709465,0.0,0.0,0.0,-25.349313628230078,30.5236310254976,8.884539209504823,0.0,0.0,0.0,0.0,0.0,66.61860874141402,-16.623458989504506,6.517292452126836,-6.487338125693732,0.0,-55.14086794695408,-68.47760828818767,-53.39924944003924,0.0,0.0,0.0,-16.130533483483227,2.991935147553716,0.0,0.0,0.0,0.0,0.0,8.523640507073127,6.565764622727236,0.0,0.0,0.0,-1.734735091229451,-35.53355766796724,35.44775715158875,-25.222882450175725,0.0,-6.089872137894065,-0.2550904008367392,0.0,2.6920108310996693,-21.586038288101445,17.63324628921828,0.0,-16.46101650737262,1.1314565024312955,0.0,14.316658354751691,14.88933817847782,0.0,-21.43665115159897,0.0,0.0,-1.2104909232298469,0.0,0.0,0.0,0.0,1.9197407962545001,-6.132822846324047,0.0,13.822619993029795,0.0,0.0,29.81685596322505,0.0,-5.2300796426284535,0.0,1.9882173436687856,-12.002738513521654,0.0,0.0,0.0,-29.262479330750484,0.0,0.0,17.375056951789666,0.0,0.0,3.018802115437115,0.0
,0.0,19.073634268948126,-70.16082276376783,9.855130779976871,36.652727707606985,0.0,0.0,2.376971063792762,0.0,26.052743775129247,-5.4083382325090215,13.78293477154322,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-106.95731911362378,-58.48324313217828,0.0,0.0,25.169850112366838,0.0,6.035850188461939,-32.00268953864018,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-23.005800241444955,0.0,0.0,-51.50281530119379,49.657124799785734,-99.94052397318042,0.0,0.0,0.0,0.0,21.076754765716053,23.040607893279233,0.0,-18.61556773908253,-8.337851601570206,11.295871154108617,-4.140151149068755,0.0,0.0,0.0,0.0,0.0,-2.1978060425775374,0.0,13.48735011974021,0.0,0.019345437297159332,25.11226910066915,6.1342366273559845,-5.380580527845692,5.390820439209932,-0.7525131982005955,0.0,29.640832425767325,16.040457352772442,0.0,0.0,0.0,-20.51896873621568,2.5986087317243514,0.0,8.14988708826119,21.853215558554204,-0.34922357441640245,2.4919368077604234,2.078468691784888,0.3781389006661364,0.48372513467051004,5.671422132887349,3.6575895323242658,-0.8460114809414128,-0.9840184594082542,0.005611093108750811,0.7125754917856094,0.06871116387931901,4.906891912392948,2.807766882705047,6.057800309760228,2.86751091345395,1.6226081393765885,3.518229599379164,-0.039366786715963095,0.8220515632300275]
#svm_params = [-4.291659476934092e-06,25.38172638760338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-6.3000213485544,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.36134204477961,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-11.90348039478183,0.0,0.0,11.147261667248866,-1.627982802158003,-4.110618021110179,-5.3801781497492875,-5.051254384512322,0.0,4.783737929216325,0.08848528164792109,-0.5711867246340318,0.0332211027037219,0.048793087826133175,-0.12033586797390064,0.0,1.09112583606846,-0.6614952014841987,-1.0017416872587581,-1.7887187866213736,-0.005718814708961474,-1.9875559081523628,0.42308934503034695,1.505847122331266,-0.1418127052445384,-0.003236824565703113,-0.4480953062844248,1.9536719600495978,0.0,-0.047590023488084734,-1.3904121325102936,0.0,-8.478092538083802,-4.37021722154157,-1.388573292039676,-1.2077772732599301,-4.316350342921308,0.0,0.0,1.8216308703429747,0.0,-4.4998444086972,0.0,0.0,0.0,0.0,0.0,3.5312408289151245,6.424117392839377,1.568877310004739,1.386741239749393,16.391480045105773,0.0,0.0,0.0,-7.11663055214363,7.933282071984499,0.9099830170215273,0.0,0.0,0.0,0.0,0.0,18.08251352196888,-3.6704328146643443,1.2262567478122048,-1.626732939981033,0.0,-14.274936797021642,-20.99556061132937,-15.96167377078311,0.0,0.0,-0.1341399236565043,-4.4113499842956605,2.0623811160303807,0.0,0.0,0.0,0.0,0.0,1.5940899536745092,1.0078814817226622,0.0,0.0,0.0,-0.8846655425460486,-10.64650069220579,10.892452024373458,-6.877768300303171,0.0,-0.8099266842320058,-1.225601016554477,0.0,0.0,-5.5931679890204276,5.6315039751498395,0.0,-3.969516656450305,0.233864963928846,0.0,4.649505650487774,4.260514450156241,0.0,-6.453576544137398,0.03982462025771515,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-2.270693931610377,0.0,3.1510434739537825,0.0,0.0,6.940653691118402,0.0,-1.4260691451739707,0.0,0.0,-2.7660673053246523,0.0,0.0,0.0,-8.348326242388039,0.0,0.0,4.453261232565251,0.0,0.0,0.2796746347611594,0.0,0.0,5.21883971643005,-19.654618757801668,2.945161316027823,5.029474854920243,0.0,0.0,1.2
8559496966217,0.14742998937114057,7.313379198046306,-1.641040049176929,3.5347086968020167,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-29.938657183613177,-14.615392706865359,0.0,0.0,7.2586260167284395,0.0,1.01830172456574,-7.475297613710808,0.0,0.0,0.0,0.0,0.0,0.0,0.0,-7.880602775838318,0.0,0.0,-9.041687200233707,17.853161112253574,-29.13860394299851,0.0,0.0,0.0,0.6145604617255818,3.917735804051427,8.42511985345347,0.0,-3.4964483610626944,-2.0468197991694668,0.9068968327624675,-0.5497176883222363,0.0,0.0,0.0,0.0,0.0,-0.10247642483560493,0.0,0.0,0.0,1.2362605109610911,6.162954545818725,1.6357142445807176,-0.7367503380762149,1.7007914038861518,0.0,0.0,7.913363719524639,4.1797375900344145,0.0,0.0,0.0,-4.757814358222161,0.6827440322995454,0.0,1.9404191723208832,5.666249249562594,-0.09879592825177896,0.7352443047808457,0.5246174969283562,0.13393517793936072,0.10130052462220124,1.4515899049817877,0.8932184886261251,-0.25636101034268033,-0.3010862648108571,0.0010155122413111253,0.15175412575356886,0.025761210686337428,1.4981882536007274,0.7935099618580602,1.7501723746722588,0.6216685578057768,0.4720168586596177,1.14148618262848,-0.006764910506045407,0.44355844412604684]
# For components only
seance_categories = ','.join(x for x in seance_categories.split(',') if '_component' in x)
lr_params = [-0.46516760872483265,0.16122249013897566,0.05832011071968432,-0.9578493298442231,0.14565035906021623,-5.03094928869611,-2.5742852950562494,-2.4682220885701214,-2.0450476344767226,-0.017714900258256342,-0.08674422087445156,0.16446265702455404,2.3890155871959267,-0.5722108368940703,-0.8092706718817742,4.81932591991923,1.5348687649679726,2.6419581681009094,0.026339369805203783,-1.422017178187169]
svm_params = [-0.16146663191644006,0.05601960701526046,0.023957234486622268,-0.31151378509147404,0.04693211428197631,-1.8423737548912924,-0.9129755221467613,-0.7913551056949573,-0.7358193955845378,-0.0068797840133494855,-0.044778265328767566,0.054090382612487935,0.8688269048639334,-0.17001986625368684,-0.25817793495680263,1.6840373077881958,0.5202026496052411,0.957924481877318,0.008227150468716143,-0.44380672543264726]
# Pair each model coefficient with its SEANCE category name and write both
# models' coefficients to CSV, ordered by decreasing magnitude.
seance_categories = seance_categories.split(',')

def _by_magnitude(pairs):
    # Sort (coefficient, category) pairs with the largest |coefficient| first.
    return sorted(pairs, reverse=True, key=lambda pair: abs(pair[0]))

lr_sorted = _by_magnitude(zip(lr_params, seance_categories))
svm_sorted = _by_magnitude(zip(svm_params, seance_categories))

with open('params_sorted.csv', 'w') as out:
    for header, ranked in (('LR params', lr_sorted), ('SVM params', svm_sorted)):
        print(header, file=out)
        for coefficient, category in ranked:
            print(str(category) + ',' + str(coefficient), file=out)
        print(file=out)
|
[
"jeffrey.sardina@gmail.com"
] |
jeffrey.sardina@gmail.com
|
a7119c0161bf122d65bb45d6b28fb318db55e3ee
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/420/usersdata/346/88353/submittedfiles/exe11.py
|
5a42218fb2efb0224e56919f9581e59be8f7fa0f
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# -*- coding: utf-8 -*-
# Reads an integer and, when it has 8 or more digits, prints the sum of its
# decimal digits; for smaller values prints 'NAO SEI' ("don't know").
x= int(input('Digite um valor com 8 casas decimais: '))
y= 10000000    # smallest 8-digit number (10**7)
z= 100000000   # smallest 9-digit number (10**8)
soma= 0        # accumulator for the digit sum
# The while(True) wrapper only exists so 'break' can be used; every path
# breaks on its first pass, so the body runs exactly once.
while (True):
    if x<y:
        # fewer than 8 digits -> answer unknown
        print('NAO SEI')
        break
    # NOTE(review): 'x>y or x<z' holds for every x >= y, so values with MORE
    # than 8 digits are digit-summed as well; given the prompt, the intent
    # was probably 'y <= x < z' (exactly 8 digits) — confirm against the
    # exercise specification before changing.
    if x>y or x<z:
        while x:
            # peel off the least-significant digit and add it to the sum
            soma += x%10
            x//=10
        print(soma)
        break
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
07eb3ac3fd3558a1b5feb8bd0195a1bc1d059583
|
049c29351641e7245a6dc1fc44a684c0fd17ebf7
|
/snn/librispeech/model/snn.py
|
5c69a2e138e4c4c82eb057bd84c98310eb0bef25
|
[
"MIT"
] |
permissive
|
vjoki/fsl-experi
|
c093d13efd27ea22a1e990e0c073df6bd30135e6
|
a5d81afbc69dfec0c01f545168c966fd03a037f1
|
refs/heads/master
| 2023-07-11T02:00:25.772149
| 2021-08-05T11:06:52
| 2021-08-05T11:06:52
| 304,908,552
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,455
|
py
|
from typing import Tuple, List, Any
import torch
import torch.nn.functional as F
from .base import BaseNet
class SNN(BaseNet):
    """Siamese network: embeds two inputs with a shared CNN and scores their
    similarity from the absolute difference of the embeddings.

    Training/validation/test steps expect batches of (x1, x2, y) raw audio
    pairs; inputs are converted to spectrograms via
    ``self.spectogram_transform`` (SpecAugment only during training).
    ``self.cnn``, ``self.out`` and the accuracy metrics come from BaseNet.
    """

    def forward(self, x1: torch.Tensor, x2: torch.Tensor) -> torch.Tensor:  # type: ignore[override]
        # The same CNN (shared weights) embeds both inputs.
        x1 = self.cnn(x1)
        x2 = self.cnn(x2)
        # Element-wise distance between the two embeddings.
        dist = torch.abs(x1 - x2)
        # Map the distance vector to a single similarity logit.
        return self.out(dist)

    def training_step(self,  # type: ignore[override]
                      batch: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
                      batch_idx: int) -> torch.Tensor:
        x1, x2, y = batch
        # SpecAugment is applied only on the training path.
        x1 = self.spectogram_transform(x1, augmentable=self.specaugment)
        x2 = self.spectogram_transform(x2, augmentable=self.specaugment)
        out = self(x1, x2)
        # dist = F.pairwise_distance(x1, x2, keepdim=True)
        # loss = torch.mean((1.0 - y) * torch.pow(dist, 2) + y * torch.pow(torch.clamp(1.0 - dist, min=0.0), 2))
        # NOTE(review): 1e-8 is added to *logits*, not probabilities, so it has
        # no numerical-stability effect here — confirm intent.
        loss = F.binary_cross_entropy_with_logits(out + 1e-8, y)
        self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=False, logger=True)
        acc = self.train_accuracy(out, y)
        self.log('train_acc_step', acc, on_step=True, on_epoch=False)
        return loss

    def training_epoch_end(self, training_step_outputs: List[Any]):
        # Aggregate the per-step accuracy into one epoch-level metric.
        self.log('train_acc_epoch', self.train_accuracy.compute())

    def validation_step(self,  # type: ignore[override]
                        batch: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
                        batch_idx: int):
        x1, x2, y = batch
        # No augmentation outside training.
        x1 = self.spectogram_transform(x1)
        x2 = self.spectogram_transform(x2)
        out = self(x1, x2)
        loss = F.binary_cross_entropy_with_logits(out + 1e-8, y)
        self.log('val_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
        # Convert logits to probabilities before updating the metric.
        out = torch.sigmoid(out)
        self.val_accuracy(out, y)
        return [out.detach(), y.detach()]

    def test_step(self,  # type: ignore[override]
                  batch: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
                  batch_idx: int):
        # Mirrors validation_step but logs/updates the test metrics.
        x1, x2, y = batch
        x1 = self.spectogram_transform(x1)
        x2 = self.spectogram_transform(x2)
        out = self(x1, x2)
        loss = F.binary_cross_entropy_with_logits(out + 1e-8, y)
        self.log('test_loss', loss, on_step=False, on_epoch=True)
        out = torch.sigmoid(out)
        self.test_accuracy(out, y)
        return [out.detach(), y.detach()]
|
[
"vjoki@zv.fi"
] |
vjoki@zv.fi
|
54fa651074faf0cbdee12a93f566f88c6399c9e5
|
5a300ea871905d4c9a70d70e8f7641c72a095cf5
|
/original_scripts/macro/collect.py
|
665b0146d93bdc728373845985a274e64412571c
|
[] |
no_license
|
tennessejoyce/OctopusScripts
|
6a21129d832d392274ae088b50a7980550346b6b
|
a06af73ff497dfa47375dae45bf52ba24fffb5b3
|
refs/heads/master
| 2023-02-04T21:36:38.956163
| 2020-09-09T18:00:17
| 2020-09-09T18:00:17
| 279,387,167
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
import numpy as np
import sys
#This script will collect the results from a sweep into a
#single convenient output file.
#By default, intensities are chosen at Chebyshev nodes, which
#allow for efficient polynomial interpolation of the results.
# Intensities for the sweep are placed at Chebyshev-style nodes.
def getChebNodes(n):
    """Return the n nodes cos(pi*k/(2n)) for k = 0..n-1 as a numpy array."""
    indices = np.arange(n)
    return np.cos(np.pi * indices / (2 * n))
order = int(sys.argv[1])  # how many intensities were calculated (first command-line argument)
# NOTE(review): getChebNodes above is defined but never called here — confirm
# whether the node values are needed or the directory layout already encodes them.
dipoleY=[]
dipoleZ=[]
for i in range(order):
    # Each run i stores its multipole output under <order>/<i>/td.general/multipoles.
    dataName = str(order)+"/"+str(i) +"/"+"td.general/multipoles"
    multipoles = np.loadtxt(dataName)
    # Columns 4/8 and 5/9 are summed into the y and z dipole components
    # (presumably electron + ion contributions — TODO confirm against Octopus docs).
    dipoleY.append(multipoles[:,4] + multipoles[:,8])
    dipoleZ.append(multipoles[:,5] + multipoles[:,9])
# Shape: (2, order, n_timesteps) — axis 0 is [y, z].
dipole = np.array([np.array(dipoleY),np.array(dipoleZ)])
print(dipole.shape)
np.save('dipole.npy',dipole)
|
[
"47008993+tennessejoyce@users.noreply.github.com"
] |
47008993+tennessejoyce@users.noreply.github.com
|
6ffd7ff2b4f04009b292931cc32cad245413ced1
|
40ee79feba728f1e8a3428b6a61fe5f470f50ddb
|
/restApi/product/views.py
|
7cfed3fbae57407289221469f52827048ceb9b89
|
[
"MIT"
] |
permissive
|
rtx-abir/ecom
|
6fc67e19976ed4bcd027bab71523fde94277a695
|
87a7ae00ca06934151155ae0dca4230397386cd7
|
refs/heads/main
| 2023-02-04T00:36:09.140635
| 2020-12-22T06:53:40
| 2020-12-22T06:53:40
| 321,841,054
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 277
|
py
|
from rest_framework import viewsets
from .serializers import ProductSerializer
from .models import product
class ProductViewSet(viewsets.ModelViewSet):
    """CRUD API endpoints for products, listed in name order."""
    # QuerySets are lazy; this is re-evaluated per request by DRF.
    queryset = product.objects.all().order_by('name')
    serializer_class = ProductSerializer
# Create your views here.
|
[
"Debabir91@gmail.com"
] |
Debabir91@gmail.com
|
d8388845b9c1e2b9a0be086bd1c7d29a271afc87
|
1063f9dc09a156d05eb06c35a196f217b7deab4a
|
/profiles_api/permissions.py
|
9b22cab9903fc5614b0ecdbae6d57d01db0fde6d
|
[] |
no_license
|
ayenisholah/profiles-rest-api
|
1a9fd843f665fa94927b5e5294142b61f91589e1
|
5ac1ff9f14ca65bb9f98d56a0eaf4850db8d0171
|
refs/heads/master
| 2022-05-24T13:27:08.271369
| 2019-10-29T19:58:27
| 2019-10-29T19:58:27
| 214,676,214
| 0
| 0
| null | 2022-04-22T22:36:40
| 2019-10-12T16:09:10
|
Python
|
UTF-8
|
Python
| false
| false
| 703
|
py
|
from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Allow users to edit only their own profile."""

    def has_object_permission(self, request, view, obj):
        """Read-only requests are always allowed; writes only by the owner."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
    """Allow users to update only their own status items."""

    def has_object_permission(self, request, view, obj):
        """Read-only requests are always allowed; writes only by the owner."""
        if request.method not in permissions.SAFE_METHODS:
            return obj.user_profile.id == request.user.id
        return True
|
[
"ayenisholah@yahoo.com"
] |
ayenisholah@yahoo.com
|
5858ee65fabd230224a32537d2b686f65763b599
|
44ac52b18a5068713628bd671d8998901cb627aa
|
/Python/Email.py
|
c507732d8c898d095daef2e7d3fbbb73243ef6a6
|
[] |
no_license
|
KaiXtr/my-beginner-projects
|
176c8fa72900c6201d5afdd49ef4bdd6f5daedbd
|
d6929d0cd07854ebf8003d03380ea245914c9c21
|
refs/heads/master
| 2023-06-01T07:52:25.285987
| 2021-06-22T01:42:12
| 2021-06-22T01:42:12
| 264,756,469
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,654
|
py
|
from tkinter import *
from imaplib import *
from tkinter import messagebox
import imaplib
import email
import ctypes
import getpass
import webbrowser
mail=IMAP4_SSL('imap.gmail.com',993)
check=False
def Set():
    # Shown when login fails: Gmail blocks "less secure apps" by default,
    # so point the user at the Google setting page that enables IMAP access.
    if messagebox.askokcancel('Error',"The program can't run the email service because\nyour email don't give permission for other apps to use email\n\nPlease click OK to set this configuration on"):
        webbrowser.open_new('https://www.google.com/settings/security/lesssecureapps')
def Entries(event=None):
    """Read username/password from the entry widgets and log in over IMAP.

    On success the login window is destroyed and the main window opens.
    """
    us=eu.get()
    ps=ep.get()
    try:
        mail.login(us,ps)
        mail.select("INBOX")
        log.destroy()
        Main()
    except:
        # NOTE(review): bare except treats every failure (wrong password,
        # network error, ...) as the "less secure apps" problem.
        Set()
def Loop():
    """Poll the INBOX forever and print/display unseen messages.

    NOTE(review): `posts` is a local variable of Main(), so referencing it
    here raises NameError — this loop cannot draw to the canvas as written.
    NOTE(review): on Python 3, mail.fetch returns bytes, but
    email.message_from_string expects str — likely needs message_from_bytes.
    """
    while True:
        n=0
        # Search for messages not yet marked as seen.
        (code,mess)=mail.search(None,'(UNSEEN)')
        if code=='OK':
            for i in mess[0].split():
                n+=1
                # Fetch the full RFC822 message body.
                typ,dat=mail.fetch(i,'(RFC822)')
                for res in dat:
                    if isinstance(res,tuple):
                        orig=email.message_from_string(res[1])
                        txt=orig['From']+": "+orig['Subject']
                        print(txt)
                        posts.create_text(10,10,text=txt)
def Main():
    """Open the main window, then start polling mail once it is closed.

    NOTE(review): mainloop() blocks, so Loop() only runs after the window
    is closed — probably not the intended order.
    """
    root=Tk()
    root.title('Email')
    root.geometry("400x300+100+100")
    posts=Canvas(root)  # local only; Loop() cannot see this (see note there)
    root.mainloop()
    Loop()
def passhow():
    """Toggle whether the password entry masks its contents."""
    global check
    check = not check
    # Show plain text when the box is ticked, mask with '*' otherwise.
    ep['show'] = '' if check else '*'
# Build the fixed-size login window: username/password entries, a
# "show password" checkbox, and an OK button (Enter also submits).
log=Tk()
log.title('Email')
log.geometry("200x200+100+100")
log.resizable(0,0)
Ch=IntVar()
eu=Entry(log)
ep=Entry(log, show='*')  # password entry, masked by default
sh=Checkbutton(log, text='Show Password', variable=Ch, onvalue=1, offvalue=0, command=passhow)
bl=Button(log, text='OK', command=Entries)
eu.grid(row=1, column=1)
ep.grid(row=2, column=1)
sh.grid(row=3, column=1)
bl.grid(row=4, column=1)
log.bind('<Return>', Entries)  # submit on Enter key
log.mainloop()
|
[
"mattkai@users.noreply.github.com"
] |
mattkai@users.noreply.github.com
|
880c59169b0647ed4759b2eb193c3fc139ce3c99
|
668c146c84912e39c820d1e4d5971e8282d86d70
|
/applications/elearning/signals.py
|
11509262168ff7bc54099358f62ea327eaf3665a
|
[
"BSD-2-Clause"
] |
permissive
|
guinslym/Django-Code-Review-CodeEntrepreneurs
|
cc7cb1f81770b77e3285e62ec5fd18bb130fb7da
|
2ad9bd3d352f7eba46e16a7bf24e06b809049d62
|
refs/heads/master
| 2021-01-01T04:22:19.983344
| 2017-08-03T14:29:40
| 2017-08-03T14:29:40
| 97,162,498
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from .models import UserProfile
"""
TODO: this doen't work
purpose : create a profile when the user just SignUp
All my signals doens't seems to work include the one in the Models.py
"""
def create_profile(sender, **kwargs):
""" Create a profile on post_save signal of User object. """
user = kwargs["instance"]
if kwargs["created"]:
user_profile = UserProfile(user=user)
user_profile.save()
post_save.connect(create_profile, sender=User)
|
[
"guinslym@gmail.com"
] |
guinslym@gmail.com
|
7d687e85dac01012c01b1172306e6731d6d0b234
|
c1ecc4856455831570805577f25222c8278b59f5
|
/flanker/addresslib/plugins/icloud.py
|
aa8b1f9028eb52a818344ed380144ea6ea8a734e
|
[
"Apache-2.0"
] |
permissive
|
nylas/flanker
|
999cdcf5f7475d8f3fc3ba6ce9eb95efe74ff3c1
|
433839220cce28fd9b3c6ed58e869461b0eae89e
|
refs/heads/master
| 2022-10-05T00:42:21.199483
| 2018-04-06T00:22:36
| 2018-04-06T00:22:36
| 35,391,481
| 10
| 7
|
Apache-2.0
| 2022-08-23T21:20:42
| 2015-05-10T23:03:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,809
|
py
|
# coding:utf-8
'''
Email address validation plugin for icloud.com email addresses.
Notes:
3-20 characters
must start with letter
must end with letter or number
must use letters, numbers, dot (.) or underscores (_)
no consecutive dot (.) or underscores (_)
case is ignored
any number of plus (+) are allowed if followed by at least one alphanum
Grammar:
local-part -> icloud-prefix { [ dot | underscore ] icloud-root }
icloud-suffix
icloud-prefix = alpha
icloud-root = alpha | num | plus
icloud-suffix = alpha | num
Other limitations:
* Length of local-part must be no more than 20 characters, and no
less than 3 characters.
Open questions:
* Are dot-underscore (._) or underscore-dot (_.) allowed?
* Is name.@icloud.com allowed?
'''
import re
from flanker.addresslib.tokenizer import TokenStream
ALPHA = re.compile(r'''
[A-Za-z]+
''', re.MULTILINE | re.VERBOSE)
ALPHANUM = re.compile(r'''
[A-Za-z0-9]+
''', re.MULTILINE | re.VERBOSE)
ICLOUD_PREFIX = re.compile(r'''
[A-Za-z]+
''', re.MULTILINE | re.VERBOSE)
ICLOUD_BASE = re.compile(r'''
[A-Za-z0-9\+]+
''', re.MULTILINE | re.VERBOSE)
DOT = re.compile(r'''
\.
''', re.MULTILINE | re.VERBOSE)
UNDERSCORE = re.compile(r'''
\_
''', re.MULTILINE | re.VERBOSE)
def validate(localpart):
    """Validate the local part of an icloud.com address (module grammar)."""
    if not localpart:
        return False

    # Everything before the first '+' is the "real" local part.
    base = localpart.split('+')[0]

    # Must be 3-20 characters long.
    if not (3 <= len(base) <= 20):
        return False

    # A trailing '+' is not allowed.
    if localpart.endswith('+'):
        return False

    # Must start with a letter and end with a letter or digit.
    if ALPHA.match(base[0]) is None or ALPHANUM.match(base[-1]) is None:
        return False

    # Finally run the full grammar check on the real local part.
    return _validate(base)
def _validate(localpart):
    """Grammar check for the local part (see module docstring).

    Consumes the string as: an alpha prefix, then repeated
    [dot|underscore] + alphanumeric/plus runs. The whole string must be
    consumed for the grammar to hold.
    """
    stream = TokenStream(localpart)

    # localpart must start with alpha
    alpa = stream.get_token(ICLOUD_PREFIX)
    if alpa is None:
        return False

    while True:
        # optional separator (dot or underscore) before the next run
        stream.get_token(DOT) or stream.get_token(UNDERSCORE)

        # stop once no further alphanumeric/plus run can be consumed
        base = stream.get_token(ICLOUD_BASE)
        if base is None:
            break

    # Leftover characters mean the string violated the grammar.
    if not stream.end_of_stream():
        return False

    return True
|
[
"rjones@mailgunhq.com"
] |
rjones@mailgunhq.com
|
9dcaac00e19e2180f4960ca4299bde226792e82d
|
b7a3d5c5624599a00cc45d36b9a884bd289303b0
|
/adoptions/admin.py
|
72ff632e6ed6b6d42357a06c5a62793d62362b09
|
[] |
no_license
|
Jrmromao/Django
|
f41dcffcb116cf1eddcbbcfcb4e07d398fd6fd77
|
c21abdb99e98a98086d26f120ad53fc968b6ed17
|
refs/heads/master
| 2020-03-27T06:45:15.648983
| 2018-08-26T16:40:39
| 2018-08-26T16:40:39
| 146,133,278
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django.contrib import admin
#import pet model
from .models import Pet
@admin.register(Pet)
class PetAdmin(admin.ModelAdmin):
    """Admin change-list for Pet, showing the key attributes as columns."""
    list_display = ['name', 'species', 'breed', 'age', 'sex']
|
[
"jrmromao"
] |
jrmromao
|
292bda3ce776e2a9c2239acc828bae26b9f15f75
|
93684882400d0249ad733249f5b2c8dbd230110f
|
/ClassExercise & studio/chapter 4/chapter 4 turtle clock.py
|
80e7461cd98b417791c54feda62e0249c5f7be95
|
[] |
no_license
|
SmileShmily/LaunchCode-summerofcode-Unit1
|
c492bbed966547cc8c1be7f15d7a23cb989d407b
|
03474cf77b0dae2bcfaf8513711d3fec72bd4166
|
refs/heads/master
| 2021-01-16T23:19:23.413010
| 2017-06-29T02:49:19
| 2017-06-29T02:49:19
| 95,730,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
import turtle
# Draw a clock face: 12 tick marks with hour numbers, enclosed in a circle.
screen=turtle.Screen()
trtl=turtle.Turtle()
screen.setup(620,620)
screen.bgcolor('black')
#clr=['red','green','blue','yellow','purple']
trtl.pensize(4)
n=0  # accumulated rotation in degrees (30 per hour mark)
trtl.shape('turtle')
trtl.penup()
trtl.pencolor('red')
for m in range(12):
    m=m+1  # hour labels run 1..12
    trtl.penup()
    trtl.forward(150)   # move out from the center without drawing
    trtl.pendown()
    trtl.forward(25)    # draw the tick mark
    trtl.penup()
    trtl.forward(20)    # step out a bit further for the label
    trtl.write(m,align="center",font=("Arial", 12, "normal"))
    #if m == 12:m = 0
    trtl.home()
    trtl.right(n)       # rotate to the next hour position
    n = n + 30
trtl.home()
# Draw the outer circle of the clock face.
trtl.setpos(0,-250)
trtl.pendown()
trtl.pensize(10)
trtl.pencolor('blue')
trtl.circle(250)
trtl.penup()
trtl.setpos(150,-270)
trtl.pendown()
#trtl.pencolor('pink')
#trtl.write('Shmily',font=("Arial", 12, "normal"))
#trtl.ht()
|
[
"zyxjyaya@gmail.com"
] |
zyxjyaya@gmail.com
|
4bd2b4c2457b20c2e32f34ecd2fa726d80c407dd
|
0bf0f87fd4e1519d2db12662d9a40b0cad5d5791
|
/FaceDR/code/ui_event.py
|
2ea64ecc0d9d9c40e91fd6a0d988265d6c2af600
|
[] |
no_license
|
ZSharp7/Model-deployment
|
83c86a0f9dccc3cef48af353e65f8782cde29a28
|
bebe60e763f4816284d044d84f2d1f22f342d9ed
|
refs/heads/master
| 2022-11-17T04:08:25.541933
| 2020-07-10T00:31:00
| 2020-07-10T00:31:00
| 258,907,203
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,831
|
py
|
import os
import configparser
import win32api
import time
from PyQt5 import QtWidgets,QtGui,QtCore,Qt
import sys
from PyQt5.QtCore import QTimer
import cv2
from PIL import Image
import numpy as np
import threading
from code.compare_face import VisitMilvus
from code.database import DB
from code.save_face import Ui_Form
from code.show_data import ShowData
from code.image_tosave import FromImageSave
from code.ui_design import Ui_MainWindow
from code.thread_use import CameraThread
from code.face_detect import FaceDR
from code.face_net import FaceNet
class window(QtWidgets.QMainWindow):
    """Main window that asks for confirmation before closing.

    NOTE(review): lowercase class name is contrary to PEP 8; kept because
    other code references it by this name.
    """
    def __init__(self):
        super(window,self).__init__()
        # Flag read by worker threads; set to False on exit so they stop.
        self.thread_status = True

    def closeEvent(self,event):
        # Ask the user to confirm quitting (dialog text is Chinese: "Quit?").
        result = QtWidgets.QMessageBox.question(self,
                                            "退出",
                                            "是否退出?",
                                            QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
        event.ignore()  # default: keep the window open
        if result == QtWidgets.QMessageBox.Yes:
            self.thread_status = False  # signal worker threads to stop
            event.accept()              # allow the close to proceed
class Event(Ui_MainWindow):
    """Controller wiring the face-detection/recognition UI together.

    Owns the camera, the detection (FaceDR) and embedding (FaceNet) models,
    the Milvus vector store and the MySQL access objects, and connects the
    Qt widgets' signals to handlers.
    """
    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read('./config.ini')
        self.save_path = self.config.get('ui_event', 'save_path')
        self.camera_number = 0
        self.app = QtWidgets.QApplication(sys.argv)
        self.MainWindow = window()
        self.setupUi(self.MainWindow)
        self.MainWindow.setWindowIcon(QtGui.QIcon(self.config.get('ui_event', 'icon')))
        # Disabled until the models finish loading (see _init_model).
        self.camera_btn.setEnabled(False)
        self.printf("界面构造成功。")
        self.emb_flag = True
        # Milvus vector store connection
        self.milvus = VisitMilvus()
        if self.milvus.status == False:
            self.printf("Milvus connect is failed.")
        else:
            self.printf("Milvus is connected.")
        self.tablename = 'facetable'
        # camera (CAP_DSHOW: DirectShow backend on Windows)
        self.cap = cv2.VideoCapture(self.camera_number + cv2.CAP_DSHOW)
        # models
        self.detect = FaceDR()
        self.detect_flag = False  # True once models are warmed up
        self.facenet = FaceNet()
        self.face_times = 0       # frame counter; embedding runs every 10th frame
        self.face_flag = True
        # MySQL access objects
        self.db = DB()
        self.emb_db = DB()
        self.camera_lock = threading.Lock()
        # camera capture thread (started by _start_camera)
        self.camera_thread = threading.Thread(target=self._load_image)
        # self.camera_thread.tarigger.connect(self._load_image)
        # self.camera_time = QTimer()
        # self.camera_time.timeout.connect(lambda : threading.Thread(target=self._load_image).start())
        # model initialization runs in the background so the UI stays responsive
        self.model_thread = threading.Thread(target=self._init_model)
        self.model_thread.start()
        # connect button signals
        self.add_data_btn.clicked.connect(self.add_data)
        self.camera_btn.clicked.connect(self._start_camera)
        # connect menu-bar signals
        self.show_m.triggered.connect(self.show_data)
        self.save_m.triggered.connect(self.save_from_image)
        self.setting_m.triggered.connect(self.setting)
        self.about_m.triggered.connect(self.about)

    def _init_model(self):
        '''
        Warm up the detection and embedding models on a worker thread at
        startup, then enable the camera button.
        '''
        self.printf("Face Detect model is loading.")
        load_image = self.config.get('ui_event','load_image')
        image = cv2.imread(load_image)
        self.detect.detect_face(image)
        self.printf("Face Detect model is loaded.")
        self.printf("FaceNet model is loading.")
        image = cv2.resize(image,(160,160))
        image = image[np.newaxis, :]  # add batch dimension
        self.facenet.run(image)
        self.printf("FaceNet model is loaded.")
        self.camera_btn.setEnabled(True)
        self.detect_flag = True

    def about(self):
        '''Menu "About": show the project information dialog.'''
        # self.camera_time.stop()
        # self.camera_btn.setEnabled(True)
        print('display_info ')
        message_box = QtWidgets.QMessageBox
        message_box.information(self.Form, "About", "毕设项目--人脸检测识别器(2020.5)", message_box.Yes)

    def setting(self):
        '''
        Menu "Settings": open the config file with Windows Notepad.
        '''
        self.printf("打开配置文件.")
        # self.camera_time.stop()
        # self.camera_btn.setEnabled(True)
        win32api.ShellExecute(0, 'open', 'notepad.exe', './config.ini', '', 1)

    def add_data(self):
        '''
        Open the child window that saves the current face crop and its
        metadata into MySQL and Milvus.
        '''
        self.printf("增加数据。。")
        # self.camera_time.stop()
        # self.camera_btn.setEnabled(True)
        self.MainWindow2 = QtWidgets.QMainWindow()
        # id = self.milvus.table_len(self.tablename)
        try:
            # NOTE(review): self.face_img is only set after the camera thread
            # has seen a face; clicking before that raises AttributeError,
            # silently swallowed below.
            self.Ui_Form = Ui_Form(self.facenet,
                                   self.milvus,
                                   self.face_img,
                                   self.save_path)
            self.Ui_Form.setupUi(self.MainWindow2)
            self.MainWindow2.show()
        except:
            print(' is error')

    def save_from_image(self):
        '''
        Menu "Save": import data into the databases from local image files.
        '''
        # self.camera_time.stop()
        # self.camera_btn.setEnabled(True)
        self.MainWindow_fromImage = QtWidgets.QMainWindow()
        self.ui = FromImageSave(self.detect,self.facenet)
        self.ui.setupUi(self.MainWindow_fromImage)
        self.MainWindow_fromImage.show()

    def show_data(self):
        '''
        Menu "Show": read the stored records from MySQL and display them.
        '''
        # self.camera_time.stop()
        # self.camera_btn.setEnabled(True)
        self.MainWindow_show = QtWidgets.QMainWindow()
        self.ui = ShowData()
        self.ui.setupUi(self.MainWindow_show)
        self.MainWindow_show.show()

    def _letterbox_image(self,image, size):
        '''resize image with unchanged aspect ratio using padding'''
        image = Image.fromarray(image)
        iw, ih = image.size
        w, h = size
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        image = image.resize((nw, nh), Image.BICUBIC)
        # Pad with mid-gray and center the resized image.
        new_image = Image.new('RGB', size, (128, 128, 128))
        new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
        return np.array(new_image)

    def _start_camera(self):
        '''
        Start the camera capture thread and disable the button so it
        cannot be started twice.
        '''
        self.db = DB()
        self.printf("Start Camera and Using model.")
        self.camera_thread.start()
        self.printf("FaceNet 向量生成速度2s/次. ")
        self.camera_btn.setEnabled(False)

    def get_emb(self):
        '''
        Compute the embedding of the current face crop, search Milvus for
        the nearest match, and display the matched record (or clear it).
        '''
        image = self.face_img.copy()
        face_img = self._letterbox_image(image,(160, 160))
        face_img = face_img[np.newaxis,:]  # add batch dimension
        vectors = self.facenet.run(face_img)
        result = self.milvus.select_info(self.tablename,vectors.tolist())
        print("结果:",result)
        if result != -1:
            # Matched: fetch the person's record from MySQL by id.
            data = self.db.select(str(result))
            self.disp_info(data)
        else:
            self.disp_info(None)

    def disp_info(self,data):
        '''
        Display the record retrieved for the matched face; with data=None,
        clear the text and show the placeholder image instead.
        '''
        if data == None:
            output = ''
            image = cv2.imread(self.config.get('ui_event','background_image'))
            image = cv2.resize(image,(130,130))
        else:
            output = "ID:%d\n姓名:%s\n性别:%s\n出生日期:%s\n手机号:%s\n地址:%s"%(
                data['id'],data['name'],data['sex'],data['born'],data['phone'],data['adress'])
            image = cv2.imread(data['image_path'])
        self.info_text.setText(output)  # show the record text in the info panel
        self.disp_label(self.face_lab,image)

    def _load_image(self):
        '''
        Camera thread loop: grab frames, detect faces, draw boxes, and
        every 10th frame kick off an embedding lookup for the largest face.
        Exits when the main window sets thread_status to False.
        '''
        while True:
            if self.Form.thread_status == False:
                break
            self.face_times += 1
            ret, frame = self.cap.read()
            if ret == False:
                # Camera read failed: retry the connection a few times.
                for i in range(3):
                    self.printf('第%d次,相机重连。。'%i)
                    self.cap = cv2.VideoCapture(0)
                    ret,frame = self.cap.read()
                    if ret:
                        break
            frame = cv2.flip(frame,1)  # mirror for a natural selfie view
            if self.detect_flag:
                bboxes = self.detect.run(frame,False)
                # Rank faces by box area; only the largest face is embedded.
                area = [(box[2]-box[0])*(box[3]-box[1]) for box in bboxes]
                area.sort(reverse=True)
                if len(bboxes) < 1:
                    self.disp_info(None)
                for box in bboxes:
                    xmin,ymin,xmax,ymax = box
                    if (xmax-xmin)*(ymax-ymin) == area[0]:
                        if self.face_times % 10 == 0:
                            # Crop slightly inside the box to drop the border.
                            self.face_img = frame[ymin+3:ymax-3, xmin+3:xmax-3]
                            try:
                                threading.Thread(target=self.get_emb).start()
                            except:
                                print('识别异常。')
                    cv2.rectangle(frame,(xmin,ymin),(xmax,ymax),(0,255,0),1)
                if len(bboxes) <= 0:
                    self.face_img = None
            self.disp_label(self.camera_lab,frame)

    def run(self):
        '''
        Show the main window and enter the Qt event loop.
        '''
        self.MainWindow.show()
        sys.exit(self.app.exec_())
if __name__ == '__main__':
    # Build the UI and start the Qt event loop.
    Event().run()
|
[
"1343159083@qq.com"
] |
1343159083@qq.com
|
812219c0328e50c8ba02ef954f0de680d51d52b1
|
e7de67a6bbcb21b005c10ab45a29b4c77b0e3b80
|
/EWasteManagement/asgi.py
|
cf0424e774d7d97c7731d897ec22922de5887a41
|
[] |
no_license
|
dj-2002/E-WasteManagement-Django
|
cd65a31c4c728d756ff10c136e80327613e9f1f1
|
727256eab528b8a6899ba838ccc8dc1509cf762f
|
refs/heads/main
| 2023-03-28T21:39:08.291694
| 2021-04-03T04:48:57
| 2021-04-03T04:48:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
ASGI config for EWasteManagement project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EWasteManagement.settings')
application = get_asgi_application()
|
[
"dabhijayesh2002djbsa@gmail.com"
] |
dabhijayesh2002djbsa@gmail.com
|
0fbaff7f0fdfddcd49281420401ad4b13123f3dc
|
64841ec10c6b54de87e79c5e61210f15122b6be6
|
/class/CreateTenantWithToken.py
|
609563f2360cd6f5bbb20fa80dc34ef411ed437b
|
[] |
no_license
|
hegleran/hello-world
|
124fb9c0e054ef369429d18aba44aaa1b83c57bc
|
1aaeb3d5c99c42e1c1b4cffd66fc827250dd1af0
|
refs/heads/master
| 2020-03-27T11:12:46.707440
| 2018-08-31T13:23:11
| 2018-08-31T13:23:11
| 146,472,382
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
import requests
import json
url = "http://192.168.10.1/api/aaaLogin.json"
payload = "{\n\t\"aaaUser\": {\n\t\t\"attributes\": {\n\t\t\t\"name\" : \"admin\",\n\t\t\t\"pwd\" : \"ciscoapic\"\n\t\t}\n\t}\n}"
headers = {
'Content-Type': "application/vnd.yang.data+json",
'Accept': "application/vnd.yang.data+json",
'Authorization': "Basic YWRtaW46Y2lzY29hcGlj"
}
response = requests.request("POST", url, data=payload, headers=headers)
json_response = json.loads(response.text)
#print(json_response)['imdata'][0]['aaaLogin']['attributes']['token']
tokenfromlogin = (json_response['imdata'][0]['aaaLogin']['attributes']['token'])
url = "http://192.168.10.1/api/node/mo/uni/tn-testtenant.json"
payload = "{\"fvTenant\":{\"attributes\":{\"dn\":\"uni/tn-testtenant\",\"name\":\"testtenant\",\"rn\":\"tn-testtenant\",\"status\":\"created\"},\"children\":[]}}"
cookie = {"APIC-cookie": tokenfromlogin}
response = requests.request("POST", url, data=payload, cookies=cookie)
print(response.text)
|
[
"hegleran@yahoo.com"
] |
hegleran@yahoo.com
|
c481c92b5873d057f7d5e00fbc5d90a50765a369
|
a4eb024f83b41ec7c3f4e214e3611753b9c659bc
|
/record_audio.py
|
0df4d787c760df34a64ba7a16d5f17527146af59
|
[] |
no_license
|
markheimann/pitch_analysis
|
33e4b834a8ebdb0af3f91e3ba68ad001a874e184
|
80107a44023f854169a7841a81b71cdcfd85e4ac
|
refs/heads/master
| 2021-01-12T08:33:46.592626
| 2016-12-16T22:17:01
| 2016-12-16T22:17:01
| 76,611,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
#from https://gist.github.com/mabdrabo/8678538
import pyaudio
import wave
import easygui as g
FORMAT = pyaudio.paInt16          # 16-bit signed samples
CHANNELS = 2                      # stereo
RATE = 44100                      # sample rate in Hz
CHUNK = 1024                      # frames read per buffer
RECORD_SECONDS = 10               # maximum number of seconds user can record
WAVE_OUTPUT_FILENAME = "file.wav" # default output path
# Record input from the computer microphone
def record(OUTPUT_FNAME):
    """Record up to RECORD_SECONDS of audio from the default input device
    and save it as a WAV file at OUTPUT_FNAME.

    Recording stops early if the user presses Ctrl-C (KeyboardInterrupt).
    """
    audio = pyaudio.PyAudio()

    # start recording
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)
    frames = []

    # Record up to the maximum length we allow the user to record.
    # Stop early if the user throws a keyboard interrupt (Control-C).
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        try:
            data = stream.read(CHUNK)
            frames.append(data)
        except KeyboardInterrupt: # user stops recording by hitting Control-C
            break

    # stop recording and release the audio device
    stream.stop_stream()
    stream.close()
    audio.terminate()

    # save recording as a standard WAV file
    waveFile = wave.open(OUTPUT_FNAME, 'wb')
    waveFile.setnchannels(CHANNELS)
    waveFile.setsampwidth(audio.get_sample_size(FORMAT))
    waveFile.setframerate(RATE)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()
if __name__ == "__main__":
enter = raw_input("Press Enter to continue...")
record(WAVE_OUTPUT_FILENAME)
|
[
"mark.heimann@yahoo.com"
] |
mark.heimann@yahoo.com
|
fa080af7e621fcb0f7268184926424b21dad2bab
|
3a9f2b3d79cf214704829427ee280f4b49dca70a
|
/saigon/rat/RuckusAutoTest/components/lib/fm/fw_manager_fm_old.py
|
6c203630b9f3db17a512503f7462d5fdc7f56a8a
|
[] |
no_license
|
jichunwei/MyGitHub-1
|
ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791
|
f826fc89a030c6c4e08052d2d43af0b1b4b410e3
|
refs/heads/master
| 2021-01-21T10:19:22.900905
| 2016-08-20T03:34:52
| 2016-08-20T03:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,809
|
py
|
import logging, time
from RuckusAutoTest.common.utils import *
'''
NOTE: Lowercase Module
'''
# Mapping of AP/ZoneDirector model name -> numeric id used in the
# firmware edit form's checkbox element ids ('<id>-model').
model_name_id= {
    "UNKNOWN" : 0,
    "ZF2925" : 1,
    "ZF2942" : 2,
    "VF2825" : 3,
    "VF7811" : 4,
    "ZD1006" : 5,
    "ZD1012" : 6,
    "ZD1025" : 7,
    "ZD1050" : 8,
    "ZD3025" : 9,
    "ZD3050" : 10,
    "ZD3100" : 11,
    "ZD3250" : 12,
    "ZF7942" : 13,
    "VF2811" : 14,
    "ZF2741" : 15,
    "ZF7962" :16,
    "ZF7762" : 17,
    "ZF7731" : 18,
    "ZF7343" : 20,
    "ZF7363" : 22,
    "ZD3500" : 23,
    "ZF7762-S" : 24,
    "ZF7025" :25,
    "ZD1106" : 26,
    "ZD1112" : 27,
    "ZD1125" : 28,
    "ZD1150" : 29,
    "ZF7341" : 30,
}

# XPath locators for the Manage Firmware Files page. The *Tmpl entries
# are templates taking a row index or a model name/id via %-formatting.
Locators = dict(
    RefreshBtn = "//img[@id='cmdrefresh']",
    Tbl = "//table[@widgetid='firmwarelist']",
    DeleteLinkTmpl = "//table[@widgetid='firmwarelist']//tr[%s]/td/a[.='Delete']",
    EditLinkTmpl = "//table[@widgetid='firmwarelist']//tr[%s]/td/a[.='Edit']",
    Nav = "//td[contains(preceding-sibling::td, 'Number of files')]/table",
    NewUploadLink = "//div[@id='new-firmware']",
    # model-id convention: edit-form checkbox ids are '<model id>-model'
    ModelCheckboxTmpl = "//span[contains(.,'%s')]/input[contains(@id,'-model')]",
    ModelCheckboxEditTmpl = "//span/input[@id='%s-model']",
    ModelCheckboxEditTextTmpl = "//span[input[@id='%s-model']]",
    UploadDetailForm = "//fieldset[@id='uploaddetail']",
    FwFileTxt = "//input[@id='filepath']",
    OkBtn = "//input[@id='cmdupload']",
    EditOkBtn = "//input[@id='cmdupdate']",
)

# Substrings of status messages that indicate a refused deletion.
DeleteFwErrMsgs = [
    'You cannot delete this firmware', # file because an existing task is using it.',
]
def _find_firmware(fm, **kwa):
    '''
    + Assume current page is Inv > Manage Firmware Files
    - Click on refresh button for getting latest data
    - Find and return the row
    kwa:
    - criteria: something likes
      {'firmwarename': '2925_', 'description': '7.1.0.0.39'}
    return:
    - the first matched row (and its index, template) or None, None, None
    '''
    sel = fm.selenium
    loc = Locators
    # Refresh so the table shows the latest data before searching.
    sel.click_and_wait(loc['RefreshBtn'])
    params = {'table': loc['Tbl'], 'navigator': loc['Nav'], 'ignore_case': True}
    params.update(kwa)
    return fm.find_list_table_row(**params)
def find_firmware(fm, **kwa):
    '''
    - wrapper for _find_firmware(): navigates to the Manage Firmware Files
      page first, then searches the table
    kwa:
    - criteria: something likes
      {'firmwarename': '2925_', 'description': '7.1.0.0.39'}
    return:
    - the first matched row (and its index, template) or None, None, None
    '''
    fm.navigate_to(fm.PROVISIONING, fm.PROV_MANAGE_FIRMWARE_FILES)
    return _find_firmware(fm, **kwa)
def upload_firmware(fm, **kwa):
    '''
    Upload a firmware file to FlexMaster for the given models.

    - opens the new-upload form, ticks the checkbox of each requested
      model, fills in the file path and clicks OK
    - waits (up to 60s) for the upload form to disappear, which indicates
      the upload finished successfully
    kwa:
    - filepath: full path filename
    - models: as a list, likes ['ZD3100', 'ZF2925']
    '''
    sel, loc = fm.selenium, Locators
    fm.navigate_to(fm.PROVISIONING, fm.PROV_MANAGE_FIRMWARE_FILES)
    sel.click_and_wait(loc['NewUploadLink'])
    # Tick the checkbox for every requested model.
    for model in kwa['models']:
        sel.click_if_not_checked(loc['ModelCheckboxTmpl'] % model.upper())
    sel.type_text(loc['FwFileTxt'], kwa['filepath'])
    sel.click(loc['OkBtn'])
    # The upload-detail form goes away once the upload completes.
    sel.wait_for_element_disappered(loc['UploadDetailForm'], 60)
def delete_firmware(fm, **kwa):
    '''
    Find the firmware named kwa['name'] on the list and delete it.

    kwa:
    - name
    return:
    - True on success; False if the row is not found or FlexMaster
      refuses the deletion (e.g. a task still uses the firmware)
    '''
    log('kwa:\n%s' % pformat(kwa))
    sel, loc = fm.selenium, Locators
    fm.navigate_to(fm.PROVISIONING, fm.PROV_MANAGE_FIRMWARE_FILES)

    row, idx, _ = _find_firmware(fm, criteria={'firmwarename': kwa['name']})
    if not row:
        return False

    logging.info('Delete firmware "%s"' % kwa['name'])
    sel.choose_ok_on_next_confirmation()
    sel.click_and_wait(loc['DeleteLinkTmpl'] % idx, 2)

    _, msg = fm.get_status()
    # FlexMaster refuses to delete a firmware an existing task still uses.
    if any(err.lower() in msg.lower() for err in DeleteFwErrMsgs):
        logging.info(msg)
        return False
    return True
def edit_firmware(fm, **kwa):
    '''
    Edit a firmware's model assignment:
    - clear all the model checkboxes first
    - then check the ones in the 'models' list
    - firmware description editing is not supported for now
    kwa:
    - name: which firmware to edit?
    - models: which models to be selected
    raises:
    - Exception when the firmware row cannot be found
    '''
    s, l = fm.selenium, Locators
    fm.navigate_to(fm.PROVISIONING, fm.PROV_MANAGE_FIRMWARE_FILES)
    r, i, t = _find_firmware(fm, criteria = {'firmwarename': kwa['name']})
    if not r:
        raise Exception('Firmware cannot be found: %s' % kwa['name'])
    s.click_and_wait(l['EditLinkTmpl'] % i, .5)
    # Workaround: uncheck every model checkbox known in model_name_id
    # before applying the requested selection.
    for value in model_name_id.values():
        if s.is_element_present(l['ModelCheckboxEditTmpl'] % value, 2):
            s.click_if_checked(l['ModelCheckboxEditTmpl'] % value)
    for m in kwa['models']:
        s.click_if_not_checked(l['ModelCheckboxTmpl'] % m.upper())
    # brief pause so the UI registers the checkbox changes before submit
    time.sleep(.5)
    s.click_and_wait(l['EditOkBtn'])
def get_firmware(fm, **kwa):
    '''
    - get the applied models of Firmware by opening its edit form and
      reading which model checkboxes are checked
    - also get the description (if needed)
    kwa:
    - name:
    return:
    - a list of selected models (in lower case)
    - model id mapping:
        0     UNKNOWN     UNKNOWN.png     UNKNOWN
        1     ZF2925     clip-5port.png     ZF2925
        2     ZF2942     zf_ap.png     ZF2942
        3     VF2825     clip-5port.png     VF2825
        4     VF7811     clip-1port.png     VF7811
        5     ZD1006     zd1000_tiny.png     ZD1006
        6     ZD1012     zd1000_tiny.png     ZD1012
        7     ZD1025     zd1000_tiny.png     ZD1025
        8     ZD1050     zd1000_tiny.png     ZD1050
        9     ZD3025     zd3000_tiny.png     ZD3025
        10     ZD3050     zd3000_tiny.png     ZD3050
        11     ZD3100     zd3000_tiny.png     ZD3100
        12     ZD3250     zd3000_tiny.png     ZD3250
        13     ZF7942     zf_ap.png     ZF7942
        14     VF2811     clip-1port.png     VF2811
        15     ZF2741     zf_ap.png     ZF2741
        16     ZF7962     zf_ap.png     ZF7962
        17     ZF7762     zf_ap.png     ZF7762
        18     ZF7731     zf_ap.png     ZF7731
        20     ZF7343     zf_ap.png     ZF7343
        22     ZF7363     zf_ap.png     ZF7363
        23     ZD3500     zd3000_tiny.png     ZD3500
        24     ZF7762-S     zf_ap.png     ZF7762-S
        25     ZF7025     walle-halfzoomed.jpg     ZF7025
        26     ZD1106     zd1000_tiny.png     ZD1106
        27     ZD1112     zd1000_tiny.png     ZD1112
        28     ZD1125     zd1000_tiny.png     ZD1125
        29     ZD1150     zd1000_tiny.png     ZD1150
    '''
    s, l = fm.selenium, Locators
    fm.navigate_to(fm.PROVISIONING, fm.PROV_MANAGE_FIRMWARE_FILES)
    r, i, t = _find_firmware(fm, criteria = {'firmwarename': kwa['name']})
    if not r: raise Exception('Firmware cannot be found: %s' % kwa['name'])
    s.click_and_wait(l['EditLinkTmpl'] % i, .5)
    models = []
    i = 1
    # temp fix: scan checkbox ids 1..max_model_id-1 (keep in sync with
    # model_name_id; ids are not contiguous, hence is_element_present check)
    max_model_id = 30
    while i < max_model_id:
        if s.is_element_present(l['ModelCheckboxEditTmpl'] % i, 1):
            if s.is_checked(l['ModelCheckboxEditTmpl'] % i):
                models.append((s.get_text(l['ModelCheckboxEditTextTmpl'] % i)).strip().lower())
        i += 1
    s.click_and_wait(l['RefreshBtn']) # close the edit mode
    return models
def get_all_firmwares(fm):
    '''
    Collect every firmware entry listed on the Manage Firmware Files page.

    return:
    - a list of all firmwares (titles are in lowercase)
    '''
    selenium = fm.selenium
    locators = Locators
    fm.navigate_to(fm.PROVISIONING, fm.PROV_MANAGE_FIRMWARE_FILES)
    # refresh first so the table reflects the current state
    selenium.click_and_wait(locators['RefreshBtn'])
    return fm.get_list_table(
        table = locators['Tbl'],
        navigator = locators['Nav'],
        ignore_case = True,
    )
|
[
"tan@xx.com"
] |
tan@xx.com
|
9fd5878581ca751ce8d18b7ff05b80af1a360d88
|
cb2a40b70bc21d0057c96ddb2c86edceffe19707
|
/studioadmin/tests/test_forms/test_users_forms.py
|
204225f3587851571025acae63c68e31e1f505da
|
[] |
no_license
|
rebkwok/pipsevents
|
ceed9f420b08cd1a3fa418800c0870f5a95a4067
|
c997349a1b4f3995ca4bb3a897be6a73001c9810
|
refs/heads/main
| 2023-08-09T14:11:52.227086
| 2023-07-27T20:21:01
| 2023-07-27T20:21:01
| 29,796,344
| 1
| 1
| null | 2023-09-13T14:32:16
| 2015-01-24T23:53:34
|
Python
|
UTF-8
|
Python
| false
| false
| 40,807
|
py
|
# -*- coding: utf-8 -*-
from datetime import timedelta
from model_bakery import baker
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
from booking.models import Event, BlockType
from common.tests.helpers import PatchRequestMixin
from studioadmin.forms import AddBookingForm, ChooseUsersFormSet, \
EditBookingForm, EditPastBookingForm, \
EmailUsersForm, UserFilterForm, UserBookingFormSet, UserBlockFormSet
class ChooseUsersFormSetTests(TestCase):
    """Validation tests for ChooseUsersFormSet."""

    def setUp(self):
        self.user = baker.make_recipe('booking.user')

    def formset_data(self, extra_data=None):
        """Return minimal valid formset POST data, merged with overrides.

        ``extra_data`` defaults to None rather than ``{}`` to avoid the
        shared-mutable-default-argument pitfall.
        """
        data = {
            'form-TOTAL_FORMS': 1,
            'form-INITIAL_FORMS': 1,
            'form-0-id': str(self.user.id),
        }
        data.update(extra_data or {})
        return data

    def test_choose_users_formset_valid(self):
        formset = ChooseUsersFormSet(data=self.formset_data())
        self.assertTrue(formset.is_valid())
class EmailUsersFormTests(TestCase):
    """Validation tests for EmailUsersForm (required-field handling)."""

    # NOTE: the previous no-op ``setUp(self): pass`` was removed; TestCase
    # provides a default setUp.

    def form_data(self, extra_data=None):
        """Return minimal valid form POST data, merged with overrides.

        ``extra_data`` defaults to None rather than ``{}`` to avoid the
        shared-mutable-default-argument pitfall.
        """
        data = {
            'subject': 'Test subject',
            'from_address': settings.DEFAULT_FROM_EMAIL,
            'message': 'Test message'
        }
        data.update(extra_data or {})
        return data

    def test_form_valid(self):
        form = EmailUsersForm(data=self.form_data())
        self.assertTrue(form.is_valid())

    def test_missing_from_address(self):
        form = EmailUsersForm(
            data=self.form_data({'from_address': ''})
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['from_address'],
            ['This field is required.']
        )

    def test_missing_message(self):
        form = EmailUsersForm(
            data=self.form_data({'message': ''})
        )
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['message'],
            ['This field is required.']
        )
class UserFilterFormTests(TestCase):
    """Tests for the events/lessons dropdown querysets of UserFilterForm."""

    def setUp(self):
        # Fixtures only need to exist in the DB; the previous unused local
        # names (events, classes) were dropped.
        baker.make_recipe('booking.future_EV', _quantity=3)
        baker.make_recipe('booking.future_PC', _quantity=4)

    def test_events_dropdown(self):
        """Events dropdown lists only the 3 'EV'-type events."""
        form = UserFilterForm()
        event_field = form.fields['events']
        event_choices = list(event_field.widget.choices)
        self.assertEqual(len(event_choices), 3)
        event_ids = [id for (id, name) in event_choices]
        event_type = {
            event.event_type.event_type
            for event in Event.objects.filter(id__in=event_ids)
        }
        self.assertEqual(event_type, {'EV'})

    def test_lessons_dropdown(self):
        """Lessons dropdown lists only the 4 'CL'-type classes."""
        form = UserFilterForm()
        lesson_field = form.fields['lessons']
        lesson_choices = list(lesson_field.widget.choices)
        self.assertEqual(len(lesson_choices), 4)
        lesson_ids = [id for (id, name) in lesson_choices]
        event_type = {
            event.event_type.event_type
            for event in Event.objects.filter(id__in=lesson_ids)
        }
        self.assertEqual(event_type, {'CL'})
class UserBookingFormSetTests(PatchRequestMixin, TestCase):
    """Tests for UserBookingFormSet: block/event querysets and widget state."""

    def setUp(self):
        super(UserBookingFormSetTests, self).setUp()
        self.event = baker.make_recipe('booking.future_EV')
        self.user = baker.make_recipe('booking.user')
        self.block_type = baker.make_recipe('booking.blocktype',
                                            event_type=self.event.event_type)
        # 5 active blocks for other users
        baker.make_recipe(
            'booking.block',
            block_type=self.block_type,
            paid=True,
            _quantity=5
        )
        self.booking = baker.make_recipe(
            'booking.booking', event=self.event, user=self.user
        )

    def formset_data(self, extra_data=None):
        """Return minimal valid formset POST data, merged with overrides.

        ``extra_data`` defaults to None rather than ``{}`` to avoid the
        shared-mutable-default-argument pitfall.
        """
        data = {
            'bookings-TOTAL_FORMS': 1,
            'bookings-INITIAL_FORMS': 1,
            'bookings-0-id': self.booking.id,
            'bookings-0-event': self.event.id,
            'bookings-0-status': self.booking.status,
        }
        data.update(extra_data or {})
        return data

    def test_form_valid(self):
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        self.assertTrue(formset.is_valid(), formset.errors)

    def test_additional_data_in_form(self):
        baker.make_recipe('booking.block',
                          block_type=self.block_type,
                          user=self.user,
                          paid=True)
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        form = formset.forms[0]
        self.assertTrue(form.has_available_block)
        self.assertEqual(form.paid_id, 'paid_0')

    def test_block_queryset_with_new_form(self):
        """
        New form should show all active user blocks
        """
        baker.make_recipe(
            'booking.block', block_type=self.block_type, user=self.user,
            paid=True
        )
        baker.make_recipe('booking.block', user=self.user, paid=True)
        formset = UserBookingFormSet(instance=self.user,
                                     user=self.user)
        # get the last form, which will be the new empty one
        form = formset.forms[-1]
        block = form.fields['block']
        # queryset shows only the two active blocks for this user
        self.assertEqual(2, block.queryset.count())

    def test_block_queryset_with_existing_booking_with_active_user_block(self):
        """
        Existing booking should show only user's active blocks for the
        same event type.
        """
        active_user_block = baker.make_recipe('booking.block',
                                              block_type=self.block_type,
                                              user=self.user,
                                              paid=True)
        # an active block of a different event type, which must be excluded
        baker.make_recipe('booking.block', user=self.user, paid=True)
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        # get the first form
        form = formset.forms[0]
        block = form.fields['block']
        # queryset shows only the active blocks for this user and event type
        self.assertEqual(1, block.queryset.count())
        # empty_label shows the "None"
        self.assertEqual(
            block.empty_label,
            "--------None--------",
        )
        # assign this block to the user's booking
        self.booking.block = active_user_block
        self.booking.save()
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        # get the first form
        form = formset.forms[0]
        block = form.fields['block']
        # queryset still only shows active blocks for this user and event type
        self.assertEqual(1, block.queryset.count())
        # empty_label shows the "Remove block" instruction
        self.assertEqual(
            block.empty_label,
            "---REMOVE BLOCK (TO CHANGE BLOCK, REMOVE AND SAVE FIRST)---",
        )

    def test_block_queryset_with_existing_booking_no_active_user_block(self):
        # only an active block of a different event type exists
        baker.make_recipe('booking.block', user=self.user, paid=True)
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        # get the first form
        form = formset.forms[0]
        block = form.fields['block']
        # no active blocks for this user and event type
        self.assertEqual(0, block.queryset.count())

    def test_block_choice_label_format(self):
        active_user_block = baker.make_recipe('booking.block',
                                              block_type=self.block_type,
                                              user=self.user,
                                              paid=True)
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        # get the first form
        form = formset.forms[0]
        block = form.fields['block']
        # queryset shows only the active blocks for this user and event type
        self.assertEqual(1, block.queryset.count())
        # label is "<subtype>; exp <dd/mm>; <n> left"
        self.assertEqual(
            "{}; exp {}; {} left".format(
                active_user_block.block_type.event_type.subtype,
                active_user_block.expiry_date.strftime('%d/%m'),
                active_user_block.block_type.size - active_user_block.bookings_made()),
            block.label_from_instance(active_user_block)
        )

    def test_event_choices_with_new_form(self):
        """
        New form should show all events the user is not booked for
        """
        baker.make_recipe('booking.future_PC', _quantity=5)
        formset = UserBookingFormSet(instance=self.user,
                                     user=self.user)
        # get the last form, which will be the new empty one
        form = formset.forms[-1]
        event = form.fields['event']
        # 6 events total; the already-booked one is excluded
        self.assertEqual(6, Event.objects.count())
        self.assertEqual(5, event.queryset.count())
        self.assertFalse(self.event in event.queryset)

    def test_event_choices_with_existing_booking(self):
        """
        Existing booking should show all events in event choices
        ).
        """
        baker.make_recipe('booking.future_PC', _quantity=5)
        formset = UserBookingFormSet(data=self.formset_data(),
                                     instance=self.user,
                                     user=self.user)
        # get the first form
        form = formset.forms[0]
        event = form.fields['event']
        # queryset shows all events (will be hidden in the template)
        self.assertEqual(6, event.queryset.count())

    def test_widgets_disabled(self):
        """
        Cancelled: no_show widget, paid, deposit_paid, free_class disabled
        Block: paid, deposit_paid, free_class disabled
        No-show: no_show widget, paid, deposit_paid, free_class enabled but
        greyed
        No-show with block: no_show widget enabled but greyed, paid,
        deposit_paid, free_class disabled
        """
        events = baker.make_recipe('booking.future_PC', _quantity=4)
        user = baker.make_recipe('booking.user')
        block = baker.make_recipe(
            'booking.block', user=user,
            block_type__event_type=events[1].event_type
        )
        cancelled_booking = baker.make_recipe(
            'booking.booking', user=user, event=events[0], paid=True,
            payment_confirmed=True, status='CANCELLED'
        )
        block_booking = baker.make_recipe(
            'booking.booking', user=user, event=events[1], paid=True,
            payment_confirmed=True, status='OPEN', block=block
        )
        no_show_booking = baker.make_recipe(
            'booking.booking', user=user, event=events[2], paid=True,
            payment_confirmed=True, status='OPEN', no_show=True
        )
        no_show_block_booking = baker.make_recipe(
            'booking.booking', user=user, event=events[3], paid=True,
            payment_confirmed=True, status='OPEN', block=block, no_show=True
        )
        data = {
            'bookings-TOTAL_FORMS': 4,
            'bookings-INITIAL_FORMS': 4,
            'bookings-0-id': cancelled_booking.id,
            'bookings-0-event': cancelled_booking.event.id,
            'bookings-0-status': cancelled_booking.status,
            'bookings-1-id': block_booking.id,
            'bookings-1-event': block_booking.event.id,
            'bookings-1-status': block_booking.status,
            'bookings-2-id': no_show_booking.id,
            'bookings-2-event': no_show_booking.event.id,
            'bookings-2-status': no_show_booking.status,
            'bookings-3-id': no_show_block_booking.id,
            'bookings-3-event': no_show_block_booking.event.id,
            'bookings-3-status': no_show_block_booking.status,
        }
        formset = UserBookingFormSet(
            data=data, instance=user, user=self.user
        )
        cancelled_form = formset.forms[0]
        for field in ['no_show', 'paid', 'deposit_paid', 'free_class']:
            self.assertEqual(
                cancelled_form.fields[field].widget.attrs['class'],
                'regular-checkbox regular-checkbox-disabled'
            )
            self.assertEqual(
                cancelled_form.fields[field].widget.attrs['OnClick'],
                'javascript:return ReadOnlyCheckBox()'
            )
        block_form = formset.forms[1]
        for field in ['paid', 'deposit_paid', 'free_class']:
            self.assertEqual(
                block_form.fields[field].widget.attrs['class'],
                'regular-checkbox regular-checkbox-disabled'
            )
            self.assertEqual(
                block_form.fields[field].widget.attrs['OnClick'],
                'javascript:return ReadOnlyCheckBox()'
            )
        self.assertEqual(
            block_form.fields['no_show'].widget.attrs['class'], 'form-check-input'
        )
        no_show_form = formset.forms[2]
        for field in ['no_show', 'paid', 'deposit_paid', 'free_class']:
            self.assertEqual(
                no_show_form.fields[field].widget.attrs['class'],
                'regular-checkbox regular-checkbox-disabled'
            )
            self.assertIsNone(
                no_show_form.fields[field].widget.attrs.get('OnClick', None)
            )
        no_show_block_form = formset.forms[3]
        for field in ['paid', 'deposit_paid', 'free_class']:
            self.assertEqual(
                no_show_block_form.fields[field].widget.attrs['class'],
                'regular-checkbox regular-checkbox-disabled'
            )
            self.assertEqual(
                no_show_block_form.fields[field].widget.attrs['OnClick'],
                'javascript:return ReadOnlyCheckBox()'
            )
        # NOTE(review): the next two assertions re-check block_form and
        # no_show_form rather than no_show_block_form; this looks like a
        # copy-paste slip but is preserved as-is since the intended attrs of
        # no_show_block_form's no_show widget cannot be confirmed from here.
        self.assertEqual(
            block_form.fields['no_show'].widget.attrs['class'], 'form-check-input'
        )
        self.assertIsNone(
            no_show_form.fields['no_show'].widget.attrs.get('OnClick', None)
        )
class UserBlockFormSetTests(PatchRequestMixin, TestCase):
    """Tests for UserBlockFormSet: blocktype querysets and delete checkbox."""

    def setUp(self):
        super(UserBlockFormSetTests, self).setUp()
        event_type = baker.make_recipe('booking.event_type_PC')
        self.user = baker.make_recipe('booking.user')
        self.block_type = baker.make_recipe(
            'booking.blocktype', event_type=event_type)
        self.block = baker.make_recipe(
            'booking.block', block_type=self.block_type, user=self.user,
            paid=True
        )

    def formset_data(self, extra_data=None):
        """Return minimal valid formset POST data, merged with overrides.

        ``extra_data`` defaults to None rather than ``{}`` to avoid the
        shared-mutable-default-argument pitfall.
        """
        data = {
            'blocks-TOTAL_FORMS': 1,
            'blocks-INITIAL_FORMS': 1,
            'blocks-0-id': self.block.id,
            'blocks-0-block_type': self.block.block_type.id,
            'blocks-0-start_date': self.block.start_date.strftime('%d %b %Y')
        }
        data.update(extra_data or {})
        return data

    def test_form_valid(self):
        formset = UserBlockFormSet(data=self.formset_data(),
                                   instance=self.user,
                                   user=self.user)
        self.assertTrue(formset.is_valid(), formset.errors)

    def test_additional_data_in_form(self):
        event_type = baker.make_recipe('booking.event_type_OE')
        # an available blocktype for a different event type (unused name dropped)
        baker.make_recipe('booking.blocktype', event_type=event_type)
        formset = UserBlockFormSet(data=self.formset_data(),
                                   instance=self.user,
                                   user=self.user)
        form = formset.forms[0]
        self.assertTrue(form.can_buy_block)
        self.assertEqual(form.paid_id, 'paid_0')

    def test_block_type_queryset_for_new_form(self):
        """
        Block_type choices should not include blocktypes for which the user
        already has an active block
        """
        baker.make_recipe('booking.blocktype', _quantity=5)
        self.assertEqual(BlockType.objects.all().count(), 6)
        formset = UserBlockFormSet(instance=self.user, user=self.user)
        form = formset.forms[-1]
        block_type_queryset = form.fields['block_type'].queryset
        self.assertEqual(block_type_queryset.count(), 5)
        self.assertFalse(self.block_type in block_type_queryset)
        # blocktypes of unpaid blocks which are otherwise active are also not
        # included in the choices
        self.block.paid = False
        self.block.save()
        formset = UserBlockFormSet(instance=self.user, user=self.user)
        form = formset.forms[-1]
        block_type_queryset = form.fields['block_type'].queryset
        self.assertEqual(block_type_queryset.count(), 5)
        self.assertFalse(self.block_type in block_type_queryset)
        # blocktypes of expired blocks are included in the choices
        self.block_type.duration = 2
        self.block_type.save()
        self.block.paid = True
        self.block.save()
        self.block.start_date = timezone.now() - timedelta(100)
        self.block.save()
        self.assertTrue(self.block.expired)
        formset = UserBlockFormSet(instance=self.user, user=self.user)
        form = formset.forms[-1]
        block_type_queryset = form.fields['block_type'].queryset
        self.assertEqual(block_type_queryset.count(), 6)
        self.assertIn(self.block_type, block_type_queryset)

    def test_delete_checkbox(self):
        """
        Delete checkbox should be active only for unpaid blocks, unused free
        blocks or unused transfer blocks
        """
        # unpaid block (deletable; name not needed)
        baker.make_recipe('booking.block', user=self.user, paid=False)
        free_block_type = baker.make_recipe('booking.free_blocktype')
        # unused free block (deletable; name not needed)
        baker.make(
            'booking.block', user=self.user, paid=True,
            block_type=free_block_type
        )
        free_used = baker.make_recipe(
            'booking.block', user=self.user, paid=True,
            block_type=free_block_type
        )
        baker.make_recipe('booking.booking', user=self.user, block=free_used)
        # unused transfer block (deletable; name not needed)
        baker.make_recipe(
            'booking.block', user=self.user, paid=True,
            block_type__identifier='transferred'
        )
        transfer_used = baker.make_recipe(
            'booking.block', user=self.user, paid=True,
            block_type__identifier='transferred'
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=transfer_used
        )
        cannot_delete = [self.block, free_used, transfer_used]
        formset = UserBlockFormSet(instance=self.user, user=self.user)
        self.assertEqual(len(formset.forms), 7)  # 6 blocks plus new form
        for form in formset.forms[:-1]:
            disabled = form.fields['DELETE'].widget.attrs.get('disabled', None)
            if form.instance in cannot_delete:
                self.assertEqual(disabled, 'disabled')
            else:
                self.assertIsNone(disabled)
class EditPastBookingFormTests(PatchRequestMixin, TestCase):
    """Tests for editing bookings on PAST events.

    NOTE(review): the tests from test_error_messages_for_cancelled_event
    onwards construct EditBookingForm rather than EditPastBookingForm --
    presumably exercising shared base-form validation; confirm intentional.
    """

    def setUp(self):
        super(EditPastBookingFormTests, self).setUp()
        self.event = baker.make_recipe('booking.past_event')
        self.cancelled_event = baker.make_recipe(
            'booking.past_event', cancelled=True
        )
        self.user = baker.make_recipe('booking.user')
        self.block_type = baker.make_recipe(
            'booking.blocktype5', event_type=self.event.event_type
        )
        # 5 active blocks for other users
        baker.make_recipe(
            'booking.block',
            block_type=self.block_type,
            paid=True,
            _quantity=5
        )
        self.booking = baker.make_recipe(
            'booking.booking', event=self.event, user=self.user
        )
        self.booking_for_cancelled = baker.make_recipe(
            'booking.booking', event=self.cancelled_event, user=self.user,
            status='CANCELLED'
        )

    def test_block_choices(self):
        # block drop down lists all blocks for event type with spaces
        # if booking is already assigned to a block that's full, still list
        # block in options
        # includes expired blocks for past events
        # user block, not expired, not full
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block1, _quantity=4
        )
        # user block, not expired, full
        block2 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block2, _quantity=5
        )
        # user block, expired, not full
        block3 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user, start_date=timezone.now() - timedelta(days=90)
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block3, _quantity=4
        )
        # user block, expired, full
        block4 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user, start_date=timezone.now() - timedelta(days=90)
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block4, _quantity=5
        )
        form = EditPastBookingForm(instance=self.booking)
        self.assertCountEqual(
            form.fields['block'].queryset, [block1, block3]
        )
        # Add booking to block 1; block 1 is now full, but is included in
        # queryset
        self.booking.block = block1
        self.booking.save()
        form = EditPastBookingForm(instance=self.booking)
        self.assertCountEqual(
            form.fields['block'].queryset, [block1, block3]
        )
        # A different booking does NOT include full block 1
        booking = baker.make_recipe(
            'booking.booking', user=self.user,
            event__event_type=self.block_type.event_type
        )
        form = EditPastBookingForm(instance=booking)
        self.assertCountEqual(
            form.fields['block'].queryset, [block3]
        )

    def test_disabled_checkboxes(self):
        fields_to_disable = [
            'attended', 'paid', 'deposit_paid', 'free_class', 'no_show'
        ]
        # checkboxes made readonly with JS for cancelled bookings
        form = EditPastBookingForm(instance=self.booking_for_cancelled)
        for field in fields_to_disable:
            assert form.fields[field].disabled
        # checkboxes still usable for no-shows
        self.booking.no_show = True
        self.booking.save()
        form = EditPastBookingForm(instance=self.booking)
        for field in fields_to_disable:
            assert not form.fields[field].disabled

    def test_changing_status_to_cancelled(self):
        # sets paid to False and block to None
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        self.booking.block = block1
        self.booking.paid = True
        self.booking.save()
        data = {
            'id': self.booking.id,
            'paid': self.booking.paid,
            'status': 'CANCELLED',
            'block': self.booking.block.id
        }
        form = EditPastBookingForm(instance=self.booking, data=data)
        self.assertTrue(form.is_valid())
        # cancelling clears payment and block assignment
        self.assertFalse(form.cleaned_data['paid'])
        self.assertIsNone(form.cleaned_data['block'])

    def test_error_messages_for_cancelled_event(self):
        # can't assign booking for cancelled event to block
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        data = {
            'id': self.booking_for_cancelled.id,
            'paid': self.booking_for_cancelled.paid,
            'status': self.booking_for_cancelled.status,
            'block': block1.id
        }
        form = EditPastBookingForm(instance=self.booking_for_cancelled, data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'block': [
                    '{} is cancelled. Cannot assign booking to a '
                    'block.'.format(self.cancelled_event)
                ]
            }
        )
        # can't change status to open
        data.update(status='OPEN', block=None)
        form = EditPastBookingForm(instance=self.booking_for_cancelled, data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'status': [
                    '{} is cancelled. Cannot reopen booking for cancelled '
                    'event.'.format(self.cancelled_event)
                ]
            }
        )
        # can't change status to free
        data.update(status=self.booking_for_cancelled.status, free_class=True)
        form = EditBookingForm(instance=self.booking_for_cancelled, data=data)
        assert form.is_valid()
        assert not form.cleaned_data["free_class"]
        # can't change to paid
        data.update(paid=True, free_class=False)
        form = EditBookingForm(instance=self.booking_for_cancelled, data=data)
        assert form.is_valid()
        assert not form.cleaned_data["paid"]
        # can't change to attended
        data.update(paid=False, attended=True)
        form = EditBookingForm(instance=self.booking_for_cancelled, data=data)
        assert form.is_valid()
        assert not form.cleaned_data["attended"]
        # can't change to no-show
        data.update(no_show=True, attended=False)
        form = EditBookingForm(instance=self.booking_for_cancelled, data=data)
        assert form.is_valid()
        assert not form.cleaned_data["no_show"]

    def test_cannot_assign_free_class_to_block(self):
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        self.booking.free_class = True
        self.booking.save()
        data = {
            'id': self.booking.id,
            'paid': self.booking.paid,
            'status': self.booking.status,
            'free_class': self.booking.free_class,
            'block': block1.id
        }
        form = EditBookingForm(instance=self.booking, data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'free_class': ['Free class cannot be assigned to a block.']}
        )

    def test_cannot_assign_cancelled_class_to_block(self):
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        self.booking.status = 'CANCELLED'
        self.booking.save()
        data = {
            'id': self.booking.id,
            'paid': self.booking.paid,
            'status': self.booking.status,
            'block': block1.id
        }
        form = EditBookingForm(instance=self.booking, data=data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'block': [
                    'Cannot assign cancelled booking to a block. To assign '
                    'to block, please also change booking status to OPEN.'
                ]
            }
        )

    def test_cannot_make_block_booking_unpaid(self):
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type, paid=True,
            user=self.user
        )
        self.booking.block = block1
        self.booking.save()
        data = {
            'id': self.booking.id,
            'paid': False,
            'status': self.booking.status,
            'block': self.booking.block.id
        }
        form = EditBookingForm(instance=self.booking, data=data)
        assert form.is_valid()
        # paid field in posted data is ignored
        assert form.cleaned_data["paid"]

    def test_cannot_make_both_attended_and_no_show(self):
        data = {
            'id': self.booking.id,
            'paid': self.booking.paid,
            'status': self.booking.status,
            'attended': True,
            'no_show': True
        }
        form = EditBookingForm(instance=self.booking, data=data)
        self.assertFalse(form.is_valid())
        self.assertCountEqual(
            form.errors,
            {
                'attended': ['Booking cannot be both attended and no-show.'],
                'no_show': ['Booking cannot be both attended and no-show.']
            }
        )
class EditBookingFormTests(PatchRequestMixin, TestCase):
    """Tests for EditBookingForm block choices on FUTURE events.

    EditBookingForm behaves like EditPastBookingForm except that expired
    blocks are excluded from the block dropdown.
    """

    def setUp(self):
        super(EditBookingFormTests, self).setUp()
        self.event = baker.make_recipe('booking.future_PC')
        self.cancelled_event = baker.make_recipe(
            'booking.future_PC', cancelled=True
        )
        self.user = baker.make_recipe('booking.user')
        self.block_type = baker.make_recipe(
            'booking.blocktype5', event_type=self.event.event_type
        )
        # 5 active blocks belonging to other users
        baker.make_recipe(
            'booking.block',
            block_type=self.block_type,
            paid=True,
            _quantity=5
        )
        self.booking = baker.make_recipe(
            'booking.booking', event=self.event, user=self.user
        )
        self.booking_for_cancelled = baker.make_recipe(
            'booking.booking', event=self.cancelled_event, user=self.user,
            status='CANCELLED'
        )

    def _make_user_block(self, bookings_used, expired=False):
        """Create a paid block for self.user with the given usage/expiry."""
        kwargs = dict(
            block_type=self.block_type, paid=True, user=self.user
        )
        if expired:
            kwargs['start_date'] = timezone.now() - timedelta(days=90)
        user_block = baker.make_recipe('booking.block', **kwargs)
        baker.make_recipe(
            'booking.booking', user=self.user, block=user_block,
            _quantity=bookings_used
        )
        return user_block

    def test_block_choices(self):
        # The dropdown lists the user's blocks for this event type with
        # space remaining; a booking's own (now full) block stays listed;
        # expired blocks are never offered for future events.
        usable_block = self._make_user_block(4)              # active, has space
        self._make_user_block(5)                             # active, full
        self._make_user_block(4, expired=True)               # expired, has space
        self._make_user_block(5, expired=True)               # expired, full
        # expired/full blocks are NOT included
        form = EditBookingForm(instance=self.booking)
        self.assertCountEqual(
            form.fields['block'].queryset, [usable_block]
        )
        # Attach the booking to usable_block (now full); it remains listed
        # because it is the block on this booking
        self.booking.block = usable_block
        self.booking.save()
        form = EditBookingForm(instance=self.booking)
        self.assertCountEqual(
            form.fields['block'].queryset, [usable_block]
        )
        # A different booking does NOT get the full block offered
        other_booking = baker.make_recipe(
            'booking.booking', user=self.user,
            event__event_type=self.block_type.event_type
        )
        form = EditBookingForm(instance=other_booking)
        self.assertCountEqual(
            form.fields['block'].queryset, []
        )
class AddBookingFormTests(PatchRequestMixin, TestCase):
    """Tests for AddBookingForm: event/block choices and validation errors."""

    def setUp(self):
        super(AddBookingFormTests, self).setUp()
        self.event = baker.make_recipe('booking.future_EV', cost=10)
        self.poleclass = baker.make_recipe('booking.future_PC', cost=10)
        # poleclass1 deliberately has no matching blocktype (see
        # test_event_not_block_bookable)
        self.poleclass1 = baker.make_recipe('booking.future_PC', cost=10)
        self.past_event = baker.make_recipe('booking.past_event', cost=10)
        self.cancelled_event = baker.make_recipe(
            'booking.future_PC', cancelled=True, cost=10
        )
        self.user = baker.make_recipe('booking.user')
        self.block_type_pc = baker.make_recipe(
            'booking.blocktype5', event_type=self.poleclass.event_type
        )
        self.block_type_ev = baker.make_recipe(
            'booking.blocktype5', event_type=self.event.event_type
        )
        # 5 active blocks for other users
        baker.make_recipe(
            'booking.block',
            block_type=self.block_type_ev,
            paid=True,
            _quantity=5
        )

    def test_event_choices(self):
        # only future, not cancelled events shown
        form = AddBookingForm(user=self.user)
        self.assertCountEqual(
            form.fields['event'].queryset,
            [self.event, self.poleclass, self.poleclass1]
        )

    def test_block_choices(self):
        # block drop down lists all blocks for event type with spaces
        # if booking is already assigned to a block that's full, still list
        # block in options
        # includes expired blocks for past events
        # user block, not expired, not full
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type_ev, paid=True,
            user=self.user
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block1, _quantity=4
        )
        # user block, not expired, full
        block2 = baker.make_recipe(
            'booking.block', block_type=self.block_type_ev, paid=True,
            user=self.user
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block2, _quantity=5
        )
        # user block, expired, not full
        block3 = baker.make_recipe(
            'booking.block', block_type=self.block_type_ev, paid=True,
            user=self.user, start_date=timezone.now() - timedelta(days=90)
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block3, _quantity=4
        )
        # user block, expired, full
        block4 = baker.make_recipe(
            'booking.block', block_type=self.block_type_ev, paid=True,
            user=self.user, start_date=timezone.now() - timedelta(days=90)
        )
        baker.make_recipe(
            'booking.booking', user=self.user, block=block4, _quantity=5
        )
        # only shows not full, not expired
        form = AddBookingForm(user=self.user)
        self.assertEqual(
            [block.id for block in form.fields['block'].queryset], [block1.id]
        )

    def test_cannot_assign_free_class_to_block(self):
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type_ev, paid=True,
            user=self.user
        )
        data = {
            'user': self.user.id,
            'event': self.event.id,
            'paid': '',
            'status': 'OPEN',
            'free_class': True,
            'block': block1.id
        }
        form = AddBookingForm(data=data, user=self.user)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {'free_class': ['"Free class" cannot be assigned to a block.']}
        )

    def test_cannot_assign_cancelled_class_to_block(self):
        block1 = baker.make_recipe(
            'booking.block', block_type=self.block_type_ev, paid=True,
            user=self.user
        )
        data = {
            'user': self.user.id,
            'event': self.event.id,
            'paid': True,
            'status': 'CANCELLED',
            'block': block1.id
        }
        form = AddBookingForm(data=data, user=self.user)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                '__all__': [
                    'A cancelled booking cannot be assigned to a block.'
                ]
            }
        )

    def test_cannot_make_both_attended_and_no_show(self):
        data = {
            'user': self.user.id,
            'event': self.event.id,
            'paid': True,
            'status': 'OPEN',
            'attended': True,
            'no_show': True
        }
        form = AddBookingForm(data=data, user=self.user)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                '__all__': ['Booking cannot be both attended and no-show'],
            }
        )

    def test_event_not_block_bookable(self):
        # poleclass1 has no associated blocktype
        # make user blocks for available blocktypes
        baker.make_recipe(
            'booking.block', user=self.user, block_type=self.block_type_ev,
            paid=True,
        )
        block_pc = baker.make_recipe(
            'booking.block', user=self.user, block_type=self.block_type_pc,
            paid=True,
        )
        data = {
            'user': self.user.id,
            'event': self.poleclass1.id,
            'paid': True,
            'status': 'OPEN',
            'block': block_pc.id
        }
        form = AddBookingForm(data=data, user=self.user)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'block': ['This class type cannot be block-booked'],
            }
        )

    def test_create_booking_with_wrong_blocktype(self):
        # make user blocks for available blocktypes
        block_ev = baker.make_recipe(
            'booking.block', user=self.user, block_type=self.block_type_ev,
            paid=True,
        )
        baker.make_recipe(
            'booking.block', user=self.user, block_type=self.block_type_pc,
            paid=True,
        )
        data = {
            'user': self.user.id,
            'event': self.poleclass.id,
            'paid': True,
            'status': 'OPEN',
            'block': block_ev.id
        }
        form = AddBookingForm(data=data, user=self.user)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors,
            {
                'block': [
                    'This class can only be block-booked with a "{}" '
                    'block type.'.format(
                        self.poleclass.event_type
                    )
                ],
            },
        )
|
[
"rebkwok@gmail.com"
] |
rebkwok@gmail.com
|
54d329007b88c5f4f085102a846eda376989a7b0
|
d221eff5232f17603c71c990252bf94c1613f657
|
/TW_MI_INDEX.py
|
70aae4046b2b42bd2a5a9900ca695ee5e472cf4d
|
[] |
no_license
|
RamonLiao/TW_MI_INDEX
|
06c738401ebf8d0c29b9a914692b3cc6b87faa6e
|
552e9b8610410b9650be0e76ffe2bf4be8690c88
|
refs/heads/master
| 2020-04-02T13:19:49.344100
| 2018-10-25T14:36:28
| 2018-10-25T14:36:28
| 154,471,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,013
|
py
|
import requests
requests.adapters.DEFAULT_RETRIES = 5  # raise the retry count for flaky connections
from bs4 import BeautifulSoup as bs
from datetime import date, timedelta
import sqlite3 as lite
import ProcessBar as PB
import time
# TWSE daily closing quotes report (HTML, all securities) for a given yyyymmdd date.
url = 'http://www.twse.com.tw/exchangeReport/MI_INDEX?response=html&date={0}&type=ALL'
# Insert one row per security per day into the daily closing quotes table.
sql = "insert into 每日收盤行情(證券代號, 證券名稱, 日期, 成交股數, 成交筆數, 成交金額, 開盤價, 最高價, 最低價, 收盤價, '漲跌 (+/-)', 漲跌價差, 最後揭示買價, 最後揭示買量, 最後揭示賣價, 最後揭示賣量, 本益比) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
StartDate = date(year=2011, month=8, day=1) # start at 20110801
DateDelta = (date.today()-StartDate).days  # number of days between StartDate and today
process_bar = PB.ShowProcess(DateDelta, 'Finished') # Bug: if stepped past the configured total, the bar starts over
def money_conversion(input_ele):
    """Return *input_ele* with every thousands-separator comma removed."""
    return input_ele.replace(',', '')
# One shared HTTP session for all requests.
s = requests.session()
s.keep_alive = False  # close connections instead of keeping them alive between requests
def getTradeValue(cur, TradeDate):
    """Fetch the TWSE daily report for *TradeDate* and insert every row via *cur*.

    Raises IndexError when the page has no data table (non-trading day).
    """
    dt = str(TradeDate).replace('-', '')
    # The default keep-alive Connection header means one connection serves many
    # transfers; if connections are not returned to the pool after repeated
    # requests, new ones cannot be created — so force 'close' per request.
    res = s.get(url.format(dt), headers={'Connection': 'close'})
    time.sleep(0.01)
    soup = bs(res.text, features='lxml')
    for row in soup.select('table')[4].select('tr')[3:]:
        td = row.select('td')
        # Columns 0/1 are code/name, column 9 is the +/- sign; the rest are
        # comma-formatted numbers that need the separators stripped.
        record = [td[0].text, td[1].text, TradeDate]
        record += [money_conversion(td[i].text) for i in range(2, 9)]
        record.append(td[9].text)
        record += [money_conversion(td[i].text) for i in range(10, 16)]
        cur.execute(sql, record)
# Open (or create) the local SQLite database holding the daily quotes.
con = lite.connect('/Users/ramonliao/Documents/Code/Web_Crawler/TW_MI_INDEX/TW_MI_INDEX.db')
cur = con.cursor()
# con.execute("delete from 每日收盤行情 where 日期") # picking a bare column deletes everything; but with column=? the matching rows in the db are not deleted ; 日期 = '2018-10-01'
# con.commit()
# con.close()
# exit()
today = date.today()
TradeDate = StartDate
# Crawl one day at a time, from StartDate up to (but not including) today.
while TradeDate < today:
    try:
        getTradeValue(cur, TradeDate)
    except IndexError as inst:
        # No report table on the page: not a trading day (weekend/holiday).
        print('Not trading %s' % str(TradeDate), inst.args)
    except requests.exceptions.ConnectionError:
        print('stops on %s and sleeps for 5 seconds' % str(TradeDate)) # if the connection limit is hit, print the date we stopped at; the next pass resumes from here
        # con.commit()
        # con.close()
        time.sleep(5)
        continue  # retry the same date after the back-off
    process_bar.show_process()
    TradeDate = TradeDate + timedelta(days=1)
con.commit()
con.close()
|
[
"yc52811@hotmail.com"
] |
yc52811@hotmail.com
|
649384386c8da28eddc1c0329ec0a72c07363d93
|
ffd2c03d416e9a04979458e5fd684f6f4abd4746
|
/notification.py
|
bc975a4dbf60219e63cbb739bda157e8b1fbf082
|
[] |
no_license
|
rse43/ingressier
|
493300e820262e7d2d0e10cfbf1a974c85c3a3fe
|
bcb7526070949501eae6c9ef5a61727adc9b7cd4
|
refs/heads/master
| 2021-01-22T11:38:25.698905
| 2013-02-15T13:39:16
| 2013-02-15T13:39:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,587
|
py
|
#!/bin/python
# -*- coding:utf-8 -*-
import logging
import webapp2
import os
import math
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import search
from google.appengine.api import xmpp
from model import NotificationSetting
def create_document(email, latitude, longitude):
    """Build a search Document holding a user's email and notification centre.

    Args:
        email: The user's email address (document text field).
        latitude/longitude: Centre of the user's alert circle, in degrees.
    """
    return search.Document(
        fields=[search.TextField(name='email', value=email),
                # Bug fix: GeoPoint lives in the search module; the bare name
                # was never imported and raised NameError at call time.
                search.GeoField(name='centre', value=search.GeoPoint(latitude=latitude, longitude=longitude))])
def calc_distance(origin, destination):
    """Great-circle distance in metres between two points (haversine formula).

    Both arguments expose ``.lat`` and ``.lon`` attributes in degrees.
    """
    # Mean Earth radius, metres.
    radius = 6367500
    d_lat = math.radians(destination.lat - origin.lat)
    d_lon = math.radians(destination.lon - origin.lon)
    a = (math.sin(d_lat / 2) * math.sin(d_lat / 2)
         + math.cos(math.radians(origin.lat))
         * math.cos(math.radians(destination.lat))
         * math.sin(d_lon / 2) * math.sin(d_lon / 2))
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return radius * c
class CheckHandler(webapp2.RequestHandler):
    """Notifies every user whose alert circle contains an attacked portal.

    GET and POST take the same `lat`, `lon` and `attacker` parameters and
    previously duplicated the entire handler body; both now delegate to
    ``_notify`` (the only difference was a stray trailing space in the
    message text, unified here).
    """

    def _notify(self):
        """Find matching NotificationSettings and XMPP-message each owner."""
        latitude = float(self.request.get('lat').strip())
        longitude = float(self.request.get('lon').strip())
        attacker = self.request.get('attacker').strip()
        target_location = db.GeoPt(lat=latitude, lon=longitude)
        query_results = db.GqlQuery("SELECT * FROM NotificationSetting")
        for result in query_results:
            distance = calc_distance(target_location, result.centre_location)
            if distance < result.radius:
                template_values = {'link': "https://sydneyresistancewatch.appspot.com/notifications/map?lat=%s&lon=%s" % (str(latitude), str(longitude)),
                                   'text': "%s is attacking our portal at " % (attacker)}
                path = os.path.join(os.path.dirname(__file__), 'templates', 'message.xml')
                xml = template.render(path, template_values)
                status_code = xmpp.send_message(jids=result.email, raw_xml=True, body=xml, message_type=xmpp.MESSAGE_TYPE_CHAT)
                chat_message_sent = (status_code == xmpp.NO_ERROR)
                if not chat_message_sent:
                    # Typo fixed: "amoung" -> "among".
                    output = "%s is among one of the following errors: %s or %s" % (str(status_code), str(xmpp.INVALID_JID), str(xmpp.OTHER_ERROR))
                    logging.debug(output)

    def get(self):
        self._notify()

    def post(self):
        self._notify()
class NewHandler(webapp2.RequestHandler):
    """Creates a NotificationSetting for the signed-in user (GET only)."""

    def get(self):
        user = users.get_current_user()
        if not user:
            # Not signed in: bounce through the login flow and come back here.
            self.redirect(users.create_login_url("/new"))
            return
        centre = db.GeoPt(
            lat=float(self.request.get('lat').strip()),
            lon=float(self.request.get('lon').strip()),
        )
        record = NotificationSetting(
            email=user.email(),
            centre_location=centre,
            radius=int(self.request.get('r').strip()),
        )
        record.put()
        self.response.out.write("record added.")

    def post(self):
        # Creation is only supported via GET.
        pass
class MapURLHandler(webapp2.RequestHandler):
    """Redirects to the Ingress intel map centred on the given lat/lon."""

    def get(self):
        # Intel map expects coordinates scaled by 1e6 (E6 format).
        lat_e6 = int(float(self.request.get('lat').strip()) * 1000000)
        lon_e6 = int(float(self.request.get('lon').strip()) * 1000000)
        self.redirect("http://www.ingress.com/intel?latE6=%s&lngE6=%s&z=19" % (str(lat_e6), str(lon_e6)))
class RemoveHandler(webapp2.RequestHandler):
    """Placeholder: removal of notification settings is not implemented yet."""
    def get(self):
        pass
    def post(self):
        pass
class ShowHandler(webapp2.RequestHandler):
    """Placeholder: listing notification settings is not implemented yet."""
    def get(self):
        pass
# Verbose logging for the whole application.
logging.getLogger().setLevel(logging.DEBUG)

# URL routing table for the notifications service.
app = webapp2.WSGIApplication([("/notifications/check", CheckHandler),
                               ("/notifications/new", NewHandler),
                               ("/notifications/remove", RemoveHandler),
                               ("/notifications/show", ShowHandler),
                               ("/notifications/map", MapURLHandler)], debug=False)


def real_main():
    """Run the WSGI application (App Engine entry point)."""
    run_wsgi_app(app)


if __name__ == "__main__":
    real_main()
|
[
"rse43.rse43@gmail.com"
] |
rse43.rse43@gmail.com
|
3d382b150c56e3e4e54c4bd56bd12e362f9d8418
|
318b737f3fe69171f706d2d990c818090ee6afce
|
/test/integration-test/python-sdk-test/test/test_lastjoin.py
|
42881e93af11bf34e5d8b0e053b012b75b3d3760
|
[
"Apache-2.0"
] |
permissive
|
4paradigm/OpenMLDB
|
e884c33f62177a70749749bd3b67e401c135f645
|
a013ba33e4ce131353edc71e27053b1801ffb8f7
|
refs/heads/main
| 2023-09-01T02:15:28.821235
| 2023-08-31T11:42:02
| 2023-08-31T11:42:02
| 346,976,717
| 3,323
| 699
|
Apache-2.0
| 2023-09-14T09:55:44
| 2021-03-12T07:18:31
|
C++
|
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import pytest
from nb_log import LogManager
from common.fedb_test import FedbTest
from executor import fedb_executor
from util.test_util import getCases
log = LogManager('python-sdk-test').get_logger_and_add_handlers()
class TestLatJoin(FedbTest):
    # NOTE(review): class name looks like a typo for "TestLastJoin" — kept as-is
    # because renaming could break test selection by name.
    """Integration tests for LAST JOIN, parameterised over case files."""

    @pytest.mark.parametrize("testCase", getCases(["/function/join/"]))
    @allure.feature("Lastjoin")
    @allure.story("batch")
    def test_lastjoin(self, testCase):
        # One executor run per case collected from /function/join/.
        fedb_executor.build(self.connect, testCase).run()
|
[
"noreply@github.com"
] |
4paradigm.noreply@github.com
|
c46ef0862197d9ac74c159faa9c07fea5cba6851
|
955cb3722ad4f3cc05bb3d38fd2609c822ffd8bb
|
/models.py
|
e65ab43cda250ca56185989fdd06ccc70b2fa556
|
[] |
no_license
|
BenjaminFraser/GameAPI
|
dae102655921ba306e6087c399d3a868231681c6
|
eb9e93027880525d362ccd8c2d57373baee4caca
|
refs/heads/master
| 2016-09-13T22:34:17.190754
| 2016-05-20T19:14:02
| 2016-05-20T19:14:02
| 56,184,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,731
|
py
|
from protorpc import messages
from google.appengine.ext import ndb
class User(ndb.Model):
    """Profile entity for a registered player.

    Attributes:
        name: The player's display name (str).
        email: The player's email address (str).
        wins: Number of games the player has won (int).
        total_played: Number of games the player has played (int).
    """
    name = ndb.StringProperty(required=True)
    email = ndb.StringProperty(required=True)
    wins = ndb.IntegerProperty(default=0)
    total_played = ndb.IntegerProperty(default=0)

    @property
    def win_percentage(self):
        """Ratio of wins to games played, or 0 when no games have been played."""
        if self.total_played > 0:
            return float(self.wins) / float(self.total_played)
        return 0

    def to_form(self):
        """Serialise this entity into an outbound UserForm message."""
        return UserForm(
            name=self.name,
            email=self.email,
            wins=self.wins,
            total_played=self.total_played,
            win_percentage=self.win_percentage,
        )

    def add_win(self):
        """Record a win: increment wins and total_played, then persist."""
        self.wins += 1
        self.total_played += 1
        self.put()

    def add_loss(self):
        """Record a loss: increment only total_played, then persist."""
        self.total_played += 1
        self.put()
class Score(ndb.Model):
    """Result record for a finished game.

    Attributes:
        date: Date the game ended.
        winner: Key of the winning user.
        loser: Key of the losing user.
    """
    date = ndb.DateProperty(required=True)
    winner = ndb.KeyProperty(required=True)
    loser = ndb.KeyProperty(required=True)

    def to_form(self):
        """Serialise this entity into an outbound ScoreForm message."""
        return ScoreForm(
            date=str(self.date),
            winner=self.winner.get().name,
            loser=self.loser.get().name,
        )
class InsertShipsForm(messages.Message):
    """Used to insert one ship into a user's grid prior to starting.

    Attributes:
        ship_type: The ship type to be inserted, as a string.
        start_row: An integer relating to the ship starting row.
        start_column: An integer relating to the ship starting column.
        orientation: The orientation of the ship as a string: 'horizontal' or 'vertical'
    """
    # Field numbers are protorpc wire tags and must stay stable.
    ship_type = messages.StringField(1, required=True)
    start_row = messages.IntegerField(2, required=True)
    start_column = messages.IntegerField(3, required=True)
    orientation = messages.StringField(4, required=True)
class InsertShipsForms(messages.Message):
    """Container for multiple InsertShipsForm messages used during ship insert.

    Attributes:
        ships: The InsertShipsForm messages, as a repeated property.
    """
    ships = messages.MessageField(InsertShipsForm, 1, repeated=True)
class MakeMoveForm(messages.Message):
    """Used to make a move in an existing game.

    Attributes:
        user_name: The user's name who is making a game move.
        target_row: The chosen target row, as an int between 0-9.
        target_col: The chosen target column, as an int between 0-9.
    """
    user_name = messages.StringField(1, required=True)
    target_row = messages.IntegerField(2, required=True)
    target_col = messages.IntegerField(3, required=True)
class ScoreForm(messages.Message):
    """ScoreForm for outbound Score information messages.

    Attributes:
        date: A string representation of the date of the game.
        winner: A string representation of the winning user's name.
        loser: A string representation of the losing user's name.
    """
    date = messages.StringField(1, required=True)
    winner = messages.StringField(2, required=True)
    loser = messages.StringField(3, required=True)
class ScoreForms(messages.Message):
    """Container for multiple ScoreForm messages.

    Attributes:
        items: The ScoreForm messages, as a repeated property.
    """
    items = messages.MessageField(ScoreForm, 1, repeated=True)
class StringMessage(messages.Message):
    """Outbound (single) string message.

    Attributes:
        message: The outbound message to be sent, as a string.
    """
    message = messages.StringField(1, required=True)
class UserForm(messages.Message):
    """User Form for outbound user messages.

    Attributes:
        name: The name of the user as a string.
        email: The email of the user as a string (optional on the wire).
        wins: User wins, as an int.
        total_played: User total played games, as an int.
        win_percentage: User's win percentage, as a float.
    """
    name = messages.StringField(1, required=True)
    email = messages.StringField(2)
    wins = messages.IntegerField(3, required=True)
    total_played = messages.IntegerField(4, required=True)
    win_percentage = messages.FloatField(5, required=True)
class UserForms(messages.Message):
    """Container for multiple UserForm messages.

    Attributes:
        items: The UserForm messages, as a repeated property.
    """
    items = messages.MessageField(UserForm, 1, repeated=True)
|
[
"Ben-fraser@hotmail.co.uk"
] |
Ben-fraser@hotmail.co.uk
|
465f596cae1d7b7ac8377d0ab55205fc2972dd10
|
bddf2759a453a4e0e3ed075cd195313e044e7f13
|
/readsensors/send_sms.py
|
43eb60b327458192b66174bd6fac74a079187736
|
[] |
no_license
|
SteveCossy/IOT
|
79b6681dd6e3f86af08fd128901cafe2daa49c6d
|
d191c20fd0a4636f3b814c324710112c0fa778a5
|
refs/heads/master
| 2023-03-18T18:48:21.939346
| 2023-03-11T21:35:05
| 2023-03-11T21:35:05
| 100,091,388
| 3
| 3
| null | 2022-02-11T20:34:24
| 2017-08-12T05:29:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,248
|
py
|
# Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client

# Read your Account Sid and Auth Token from twilio.com/console
dataFile = '/home/pi/twilio_data_test'
# File contains: SID Auth_Token TargetTemp TargetFreq Frequency SID startMsg txtBody from_nbr to_nbr
# txtBody eg: Current Temps - This: %1 That: %2


def _read_line(f):
    """Read one line, dropping the trailing newline that readline() keeps.

    readline() returns the line terminator as part of the string, so the
    original code passed credentials and phone numbers to Twilio with a
    trailing '\n' attached.
    """
    return f.readline().rstrip('\n')


# Context manager guarantees the file is closed even if a read fails.
with open(dataFile, 'r') as fileContent:
    comment = _read_line(fileContent)
    account_sid = _read_line(fileContent)        # Twilio ID and Token
    auth_token = _read_line(fileContent)
    temp_target_parts = _read_line(fileContent)  # Alert target : how often to send alerts
    temp_frequency_s = _read_line(fileContent)   # How often to send regular updates
    this_msg = _read_line(fileContent)           # Text for the first message
    msg_body = _read_line(fileContent)           # Message, with %1 & %2 as value placeholders
    msg_from = _read_line(fileContent)           # From phone number, as required by Twilio
    msg_to = _read_line(fileContent)             # To number, must be authorised in Twilio

target_temp_s, target_freq_s = temp_target_parts.split(":")

client = Client(account_sid, auth_token)
message = client.messages.create(body=msg_body, from_=msg_from, to=msg_to)
print(message.sid)
|
[
"steve@rata.co.nz"
] |
steve@rata.co.nz
|
4cc271d10ba406fe36086b21a70aeab6a77ff050
|
804f2ae447089a1883ffac83b9b07d9cb26be1f8
|
/Final/src/game.py
|
cf928c53782f567a15178952f0ba81e47b377038
|
[] |
no_license
|
JusDOrr/Python
|
b1bcb89291e648b783388a7a06a3e8a70efdee9c
|
73fa3eb2b3181be824a504ecbaf414ceea070678
|
refs/heads/master
| 2021-01-02T22:37:21.975043
| 2018-04-03T18:43:24
| 2018-04-03T18:43:24
| 99,357,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
#Justin Orr
#Final Project
#Python: 3.6.1
'''The Main Game module'''
try:
import pygame
except:
print("Error importing pygame module. Please \'pip install pygame\' for python 3.6.1")
try:
from pygame.locals import *
except:
print("Error importing * from pygame.locals")
try:
from minesweeper.manager import ScreenManager
except:
print("Error importing ScreenManager module from minesweeper.manager")
def initPyGame():
    """Initialise pygame and its font module, open the game window, return it.

    Returns:
        The 480x480 display surface, captioned "MineSweeper Lite".
    """
    pygame.init()
    pygame.font.init()
    surface = pygame.display.set_mode((480, 480))
    pygame.display.set_caption("MineSweeper Lite")
    return surface
# MAIN PROGRAM
gameWindow = initPyGame()
clock = pygame.time.Clock()
screenManager = ScreenManager()
done = False
while not done:
    # CHECK FOR EXIT COMMAND (window close button)
    for event in pygame.event.get():
        if event.type == QUIT:
            done = True
    # CLEAR WINDOW BEFORE REDRAW (light grey background)
    gameWindow.fill((195, 195, 195))
    # MAIN GAME LOOP: per-frame logic and drawing are delegated to the manager
    screenManager.update(pygame, gameWindow)
    if screenManager.checkQuit():
        done = True
    # PREPARE FOR NEXT LOOP: flip buffers and cap the frame rate at 60 FPS
    pygame.display.flip()
    clock.tick(60)
pygame.quit()
quit()
# END MAIN PROGRAM
|
[
"JustinOrr2011@gmail.com"
] |
JustinOrr2011@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.