blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2–616) | content_id (string, len 40) | detected_licenses (list, len 0–69) | license_type (string, 2 classes) | repo_name (string, len 5–118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2–10.3M) | extension (string, 246 classes) | content (string, len 2–10.3M) | authors (list, len 1) | author_id (string, len 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
807aa555d76826932aa6a8c5bf2a3109e5e03a70
|
0e7f6295ef6891a493e626165736867fe3a6b866
|
/Audio-Recognition-main/forms.py
|
2a21d028fea3085016a6f30b0aaba9931e0e6eb7
|
[] |
no_license
|
AkshayScrKiddie/Voice-Pattern-Recognition
|
7fe608c4cfa76e3679db0c0a7e78159704661f27
|
c39332f63cc063f1266f2a56cd7d8b756379e933
|
refs/heads/main
| 2023-04-26T02:50:07.187604
| 2021-05-23T15:31:48
| 2021-05-23T15:31:48
| 328,376,256
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 845
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegistrationForm(FlaskForm):
username = StringField('Username',validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
class LoginForm(FlaskForm):
email = StringField('Email',validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
|
[
"noreply@github.com"
] |
AkshayScrKiddie.noreply@github.com
|
e630f3b9a59c07a97fc92050d4902ad66e58ee47
|
50f00c6555556593a45b94c6923169191db7a853
|
/pset6/readability/readability.py
|
ee30e635056b645b0c3dc64bd21e261c7aa44248
|
[] |
no_license
|
DeagaDH/CS50_Homework
|
9e239ceaa186de027019a7b5b93ec377d95414ac
|
91e42ba0c1f8aeca692acf508fb49f53c588abb6
|
refs/heads/main
| 2023-03-16T11:45:54.603364
| 2021-03-02T17:29:27
| 2021-03-02T17:29:27
| 342,089,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,278
|
py
|
from cs50 import get_string
def main():
#Get text input from user
text = get_string("Text: ")
#Compute grade level
grade = get_grade(text)
#Print grade
if (grade < 1):
print("Before Grade 1")
elif (grade >= 16):
print("Grade 16+")
else:
print(f'Grade {grade}')
def get_grade(text):
# Get length of text
length = len(text)
# Variables to count letters, words and sentences
letters = 0
words = 0
sentences = 0
#Check all characters in text
for c in text:
#Check for alphabetic characters
if c.isalpha():
letters +=1
#Punctuation indicates end of sentence
elif (c == '.' or c=='!' or c =='?'):
sentences += 1
#Spaces denote new words
elif (c == ' '):
words += 1
#Add one final word; no space at the end!
words += 1
#Calculate average number of letters and sentences
words_100 = words/100.0 #Multiplier to get 100 words
#Averages per 100 words
avg_letters = letters/words_100
avg_sentences = sentences/words_100
# Calculate grade
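# (this is the Coleman-Liau index: letters and sentences averaged per 100 words)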
grade = 0.0588 * avg_letters - 0.296 * avg_sentences - 15.8
# Return rounded grade
return int(round(grade,0))
main()
|
[
"dhdeaga@gmail.com"
] |
dhdeaga@gmail.com
|
8e2b8dac9d80efc639d7ff1b9b7e28e2ed734662
|
235dc977904c11b4ad6137490df814dd3e9f14e6
|
/zhihu/Zhihu/settings.py
|
b146d53e8b1a8e90f3cbff4c3ebca5d310df06df
|
[] |
no_license
|
zjian1425/EasySpider
|
8252c2ae51102a5598145689c82c4b96a537da7c
|
00e302e7e18cddedd91d44b06e5555870c41dcde
|
refs/heads/master
| 2021-08-08T07:16:29.622357
| 2018-11-12T06:58:59
| 2018-11-12T06:58:59
| 145,305,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,558
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for Zhihu project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Zhihu'
SPIDER_MODULES = ['Zhihu.spiders']
NEWSPIDER_MODULE = 'Zhihu.spiders'
# User-Agent = ['']
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Zhihu (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20'
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'Zhihu.middlewares.ZhihuSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
# 'Zhihu.middlewares.RondomUserAgentMiddleware': 543,
'Zhihu.middlewares.PhantomjsMiddleware':543, # enable the custom middleware
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware':None, # disable the built-in middleware
}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
# enable the pipeline that writes items to the database; see pipelines.py for the implementation
'Zhihu.pipelines.MysqlPipeline': 201 # the lower the number, the higher the priority
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"zjian1425@gmail.com"
] |
zjian1425@gmail.com
|
fb25365b262d24734e244e6b507d22053818cc6b
|
728430915e7ff081fb0ad33568424f3625ebd6aa
|
/djangotutorial/urls.py
|
0a0f48c0b18056461e826ae01dbe0126cc1e9445
|
[] |
no_license
|
sztojfen/Djangotutorial
|
df7625c215d631567483d781f8376e6349c6e183
|
dfa834d15588a39fd12d01151bf619756590f0f6
|
refs/heads/master
| 2022-12-19T10:38:43.248505
| 2020-09-28T14:03:08
| 2020-09-28T14:03:08
| 270,974,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
"""djangotutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^articles/', include('articles.urls')),
url(r'^about/$', views.about),
url(r'^$', views.homepage),
]
|
[
"stefan.stankiewicz@skriware.com"
] |
stefan.stankiewicz@skriware.com
|
abbfeca2267f27ed0e751e283602130681c8780d
|
f0544e7936db475062472ec99f1bdbd28a41d522
|
/conversiones.py
|
930c95bf512dc26113d5bd753bc5026b2744b301
|
[] |
no_license
|
victorhvivasc/helpers
|
9db896b03cd7b55412e1f7d8cc793090e250574b
|
b01beb503e3d2c69afce5dccd22bfefa925c7e75
|
refs/heads/master
| 2022-12-11T05:08:04.162081
| 2020-09-12T01:08:51
| 2020-09-12T01:08:51
| 287,869,597
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,322
|
py
|
# -*- coding: utf-8 -*-
from herramientas.unidades import Unidades
from herramientas.valores import *
def convert_live(u_s: str):
"""Decorador que convierte las unidades de una funcion en tiempo de ejecución, siempre que la relación de
conversión haya sido incluida en el archivo valores.py.
Cuidado, la conversion se hace sobre todos los argumentos que sean instancia Unidades de la función, no asi sobre
constantes incluidas en el cuerpo de la misma
Ejemplo:
@convert_live(u_s='pies')
def sumar(a, b):
return a+b
uno = Unidades(5, dtype='m')
dos = Unidades(5, dtype='m')
sumar(uno, dos)
:return 32.8084 pies
"""
def convert_to_mks(f):
def envoltura(*args):
aux = []
for i in args:
if isinstance(i, Unidades):
aux.append(escalar(i, u_s=u_s))
return f(*aux)
return envoltura
return convert_to_mks
def escalar(valor, u_s: str, u_e: str = None) -> Unidades:
"""Funcion para redimensionar segun las escalas preestablecidas en el documento valores.py
valor: int, float, class Unidades, indiferentemente del valor de entrada la salida sera del type class Unidades
u_s: Unidad a la cual se desea hacer la conversión de escala.
u_e: default None, cuando el parametro 'valor' suministrado es del type Unidades se identifica automaticamente el
tipo de datos de entrada, en caso contrario debe suministrarse
Ejemplo 1:
numero = 25
escala = escalar(numero, u_e='m', u_s='km')
:return 0.025 km
Example 2:
numero = Unidades(25, dtype='m')
escala = escalar(numero, u_s='km')
:return 0.025 km
"""
if isinstance(valor, Unidades):
dtypes = str(valor.dtype)+'-'+u_s
valor.dtype = u_s
return valor*escalas[dtypes]
else:
valor = valor*escalas[u_e+'-'+u_s]
return Unidades(valor, dtype=u_s)
if __name__ == '__main__':
numero = Unidades(25, dtype='m')
escala = escalar(numero, u_s='km')
print(escala, type(escala))
print(escalar(1, u_e='pies', u_s='m'))
@convert_live(u_s='km')
def sumar(a: Unidades, b: Unidades):
return a+b
uno = Unidades(5, dtype='m')
dos = Unidades(5, dtype='m')
F = sumar(uno, dos, 3)
print(F.dtype)
|
[
"victorhvivasc@gmail.com"
] |
victorhvivasc@gmail.com
|
553135781c95acaf931f856468805e4f9ba93845
|
7d74195bd00cbe8516670c8fe718e983106c9830
|
/src/python_language_services/pyclbr_readmodule.py
|
d8fc7fa857bc6081e3591d82fe9099068ac750a6
|
[] |
no_license
|
masa4u/example_python
|
7ab3d48020855ad493336afcd8d0c02eb3104b2b
|
7bdee4cb8e90255b20353f7f95d3e879f6462638
|
refs/heads/master
| 2021-01-18T14:10:56.539659
| 2017-03-28T12:52:08
| 2017-03-28T12:52:08
| 30,511,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
import pyclbr
import os
from operator import itemgetter
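# NB: this sample is Python 2 code (print statements and basestring below)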
def show_class(name, class_data):
print 'Class:', name
print '\tFile: {0} [{1}]'.format(os.path.basename(class_data.file), class_data.lineno)
show_super_classes(name, class_data)
show_methods(name, class_data)
print
return
def show_methods(class_name, class_data):
for name, lineno in sorted(class_data.methods.items(), key=itemgetter(1)):
print '\tMethod: {0} [{1}]'.format(name, lineno)
return
def show_super_classes(name, class_data):
super_class_names = []
for super_class in class_data.super:
if super_class == 'object':
continue
if isinstance(super_class, basestring):
super_class_names.append(super_class)
else:
super_class_names.append(super_class.name)
if super_class_names:
print '\tSuper classes:', super_class_names
return
example_data = pyclbr.readmodule('pyclbr_example')
for name, class_data in sorted(example_data.items(), key=lambda x:x[1].lineno):
show_class(name, class_data)
|
[
"masa4u@gmail.com"
] |
masa4u@gmail.com
|
f35555d539781fa04732a6e2e84059cd9fd9237b
|
6166ade942c4616afe8a09fba13a3c8c8aab7fa2
|
/day00/ex09/guess.py
|
b2df11c33439311b7f75668cc302a7ee360f590e
|
[] |
no_license
|
Nnevalti/Bootcamp_python_42
|
955f5405be7fe794cb9c2af0d83e39368897809f
|
dd872d77d3015d905d672b447787b4b8c8c9ba79
|
refs/heads/master
| 2022-04-09T08:14:53.497531
| 2020-03-12T17:14:29
| 2020-03-12T17:14:29
| 245,995,952
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 802
|
py
|
import random
print("This is an interactive game!")
print("You have to enter a number between 1 and 99 to find out the secret number.")
print("Type 'exit' to end the game.")
print("Good luck!\n")
number = random.randint(1,99)
ntry = 0
while 1 :
guess = input("What's your guess between 1 and 99?\n")
ntry += 1
if (guess.isdigit()) :
if (int(guess) == number) :
if (number == 42) :
print("The answer to the ultimate question of life, the universe and everything is 42.")
if (ntry == 1) :
print("Congratulations! You got it on your first try!")
else :
print("You won in %d attempt" % ntry)
exit()
elif (int(guess) > number) :
print("Too high!")
else :
print("Too low")
elif guess == "exit" :
print("Goodbye!")
exit()
else :
print("That's not a number")
|
[
"vdescham@e1r10p12.42.fr"
] |
vdescham@e1r10p12.42.fr
|
aa98e70fd853df42feb919f23d0fab847f7065e7
|
0a11a15cf64e25585d28f484bb2118e8f858cfeb
|
/programmers/1019_기둥과 보.py
|
eb212b61c8124f1180f1ede8f04d42782beb6d19
|
[] |
no_license
|
seoul-ssafy-class-2-studyclub/GaYoung_SSAFY
|
7d9a44afd0dff13fe2ba21f76d0d99c082972116
|
23e0b491d95ffd9c7a74b7f3f74436fe71ed987d
|
refs/heads/master
| 2021-06-30T09:09:00.646827
| 2020-11-30T14:09:03
| 2020-11-30T14:09:03
| 197,476,649
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
# n = 5
# build_frame = [[1,0,0,1],[1,1,1,1],[2,1,0,1],[2,2,1,1],[5,0,0,1],[5,1,0,1],[4,2,1,1],[3,2,1,1]]
n = 5
build_frame = [[0,0,0,1],[2,0,0,1],[4,0,0,1],[0,1,1,1],[1,1,1,1],[2,1,1,1],[3,1,1,1],[2,0,0,0],[1,1,1,0],[2,2,0,1]]
def make(x, y, type, total):
if type == 0: # 0: pillar
if y == 0: # a pillar can stand on the floor,
return True
if (x - 1, y, 1) in total or (x, y, 1) in total: # or on one end of a beam,
return True
if (x, y - 1, 0) in total: # or on top of another pillar
return True
else:
return False
elif type == 1: # 1: beam
if y == 0:
return False
if (x, y - 1, 0) in total or (x + 1, y - 1, 0) in total: # a beam needs one end on top of a pillar,
return True
if (x - 1, y, 1) in total and (x + 1, y, 1) in total : # or both ends connected to other beams
return True
else:
return False
def solution(n, build_frame):
total = {}
near = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, 1), (0, 0)]
for x, y, type, install in build_frame:
if install == 1: # 1: install
if make(x, y, type, total):
total[x, y, type] = 1
elif install == 0: # 0: remove
total.pop((x, y, type))
for a, b in near:
xi, yi, = x + a, y + b
if (xi, yi, 0) in total:
if not make(xi, yi, 0, total):
total[x, y, type] = 1 # note: this is total[x, y, type], not total[xi, yi, a]
break
if (xi, yi, 1) in total:
if not make(xi, yi, 1, total):
total[x, y, type] = 1 # note: this is total[x, y, type], not total[xi, yi, a]
break
total = sorted(total.keys())
# print(total)
return total
|
[
"gyyoon4u@naver.com"
] |
gyyoon4u@naver.com
|
c5b1ed3395e45b3b46aa86ce4d3ba2c3a470e6af
|
e13091c137650cd31c8d9778087b369033d0cf96
|
/src/main/python/algo_expert/tree/Boj2056.py
|
125c1327fea1235b995c000f138fea631a91b14d
|
[] |
no_license
|
jwoojun/CodingTest
|
634e2cfe707b74c080ddbe5f32f58c1e6d849968
|
d62479d168085f13e73dfc1697c5438a97632d29
|
refs/heads/master
| 2023-08-22T09:03:32.392293
| 2021-10-31T01:00:33
| 2021-10-31T01:00:33
| 300,534,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,190
|
py
|
import copy
import sys
from collections import deque
input = sys.stdin.readline
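# BOJ 2056: earliest completion time for N tasks with durations and prerequisites; a full worked version is kept in the comments below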
N = int(input())
graph = [[] for _ in range(N+1)]
indegree = [0] * (N + 1)
time = [0] * (N + 1)
for i in range(1, N+1) :
data = list(map(int, input().split()))
time[i] = data[0]
indegree[i] = data[1]
for j in data[2:] :
graph[j].append(i)
def topology_sort():
q = deque()
for i in range(1, N+1) :
if indegree[i] == 0 :
q.append(i)
# for i in range(1, N + 1):
# data = list(map(int, input().split()))
# time[i] = data[0]
# indegree[i] = data[1]
# for j in data[2:]:
# graph[j].append(i)
#
#
# def topology_sort():
# result = copy.deepcopy(time)
# q = deque()
# for i in range(1, N+1):
# if indegree[i] == 0:
# q.append(i)
# while q:
# now = q.popleft()
# for i in graph[now]:
# result[i] = max(result[i], result[now] + time[i])
# indegree[i] -= 1
# if indegree[i] == 0:
# q.append(i)
# return max(result)
# print(topology_sort())
# 7
# 5 0
# 1 1 1
# 3 1 2
# 6 1 1
# 1 2 2 4
# 8 2 2 4
# 4 3 3 5 6
|
[
"jjwjun10@gmail.com"
] |
jjwjun10@gmail.com
|
3be13245d033e578bcbae630b6214bae42cf89ca
|
677db16b507cecbbbe7363c3e42a5a6782ec9d2b
|
/234. Palindrome Linked List.py
|
5df00b9c09914d3dca6e9ef5ff394802f39611a6
|
[] |
no_license
|
yunfengzhou-hub/leetcode
|
b3eece98d923ce6629c57bd219deb768e7d9bdc4
|
918f2eec458007cac01c009ba666b0ed9af97506
|
refs/heads/master
| 2022-06-10T06:49:04.252233
| 2019-05-21T12:10:41
| 2019-05-21T12:10:41
| 262,413,401
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
if head==None: return True
x=[]
while head!=None:
x.append(head.val)
head=head.next
for i in range(len(x)):
if x[i]!=x[-i-1]: return False
return True
|
[
"zhouyf@bu.edu"
] |
zhouyf@bu.edu
|
b1c15983423fa6712c0a120bd1e47ddf84236846
|
c36d21353df9c5f6a3a3df4545a29c2b306c3ede
|
/myproject/DataShow/tests.py
|
43f7a197acfa6382665710acaa555db4e5303e82
|
[] |
no_license
|
moteily/DjangoSystemMonitor
|
73289ac8fbc60e0f30be631d10f9f3ea5fc1c021
|
f671f3ac226d1add5cc659bc3b2c6bf5ccc9abb5
|
refs/heads/master
| 2022-12-16T06:45:39.719312
| 2020-09-11T09:37:59
| 2020-09-11T09:37:59
| 286,942,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.test import TestCase
# Create your tests here.
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(BASE_DIR)
|
[
"994283977@qq.com"
] |
994283977@qq.com
|
a2cb63b2254cbda2b926dc9d46d1ad5ec0c7cfa0
|
013266cf292f36101ff4dc979779332bbc708146
|
/linked_list/linked_list.py
|
3622547a9cae8641d658c2125b3c0d1c64434023
|
[] |
no_license
|
jels-code/python-ds-algo
|
1c71ec302c105eb5cd70007fd67b7e3e7c03c0dc
|
e76b1d1f7986afe15e86fe10dcc7322f1fe7dc4b
|
refs/heads/master
| 2023-04-24T01:57:31.928247
| 2021-05-14T03:44:36
| 2021-05-14T03:44:36
| 338,950,308
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,572
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
def __repr__(self):
return self.data
class LinkedList:
def __init__(self):
self.head = None
def __repr__(self):
node = self.head
nodeList = []
while node is not None:
nodeList.append(node.data)
node = node.next
nodeList.append('None')
return ' -> '.join(str(v) for v in nodeList)
def __iter__(self):
if self.head is None:
return
node = self.head
while node is not None:
yield node
node = node.next
# for testing purposes
def insert_from_list(self, list):
for x in list:
self.append(x)
def append(self, data):
new_node = Node(data)
if self.head is None:
self.head = new_node
return
curr_node = self.head
while curr_node.next is not None:
curr_node = curr_node.next
curr_node.next = new_node
def print_list(self):
curr_node = self.head
while curr_node is not None:
print(curr_node.data)
curr_node = curr_node.next
def prepend(self, data):
new_head = Node(data)
new_head.next = self.head
self.head = new_head
def insert_after_node(self, prev_node, data):
if prev_node is None:
raise TypeError("prev_node can not be None")
new_node = Node(data)
new_node.next = prev_node.next
prev_node.next = new_node
def delete_node(self, key):
if self.head is None:
raise TypeError("Linkedlist is empty")
if self.head.data == key:
self.head = self.head.next
return
curr_node = self.head
while curr_node.next is not None:
if curr_node.next.data == key:
curr_node.next = curr_node.next.next
return
curr_node = curr_node.next
raise ValueError(f'Node {key} was not found')
def delete_node_at_pos(self, pos):
if pos < 0:
raise IndexError(f'Index {pos} cannot be negative')
if pos == 0:
self.head = self.head.next
return
prev_node = self.head
for i in range(1, pos):
prev_node = prev_node.next
if prev_node is None:
raise IndexError(f'Index {pos} is out of range')
if prev_node is None or prev_node.next is None:
raise IndexError(f'Index {pos} is out of range')
prev_node.next = prev_node.next.next
def len(self):
if self.head is None:
return 0
length = 0
for node in self:
length += 1
return length
def swap(self, key_1, key_2):
if self.head is None:
return
prev_1 = None
curr_1 = self.head
while curr_1 is not None and curr_1.data != key_1:
prev_1 = curr_1
curr_1 = curr_1.next
if curr_1 is None:
raise ValueError(f'{key_1} is not in the list')
prev_2 = None
curr_2 = self.head
while curr_2 is not None and curr_2.data != key_2:
prev_2 = curr_2
curr_2 = curr_2.next
if curr_2 is None:
raise ValueError(f'{key_2} is not in the list')
# if prev_1 is not None, curr_1 was not the head node
if prev_1 is not None:
prev_1.next = curr_2
else:
self.head = curr_2
if prev_2 is not None:
prev_2.next = curr_1
else:
self.head = curr_1
curr_1.next, curr_2.next = curr_2.next, curr_1.next
def reverse(self):
prev = None
curr = self.head
while curr is not None:
curr_next = curr.next
curr.next = prev
prev = curr
curr = curr_next
self.head = prev
def reverse_recursive(self):
self.head = self._reverse_recursive(self.head)
def _reverse_recursive(self, curr, prev=None):
if not curr:
return prev
curr_next = curr.next
curr.next = prev
return self._reverse_recursive(curr_next, curr)
def merge_sorted_lists(self, llist):
"""Some documentation"""
ptr1 = self.head
ptr2 = llist.head
dummy = prev_of_merged = Node(0)
while ptr1 is not None and ptr2 is not None:
if ptr1.data <= ptr2.data:
prev_of_merged.next = ptr1
ptr1 = ptr1.next
else:
prev_of_merged.next = ptr2
ptr2 = ptr2.next
prev_of_merged = prev_of_merged.next
prev_of_merged.next = ptr1 or ptr2
return dummy.next
def remove_duplicates(self):
dup = dict()
prev = None
curr = self.head
while curr:
if curr.data in dup:
prev.next = curr.next
else:
prev = curr
dup[curr.data] = 1
curr = curr.next
def nth_to_last(self, n):
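# two-pointer technique: end_ptr starts n-1 nodes ahead, then both pointers advance until end_ptr hits the tail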
end_ptr = self.head
if not end_ptr:
raise ValueError('list is empty')
count = 1
while end_ptr and count < n:
end_ptr = end_ptr.next
count += 1
if end_ptr is None:
raise ValueError(f'{n}th last node is outside length of list')
nth_node = self.head
while end_ptr.next is not None:
end_ptr = end_ptr.next
nth_node = nth_node.next
return nth_node.data
def count_occurences(self, data):
count = 0
for node in self:
if node.data == data:
count += 1
return count
def count_occurences_recursive(self, node, data):
if node is None:
return 0
if node.data == data:
return 1 + self.count_occurences_recursive(node.next, data)
return self.count_occurences_recursive(node.next, data)
def rotate(self, k):
node = self.head
head = self.head
for i in range(1, k):
if not node:
raise ValueError('invalid k for length of list')
node = node.next
pivot = node
if pivot.next is None:
return
self.head = pivot.next
node = pivot.next
pivot.next = None
while node.next is not None:
node = node.next
node.next = head
return
|
[
"jelscode@gmail.com"
] |
jelscode@gmail.com
|
1bd88b66d8410d876e30a0c83d4391d4fdb0d072
|
92e3a6424326bf0b83e4823c3abc2c9d1190cf5e
|
/scripts/icehouse/opt/stack/tempest/tempest/api/network/test_metering_extensions.py
|
2cfb841ba0d9494a75d69a2e0a7513f37ffcfd08
|
[
"Apache-2.0"
] |
permissive
|
AnthonyEzeigbo/OpenStackInAction
|
d6c21cf972ce2b1f58a93a29973534ded965d1ea
|
ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e
|
refs/heads/master
| 2023-07-28T05:38:06.120723
| 2020-07-25T15:19:21
| 2020-07-25T15:19:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,486
|
py
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Emilien Macchi <emilien.macchi@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest.openstack.common import log as logging
from tempest import test
LOG = logging.getLogger(__name__)
class MeteringJSON(base.BaseAdminNetworkTest):
_interface = 'json'
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Delete Metering labels
List, Show, Create, Delete Metering labels rules
"""
@classmethod
def resource_setup(cls):
super(MeteringJSON, cls).resource_setup()
if not test.is_extension_enabled('metering', 'network'):
msg = "metering extension not enabled."
raise cls.skipException(msg)
description = "metering label created by tempest"
name = data_utils.rand_name("metering-label")
cls.metering_label = cls.create_metering_label(name, description)
remote_ip_prefix = "10.0.0.0/24"
direction = "ingress"
cls.metering_label_rule = cls.create_metering_label_rule(
remote_ip_prefix, direction,
metering_label_id=cls.metering_label['id'])
def _delete_metering_label(self, metering_label_id):
# Deletes a label and verifies if it is deleted or not
_, body = self.admin_client.delete_metering_label(metering_label_id)
# Asserting that the label is not found in list after deletion
resp, labels = (self.admin_client.list_metering_labels(
id=metering_label_id))
self.assertEqual(len(labels['metering_labels']), 0)
def _delete_metering_label_rule(self, metering_label_rule_id):
# Deletes a rule and verifies if it is deleted or not
_, body = (self.admin_client.delete_metering_label_rule(
metering_label_rule_id))
# Asserting that the rule is not found in list after deletion
resp, rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule_id))
self.assertEqual(len(rules['metering_label_rules']), 0)
@test.attr(type='smoke')
def test_list_metering_labels(self):
# Verify label filtering
_, body = self.admin_client.list_metering_labels(id=33)
metering_labels = body['metering_labels']
self.assertEqual(0, len(metering_labels))
@test.attr(type='smoke')
def test_create_delete_metering_label_with_filters(self):
# Creates a label
name = data_utils.rand_name('metering-label-')
description = "label created by tempest"
_, body = (self.admin_client.create_metering_label(name=name,
description=description))
metering_label = body['metering_label']
self.addCleanup(self._delete_metering_label,
metering_label['id'])
# Assert whether created labels are found in labels list or fail
# if created labels are not found in labels list
resp, labels = (self.admin_client.list_metering_labels(
id=metering_label['id']))
self.assertEqual(len(labels['metering_labels']), 1)
@test.attr(type='smoke')
def test_show_metering_label(self):
# Verifies the details of a label
_, body = (self.admin_client.show_metering_label(
self.metering_label['id']))
metering_label = body['metering_label']
self.assertEqual(self.metering_label['id'], metering_label['id'])
self.assertEqual(self.metering_label['tenant_id'],
metering_label['tenant_id'])
self.assertEqual(self.metering_label['name'], metering_label['name'])
self.assertEqual(self.metering_label['description'],
metering_label['description'])
@test.attr(type='smoke')
def test_list_metering_label_rules(self):
# Verify rule filtering
_, body = self.admin_client.list_metering_label_rules(id=33)
metering_label_rules = body['metering_label_rules']
self.assertEqual(0, len(metering_label_rules))
@test.attr(type='smoke')
def test_create_delete_metering_label_rule_with_filters(self):
# Creates a rule
_, body = (self.admin_client.create_metering_label_rule(
remote_ip_prefix="10.0.1.0/24",
direction="ingress",
metering_label_id=self.metering_label['id']))
metering_label_rule = body['metering_label_rule']
self.addCleanup(self._delete_metering_label_rule,
metering_label_rule['id'])
# Assert whether created rules are found in rules list or fail
# if created rules are not found in rules list
resp, rules = (self.admin_client.list_metering_label_rules(
id=metering_label_rule['id']))
self.assertEqual(len(rules['metering_label_rules']), 1)
@test.attr(type='smoke')
def test_show_metering_label_rule(self):
# Verifies the details of a rule
_, body = (self.admin_client.show_metering_label_rule(
self.metering_label_rule['id']))
metering_label_rule = body['metering_label_rule']
self.assertEqual(self.metering_label_rule['id'],
metering_label_rule['id'])
self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
metering_label_rule['remote_ip_prefix'])
self.assertEqual(self.metering_label_rule['direction'],
metering_label_rule['direction'])
self.assertEqual(self.metering_label_rule['metering_label_id'],
metering_label_rule['metering_label_id'])
self.assertFalse(metering_label_rule['excluded'])
class MeteringXML(MeteringJSON):
interface = 'xml'
|
[
"cody@uky.edu"
] |
cody@uky.edu
|
7c30a64e642f3a2a0685bf0080c767c724ad7129
|
8ce633e7089ae39dfd3fa2495e03d2a658427fc6
|
/mongo/readmongo.py
|
856ca0ac5b271643db4bc594efb8d3b8413978d1
|
[] |
no_license
|
junlongzhao/lenovo
|
c713d674d5d2be1458aaa82528c6f7e604129d1c
|
eb8aa414383834c147b2a9f5b08deb3844e80607
|
refs/heads/master
| 2020-05-31T07:31:44.730545
| 2020-01-05T15:16:40
| 2020-01-05T15:16:40
| 190,166,538
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,520
|
py
|
import pymongo
import json
def connect():
client=pymongo.MongoClient('localhost',27017) # connect to the database at the given ip and port
mydb = client.mydb # select the mydb database
collection = mydb.user # select the user collection inside mydb
data3={"age":12,"name":"laozhao","school":"UESTC","label":"null"}
data4={"_id":1,"age": 23, "userName": "laozhao"}
data5={"_id":2,"age": 23, "userName": "zhangsan","school":"liuzhong"}
data6_before = {"_id": 16, "age": 23, "userName": "zhangsan", "school": "None"} #之前
data6={"_id":0,"age": 23, "userName": "zhangsan","school":"qizhong"} #之后
data7={"_id":0,"content":"中国 军情 新浪 军事","label":"pos"}
#collection.insert_one(data6)
#collection.update_one({ "_id": 0 },{"$set":data7}) # update method: the _id values must match and the fields must be the same
# query the content
print(collection.find_one({"_id": 0}))
def ReadContent():
client = pymongo.MongoClient('localhost', 27017) # connect to the database at the given ip and port
mydb = client.mydb # select the mydb database
collection = mydb.user # select the user collection inside mydb
#with open("data/data.json",encoding="utf-8") as fr:
with open("data/data.json",encoding="utf-8") as fr:
for line in fr.readlines():
data=json.loads(line) # converts the JSON string into a dict
print(data)
#print(type(line))
collection.insert_one(data)
if __name__=="__main__":
connect()
#ReadContent()
|
[
"4538241132qq.com"
] |
4538241132qq.com
|
3c999b4143eb281d967c5b6e77f444df4fe4cb11
|
199823d0d74250904c51e1738651cbb0a70e53e1
|
/DjangoModelAuditTesting/urls.py
|
2c58b7ce1e5598e215951ee9ee83f7f4e661d3a0
|
[] |
no_license
|
joshiparthin/DjangoModelAuditTesting
|
ee6480cdde21dcb6240138ea3f9e8325e822ec43
|
9ec1ce32fb8afbb69a7d9fe5bab565e21213ca4d
|
refs/heads/master
| 2020-07-25T09:45:45.849633
| 2019-09-13T11:20:11
| 2019-09-13T11:20:11
| 208,249,863
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
"""DjangoModelAuditTesting URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"parthjoshi@Parths-MacBook-Pro.local"
] |
parthjoshi@Parths-MacBook-Pro.local
|
4f6ab4e80bac7ed94b51c35c39161a3acbb36326
|
8cd5325ff394f3ca92032b586297308c0e865e64
|
/welcome/migrations/0002_auto_20171004_1822.py
|
1d9fdfe7c7e4281ef3aa5ff2d92f49d79cf8eb69
|
[] |
no_license
|
rushi725/DressMeUp
|
1e32c61a0d83d9d5b5b9a3161a182466c5e482d2
|
6af8824f1fe843804fe7be185d224f6f9743347c
|
refs/heads/master
| 2020-03-26T18:35:31.487785
| 2018-08-18T13:23:51
| 2018-08-18T13:23:51
| 104,162,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-04 12:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('welcome', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('image', models.FileField(upload_to='')),
('gender', models.CharField(choices=[('Men', 'Men'), ('Women', 'Women'), ('Kids', 'Kids')], max_length=64, verbose_name='Available_for')),
],
),
migrations.AddField(
model_name='product',
name='pimage',
field=models.FileField(null=True, upload_to=''),
),
migrations.AddField(
model_name='product',
name='size',
field=models.CharField(choices=[('XS', 'XS'), ('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL', 'XL'), ('XXL', 'XXL')], default='M', max_length=64, verbose_name='Available_for'),
preserve_default=False,
),
migrations.AlterField(
model_name='image',
name='original',
field=models.FileField(upload_to=''),
),
migrations.AddField(
model_name='product',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='category', to='welcome.Category'),
),
]
|
[
"rushabh725@gmail.com"
] |
rushabh725@gmail.com
|
88670173a2b5e655a8023f5ea1d810f9cc995f37
|
aa4024b6a846d2f6032a9b79a89d2e29b67d0e49
|
/UMLRT2Kiltera_MM/Properties/from_thesis/HMM3_then1_IsolatedLHS.py
|
5eff41bffa18239a03e748e0c9693e83fac49dd5
|
[
"MIT"
] |
permissive
|
levilucio/SyVOLT
|
41311743d23fdb0b569300df464709c4954b8300
|
0f88827a653f2e9d3bb7b839a5253e74d48379dc
|
refs/heads/master
| 2023-08-11T22:14:01.998341
| 2023-07-21T13:33:36
| 2023-07-21T13:33:36
| 36,246,850
| 3
| 2
|
MIT
| 2023-07-21T13:33:39
| 2015-05-25T18:15:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,493
|
py
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM3_then1_IsolatedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM3_then1_IsolatedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM3_then1_IsolatedLHS, self).__init__(name='HMM3_then1_IsolatedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM3_then1')
# Set the node attributes
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
[
"bentleyjoakes@gmail.com"
] |
bentleyjoakes@gmail.com
|
fb614aa302084b95ddf6cd05eb504c28c248c5db
|
844501294ca37f1859b9aa0a258e6dd6b1bf2349
|
/snipe/imbroglio/tools.py
|
4c62c6e3e8e00524730b7218034e57b76c714190
|
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
1ts-org/snipe
|
2ac1719bc8f6b3b158c04536464f866c34051253
|
ad84a629e9084f161e0fcf811dc86ba54aaf9e2b
|
refs/heads/master
| 2021-06-04T22:32:36.038607
| 2020-03-27T05:18:36
| 2020-04-05T21:50:42
| 18,642,653
| 6
| 3
|
NOASSERTION
| 2019-10-08T02:02:50
| 2014-04-10T16:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 7,436
|
py
|
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
# Copyright © 2018 the Snipe contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Some useful things built on top of the imbroglio primitives
"""
__all__ = [
'Event',
'Promise',
'Timeout',
'TimeoutError',
'gather',
'process_filter',
'run_in_thread',
'test',
]
import fcntl
import functools
import inspect
import os
import socket
import subprocess
import threading
from . import core as imbroglio
class TimeoutError(imbroglio.ImbroglioException):
pass
class Timeout:
"""
Async context manager for timeouts.
Only works for operations that block in imbroglio.
"""
def __init__(self, duration):
self.duration = duration
async def _timer(self):
await imbroglio.sleep(self.duration)
self.watched_task.throw(
TimeoutError(f'timed out after {self.duration}s'))
async def __aenter__(self):
self.watched_task = await imbroglio.this_task()
self.timer_task = await imbroglio.spawn(self._timer())
return self.timer_task
async def __aexit__(self, exc_type, exc_val, exc_tb):
try:
self.timer_task.cancel()
except TimeoutError: # pragma: nocover
pass # this is here to reduce raciness
await imbroglio.sleep() # so the cancel above gets processed
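# returning True from __aexit__ suppresses the exception, so only TimeoutError is swallowed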
return exc_type == TimeoutError
async def gather(*coros, return_exceptions=False):
async def signaller(coro):
# so we always schedule _after_ the parent sleeps
await imbroglio.sleep()
# this following is a little off but should produce cleaner
# backtraces?
if not return_exceptions:
result = await coro
else:
try:
result = await coro
except Exception as exception:
result = exception
monitor_task.rouse()
return result
unawaitable = [repr(c) for c in coros if not inspect.isawaitable(c)]
if unawaitable:
unawaitable = ' '.join(unawaitable)
raise imbroglio.ImbroglioException(f'got unawaitable {unawaitable}')
monitor_task = await imbroglio.this_task()
tasks = [(await imbroglio.spawn(signaller(coro))) for coro in coros]
while not all(t.is_done() for t in tasks):
await imbroglio.sleep(None)
return [t.result() for t in tasks]
class Promise:
def __init__(self):
self.exception_set = False
self.done = False
self.result = None
self.exception = None
self.task = None
def set_result(self, result):
self.done = True
self.result = result
if self.task is not None:
self.task.rouse()
def set_result_exception(self, exception):
self.done = True
self.exception_set = True
self.exception = exception
if self.task is not None:
self.task.rouse()
def __await__(self):
self.task = yield from imbroglio.this_task()
while not self.done:
yield from imbroglio.sleep(None)
if self.exception_set:
raise self.exception
return self.result
async def run_in_thread(func, *args, **kwargs):
result = None
exception = None
sender, receiver = socket.socketpair()
try:
def runner():
nonlocal result
nonlocal exception
try:
result = func(*args, **kwargs)
except Exception as e:
exception = e
try:
sender.send(b'X')
except Exception: # pragma: nocover
pass
thread = threading.Thread(target=runner)
async def launcher():
# wait a tick, so the parent runs again and starts to sleep
await imbroglio.sleep()
thread.start()
await imbroglio.spawn(launcher())
await imbroglio.readwait(receiver)
thread.join()
if exception is not None:
raise exception
return result
finally:
sender.close()
receiver.close()
async def process_filter(cmd, inbuf):
inr, inw = os.pipe()
outr, outw = os.pipe()
async def sender(inbuf):
inbuf = inbuf.encode()
while inbuf:
await imbroglio.writewait(inw)
count = os.write(inw, inbuf)
inbuf = inbuf[count:]
os.close(inw)
try:
for fd in (inw, outr):
fcntl.fcntl(
fd,
fcntl.F_SETFL,
fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
with subprocess.Popen(
cmd,
stdin=inr,
stdout=outw,
stderr=subprocess.STDOUT) as p:
os.close(inr)
os.close(outw)
await imbroglio.spawn(sender(inbuf))
output = []
s = None
while s != b'':
await imbroglio.readwait(outr)
s = os.read(outr, 4096)
output.append(s)
retval = await run_in_thread(p.wait)
return retval, b''.join(output).decode(errors='replace')
finally:
try:
os.close(inw)
except OSError:
pass
try:
os.close(outr)
except OSError: # pragma: nocover
pass
class Event:
def __init__(self):
self.flag = False
self.promises = set()
def clear(self):
self.flag = False
def is_set(self):
return self.flag
async def set(self):
if not self.flag:
self.flag = True
promises, self.promises = self.promises, set()
for p in promises:
p.set_result(True)
await imbroglio.sleep()
async def wait(self):
if self.flag:
return
p = Promise()
self.promises.add(p)
await p
def test(f):
"""
Wrap an async function in a call to the imbroglio supervisor,
intended for tests.
"""
@functools.wraps(f)
def run(self):
imbroglio.run(f(self))
return run
|
[
"kcr@1ts.org"
] |
kcr@1ts.org
|
18c2fd857274534368105c975eee76dcd11120b7
|
64585a5d9a21dc3690c81209ec4d773795bec458
|
/GombeNatPark.py
|
cd90af3e6c769c2c9e27dee97b1629d6e7c9e600
|
[] |
no_license
|
vasiach/PracticalDataScience
|
6b5fbc889a7a07a58a8a81dc6c0696dc680970ae
|
601601871dbd363f9498c7766b69b3bf93db84a8
|
refs/heads/master
| 2021-09-04T02:13:44.412670
| 2018-01-14T13:53:17
| 2018-01-14T13:53:17
| 110,718,442
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
summary_filename = 'data/gombe_128.csv'
ratings_filename = 'data/gombe_460.csv'
sumdf = pd.read_csv(summary_filename)
ratingsdf = pd.read_csv(ratings_filename)
impl = sumdf.impl
print('a)The median impulsiveness score is {:.2f}'.format(impl.median()))
chimcode_dig =sumdf.chimpcode[sumdf.chimpcode.str.contains(r'\d{3}.*')]
print('b)The number of chimpanzees with 3 digits in their code is {}'.format(len(chimcode_dig)))
avg_diff_decs_conv = abs((sumdf['conv']-sumdf['decs']).mean())
print('c) The average difference between conventional and decisive traits is {:.3f}'.format(avg_diff_decs_conv))
grouped_by_sex = sumdf[sumdf.columns[-6:]].copy().groupby(['sex'])
prominence = grouped_by_sex.mean()
print(prominence)
|
[
"vasiliki.chira@gmail.com"
] |
vasiliki.chira@gmail.com
|
c804fd279819b292e4ec08953204746649ce4b86
|
4e01416394a229e2afeede99493d282f4259b1a1
|
/examples/optimal_transport/euler/gravity_sav.py
|
501c29064bb59a4bc4bb2e272908fd6c60284151
|
[] |
no_license
|
sd-ot/pysdot
|
b2b2abb53e4e87d53e0bb3734c1624b1fc5e5f3b
|
8d16000c36deb9ab1aa98b819179741b7b65409d
|
refs/heads/master
| 2023-07-20T07:15:58.469620
| 2023-07-14T13:42:48
| 2023-07-14T13:42:48
| 176,300,757
| 4
| 2
| null | 2020-08-02T06:19:26
| 2019-03-18T14:13:33
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,855
|
py
|
from pysdot.domain_types import ConvexPolyhedraAssembly
from pysdot.radial_funcs import RadialFuncInBall
from pysdot import OptimalTransport
from scipy.sparse.linalg import spsolve
from scipy.sparse import csr_matrix
from scipy.linalg import eigvals
from scipy.linalg import eig
import matplotlib.pyplot as plt
import scipy.optimize
import numpy as np
import scipy
import os
def pm( G ):
print( np.array2string( G.todense(), 5000 ) )
def obj( cx, ot, bh, dt ):
op = ot.get_positions() + 0.0
ow = ot.get_weights() + 0.0
pc = cx.reshape( ( -1, 2 ) )
ot.set_positions( pc )
ot.adjust_weights()
bm = np.array( bh[ -2 ].flat )
b0 = np.array( bh[ -1 ].flat )
bc = np.array( ot.get_centroids().flat )
bt = 2 * b0 - bm
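# target centroids extrapolated from the last two steps: 2*b(t) - b(t-dt), i.e. a constant-velocity step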
ot.set_positions( op )
ot.set_weights( ow )
dlt = bc - bt
return 0.5 * np.sum( dlt ** 2 )
def fit_positions( ot, bh, dt, approx=True ):
nb_diracs = ot.nb_diracs()
dim = ot.dim()
O = None
n = nb_diracs * dim
X = np.array( ot.get_positions().flat )
for num_iter in range( 1000 ):
# gradient
D = np.zeros( n )
if approx:
eps = 1e-8
for r in range( n ):
Y = X + 0.0
Y[ r ] -= eps
err_0 = obj( Y, ot, bh, dt )
Y[ r ] += 2 * eps
err_1 = obj( Y, ot, bh, dt )
D[ r ] = ( err_1 - err_0 ) / ( 2 * eps )
else:
# target centroids
bm = np.array( bh[ -2 ].flat )
b0 = np.array( bh[ -1 ].flat )
bt = 2 * b0 - bm
# derivatives
mvs = ot.pd.der_centroids_and_integrals_wrt_weight_and_positions()
if mvs.error:
print( "mvs.error" )
m = csr_matrix( ( mvs.m_values, mvs.m_columns, mvs.m_offsets ) )
rd = np.arange( dim * nb_diracs, dtype=np.int )
b0 = ( dim + 1 ) * np.floor_divide( rd, dim )
l0 = b0 + rd % dim
l1 = ( dim + 1 ) * np.arange( nb_diracs, dtype=np.int) + dim
C = m[l0, :][:, l0]
D = m[l0, :][:, l1]
F = m[l1, :][:, l1]
E = m[l1, :][:, l0]
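# Schur complement G = C - D*inv(F)*E: eliminates the weight block, leaving d(centroids)/d(positions)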
G = C - D * spsolve( F.tocsc(), E.tocsc() )
D = G * ( mvs.v_values[ l0 ] - bt )
# adjust gradient to avoid too large displacements
norm = np.linalg.norm( D, ord=np.inf )
if norm > 1e-2:
D *= 1e-2 / norm
# lambda
best_err = 1e40
if O is None:
best_l = 0
for l in np.linspace( 0.0, 1.5, 15 ):
err = obj( X - l * D, ot, bh, dt )
if best_err > err:
best_err = err
best_l = l
#
print( " ", num_iter, best_l, norm, best_l * np.linalg.norm( D ), "err:", best_err )
if best_l == 0:
print( " => bim" )
break
X -= best_l * D
else:
best_l = 0
best_m = 0
nb_div = 15
for l in np.linspace( 0.0, 2.0, nb_div ):
for m in np.linspace( -1.0, 1.0, nb_div ):
err = obj( X - l * D - m * O, ot, bh, dt )
if best_err > err:
best_err = err
best_l = l
best_m = m
for l in np.linspace( best_l - 1.5 / nb_div, best_l + 1.5 / nb_div, nb_div ):
for m in np.linspace( best_m - 1.5 / nb_div, best_m + 1.5 / nb_div, nb_div ):
err = obj( X - l * D - m * O, ot, bh, dt )
if best_err > err:
best_err = err
best_l = l
best_m = m
#
print( " ", num_iter, "best_l", best_l, "best_m", best_m, "dx:", np.linalg.norm( best_l * D + best_m * O ), "err:", best_err )
if best_l == 0:
print( " => bim" )
break
X -= best_l * D + best_m * O
ot.set_positions( X.reshape( ( -1, 2 ) ) )
ot.adjust_weights()
if best_err < 1e-7:
break
O = D
def run( n, base_filename, l=0.5 ):
# domain
domain = ConvexPolyhedraAssembly()
domain.add_box( [ 0, 0 ], [ 1, 1 ] )
# initial positions, weights and masses
positions = []
if n == 1:
radius = 0.3
mass = 3.14159 * radius**2
positions.append( [ 0.5, radius ] )
else:
radius = l / ( 2 * ( n - 1 ) )
mass = l**2 / n**2
for y in np.linspace( radius, l - radius, n ):
for x in np.linspace( 0.5 - l / 2 + radius, 0.5 + l / 2 - radius, n ):
nx = x + 0.0 * radius * ( np.random.rand() - 0.5 )
ny = y + 0.0 * radius * ( np.random.rand() - 0.5 ) + 0.5 * radius
positions.append( [ nx, ny ] )
positions = np.array(positions)
nb_diracs = positions.shape[ 0 ]
# dim = positions.shape[ 1 ]
# OptimalTransport
ot = OptimalTransport( domain, RadialFuncInBall() )
ot.set_weights( np.ones( nb_diracs ) * radius**2 )
ot.set_masses( np.ones( nb_diracs ) * mass )
ot.set_positions( positions )
ot.max_iter = 100
ot.adjust_weights()
ot.display_vtk( base_filename + "0.vtk", points=True, centroids=True )
# history of centroids
ce = ot.get_centroids()
ce[ :, 1 ] += radius / 10
bh = [ ce ]
dt = 1.0
for num_iter in range( 200 ):
print( "num_iter", num_iter )
bh.append( ot.get_centroids() )
fit_positions( ot, bh, dt )
# display
n1 = int( num_iter / 1 ) + 1
ot.display_vtk( base_filename + "{}.vtk".format( n1 ), points=True, centroids=True )
os.system( "rm results/pd_*" )
run( 10, "results/pd_" )
|
[
"hugal.leclerc@gmail.com"
] |
hugal.leclerc@gmail.com
|
1acd293f957c797320d84f470625ab159b51ec48
|
07a2689b0cce09f76f2a7e39c0d4492985e0c774
|
/project/api/auth.py
|
d4e89f67a6c97ce21a49fcccd6fdfeb3d6d7d8f1
|
[] |
no_license
|
ganeshnegi/flask_rest
|
5d6266c7a1aedef1d36f7cfeaa0798ecc3c73476
|
49fdcef4f3258fd8d236bd1760e0eea0c4e4a4ca
|
refs/heads/master
| 2020-03-19T14:12:15.420417
| 2018-06-22T07:29:25
| 2018-06-22T07:29:25
| 136,613,370
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,594
|
py
|
from flask_jwt_extended import (
create_access_token, create_refresh_token, get_raw_jwt, jwt_required, jwt_refresh_token_required
)
from flask_restful import Resource, Api
from flask import request, jsonify
from project import app
from project.models.user import User, BlacklistedToken
@app.route('/login', methods = ['POST'])
def login():
if not request.is_json:
return jsonify({'error':'invalid json data'}), 400
login_data = request.get_json()
email = login_data.get('email')
password = login_data.get('password')
if not all([email, password]):
return jsonify({'message':'invalid credentials'}), 400
user = User.find_by_email(email)
if not user:
return jsonify({'error':'user not exist with this email'}), 400
authenticated = user.check_password(password)
if not authenticated:
return jsonify({'error':'invalid username/password'}), 400
access_token = create_access_token(identity=email)
refresh_token = create_refresh_token(identity = email)
return jsonify({
'access_token':access_token,
'refresh_token':refresh_token
}), 200
@app.route('/logout', methods=['DELETE'])
@jwt_required
def logout():
jti = get_raw_jwt()['jti']
blt = BlacklistedToken(jti=jti)
blt.add()
return jsonify({'message':'Access Token revoked'}), 204
@app.route('/logout2', methods=['DELETE'])
@jwt_refresh_token_required
def logout2():
jti = get_raw_jwt()['jti']
blt = BlacklistedToken(jti=jti)
blt.add()
return jsonify({'message':'Refresh Token revoked'}), 204
|
[
"ganeshnegi@outlook.com"
] |
ganeshnegi@outlook.com
|
e6bdf3876b48f172a914ed46a62869e62397fa4c
|
ae217f7980802ab62c57eb5468064052a7674648
|
/visualisation/Heatmap.py
|
b01a189377d8cf32b3215fba133b37151e2c7333
|
[] |
no_license
|
olegs/Biohack2018
|
13f0e54ea9681de10d570c7ce9bda848909b337c
|
21dae38beb7830e6a5d3d2e55f1db28b15025f99
|
refs/heads/master
| 2021-01-25T14:21:57.147560
| 2018-03-04T11:57:01
| 2018-03-04T11:57:01
| 123,683,237
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import sys
# get and create data
print('Hello username:', sys.argv[0])
data = pd.read_csv(sys.argv[1])
# print and save heatmap
sb.heatmap(data, annot=True)
plt.savefig('Heatmap.png', dpi=100)
plt.show()
|
[
"noreply@github.com"
] |
olegs.noreply@github.com
|
4366358c531bace14bf52a2fa83c286264bb80d3
|
66bc55c7c794bf9056c5574b1af8371e05758fd0
|
/abcd.py
|
76adbca25c19467c24e16ada81dd907f603a3594
|
[] |
no_license
|
BeautyDalia/-
|
73cd52ced193730a6d07e27c7b1e3d4bc7b27147
|
1b91ae55e4738a40bd990f143983f6d60b0afb09
|
refs/heads/master
| 2020-04-27T04:36:21.480376
| 2019-03-13T09:44:38
| 2019-03-13T09:44:38
| 174,060,180
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,455
|
py
|
# coding:utf-8
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.chrome.options import Options
from lxml import etree
import pymysql
class NetEase(object):
def __init__(self):
self.chrome_options = Options()
self.chrome_options.add_argument('--headless')
self.chrome_options.add_argument('--disable-gpu')
self.driver = webdriver.Chrome() # headless
self.wait = WebDriverWait(self.driver, 10)
self.sig = 0
self.db = pymysql.connect("127.0.0.1", "root", "123456", "netease")
self.cursor = self.db.cursor()
self.fans_num = 0
self.attention_num = 0
def get_fans_page(self, url): # from the user home page, click the follower count
self.driver.get(url)
self.wait.until(ec.presence_of_element_located((By.TAG_NAME, "iframe")))
time.sleep(3)
try:
js = "var box = document.getElementsByClassName('g-btmbar')[0];box.parentNode.removeChild(box);"
self.driver.execute_script(js) # remove the playback bar at the bottom
finally:
self.driver.switch_to.frame('g_iframe')
fans_page = self.driver.find_element_by_id('fan_count').click()
time.sleep(3)
return fans_page
def get_page_source(self, url):
self.get_fans_page(url)
page_source = self.driver.page_source
selector = etree.HTML(page_source, parser=etree.HTMLParser(encoding='utf-8'))
return selector # parsed page source
def get_funs_num(self, url): # follower count (on the user home page)
page_sourse = self.get_page_source(url)
funs_num = page_sourse.xpath('.//strong[@id="fan_count"]/text()')[0]
name = page_sourse.xpath('.//h2[@id="j-name-wrap"]/span[1]/text()')
return [funs_num, name]
def fun_info_xpath(self, page_sourse):
funs_info = page_sourse.xpath('.//ul[@id="main-box"]/li')
funs_name_list = []
fun_home_url_list = []
funs_funs_num_list = []
attentions_list = []
attentions_num_list = []
for fun_info in funs_info:
fun_home_url = fun_info.xpath('./div[1]/p[1]/a/@href')[0]
fun_home_url = 'https://music.163.com/#' + fun_home_url # follower's home page url
funs_funs_num = fun_info.xpath('./div[@class="info"]/p[2]/a[3]/em/text()')[0] # follower's follower count
funs_name = fun_info.xpath('./div[@class="info"]/p[1]/a/text()')[0] # follower's name
attentions = fun_info.xpath('.//a[text()="关注"]/@href')[0] # link to the users they follow
attentions_num = fun_info.xpath('.//a[text()="关注"]/em/text()')[0] # number of users they follow
funs_name_list.append(funs_name)
funs_funs_num_list.append(funs_funs_num)
fun_home_url_list.append(fun_home_url)
attentions_list.append(attentions)
attentions_num_list.append(attentions_num)
funs_info_zip = zip(funs_name_list, fun_home_url_list, funs_funs_num_list, attentions_list, attentions_num_list)
return funs_info_zip
def get_funs_info(self, url): # follower info
page_sourse = self.get_page_source(url)
return self.fun_info_xpath(page_sourse)
    def next_page(self):  # the next-page button may be disabled, or there may be no next page
        js = "window.scrollTo(0,document.body.scrollHeight)"  # scroll to the bottom
        self.driver.execute_script(js)
try:
next_page_button = self.driver.find_element_by_xpath('.//a[text()="下一页"]')
last_page_sig = next_page_button.get_attribute('class')
            return next_page_button if 'disable' not in last_page_sig else None  # some users have only one page of fans
except:
return None
    def funs(self, url_or_pagesource):
        if self.sig == 0:
            funs_info = self.get_funs_info(url_or_pagesource)
        else:
            funs_info = self.fun_info_xpath(url_or_pagesource)
        # fans
        for fun_info in funs_info:  # fname, furl, fnum, fattention, fattention_num
            if int(fun_info[2]) > 0:
                sql = """insert into all_users(username, url, fans) values ('{}', '{}', {})""".format(str(fun_info[0]), fun_info[1], int(fun_info[2]))
                self.into_mysql(sql, 'fan')  # write the user record to the database
            else:
                continue  # one page of fans done
        # pagination
        next_page = self.next_page()
        if next_page:
            self.sig = 1
            self.next_page().click()  # click if there is a next page
            time.sleep(3)
            # self.wait.until(ec.presence_of_element_located((By.XPATH, ".//ul[@id='main-box']/li[last()]")))
            try:
                next_page_source = self.driver.page_source  # next page's source
            except Exception as e:
                print('=========>>>error fetching fans page source:', e)
                self.next_page().click()  # click if there is a next page
                time.sleep(3)
                next_page_source = self.driver.page_source  # next page's source
            nps = etree.HTML(next_page_source, parser=etree.HTMLParser(encoding='utf-8'))
            return self.funs(nps)  # recurse on the next page's source
        else:
            # followings
            js = "window.scrollTo(0,0)"  # scroll to the top
            self.driver.execute_script(js)
            self.driver.find_element_by_id('follow_count').click()  # click the follow count
            time.sleep(3)
            # self.wait.until(ec.presence_of_all_elements_located((By.XPATH, ".//ul[@id='main-box']/li")))
            follows_source = self.driver.page_source  # page source
            gz = etree.HTML(follows_source, parser=etree.HTMLParser(encoding='utf-8'))
            follows_info = self.fun_info_xpath(gz)
            return self.follows(follows_info)
    def follows(self, follows_info):
        new_list = []
        for follow_info in follows_info:  # fname, furl, fnum, fattention, fattention_num
            if isinstance(follow_info, tuple):
                new_list.append(follow_info)
        for f in new_list:
            if int(f[2]) > 0:
                sql = """insert into all_users(username, url, fans) values ('{}', '{}', {})""".format(str(f[0]), f[1], int(f[2]))
                self.into_mysql(sql, 'follow')  # write the user record to the database
            else:
                continue  # one page of followings done
        next_page = self.next_page()
        if next_page:
            self.next_page().click()
            time.sleep(3)
            self.wait.until(ec.presence_of_element_located((By.XPATH, ".//ul[@id='main-box']/li[last()]")))
            try:
                next_follow_source = self.driver.page_source  # next page's source
            except Exception as e:
                print('=========>>>error fetching follow page source:', e)
                self.next_page().click()  # click if there is a next page
                time.sleep(3)
                next_follow_source = self.driver.page_source  # next page's source
            gz = etree.HTML(next_follow_source, parser=etree.HTMLParser(encoding='utf-8'))
            ff = self.fun_info_xpath(gz)  # list of followings
            return self.follows(ff)
        else:
            print('finished crawling one account')
            self.sig = 0
            self.fans_num = 0
            self.attention_num = 0
            self.update_sql(self.driver.current_url)
def to_sql(self, url):
fans_num = self.get_funs_num(url)
num = int(fans_num[0])
name = str(fans_num[1][0])
if int(fans_num[0]) > 200000:
sql = """insert into user_info(user_name, url, fans_num) values ('{}','{}',{})""".format(name, url, num)
            return self.into_mysql(sql, 'fan')
    def into_mysql(self, sql, who):
        try:
            # execute the sql statement
            self.cursor.execute(sql)
            # commit to the database
            self.db.commit()
            if who == 'fan':
                self.fans_num += 1
                print('user with {} count > 0 written; {} users written so far'.format(who, self.fans_num))
            if who == 'follow':
                self.attention_num += 1
                print('user with {} count > 0 written; {} users written so far'.format(who, self.attention_num))
        except Exception as e:
            # roll back on error
            self.db.rollback()
            print('**error while writing to the database:', e)
def read_mysql(self):
sql = """select * from all_users where isused=0 limit 10"""
try:
            # execute the SQL statement
            self.cursor.execute(sql)
            # fetch all matching rows
            results = self.cursor.fetchall()
            for (uid, fname, url, fans, isused) in results:
                yield url
        except Exception as e:
            print("**query error:", e)
def update_sql(self, url):
url_id = url.split('?')[-1]
nurl = 'https://music.163.com/#/user/home?' + url_id
sql = "UPDATE all_users SET isused = 1 WHERE url = '{}'".format(nurl)
print('*******', sql)
try:
            # execute the SQL statement
            self.cursor.execute(sql)
            # commit to the database
            self.db.commit()
            print('user account status updated, user:', url)
        except Exception as e:
            # roll back on error
            self.db.rollback()
            print("**update error:", e)
def run(self):
for url in self.read_mysql():
# try:
self.to_sql(url)
self.funs(url)
            # except Exception as e:
            #     print('=======>>>request error:', e)
            #     print('=======>>>url that raised the error:', url)
# continue
if self.read_mysql():
self.run()
if __name__ == '__main__':
n = NetEase()
n.run()
# n.funs('https://music.163.com/#/user/home?id=650120')
|
[
"noreply@github.com"
] |
BeautyDalia.noreply@github.com
|
d5a63ff33fc1a8856b4ee87dea50e80cf57550c6
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/valid_20200616210647.py
|
193197e9b2d92cf59f2508550fb9700467f12317
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
# IPv4 --> 4 decimal numbers, each between 0 and 255
# leading zeros are invalid
# check whether each part is a digit between 0 and 255
# (the original was incomplete: the IPv6 branch below is a hedged completion
# of the started split-on-":" idea, not the author's finished logic)
import string
def valid(address_string):
    parts = address_string.split(".")
    if len(parts) == 4:
        # IPv4: four dot-separated decimal octets
        for part in parts:
            if not part.isdigit():
                return False
            if int(part) not in range(0, 256):
                return False
            if len(part) > 1 and part[0] == "0":  # leading zero
                return False
        return True
    # IPv6 (assumed intent): eight colon-separated groups of 1-4 hex digits
    groups = address_string.split(":")
    if len(groups) != 8:
        return False
    for group in groups:
        if not 1 <= len(group) <= 4:
            return False
        if any(ch not in string.hexdigits for ch in group):
            return False
    return True
print(valid("2001:0db8:85a3:0:0:8A2E:0370:7334"))  # True
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
d35311b1ddcb74743d899d64cd956f338388ec85
|
9cef2791e309bc3acc8989c8e48daa1cbda0e649
|
/mapclientplugins/parametricfittingstep/scene/scaffold.py
|
6507d619221940f4a8b69761e72211ac0c1626de
|
[
"Apache-2.0"
] |
permissive
|
mahyar-osn/mapclientplugins.parametricfittingstep
|
9fb492766707a9ab3d46505b596fbe797b8dae8d
|
3b78be6a3cbd99f970f0b28c65350304e446c19e
|
refs/heads/master
| 2020-04-11T05:55:03.209766
| 2018-12-13T01:02:41
| 2018-12-13T01:02:41
| 161,564,049
| 0
| 0
|
Apache-2.0
| 2018-12-13T00:57:23
| 2018-12-13T00:57:23
| null |
UTF-8
|
Python
| false
| false
| 2,227
|
py
|
from opencmiss.zinc.field import Field
from opencmiss.zinc.graphics import Graphics
class Scaffold(object):
def __init__(self, master_model):
self._master_model = master_model
def _get_scene(self):
scaffold_model = self._master_model.get_scaffold_model()
region = scaffold_model.get_region()
return region.getScene()
def create_graphics(self):
scaffold_model = self._master_model.get_scaffold_model()
scene = self._get_scene()
material_module = scene.getMaterialmodule()
coordinate_field = scaffold_model.get_coordinate_field()
field_module = coordinate_field.getFieldmodule()
cmiss_number = field_module.findFieldByName('cmiss_number')
scene.beginChange()
scene.removeAllGraphics()
points = scene.createGraphicsPoints()
points.setName('scaffold-points')
points.setCoordinateField(coordinate_field)
points.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
attributes = points.getGraphicspointattributes()
attributes.setLabelField(cmiss_number)
lines = scene.createGraphicsLines()
lines.setExterior(True)
lines.setName('scaffold-lines')
lines.setCoordinateField(coordinate_field)
surfaces = scene.createGraphicsSurfaces()
# surfaces.setSubgroupField(self.notHighlighted)
surfaces.setCoordinateField(coordinate_field)
surfaces.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_SHADED)
surfaces.setExterior(True)
surfacesMaterial = material_module.findMaterialByName(
'trans_blue' if scaffold_model.is_display_surfaces_translucent() else 'solid_blue')
surfaces.setMaterial(surfacesMaterial)
surfaces.setName('scaffold-surfaces')
scene.endChange()
def write(self):
scene = self._get_scene()
stream_information = scene.createStreaminformationScene()
stream_information.setIOFormat(stream_information.IO_FORMAT_DESCRIPTION)
memory_resource = stream_information.createStreamresourceMemory()
scene.write(stream_information)
_, buffer_contents = memory_resource.getBuffer()
return buffer_contents
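# Illustrative usage (assumed; not part of the original file):
#   scaffold = Scaffold(master_model)   # master_model supplied by the plugin
#   scaffold.create_graphics()          # build the points/lines/surfaces graphics
#   description = scaffold.write()      # serialized scene description buffer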
|
[
"h.sorby@auckland.ac.nz"
] |
h.sorby@auckland.ac.nz
|
baf4291c62a4291a20ae2e32fe2cd4704eb384c3
|
d4a9554840e6f83bf82515a84541408a7828d1fc
|
/todolist/todoapp/urls.py
|
d8386dd1531ea71a2e570ec86bebf31d1bb64b3b
|
[] |
no_license
|
coder-kiran/Todo
|
79d98c84080dcf739563b05af033232ddafbd046
|
aa130b2429287f3a8579b4d5b1ddd53ac0c2e77d
|
refs/heads/master
| 2023-04-08T09:19:29.181087
| 2021-04-13T17:42:06
| 2021-04-13T17:42:06
| 357,634,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from django.urls import path
from todoapp import views
urlpatterns = [
path('',views.index,name='home'),
path('delete/<int:commonid>',views.deletelist,name='delete'),
path('update/<int:commonid>/',views.update,name='update')
]
|
[
"kirankalak4@gmail.com"
] |
kirankalak4@gmail.com
|
2a75511cc7fb86d20c42589fc29f14ccee4b0cef
|
e5df6f614800210137a18312a248cf55d6c14c63
|
/hw4/nlg/nlg_postprocessing.py
|
ff69cff30a46d18308f1bd5028e2af864a31fca3
|
[] |
no_license
|
gdoggg2032/ADL2016
|
ae402ea88f61a1c76a08164dc45ad3dac1281025
|
fb40f83ae201ce12350c7ec965dc37c94a931628
|
refs/heads/master
| 2022-11-04T16:46:08.824116
| 2016-12-28T11:42:14
| 2016-12-28T11:42:14
| 70,919,131
| 0
| 1
| null | 2022-10-20T13:52:55
| 2016-10-14T14:44:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
import sys
import os
import argparse
import time
import json
import re
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--test_data', default='./NLG_data/test.txt', type=str)
parser.add_argument('--predict_data', default='./predict.txt', type=str)
parser.add_argument('--output', default="answer.txt", type=str)
args = parser.parse_args()
return args
def parse_query(query, sen):
# parse query
func = re.search("(.*)\(.*\)", query).group(1)
content = re.search(".*\((.*)\)", query).group(1)
ql = [func]
for token in content.split(";"):
# XX=XX or YY
tl = token.split("=")
if len(tl) == 1:
# YY
ql.append(tl[0])
else:
# XX=XX
entity = tl[0]
replace_name = "_{}_".format(entity)
name = tl[1]
name = name.replace("'", "").strip()
                # logically we would substitute the actual value:
                # sen = sen.replace(replace_name, name)
                # but replacing with the slot name scores higher on BLEU
                sen = sen.replace(replace_name, entity)
return sen
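# Illustrative example (not in the original file): for a query such as
# inform(name='Blue Spice'; area='riverside') and a predicted sentence
# "_name_ is located in _area_", parse_query replaces each _slot_ placeholder
# with the slot's name, yielding "name is located in area".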
def main(args):
# test
test_path = args.test_data
test_data = open(test_path, "r").read().strip().split("\n")
predict_path = args.predict_data
predict_data = open(predict_path, "r").read().strip().split("\n")
ans_path = args.output
ansf = open(ans_path, "w")
for t, p in zip(test_data, predict_data):
output = parse_query(t, p)
print >> ansf, output
if __name__ == "__main__":
s = time.time()
args = arg_parse()
main(args)
print >> sys.stderr, "time cost:", time.time() - s
|
[
"gdoggg2032@gmail.com"
] |
gdoggg2032@gmail.com
|
4419eeb011ca1a3d483fd6e07002cdc8efca4c87
|
39c1242ae3d3043f64a6604b1641bf1ac990aed9
|
/languages/ar.py
|
8bf11f847cd869c1dc583e6b7ebf81a59a77ff6d
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
csantiestebantoca/solarplan
|
9ceae6c3f096a55207ebb47c4cb261748da7fceb
|
c2c14b93ceb7e7a3a9f722baa2d920bd6f043677
|
refs/heads/master
| 2022-11-28T16:37:08.558974
| 2020-08-05T21:30:49
| 2020-08-05T21:30:49
| 285,184,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,483
|
py
|
# -*- coding: utf-8 -*-
{
'!langcode!': 'ar',
'!langname!': 'Arabic',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s %%{row} deleted',
'%s %%{row} updated': '%s %%{row} updated',
'%s selected': '%s selected',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '**not available** (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)': '``**not available**``:red (requires the Python [[Pympler https://pypi.python.org/pypi/Pympler popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'A new password was emailed to you': 'A new password was emailed to you',
'About': 'نبذة',
'Access Control': 'متحكمات الوصول',
'admin': 'admin',
'Ajax Recipes': 'وصفات أجاكس',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'API Example': 'API Example',
'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel',
'Apply changes': 'Apply changes',
'Are you sure you want to delete this object?': 'هل أنت متأكد بحذف هذا الكائن ؟',
'Authentication code': 'Authentication code',
'Available Databases and Tables': 'Available Databases and Tables',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'لا يمكن بأن يكون خالي',
'Change Password': 'Change Password',
'Change password': 'Change password',
'Check to delete': 'أختر للحذف',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password',
'Client IP': 'IP المستخدم',
'Community': 'المجتمع',
'Components and Plugins': 'العناصر والإضافات',
'Config.ini': 'Config.ini',
'Controller': 'متحكم',
'Copyright': 'الحقوق',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'data uploaded': 'data uploaded',
'Database': 'قاعدة البيانات',
'Database %s select': 'Database %s select',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'نموذج قاعدة البيانات',
'Delete:': 'Delete:',
'Demo': 'تجربة',
'Deployment Recipes': 'الوصفات المنشورة',
'Description': 'الوصف',
'design': 'design',
'Design': 'Design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'المستندات',
"Don't know what to do?": 'لا تعلم ماذا ستفعل ؟',
'done!': 'done!',
'Download': 'تحميل',
'E-mail': 'البريد الإلكتروني',
'Edit current record': 'Edit current record',
'Email and SMS': 'البريد الإلكتروني والرسالة النصية',
'Email sent': 'Email sent',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'Errors': 'الأخطاء',
'export as csv file': 'export as csv file',
'FAQ': 'الأسئلة الشائعة',
'First name': 'الأسم الأول',
'Forms and Validators': 'الإستمارات والمدققات',
'Free Applications': 'تطبيقات مجانية',
'Function disabled': 'Function disabled',
'Graph Model': 'Graph Model',
'Grid Example': 'Grid Example',
'Group %(group_id)s created': 'المجموعة %(group_id)s قد أنشئت',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'هوية المجموعة',
'Group uniquely assigned to user %(id)s': 'المجموعة مخصصة للمستخدم %(id)s',
'Groups': 'مجموعات',
'Hello World': 'مرحباً بالعالم',
'Helping web2py': 'Helping web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'الرئيسية',
'How did you get here?': 'كيف أستطعت الوصول إلى هنا ؟',
'import': 'import',
'Import/Export': 'Import/Export',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Insufficient privileges': 'Insufficient privileges',
'Internal State': 'Internal State',
'Introduction': 'مقدمة',
'Invalid email': 'بريد إلكتروني غير صالح',
'Invalid key': 'Invalid key',
'Invalid login': 'Invalid login',
'Invalid password': 'Invalid password',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Invalid reset password': 'Invalid reset password',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Key': 'Key',
'Key verified': 'Key verified',
'Last name': 'أسم العائلة',
'Layout': 'النسق',
'Live Chat': 'المحادثة الحيّة',
'Log In': 'Log In',
'Logged in': 'تم تسجيل الدخول',
'Logged out': 'تم تسجيل الخروج',
'Login': 'تسجيل الدخول',
'Login disabled by administrator': 'Login disabled by administrator',
'Logout': 'تسجيل الخروج',
'Lost Password': 'فقدت كلمة المرور',
'Lost your password?': 'Lost your password?',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Manage Cache',
'Memberships': 'Memberships',
'Menu Model': 'قالب القوائم',
'My Sites': 'موقعي',
'Name': 'الأسم',
'New password': 'New password',
'New Record': 'New Record',
'new record inserted': 'new record inserted',
'next %s rows': 'next %s rows',
'No databases in this application': 'No databases in this application',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'أسم الكائن أو الجدول',
'Old password': 'Old password',
'Online book': 'Online book',
'Online examples': 'أمثلة على الأنترنت',
'or import from csv file': 'or import from csv file',
'Origin': 'أصل',
'Other Recipes': 'وصفات أخرى',
'Overview': 'نظرة عامة',
'Password': 'كلمة المرور',
'Password changed': 'Password changed',
"Password fields don't match": 'حقول كلمة المرور لا تتطابق',
'Password reset': 'Password reset',
'Password retrieve': 'Password retrieve',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': 'الرجاء إعادة إدخال كلمة المرور',
'Plugins': 'الإضافات',
'Powered by': 'مدعوم بواسطة',
'Preface': 'المدخل',
'previous %s rows': 'previous %s rows',
'Profile': 'الملف الشخصي',
'Profile updated': 'Profile updated',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'بايثون',
'Query:': 'Query:',
'Quick Examples': 'أمثلة سريعة',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Recipes': 'وصفات',
'Record': 'Record',
'Record %(id)s created': 'Record %(id)s created',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Record %(id)s updated',
'Record Created': 'Record Created',
'Record Deleted': 'Record Deleted',
'record does not exist': 'record does not exist',
'Record id': 'Record id',
'Record ID': 'هوية السجل ',
'Record Updated': 'Record Updated',
'Register': 'التسجيل',
'Registration identifier': 'مُعرف التسجيل',
'Registration is pending approval': 'Registration is pending approval',
'Registration key': 'رمز التسجيل',
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'تم التسجيل بنجاح',
'Remember me (for 30 days)': 'تذكرني ( إلى 30 يوم)',
'Request reset password': 'Request reset password',
'Reset Password key': 'إعادة ظبط مفتاح كلمة المرور',
'Role': 'دور',
'Roles': 'Roles',
'Rows in Table': 'Rows in Table',
'Rows selected': 'Rows selected',
'Save model as...': 'Save model as...',
'Services': 'خدمات',
'Sign Up': 'Sign Up',
'Sign up': 'Sign up',
'Size of cache:': 'Size of cache:',
'state': 'state',
'Statistics': 'Statistics',
'Stylesheet': 'أسلوب النمط',
'submit': 'submit',
'Submit': 'Submit',
'Support': 'الدعم',
'Table': 'Table',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'النواة',
'The output of the file is a dictionary that was rendered by the view %s': 'نتاج هذا الملف هو قاموس قًدم بواسطة العارض %s',
'The Views': 'المشاهدات',
'This App': 'هذا التطبيق',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'This email already has an account',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'البصمة الزمنية',
'Traceback': 'Traceback',
'Twitter': 'تويتر',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'unable to parse csv file': 'unable to parse csv file',
'Unable to send email': 'Unable to send email',
'Update:': 'Update:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'User',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'المستخدم %(id)s قد سجل دخوله',
'User %(id)s Logged-out': 'المستخدم %(id)s قد سجل خروجه',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'المستخدم %(id)s مسجل',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User ID': 'هوية المستخدم',
'Username': 'Username',
'Username already taken': 'Username already taken',
'Username retrieve': 'Username retrieve',
'Users': 'Users',
'Verify Password': 'تأكيد كلمة المرور',
'Videos': 'الفيديوهات',
'View': 'العرض',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome to web2py!': 'مرحباً بكم في ويب2 باي !',
'Which called the function %s located in the file %s': 'الدالة المسماة %s موجودة في ملف %s',
'Wiki Example': 'Wiki Example',
'Working...': 'Working...',
'You are successfully running web2py': 'أستطعت تثبيت web2py بنجاح !',
'You can modify this application and adapt it to your needs': 'تستطيع تعديل هذا التطبيق لما يناسب إحتياجك',
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': 'لقد زرت الرابط %s',
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Your username is: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
|
[
"noreply@github.com"
] |
csantiestebantoca.noreply@github.com
|
4fcaa09e33ba139fb4e87c2ecc64d676ede91e58
|
332e906bbc2d391b50093e5d2146fe7c2ab666b6
|
/POM/Pages/homePage.py
|
d3fd75dc944b2a71a315b11622919be251e00c5a
|
[] |
no_license
|
guilhermelemke/Selenium
|
46de6e462924ddf0b5fb4befd07457e5c00b66d7
|
ad74f62f5d9536847b72219c54453f686f4da630
|
refs/heads/main
| 2023-02-11T04:15:03.797139
| 2021-01-07T20:59:51
| 2021-01-07T20:59:51
| 326,188,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
class HomePage():
def __init__(self, driver):
self.driver = driver
self.welcome_link_id = 'welcome'
self.logout_link_linkText = 'Logout'
def click_welcome(self):
self.driver.find_element_by_id(self.welcome_link_id).click()
def click_logout(self):
self.driver.find_element_by_link_text(self.logout_link_linkText).click()
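# Illustrative usage (assumed; not part of the original file; the target URL
# is hypothetical):
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   driver.get('https://example.com/login')
#   home = HomePage(driver)
#   home.click_welcome()
#   home.click_logout()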
|
[
"guilherme.lemke@philips.com"
] |
guilherme.lemke@philips.com
|
23b1084ab056307b4bb2914859e7852e1b5c0c92
|
5a89114faaac7bd79a867e2283718612b04b6077
|
/common/layers.py
|
c2bd1ed2d7f0cffc489e9c93310032fbcc469462
|
[] |
no_license
|
rnakamura-sky/DeepLearning2
|
163e991b926b3efc04e10bece82f0e91572eae3a
|
19670cb685dead79d7036c8237ff610e25ca2602
|
refs/heads/master
| 2023-03-19T21:57:35.998422
| 2021-03-11T08:37:25
| 2021-03-11T08:37:25
| 332,815,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,731
|
py
|
import numpy as np
from common.functions import softmax, cross_entropy_error
class MatMul:
def __init__(self, W):
self.params = [W]
self.grads = [np.zeros_like(W)]
self.x = None
def forward(self, x):
W, = self.params
out = np.dot(x, W)
self.x = x
return out
def backward(self, dout):
W, = self.params
dx = np.dot(dout, W.T)
dW = np.dot(self.x.T, dout)
self.grads[0][...] = dW
return dx
class Affine:
def __init__(self, W, b):
self.params = [W, b]
self.grads = [np.zeros_like(W), np.zeros_like(b)]
self.x = None
def forward(self, x):
W, b = self.params
out = np.dot(x, W) + b
self.x = x
return out
def backward(self, dout):
W, b = self.params
dx = np.dot(dout, W.T)
dW = np.dot(self.x.T, dout)
db = np.sum(dout, axis=0)
self.grads[0][...] = dW
self.grads[1][...] = db
return dx
class Softmax:
def __init__(self):
self.params, self.grads = [], []
self.out = None
def forward(self, x):
self.out = softmax(x)
return self.out
def backward(self, dout):
dx = self.out * dout
sumdx = np.sum(dx, axis=1, keepdims=True)
dx -= self.out * sumdx
return dx
class SoftmaxWithLoss:
def __init__(self):
self.params, self.grads = [], []
self.y = None
self.t = None
def forward(self, x, t):
self.t = t
self.y = softmax(x)
if self.t.size == self.y.size:
self.t = self.t.argmax(axis=1)
loss = cross_entropy_error(self.y, self.t)
return loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
dx = self.y.copy()
dx[np.arange(batch_size), self.t] -= 1
dx *= dout
dx = dx / batch_size
return dx
class Sigmoid:
def __init__(self):
self.params, self.grads = [], []
self.out = None
def forward(self, x):
out = 1 / (1 + np.exp(-x))
self.out = out
return out
def backward(self, dout):
dx = dout * (1.0 - self.out) * self.out
return dx
class SigmoidWithLoss:
def __init__(self):
self.params, self.grads = [], []
self.loss = None
self.y = None
self.t = None
def forward(self, x, t):
self.t = t
self.y = 1 / (1 + np.exp(-x))
self.loss = cross_entropy_error(np.c_[1 - self.y, self.y], self.t)
return self.loss
def backward(self, dout=1):
batch_size = self.t.shape[0]
dx = (self.y - self.t) * dout / batch_size
return dx
class Dropout:
'''
http://arxiv.org/abs/1207.0580
'''
def __init__(self, dropout_ratio=0.5):
self.params, self.grads = [], []
self.dropout_ratio = dropout_ratio
self.mask = None
def forward(self, x, train_flg=True):
if train_flg:
self.mask = np.random.rand(*x.shape) > self.dropout_ratio
return x * self.mask
else:
return x * (1.0 - self.dropout_ratio)
def backward(self, dout):
return dout * self.mask
class Embedding:
def __init__(self, W):
self.params = [W]
self.grads = [np.zeros_like(W)]
self.idx = None
def forward(self, idx):
W, = self.params
self.idx = idx
out = W[idx]
return out
def backward(self, dout):
dW, = self.grads
dW[...] = 0
# for i, word_id in enumerate(self.idx):
# dW[word_id] = dout[i]
np.add.at(dW, self.idx, dout)
return None
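# --- Minimal usage sketch (added for illustration; not part of the original
# file). It wires an Affine layer into SoftmaxWithLoss on random data; the
# shapes, seed, and labels below are arbitrary assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(4, 3)       # batch of 4 samples, 3 features each
    t = np.array([0, 1, 2, 1])      # integer class labels
    affine = Affine(np.random.randn(3, 3), np.zeros(3))
    loss_layer = SoftmaxWithLoss()
    loss = loss_layer.forward(affine.forward(x), t)
    dx = affine.backward(loss_layer.backward())  # gradients land in affine.grads
    print('loss:', loss)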
|
[
"rnakamura1234sky@gmail.com"
] |
rnakamura1234sky@gmail.com
|
31a05db7838d4d33629743005e121a3330199d89
|
eb5c7da64d7b2def4060dc52cf9a4b6e397aefd0
|
/day07/mysite/urls.py
|
b08461406ea1724472143c00f764505610c6992e
|
[] |
no_license
|
leeyj341/django
|
a77e0828df0318e3fb527c9b0f3ecaa9d3f66455
|
4e4a1e7c907b61d718130269bccb3d0768d4a3b9
|
refs/heads/master
| 2022-11-13T04:28:21.109114
| 2020-06-29T09:18:03
| 2020-06-29T09:18:03
| 270,921,868
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 801
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('movies/', include('movies.urls'))
]
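# Illustrative sketch (assumed; movies/urls.py is not shown in this file):
# the included movies.urls module would follow the same pattern, e.g.
#   urlpatterns = [path('', views.index, name='movie_index')]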
|
[
"leeyj3794@gmail.com"
] |
leeyj3794@gmail.com
|
44a889c50958580d26f6837358f91371e02c1903
|
8a1200f2bd0bf25a264c065da896d1806e45155a
|
/packages/postgresql-9.1.3.py
|
ac7ed54add17150e2aa1e04acc81df806c74723f
|
[] |
no_license
|
mhulsman/enhance
|
fac908eca561749ab99ea708f19f37eaceabf6c5
|
761234a263e4962a8e10026ce143c1ea0e7e8728
|
refs/heads/master
| 2021-01-18T23:40:56.252953
| 2016-09-29T13:04:48
| 2016-09-29T13:04:48
| 1,520,086
| 3
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
from package import *
class postgresql(MakePackage):
dependencies = ["readline"]
fetch="http://ftp.postgresql.org/pub/source/v%(version)s/postgresql-%(version)s.tar.gz"
|
[
"m.hulsman@tudelft.nl"
] |
m.hulsman@tudelft.nl
|
615e915ae7695c6459a347d7de6b31510dc0b268
|
769cc94f2d87b97a0c0cb54ae2a4b10a43594f57
|
/src/models/model_both.py
|
e454067cb2bbf8a5d3c9324193cd75f758790dde
|
[] |
no_license
|
mdrgona/nsiete_project
|
106096d0d959c8e228f84a9a1cb4134f7f675b00
|
e7993f209c496b19ea70cbf1c728a05273ee0633
|
refs/heads/master
| 2020-07-30T19:13:47.187073
| 2019-12-09T08:25:57
| 2019-12-09T08:25:57
| 210,327,481
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,787
|
py
|
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import initializers
from tensorflow.keras.layers import Embedding, Input, Dense, Flatten, concatenate, Dropout, dot, Lambda, Reshape, Dot
from tensorflow.keras.backend import transpose
import numpy as np
class JokeRecommender(keras.Model):
def __init__(self, emb_output_dim, number_users, number_jokes):
super(JokeRecommender, self).__init__()
# MLP part
self.user_emb_mlp = keras.layers.Embedding(number_users + 1, emb_output_dim)
self.user_vector_mlp = keras.layers.Flatten()
self.user_drop_mlp = keras.layers.Dropout(0.2)
self.joke_emb_mlp = keras.layers.Embedding(number_jokes + 1, emb_output_dim)
self.joke_vector_mlp = keras.layers.Flatten()
self.joke_drop_mlp = keras.layers.Dropout(0.2)
self.concat_drop = keras.layers.Dropout(0.2)
self.dense = keras.layers.Dense(units=32, activation='relu')
self.dense_2 = keras.layers.Dense(units=16, activation='relu')
self.dense_3 = keras.layers.Dense(units=12, activation='relu')
self.final_mlp = keras.layers.Dense(units=1, activation='relu')
# GMF part
self.user_emb_gmf = keras.layers.Embedding(number_users + 1, emb_output_dim)
self.user_vector_gmf = keras.layers.Flatten()
self.user_drop_gmf = keras.layers.Dropout(0.2)
self.joke_emb_gmf = keras.layers.Embedding(number_jokes + 1, emb_output_dim)
self.joke_vector_gmf = keras.layers.Flatten()
self.joke_drop_gmf = keras.layers.Dropout(0.2)
self.final_gmf = Dot(axes=1, normalize=True)
# Merge
self.dense_output = keras.layers.Dense(1)
def call(self, x):
user_mlp = self.user_emb_mlp(x[0])
user_mlp = self.user_vector_mlp(user_mlp)
user_mlp = self.user_drop_mlp(user_mlp)
joke_mlp = self.joke_emb_mlp(x[1])
joke_mlp = self.joke_vector_mlp(joke_mlp)
joke_mlp = self.joke_drop_mlp(joke_mlp)
mlp = concatenate([user_mlp, joke_mlp])
mlp = self.dense(mlp)
mlp = self.dense_2(mlp)
mlp = self.dense_3(mlp)
mlp = self.final_mlp(mlp)
user_gmf = self.user_emb_gmf(x[0])
user_gmf = self.user_vector_gmf(user_gmf)
user_gmf = self.user_drop_gmf(user_gmf)
joke_gmf = self.joke_emb_gmf(x[1])
joke_gmf = self.joke_vector_gmf(joke_gmf)
joke_gmf = self.joke_drop_gmf(joke_gmf)
gmf = self.final_gmf([user_gmf, joke_gmf])
x = concatenate([mlp, gmf])
x = self.dense_output(x)
return x
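# --- Minimal usage sketch (added for illustration; not part of the original
# file). Builds the hybrid MLP+GMF recommender and runs one forward pass on
# dummy id batches; all sizes below are arbitrary assumptions.
if __name__ == '__main__':
    model = JokeRecommender(emb_output_dim=8, number_users=100, number_jokes=50)
    users = np.array([[1], [2]])      # user ids, one per row
    jokes = np.array([[3], [4]])      # joke ids, one per row
    preds = model([users, jokes])     # shape (2, 1) predicted ratings
    print(preds.shape)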
|
[
"xdrgonam@stuba.sk"
] |
xdrgonam@stuba.sk
|
36d2d97f098581fcaf52b4c5e20fb4b92772c732
|
89faa4f7bd62956119484ec7b858ff8102ad6ad0
|
/Three_dimention_rendering.py
|
d14675f8af25d0326ec028b7ae63d6aaa4f368dd
|
[] |
no_license
|
naseer2426/Three-Dimention-Rendering-Tk
|
0066cdef5fb85d841b5858e512e71d1fb742cb9d
|
176ac860faed6c55846987df6d93710275ccf898
|
refs/heads/master
| 2020-04-15T22:26:12.193656
| 2019-01-31T16:57:27
| 2019-01-31T16:57:27
| 165,072,614
| 16
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,828
|
py
|
import tkinter as tk
import math as m
import time as t
from tkinter import colorchooser as c_chooser
class ThreeD():
def __init__(self, canvas, coords, alpha = 0, beeta = 0, gaama = 0, frame_rate = 30, unit_pixels = 200):
self.canvas = canvas
self.coords = self.set_coords(coords)
self.alpha = alpha
self.beeta = beeta
self.gaama = gaama
self.frame_rate = frame_rate
self.unit_pixels = unit_pixels
self.printed_polygons = []
self.set_view_dist()
self.set_canvas_size()
self.set_colour()
self.set_view_point()
self.set_virtual_axis()
        coplanar_ok = self.set_surface_equations()
        if not coplanar_ok:
            raise Exception("Points that are supposed to be coplanar are not coplanar")
@staticmethod
def dist(point1, point2):
x1,y1,z1 = point1
x2,y2,z2 = point2
return m.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)
@staticmethod
def set_coords(coords):
#[(points),[(orientation),(orientation),(colour)]]
points = coords[0]
final_coords = []
for surface in coords[1:]:
s = []
for polygon in surface:
p = []
for point in polygon[:-1]:
p.append(points[point])
p.append(polygon[-1])
s.append(p)
final_coords.append(s)
return final_coords
def set_view_dist(self):
self.d = 0
points = self.distinct_points()
for point in points:
distance = self.dist(point,(0,0,0))
if distance>self.d:
self.d = distance
self.d*=50
def set_view_point(self, view_point = None, reference_point = None):
if view_point == None and reference_point == None:
self.a = m.cos(self.beeta) * m.sin(self.alpha)
self.b = m.sin(self.beeta)
self.c = m.cos(self.beeta) * m.cos(self.alpha)
reference_point = self.rotate_zaxis((0,1,0), theeta = self.gaama)
self.set_virtual_axis(reference_point)
else:
self.a, self.b, self.c = view_point
self.set_virtual_axis(reference_point)
def distinct_points(self):
points = []
for surface in self.coords:
for polygon in surface:
for point in polygon[:-1]:
if point not in points:
points.append(point)
return points
def set_canvas_size(self):
self.csize = 0
points = self.distinct_points()
for point in points:
distance = self.dist(point,(0,0,0))
if distance>self.csize:
self.csize = distance
self.csize = int(self.csize*2*self.unit_pixels)+50
self.canvas.config(width = self.csize, height = self.csize)
@staticmethod
def plane_equation(point1, point2, point3):
x1,y1,z1 = point1
x2,y2,z2 = point2
x3,y3,z3 = point3
a = (y2-y1)*(z3-z1)-(y3-y1)*(z2-z1)
b = (x3-x1)*(z2-z1)-(x2-x1)*(z3-z1)
c = (x2-x1)*(y3-y1)-(x3-x1)*(y2-y1)
d = a*x1 + b*y1 + c*z1
return [a,b,c,d]
def set_surface_equations(self):
self.s_equations = []
for surface in self.coords:
point1 = surface[0][0]
point2 = surface[0][1]
point3 = surface[0][2]
self.s_equations.append(self.plane_equation(point1,point2,point3))
for polygon in surface:
for point in polygon[:-1]:
x,y,z = point
a,b,c,d = self.s_equations[-1]
if a*x + b*y + c*z != d:
return 0
return 1
def display_list(self):
l = []
for equation in self.s_equations:
A,B,C,D = equation
x,y,z = self.d*self.a,self.d*self.b,self.d*self.c
l.append(A*x + B*y + C*z >= D)
# print(l)
return l
def display_surfaces(self,coords):
d_list = self.display_list()
d_surface = []
for i in range(len(d_list)):
if d_list[i]:
d_surface.append(coords[i])
return d_surface
def threeD_to_twoD(self):
return_coords = []
for surface in self.coords:
return_surface = []
for polygon in surface:
return_polygon = []
for point in polygon[:-1]:
x,y,z = point
a,b,c = self.a, self.b, self.c
# X_temp = x*m.cos(self.alpha)-z*m.sin(self.alpha)
# Y_temp = y*m.cos(self.beeta)-z*m.sin(self.beeta)*m.cos(self.alpha)-x*m.sin(self.beeta)*m.sin(self.alpha)
#
# X = (X_temp*m.cos(self.gaama) + Y_temp*m.sin(self.gaama))*self.unit_pixels + self.csize/2
# Y = self.csize/2 - (-X_temp*m.sin(self.gaama) + Y_temp*m.cos(self.gaama))*self.unit_pixels
X = x*(b**2+c**2) - y*(a*b) - z*(a*c)
Y = y*(a**2+c**2) - z*(b*c) - x*(a*b)
Z = z*(a**2+b**2) - y*(b*c) - x*(a*c)
lamda = m.sqrt(b**2+c**2)
v = m.sqrt(a**2+b**2+c**2)
if lamda == 0:
lamdax = 1
c=1
else:
lamdax = lamda
X,Y,Z = self.rotate_xaxis((X,Y,Z), cos_val = c/lamdax, sin_val = b/lamdax)
X,Y,Z = self.rotate_yaxis((X,Y,Z), cos_val = lamda/v, sin_val = -a/v)
new_vxaxis = self.rotate_xaxis(self.vxaxis, cos_val = c/lamdax, sin_val = b/lamdax)
new_vxaxis = self.rotate_yaxis(new_vxaxis, cos_val = lamda/v, sin_val = -a/v)
new_referencepoint = self.rotate_xaxis(self.reference_point, cos_val = c/lamdax, sin_val = b/lamdax)
new_referencepoint = self.rotate_yaxis(new_referencepoint, cos_val = lamda/v, sin_val = -a/v)
if new_vxaxis[1]>=0 and new_referencepoint[1]>=0:
gaama = m.asin(new_vxaxis[1])
elif new_referencepoint[1]<=0:
gaama = m.pi - m.asin(new_vxaxis[1])
else:
gaama = 2*m.pi + m.asin(new_vxaxis[1])
X,Y,Z = self.rotate_zaxis((X,Y,Z),theeta = -gaama)
X = X*self.unit_pixels + self.csize/2
Y = self.csize/2 - Y*self.unit_pixels
return_polygon.append((X,Y))
return_polygon.append('#%02x%02x%02x' % polygon[-1])
return_surface.append(return_polygon)
return_coords.append(return_surface)
return return_coords
def delete_polygon(self):
for polygon in self.printed_polygons:
self.canvas.delete(polygon)
self.printed_polygons = []
def print_object(self , during_animation = 0):
self.delete_polygon()
twoD_coords = self.display_surfaces(self.threeD_to_twoD())
self.dynamic_colours()
for surface in twoD_coords:
for polygon in surface:
self.printed_polygons.append(self.canvas.create_polygon(polygon[:-1], fill = polygon[-1]))
self.canvas.update()
if during_animation:
t.sleep(1/self.frame_rate)
def change_angles(self, change_alpha, change_beeta, change_gaama):
self.alpha += change_alpha
self.beeta += change_beeta
self.gaama += change_gaama
self.set_view_point()
def set_angles(self, alpha = None, beeta = None, gaama = None):
if alpha == None and beeta == None and gaama ==None:
pass
else:
self.alpha = alpha
self.beeta = beeta
self.gaama = gaama
self.set_view_point()
@staticmethod
def rotate_xaxis(point, theeta = None, cos_val = None, sin_val = None):
if cos_val == None:
cos_val = m.cos(theeta)
if sin_val == None:
sin_val = m.sin(theeta)
x,y,z = point
Y = y*cos_val - z*sin_val
Z = y*sin_val + z*cos_val
return (x,Y,Z)
@staticmethod
def rotate_yaxis(point, theeta = None, cos_val = None, sin_val = None):
if cos_val == None:
cos_val = m.cos(theeta)
if sin_val == None:
sin_val = m.sin(theeta)
x,y,z = point
X = x*cos_val + z*sin_val
Z = -x*sin_val + z*cos_val
return (X,y,Z)
@staticmethod
def rotate_zaxis(point, theeta = None, cos_val = None, sin_val = None):
if cos_val == None:
cos_val = m.cos(theeta)
if sin_val == None:
sin_val = m.sin(theeta)
x,y,z = point
X = x*cos_val - y*sin_val
Y = x*sin_val + y*cos_val
return (X,Y,z)
def rotate_point_about_line(self, point, angle, line_vector):
a,b,c = line_vector
lamda = m.sqrt(b**2+c**2)
v = m.sqrt(a**2+b**2+c**2)
if lamda == 0:
lamdax = 1
c=1
else:
lamdax = lamda
# print(line_vector)
p = self.rotate_xaxis(point, cos_val = c/lamdax, sin_val = b/lamdax)
p = self.rotate_yaxis(p, cos_val = lamda/v, sin_val = -a/v)
p = self.rotate_zaxis(p, theeta = angle)
p = self.rotate_yaxis(p, cos_val = lamda/v, sin_val = a/v)
p = self.rotate_xaxis(p, cos_val = c/lamdax, sin_val = -b/lamdax)
return p
def set_virtual_axis(self, reference_point = (0,1,0)):
self.reference_point = reference_point
x1,y1,z1 = reference_point
x2,y2,z2 = self.a,self.b,self.c
self.vxaxis = (y1*z2-y2*z1, x2*z1-x1*z2, x1*y2-x2*y1)
def set_first_click(self, event):
self.mouse_loc = (event.x, event.y)
def change_view_angle(self, event):
self.canvas.unbind('<B1-Motion>', self.move)
x_diff = event.x - self.mouse_loc[0]
y_diff = event.y - self.mouse_loc[1]
const = m.pi/(self.unit_pixels*4)
alpha_change = -x_diff * const
beeta_change = y_diff * const
#print(self.reference_point)
new_viewpoint = self.rotate_point_about_line((self.a,self.b,self.c),alpha_change,self.reference_point)
new_viewpoint = self.rotate_point_about_line(new_viewpoint,-beeta_change,self.vxaxis)
new_referencepoint = self.rotate_point_about_line(self.reference_point,-beeta_change,self.vxaxis)
#print(self.vxaxis)
# print('Angles: ',(self.alpha/m.pi,self.beeta/m.pi,self.gaama/m.pi))
# print('viewpoint: ',self.reference_point)
# print('vxaxis: ',self.vxaxis)
self.set_view_point(new_viewpoint,new_referencepoint)
#self.set_angles()
self.print_object(1)
# print((self.alpha/m.pi,self.beeta/m.pi,self.gaama/m.pi))
self.mouse_loc = (event.x, event.y)
self.move = self.canvas.bind('<B1-Motion>', self.change_view_angle)
def dynamic_movement(self):
self.start_move = self.canvas.bind('<Button-1>', self.set_first_click)
self.move = self.canvas.bind('<B1-Motion>', self.change_view_angle)
def stop_dynamic_movement(self):
self.canvas.unbind('<Button-1>', self.start_move)
self.canvas.unbind('<B1-Motion>',self.move)
def change_colour(self, colours):
for i in range(len(self.coords)):
for j in range(len(self.coords[i])):
self.colours[i][j][-1] = colours[i][j]
def set_colour(self, colours = None):
if colours == None:
self.colours = []
for surface in self.coords:
s = []
for polygon in surface:
s.append(polygon[-1])
self.colours.append(s)
else:
self.colours = colours
def dynamic_colours(self):
a1,b1,c1 = self.a,self.b,self.c
for i in range(len(self.coords)):
a2,b2,c2 = self.s_equations[i][0],self.s_equations[i][1],self.s_equations[i][2]
d = self.dist((a2,b2,c2),(0,0,0))
a2,b2,c2 = a2/d,b2/d,c2/d
cos_angle = a1*a2+b1*b2+c1*c2
if cos_angle>=0:
for j in range(len(self.coords[i])):
r,g,b = self.colours[i][j]
r,g,b = r*cos_angle + r/3*(1-cos_angle),g*cos_angle + g/3*(1-cos_angle),b*cos_angle + b/3*(1-cos_angle)
self.coords[i][j][-1] = (int(r),int(g),int(b))
# Example usage (completed for illustration; the original comment omitted the
# Tk root/canvas setup):
# root = tk.Tk()
# canvas = tk.Canvas(root)
# canvas.pack()
# cube_coords = [[(1,-1,1),(-1,-1,1),(-1,-1,-1),(1,-1,-1),(1,1,-1),(1,1,1),(-1,1,1),(-1,1,-1)],[[5,6,1,0,(255,255,255)]],[[4,5,0,3,(0,255,0)]],[[2,7,4,3,(0,0,255)]],[[1,6,7,2,(255,255,255)]],[[6,5,4,7,(0,255,255)]],[[1,2,3,0,(255,255,0)]]]
# cone_coords = [[(1,-1,1),(-1,-1,1),(-1,-1,-1),(1,-1,-1),(0,1,0)],[[1,2,3,0,(255,0,0)]],[[1,0,4,(255,0,0)]],[[0,3,4,(255,0,0)]],[[2,4,3,(255,0,0)]],[[1,4,2,(255,0,0)]]]
# cube = ThreeD(canvas, cube_coords)
# cone = ThreeD(canvas, cone_coords)
# cube.print_object()
# cube.dynamic_movement()
# cone.print_object()
# cone.dynamic_movement()
# root.mainloop()
class input_info():
def __init__(self):
self.root = tk.Tk()
self.root.title("3D Renderer")
self.heading_frame = tk.Frame(self.root)
heading = tk.Label(self.heading_frame, text = "3D Renderer", font = ("Arial Black", 30))
self.body_frame = tk.Frame(self.root)
q = tk.Label(self.body_frame, text = '''Please input the 3D coordinates of the corner points of your shape.
(It should be of the form (x,y,z):(x,y,z):(x,y,z):.....)''', font = ("Arial", 18))
q2 = tk.Label(self.body_frame, text = '''Please input the number of sides in your 3D shape.''', font = ("Arial", 18))
self.points_input = tk.Entry(self.body_frame, font = ("Arial", 18), width = 50)
self.surface_input = tk.Entry(self.body_frame, font = ("Arial", 18))
empty_label = tk.Label(self.body_frame, text = '')
self.button_frame = tk.Frame(self.root)
done_button = tk.Button(self.button_frame, text = "Done", height = 2, width = 20, bg = "grey", command = self.connect_points)
self.heading_frame.pack(side = 'top')
self.body_frame.pack()
heading.pack()
q.grid(row = 0)
self.points_input.grid(row = 1)
empty_label.grid(row = 2)
q2.grid(row = 3)
self.surface_input.grid(row = 4)
self.button_frame.pack(side = 'bottom')
done_button.grid(row = 0, pady = 15)
self.root.mainloop()
def set_points(self):
temp_points = self.points_input.get()
temp_points2 = ''
for stuff in temp_points:
if stuff!=' ':
temp_points2+=stuff
try:
point_list = temp_points2.split(':')
self.points = []
for point in point_list:
x,y,z = point[1:-1].split(",")
self.points.append((int(x),int(y),int(z)))
self.surfaces = int(self.surface_input.get())
self.body_frame.destroy()
self.button_frame.destroy()
return 1
except:
error_label = tk.Label(self.body_frame, text = "There's something wrong in your input!", font = ("Arial", 14))
error_label.grid(row = 5)
return 0
def connect_points(self):
if self.set_points():
self.curr_index = 0
self.body_frame = tk.Frame(self.root)
index_frame = tk.Frame(self.body_frame)
input_frame = tk.Frame(self.body_frame)
self.button_frame = tk.Frame(self.root)
self.order_list = []
self.side_label = tk.Label(self.body_frame, text = "Side "+str(self.curr_index+1), font = ("Arial", 28))
point_index = tk.Label(index_frame, text = "Points\n"+ self.get_points_string(), font = ("Arial", 18))
            instruction = tk.Label(input_frame, text = "Input the indexes of the points that connect to make this side (e.g. 0,2,4,3), and choose its colour.\nMake sure you use the right hand rule for the order of the points.", font = ("Arial", 15))
self.order_input = tk.Entry(input_frame, font = ("Arial", 18))
next_button = tk.Button(self.button_frame, text = 'Next', bg = 'grey', height = 3, width = 30, command = self.go_next)
back_button = tk.Button(self.button_frame, text = 'Back', bg = 'grey', height = 3, width = 30, command = self.go_back)
self.colour_chosen = (255,255,255)
self.error = 0
self.colour_button = tk.Button(input_frame, text = 'Choose colour', bg = 'grey', command = self.colour_chooser)
self.body_frame.pack(side = 'top')
self.button_frame.pack(side = 'bottom')
self.side_label.grid(row = 0, columnspan = 2)
index_frame.grid(row = 1, column = 0)
input_frame.grid(row = 1, column = 1)
point_index.pack()
instruction.grid(row = 0)
self.order_input.grid(row = 1)
self.colour_button.grid(row = 2)
back_button.grid(row = 0, column = 0, padx = 25)
next_button.grid(row = 0, column = 1, padx = 25)
def get_points_string(self):
index = 0
final_string = ''
for point in self.points:
final_string+=str(index)+': '+str(point)+'\n'
index+=1
return final_string
def go_next(self):
if self.error:
self.error_label.destroy()
if self.curr_index<self.surfaces:
order = self.order_input.get()
if self.check_order(order):
self.order_input.delete(0,len(order))
order = order.split(',')
for i in range(len(order)):
order[i] = int(order[i])
order.append(self.colour_chosen)
self.colour_chosen = (255,255,255)
if self.curr_index==self.surfaces-1:
self.order_list.append([order])
self.draw_shape()
else:
self.curr_index+=1
self.colour_button.config(bg = 'grey')
self.order_list.append([order])
self.side_label.config(text = "Side "+str(self.curr_index+1))
def go_back(self):
if self.curr_index>0:
self.curr_index-=1
self.order_list = self.order_list[:-1]
self.side_label.config(text = "Side "+str(self.curr_index+1))
self.order_input.delete(0,len(self.order_input.get()))
def colour_chooser(self):
r,g,b = c_chooser.askcolor()[0]
self.colour_chosen = (int(r),int(g),int(b))
self.colour_button.config(bg = '#%02x%02x%02x' % self.colour_chosen)
def draw_shape(self):
self.body_frame.destroy()
self.button_frame.destroy()
canvas = tk.Canvas(self.root)
canvas.pack()
shape = ThreeD(canvas, [self.points]+self.order_list)
shape.print_object()
shape.dynamic_movement()
def check_order(self,order):
try:
indexes = order.split(',')
self.error = 0
for i in range(len(indexes)):
indexes[i] = int(indexes[i])
if indexes[i]>=len(self.points):
self.error = 1
except:
self.error = 1
if self.error:
self.error_label = tk.Label(self.body_frame, text = "There's something wrong with your input!", font = ("Arial",18))
self.error_label.grid(row = 2, columnspan = 2)
return not self.error
if __name__ == '__main__':
Input = input_info()
|
[
"35726629+naseer2426@users.noreply.github.com"
] |
35726629+naseer2426@users.noreply.github.com
|
77ebe723a966d681a96463a872a4909952361fc8
|
dfc0d7851f0df172b8cbf06021f92e864bcaec0e
|
/pahap/scripts/RRT_server1.py
|
fc6a95d2e6f944633241396d612f188d4316cfd6
|
[
"BSD-2-Clause"
] |
permissive
|
aralab-unr/ara_navigation
|
1d022e72a42e0b523d11782f9c297ebf96443a4a
|
a83d12df1f6b85023c84a2d6835648f7ae12e8be
|
refs/heads/main
| 2023-01-03T17:02:41.521632
| 2020-10-31T18:15:18
| 2020-10-31T18:15:18
| 308,933,232
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,440
|
py
|
"""
Path planning Sample Code with Randomized Rapidly-Exploring Random Trees (RRT)
author: AtsushiSakai(@Atsushi_twi)
"""
import math
import random
import matplotlib.pyplot as plt
import numpy as np
show_animation = True
class RRT:
"""
Class for RRT planning
"""
class Node:
"""
RRT Node
"""
def __init__(self, x, y, phi):
self.x = x
self.y = y
self.phi = phi
self.path_x = []
self.path_y = []
self.path_phi = []
self.parent = None
def __init__(self,
start,
goal,
obstacle_list,
rand_area,
expand_dis=3.0,
path_resolution=0.5,
goal_sample_rate=5,
max_iter=500
):
"""
Setting Parameter
start:Start Position [x,y]
goal:Goal Position [x,y]
obstacleList:obstacle Positions [[x,y,size],...]
randArea:Random Sampling Area [min,max]
"""
self.start = self.Node(start[0], start[1], start[2])
        self.end = self.Node(goal[0], goal[1], goal[2])  # the original indexed goal[3], out of range for a 3-element goal
self.min_rand = rand_area[0]
self.max_rand = rand_area[1]
self.phi_rand = rand_area[2]
self.expand_dis = expand_dis
self.path_resolution = path_resolution
self.goal_sample_rate = goal_sample_rate
self.max_iter = max_iter
self.obstacle_list = obstacle_list
self.node_list = []
def planning(self, animation=True):
"""
rrt path planning
animation: flag for animation on or off
"""
self.node_list = [self.start]
for i in range(self.max_iter):
rnd_node = self.get_random_node()
# return the index of the nearest node
nearest_ind = self.get_nearest_node_index(self.node_list, rnd_node)
nearest_node = self.node_list[nearest_ind]
new_node = self.steer(nearest_node, rnd_node, self.expand_dis)
# if self.check_collision(new_node, self.obstacle_list):
            # `boundaries` (clustered boundary points) is assumed to be supplied
            # elsewhere; the original misspelled it as `boudnaries`
            if self.ara_check_collision(new_node, boundaries):
self.node_list.append(new_node)
# if animation and i % 5 == 0:
# self.draw_graph(rnd_node)
            if self.calc_dist_to_goal(self.node_list[-1].x, self.node_list[-1].y,
                                      self.node_list[-1].phi) <= self.expand_dis:
final_node = self.steer(self.node_list[-1], self.end,
self.expand_dis)
if self.check_collision(final_node, self.obstacle_list):
return self.generate_final_course(len(self.node_list) - 1)
# if animation and i % 5:
# self.draw_graph(rnd_node)
return None # cannot find path
def steer(self, from_node, to_node, extend_length=float("inf")):
        new_node = self.Node(from_node.x, from_node.y, from_node.phi)  # the original omitted phi, which Node requires
d, theta, d_phi = self.calc_distance_and_angle(new_node, to_node)
new_node.path_x = [new_node.x]
new_node.path_y = [new_node.y]
new_node.path_phi = [new_node.phi]
if extend_length > d:
extend_length = d
# find the number of step to extend by rounding down ...
n_expand = math.floor(extend_length / self.path_resolution)
for _ in range(n_expand):
new_node.x += self.path_resolution * math.cos(theta)
new_node.y += self.path_resolution * math.sin(theta)
new_node.phi += self.path_resolution*d_phi
new_node.path_x.append(new_node.x)
new_node.path_y.append(new_node.y)
new_node.path_phi.append(new_node.phi)
d, _, _ = self.calc_distance_and_angle(new_node, to_node)
if d <= self.path_resolution:
new_node.path_x.append(to_node.x)
new_node.path_y.append(to_node.y)
new_node.path_phi.append(to_node.phi)
new_node.x = to_node.x
new_node.y = to_node.y
new_node.phi = to_node.phi
new_node.parent = from_node
return new_node
def generate_final_course(self, goal_ind):
path = [[self.end.x, self.end.y, self.end.phi]]
node = self.node_list[goal_ind]
while node.parent is not None:
path.append([node.x, node.y, node.phi])
node = node.parent
path.append([node.x, node.y, node.phi])
return path
def calc_dist_to_goal(self, x, y, phi):
dx = x - self.end.x
dy = y - self.end.y
dphi = phi - self.end.phi
dis = math.hypot(dx, dy)
return dis + dphi
def get_random_node(self):
if random.randint(0, 100) > self.goal_sample_rate:
rnd = self.Node(
random.uniform(self.min_rand, self.max_rand),
random.uniform(self.min_rand, self.max_rand),
random.uniform(-self.phi_rand, self.phi_rand))
else: # goal point sampling
rnd = self.Node(self.end.x, self.end.y, self.end.phi)
return rnd
def draw_graph(self, rnd=None):
plt.clf()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect(
'key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
if rnd is not None:
plt.plot(rnd.x, rnd.y, "^k")
for node in self.node_list:
if node.parent:
plt.plot(node.path_x, node.path_y, "-g")
for (ox, oy, size) in self.obstacle_list:
self.plot_circle(ox, oy, size)
plt.plot(self.start.x, self.start.y, "xr")
plt.plot(self.end.x, self.end.y, "xr")
plt.axis("equal")
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
@staticmethod
def plot_circle(x, y, size, color="-b"): # pragma: no cover
deg = list(range(0, 360, 5))
deg.append(0)
xl = [x + size * math.cos(np.deg2rad(d)) for d in deg]
yl = [y + size * math.sin(np.deg2rad(d)) for d in deg]
plt.plot(xl, yl, color)
@staticmethod
def get_nearest_node_index(node_list, rnd_node):
dlist = [(node.x - rnd_node.x)**2 + (node.y - rnd_node.y)**2 + (node.phi - rnd_node.phi)**2
for node in node_list]
minind = dlist.index(min(dlist))
return minind
@staticmethod
def check_collision(node, obstacleList):
if node is None:
return False
for (ox, oy, size) in obstacleList:
dx_list = [ox - x for x in node.path_x]
dy_list = [oy - y for y in node.path_y]
d_list = [dx * dx + dy * dy for (dx, dy) in zip(dx_list, dy_list)]
if min(d_list) <= size**2:
return False # collision
return True # safe
# @staticmethod
def ara_check_collision(self, node, data):
if node is None:
return False
# calculate all the other conner of the robot
roPos = self.robot_positions(node)
# calculate the center point of the clusters
cenPointSet = []
        for i in range(len(data)):
            data1 = data[i]
            # accumulate this cluster's centroid (assumed intent: the original
            # overwrote instead of summing and indexed the wrong array)
            cen_x = 0.0
            cen_y = 0.0
            for j in range(len(data1)):
                cen_x += data1[j][0]
                cen_y += data1[j][1]
            cen_x = cen_x/len(data1)
            cen_y = cen_y/len(data1)
if i ==0:
cenPointSet = np.array([[cen_x, cen_y]])
else:
cenPointSet = np.append(cenPointSet, [[cen_x, cen_y]], axis=0)
# check in the boudnary for each robot point
for pos in range(len(roPos)):
# select 3 closest clusters around the robot point by their center
clus_index = np.array([0,0,0])
clus_dis = np.array([1.5,1.5,1.5])
d_max = 1.5
dmax_index = 0
if len(cenPointSet) < 3:
clus_index[2] = -1
                clus_dis[2] = 0.0  # the original misspelled clus_dis
for i in range(len(cenPointSet)):
dis = self.distance(roPos[pos], cenPointSet[i])
if dis < d_max:
d_max = dis
clus_dis[dmax_index] = dis
clus_index[dmax_index] = i
for k in range(len(clus_dis)):
if d_max < clus_dis[k]:
d_max = clus_dis[k]
dmax_index = k
# check the robot point whether it belongs to one of the clusters
for j in range(len(clus_index)):
if not (clus_index[j] == -1):
data2 = data[clus_index[j]]
neigh_index = np.array([0,0,0,0])
dist4neighbors = np.array([1.0,1.0,1.0,1.0])
max_distance = 1.5
point_index = 0
# find 4 closest point around the robot point in each cluster
for k in range(len(data2)):
dis = self.distance(roPos[pos], data2[k])
if dis < max_distance:
max_distance = dis
dist4neighbors[point_index] = dis
neigh_index[point_index] = k
for m in range(len(dist4neighbors)):
if max_distance < dist4neighbors[m]:
max_distance = dist4neighbors[m]
point_index = m
# compare the 4 distance of 4 points and the robot points to the center points
d_nc = np.array([1.0,1.0,1.0,1.0])
for g in range(len(neigh_index)):
d_nc1 = self.distance(cenPointSet[clus_index[j]], data2[neigh_index[g]])
d_nc[g] = d_nc1
d_rpc = self.distance(cenPointSet[clus_index[j]], roPos[pos])
# boolean check set
bcs = [True, True, True, True]
for h in range(len(d_nc)):
if np.absolute(d_rpc - d_nc[h]) > 0.01:
bcs[h] = False
check = True
for u in range(len(bcs)):
check = check and bcs[u]
if check:
return True # Safe
return False # collision
def distance(self, pos1, pos2):
return np.sqrt(np.power((pos1[0]-pos2[0]),2)+np.power((pos1[1]-pos2[1]),2))
    def robot_positions(self, node):
x = node.x
y = node.y
phi = node.phi
d = 0.4 # distance between two feet
l = 0.18 # length of the foot
w = 0.22 # width of the foot
dc = np.sqrt(np.power((l/2),2) + np.power((w/2),2))
alpha = np.arctan(w/d)
p1_x = x + dc*np.cos(-alpha + phi)
p1_y = y + dc*np.sin(-alpha + phi)
PoRo = np.array([[p1_x, p1_y]])
p2_x = x + dc*np.cos(alpha + phi)
p2_y = y + dc*np.sin(alpha + phi)
PoRo = np.append(PoRo, [[p2_x, p2_y]], axis=0)
p3_x = x + dc*np.cos(np.pi - alpha + phi)
p3_y = y + dc*np.sin(np.pi - alpha + phi)
PoRo = np.append(PoRo, [[p3_x, p3_y]], axis=0)
p4_x = x + dc*np.cos(-np.pi + alpha + phi)
p4_y = y + dc*np.sin(-np.pi + alpha + phi)
PoRo = np.append(PoRo, [[p4_x, p4_y]], axis=0)
        p5_x = -d*np.cos(phi) + x + dc*np.cos(-alpha + phi)
        p5_y = -d*np.sin(phi) + y + dc*np.sin(-alpha + phi)
        PoRo = np.append(PoRo, [[p5_x, p5_y]], axis=0)
        p6_x = -d*np.cos(phi) + x + dc*np.cos(alpha + phi)
        p6_y = -d*np.sin(phi) + y + dc*np.sin(alpha + phi)
        PoRo = np.append(PoRo, [[p6_x, p6_y]], axis=0)
        p7_x = -d*np.cos(phi) + x + dc*np.cos(np.pi - alpha + phi)
        p7_y = -d*np.sin(phi) + y + dc*np.sin(np.pi - alpha + phi)
        PoRo = np.append(PoRo, [[p7_x, p7_y]], axis=0)
        p8_x = -d*np.cos(phi) + x + dc*np.cos(-np.pi + alpha + phi)
        p8_y = -d*np.sin(phi) + y + dc*np.sin(-np.pi + alpha + phi)
        PoRo = np.append(PoRo, [[p8_x, p8_y]], axis=0)
return PoRo
@staticmethod
def calc_distance_and_angle(from_node, to_node):
dx = to_node.x - from_node.x
dy = to_node.y - from_node.y
d = math.hypot(dx, dy)
delta_phi = to_node.phi - from_node.phi
phi_res = delta_phi/d
theta = math.atan2(dy, dx)
return d, theta, phi_res
def main(gx=6.0, gy=10.0):
print("start " + __file__)
# ====Search Path with RRT====
obstacleList = [(5, 5, 1), (3, 6, 2), (3, 8, 2), (3, 10, 2), (7, 5, 2),
(9, 5, 2), (8, 10, 1)] # [x, y, radius]
# Set Initial parameters
    rrt = RRT(
        start=[0, 0, 0],  # assumed [x, y, phi]: the Node used above carries a heading
        goal=[gx, gy, 0],
        rand_area=[-2, 15],
        obstacle_list=obstacleList)
path = rrt.planning(animation=show_animation)
if path is None:
print("Cannot find path")
else:
print("found path!!")
# Draw final path
if show_animation:
rrt.draw_graph()
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.pause(0.01) # Need for Mac
plt.show()
if __name__ == '__main__':
main()
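# --- Illustrative sketch (added; not part of the original script) ---
# A minimal sanity check of the distance/heading helper above, assuming
# RRT.Node is the nested node class taking (x, y, phi), as in get_random_node.
def _demo_distance_and_angle():
    a = RRT.Node(0.0, 0.0, 0.0)
    b = RRT.Node(3.0, 4.0, math.pi / 2)
    d, theta, phi_res = RRT.calc_distance_and_angle(a, b)
    print(d, theta, phi_res)  # expect 5.0, atan2(4, 3), (pi/2) / 5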
|
[
"bui.hoangdungtn@gmail.com"
] |
bui.hoangdungtn@gmail.com
|
9665f5262400aa5e8492c915f9433a794eb3fa9f
|
36d9b0bf8da6403a233cb1a5d93a97fd5dac38d9
|
/fundraising/migrations/0002_djangohero_approved_booleanfield.py
|
e86571ef1be70336c5dea12ccba6017475b7da91
|
[
"BSD-3-Clause"
] |
permissive
|
cossacklabs/djangoproject.com
|
da4123046514693edaaffd726a84587a5f91ae66
|
621e18f928db903d73b84788b3e3c9df9e83dd4c
|
refs/heads/master
| 2021-12-14T17:41:54.270535
| 2021-12-02T15:55:41
| 2021-12-02T15:55:41
| 76,054,463
| 1
| 0
|
NOASSERTION
| 2021-12-02T15:55:42
| 2016-12-09T17:04:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 398
|
py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fundraising', '0001_squashed_0007_inkinddonor'),
]
operations = [
migrations.AlterField(
model_name='djangohero',
name='approved',
field=models.BooleanField(null=True, verbose_name='Name, URL, and Logo approved?'),
),
]
|
[
"felisiak.mariusz@gmail.com"
] |
felisiak.mariusz@gmail.com
|
d2036e2ad31daaa4615ba8e2b100b0650b8f4e7f
|
f11671737fa347b00725d8f1c75167e8cbf1ac9a
|
/tests/unit/filesys/concrete_test.py
|
4856658ecc00f1660de0c703a3b6885411507621
|
[
"Apache-2.0"
] |
permissive
|
sirosen/SALVE
|
12d89e9ddd172ba3a7b81839fc4bb8687ecd1cdd
|
5711b5c71e39b958bc8185c6b893358de7598ae2
|
refs/heads/stable
| 2021-01-13T02:23:21.002718
| 2015-11-14T21:28:25
| 2015-11-14T21:28:25
| 11,790,762
| 0
| 0
| null | 2015-11-14T21:24:49
| 2013-07-31T12:51:16
|
Python
|
UTF-8
|
Python
| false
| false
| 14,161
|
py
|
#!/usr/bin/python
import os
import hashlib
import mock
from nose.tools import istest
from tests.util import ensure_except, scratch
from salve.filesys import ConcreteFilesys
class TestWithScratchdir(scratch.ScratchContainer):
@istest
def missing_file_lookup(self):
"""
Unit: Filesys Concrete Missing File Lookup Type Is None
Verifies that attempting to lookup the type of a file that does not
exist in a Concrete Filesystem will return None.
"""
fs = ConcreteFilesys()
ty = fs.lookup_type(self.get_fullname('a'))
assert ty is None
@istest
def file_lookup_type(self):
"""
Unit: Filesys Concrete File Lookup Type Is FILE
Validate that type lookups always produce the correct type.
"""
full_path = self.get_fullname('a')
self.write_file('a', 'abcdefg')
fs = ConcreteFilesys()
ty = fs.lookup_type(full_path)
assert ty is fs.element_types.FILE
@istest
def dir_lookup_type(self):
"""
Unit: Filesys Concrete Dir Lookup Type Is DIR
Validate that type lookups always produce the correct type.
"""
full_path = self.get_fullname('a')
self.make_dir('a')
fs = ConcreteFilesys()
ty = fs.lookup_type(full_path)
assert ty is fs.element_types.DIR
@istest
def link_lookup_type(self):
"""
Unit: Filesys Concrete Link Lookup Is LINK
Validate that type lookups always produce the correct type.
"""
full_path = self.get_fullname('a')
os.symlink('nowhere', full_path)
fs = ConcreteFilesys()
ty = fs.lookup_type(full_path)
assert ty is fs.element_types.LINK
@istest
def copy_file(self):
"""
Unit: Filesys Concrete File Copy
Copying a file must succeed.
"""
content = 'fooing a bar in here'
self.write_file('a', content)
src_path = self.get_fullname('a')
dst_path = self.get_fullname('b')
fs = ConcreteFilesys()
fs.copy(src_path, dst_path)
assert content == self.read_file('b')
@istest
def copy_link(self):
"""
Unit: Filesys Concrete Link Copy
Copying a symlink must succeed.
"""
link_target = self.get_fullname('a')
content = 'fooing a bar in here'
self.write_file('a', content)
src_path = self.get_fullname('a_link')
dst_path = self.get_fullname('b')
os.symlink(link_target, src_path)
fs = ConcreteFilesys()
fs.copy(src_path, dst_path)
assert os.path.islink(dst_path)
assert os.readlink(dst_path) == link_target
@istest
def copy_dir(self):
"""
Unit: Filesys Concrete Dir Copy
Copying a directory must succeed.
"""
self.make_dir('a/b/c')
self.make_dir('z')
content = 'fooing a bar in here'
self.write_file('a/b/f1', content)
src_path = self.get_fullname('a')
dst_path = self.get_fullname('z/a')
fs = ConcreteFilesys()
fs.copy(src_path, dst_path)
assert os.path.isdir(dst_path)
assert os.path.isfile(self.get_fullname('z/a/b/f1'))
assert content == self.read_file('z/a/b/f1')
@istest
def create_file(self):
"""
Unit: Filesys Concrete File Touch
Creating an empty file must always work (when the path is valid)
"""
full_path = self.get_fullname('a')
fs = ConcreteFilesys()
fs.touch(full_path)
assert os.path.isfile(full_path)
assert '' == self.read_file('a')
@istest
def create_link(self):
"""
Unit: Filesys Concrete Link Create
Creating a symlink must always work, even if it is a broken link
"""
full_path = self.get_fullname('a')
link_target = 'b'
fs = ConcreteFilesys()
fs.symlink(link_target, full_path)
assert os.path.islink(full_path)
assert link_target == os.readlink(full_path)
@istest
def create_dir_nonrecursive(self):
"""
Unit: Filesys Concrete Dir Create (Non-Recursive)
Creating a single level of a directory tree should always succeed
"""
full_path = self.get_fullname('a')
fs = ConcreteFilesys()
fs.mkdir(full_path, recursive=False)
assert os.path.isdir(full_path)
assert len(os.listdir(full_path)) == 0
@istest
def create_dir_recursive(self):
"""
Unit: Filesys Concrete Dir Create (Recursive)
Creating a path of a directory tree should always succeed if recursive
is set.
"""
full_path = self.get_fullname('a/b/c')
fs = ConcreteFilesys()
fs.mkdir(full_path, recursive=True)
assert os.path.isdir(full_path)
assert len(os.listdir(full_path)) == 0
@istest
def double_create_dir(self):
"""
Unit: Filesys Concrete Dir Double Create No Error
Repeated creation of a single directory should not raise any errors
"""
full_path = self.get_fullname('a')
fs = ConcreteFilesys()
fs.mkdir(full_path, recursive=False)
fs.mkdir(full_path, recursive=False)
assert os.path.isdir(full_path)
assert len(os.listdir(full_path)) == 0
@istest
def create_dir_nonrecursive_missing_parent(self):
"""
Unit: Filesys Concrete Dir Create (Non-Recursive) Missing Parent Error
Validates that creating a directory with recursive off raises an
OSError with errno=2 if there is a missing ancestor.
"""
full_path = self.get_fullname('a/b')
fs = ConcreteFilesys()
e = ensure_except(OSError, fs.mkdir, full_path, recursive=False)
assert e.errno == 2
assert not os.path.isdir(full_path)
@istest
def file_open_write_only(self):
"""
Unit: Filesys Concrete File Open Write-Only
Verifies that opening a file in write only mode works as expected
"""
full_path = self.get_fullname('a')
fs = ConcreteFilesys()
fs.touch(full_path)
with fs.open(full_path, 'w') as fd:
fd.write('xyz')
ensure_except(IOError, fd.read)
assert os.path.isfile(full_path)
assert 'xyz' == self.read_file('a')
@istest
def file_open_read_only(self):
"""
Unit: Filesys Concrete File Open Read-Only
Verifies that opening a file in read only mode works as expected
"""
full_path = self.get_fullname('a')
self.write_file('a', 'xyz')
fs = ConcreteFilesys()
fs.touch(full_path)
with fs.open(full_path, 'r') as fd:
ensure_except(IOError, fd.write, 'pqr')
assert fd.read() == 'xyz'
@istest
def file_get_hash(self):
"""
Unit: Filesys Concrete File Get Hash
Validates the result of getting a file's sha512 hash
"""
full_path = self.get_fullname('a')
self.write_file('a', 'xyz')
fs = ConcreteFilesys()
hashval = fs.hash(full_path)
expect = hashlib.sha512('xyz'.encode('utf-8')).hexdigest()
assert hashval == expect
@istest
def link_get_hash(self):
"""
Unit: Filesys Concrete Link Get Hash
Validates the result of getting a symlink's sha256 hash
"""
full_path = self.get_fullname('a')
fs = ConcreteFilesys()
fs.symlink('xyz', full_path)
hashval = fs.hash(full_path)
expect = hashlib.sha256('xyz'.encode('utf-8')).hexdigest()
assert hashval == expect
@istest
def concrete_access_all_combinations(self):
"""
Unit: Filesys Concrete Access With All Flags & Perms
Validates that the various valid inputs to access return the correct
results. Only uses user perms, setting g and o to 0.
"""
# maps (flags, mode) pairs to the expected results
# when mode=None, means the file is missing
result_map = {}
all_modes = [0o000, 0o100, 0o200, 0o400,
0o300, 0o500, 0o600, 0o700]
for mode in all_modes:
result_map[(os.F_OK, mode)] = True
r = mode & 0o400 != 0
w = mode & 0o200 != 0
x = mode & 0o100 != 0
result_map[(os.R_OK, mode)] = r
result_map[(os.W_OK, mode)] = w
result_map[(os.X_OK, mode)] = x
result_map[(os.R_OK | os.W_OK, mode)] = r and w
result_map[(os.R_OK | os.X_OK, mode)] = r and x
result_map[(os.W_OK | os.X_OK, mode)] = w and x
result_map[(os.R_OK | os.W_OK | os.X_OK, mode)] = r and w and x
# somewhat redundant, but an easy way to list all flags
for flags in (os.F_OK, os.R_OK, os.W_OK, os.X_OK,
os.R_OK | os.W_OK, os.R_OK | os.X_OK, os.W_OK | os.X_OK,
os.R_OK | os.W_OK | os.X_OK):
result_map[(flags, None)] = False
fs = ConcreteFilesys()
for (flags, mode) in result_map:
expect = result_map[(flags, mode)]
full_path = self.get_fullname('a')
if mode is not None:
fs.touch(full_path)
fs.chmod(full_path, mode)
assert fs.access(full_path, flags) == expect
if mode is not None:
os.remove(full_path)
@istest
def file_stat(self):
"""
Unit: Filesys Concrete File Stat
Verifies that stating a file gives an object with the correct
attributes.
"""
full_name = self.get_fullname('a')
fs = ConcreteFilesys()
assert not fs.exists(full_name)
fs.touch(full_name)
st_result = fs.stat(full_name)
assert hasattr(st_result, 'st_mode')
assert hasattr(st_result, 'st_ino')
assert hasattr(st_result, 'st_nlink')
assert hasattr(st_result, 'st_uid')
assert hasattr(st_result, 'st_gid')
assert hasattr(st_result, 'st_size')
assert hasattr(st_result, 'st_atime')
assert hasattr(st_result, 'st_mtime')
assert hasattr(st_result, 'st_ctime')
assert st_result.st_uid == os.geteuid()
assert st_result.st_gid == os.getegid()
@istest
def link_stat(self):
"""
Unit: Filesys Concrete Link Stat
Verifies that attempting to stat a link works.
"""
full_name = self.get_fullname('a')
fs = ConcreteFilesys()
assert not fs.exists(full_name)
fs.symlink('nowhere', full_name)
st_result = fs.stat(full_name)
assert hasattr(st_result, 'st_mode')
assert hasattr(st_result, 'st_ino')
assert hasattr(st_result, 'st_nlink')
assert hasattr(st_result, 'st_uid')
assert hasattr(st_result, 'st_gid')
assert hasattr(st_result, 'st_size')
assert hasattr(st_result, 'st_atime')
assert hasattr(st_result, 'st_mtime')
assert hasattr(st_result, 'st_ctime')
assert st_result.st_uid == os.geteuid()
assert st_result.st_gid == os.getegid()
@istest
def file_chmod(self):
"""
Unit: Filesys Concrete File Chmod
Verifies that chmoding a file results in correct stat() results for
various permissions settings.
"""
full_name = self.get_fullname('a')
fs = ConcreteFilesys()
assert not fs.exists(full_name)
fs.touch(full_name)
fs.chmod(full_name, 0o651)
st_result = fs.stat(full_name)
assert hasattr(st_result, 'st_mode')
assert st_result.st_mode & 0o777 == 0o651, oct(st_result.st_mode)
full_name = self.get_fullname('b')
assert not fs.exists(full_name)
fs.touch(full_name)
fs.chmod(full_name, 0o536)
st_result = fs.stat(full_name)
assert hasattr(st_result, 'st_mode')
assert st_result.st_mode & 0o777 == 0o536, oct(st_result.st_mode)
@istest
def file_chown(self):
"""
Unit: Filesys Concrete File Chown
Verifies that file chowns pass through to lchown. Because we cannot
guarantee that the tests are run as root, we have no expectation that a
chown operation will work.
"""
full_name = self.get_fullname('a')
fs = ConcreteFilesys()
fs.touch(full_name)
mock_chown = mock.Mock()
mock_chown.return_value = None
with mock.patch('os.lchown', mock_chown):
fs.chown(full_name, 100, 200)
mock_chown.assert_called_once_with(full_name, 100, 200)
@istest
def dir_walk(self):
"""
Unit: Filesys Concrete Dir Walk
Validates the results of using filesys tooling to create a directory
and walk it.
"""
fs = ConcreteFilesys()
fs.mkdir(self.get_fullname('a/b/c'))
fs.touch(self.get_fullname('a/f1'))
fs.symlink('../f1', self.get_fullname('a/b/l1'))
fs.symlink('..', self.get_fullname('a/b/c/l2'))
results = []
        for (d, sds, files) in fs.walk(self.get_fullname('a')):
            results.append((d, sds, files))
assert len(results) == 3
assert results[0] == (self.get_fullname('a'), ['b'], ['f1'])
assert results[1] == (self.get_fullname('a/b'), ['c'], ['l1'])
assert results[2] == (self.get_fullname('a/b/c'), ['l2'], [])
@istest
def dir_create_bad_permissions_fails(self):
"""
Unit: Filesys Concrete Dir Create Bad Permissions Fails
Creating a directory when the parent directory has bad permissions
should raise an OSError.
"""
fs = ConcreteFilesys()
full_name = self.get_fullname('a')
fs.mkdir(full_name)
fs.chmod(full_name, 0o000)
full_name = self.get_fullname('a/b')
e = ensure_except(OSError, fs.mkdir, full_name)
# must be a permission denied error
assert e.errno == 13
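# --- Illustrative sketch (added; not part of the original tests) ---
# The user-permission bit masks exercised in concrete_access_all_combinations,
# shown in isolation: each bit of the user triplet maps to one access flag.
if __name__ == '__main__':
    mode = 0o500
    print(bool(mode & 0o400), bool(mode & 0o200), bool(mode & 0o100))
    # -> True False True: readable and executable, but not writable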
|
[
"sirosen@uchicago.edu"
] |
sirosen@uchicago.edu
|
da28883c4aa66cf33b380f37e89608dd0689e5b2
|
c6f4498db87c98cb6cbc28b6528b847aa15ad842
|
/Entree_atmospherique/script.py
|
97885710afe315718e604a9e3607f10d923241c0
|
[] |
no_license
|
nuyte/CentreDeControle
|
7a4797f9e62ae4644807152b219e0acd19521cb5
|
0fed9d9add665c2ee22c478262de5214ea9b94d9
|
refs/heads/master
| 2022-07-11T00:03:18.527217
| 2019-08-08T09:08:14
| 2019-08-08T09:08:14
| 192,314,771
| 0
| 0
| null | 2022-06-21T22:19:54
| 2019-06-17T09:18:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,675
|
py
|
import os
import sys
import pdb
import time
import numpy as np
import shutil as sh
import unicodedata
import argparse
import signal
import subprocess as sp
import random as rand
from termcolor import colored
def strip_accents(s):
# to remove accent from a string
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def format_string(s) :
s = strip_accents(s)
return "".join(s.lower().strip().split())
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
def isYon(string) :
# function that checks if the input string is 'oui' or 'non'
s = ''.join(string.lower().strip().split())
if s == 'oui' or s == 'non' or \
s == 'o' or s == 'n':
return True
else :
return False
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
def slowprint(s,color=None,attrs=None,time_scale=0.02):
# time scale is in sec :
for c in s + '\n':
if not color is None:
sys.stdout.write(colored(c,color,attrs=attrs))
else :
# This case allows to use color for single word in a sentence..
sys.stdout.write(c)
sys.stdout.flush() # defeat buffering
time.sleep(rand.random() * time_scale)
return None
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
def displayLoadingBar(length=40,duration=5,color=None) :
# prints a fake loading bar on the terminal
# length defines the length of the loading var in number of characters
# duration defines how much time it will take for the bar to load (in sec)
sys.stdout.write(colored("|%s|" % ("-" * length),color))
sys.stdout.flush()
sys.stdout.write(colored("\b" * (length+1),color)) # return to start of line,
# after '['
dt = float(duration/length)
for i in np.arange(length):
time.sleep(dt)
sys.stdout.write(colored("#",color))
sys.stdout.flush()
sys.stdout.write("\n\n")
return None
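# ----------------------------------------------------------------------------------
# Illustrative usage sketch (added, not part of the original prop script):
# a short 20-character green bar that fills over 2 seconds. Defined only,
# never called, so the interactive flow below is unchanged.
def _demo_loading_bar():
    displayLoadingBar(length=20, duration=2, color='green')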
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
def atmospheric_entry(good_angles) :
# asking coordinate for the atmospheric re-entry :
sp.call(['clear'])
message = "Ce programme a pour but d'établir les trajectoires du Soyouz "+\
"pour rentrer sur Terre en sécurité.\n\n"
slowprint(message,'red')
questions = ["Angle d'entrée de Soyouz dans l'atmosphere (a1):",\
'Angle de descente du Soyouz vers la Terre (a2):']
ok_angles = False
while not ok_angles :
try :
angle1 = input(questions[0]).lower()
angle1 = float(angle1)
except ValueError :
slowprint('Veuillez entrer un nombre','red')
time.sleep(1)
sp.call(['clear'])
print(colored(message,'red'))
continue
try :
angle2 = input(questions[1]).lower()
angle2 = float(angle2)
except ValueError :
slowprint('Veuillez entrer un nombre','red')
time.sleep(1)
sp.call(['clear'])
print(colored(message,'red'))
continue
slowprint('\n Simulation en cours:')
displayLoadingBar(duration=3)
if angle1 == good_angles[0] and angle2 == good_angles[1] :
ok_angles = True
success_message = 'Trajectoire correcte, entrée atmospherique OK\n'
slowprint(success_message,'green')
slowprint('Envoi des informations de vol vers le Soyouz:')
displayLoadingBar(duration=7)
slowprint('Transfert terminé, paré au décollage.')
# to quit the script at the end
# Merci au beau Babak pour cette excellente idée !!
res = input()
while format_string(res) != format_string('quit script') :
res = input()
else :
ok_angles = False
error_message = "Paramètres incorrects, désintégration dans l'atmosphère"
slowprint(error_message, 'red')
time.sleep(1)
sp.call(['clear'])
print(colored(message,'red'))
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Some options for the program.')
parser.add_argument('--test','-t',action='store_true',default=False, \
help='Use simpler inputs for the login and mdp')
args = parser.parse_args()
# to prevent ctrl + c ctrl + z
if not args.test :
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
# locked by password
mdp = 4594 # NGC code of sombrero galaxy, cf galaxy poster in the room
message = "La tentative de hacking du centre de contrôle a bloqué cet ordinateur.\n"+\
"Veuillez entrer le mot de passe (4 chiffres) pour démarer la "+\
"réinitialisation: "
res = "0"
if args.test:
mdp = 0
while True:
sp.call('clear')
slowprint(message, 'white',attrs=['bold'])
res = input("Mot de passe :")
if int(res) != mdp :
slowprint('Mot de passe incorect','red')
time.sleep(1)
else :
slowprint('\nReinitialisation en cours:\n','green')
duration = 10
if args.test : duration = 1
displayLoadingBar(duration=duration)
time.sleep(1)
break
if args.test:
good_angles = [0.,0.]
else :
good_angles = [40.,3.]
atmospheric_entry(good_angles)
|
[
"gdelavieuvil@irap.omp.eu"
] |
gdelavieuvil@irap.omp.eu
|
29a4580f5cd11a8eb6127e55ae286b1281ed1a42
|
19c1fe95876314e134d2d5a0c1be7fa9fa010552
|
/datastore.py
|
3132ad0b10b670a406be4fc11c303a08350b4552
|
[] |
no_license
|
svcastaneda/tack-it
|
2ee52cfec2b9122f9d067f3163221a6da08396c3
|
a3e9fc33111e685421a326c76ca8ed7b152257e7
|
refs/heads/master
| 2021-01-10T18:25:00.493618
| 2015-08-08T03:08:26
| 2015-08-08T03:08:26
| 40,389,213
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
# we need to link emails to Account kind in order to save data
class Account(ndb.Model):
username = ndb.StringProperty()
class Tacs(ndb.Model):
title = ndb.StringProperty()
def delete(self):
return '/delete?id=%s' % self.key.id()
class Note(ndb.Model):
title = ndb.StringProperty()
content = ndb.StringProperty()
def delete(self):
return '/delete?id=%s' % self.key.id()
class Image(ndb.Model):
blobKey = ndb.PickleProperty()
urlString = ndb.StringProperty()
def deleteImage(self):
return '/deleteImage?id=%s' % self.key.id()
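# Illustrative usage sketch (added; assumes a configured App Engine NDB
# context, hence shown as comments):
# note = Note(title='groceries', content='eggs, milk')
# note.put()       # persists the entity and assigns note.key
# note.delete()    # -> '/delete?id=<numeric id>'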
|
[
"svcastaneda@gmail.com"
] |
svcastaneda@gmail.com
|
a7a988e423187bc106cf26e91ded0c5078180e75
|
743d1918178e08d4557abed3a375c583130a0e06
|
/src/util/SetMinus.py
|
0bea8840a11e3084b35940e0154a7834b34b3345
|
[] |
no_license
|
aquablue1/dns_probe
|
2a027c04e0928ec818a82c5bf04f485a883cfcb3
|
edd4dff9bea04092ac76c17c6e77fab63f9f188f
|
refs/heads/master
| 2020-03-25T19:40:07.346354
| 2018-11-17T05:31:43
| 2018-11-17T05:31:43
| 144,094,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
"""
" Get rid of all elements appeared in list A out of list B
" Accept two non-empty lists
" return the minus result of the lists.
" By Zhengping on 2018-08-15
"""
def setMinus(listA, listB):
resultList = []
for elem in listA:
if elem in listB:
continue
resultList.append(elem)
return resultList
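# Illustrative alternative (added): the same result with a set for O(1)
# membership tests, useful when listB is large.
def setMinusFast(listA, listB):
    excluded = set(listB)
    return [elem for elem in listA if elem not in excluded]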
if __name__ == '__main__':
listA = [1,2,3,4]
listB = [2,3,4,5,6]
print(setMinus(listA, listB))
|
[
"94apieceofcake@gmail.com"
] |
94apieceofcake@gmail.com
|
37da748cdd06fa9c7aefb7290dde8c0e7a587774
|
1bcd1933321f161810c615c7c003d688cefb38f2
|
/tests/test_example.py
|
491022e97f601a29142778a04fe2b982f8601d5d
|
[] |
no_license
|
herczy/piano
|
4eef453cf1ac4277e8de9a9f92daee35e7d5b408
|
e27d699f9afa9f95453e7c82122b619ab948b082
|
refs/heads/master
| 2020-06-09T07:26:57.179253
| 2013-07-27T17:09:30
| 2013-07-27T17:09:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from unittest import TestCase
from piano import call_validate, CallValidator, array_of
@CallValidator.decorate
def sum(values : array_of(int)) -> int:
res = 0
for i in values:
res += i
return res
@call_validate
class TestSomething(TestCase):
def test_something(self):
        self.assertEqual(10, sum([1, 2, 3, 4]))
def test_fail(self):
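        # 1.0 is a float, so the array_of(int) annotation on sum() is
        # expected to reject this call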
        self.assertEqual(10, sum([1.0, 2, 3, 4]))
|
[
"herczy@balabit.hu"
] |
herczy@balabit.hu
|
8e212e6f65b5bc088cdf555b3982eb19fa64ff4e
|
e0df2bc703d0d02423ea68cf0b8c8f8d22d5c163
|
/ScientificComputing/ch15/spectrum_full_period.py
|
7892f2bb5fca7b8c3f6f47de6050e5c4c371b3d1
|
[] |
no_license
|
socrates77-sh/learn
|
a5d459cb9847ba3b1bc4f9284ce35d4207d8aa8b
|
ae50978023f6b098b168b8cca82fba263af444aa
|
refs/heads/master
| 2022-12-16T16:53:50.231577
| 2019-07-13T13:52:42
| 2019-07-13T13:52:42
| 168,442,963
| 0
| 0
| null | 2022-12-08T05:18:37
| 2019-01-31T01:30:06
|
HTML
|
UTF-8
|
Python
| false
| false
| 550
|
py
|
# -*- coding: utf-8 -*-
import numpy as np
import pylab as pl
sampling_rate, fft_size = 8000, 512
t = np.arange(0, 1.0, 1.0/sampling_rate)
x = np.sin(2*np.pi*156.25*t) + 2*np.sin(2*np.pi*234.375*t)
xs = x[:fft_size]
xf = np.fft.rfft(xs)/fft_size
freqs = np.linspace(0, sampling_rate/2, fft_size//2+1)
xfp = 20*np.log10(np.clip(np.abs(xf), 1e-20, 1e100))
pl.figure(figsize=(8, 4))
pl.subplot(211)
pl.plot(t[:fft_size], xs)
pl.xlabel(u"时间(秒)")
pl.subplot(212)
pl.plot(freqs, xfp)
pl.xlabel(u"频率(Hz)")
pl.subplots_adjust(hspace=0.4)
pl.show()
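# Illustrative note (added): np.fft.rfft of N real samples returns N//2 + 1
# complex bins, which is why freqs above has fft_size//2 + 1 points:
# assert len(np.fft.rfft(np.zeros(512))) == 512 // 2 + 1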
|
[
"zhwenrong@sina.com"
] |
zhwenrong@sina.com
|
d813c0f26aa8fba371e52282603d485a903e5a63
|
211dee24f530e387f9c72ab13c9ae37ca059f1da
|
/scripts/encode_aruco_tag.py
|
fe12afc55f9c74db3ac08f7871a51d7253b6cbc1
|
[] |
no_license
|
haganelego/aruco_detect
|
d63bdfe22128a1686a2e1297bc0ab2f540aea407
|
305091880fe42984036e2e89a688fd3a4409d9b2
|
refs/heads/master
| 2023-03-23T23:10:11.879902
| 2021-03-17T14:55:26
| 2021-03-17T14:55:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,086
|
py
|
#! /usr/bin/env python
from __future__ import print_function
import csv
import sys
import os
import numpy as np
# This encoding file only works for 5x5 tags.
GRID_SIZE = (5, 5)
def main():
tag_defs, file_names = parse_input()
print('Processing {} tag definitions'.format(len(tag_defs)))
encodings = [create_encodings(definition) for definition in tag_defs]
print(create_cpp_string(encodings, file_names))
def parse_input():
    if len(sys.argv) < 2:
        print_usage()
    tag_defs = []
    file_names = []
    # iterate over a fresh list: removing items from a list while looping
    # over it (as the original code did) silently skips entries
    for arg in sys.argv[1:]:
        try:
            tag_defs.append(process_definition_file(arg))
            file_names.append(arg)
        except ValueError:
            print('Could not process {}'.format(arg))
    print(file_names)
    return tag_defs, file_names
def print_usage():
    usage_string = 'Usage: encode_aruco_tag.py <tag definition files>'
    print(usage_string)
    sys.exit(1)
def process_definition_file(file_name):
if not os.path.exists(file_name):
raise ValueError
grid = np.genfromtxt(file_name, delimiter=',', dtype='uint8')
if grid.shape != GRID_SIZE:
raise ValueError
return grid
def create_encodings(definition):
def process_byte(byte):
return int(sum([num * 2 ** i for i, num in enumerate(reversed(byte))]))
def encode_rotation(tag):
tag = tag.reshape(-1)
return [
process_byte(tag[0:8]),
process_byte(tag[8:16]),
process_byte(tag[16:24]),
tag[24]
]
return [encode_rotation(np.rot90(definition, n)) for n in range(4)]
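# Illustrative note (added): process_byte treats the first bit as the most
# significant, e.g.
# >>> sum(num * 2 ** i for i, num in enumerate(reversed([1, 0, 0, 0, 0, 0, 0, 0])))
# 128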
def create_cpp_string(encodings, file_names):
cpp_string = '{{\n'
for encoding, name in zip(encodings, file_names):
cpp_string += ' //' + name[:-4] + '\n'
for rotation in encoding:
cpp_string += ' {' + ','.join([str(num) for num in rotation]) + '},\n'
cpp_string += '},{\n'
cpp_string = cpp_string[:-4] + '}};'
return cpp_string
if __name__ == "__main__":
main()
|
[
"larsnathanson@gmail.com"
] |
larsnathanson@gmail.com
|
29227c14aecf5733c34cba93849bdcbe50701885
|
6aa2f437f93ee065226188e94fc3d0017948e281
|
/DataGenerator.py
|
1e171a7e5343baf3a7f4cd4cfa8af3d91ac87fda
|
[] |
no_license
|
MaverickMeerkat/TomFonts
|
3925f9b8af6be8a73d415a0f5f69713633dea524
|
8bc7035d835349d036cee8012f45db43714e7d10
|
refs/heads/master
| 2020-12-11T09:48:49.185347
| 2020-01-18T11:34:43
| 2020-01-18T11:34:43
| 233,812,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,613
|
py
|
import keras
import numpy as np
# from keras.utils import HDF5Matrix, to_categorical
class DataGenerator(keras.utils.Sequence):
# Generates data for Keras
def __init__(self, hf, batch_size=62, shuffle=True):
self.hf = hf
self.data_size = len(self.hf) * len(self.hf[0])
self.indexes = np.arange(self.data_size)
self.batch_size = batch_size
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch
return self.data_size // self.batch_size
def __getitem__(self, index):
# Generate one batch of data
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X, y = self.__data_generation(indexes)
return X, y
def on_epoch_end(self):
# Updates indexes after each epoch
if self.shuffle:
np.random.shuffle(self.indexes)
def __data_generation(self, indexes):
fonts = np.zeros((self.batch_size, len(self.hf)))
chars = np.zeros((self.batch_size, len(self.hf[0])))
f_i = indexes // len(self.hf[0])
c_i = indexes % len(self.hf[0])
fonts[np.arange(len(f_i)), f_i] = 1
chars[np.arange(len(c_i)), c_i] = 1
        # build flattened glyph targets for the sampled (font, char) pairs
        targets = np.zeros((self.batch_size, self.hf.shape[2] * self.hf.shape[3]))
        for i in range(self.batch_size):
            targets[i] = self.hf[f_i[i]][c_i[i]].reshape(self.hf.shape[2] * self.hf.shape[3],)
return [fonts, chars], targets
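# Illustrative note (added): the one-hot matrices above rely on NumPy fancy
# indexing; one 1 per row, at that row's column index. For example:
# rows = np.arange(3); cols = np.array([2, 0, 1])
# m = np.zeros((3, 4)); m[rows, cols] = 1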
|
[
"davidrefaeli@gmail.com"
] |
davidrefaeli@gmail.com
|
f3c3e7a8b03b2c7c32486230529d5599c1bd2ffc
|
7e3184ad9c5d964715c4f69fe6e3b19de1cc86be
|
/modnotes/__init__.py
|
ae8e555019b283b4fd1f76d684cfd95ce5371d65
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
entchen66/sinbad3.1
|
acfa053388eacb97defc72ee68f9a11f64d1ecbd
|
3353118b8693c84d5572ab2a7a2278a32be2a76c
|
refs/heads/master
| 2020-12-23T02:31:43.438280
| 2020-02-13T16:56:04
| 2020-02-13T16:56:04
| 237,006,830
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
from redbot.core.errors import CogLoadError
try:
import apsw
except ImportError:
HAS_APSW = False
apsw = None
else:
HAS_APSW = True
def setup(bot):
if not HAS_APSW:
raise CogLoadError("This cog requires `apsw-wheels`.")
else:
from .modnotes import ModNotes
cog = ModNotes(bot)
bot.add_cog(cog)
cog.init()
|
[
"tiegerchris1@web.de"
] |
tiegerchris1@web.de
|
dd403228a5fd0387c765dce75485d51cd467c723
|
904e8e1e59f0354cefd4968e1a88b85882d7e49f
|
/zhcx/nxr_app/utils/nxr_frame.py
|
641678d3c6a71b220547c1e3ad42819141ac6966
|
[] |
no_license
|
cschan279/canbus_comm
|
c17eab4aa677ff161522eb0fdf0e4d554e8ae352
|
c3910a47506aa54f21e357365c981c8c2d4a279e
|
refs/heads/master
| 2022-12-28T07:31:30.314503
| 2020-10-16T02:47:12
| 2020-10-16T02:47:12
| 275,784,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,966
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import struct
#[int(i) for i in struct.pack('f',1.2)].reverse()
#struct.unpack('f', bytearray(x.reverse()))
lockflag = False
def assert_var(var, typ, len_limit):
wn = "Wrong type: {}/{}".format(str(type(var)), str(typ))
assert isinstance(var, typ), wn
wn = "Out of range: 0<{}<{}".format(var, 2**len_limit)
assert var >= 0 and var < 2**len_limit, wn
return
def ls2f(ls):
    ls.reverse()
    # struct.unpack returns a 1-tuple; callers expect the bare float
    return struct.unpack('f', bytearray(ls))[0]
def ls2int(ls):
val = 0
for i in ls:
val *= 256
val += i
return val
def f2ls(var):
res = [int(i) for i in struct.pack('f',var)]
res.reverse()
return res
def assert_lst(lst, length):
assert isinstance(lst, list)
assert len(lst) == length
for i in lst:
assert_var(i, int, 8)
return
def ext_id(ptp=0x0, dst=0xff, src=0xf0, grp=0x0):
val_len = (1,8,8,3)
var = (ptp, dst, src, grp)
res = 0x060
for i in range(4):
assert_var(var[i], int, val_len[i])
res = res << val_len[i]
res += var[i]
return res
def id_ext(id_num):
rest, grp = divmod(id_num, 2**3)
rest, src = divmod(rest, 2**8)
rest, dst = divmod(rest, 2**8)
pro, ptp = divmod(rest, 2**1)
#rest, pro = divmod(rest, 2**9)
return pro, ptp, dst, src, grp
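# Illustrative round-trip check (added): an ID packed by ext_id decodes back
# to its fields with id_ext (pro carries the fixed 0x060 prefix):
# eid = ext_id(ptp=0x1, dst=0x05, src=0xf0, grp=0x3)
# id_ext(eid)  # -> (0x60, 0x1, 0x05, 0xf0, 0x3)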
def data_sect(typ=0x0, cmd=0x0043, dat=[0x00]*4):
assert_var(typ, int, 8)
assert_var(cmd, int, 16)
assert_lst(dat, 4)
cmd0, cmd1 = divmod(cmd, 0x100)
res = [typ]+[0x00]+[cmd0]+[cmd1]+dat
return res
def sendonly(can_dev, eid, dat):
global lockflag
if lockflag:
raise ConnectionError('In use')
else:
lockflag = True
try:
print('Sent:')
printlsHex(id_ext(eid))
printlsHex(dat)
can_dev.send(1, eid, dat)
return True
finally:
lockflag = False
def sendNread(can_dev, eid, dat):
global lockflag
if lockflag:
raise ConnectionError('In use')
else:
lockflag = True
try:
print('Sent:')
printlsHex(id_ext(eid))
printlsHex(dat)
can_dev.send(1, eid, dat)
a, b = can_dev.read(1)
count = 0
while not b and count < 20:
can_dev.send(1, eid, dat)
a, b = can_dev.read(1)
count += 1
finally:
lockflag = False
    if not b:
        raise ConnectionError('No response')
return a, b
def send2get(can_dev, eid, dat):
a, b = sendNread(can_dev, eid, dat)
if not b:
raise ConnectionError('no response')
if b[1] !=0xf0:
print(b)
raise ConnectionError('Invalid Response Frame:', b[1])
if b[0] == 0x41:
fn = ls2f(b[4:])
else:
fn = ls2int(b[4:])
id_ls = id_ext(a)
print('Received:')
printlsHex(id_ls)
printlsHex(b)
return id_ls, [b[0], b[1], ls2int(b[2:4]), fn]
def printlsHex(ls):
ls_out = [hex(i) if isinstance(i, int) else i for i in ls]
print(ls_out)
def req_addr(can_dev):
eid = ext_id(ptp=0x0, dst=0xff, grp=0x03)
dat = data_sect(typ=0x10, cmd=0x0043)
a, b = send2get(can_dev, eid, dat)
print("Result:")
printlsHex(a)
printlsHex(b)
return
def req_volt(can_dev, addr, grp=0x03):
eid = ext_id(ptp=0x1, dst=addr, grp=grp)
dat = data_sect(typ=0x10, cmd=0x0001)
a, b = send2get(can_dev, eid, dat)
print("Result:")
printlsHex(a)
#printlsHex(b)
#print(b[-1])
return b[-1]
def set_volt(can_dev, addr, val=100, grp=0x03):
eid = ext_id(ptp=0x1, dst=addr, grp=grp)
dat = data_sect(typ=0x03, cmd=0x0021, dat=f2ls(val))
ret = sendonly(can_dev, eid, dat)
return
def turn_onoff(can_dev, addr, onoff, grp=0x03):
eid = ext_id(ptp=0x1, dst=addr, grp=grp)
odat = [0,0,0,0] if onoff else [0,1,0,0]
dat = data_sect(typ=0x03, cmd=0x0030, dat=odat)
ret = sendonly(can_dev, eid, dat)
return
|
[
"troychan279@gmail.com"
] |
troychan279@gmail.com
|
7a829ca1f6bcdb2e495c9c3a84d113ea5c92bd97
|
e9538b7ad6d0ce0ccfbb8e10c458f9e0b73926f6
|
/tests/unit/modules/network/onyx/test_onyx_syslog_files.py
|
a9d955e69c98729bde2ec5ab4428524de95bb30a
|
[] |
no_license
|
ansible-collection-migration/misc.not_a_real_collection
|
b3ef8090c59de9ac30aca083c746ec3595d7f5f5
|
7ab1af924a3db4ada2f714b09bb392614344cb1e
|
refs/heads/master
| 2020-12-18T13:48:51.849567
| 2020-01-22T17:39:18
| 2020-01-22T17:39:18
| 235,400,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,825
|
py
|
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.misc.not_a_real_collection.tests.unit.compat.mock import patch
from ansible_collections.misc.not_a_real_collection.plugins.modules import onyx_syslog_files
from ansible_collections.misc.not_a_real_collection.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxSyslogFilesModule(TestOnyxModule):
module = onyx_syslog_files
def setUp(self):
self.enabled = False
super(TestOnyxSyslogFilesModule, self).setUp()
self.mock_get_config = patch.object(
onyx_syslog_files.OnyxSyslogFilesModule, "show_logging")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestOnyxSyslogFilesModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_logging_show.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_syslog_files_force_rotate(self):
set_module_args(dict(rotation=dict(force=True)))
commands = ["logging files rotation force"]
self.execute_module(changed=True, commands=commands)
def test_syslog_files_max_num(self):
set_module_args(dict(rotation=dict(max_num=30)))
commands = ["logging files rotation max-num 30"]
self.execute_module(changed=True, commands=commands)
def test_syslog_files_freq(self):
set_module_args(dict(rotation=dict(frequency="daily")))
commands = ["logging files rotation criteria frequency daily"]
self.execute_module(changed=True, commands=commands)
def test_syslog_files_size(self):
set_module_args(dict(rotation=dict(size=10.5)))
commands = ["logging files rotation criteria size 10.5"]
self.execute_module(changed=True, commands=commands)
def test_syslog_files_delete(self):
set_module_args(dict(delete_group="oldest"))
commands = ["logging files delete oldest"]
self.execute_module(changed=True, commands=commands)
def test_syslog_debug_files_force_rotate(self):
set_module_args(dict(rotation=dict(force=True), debug=True))
commands = ["logging debug-files rotation force"]
self.execute_module(changed=True, commands=commands)
def test_syslog_debug_files_max_num(self):
set_module_args(dict(rotation=dict(max_num=30), debug=True))
commands = ["logging debug-files rotation max-num 30"]
self.execute_module(changed=True, commands=commands)
def test_syslog_debug_files_freq(self):
set_module_args(dict(rotation=dict(frequency="weekly"), debug=True))
commands = ["logging debug-files rotation criteria frequency weekly"]
self.execute_module(changed=True, commands=commands)
def test_syslog_debug_files_size(self):
set_module_args(dict(rotation=dict(size=10.5), debug=True))
commands = ["logging debug-files rotation criteria size 10.5"]
self.execute_module(changed=True, commands=commands)
def test_syslog_debug_files_delete(self):
set_module_args(dict(delete_group="oldest", debug=True))
commands = ["logging debug-files delete oldest"]
self.execute_module(changed=True, commands=commands)
    # no-change cases: the requested settings already match the fixture
def test_syslog_files_max_num_no_change(self):
set_module_args(dict(rotation=dict(max_num=10)))
self.execute_module(changed=False)
def test_syslog_files_freq_no_change(self):
set_module_args(dict(rotation=dict(frequency="weekly")))
self.execute_module(changed=False)
def test_syslog_files_size_no_change(self):
set_module_args(dict(rotation=dict(size_pct=10)))
self.execute_module(changed=False)
def test_syslog_debug_files_max_num_no_change(self):
set_module_args(dict(rotation=dict(max_num=20), debug=True))
self.execute_module(changed=False)
def test_syslog_debug_files_freq_no_change(self):
set_module_args(dict(rotation=dict(frequency="daily"), debug=True))
self.execute_module(changed=False)
def test_syslog_debug_files_size_no_change(self):
set_module_args(dict(rotation=dict(size=20), debug=True))
self.execute_module(changed=False)
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
1f075e007a70cd47c887fbcc9b097a7c1b9bdbb3
|
465fd14b3dd1974fa6ef40294217fb14381ab39f
|
/interface/favorites_interface.py
|
943fc8c6153f46751dd890573afe731266e31377
|
[] |
no_license
|
vit-001/fget3
|
0d4c6d30d8d0f73d7b84feea94b456f0b667ac19
|
dc737ac1c42d825d1511f0a264eb83bec20f1b9e
|
refs/heads/master
| 2021-11-07T07:25:49.666172
| 2021-10-05T06:31:50
| 2021-10-05T06:31:50
| 83,305,656
| 1
| 0
| null | 2021-10-05T06:33:45
| 2017-02-27T12:01:40
|
Python
|
UTF-8
|
Python
| false
| false
| 402
|
py
|
# -*- coding: utf-8 -*-
__author__ = 'Vit'
from data_format.url import URL
from interface.site_interface import SiteInterface
class FavoritesInterface:
def on_exit(self):
pass
def add(self, label:str, url: URL):
pass
def remove(self, url:URL):
pass
def get_favorite_items(self, site: SiteInterface) -> list:
pass
if __name__ == "__main__":
pass
|
[
"vitaliy@inbox.ru"
] |
vitaliy@inbox.ru
|
06077c7edc997754e851741d749ba56ba2be69a9
|
911d824f9f3cd70553b40b2263df45f65242f545
|
/awshelper/cfinstance.py
|
93a5404279f036f43b6212c9bf1b355dadd18e84
|
[] |
no_license
|
fariqizwan/aws-boto-example
|
354c13931aad4f392aac1622b56a10fc9dfb4ea6
|
100309e3888cb2a3dc10911fb8f62e174c3df9e5
|
refs/heads/master
| 2021-01-10T12:09:31.551569
| 2017-09-24T07:34:53
| 2017-09-24T07:34:53
| 45,172,322
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,130
|
py
|
import boto.ec2
from datetime import datetime
from awsconfigs import awsregion,awsaccesskey,awssecretkey
class cmCloudFormInstance(object):
def __init__(self):
self.conn = boto.ec2.connect_to_region(awsregion,aws_access_key_id=awsaccesskey,aws_secret_access_key=awssecretkey)
def convertAWSTimeString(self,awstime):
"""
Convert AWS time string to easier to read format
:type string
:param awstime: time string from aws
        :return Time string like this: '09/03/2015 06:24:37'
"""
awstimeformat = '%Y-%m-%dT%H:%M:%S.000Z'
prefdate_format = '%d/%m/%Y %H:%M:%S'
dt = datetime.strptime(awstime,awstimeformat)
return dt.strftime(prefdate_format)
def get_instance_detail(self,inst_id):
""""
Get detail info of an instance
:type string
:param inst_id: The id of the AWS instance
:return A dictionary that contains these keys: Name,Stack,LaunchOn,Architecture,PrivateIP,PublicIP
"""
try:
awsinstance = self.conn.get_only_instances(instance_ids=inst_id)[0]
inst_dict = {'Name':str(awsinstance.tags['Name']),
'Stack':str(awsinstance.tags['aws:cloudformation:stack-name']),
'LaunchOn':self.convertAWSTimeString(awsinstance.launch_time),
'Architecture':str(awsinstance.architecture),
'PrivateIP':str(awsinstance.private_ip_address),
'PublicIP':str(awsinstance.ip_address),
'State':str(awsinstance.state)}
return inst_dict
except Exception,e:
print e
def start_instances(self,inst_list):
"""
Start the instances specified
:type list
:param A list of strings of the instance IDs to start
:return A list of the instances started
"""
try:
inst_list = self.conn.start_instances(instance_ids=inst_list)
return inst_list
except Exception,e:
print e
def stop_instances(self,inst_list):
"""
Stop instances for the given instance id list
:type list
:param inst_list: A list of strings of the instance IDs to stop
:return A list of the instances stopped
"""
try:
inst_list = self.conn.stop_instances(instance_ids=inst_list)
return inst_list
except Exception,e:
print e
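# Illustrative usage sketch (added; the instance ID below is a made-up
# example, and valid AWS credentials are assumed in awsconfigs):
# cf = cmCloudFormInstance()
# print cf.get_instance_detail('i-0123456789abcdef0')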
|
[
"fariqizwani@EAP34E6D757DA9F.apac.experian.local"
] |
fariqizwani@EAP34E6D757DA9F.apac.experian.local
|
50c9f6a1226bfd1410c4cd10e3f8f6d7eea1d53c
|
3515f7da144963a811efbff9874d31e2e6639d2a
|
/backend/chat/models.py
|
3ce5452f1c2293726fb897433ba99f43418d360f
|
[] |
no_license
|
crowdbotics-apps/rn-ui-22326
|
1974e4c778583e0eb1e202c0e470ba94154dabed
|
09cc1d8e1cd284a32a0477a13f89d985f554908c
|
refs/heads/master
| 2023-01-08T04:57:56.020399
| 2020-11-05T12:47:25
| 2020-11-05T12:47:25
| 310,293,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,152
|
py
|
from django.conf import settings
from django.db import models
class Message(models.Model):
"Generated Model"
message = models.TextField()
thread = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="message_thread",
)
sent_by = models.ForeignKey(
"chat.ThreadMember",
on_delete=models.CASCADE,
related_name="message_sent_by",
)
attachment = models.URLField()
is_draft = models.BooleanField()
is_delivered = models.BooleanField()
is_read = models.BooleanField()
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
timestamp_delivered = models.DateTimeField()
timestamp_read = models.DateTimeField()
class ThreadAction(models.Model):
"Generated Model"
action = models.CharField(
max_length=7,
)
thread = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="threadaction_thread",
)
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="threadaction_profile",
)
timestamp_action = models.DateTimeField(
auto_now_add=True,
)
class MessageAction(models.Model):
"Generated Model"
action = models.CharField(
max_length=7,
)
message = models.ForeignKey(
"chat.Message",
on_delete=models.CASCADE,
related_name="messageaction_message",
)
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="messageaction_profile",
)
timestamp_action = models.DateTimeField(
auto_now_add=True,
)
class ForwardedMessage(models.Model):
"Generated Model"
message = models.ForeignKey(
"chat.Message",
on_delete=models.CASCADE,
related_name="forwardedmessage_message",
)
forwarded_by = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="forwardedmessage_forwarded_by",
)
forwarded_to = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="forwardedmessage_forwarded_to",
)
timestamp_forwarded = models.DateTimeField(
auto_now_add=True,
)
class Thread(models.Model):
"Generated Model"
name = models.CharField(
max_length=255,
)
thread_photo = models.URLField()
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
class ThreadMember(models.Model):
"Generated Model"
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="threadmember_profile",
)
thread = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="threadmember_thread",
)
is_admin = models.BooleanField()
timestamp_joined = models.DateTimeField(
auto_now_add=True,
)
timestamp_left = models.DateTimeField()
last_rejoined = models.DateTimeField()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
30d14a92e0de00f34b836e65ba17682093fbc79b
|
06c06465ef0b7c3e080859bf9c161155349a4d0c
|
/oldscript/base_encoded.py
|
75d80d3c17cd1737e1a76004a5e75154a6d13e38
|
[] |
no_license
|
Flecart/Olicyber2021
|
e296ea54566c7912c939e4e1119b669cd6f37a32
|
3fb161a0ce8ce1c4269aae3290c5588ce621f638
|
refs/heads/main
| 2023-06-22T12:05:25.184420
| 2021-07-21T09:23:05
| 2021-07-21T09:23:05
| 388,059,404
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,872
|
py
|
#! /bin/python3
# nc based.challs.olicyber.it 10600
from pwn import *
import json
import base64
# 65-90
# 97-122
# 146330605473666206330630144146575643203056327630156276630603445756314275063276665622766106033462175
# Prende la risposta, la trasforma in JSON e la manda al server
def trimAndSend(line):
dict= {"answer": line}
js = bytes(str(json.dumps(dict)), 'utf-8')
conn.sendline(js)
conn = remote("based.challs.olicyber.it", 10600)
# These calls just consume the initial lines
conn.recvline()
# They print nothing
i = 0
while(True):
conn.recvline()
first = conn.recvline()
if first != b'Ottimo!\n' and i > 1:
print(first)
break
conn.recvline()
    # The three lines with the instructions
    firstline = conn.recvline()
    jsline = conn.recvline()
    # print("I am a debug line", jsline)
    secondline = conn.recvline()
    # Extracts the operation (and direction) from the line
    temp = firstline.split()
    operation = temp[3][0:-1]
    andata = False
    if temp[2] == b'a':
        andata = True
    js = json.loads(jsline)
    data = js["message"]
    if operation == b'base64' and andata: # tested, works up to the debug print
        message_bytes = data.encode('ascii')
        base64_bytes = base64.b64encode(message_bytes)
        answer = base64_bytes.decode('ascii')
        trimAndSend(answer)
    elif operation == b'base64' and not andata: # tested, works up to the debug print
        message_bytes = data.encode('ascii')
        base64_bytes = base64.b64decode(message_bytes)
        answer = base64_bytes.decode('ascii')
        trimAndSend(answer)
    elif operation == b'binari' and andata: # tested, works up to the debug print
        array = [x for x in bytearray(js["message"], 'utf-8')]
        bytess = [format(x, 'b') for x in array]
        answer = ''
        for x in bytess:
            if len(x) < 8:
                x += "0" * (8 - len(x))
            answer += x
        if (answer[len(answer)-1] == '0'):
            answer = answer[0:-1]
        trimAndSend(answer)
    elif operation == b'binario' and not andata: # odd results here, with non-representable bytes
        data = "0" + data
        n = 8
        listaBytes = ["0b" + data[i:i+n] for i in range(0, len(data), n)]
        answer = ''
        for binario in listaBytes:
            answer += chr(int(binario,2))
        trimAndSend(answer)
    elif operation == b'esadecimale' and andata: # tested, works up to the debug print
        answer = ''.join(hex(ord(x))[2:] for x in data)
        trimAndSend(answer)
    elif operation == b'esadecimale' and not andata: # tested, works up to the debug print
        answer = ''.join([chr(int(''.join(c), 16)) for c in zip(data[0::2],data[1::2])])
        trimAndSend(answer)
    i += 1
print(conn.recvline())
print(conn.recvline())
print(conn.recvline())
print(conn.recvline())
print(conn.recvline())
print(conn.recvline())
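# Illustrative note (added): the hex decode branch above pairs characters two
# at a time via zip on the even/odd slices; e.g. for '6869':
# ''.join(chr(int(a + b, 16)) for a, b in zip('6869'[0::2], '6869'[1::2]))  # -> 'hi'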
|
[
"huangelo02@gmail.com"
] |
huangelo02@gmail.com
|
9546f02424ed3520814abf573f853d29013498da
|
c9cceb72e8d076b75a266efa89cd315f81c7f890
|
/code/grid_kmedoids_destination.py
|
d974e700ccc077857d13b212c569679ca8f20bbb
|
[] |
no_license
|
qyn0729/Food-delivery-data-analysis
|
35259b4dce8811e7b94f2fa0da7a05354d630fa5
|
4e19eb92061c9f0ac2b7ba0209e1cb2054551608
|
refs/heads/master
| 2023-04-20T15:39:47.613813
| 2021-05-05T04:11:32
| 2021-05-05T04:11:32
| 364,457,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,451
|
py
|
import numpy as np
import csv
from sklearn_extra.cluster import KMedoids
import tslearn.metrics as metrics
from tslearn.clustering import silhouette_score
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
from tslearn.generators import random_walks
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from itertools import islice
import math
import matplotlib.ticker as mtick
from matplotlib.collections import LineCollection
# Elbow method to find the best number of clusters; result here: elbow = 5
def test_elbow(X, dtw_value, seed):
print(len(X))
distortions = []
silhouette_value = []
dists = dtw_value
print(dists)
if seed == -1:
for seed in range(0, 21):
cur_silhouette = [seed]
cur_distortions = [seed]
for i in range(2, 15):
print(i)
km = KMedoids(n_clusters=i, random_state=seed, metric="precomputed", init='k-medoids++', max_iter=30000)
km.fit(dists)
                # record the inertia (sum of distances to the medoids)
cur_distortions.append(km.inertia_)
y_pred = km.fit_predict(dists)
np.fill_diagonal(dists, 0)
score = silhouette_score(dists, y_pred, metric="precomputed")
cur_silhouette.append(score)
distortions.append(cur_distortions)
silhouette_value.append(cur_silhouette)
with open(r".//res//grid_distortions_destination.csv", "w",encoding='UTF-8', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in distortions:
writer.writerow(row)
print(row)
with open(r".//res//grid_silhouette_destination.csv", "w",encoding='UTF-8', newline='') as csvfile:
writer = csv.writer(csvfile)
for row in silhouette_value:
writer.writerow(row)
print(row)
else:
csv_reader = csv.reader(open(".//res//grid_distortions_destination.csv", encoding='UTF-8'))
for row in csv_reader:
distortions.append([float(item) for item in row])
csv_reader = csv.reader(open(".//res//grid_silhouette_destination.csv", encoding='UTF-8'))
for row in csv_reader:
silhouette_value.append([float(item) for item in row])
chosen_distortions = distortions[seed][1:]
chosen_silhouette = silhouette_value[seed][1:]
plt.figure(1)
plt.plot(range(2, 15), chosen_distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.savefig(r'.//res//grid_distortions_destination.png')
plt.close()
plt.figure(1)
plt.bar(range(2, 15), chosen_silhouette, color='grey')
plt.xlabel('Number of clusters')
plt.ylabel('Silhouette score')
plt.savefig(r'.//res//grid_silhouette_destination.png')
def test_kmedoids(dtw_value, cluster_num, seed):
    # metric='precomputed' lets us pass the custom (DTW) distance matrix directly
km = KMedoids(n_clusters=cluster_num, random_state=seed, metric="precomputed", init='k-medoids++', max_iter=30000)
dists = dtw_value
y_pred = km.fit_predict(dists)
with open(r".//res//grid_pred_d"+str(cluster_num)+".csv", "w", encoding='UTF-8', newline='') as csvfile:
writer = csv.writer(csvfile)
index = 0
for row in y_pred:
writer.writerow([row])
index += 1
with open(r".//res//grid_centroids_d"+str(cluster_num)+".csv", "w", encoding='UTF-8', newline='') as csvfile:
writer = csv.writer(csvfile)
for yi in range(cluster_num):
writer.writerow([km.medoid_indices_[yi]])
print('finish')
def kmedoids(func, cluster_num, seed):
X = []
for i in range(0, 29500, 500):
csv_reader = csv.reader(open("temp_finish" + str(i) + ".csv", encoding='UTF-8'))
for row in csv_reader:
X.append(row)
roadnet_num = len(X)
dtw_value = np.zeros((roadnet_num, roadnet_num), dtype='float32')
for i in range(len(X)):
for j in range(len(X[i])):
if j > i:
dtw_value[i][j] = X[i][j]
else:
dtw_value[i][j] = X[j][i]
if func == 1:
test_elbow(X, dtw_value, seed)
else:
test_kmedoids(dtw_value, cluster_num, seed)
def draw_result(cluster_num, normalized):
classes = {}
class_cnt = [0] * cluster_num
dict = {}
ave_weekday = {}
ave_sat = {}
ave_sun = {}
for i in range(cluster_num):
ave_weekday[i] = [0] * 24
ave_sat[i] = [0] * 24
ave_sun[i] = [0] * 24
index = 0
csv_reader = csv.reader(
open(r"D:\aMyFile\Data\Takeout\MyData\mine\kmedoids\grid\grid_centroids_d" +str(cluster_num) + ".csv"))
for row in csv_reader:
dict[int(row[0])] = index
index += 1
index = 0
csv_reader = csv.reader(
open(r"D:\aMyFile\Data\Takeout\MyData\mine\kmedoids\grid\grid_pred_d" + str(cluster_num) + ".csv"))
for row in csv_reader:
classes[index] = int(row[0])
class_cnt[int(row[0])] += 1
index += 1
print(index)
if normalized == 0:
csv_reader = csv.reader(open(r"D:\aMyFile\Data\Takeout\MyData\mine\destination_count.csv"))
else:
csv_reader = csv.reader(open(r"D:\aMyFile\Data\Takeout\MyData\mine\normalized_destination_count.csv"))
index = 0
series = {}
data = {}
for row in islice(csv_reader, 2, None):
cur_class = classes[index]
cur_data = [float(item) for item in row[2:]]
for i in range(24):
ave_weekday[cur_class][i] += cur_data[i] / class_cnt[cur_class]
for i in range(24):
ave_sat[cur_class][i] += cur_data[i + 24] / class_cnt[cur_class]
for i in range(24):
ave_sun[cur_class][i] += cur_data[i + 48] / class_cnt[cur_class]
index += 1
if normalized == 1:
with open(r"D:\aMyFile\Data\Takeout\MyData\mine\kmedoids\grid\grid_d" + str(cluster_num) + "_normalized_ave.csv", "w", encoding='UTF-8', newline='') as csvfile:
writer = csv.writer(csvfile)
for i in range(24):
row = []
for j in range(cluster_num):
row.append(ave_weekday[j][i])
row.append(ave_sat[j][i])
row.append(ave_sun[j][i])
writer.writerow(row)
else:
with open(r"D:\aMyFile\Data\Takeout\MyData\mine\kmedoids\grid\grid_d" + str(cluster_num) + "_ave.csv", "w", encoding='UTF-8', newline='') as csvfile:
writer = csv.writer(csvfile)
for i in range(24):
row = []
for j in range(cluster_num):
row.append(ave_weekday[j][i])
row.append(ave_sat[j][i])
row.append(ave_sun[j][i])
writer.writerow(row)
# hour = np.array(range(24))
# plt.figure(1)
# # plt.figure(dpi=300, figsize=(24, 8))
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.5)
#
# for i in range(cluster_num):
# ax1 = plt.subplot(2, math.ceil(cluster_num / 2), 1 + i)
# ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2f'))
# plt.xticks([0, 6, 12, 18, 23])
# ax1.set_xticklabels(['0:00\nOrder Finish Time', '6:00', '12:00', '18:00', '23:00'], rotation=0)
# ax1.set_ylabel('Order Count')
# ax1.plot(hour, ave_weekday[i], "blue", label='Weekdays')
# ax1.plot(hour, ave_sat[i], "#f48b29", label='Saturday')
# ax1.plot(hour, ave_sun[i], "#f0c929", label='Sunday')
# ax1.legend()
# plt.grid(linestyle='--')
# plt.title("Cluster " + str(i + 1), fontsize=18)
# if normalized == 1:
# plt.ylim(0, 1)
# if normalized == 0:
# plt.savefig(r"D:\aMyFile\Data\Takeout\MyData\mine\kmedoids\grid\grid_d" + str(cluster_num) + "_ave.png",bbox_inches='tight')
# else:
# plt.savefig(r"D:\aMyFile\Data\Takeout\MyData\mine\kmedoids\grid\grid_d" + str(
#             cluster_num) + "_normalized_ave.png", bbox_inches='tight')
# plt.show()
# def kmedoids(func, cluster_num, seed)
# ----------------------
# kmedoids(1, 3, 18)
# kmedoids(2, 2, 18)
# kmedoids(2, 3, 18)
# def draw_result(cluster_num, normalized):
# ----------------------
draw_result(2, 0)
draw_result(2, 1)
# draw_result(8, 0)
# draw_result(8, 1)
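# --- Editor's note: hedged usage sketch, not part of the original file. ---
# The functions above all consume a precomputed DTW distance matrix. A minimal
# way to build one with tslearn (already imported as `metrics`), on synthetic
# data of hypothetical shape (n_series, series_length):
def _dtw_matrix_sketch(n_series=10, series_length=24):
    series = np.random.rand(n_series, series_length)  # hypothetical daily profiles
    dists = metrics.cdist_dtw(series)                 # symmetric pairwise DTW matrix
    return dists.astype('float32')                    # dtype used by kmedoids() above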
|
[
"674217529@qq.com"
] |
674217529@qq.com
|
7d4898d1d6a66301d4a9eba76a7f075b6447387b
|
4420a643a79d0166b9637e80f496f414c1dbb76a
|
/p01/osc_quest.py
|
f0b85a62220ce4c0f5e21ac8bcbf32990dcb5baa
|
[] |
no_license
|
raghuramos1987/solexa_pipeline
|
aa34379cc538d8bdcccb1ef0749c008ea815b12e
|
f87b82088c1a8f66b26916f2e7b43fc7fe99348f
|
refs/heads/master
| 2016-09-06T16:22:30.214204
| 2012-08-28T18:07:17
| 2012-08-28T18:07:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 496
|
py
|
#!/usr/bin/env python
######################################################################
# File Name :
# Purpose :
# Author : Raghuram Onti Srinivasan
# Email : onti@cse.ohio-state.edu
######################################################################
import config
import sys
conf_obj = config.Parameters(sys.argv[1])
if int(sys.argv[3]) == 0:
conf_obj.server.UpdateRunStatusCompressingResults(sys.argv[2])
else:
conf_obj.server.UpdateRunStatusRsyncQuestResults(sys.argv[2])
|
[
"ros@ros.(none)"
] |
ros@ros.(none)
|
35ce73700006367e65389cd8b8a919bb33359d33
|
53f35cc7910a7d505d46dc7b6ac3c06b2e2e2a37
|
/network_morphogenesis/src/evo/community.py
|
924e3a8f09427142ef52a3c85c79a8304dd8654c
|
[] |
no_license
|
paulrt/morphogenesis_network
|
77374caef20a432a501016342d235d061dcb6d50
|
9f196c6a6255e884a3d205d2e268dc30a6df426d
|
refs/heads/master
| 2020-09-19T22:35:43.372209
| 2015-05-04T15:29:14
| 2015-05-04T15:29:14
| 224,313,765
| 1
| 0
| null | 2019-11-27T00:51:24
| 2019-11-27T00:51:24
| null |
UTF-8
|
Python
| false
| false
| 17,584
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module implements community detection.
"""
__all__ = ["partition_at_level", "modularity", "best_partition", "generate_dendogram", "induced_graph"]
__author__ = """Thomas Aynaud (thomas.aynaud@lip6.fr)"""
# Copyright (C) 2009 by
# Thomas Aynaud <thomas.aynaud@lip6.fr>
# All rights reserved.
# BSD license.
__PASS_MAX = -1
__MIN = 0.0000001
import networkx as nx
import sys
import types
import array
def partition_at_level(dendogram, level) :
"""Return the partition of the nodes at the given level
A dendogram is a tree and each level is a partition of the graph nodes.
    Level 0 is the first partition, which contains the smallest communities, and the best is len(dendogram) - 1.
The higher the level is, the bigger are the communities
Parameters
----------
dendogram : list of dict
a list of partitions, ie dictionnaries where keys of the i+1 are the values of the i.
level : int
the level which belongs to [0..len(dendogram)-1]
Returns
-------
partition : dictionnary
A dictionary where keys are the nodes and the values are the set it belongs to
Raises
------
KeyError
If the dendogram is not well formed or the level is too high
See Also
--------
best_partition which directly combines partition_at_level and generate_dendogram to obtain the partition of highest modularity
Examples
--------
>>> G=nx.erdos_renyi_graph(100, 0.01)
>>> dendo = generate_dendogram(G)
>>> for level in range(len(dendo) - 1) :
>>> print "partition at level", level, "is", partition_at_level(dendo, level)
"""
partition = dendogram[0].copy()
for index in range(1, level + 1) :
for node, community in partition.iteritems() :
partition[node] = dendogram[index][community]
return partition
def modularity(partition, graph) :
"""Compute the modularity of a partition of a graph
Parameters
----------
partition : dict
the partition of the nodes, i.e a dictionary where keys are their nodes and values the communities
graph : networkx.Graph
the networkx graph which is decomposed
Returns
-------
modularity : float
The modularity
Raises
------
KeyError
If the partition is not a partition of all graph nodes
ValueError
If the graph has no link
TypeError
If graph is not a networkx.Graph
References
----------
.. 1. Newman, M.E.J. & Girvan, M. Finding and evaluating community structure in networks. Physical Review E 69, 26113(2004).
Examples
--------
>>> G=nx.erdos_renyi_graph(100, 0.01)
>>> part = best_partition(G)
>>> modularity(part, G)
"""
if type(graph) != nx.Graph :
raise TypeError("Bad graph type, use only non directed graph")
inc = dict([])
deg = dict([])
links = graph.size(weight='weight')
if links == 0 :
raise ValueError("A graph without link has an undefined modularity")
for node in graph :
com = partition[node]
deg[com] = deg.get(com, 0.) + graph.degree(node, weight = 'weight')
for neighbor, datas in graph[node].iteritems() :
weight = datas.get("weight", 1)
if partition[neighbor] == com :
if neighbor == node :
inc[com] = inc.get(com, 0.) + float(weight)
else :
inc[com] = inc.get(com, 0.) + float(weight) / 2.
res = 0.
for com in set(partition.values()) :
res += (inc.get(com, 0.) / links) - (deg.get(com, 0.) / (2.*links))**2
return res
def best_partition(graph, partition = None) :
"""Compute the partition of the graph nodes which maximises the modularity
    (or tries to) using the Louvain heuristics
This is the partition of highest modularity, i.e. the highest partition of the dendogram
generated by the Louvain algorithm.
Parameters
----------
graph : networkx.Graph
the networkx graph which is decomposed
partition : dict, optionnal
the algorithm will start using this partition of the nodes. It's a dictionary where keys are their nodes and values the communities
Returns
-------
partition : dictionnary
The partition, with communities numbered from 0 to number of communities
Raises
------
    TypeError
        If the graph is not a networkx.Graph
See Also
--------
generate_dendogram to obtain all the decompositions levels
Notes
-----
Uses Louvain algorithm
References
----------
.. 1. Blondel, V.D. et al. Fast unfolding of communities in large networks. J. Stat. Mech 10008, 1-12(2008).
Examples
--------
>>> #Basic usage
>>> G=nx.erdos_renyi_graph(100, 0.01)
>>> part = best_partition(G)
>>> #other example to display a graph with its community :
>>> #better with karate_graph() as defined in networkx examples
>>> #erdos renyi don't have true community structure
>>> G = nx.erdos_renyi_graph(30, 0.05)
>>> #first compute the best partition
>>> partition = community.best_partition(G)
>>> #drawing
>>> size = float(len(set(partition.values())))
>>> pos = nx.spring_layout(G)
>>> count = 0.
>>> for com in set(partition.values()) :
>>> count = count + 1.
>>> list_nodes = [nodes for nodes in partition.keys()
>>> if partition[nodes] == com]
>>> nx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20,
node_color = str(count / size))
>>> nx.draw_networkx_edges(G,pos, alpha=0.5)
>>> plt.show()
"""
dendo = generate_dendogram(graph, partition)
return partition_at_level(dendo, len(dendo) - 1 )
def generate_dendogram(graph, part_init = None) :
"""Find communities in the graph and return the associated dendogram
A dendogram is a tree and each level is a partition of the graph nodes. Level 0 is the first partition, which contains the smallest communities, and the best is len(dendogram) - 1. The higher the level is, the bigger are the communities
Parameters
----------
graph : networkx.Graph
the networkx graph which will be decomposed
part_init : dict, optionnal
the algorithm will start using this partition of the nodes. It's a dictionary where keys are their nodes and values the communities
Returns
-------
dendogram : list of dictionaries
a list of partitions, ie dictionnaries where keys of the i+1 are the values of the i. and where keys of the first are the nodes of graph
Raises
------
TypeError
If the graph is not a networkx.Graph
See Also
--------
best_partition
Notes
-----
Uses Louvain algorithm
References
----------
.. 1. Blondel, V.D. et al. Fast unfolding of communities in large networks. J. Stat. Mech 10008, 1-12(2008).
Examples
--------
>>> G=nx.erdos_renyi_graph(100, 0.01)
>>> dendo = generate_dendogram(G)
>>> for level in range(len(dendo) - 1) :
>>> print "partition at level", level, "is", partition_at_level(dendo, level)
"""
if graph.is_directed() :
raise TypeError("Bad graph type, use only non directed graph")
#special case, when there is no link
#the best partition is everyone in its community
if graph.number_of_edges() == 0 :
part = dict([])
for node in graph.nodes() :
part[node] = node
return [part]
current_graph = graph.copy()
status = Status()
status.init(current_graph, part_init)
mod = __modularity(status)
status_list = list()
__one_level(current_graph, status)
new_mod = __modularity(status)
partition = __renumber(status.node2com)
status_list.append(partition)
mod = new_mod
current_graph = induced_graph(partition, current_graph)
status.init(current_graph)
while True :
__one_level(current_graph, status)
new_mod = __modularity(status)
if new_mod - mod < __MIN :
break
partition = __renumber(status.node2com)
status_list.append(partition)
mod = new_mod
current_graph = induced_graph(partition, current_graph)
status.init(current_graph)
return status_list[:]
def induced_graph(partition, graph) :
"""Produce the graph where nodes are the communities
there is a link of weight w between communities if the sum of the weights of the links between their elements is w
Parameters
----------
partition : dict
a dictionary where keys are graph nodes and values the part the node belongs to
graph : networkx.Graph
the initial graph
Returns
-------
g : networkx.Graph
a networkx graph where nodes are the parts
Examples
--------
>>> n = 5
>>> g = nx.complete_graph(2*n)
>>> part = dict([])
>>> for node in g.nodes() :
>>> part[node] = node % 2
>>> ind = induced_graph(part, g)
>>> goal = nx.Graph()
>>> goal.add_weighted_edges_from([(0,1,n*n),(0,0,n*(n-1)/2), (1, 1, n*(n-1)/2)])
    >>> nx.is_isomorphic(ind, goal)
True
"""
ret = nx.Graph()
ret.add_nodes_from(partition.values())
for node1, node2, datas in graph.edges_iter(data = True) :
weight = datas.get("weight", 1)
com1 = partition[node1]
com2 = partition[node2]
w_prec = ret.get_edge_data(com1, com2, {"weight":0}).get("weight", 1)
ret.add_edge(com1, com2, weight = w_prec + weight)
return ret
def __renumber(dictionary) :
"""Renumber the values of the dictionary from 0 to n
"""
count = 0
ret = dictionary.copy()
new_values = dict([])
for key in dictionary.keys() :
value = dictionary[key]
new_value = new_values.get(value, -1)
if new_value == -1 :
new_values[value] = count
new_value = count
count = count + 1
ret[key] = new_value
return ret
def __load_binary(data) :
"""Load binary graph as used by the cpp implementation of this algorithm
"""
if type(data) == types.StringType :
data = open(data, "rb")
reader = array.array("I")
reader.fromfile(data, 1)
num_nodes = reader.pop()
reader = array.array("I")
reader.fromfile(data, num_nodes)
cum_deg = reader.tolist()
num_links = reader.pop()
reader = array.array("I")
reader.fromfile(data, num_links)
links = reader.tolist()
graph = nx.Graph()
graph.add_nodes_from(range(num_nodes))
prec_deg = 0
for index in range(num_nodes) :
last_deg = cum_deg[index]
neighbors = links[prec_deg:last_deg]
graph.add_edges_from([(index, int(neigh)) for neigh in neighbors])
prec_deg = last_deg
return graph
def __one_level(graph, status) :
"""Compute one level of communities
"""
modif = True
nb_pass_done = 0
cur_mod = __modularity(status)
new_mod = cur_mod
while modif and nb_pass_done != __PASS_MAX :
cur_mod = new_mod
modif = False
nb_pass_done += 1
for node in graph.nodes() :
com_node = status.node2com[node]
degc_totw = status.gdegrees.get(node, 0.) / (status.total_weight*2.)
neigh_communities = __neighcom(node, graph, status)
__remove(node, com_node,
neigh_communities.get(com_node, 0.), status)
best_com = com_node
best_increase = 0
for com, dnc in neigh_communities.iteritems() :
incr = dnc - status.degrees.get(com, 0.) * degc_totw
if incr > best_increase :
best_increase = incr
best_com = com
__insert(node, best_com,
neigh_communities.get(best_com, 0.), status)
if best_com != com_node :
modif = True
new_mod = __modularity(status)
if new_mod - cur_mod < __MIN :
break
class Status :
"""
To handle several data in one struct.
Could be replaced by named tuple, but don't want to depend on python 2.6
"""
node2com = {}
total_weight = 0
internals = {}
degrees = {}
gdegrees = {}
def __init__(self) :
self.node2com = dict([])
self.total_weight = 0
self.degrees = dict([])
self.gdegrees = dict([])
self.internals = dict([])
self.loops = dict([])
def __str__(self) :
return ("node2com : " + str(self.node2com) + " degrees : "
+ str(self.degrees) + " internals : " + str(self.internals)
+ " total_weight : " + str(self.total_weight))
def copy(self) :
"""Perform a deep copy of status"""
new_status = Status()
new_status.node2com = self.node2com.copy()
new_status.internals = self.internals.copy()
new_status.degrees = self.degrees.copy()
new_status.gdegrees = self.gdegrees.copy()
        new_status.total_weight = self.total_weight
        return new_status
def init(self, graph, part = None) :
"""Initialize the status of a graph with every node in one community"""
count = 0
self.node2com = dict([])
self.total_weight = 0
self.degrees = dict([])
self.gdegrees = dict([])
self.internals = dict([])
self.total_weight = graph.size(weight = 'weight')
if part == None :
for node in graph.nodes() :
self.node2com[node] = count
deg = float(graph.degree(node, weight = 'weight'))
if deg < 0 :
raise ValueError("Bad graph type, use positive weights")
self.degrees[count] = deg
self.gdegrees[node] = deg
self.loops[node] = float(graph.get_edge_data(node, node,default={"weight":0}).get("weight", 1))
self.internals[count] = self.loops[node]
count = count + 1
else :
for node in graph.nodes() :
com = part[node]
self.node2com[node] = com
                deg = float(graph.degree(node, weight = 'weight'))
self.degrees[com] = self.degrees.get(com, 0) + deg
self.gdegrees[node] = deg
inc = 0.
for neighbor, datas in graph[node].iteritems() :
weight = datas.get("weight", 1)
if weight <= 0 :
raise ValueError("Bad graph type, use positive weights")
if part[neighbor] == com :
if neighbor == node :
inc += float(weight)
else :
inc += float(weight) / 2.
self.internals[com] = self.internals.get(com, 0) + inc
def __neighcom(node, graph, status) :
"""
Compute the communities in the neighborood of node in the graph given
with the decomposition node2com
"""
weights = {}
for neighbor, datas in graph[node].iteritems() :
if neighbor != node :
weight = datas.get("weight", 1)
neighborcom = status.node2com[neighbor]
weights[neighborcom] = weights.get(neighborcom, 0) + weight
return weights
def __remove(node, com, weight, status) :
""" Remove node from community com and modify status"""
status.degrees[com] = ( status.degrees.get(com, 0.)
- status.gdegrees.get(node, 0.) )
status.internals[com] = float( status.internals.get(com, 0.) -
weight - status.loops.get(node, 0.) )
status.node2com[node] = -1
def __insert(node, com, weight, status) :
""" Insert node into community and modify status"""
status.node2com[node] = com
status.degrees[com] = ( status.degrees.get(com, 0.) +
status.gdegrees.get(node, 0.) )
status.internals[com] = float( status.internals.get(com, 0.) +
weight + status.loops.get(node, 0.) )
def __modularity(status) :
"""
    Compute the modularity of the partition of the graph quickly, using the precomputed status
"""
links = float(status.total_weight)
result = 0.
for community in set(status.node2com.values()) :
in_degree = status.internals.get(community, 0.)
degree = status.degrees.get(community, 0.)
if links > 0 :
result = result + in_degree / links - ((degree / (2.*links))**2)
return result
def __main() :
"""Main function to mimic C++ version behavior"""
try :
filename = sys.argv[1]
graphfile = __load_binary(filename)
partition = best_partition(graphfile)
print >> sys.stderr, str(modularity(partition, graphfile))
for elem, part in partition.iteritems() :
print str(elem) + " " + str(part)
except (IndexError, IOError):
print "Usage : ./community filename"
print "find the communities in graph filename and display the dendogram"
print "Parameters:"
print "filename is a binary file as generated by the "
print "convert utility distributed with the C implementation"
if __name__ == "__main__" :
__main()
|
[
"titanjdemoniak@hotmail.com"
] |
titanjdemoniak@hotmail.com
|
337746a94e224bc10c4f73315b23aab5796f0e52
|
9629dfc1d0ead0a6595b01c39c24fd86acc63f68
|
/venv/Scripts/easy_install-3.7-script.py
|
ddc0045e2e815089f6e83870213ca9b09d9f1985
|
[] |
no_license
|
zeriky/Olofofo
|
9d7e785e4e4f747681c979b022562d2b333cefb5
|
effd2e97e7e990e602b346b04cc480397720b9e4
|
refs/heads/master
| 2022-11-19T16:55:59.299725
| 2020-07-22T11:30:32
| 2020-07-22T11:30:32
| 281,662,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
#!C:\Users\seriki\PycharmProjects\ranti\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
[
"tanniseriki1@gmail.com"
] |
tanniseriki1@gmail.com
|
2a2d14293dcce2075af26c7c590b753bf7f2e182
|
9a278766d210b92d06a46321303481e86fa5811d
|
/test_single_csv.py
|
6494dd93499a53baf3877964921283ddfbb8b1de
|
[] |
no_license
|
fvmassoli/deep-acoustic-modeling
|
64662b9299bb71b549b4a90e8090a147f24387dc
|
2de174642d634743dc2a32bb2518808f3fec469e
|
refs/heads/master
| 2020-12-20T18:57:01.763017
| 2020-02-15T15:55:52
| 2020-02-15T15:55:52
| 236,178,953
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,380
|
py
|
import argparse
import numpy as np
import pandas as pd
from model_manager import ModelManager
from utils import get_model_info_from_fname
import torch
import torch.nn as nn
idx_to_class = {0: 'di',
1: 'dje',
2: 'do',
3: 'due',
4: 'dze',
5: 'kwa',
6: 'kwan',
7: 'kwe',
8: 'kwin',
9: 'la',
10: 'lle',
11: 'mi',
12: 'nno',
13: 'no',
14: 'o',
15: 'ran',
16: 'ro',
17: 'se',
18: 'sei',
19: 'sil',
20: 'sp',
21: 'ssan',
22: 'sse',
23: 'tSa',
24: 'tSen',
25: 'tSi',
26: 'tSin',
27: 'tSo',
28: 'ta',
29: 'ti',
30: 'to',
31: 'tre',
32: 'tren',
33: 'ttan',
34: 'tte',
35: 'tto',
36: 'ttor',
37: 'ttro',
38: 'tu',
39: 'u',
40: 'un',
41: 'van',
42: 've',
43: 'ven'}
def test(args):
architecture, hidden_size, bidir, out_features, windows, dropout = get_model_info_from_fname(args.checkpointPath.split('/')[-2])
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_manager = ModelManager(n_classes=44,
out_channels=out_features,
hidden=hidden_size,
bidir=bidir,
dropout=dropout,
architecture=architecture,
windows=windows,
load_ckt=args.loadModelCkt,
ckt_path=args.checkpointPath,
device=device)
model_manager.set_eval_mode()
test_ds = torch.from_numpy(pd.read_csv(args.csvFilePath).to_numpy()).float()[np.newaxis, :]
output, loss = model_manager.forward(test_ds, None, nn.CrossEntropyLoss)
output = output.detach().cpu().numpy()
exp_scores = np.exp(output - np.max(output, axis=1, keepdims=True))
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
output = output.squeeze()
probs = probs.squeeze()
file = open(args.outputFile, 'w')
str_ = 'Most probable class: {} --- with prob: {}\n'.format(idx_to_class[np.argmax(output)], probs[np.argmax(output)])
file.write(str_)
for idx, out in enumerate(output):
str_ = str(out) + ' class ' + idx_to_class[idx] + ' prob: ' + str(probs[idx]) + '\n'
file.write(str_)
file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser('Syllables')
parser.add_argument('-l', '--loadModelCkt', action='store_true', help='Load model ckt (default: false)')
parser.add_argument('-ck', '--checkpointPath', help='Path to model checkpoint')
parser.add_argument('-f', '--csvFilePath', help='Path to input csv file')
parser.add_argument('-o', '--outputFile', help='Output .txt file path')
args = parser.parse_args()
test(args)
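# --- Editor's note: hedged usage sketch; all paths below are hypothetical. ---
# test() can also be driven without the CLI by building the namespace directly
# (the checkpoint's parent directory name must encode the model info that
# get_model_info_from_fname() expects):
def _run_sketch():
    args = argparse.Namespace(loadModelCkt=True,
                              checkpointPath='checkpoints/model_dir/best.ckpt',
                              csvFilePath='sample_features.csv',
                              outputFile='prediction.txt')
    test(args)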
|
[
"fabiovaleriomassoli@gmail.com"
] |
fabiovaleriomassoli@gmail.com
|
2ecbd8f9a85ba36e7cb4307928d5d576e57ebf5e
|
3aa36cd8efa0b5aca6111db28d7197266fdc8506
|
/roman_to_decimal.py
|
1147e64ba2a9f398d223935f07ca2acf6946a3d7
|
[] |
no_license
|
AyresJonas/RomanConverter
|
f387eae81c7c257e459344821bb5ee8905a1a7b9
|
0c9b94ddcd9066f173cc1d9495b56f9de35465c0
|
refs/heads/master
| 2020-05-30T18:14:17.561696
| 2019-06-02T20:54:11
| 2019-07-25T18:01:46
| 189,892,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 937
|
py
|
import sys
from validations import is_invalid_roman_number
_ROMAN_TO_DECIMAL_DICT = {
'M': 1000,
'D': 500,
'C': 100,
'L': 50,
'X': 10,
'V': 5,
'I': 1
}
def roman_to_decimal(roman_number):
try:
if len(roman_number) == 1:
return _ROMAN_TO_DECIMAL_DICT[roman_number]
decimals = [_ROMAN_TO_DECIMAL_DICT[char] for char in roman_number]
except KeyError:
return 'Invalid roman character'
if is_invalid_roman_number(decimals):
return 'Invalid roman number'
result = _calculate_result(decimals)
if result > 3999:
return 'Invalid roman number'
return result
def _calculate_result(decimals):
result = 0
for first_num, second_num in zip(decimals, decimals[1:]):
if first_num >= second_num:
result += first_num
else:
result -= first_num
return result + second_num
if __name__ == '__main__':
roman_num = sys.argv[1].upper()
print(roman_to_decimal(roman_num))
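# --- Editor's note: hedged worked example, not part of the original file. ---
# _calculate_result() subtracts every value that is smaller than its successor:
# 'MCMXCIV' -> [1000, 100, 1000, 10, 100, 1, 5]
#           -> 1000 - 100 + 1000 - 10 + 100 - 1 + 5 = 1994
def _examples():
    assert roman_to_decimal('MCMXCIV') == 1994
    assert roman_to_decimal('XL') == 40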
|
[
"jonas.ayres@southsystem.com.br"
] |
jonas.ayres@southsystem.com.br
|
57e22578fbcd05532daf06696ed64a58b5b37649
|
6802fee77c53ba92ba2bdd8065dc3732921ad7f7
|
/user_app/admin.py
|
ab72bbcbd0ea87af5ca65edeee760af1f5d9713a
|
[] |
no_license
|
Aashishparajuli/tasbir
|
925292ee507db9b28cf565d6e233caa6d069681d
|
a0fbc4a6120565cfa4ee55616d1c188a1a3221aa
|
refs/heads/master
| 2023-04-26T07:18:25.865455
| 2019-12-25T09:01:17
| 2019-12-25T09:01:17
| 229,171,015
| 0
| 0
| null | 2023-04-21T20:43:17
| 2019-12-20T01:59:43
|
HTML
|
UTF-8
|
Python
| false
| false
| 125
|
py
|
from django.contrib import admin
from .models import usermodel
# Register your models here.
admin.site.register(usermodel)
|
[
"aashishparajuli018@gmail.com"
] |
aashishparajuli018@gmail.com
|
edda37c0d808f51c3ce33ec5cbadcfba08ffc3f0
|
16024097e5283c1194f6d076cb321969c1d45317
|
/space/management/commands/space_update.py
|
fd716da5cb9e7c7090d5fceaa2a7ef4093e4cf03
|
[] |
no_license
|
ShinjiroMoriya/React-Django-GraphQL-Sample
|
1677c1c29cec348b62df50c7104faf27bb387dd7
|
3c4c36ec67b8fe4b69824f2305032116411b0e9e
|
refs/heads/master
| 2020-03-21T18:16:43.326461
| 2018-11-09T07:29:19
| 2018-11-09T07:29:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
from django.core.management.base import BaseCommand
from space.models import Space
import string
import random
class Command(BaseCommand):
def handle(self, *args, **options):
try:
count = 1
for s in Space.objects.all():
random_str = ''.join(
[random.choice(string.ascii_letters + string.digits) for i
in range(15)])
s.sfid = random_str
s.save()
count += 1
except Exception as ex:
print(str(ex))
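# --- Editor's note: hedged usage note, not part of the original file. ---
# Given its location (space/management/commands/space_update.py), this is a
# Django management command and is invoked as:
#   python manage.py space_update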
|
[
""
] | |
39e37960d94378ff7cdd7f3dbc16d7c764ebdfaf
|
551dabfe10ea6778546f380d3ee9f0500f352b0f
|
/2.6 K-Means/2.6.1.5.py
|
7b9e48d4c270aaff583d5deaee70f884d44e328e
|
[] |
no_license
|
mohamedalemam/sklearn
|
c32843b6a04fe4b2a326870ba3658e8c1fc3b424
|
387c21357b165a1bc3593f8303ac5f9a672bd62a
|
refs/heads/main
| 2022-12-29T15:38:06.534505
| 2020-10-17T21:26:59
| 2020-10-17T21:26:59
| 301,839,374
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
from sklearn.datasets import load_iris
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import matplotlib.pyplot as plt
iris = load_iris()
X = iris.data
kmean = KMeans(n_clusters= 3 )
kmean.fit(X)
result = kmean.labels_
print(silhouette_score(X , result))
score = []
for n in range(2,11):
kmean = KMeans(n_clusters= n )
kmean.fit(X)
result = kmean.labels_
print(n , ' ' , silhouette_score(X , result))
score.append(silhouette_score(X , result))
plt.plot(range(2,11) , score)
plt.show()
kmean = KMeans(n_clusters= 4 )
y_kmeans = kmean.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 10, c = 'r')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 10, c = 'b')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 10, c = 'g')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 10, c = 'c')
plt.scatter(kmean.cluster_centers_[:, 0], kmean.cluster_centers_[:, 1], s = 100, c = 'y')
plt.show()
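# --- Editor's note: hedged sketch, not part of the original file. ---
# The silhouette scores collected above can also pick the best k
# programmatically instead of reading it off the plot:
def _best_k(scores):
    import numpy as np
    return range(2, 11)[int(np.argmax(scores))]  # k with the highest silhouette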
|
[
"noreply@github.com"
] |
mohamedalemam.noreply@github.com
|
d325b49fa0fcb6ede20cfa6d8734d6d65a19f18e
|
40638f7594848ea851648f5dd83208ea244f5fa3
|
/Python/HelloWorld/HelloWorld.py
|
384cb676fb76881d8d63028bc2a956d755758bc6
|
[] |
no_license
|
roshanlouhar/SelfLearning
|
3c67920b01149866adf39687fc1959bb43d3d7ac
|
21140d128565b0c536b32c3fceaea469f1f6f0a4
|
refs/heads/master
| 2021-01-03T18:10:18.409652
| 2020-11-30T13:01:10
| 2020-11-30T13:01:10
| 240,185,637
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 152
|
py
|
# def GCD(a,b):
# while(b!= 0):
# t =a
# a = b
# b = t % b
# return a
#print(("GCD is :") + str(GCD(20,8)))
|
[
"22392219+roshanlouhar@users.noreply.github.com"
] |
22392219+roshanlouhar@users.noreply.github.com
|
878808f98d0845d8e9e22ed632973acc9053833b
|
c49fa285a24d1d6e673fff5a439c682fb872cbf0
|
/TP2Bis_Cartpole_Qdeep/cartpole.py
|
b4c654a8bd8a357fa6fdc1c12548a818574d4d83
|
[] |
no_license
|
HadarakDev/LabESGI-IA-ML
|
a229e1f7ca676ddeb4225014f9ecf77ea2ecc23b
|
2727048622f0857beb15d088c2b8e641395cb412
|
refs/heads/master
| 2020-09-21T15:53:59.433526
| 2020-01-17T14:23:10
| 2020-01-17T14:23:10
| 224,838,109
| 1
| 6
| null | 2020-01-09T18:15:31
| 2019-11-29T11:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,849
|
py
|
import random
import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
BATCH_SIZE = 10
GAMMA = 0.80
def create_model(nb_input, nb_output):
model = Sequential()
model.add(Dense(24, input_dim=nb_input, activation="relu"))
model.add(Dense(24, activation="relu"))
model.add(Dense(nb_output, activation="softmax"))
model.compile(optimizer=Adam(lr=0.001), loss="mse")
return model
def predict(model, state, action_space, explo_ratio):
if np.random.rand() < explo_ratio:
return random.randrange(action_space)
return np.argmax(model.predict(state)[0])
def replay(model, memory, explo_ratio):
if len(memory) < BATCH_SIZE:
# not enough data to train
return explo_ratio, model
batch = random.sample(memory, BATCH_SIZE)
for action, state, reward, state_next, done in batch:
q_update = reward
if not done:
q_update = reward + GAMMA * np.amax(model.predict(state_next)[0])
q_values = model.predict(state)
q_values[0][action] = q_update
model.fit(state, q_values, verbose=0)
explo_ratio = explo_ratio * 0.995
explo_ratio = max(explo_ratio, 0.05)
return explo_ratio, model
if __name__ == "__main__":
tf.compat.v1.disable_eager_execution()
env = gym.make("CartPole-v0")
observation_space = env.observation_space.shape[0]
action_space = env.action_space.n
model = create_model(observation_space, action_space)
run = 0
explo_ratio: float = 1
max_steps = 0
while True:
run += 1
state = env.reset()
state = np.reshape(state, [1, observation_space])
step = 0
memory = []
print("nb run:" + str(run))
while True:
step += 1
env.render()
action = predict(model, state, action_space, explo_ratio)
state_next, reward, done, info = env.step(action)
if done:
reward = -reward
state_next = np.reshape(state_next, [1, observation_space])
memory.append((action, state, reward, state_next, done))
state = state_next
if done:
if max_steps < step:
max_steps = step
print("\t Model is done with steps: " + str(step) + " max steps: " + str(max_steps))
break
explo_ratio, model = replay(model, memory, explo_ratio)
if __name__ == "__main__":
env = gym.make('CartPole-v0')
env.reset()
for i in range(1000):
env.render()
state_next, reward, done, info = env.step(env.action_space.sample()) # take a random action
if done == True:
env.reset()
env.close()
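# --- Editor's note: hedged numeric illustration, not part of the original file. ---
# replay() above implements the one-step Q-learning target
#   q(s, a) <- r + GAMMA * max_a' q(s', a')
def _q_target_sketch():
    reward, q_next = 1.0, np.array([0.2, 0.6])
    return reward + GAMMA * np.amax(q_next)  # 1.0 + 0.80 * 0.6 = 1.48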
|
[
"nico_roche@hotmail.fr"
] |
nico_roche@hotmail.fr
|
24768d2f93aa88b46dad55e4ea6aa5d3b008fa2f
|
05628092235f402452d5d25bbc2e005cf7b67045
|
/Product/serializers.py
|
f3d5cd00e9abe9259686e756b158832ca09b302b
|
[] |
no_license
|
prahladtripathi19/django-mongo-elasticsearch
|
41e3066047e0ce2576b9dd925b95228164323fc0
|
13249d49e3b92673ef01fa0f40618cc46b2d8509
|
refs/heads/main
| 2023-02-06T02:57:27.734925
| 2020-12-13T15:30:33
| 2020-12-13T15:30:33
| 321,093,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from rest_framework import serializers
from .models import Product, Category, Mostviewed
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ["id","name","slug","published","created_at","updated_at"]
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ["id","name","slug","productcode","price","published","category","created_at","updated_at"]
class MostviewedSerializer(serializers.ModelSerializer):
class Meta:
model = Mostviewed
fields = ["id","product"]
|
[
"prahlad.tripathi@timesinternet.in"
] |
prahlad.tripathi@timesinternet.in
|
186bd95b0db222551e33365655e533c01afc348d
|
bb16070b9fe39894bdea67020eb7e41f93f3cef1
|
/src/utils.py
|
cf4d1346833931f47112f782333e7f5b18b732dd
|
[] |
no_license
|
toshi835/t5-generation
|
a782927c2bae62e272423947701c93f6fe4b8dac
|
1343ea0d5895b4a0fc72147f555676c118a6fd13
|
refs/heads/main
| 2023-06-02T19:09:14.996191
| 2021-06-17T06:43:22
| 2021-06-17T06:43:22
| 377,701,947
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,411
|
py
|
import os
import random
import numpy as np
import torch
import pytorch_lightning as pl
from torch.utils.data import Dataset, DataLoader
from transformers import AdamW, T5ForConditionalGeneration, T5Tokenizer, get_linear_schedule_with_warmup
# Set the random seeds
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
class TsvDataset(Dataset):
def __init__(self, tokenizer, data_dir, type_path, input_max_len=512, target_max_len=512):
self.file_path = os.path.join(data_dir, type_path)
self.input_max_len = input_max_len
self.target_max_len = target_max_len
self.tokenizer = tokenizer
self.inputs = []
self.targets = []
self._build()
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_ids = self.inputs[index]["input_ids"].squeeze()
target_ids = self.targets[index]["input_ids"].squeeze()
source_mask = self.inputs[index]["attention_mask"].squeeze()
target_mask = self.targets[index]["attention_mask"].squeeze()
return {"source_ids": source_ids, "source_mask": source_mask,
"target_ids": target_ids, "target_mask": target_mask}
def _make_record(self, input, target):
        # Convert into the input/output format of the news-title generation task.
input = f"{input}"
target = f"{target}"
return input, target
def _build(self):
with open(self.file_path, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
line = line.strip().split("\t")
if i == 0: # header
continue
input = line[1]
target = line[0]
input, target = self._make_record(input, target)
tokenized_inputs = self.tokenizer.batch_encode_plus(
[input], max_length=self.input_max_len, truncation=True,
padding="max_length", return_tensors="pt"
)
# tokenizer.batch_encode_plus([input], max_length=100, truncation=True,padding="max_length", return_tensors="pt")
tokenized_targets = self.tokenizer.batch_encode_plus(
[target], max_length=self.target_max_len, truncation=True,
padding="max_length", return_tensors="pt"
)
self.inputs.append(tokenized_inputs)
self.targets.append(tokenized_targets)
class T5FineTuner(pl.LightningModule):
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
        # Load the pretrained model
self.model = T5ForConditionalGeneration.from_pretrained(hparams.model_name_or_path)
        # Load the tokenizer
self.tokenizer = T5Tokenizer.from_pretrained(hparams.tokenizer_name_or_path, is_fast=True)
def forward(self, input_ids, attention_mask=None, decoder_input_ids=None,
decoder_attention_mask=None, labels=None):
"""順伝搬"""
return self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels
)
def _step(self, batch):
"""ロス計算"""
labels = batch["target_ids"]
# All labels set to -100 are ignored (masked),
# the loss is only computed for labels in [0, ..., config.vocab_size]
labels[labels[:, :] == self.tokenizer.pad_token_id] = -100
outputs = self(
input_ids=batch["source_ids"],
attention_mask=batch["source_mask"],
decoder_attention_mask=batch['target_mask'],
labels=labels
)
loss = outputs[0]
return loss
def training_step(self, batch, batch_idx):
"""訓練ステップ処理"""
loss = self._step(batch)
self.log("train_loss", loss)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
"""バリデーションステップ処理"""
loss = self._step(batch)
self.log("val_loss", loss)
return {"val_loss": loss}
def test_step(self, batch, batch_idx):
"""テストステップ処理"""
loss = self._step(batch)
self.log("test_loss", loss)
return {"test_loss": loss}
def configure_optimizers(self):
"""オプティマイザーとスケジューラーを作成する"""
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
eps=self.hparams.adam_epsilon)
self.optimizer = optimizer
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=self.hparams.warmup_steps,
num_training_steps=self.t_total
)
self.scheduler = scheduler
return [optimizer], [{"scheduler": scheduler, "interval": "step", "frequency": 1}]
def get_dataset(self, tokenizer, type_path, args):
"""データセットを作成する"""
return TsvDataset(
tokenizer=tokenizer,
data_dir=args.data_dir,
type_path=type_path,
input_max_len=args.max_input_length,
target_max_len=args.max_target_length)
def setup(self, stage=None):
"""初期設定(データセットの読み込み)"""
if stage == 'fit' or stage is None:
train_dataset = self.get_dataset(tokenizer=self.tokenizer,
type_path="train.tsv", args=self.hparams)
self.train_dataset = train_dataset
val_dataset = self.get_dataset(tokenizer=self.tokenizer,
type_path="dev.tsv", args=self.hparams)
self.val_dataset = val_dataset
self.t_total = (
(len(train_dataset) // (self.hparams.train_batch_size * max(1, self.hparams.n_gpu)))
// self.hparams.gradient_accumulation_steps
* float(self.hparams.num_train_epochs)
)
def train_dataloader(self):
"""訓練データローダーを作成する"""
return DataLoader(self.train_dataset,
batch_size=self.hparams.train_batch_size,
drop_last=True, shuffle=True, num_workers=4)
def val_dataloader(self):
"""バリデーションデータローダーを作成する"""
return DataLoader(self.val_dataset,
batch_size=self.hparams.eval_batch_size,
num_workers=4)
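# --- Editor's note: hedged usage sketch; the tokenizer name and paths are
# hypothetical, not taken from the original project. ---
# TsvDataset can be exercised on its own, given a tab-separated file
# data/train.tsv whose first line is a header and whose columns are
# <target>\t<input>:
def _dataset_sketch():
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    ds = TsvDataset(tokenizer, data_dir="data", type_path="train.tsv",
                    input_max_len=512, target_max_len=64)
    return ds[0]  # dict of source_ids / source_mask / target_ids / target_mask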
|
[
"toshi@Mac-ptt.local"
] |
toshi@Mac-ptt.local
|
d3ccd807f7b8490745b957d2c89790b672f754c5
|
38ea5e22afbd1b3a353c2bafbddd1b2659c64209
|
/Python/calc.py
|
ce6ac18590c3ad9b9549f9c30aeff7e2f228cb2d
|
[] |
no_license
|
Raysjc/python
|
200242a3ec8c7c30a5af14a12e6902c2ec474a93
|
93863ceb7e75b0037a8de73aa5b4b5617391dbd2
|
refs/heads/master
| 2020-12-01T06:49:20.016406
| 2019-12-28T07:55:40
| 2019-12-28T07:55:40
| 230,578,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
print ("Hello World!")
def sum(op1, op2):
return op1 + op2
def minus(op1, op2):
return op1 - op2
def mult(op1, op2):
return op1 * op2
def divide(op1, op2):
return op1 / op2
def menu():
print(" Menu")
print("[1] - Add")
print("[2] - Subtract")
print("[3] - Multiply")
print("[4] - Divide")
print("[x] - Exit")
print("-" * 30)
print(' Welcome to PyCalc')
print("-" * 30)
opc = ""
while(opc != "x"):
menu()
opc = input("Select an option: ")
if(opc == "x"):
break #break the loop
num1 = float(input("First Number: "))
num2 = float(input("Second Number: "))
if(opc == '1'):
sum_res = sum(num1, num2)
print("sum = " + str(sum_res))
if(opc == '2'):
minus_res = minus(num1, num2)
print("sum = " + str(minus_res))
if(opc == '3'):
mult_res = mult(num1, num2)
print("sum = " + str(mult_res))
if(opc == '4'):
divide_res = divide(num1, num2)
print("sum = " + str(divide_res))
print("Thank you for using PyCalc")
|
[
"noreply@github.com"
] |
Raysjc.noreply@github.com
|
b30e1f00a7c498e849ea02cac85faa9435afc5cb
|
6bbdaf14c23d23702e1fc27cc40349b6c54c4709
|
/accounts/views.py
|
84c3bbdee18bd4f12c1b022df5aa989649a00712
|
[] |
no_license
|
UsamaNawaz1/FreelanceJobMarket
|
9b810cc0552733aeab0c6b0554410b6538607810
|
d126714dcc06f91f11dce8e7206b730e1a625483
|
refs/heads/master
| 2023-05-25T23:35:58.058320
| 2021-06-05T12:11:51
| 2021-06-05T12:11:51
| 374,104,231
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,943
|
py
|
from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm
from django.http import JsonResponse, HttpResponse
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib import messages
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.views import View
import stripe, json
from .models import Education, Job, Skills, UserAward, Userprofile, Experience, Education, UserProject, UserAward, Job, Proposal, Message, Review, Report
stripe.api_key = settings.STRIPE_SECRET_KEY
@csrf_exempt
def stripe_webhook(request):
payload = request.body
sig_header = request.META['HTTP_STRIPE_SIGNATURE']
event = None
try:
event = stripe.Webhook.construct_event(
payload, sig_header, settings.STRIPE_WEBHOOK_SECRET
)
except ValueError as e:
# Invalid payload
return HttpResponse(status=400)
except stripe.error.SignatureVerificationError as e:
# Invalid signature
return HttpResponse(status=400)
if event['type'] == 'checkout.session.completed':
session = event['data']['object']
print(session)
winner_id = session["metadata"]["winner_id"]
job_id = session["metadata"]["job_id"]
job = Job.objects.get(pk=job_id)
winner = Proposal.objects.get(pk=winner_id)
job.awarded_to = winner.created_by
job.job_status = 'Ongoing'
job.created_by.userprofile.ongoing_jobs = job.created_by.userprofile.ongoing_jobs + 1
job.created_by.userprofile.save()
winner.created_by.userprofile.ongoing_jobs = winner.created_by.userprofile.ongoing_jobs + 1
winner.created_by.userprofile.save()
job.pending_amount = winner.amount
job.save()
message = Message.objects.create(application=winner,created_by=job.created_by, content=f'{job.created_by.userprofile.first_name} has awarded you the project with title {job.jobTitle}')
message.save()
return HttpResponse(status=200)
def checkout(request, job_id, winner_id):
job = Job.objects.get(pk=job_id)
winner = Proposal.objects.get(pk=winner_id)
context = {
'job' : job,
'winner' : winner,
"STRIPE_PUBLIC_KEY": settings.STRIPE_PUBLIC_KEY,
}
return render(request, 'accounts/landing.html', context)
class CreateCheckoutSessionView(View):
def post(self, request, *args, **kwargs):
#http://127.0.0.1:8000
#https://vegassportsadvantage-1.herokuapp.com/
#https://www.vegassportsadvantage.com
YOUR_DOMAIN = 'http://127.0.0.1:8000'
job_id = kwargs['job_id']
winner_id = kwargs['winner_id']
job = Job.objects.get(pk=job_id)
winner = Proposal.objects.get(pk=winner_id)
checkout_session = stripe.checkout.Session.create(
payment_method_types=['card'],
line_items=[
{
'price_data': {
'currency': 'usd',
'unit_amount': winner.amount * 100,
'product_data': {
'name': job.jobTitle,
},
},
'quantity': 1,
},
],
metadata={
'job_id':job_id,
'winner_id':winner_id,
},
mode='payment',
success_url=YOUR_DOMAIN + '/success/',
cancel_url=YOUR_DOMAIN + '/cancel/',
)
return JsonResponse({'id': checkout_session.id})
def paypal_complete(request):
body = json.loads(request.body)
print('BODY: ',body)
job_id = body["job_id"]
winner_id = body["winner_id"]
job = Job.objects.get(pk=job_id)
winner = Proposal.objects.get(pk=winner_id)
job.awarded_to = winner.created_by
job.job_status = 'Ongoing'
job.created_by.userprofile.ongoing_jobs = job.created_by.userprofile.ongoing_jobs + 1
job.created_by.userprofile.save()
winner.created_by.userprofile.ongoing_jobs = winner.created_by.userprofile.ongoing_jobs + 1
winner.created_by.userprofile.save()
job.pending_amount = winner.amount
job.save()
message = Message.objects.create(application=winner,created_by=job.created_by, content=f'{job.created_by.userprofile.first_name} has awarded you the project with title {job.jobTitle}')
message.save()
return redirect('success')
def success(request):
return render(request, 'accounts/success.html')
def cancel(request):
return render(request, 'accounts/cancel.html')
def message(request, job_id, proposal_id):
job = Job.objects.get(pk=job_id)
proposal = Proposal.objects.get(pk=proposal_id)
if request.method == 'POST':
content = request.POST.get('content')
message = Message.objects.create(content=content, application=proposal, created_by=request.user)
message.save()
context = {
'job':job,
'prop' : proposal,
}
return render(request, 'accounts/message.html', context)
def freelancer_proposal(request):
return render(request, 'accounts/freelancer_proposal.html')
def view_proposal(request, job_id, proposal_id):
job = Job.objects.get(pk=job_id)
proposal = Proposal.objects.get(pk=proposal_id)
context = {
'job':job,
'proposal' : proposal,
}
return render(request, 'accounts/view_proposal.html', context)
def job_completed(request, job_id):
job = Job.objects.get(pk=job_id)
if request.method == 'POST':
rating = request.POST.get('rating')
feedback = request.POST.get('feedback')
if request.user.userprofile.is_employer:
review = Review.objects.create(review_rating=rating, feedback=feedback, given_by=job.created_by,on_job=job, given_to=job.awarded_to)
review.save()
job.job_status = 'Completed'
total_reviews = Review.objects.filter(given_to=job.awarded_to)
temp = 0.0
size = len(total_reviews)
for x in total_reviews:
temp += x.review_rating
job.awarded_to.userprofile.overall_rating = round(temp/size,1)
job.awarded_to.userprofile.feedback = job.awarded_to.userprofile.feedback + 1
job.created_by.userprofile.completed_jobs = job.created_by.userprofile.completed_jobs + 1
job.created_by.userprofile.save()
job.awarded_to.userprofile.completed_jobs = job.awarded_to.userprofile.completed_jobs + 1
job.awarded_to.userprofile.balance = job.pending_amount - (job.pending_amount * 0.1)
job.awarded_to.userprofile.save()
job.save()
else:
review = Review.objects.create(review_rating=rating, feedback=feedback, given_by=job.awarded_to,on_job=job, given_to=job.created_by)
review.save()
job.job_status = 'Completed'
total_reviews = Review.objects.filter(given_to=job.created_by)
temp = 0.0
size = len(total_reviews)
for x in total_reviews:
temp += x.review_rating
job.created_by.userprofile.overall_rating = round(temp/size,1)
job.created_by.userprofile.feedback = job.created_by.userprofile.feedback + 1
job.created_by.userprofile.save()
return redirect('manage_jobs')
return render(request, 'accounts/job_completed.html')
def job_proposal(request, job_id):
job = Job.objects.get(pk=job_id)
return render(request, 'accounts/job_proposal.html', {'job':job})
def add_proposal(request, job_id):
job = Job.objects.get(pk=job_id)
if request.method == 'POST':
amount = request.POST.get('amount')
bid = request.POST.get('bid')
duration = request.POST.get('duration')
proposal = Proposal.objects.create(amount=amount, bid=bid, duration=duration, created_by=request.user, on_job=job)
proposal.save()
job.proposal_count = job.proposal_count + 1
job.save()
return redirect('freelancer_proposal')
return render(request, 'accounts/add_proposal.html', {'job':job})
def manage_jobs(request):
return render(request, 'accounts/manage_jobs.html')
def about(request):
return render(request, 'accounts/about.html')
def how_work(request):
return render(request, 'accounts/how_work.html')
def jobs(request):
jobs = Job.objects.all()
context={
'jobs':jobs,
}
return render(request, 'accounts/jobs.html', context)
def view_freelancers(request):
freelancers = Userprofile.objects.filter(is_employer=False)
context={
'freelancers':freelancers,
}
return render(request, 'accounts/view_freelancers.html', context)
def freelancer(request, pk):
userprofile = Userprofile.objects.get(pk=pk)
reviews = Review.objects.filter(given_to=userprofile.user)
educations = Education.objects.filter(created_by=userprofile.user)
projects = UserProject.objects.filter(created_by=userprofile.user)
awards = UserAward.objects.filter(created_by=userprofile.user)
experiences = Experience.objects.filter(created_by=userprofile.user)
context={
'freelancer':userprofile,
'educations':educations,
'projects':projects,
'experiences':experiences,
'awards':awards,
'reviews':reviews
}
return render(request, 'accounts/freelancer.html', context)
def home(request):
return render(request, 'accounts/index-2.html')
def job_single(request, pk):
job = Job.objects.get(pk=pk)
if request.method == 'POST':
reason = request.POST.get('reason')
description = request.POST.get('description')
report = Report.objects.create(reason=reason, description=description)
report.save()
check = False
if request.user.userprofile.is_employer == False:
proposals = Proposal.objects.filter(created_by=request.user)
for proposal in proposals:
if proposal.on_job.id == job.id:
check = True
return render(request, 'accounts/job_single.html', {'job':job, 'check':check})
def addJobs(request):
if request.method == 'POST':
jobTitle = request.POST.get('jobTitle')
description = request.POST.get('description')
duration = request.POST.get('duration')
experience = request.POST.get('experience')
budget = request.POST.get('budget')
job = Job.objects.create(jobTitle=jobTitle, description=description, duration=duration, experience=experience, created_by=request.user, budget=budget)
job.save()
return redirect('jobs')
return render(request, 'accounts/addJobs.html')
def profile(request, pk):
user = User.objects.get(pk=pk)
return render(request, 'accounts/profile.html', {'user': user})
def user_account(request):
skills = request.user.userprofile.skills.all
exp = request.user.experiences.all
edu = request.user.educations.all
pro = request.user.projects.all
awr = request.user.awards.all
job = 1
proposal = 1
check = False
if request.user.userprofile.is_employer == False:
proposal = Proposal.objects.filter(created_by=request.user)
if len(proposal) > 0:
proposal = proposal[0]
job_id = proposal.on_job.id
job = Job.objects.get(pk=job_id)
check = True
if request.method == 'POST':
request.user.userprofile.first_name = request.POST.get('first_name')
request.user.userprofile.last_name = request.POST.get('last_name')
request.user.userprofile.hourly_rate = request.POST.get('hourly_rate')
request.user.userprofile.tag_line = request.POST.get('tag_line')
request.user.userprofile.description = request.POST.get('description')
request.user.userprofile.save()
context = {
'skills':skills,
'works':exp,
'learning': edu,
'projects':pro,
'awards' : awr,
'job': job,
'proposal':proposal,
'check':check,
}
return render(request, 'accounts/user_account.html', context)
def addSkills(request, pk):
user= User.objects.get(pk=pk)
if request.method == 'POST':
name = request.POST.get('Skills')
skill = Skills.objects.get(name=name)
user.userprofile.skills.add(skill)
user.userprofile.save()
return redirect('user_account')
def addExperience(request, pk):
user= User.objects.get(pk=pk)
if request.method == 'POST':
company_name = request.POST.get('company_name')
starting_date = request.POST.get('starting_date')
ending_date = request.POST.get('ending_date')
job_title = request.POST.get('job_title')
job_description = request.POST.get('job_description')
exp = Experience.objects.create(company_name=company_name, starting_date=starting_date, ending_date=ending_date, job_title=job_title, job_description=job_description, created_by=user)
exp.save()
return redirect('user_account')
return redirect('user_account')
def addEducation(request, pk):
user= User.objects.get(pk=pk)
if request.method == 'POST':
company_name = request.POST.get('school')
starting_date = request.POST.get('starting_date')
ending_date = request.POST.get('ending_date')
job_title = request.POST.get('degree')
job_description = request.POST.get('description')
edu = Education.objects.create(school=company_name, starting_date=starting_date, ending_date=ending_date, degree=job_title, description=job_description, created_by=user)
edu.save()
return redirect('user_account')
return redirect('user_account')
def addProject(request, pk):
user= User.objects.get(pk=pk)
if request.method == 'POST':
project_title = request.POST.get('project_title')
project_url = request.POST.get('project_url')
pro = UserProject.objects.create(project_title=project_title, project_url=project_url, created_by=user)
pro.save()
return redirect('user_account')
return redirect('user_account')
def addAward(request, pk):
user= User.objects.get(pk=pk)
if request.method == 'POST':
award_title = request.POST.get('award_title')
award_date = request.POST.get('award_date')
awr = UserAward.objects.create(award_title=award_title, award_date=award_date, created_by=user)
awr.save()
return redirect('user_account')
return redirect('user_account')
def coming_soon(request):
return render(request, 'accounts/coming_soon.html')
def registerPage(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
user.email = request.POST.get('email')
user.save()
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
account_type = request.POST.get('account_type', 'jobseeker')
country = request.POST.get('country')
if account_type == 'employee':
userprofile = Userprofile.objects.create(user = user, first_name=first_name, last_name=last_name, country=country, is_employer = True)
userprofile.save()
else:
userprofile = Userprofile.objects.create(user = user, first_name=first_name, last_name=last_name, country=country)
userprofile.save()
login(request, user)
return redirect('home')
else:
form = UserCreationForm()
return render(request, 'accounts/register.html', {'form':form})
def loginPage(request):
data = dict()
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password1')
user = authenticate(request, username = username, password = password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.error(request, "username or password is incorrect")
return render(request, 'accounts/login.html', data)
@login_required(login_url='login')
def logoutPage(request):
logout(request)
return redirect('login')
|
[
"usamanawaz1994@gmail.com"
] |
usamanawaz1994@gmail.com
|
ead4250fa9612d4baad9c9740790c4ad8c2b510d
|
a3785b6ff7734d98af1417000cd619a59bd5a268
|
/part_1_illustration/ga.py
|
b2ee2731865273695da7b558d2e297854a93b084
|
[] |
no_license
|
SuryodayBasak/mst-final-run
|
bd9800744ab4fb6f0947c258ebc1be1151bc9ff2
|
2cde94af03d63f66cc3753843e4e60c92c313466
|
refs/heads/master
| 2022-07-01T22:40:39.003096
| 2020-05-09T04:25:18
| 2020-05-09T04:25:18
| 260,341,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,458
|
py
|
import numpy as np
import random
class GeneticAlgorithm:
def __init__(self, n_dims, N_init, mu = 0.001):
self.n_dims = n_dims
self.mu = mu
#Initialize population
self.population = np.random.rand(N_init, self.n_dims)
self.fitness = None
def crossover(self, N_cross):
        # Build a new population by averaging pairs of randomly chosen parents,
        # then mutating each child
new_pop = np.empty((N_cross, self.n_dims))
for i in range(N_cross):
parent_1 = self.population[random.randint(0, len(self.population)-1)]
parent_2 = self.population[random.randint(0, len(self.population)-1)]
child = (parent_1 + parent_2)/2
new_pop[i] = self.mutate(child)
self.population = new_pop
def mutate(self, gene):
mutated_child = np.empty(self.n_dims)
for i in range(self.n_dims):
mutated_child[i] = gene[i] + (self.mu*np.random.uniform(-1, 1))
return mutated_child
def best_sol(self):
#return w, fitness
fitness_args = np.argsort(self.fitness)
return self.population[fitness_args[0]], self.fitness[fitness_args[0]]
def selection(self, N_select):
#Argsort first
fitness_args = np.argsort(self.fitness)
self.population = self.population.take(fitness_args[0:N_select], axis=0)
def get_population(self):
return self.population
def set_fitness(self, fitness):
self.fitness = fitness
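# --- Editor's note: hedged usage sketch, not part of the original file. ---
# A minimal loop driving the class above; the sphere function serves as the
# fitness (lower is better, matching the ascending argsort in selection()):
def _ga_sketch(generations=50):
    ga = GeneticAlgorithm(n_dims=3, N_init=20, mu=0.01)
    for _ in range(generations):
        ga.set_fitness(np.sum(ga.get_population() ** 2, axis=1))  # sphere fitness
        ga.selection(N_select=10)
        ga.crossover(N_cross=20)
    ga.set_fitness(np.sum(ga.get_population() ** 2, axis=1))
    return ga.best_sol()  # (best weights, best fitness)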
|
[
"suryodaybasak@gmail.com"
] |
suryodaybasak@gmail.com
|
555b322a5669fc5bd21c772037c09b21974363db
|
c2ee9d6d84e2270ba4c9d6062460a2be0ff5f19c
|
/708. Insert into a Sorted Circular Linked List.py
|
bc945a6e28aeb948bfa2169748de9206d81682c7
|
[] |
no_license
|
Peiyu-Rang/LeetCode
|
0dd915638e8c41c560952d86b4047c85b599d630
|
f79886ed3022664c3291e4e78129bd8d855cf929
|
refs/heads/master
| 2021-11-27T23:48:39.946840
| 2021-11-09T12:47:48
| 2021-11-09T12:47:48
| 157,296,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,105
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 23:22:09 2021
@author: Caven
"""
"""
# Definition for a Node.
class Node:
def __init__(self, val=None, next=None):
self.val = val
self.next = next
"""
class Solution:
def insert(self, head: 'Node', insertVal: int) -> 'Node':
if head is None:
new_node = Node(insertVal)
new_node.next= new_node
return new_node
prev, curr = head, head.next
to_insert = False
while True:
if prev.val <= insertVal <= curr.val:
to_insert = True
elif prev.val > curr.val:
if insertVal >= prev.val or insertVal <= curr.val:
to_insert = True
if to_insert:
prev.next = Node(insertVal, curr)
return head
prev, curr = curr, curr.next
            # stop once we have traversed the full circle back to head
if prev == head:
break
prev.next = Node(insertVal, curr)
return head
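# Illustrative check (commented out because Node is supplied by LeetCode):
# build the circle 1 -> 3 -> 4 -> (back to 1), then insert 2.
# a, b, c = Node(1), Node(3), Node(4)
# a.next, b.next, c.next = b, c, a
# Solution().insert(a, 2)  # the list now cycles 1 -> 2 -> 3 -> 4 -> 1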
|
[
"prang3@gatech.edu"
] |
prang3@gatech.edu
|
571b5cab5e89333a2fe9a05f143579374f1e7336
|
16cbb6cfa2201934db9011e7eba18bdebd9357d0
|
/optiloops.py
|
eb4415a9c8cdb62494d6dc9d89e5117a1152165b
|
[] |
no_license
|
Dreadchild/-Blender-
|
9da0e067bf47b55599086b60d8bcc489a86f6e5d
|
baa2945d72379f2cf4b6da5ae20a7453dfdb0856
|
refs/heads/master
| 2022-12-16T20:14:52.026188
| 2020-09-01T16:22:57
| 2020-09-01T16:22:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,460
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Optiloops",
"author": "Vilem Duha",
"version": (1, 0),
"blender": (2, 80, 0),
"location": "View3D > Mesh > Mesh Tools panel > Optimize loops",
"description": "Optimize meshes by removing loops with angle threshold",
"warning": "",
"wiki_url": "",
"category": "Add Mesh",
}
import bpy, bmesh
from bpy.props import (
BoolProperty,
BoolVectorProperty,
FloatProperty,
FloatVectorProperty,
)
def get_loop(bm, e):
checkverts = e.verts[:]
checkedverts = []
loop_edges = [e]
while len(checkverts) > 0:
v = checkverts.pop()
checkedverts.append(v)
if len(v.link_edges) == 4:
for e in v.link_edges:
if e in loop_edges:
estart = e
for e in v.link_edges:
isneighbour = False
for f in e.link_faces:
if f in estart.link_faces:
isneighbour = True
if not isneighbour:
loop_edges.append(e)
for v in e.verts:
if v not in checkedverts and v not in checkverts:
checkverts.append(v)
return loop_edges
def get_neighbours(loops):
for l in loops:
l.neighbours = []
for l in loops:
e = l.edges[0]
neighbours = 0
for f in e.link_faces:
if len(f.verts) == 4:
for e1 in f.edges:
if e1 != e:
do = True
for v in e1.verts: # check it's the parallel edge...
if v in e.verts:
do = False
if do:
for l1 in loops:
if l1 != l and e1 in l1.edges:
neighbours += 1
if l1 not in l.neighbours:
l.neighbours.append(l1)
l1.neighbours.append(l)
class edgeloop():
    # simple data holder; each instance assigns its own edges/neighbours
    edges = []
    neighbours = []
def loop_closed(es):
closed = True
for e in es:
for v in e.verts:
ec = 0
for e1 in v.link_edges:
if e1 in es:
ec += 1
if ec == 1:
closed = False
return False
return True
def check_angles(edges, angle_threshold):
for e in edges:
if len(e.link_faces) != 2:
return False
# print(len(e.link_faces))
a = e.calc_face_angle()
if a > angle_threshold:
return False
return True
def skiploop(result_loops, final_loops, skip_loops, lstart):
final_loops.append(lstart)
last_neighbour = None
checkneighbours = lstart.neighbours[:]
checked = []
while len(checkneighbours) > 0:
neighbour = checkneighbours.pop()
checked.append(neighbour)
skip_loops.append(neighbour)
for n in neighbour.neighbours:
if n not in final_loops and n not in checked:
final_loops.append(n)
checked.append(n)
for n1 in n.neighbours:
checkneighbours.append(n1)
if n1 not in skip_loops and n1 not in final_loops:
skip_loops.append(n1)
checked.append(n1)
def optiloops(self, context):
angle_threshold = self.angle_threshold / 180 * 3.1415926
ob = bpy.context.active_object
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE')
bm = bmesh.from_edit_mesh(ob.data)
checkedges = [] # bm.edges[:]
bpy.ops.mesh.loop_multi_select(ring=False)
for e in bm.edges:
if e.select:
checkedges.append(e)
if len(checkedges) == 0:
checkedges = bm.edges[:]
resultedges = []
result_loops = []
shape_loop_edges = []
bpy.ops.mesh.select_all(action='DESELECT')
i = 0
while len(checkedges) > 0:
es = get_loop(bm, checkedges[0])
for e in es:
if e in checkedges:
checkedges.remove(e)
thresok = True
if len(es) == 0:
thresok = False
if thresok: # only manifold
for e in es:
if len(e.link_faces) < 2:
thresok = False
if e.seam and self.keep_seams:
thresok = False
if thresok: # first level angle check
thresok = check_angles(es, angle_threshold)
if thresok and self.only_closed: # only closed check
thresok = loop_closed(es)
if thresok: # append results
resultedges.extend(es)
loop = edgeloop()
loop.edges = es
result_loops.append(loop)
# if i == 1:
# print(thresok)
# fal
for e in es:
e.select = False
i += 1
get_neighbours(result_loops)
if self.keep_subsurf_influencing_loops:
# check for neighbouring loops if they aren't in the cleanup group which means they are where borders start.
remove_loops = []
for l in result_loops:
if len(l.neighbours) < 2:
remove_loops.append(l)
for l in remove_loops:
result_loops.remove(l)
get_neighbours(result_loops)
if not self.finish_dissolve:
for l in result_loops:
for e in l.edges:
e.select = True
else:
while len(result_loops) > 0:
final_loops = []
# while len(result_loops)>0:
skip_loops = []
for l in result_loops:
if len(l.neighbours) == 1 and l.neighbours[0] not in final_loops:
skiploop(result_loops, final_loops, skip_loops, l)
if len(l.neighbours) == 0:
final_loops.append(l)
if len(skip_loops) + len(final_loops) < len(result_loops):
for l in result_loops:
if l not in skip_loops and l not in final_loops:
skiploop(result_loops, final_loops, skip_loops, l)
# if l not in skip_loops and l not in final_loops and # nothing was done this round
for l in final_loops:
for e in l.edges:
e.select = True
# fal
bpy.ops.mesh.dissolve_edges()
result_loops = []
for l in skip_loops:
filter = False
for e in l.edges:
if e not in bm.edges:
filter = True
continue
if not filter:
if check_angles(l.edges, angle_threshold):
result_loops.append(l)
get_neighbours(result_loops)
# make things iterative here
# def main(context):
# for ob in context.scene.objects:
# print(ob)
class OptiloopsOperator(bpy.types.Operator):
"""Reduces mesh geometry while keeping loops"""
bl_idname = "mesh.optiloops"
bl_label = "Optimize loops"
bl_options = {'REGISTER', 'UNDO'}
    # Blender 2.80+ expects operator properties as class annotations (":"), not assignments
    angle_threshold: FloatProperty(
        name="Max angle",
        description="Loops containing only lower angles will be removed",
        min=0.01, max=180.0,
        default=5.0,
    )
    only_closed: BoolProperty(
        name="Remove only closed loops",
        default=False,
    )
    keep_subsurf_influencing_loops: BoolProperty(
        name="Keep loops defining subsurf creases",
        default=False,
    )
    keep_seams: BoolProperty(
        name="Keep uv seams",
        description="Keep uv seam loops intact",
        default=True,
    )
    finish_dissolve: BoolProperty(
        name="Delete loop candidates",
        description="If disabled, loops will only be selected",
        default=True,
    )
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
optiloops(self, context)
return {'FINISHED'}
def optiloops_panel(self, context):
layout = self.layout
layout.operator('mesh.optiloops')
# Registration
def register():
bpy.utils.register_class(OptiloopsOperator)
bpy.types.VIEW3D_MT_edit_mesh.append(optiloops_panel)
def unregister():
bpy.utils.unregister_class(OptiloopsOperator)
bpy.types.VIEW3D_MT_edit_mesh.remove(optiloops_panel)
if __name__ == "__main__":
register()
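# Usage note (not in the original file): after running register() from
# Blender's Text Editor, the "Optimize loops" operator appears in the
# 3D View's Edit Mode > Mesh menu, matching the location given in bl_info.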
|
[
"noreply@github.com"
] |
Dreadchild.noreply@github.com
|
20fd02d08428f5b26d7cd204290cbcefb30b0d46
|
144c150fe5b4e3df33d36b550f2e0d0d694e4154
|
/hw1.py
|
29c7f14ac9fa563812e060c842c03388e7e0a91c
|
[] |
no_license
|
pyang4/msan698-hw1
|
4ce412d8594f910d620102eef54e7f48e76f5dae
|
55467b7d6de11849f797cc1fb257e555eee2a042
|
refs/heads/master
| 2021-04-15T10:21:05.558427
| 2018-03-23T19:16:37
| 2018-03-23T19:16:37
| 126,527,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
"""
Created on March, 2018
@author: Diane Woodbridge
"""
import sys
import os
def function_with_many_many_argunemts(variable_1, variable_2,
variable_3, variable_4):
print("wow")
def another_function():
print("wow")
def insert(data):
preprocessed_data = [1, 2]
def update(data=None):
return True
def drop(db, table):
"""
    Designed specifically for MongoDB.
Todo: extend it for other DBs.
"""
find(db).drop(table)
# An error will occur, if db or table doesn't exist.
def delete_from_table(data): # Fix this: Readability?
processed_data = preprocess(data)
return False
|
[
"phyang3@dons.usfca.edu"
] |
phyang3@dons.usfca.edu
|
6a50f4e7ffe144e16194fc0e6f5295d2f9d0a997
|
96311d4ad2d3d620853ed538e9631f7de8f86944
|
/lefffExtractor/LefffExtractor.py
|
709948c01da1da2fe5bc53a9ed09751b3dc43523
|
[
"Apache-2.0"
] |
permissive
|
Krolov18/Languages
|
1b37faf1ab825d0db7663f7dcc7b6d7a4e0ce385
|
549952886f73f0b8e1b9e6393875f5473b591407
|
refs/heads/master
| 2021-05-04T10:12:23.446838
| 2017-01-03T20:20:18
| 2017-01-03T20:20:18
| 49,289,124
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,919
|
py
|
__author__ = 'korantin'
from DecoupeurRecursif import DecoupeurRecursif
import re, sys
from yaml import load, dump
from codecs import open
import pickle
with open("../RESSOURCES/lefff-2.1.txt",'r','latin-1') as lexique:
lexicon = []
for ligne in lexique:
if not ligne.startswith('#'):
lexie = {}
temp = {}
(form, chiffre, categorie,construction) = ligne.split('\t')
construction = construction.replace('=',': ')
if "@" in construction:
construction = construction.replace('@',"a@")
research = re.search(": '(.*)'[,\]]",construction)
if research:
construction = construction.replace(research.group(1),research.group(1).replace("'",'"'))
morpho = {"morphologie": []}
construction = load(construction)
for element in construction:
if isinstance(element,str):
morpho["morphologie"].append(element.replace("a@",''))
construction.append(morpho)
construction = [x for x in construction if not isinstance(x, str)]
[temp.update(x) for x in construction]
if "pred" not in temp: temp.update({"pred":""})
research1 = re.search(r"(.*)_____[0-9]<(.*)>",temp["pred"])
if research1:
tempo = list(research1.groups())
tempo[1] = load("{{{0}}}".format(tempo[1].replace(":",": ")))
lexie.update({"lemme":tempo[0]})
lexie.update({"syntaxe":tempo[1]})
lexie.update({"forme":form})
lexie.update({"true_cat":categorie})
lexie.update({"chiffre":chiffre})
del temp["pred"]
for x,y in temp.items():
lexie.update(dict([(x,y)]))
lexicon.append(lexie)
with open(sys.argv[1],'wb') as stream:
# temp = pickle.load(stream)
# for element in temp:
# print(element)
dump(lexicon,stream,default_flow_style=False,allow_unicode=True)
print(dump(lexicon, default_flow_style=False,allow_unicode=True))
class LefffExtractor:
def __init__(self, lefff):
self.corpus = lefff
#Regexes
self.etiquettesRegex = re.compile("(\w+)=(\w+|'.*>')")
self.syntaxeRegex = re.compile("(\w+):")
self.lemmeNumProSyntRegex = re.compile("'([a-zA-Zéèàêâôûùçîïöüë ]*)_*([0-9]*)([a-zA-Zéèàêâôûùçîïöüë]*)?(<?.*>?)?'")
self.morphologieRegex = re.compile("@(\w+)")
self.listeEtiquettes = ["lemme","chiffre","pronominal","fonctions"]
        # initialize the sets
self._etiquettes = set([])
self._syntaxe = set([])
self._morphologie = set([])
        # list holding each lefff line, split up by the class's different methods
self.lexique = []
        # loop that fills the three sets.
for tuplex in self.corpus:
if not tuplex[0].startswith('#') and len(tuplex)==4:
(forme,chiffre,categorie,phrases) = tuplex
temp1 = set([x[0] for x in self.etiquettesRegex.findall(phrases)])
temp2 = set(self.morphologieRegex.findall(phrases))
temp3 = self.lemmeNumProSyntRegex.findall(phrases)
if temp3 != []:
temp3 = set(self.syntaxeRegex.findall(temp3[0][-1]))
else:
temp3 = set(temp3)
self.updateEtiquettes(temp1)
self.updateMorphologie(temp2)
self.updateSyntaxe(temp3)
print(self.getEtiquettes())
print(self.getSyntaxe())
print(self.getMorphologie())
def getEtiquettes(self):
return self._etiquettes
def updateEtiquettes(self,set):
self._etiquettes |= set
def getSyntaxe(self):
return self._syntaxe
def updateSyntaxe(self,set):
self._syntaxe |= set
def getMorphologie(self):
return self._morphologie
def updateMorphologie(self,set):
self._morphologie |= set
etiquettes = property(fget=getEtiquettes,fset=updateEtiquettes,doc="etiquettes property")
syntaxe = property(fget=getSyntaxe,fset=updateSyntaxe,doc="syntaxe property")
morphologie = property(fget=getMorphologie,fset=updateMorphologie,doc="morphologie property")
def attribuerValeurs(self,chaine):
morpho = self.morphologieRegex.findall(chaine)
etiquettes = dict(self.etiquettesRegex.findall(chaine))
etiquettes.update(dict(zip(self.listeEtiquettes,self.lemmeNumProSyntRegex.search(etiquettes["pred"]).groups())))
etiquettes.pop("pred")
def main():
pass
# def main():
# temp = DecoupeurRecursif(open(sys.argv[1],encoding="latin1").read())
# temp.decouper(separateurs=[["\n"],["\t"]])
# analyse = LefffExtractor(temp.liste)
if __name__=="__main__":
main()
|
[
"korantin.leveque@gmail.com"
] |
korantin.leveque@gmail.com
|
dbaba92b4d25cfbf94c508d9c250c7ee484e5962
|
7655e5f4e2692093ad49970ef52a6adc1974dbed
|
/config/settings/production.py
|
b5b73f4c83f9f6c8d1cf716c38987118d1484d1c
|
[
"BSD-3-Clause"
] |
permissive
|
addys888/metaci
|
4c4e739b8e35230a26c366f04a9fafab14e3f363
|
87fe4ff144b7a0295e94806f4abe699fc606988d
|
refs/heads/master
| 2020-04-08T18:39:40.439583
| 2018-12-27T06:14:04
| 2018-12-27T06:14:04
| 159,618,556
| 0
| 1
|
BSD-3-Clause
| 2018-12-07T09:55:28
| 2018-11-29T06:28:54
|
Python
|
UTF-8
|
Python
| false
| false
| 9,890
|
py
|
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use SendGrid (via Anymail) to send emails
- Use Redis for cache
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
DEBUG = env.bool("DJANGO_DEBUG", default=True)
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ("raven.contrib.django.raven_compat",)
INSTALLED_APPS += ("defender",)
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
WHITENOISE_MIDDLEWARE = ("whitenoise.middleware.WhiteNoiseMiddleware",)
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = (
"raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware",
)
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
DEFENDER_MIDDLEWARE = ("defender.middleware.FailedLoginMiddleware",)
MIDDLEWARE = MIDDLEWARE + DEFENDER_MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/1.9/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/ja/1.9/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = "DENY"
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["metaci.herokuapp.com"])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn",)
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ("storages",)
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
# AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
# AWS_HEADERS = {
# 'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
# AWS_EXPIRY, AWS_EXPIRY))
# }
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = "https://s3.amazonaws.com/{}/".format(AWS_STORAGE_BUCKET_NAME)
DEFAULT_FILE_STORAGE = "config.settings.storage_backends.MediaStorage"
# Static Assets
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="metaci <noreply@metaci.herokuapp.com>"
)
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default="[metaci] ")
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# Anymail with SendGrid
INSTALLED_APPS += ("anymail",)
SENDGRID_API_KEY = env("SENDGRID_API_KEY", default=None)
SENDGRID_USERNAME = env("SENDGRID_USERNAME", default=None)
SENDGRID_PASSWORD = env("SENDGRID_PASSWORD", default=None)
ANYMAIL = {}
if SENDGRID_API_KEY:
ANYMAIL["SENDGRID_API_KEY"] = SENDGRID_API_KEY
elif SENDGRID_USERNAME and SENDGRID_PASSWORD:
ANYMAIL["SENDGRID_USERNAME"] = SENDGRID_USERNAME
ANYMAIL["SENDGRID_PASSWORD"] = SENDGRID_PASSWORD
EMAIL_BACKEND = "anymail.backends.sendgrid_v2.EmailBackend"
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]["OPTIONS"]["loaders"] = [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES["default"] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
REDIS_MAX_CONNECTIONS = env.int("REDIS_MAX_CONNECTIONS", default=1)
REDIS_LOCATION = "{0}/{1}".format(env("REDIS_URL", default="redis://127.0.0.1:6379"), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": REDIS_LOCATION,
"OPTIONS": {
"CONNECTION_POOL_CLASS": "redis.BlockingConnectionPool",
"CONNECTION_POOL_KWARGS": {
"max_connections": REDIS_MAX_CONNECTIONS,
"timeout": 20,
},
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
},
}
}
# Logging configuration, heroku logfmt
# 12FA logs to stdout only.
# request_id injected into logstream for all lines
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"root": {"level": "WARNING", "handlers": []},
"filters": {"request_id": {"()": "log_request_id.filters.RequestIDFilter"}},
"formatters": {
"logfmt": {
"format": "at=%(levelname)-8s request_id=%(request_id)s module=%(name)s %(message)s"
},
"simple": {"format": "at=%(levelname)-8s module=%(name)s msg=%(message)s"},
},
"handlers": {
"console_w_req": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"filters": ["request_id"],
"formatter": "logfmt",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"filters": ["request_id"],
"formatter": "simple",
},
},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console_w_req"],
"propagate": False,
},
"raven": {"level": "DEBUG", "handlers": ["console_w_req"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console_w_req"],
"propagate": False,
},
"log_request_id.middleware": {
"handlers": ["console_w_req"],
"level": "DEBUG",
"propagate": False,
},
"rq.worker": {"handlers": ["console"], "level": "DEBUG", "propagate": False},
},
}
# Sentry Configuration
SENTRY_DSN = env("DJANGO_SENTRY_DSN", default=None)
SENTRY_CLIENT = env(
"DJANGO_SENTRY_CLIENT", default="raven.contrib.django.raven_compat.DjangoClient"
)
RAVEN_CONFIG = {}
if SENTRY_DSN:
RAVEN_CONFIG["DSN"] = SENTRY_DSN
LOGGING["handlers"]["sentry"] = {
"level": "ERROR",
"class": "raven.contrib.django.raven_compat.handlers.SentryHandler",
}
LOGGING["loggers"]["sentry.errors"] = {
"level": "DEBUG",
"handlers": ["console"],
"propagate": False,
}
LOGGING["root"]["handlers"].append("sentry")
LOGGING["loggers"]["django.security.DisallowedHost"]["handlers"].append("sentry")
# Add the HireFire middleware for monitoring queue to scale dynos
# See: https://hirefire.readthedocs.io/
HIREFIRE_TOKEN = env("HIREFIRE_TOKEN", default=None)
if HIREFIRE_TOKEN:
HIREFIRE_PROCS = ["config.procs.WorkerProc"]
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Site URL: assumes appname.herokuapp.com
SITE_URL = env("SITE_URL")
FROM_EMAIL = env("FROM_EMAIL")
# Github credentials
GITHUB_USERNAME = env("GITHUB_USERNAME")
GITHUB_PASSWORD = env("GITHUB_PASSWORD")
GITHUB_WEBHOOK_BASE_URL = env("GITHUB_WEBHOOK_BASE_URL")
GITHUB_WEBHOOK_SECRET = env("GITHUB_WEBHOOK_SECRET")
# Salesforce OAuth Connected App credentials
CONNECTED_APP_CLIENT_ID = env("CONNECTED_APP_CLIENT_ID")
CONNECTED_APP_CLIENT_SECRET = env("CONNECTED_APP_CLIENT_SECRET")
CONNECTED_APP_CALLBACK_URL = env("CONNECTED_APP_CALLBACK_URL")
SFDX_CLIENT_ID = env("SFDX_CLIENT_ID")
SFDX_HUB_KEY = env("SFDX_HUB_KEY")
SFDX_HUB_USERNAME = env("SFDX_HUB_USERNAME")
# django-defender configuration
DEFENDER_REDIS_NAME = "default"
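# Example environment for this settings module (illustrative values only;
# each variable below is read via env() above, among others):
#
#   DJANGO_SECRET_KEY=change-me
#   DJANGO_ADMIN_URL=admin/
#   DATABASE_URL=postgres://user:password@host:5432/dbname
#   REDIS_URL=redis://127.0.0.1:6379
#   DJANGO_AWS_STORAGE_BUCKET_NAME=my-bucket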
|
[
"adarshsngh73@gmail.com"
] |
adarshsngh73@gmail.com
|
fe59fe346826a8d30f5af1322e259da2683205ec
|
0db7b8c009a2db07a35c099ea54870ff9ddfaa41
|
/ss.py
|
b986be34a27cbf91d524ead778fbfe00c4c53c52
|
[] |
no_license
|
ubstart707/TelegramBot
|
2f0f8a517b77ba6dfeab9e556e64d5aac8fb866f
|
2ff4e3929e3d6aae273d3e4e53b0d53ad6ccd544
|
refs/heads/master
| 2020-04-01T05:57:48.867211
| 2018-11-18T15:24:12
| 2018-11-18T15:24:12
| 152,927,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,215
|
py
|
import time
from bs4 import BeautifulSoup
import requests
import os
url = 'https://www.islom.uz/'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
path = ''
namaazNames = soup.select('div.p_v')
namaazNames = [namaazName.text for namaazName in namaazNames]
namaazTimes = soup.select('div.p_clock')
namaazTimes = [namaazTime.text for namaazTime in namaazTimes]
del namaazNames[1]
del namaazTimes[1]
Bomdot = namaazTimes[0] #
Peshin = namaazTimes[1] #
Asr = namaazTimes[2] #
Shom = namaazTimes[3] #
Xufton = namaazTimes[4] #
Namoz_vaqtlari = (" Bomdot vaqti: {0}\n Peshin vaqti: {1}\n Asr vaqti: {2}\n Shom vaqti: {3}\n Xufton vaqti: {4}".format(Bomdot, Peshin, Asr, Shom, Xufton))
print(Namoz_vaqtlari)
currentTime = time.strftime('%H:%M')
localtime = time.localtime(time.time())
print(localtime[4])
# localtime[3] is the hour
# localtime[4] is the minute
# if localtime[3] > 9 :
# if localtime[3] > Peshin[0]) and localtime[4] > Peshin and localtime[3] < Asr{} :
# print("Hozir Peshin Vaqti \nAsr vaqti yaqinlashib kelmoqda[]".format(Asr))
# if localtime[3]
# if localtime[3] < 9 :
# print(voaleykum)
print(Peshin[2:5])  # slice, not tuple indexing: a string cannot be indexed with a tuple
# for namaazName, namaazTime in zip(namaazNames, namaazTimes):
# with open(path + namaazName + '.txt', 'w') as file:
# file.write(namaazTime)
# with open("Тонг.txt", "r") as my_new_handle:
# for the_line in my_new_handle:
# print("Bomdot",the_line)
# Bomdot = (the_line)
# with open("Пешин.txt", "r") as my_new_handle:
# for the_line in my_new_handle:
# print("Пешин:",the_line)
# Peshin = (the_line)
# with open("Аср.txt", "r") as my_new_handle:
# for the_line in my_new_handle:
# print("Аср:",the_line)
# Asr = (the_line)
# with open("Шом.txt", "r") as my_new_handle:
# for the_line in my_new_handle:
# print("Шом:",the_line)
# Shom = (the_line)
# with open("Хуфтон.txt", "r") as my_new_handle:
# for the_line in my_new_handle:
# print("Хуфтон:",the_line)
# Xufton = (the_line)
|
[
"ubstart707@gmail.com"
] |
ubstart707@gmail.com
|
5941bda4adcbcc72eb0969d934b98f492b8328a6
|
b05fd557048a2e3e2eaf0aaaac1d415f88dbc408
|
/cruisin_classics_scrape.py
|
a2d96ef3f451f09b8e5acc0eef316b3a4c6dc72d
|
[] |
no_license
|
goodwordalchemy/cruisin_classics_scrape
|
bafd4a02f40f15c93453c7f73d577b84595f0329
|
941d856daf00975895e62703dbfa3188cddb4559
|
refs/heads/master
| 2021-08-14T21:50:12.260827
| 2017-11-16T21:19:27
| 2017-11-16T21:19:27
| 111,023,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
from bs4 import BeautifulSoup
import pickle
import requests
url = 'https://www.discogs.com/label/801577-Cruisin-Classics'
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.0; WOW64; rv:24.0) Gecko/20100101 Firefox/24.0' }
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, 'html.parser')
table = soup.find(id='label')
titles = table.findAll(class_='title')
title_links = [t.find('a')['href'] for t in titles][1:]  # list comprehension so slicing works on Python 3 too
base_url = "https://www.discogs.com"
result = []
for link in title_links:
url = base_url + link
album_request = requests.get(url, headers=headers)
    soup = BeautifulSoup(album_request.content, 'html.parser')
tracklist = soup.findAll(class_='tracklist_track')
for track in tracklist:
artist = track.find(class_='tracklist_track_artists').find('a').text
track_title = track.find('span', class_='tracklist_track_title').text
result.append({'artist':artist, 'track_title':track_title})
with open('cruisin_classics_songlist.pickle','wb') as handle:
pickle.dump(result, handle)
|
[
"goodwordachemy@gmail.com"
] |
goodwordachemy@gmail.com
|
f5a62167ad7f9a3a7374cc3eddf00f05490b5456
|
039cbe06a624a5feb77264fde9a922029bf0920e
|
/proyecto1/tienda/admin.py
|
dbcf3693e9c9339c5cbaf5a399206f81eed51c9c
|
[] |
no_license
|
cmr451/proyecto1
|
c429661c606353ca00cbf18b7f2f6ea13e7bc12d
|
51c0409a5b8f6b5918d82a176907b0366d9006e7
|
refs/heads/master
| 2020-09-20T23:38:34.768159
| 2019-11-28T09:40:13
| 2019-11-28T09:40:13
| 224,618,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 172
|
py
|
from django.contrib import admin
from tienda.models import Cliente, Producto
# Register your models here.
admin.site.register(Cliente)
admin.site.register(Producto)
|
[
"noreply@github.com"
] |
cmr451.noreply@github.com
|
3dba926fc4272e0315d39d96a284468daa9015f8
|
0797cc84f8783d8b615aa3ea42aeff91b29e18ed
|
/main.py
|
ac54dc68720205f6a95cf5475bfab29ba44d34ba
|
[] |
no_license
|
hryniej2/CW_WD_NUMPY
|
379700af9a8bbdc313fa26c981eac14e2774aedd
|
4316dea859cd5a4485aa5ee2d7c4025fe33e9eab
|
refs/heads/master
| 2023-04-17T22:50:46.452685
| 2021-04-14T16:53:41
| 2021-04-14T16:53:41
| 357,978,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
import numpy as num
# Task 1
# krotnosc = num.arange(4,20*4+4,4)
# print(krotnosc)
# Task 2
# polecenie = num.arange(0, 0.2, 0.01)
# print(polecenie)
# ewolucja = polecenie.astype('int32')
# print((ewolucja))
# print(ewolucja.dtype)
# Task 3
# def funkcja(n):
# a = num.empty((n,n))
# a = a.astype('int')
# b = 0
# for i in range(n):
# for j in range(n):
# a[i,j] = (2**b)
# b += 1
# print(a)
#
#
#
# n = input("Enter a number: ")
# n = int(n)
#
# print(funkcja(n))
# Task 6
# wykreslanka = num.array([['K', 'U', 'R', 'C', 'Z', 'E'], ['R', 'O', 'B', 'D', 'F', 'W'], ['Ó', 'Z', 'L', 'N', 'Q', 'V'],
# ['W', 'A', 'U', 'A', 'T', 'M'], ['K', 'E', 'C', 'S', 'N', 'Z'], ['A', 'K', 'T', 'S','O', 'O']
# ])
#
# print(wykreslanka)
|
[
"jakubhryniewicki21@gmail.com"
] |
jakubhryniewicki21@gmail.com
|
4925f99e653223883fd57896ff5423adeca34184
|
e0390d987cf00c766641c9ab346cf7dcb59d320b
|
/ver/final.py
|
d3f2e5b5501a8b103cd1d4b2b6f18026133bb5ef
|
[
"MIT"
] |
permissive
|
khyatichandak/EyeTrackingSystem
|
4efa3226086cc673d7c8a0bbe6370637039f6134
|
45a884c8b64bd2035ab820da44bf38ef6131a9c3
|
refs/heads/master
| 2020-07-12T06:37:18.121937
| 2019-09-09T19:52:01
| 2019-09-09T19:52:01
| 204,745,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,172
|
py
|
import cv2
import numpy as np
import dlib
from gaze_tracking import GazeTracking
import pyautogui as pag
from math import hypot
from numpy import array
import win32com.client
import winsound
# Set up the SAPI text-to-speech engine
speaker = win32com.client.Dispatch("SAPI.SpVoice")
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
board = np.zeros((300, 1400), np.uint8)
board[:] = 255
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
first_frame = None
# Keyboard settings
keyboard = np.zeros((400, 1100, 4), np.uint8)
key_arr_1 = np.array(
[("1", "2", "3", "4", "5", "6", "7", "8", "9", "0", "."), ("Q", "W", "E", "R", "T", "Y", "U", "I", "O", "P", "?"),
("A", "S", "D", "F", "G", "H", "J", "K", "L", "'"," "), ("Z", "X", "C", "V", "B", "N", "M", ",","<", "CL","")])
pts = np.array([[1020,340],[1020,360],[1040,360],[1070,390],[1070,310],[1040,340],[1020,340]],np.int32)
def direction(nose_point, anchor_point, w, h, multiple=1):
nx = nose_point[0]
ny = nose_point[1]
x = anchor_point[0]
y = anchor_point[1]
if ny > y + multiple * h:
return 'DOWN'
elif ny <= y - multiple * h:
return 'UP'
return '-'
def letter(letter_index_i, letter_index_j, text, letter_light):
width = 100
height = 100
th = 3 # thickness
# Keys
x = letter_index_j * width
y = letter_index_i * height
# Text settings
font_letter = cv2.FONT_HERSHEY_PLAIN
font_scale = 5
font_th = 4
text_size = cv2.getTextSize(text, font_letter, font_scale, font_th)[0]
width_text, height_text = text_size[0], text_size[1]
text_x = int((width - width_text) / 2) + x
text_y = int((height + height_text) / 2) + y
if letter_light is True:
cv2.rectangle(keyboard, (x + th, y + th), (x + width - th, y + height - th), (255, 255, 255), -1)
cv2.putText(keyboard, text, (text_x, text_y), font_letter, font_scale, (51, 51, 51), font_th)
cv2.polylines(keyboard, [pts], 1, (51, 51, 51), 4)
cv2.line(keyboard,(858,349),(888,349),(51,51,51),4)
else:
cv2.rectangle(keyboard, (x + th, y + th), (x + width - th, y + height - th), (51, 51, 51), -1)
cv2.putText(keyboard, text, (text_x, text_y), font_letter, font_scale, (255, 255, 255), font_th)
cv2.polylines(keyboard, [pts], 1, (255, 255, 255), 4)
cv2.line(keyboard, (858, 349), (888, 349), (255,255,255), 4)
def midpoint(p1, p2):
return int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2)
def draw_menu():
rows, cols, _ = keyboard.shape
th_lines = 4 # thickness lines
cv2.line(keyboard, (int(cols / 2) - int(th_lines / 2), 0), (int(cols / 2) - int(th_lines / 2), rows),
(51, 51, 51), th_lines)
cv2.putText(keyboard, "LEFT", (80, 300), font, 6, (255, 255, 255), 5)
cv2.putText(keyboard, "RIGHT", (80 + int(cols / 2), 300), font, 6, (255, 255, 255), 5)
font = cv2.FONT_HERSHEY_PLAIN
def get_blinking_ratio(eye_points, facial_landmarks):
left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)
right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)
center_top = midpoint(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))
center_bottom = midpoint(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))
# hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)
# ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)
hor_line_lenght = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))
ver_line_lenght = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))
if ver_line_lenght == 0:
        ver_line_lenght = 1  # avoid division by zero
ratio = hor_line_lenght / ver_line_lenght
return ratio
def get_gaze_ratio(eye_points, facial_landmarks):
left_eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),
(facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),
(facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),
(facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),
(facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),
(facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)],
np.int32)
# cv2.polylines(frame, [left_eye_region], True, (0, 0, 255), 2)
height, width, _ = frame.shape
mask = np.zeros((height, width), np.uint8)
cv2.polylines(mask, [left_eye_region], True, 255, 2)
cv2.fillPoly(mask, [left_eye_region], 255)
eye = cv2.bitwise_and(gray, gray, mask=mask)
min_x = np.min(left_eye_region[:, 0])
max_x = np.max(left_eye_region[:, 0])
min_y = np.min(left_eye_region[:, 1])
max_y = np.max(left_eye_region[:, 1])
gray_eye = eye[min_y: max_y, min_x: max_x]
_, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY)
height, width = threshold_eye.shape
left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]
left_side_white = cv2.countNonZero(left_side_threshold)
right_side_threshold = threshold_eye[0: height, int(width / 2): width]
right_side_white = cv2.countNonZero(right_side_threshold)
if left_side_white == 0:
gaze_ratio = 1
elif right_side_white == 0:
gaze_ratio = 5
else:
gaze_ratio = left_side_white / right_side_white
return gaze_ratio
# Counters
frames = 0
letter_index_i = 0
letter_index_j = 0
keyboard_selection_frames = 0
blinking_frames = 0
frames_to_blink = 6
text = ""
while True:
# We get a new frame from the webcam
_, frame = webcam.read()
# We send this frame to GazeTracking to analyze it
gaze.refresh(frame)
if first_frame is None:
first_frame = frame
left_pupil = array(gaze.pupil_left_coords())
right_pupil = array(gaze.pupil_right_coords())
firstpointx = (left_pupil[0] + right_pupil[0]) / 2
firstpointy = (left_pupil[1] + right_pupil[1]) / 2
frame_eye = array([int(firstpointx), int(firstpointy)])
# frame_eye = array(gaze.frame_left_coords(first_frame))
continue
frame = gaze.annotated_frame()
keyboard[:] = (0, 0, 0, 0)
frames += 1
new_frame = np.zeros((500, 500, 3), np.uint8)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = detector(gray)
for face in faces:
landmarks = predictor(gray, face)
# Detect blinking
left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)
right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)
blinking_ratio = (left_eye_ratio + right_eye_ratio) / 2
if blinking_ratio > 5.7:
cv2.putText(frame, "BLINKING", (50, 150), font, 7, (255, 0, 0))
# Gaze detection
gaze_ratio_left_eye = get_gaze_ratio([36, 37, 38, 39, 40, 41], landmarks)
gaze_ratio_right_eye = get_gaze_ratio([42, 43, 44, 45, 46, 47], landmarks)
gaze_ratio = (gaze_ratio_right_eye + gaze_ratio_left_eye) / 2
drag = 12
left_pupil = array(gaze.pupil_left_coords())
right_pupil = array(gaze.pupil_right_coords())
w, h = 8, 8
dir1 = ""
mid_point = frame_eye
if left_pupil.size > 1 and right_pupil.size > 1:
midpointx = (left_pupil[0] + right_pupil[0]) / 2
midpointy = (left_pupil[1] + right_pupil[1]) / 2
mid_point = array([int(midpointx), int(midpointy)])
if mid_point.size > 1:
dir1 = direction(mid_point, frame_eye, w, h)
# cv2.line(frame, tuple(mid_point), tuple(frame_eye), (255, 0, 0), 2)
# cv2.line(frame, (900,900), tuple(frame_eye),(255, 0, 0), 2)
if blinking_ratio > 5.7:
blinking_frames += 1
frames -= 1
active_letter = key_arr_1[letter_index_i][letter_index_j]
keyboard_selection_frames = 0
# Typing letter
if blinking_frames == frames_to_blink:
if active_letter != "<" and active_letter != "" and active_letter != "CL":
text += active_letter
if active_letter == "<":
temp = text
c = text[-1:]
text = text[:-1]
cv2.putText(board, temp, (80, 100), font, 9, (255, 255, 255), 3)
if active_letter == "CL":
cv2.putText(board, text, (80, 100), font, 9, (255, 255, 255), 3)
text = ""
if active_letter == "":
speaker.Speak(text)
winsound.PlaySound("SystemExclamation", winsound.SND_ALIAS)
else:
blinking_frames = 0
# Show the text we're writing on the board
cv2.putText(board, text, (80, 100), font, 9, 0, 3)
if gaze_ratio < 0.8:
keyboard_selection_frames += 1
        # Gaze held to the right for 9 frames: move the keyboard selection right
if keyboard_selection_frames == 9:
# print("Right" + str(gaze_ratio) + " " + str(blinking_ratio))
cv2.putText(frame, "RIGHT", (50, 100), font, 2, (0, 0, 255), 3)
if letter_index_j < 10 and blinking_ratio < 5:
letter_index_j += 1
keyboard_selection_frames = 0
elif gaze_ratio > 1.5:
keyboard_selection_frames += 1
        # Gaze held to the left for 9 frames: move the keyboard selection left
if keyboard_selection_frames == 9:
# print("LEFT" + str(gaze_ratio) + " " + str(blinking_ratio))
cv2.putText(frame, "LEFT", (50, 100), font, 2, (0, 0, 255), 3)
if letter_index_j > 0 and blinking_ratio < 5:
letter_index_j -= 1
keyboard_selection_frames = 0
elif gaze_ratio >= 0.8 and gaze_ratio < 1.5:
if dir1 == 'UP':
keyboard_selection_frames += 1
            # Gaze held upward for 9 frames: move the keyboard selection up
if keyboard_selection_frames == 9:
# print("UP" + str(gaze_ratio) + " " + str(blinking_ratio))
cv2.putText(frame, "UP", (50, 100), font, 2, (0, 0, 255), 3)
if letter_index_i > 0 and blinking_ratio < 5:
letter_index_i -= 1
keyboard_selection_frames = 0
elif dir1 == 'DOWN':
keyboard_selection_frames += 1
            # Gaze held downward for 9 frames: move the keyboard selection down
if keyboard_selection_frames == 9:
# print("DOWN" + str(gaze_ratio) + " " + str(blinking_ratio))
cv2.putText(frame, "DOWN" + str(gaze_ratio), (50, 100), font, 2, (0, 0, 255), 3)
if letter_index_i < 3 and blinking_ratio < 5:
letter_index_i += 1
keyboard_selection_frames = 0
cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31),
1)
cv2.putText(frame, "direction: " + str(dir1), (90, 255), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
for i in range(4):
for j in range(11):
if i == letter_index_i and j == letter_index_j:
light = True
else:
light = False
letter(i, j, key_arr_1[i][j], light)
cv2.imshow("Frame", frame)
cv2.imshow("Virtual keyboard", keyboard)
cv2.imshow("Board", board)
key = cv2.waitKey(1)
if key == 27:
break
webcam.release()
cv2.destroyAllWindows()
|
[
"khyati.agola@gmail.com"
] |
khyati.agola@gmail.com
|
ad71308e70690de58c8bb93c0a7493aa18aace2e
|
a2a99278deab6edd8ac4800781978610d01bb779
|
/temp.py
|
57658bca22b497f6d5deb6a5750c96aa36333c1e
|
[] |
no_license
|
hjh6230/CRF_KeywordExtraction
|
467d14b73a4a8c3be45503fe66b86612ec975a5e
|
daa203644bc7d6ae220df4792b36d90c274e7be6
|
refs/heads/master
| 2021-04-15T17:17:41.175210
| 2018-11-13T15:02:30
| 2018-11-13T15:02:30
| 126,869,204
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,853
|
py
|
import keras
import matplotlib.pyplot as plt
from keras.models import Sequential,load_model
from keras.layers import Dense, Dropout, Activation,Input
from keras.layers import LSTM,TimeDistributed,Bidirectional
from keras.utils import to_categorical
from word2vec2 import word2vec,loadWordVecs,vec2word,featureExtraction
import nltk
from readin import standardReadin as SR
import numpy as np
from ProcessBar import progressbar
import pickle
from sklearn.metrics import precision_score, recall_score
num_doc=300
num_epochs=300
num_steps=8
one_batch_size=20
docInbatch=3
batch_size=one_batch_size*docInbatch # number of total batches
skip_step=4
labelsize=5 # number of label clusters
emb_index=loadWordVecs() #look up table
vocab=len(emb_index)
# hidden_size=100
feature_size=9
reverse_dict=dict(zip(emb_index.values(), emb_index.keys()))
data = SR("Scopes.xlsx", True)
class KerasBatchGenerator(object):
def __init__(self, datain ,label):
self.data = datain
self.datasize=len(datain)
self.label=label
# this will track the progress of the batches sequentially through the
# data set - once the data reaches the end of the data set it will reset
# back to zero
self.current_idx = 0
self.idx=0
# skip_step is the number of words which will be skipped before the next
# batch is skimmed from the data set
self.labelsize=labelsize
self.count=0
def generate(self):
while True:
text = self.data[self.idx]
lbs = self.label[self.idx]
len_batch = int(len(text) / skip_step)
if len_batch==0:len_batch=1
id = np.zeros((len_batch, num_steps))
x = np.zeros((len_batch, num_steps, feature_size))
y = np.zeros((len_batch, num_steps, labelsize))
if (len(text) <= skip_step):
self.idx = self.idx + 1
if (self.idx >= len(self.data)):
self.idx = 0
yield [id,x], y
self.current_idx = 0
for i in range(len_batch):
head=0
if (self.current_idx >= len(text)):
head+=1
self.current_idx = head
if self.current_idx + num_steps >= len(text):
# reset the index back to the start of the data set
rest_seat=num_steps-(len(text)-self.current_idx)
id[i, :] = np.hstack((text[self.current_idx:, 0], np.zeros((rest_seat))))
x[i, :, :] = np.vstack((text[self.current_idx:,1:], np.zeros((rest_seat,feature_size))))
temp_y = np.hstack((lbs[self.current_idx:],np.zeros((rest_seat))))
self.current_idx = 0
else:
id[i, :] = text[self.current_idx:self.current_idx + num_steps, 0]
x[i, :, :] = text[self.current_idx:self.current_idx + num_steps,1:]
temp_y = lbs[self.current_idx:self.current_idx + num_steps]
# convert all of temp_y into a one hot representation
y[i, :, :] = to_categorical(temp_y, num_classes=labelsize)
self.current_idx += skip_step
self.idx=self.idx+1
if(self.idx>=len(self.data)):
self.idx=0
yield [id, x], y
def load_data():
# get the data paths
# data = SR("Scopes.xlsx", True)
x=[]
y=[]
datasize = data.getsize()
# randorder = [i for i in range(1, datasize)]
# np.random.shuffle(randorder)
name = 'randlist'
f=open('obj/' + name + '.pkl', 'rb')
randorder=pickle.load(f)
randorder = randorder[1800 - num_doc:1800]
ft=featureExtraction(emb_index)
# for index in range(num_doc):
# title = data.getTitle(index+1)
# text = data.getBrief(index+1)
# token_title = nltk.word_tokenize(title)
# token_text = nltk.word_tokenize(text)
# token = token_title
# token.extend(token_text)
# token = [w for w in token if not w in nltk.corpus.stopwords.words('english')]
# token = [w for w in token if w.isalpha()]
#
# kws = data.loadkw(index+1)
# kws_split = [(word.lower()).split() for word in kws]
# labelList = []
# for tk_O in token:
# lab = 0
# tk = tk_O.lower()
# if tk in nltk.corpus.stopwords.words('english'):
# lab = 4
# for kw in kws_split:
# if (tk in kw):
# if len(kw) == 1:
# lab = 1
# break
# if kw.index(tk) == 0:
# lab = 2
# else:
# lab = 3
# break
# labelList.append(lab)
# vecs=[word2vec(emb_index,tk) for tk in token]
# # vecs=np.zeros(len(token))
# # for i in range(len(token)):
# # # print(token[i])
# # # ans=word2vec(emb_index, token[i])
# # vecs[i] = word2vec(emb_index, token[i])
# x.append(np.array(vecs))
# y.append(np.array(labelList))
process_bar = progressbar(num_doc, '*')
count = 0
for index in randorder:
count = count + 1
process_bar.progress(count)
x.append(np.array(ft.getFeatures(index)))
y.append(np.array(ft.getLabel(index)))
size_of_data=len(x)
x=np.array(x)
y=np.array(y)
return x,y,randorder
if __name__ == "__main__":
x,y,randorder=load_data()
modelname="1880model-100-175"
model= load_model("dLSTMmodels/"+modelname+".hdf5")
gen=KerasBatchGenerator(x,y)
kwNum = 0
predNum = 0.
correct = 0
get = 0
id_doc=0
Answer=[]
Pred=[]
filename = "sample/lstm_" + modelname + "_ext_" + str(num_doc) + '.txt'
file = open(filename, "w",encoding='utf-8')
mtx=[[],[]]
for i in range(num_doc):
doc_idx = randorder[i]
kws = data.loadkw(doc_idx)
# if (i==490):
# print(i)
# start to extract kw from predict
gen_data=next(gen.generate())
cont=gen_data[0]
y_val=gen_data[1]
if len(cont)==1:
print(i)
pred=model.predict([cont[0],cont[1]])
cont = cont[0]
pred_label=np.argmax(pred,axis=2)
y_val=np.argmax(y_val,axis=2)
labels=[0,1,2,3,4]
prec=0
recall=0
for j in range(len(y_val)):
prec+=precision_score(y_val[j], pred_label[j],average='weighted')
recall+=recall_score(y_val[j], pred_label[j],average='weighted')
prec/=len(y_val)
recall/=len(y_val)
mtx[0].append(prec)
mtx[1].append(recall)
pred_kw=[]
for seq in range(len(pred_label)):
for word in range(num_steps):
wd = 0
if pred_label[seq,word]== 1:
wd = vec2word(reverse_dict,cont[seq,word])
if pred_label[seq,word] == 2:
wd = vec2word(reverse_dict,cont[seq,word])
step = 1
while (True):
if (word + step >= num_steps):
break
if pred_label[seq,word+step] != 3:
break
wd = wd + ' ' + vec2word(reverse_dict,cont[seq,word+step])
step = step + 1
if(step==1):
wd=0
if (wd in pred_kw) or (wd == 0):
continue
else:
pred_kw.append(wd)
kwNum += len(kws)
predNum += len(pred_kw)
kws_low = [kw.lower() for kw in kws]
print("document num:" + str(randorder[i]), file=file)
print("predictions:", file=file)
for kw in pred_kw:
print(kw, file=file)
print("answers:", file=file)
for kw in kws_low:
print(kw, file=file)
print(" ", file=file)
for kw in pred_kw:
if kw in kws_low:
correct+=1
for kw in kws_low:
if kw in pred_kw:
get+=1
Answer.append(kws_low)
Pred.append(pred_kw)
print("recall = ", np.mean(mtx[1]))
print("precision=", np.mean(mtx[0]))
print ('kws')
print("recall = ", get / kwNum)
print("precision=", correct / predNum)
|
[
"noreply@github.com"
] |
hjh6230.noreply@github.com
|
2a021b30743ab086c068e49e57e334bca10dd1c4
|
8dcf32559f7579a81fecfa0ba5a28dd146f6a5b8
|
/core/models.py
|
4b9c1d14c1c3d2950eea3753e7038654b54dcc55
|
[] |
no_license
|
soyArturo/POS
|
3c1d8a48d7a555ebcc84188da04aaeb8b4e41f4a
|
d3ca70f2627050ac06fdecbf3b21802e6cc33d1a
|
refs/heads/master
| 2022-05-25T18:23:37.971077
| 2020-04-25T23:28:44
| 2020-04-25T23:28:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
from django.db import models
from django.conf import settings  # Django's settings proxy, rather than importing the project module directly
class ClaseModelo(models.Model):
    estado = models.BooleanField(default=True)  # active flag
    fc = models.DateTimeField(auto_now_add=True)  # creation timestamp
    fm = models.DateTimeField(auto_now=True)  # last-modified timestamp
    uc = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)  # creating user
    um = models.IntegerField(blank=True, null=True)  # modifying user id
class Meta:
abstract = True
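# Illustrative only (not part of this file): concrete models inherit the
# audit fields above, e.g.
#
# class Producto(ClaseModelo):
#     descripcion = models.CharField(max_length=100)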
|
[
"prz.arturo@gmail"
] |
prz.arturo@gmail
|
99daa699063f65bf07cfbc8e504d64a6a1988f15
|
38372fcc2ca58798176267360ff07f886400bc7b
|
/core/migrations/0031_auto_20191025_1440.py
|
a36d5cc6cf669825b0123f70fe5f3de65fe299fc
|
[] |
no_license
|
portman-asset-finance/_GO_PAF
|
4eb22c980aae01e0ad45095eb5e55e4cb4eb5189
|
ee93c49d55bb5717ff1ce73b5d2df6c8daf7678f
|
refs/heads/master
| 2020-09-21T05:22:10.555710
| 2019-11-28T16:44:17
| 2019-11-28T16:44:17
| 224,691,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 2.1.12 on 2019-10-25 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0030_ncf_bacs_file_signatures_processed'),
]
operations = [
migrations.AlterField(
model_name='ncf_bacs_file_signatures_processed',
name='file_signature',
field=models.TextField(),
),
]
|
[
"portman-asset-finance@outlook.com"
] |
portman-asset-finance@outlook.com
|
03e41cee3ee82d288b1d3986d77486dc0ab81671
|
f2af6a5d41bbc08bc7b3291bbdc9d28020bf4c78
|
/Practica_5/test_polynomial_curve_fitting.py
|
d2219118fc96c64d238a5632019547a8cf459702
|
[] |
no_license
|
alejandroag/gcom
|
6925db1a803492188f234692d1e40a5542a4fc1f
|
0421a33bd78be32cf3eeb24cc9c7f3c9b2833dce
|
refs/heads/master
| 2021-03-12T19:33:21.572270
| 2015-04-20T13:45:00
| 2015-04-20T13:45:00
| 34,068,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,132
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import timeit
from polynomial_curve_fitting import polynomial_curve_fitting
def test_least_squares_fitting():
n = 10
x = np.random.randint(-10, 10, size=(n, 2))
knots = np.linspace(0, 1, n)
num_points = 200
poly_0 = polynomial_curve_fitting(x, knots, method='least_squares',
libraries=False, num_points=num_points)
poly_1 = polynomial_curve_fitting(x, knots, method='least_squares',
libraries=True, num_points=num_points)
t = np.linspace(knots[0], knots[-1], num_points)
plt.plot(poly_0[:, 0], poly_0[:, 1])
plt.plot(poly_1[:, 0], poly_1[:, 1])
plt.plot(x[:, 0], x[:, 1], 'o')
plt.show()
assert np.max(poly_0 - poly_1) < 1e-1, 'least squares fitting\
polynomial is not correct'
def test_least_squares_fitting_regularized():
n = 10
x = np.array([[-7, -4], [-3, -1], [1, 2], [2, 4], [2.6, 3], [4, 1],
[10, 1], [12, 1], [12.4, -11], [20, -1]])
list_L = [10**k for k in range(-15, -5)]
print list_L
knots = np.linspace(0, 1, n)
num_points = 200
for L in list_L:
poly = polynomial_curve_fitting(x, knots, method='least_squares', L=L,
libraries=False, num_points=num_points)
plt.plot(poly[:, 0], poly[:, 1])
plt.plot(x[:, 0], x[:, 1], 'o')
plt.show()
def test_least_squares_fitting_degree():
n = 10
x = np.array([[-7, -4], [-3, -1], [1, 2], [2, 4], [2.6, 3], [4, 1],
[10, 1], [12, 1], [12.4, -11], [20, -1]])
list_L = range(1,10)
print list_L
knots = np.linspace(0, 1, n)
num_points = 200
for L in list_L:
poly = polynomial_curve_fitting(x, knots, method='least_squares',
libraries=False, num_points=num_points,degree=L)
print poly
plt.plot(poly[:, 0], poly[:, 1])
plt.plot(x[:, 0], x[:, 1], 'o')
plt.show()
def test_newton_poly_cheb():
n = 15
x = np.random.randint(-10, 10, size=(n, 2))
num_points = 200
cheb_knots = np.array([ 0.99726095, 0.97552826, 0.9330127, 0.87157241,\
0.79389263, 0.70336832, 0.60395585, 0.5,\
0.39604415, 0.29663168, 0.20610737, 0.12842759,\
0.0669873, 0.02447174, 0.00273905])
poly_0 = polynomial_curve_fitting(x, 'chebyshev', method='newton',
libraries=False, num_points=num_points)
poly_1 = polynomial_curve_fitting(x, cheb_knots, method='newton',
libraries=True, num_points=num_points)
assert np.max(poly_0 - poly_1) < 1e-1, 'wrong newton polynomial with cheb_knots'
plt.plot(poly_0[:, 0], poly_0[:, 1])
plt.plot(poly_1[:, 0], poly_1[:, 1])
plt.plot(x[:, 0], x[:, 1], 'o')
plt.show()
def test_newton_poly():
n = 10
x = np.random.randint(-10, 10, size=(n, 2))
knots = np.linspace(0, 1, n)
num_points = 200
poly_0 = polynomial_curve_fitting(x, knots, method='newton',
libraries=False, num_points=num_points)
poly_1 = polynomial_curve_fitting(x, knots, method='newton',
libraries=True, num_points=num_points)
assert np.linalg.norm(poly_0 - poly_1) < 1e-2, 'wrong newton polynomial'
plt.figure()
plt.plot(poly_0[:, 0], poly_0[:, 1])
plt.plot(poly_1[:, 0], poly_1[:, 1])
plt.plot(x[:, 0], x[:, 1], 'o')
plt.show()
def timing_curve_fitting():
setup = '''
import numpy as np
from polynomial_curve_fitting import polynomial_curve_fitting
n=10
knots = np.linspace(0, 1, n)
num_points = 200
'''
print 'newton:', min(timeit.repeat("x = np.random.randint(-10, 10, size=(n, 2));\
polynomial_curve_fitting(x, knots, method='newton',\
libraries=False, num_points=num_points)", setup=setup, number=10000))
print 'newton_libraries:', min(timeit.repeat("x = np.random.randint(-10, 10, size=(n, 2));\
polynomial_curve_fitting(x, knots, method='newton',\
libraries=True, num_points=num_points)", setup=setup, number=10000))
print 'least_squares:', min(timeit.repeat("x = np.random.randint(-10, 10, size=(n, 2));\
polynomial_curve_fitting(x, knots, method='least_squares',\
libraries=False, num_points=num_points, L=1e-10)", setup=setup, number=10000))
print 'least_squares_libraries:', min(timeit.repeat("x = np.random.randint(-10, 10, size=(n, 2));\
polynomial_curve_fitting(x, knots, method='least_squares',\
libraries=True, num_points=num_points, L=1e-10)", setup=setup, number=10000))
if __name__ == '__main__':
test_least_squares_fitting()
test_least_squares_fitting_regularized()
test_least_squares_fitting_degree()
test_newton_poly_cheb()
test_newton_poly()
timing_curve_fitting()
|
[
"aleagui1992@gmail.com"
] |
aleagui1992@gmail.com
|
271ae5ca4139a9b9302f09300148038a65288bf4
|
901ce1a7e6ebb051a8f87062a0e9f4c5c45d970f
|
/main.py
|
749100c38b55e24037e17186d1a2ef71a56720ac
|
[] |
no_license
|
Supremist/PrimeSearcher
|
2ac50cf9459771a9fa42a7015aa88b7fde6d9708
|
836035414b4bc3167bbdd406e86d2af634da7d10
|
refs/heads/master
| 2021-01-19T05:01:47.551542
| 2016-06-14T20:58:09
| 2016-06-14T20:58:09
| 61,155,402
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,032
|
py
|
# coding=utf-8
import math
from timing import timing
@timing
def get_primes_with_remember(max_number):
primes = [2]
for number in range(3, max_number):
square_root = math.sqrt(number)
for divider in primes:
if divider > square_root:
primes.append(number)
break
if number % divider == 0:
break
return primes
@timing
def get_primes_eratosthenes(max_number):
primes = [False, False] + [True]*(max_number-1)
square_root = math.sqrt(max_number)
for number, is_prime in enumerate(primes):
if number > square_root:
break
if is_prime:
for index in range(number*number, max_number+1, number):
primes[index] = False
result = []
for number, is_prime in enumerate(primes):
if is_prime:
result.append(number)
return result
test_size = 1000000
print(len(get_primes_with_remember(test_size)))
print(len(get_primes_eratosthenes(test_size)))
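# Both functions should agree: there are 78498 primes below 1,000,000, with
# the sieve of Eratosthenes typically far faster than trial division.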
|
[
"sergkarv@gmail.com"
] |
sergkarv@gmail.com
|
b9779a67e9106a265b2663e3a9f8e6b5cf4606bb
|
bdb1a312d8600c6cbf531ac86d1ec770e4a06d70
|
/conftest.py
|
2e7bd19523ab21a611cf55ec9d92bd679f486e42
|
[
"Apache-2.0"
] |
permissive
|
dcramer/kollect
|
9977c1433a25e549bd19096b7c437a5439f93bbb
|
a8586ec07f671e01e80df2336ad1fa5dfe4804e5
|
refs/heads/master
| 2020-05-16T07:59:05.019094
| 2019-04-23T02:15:40
| 2019-04-27T19:19:45
| 182,894,090
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
from uuid import UUID
import graphene.test
import pytest
from django.contrib.auth.models import AnonymousUser
from kollect import factories
from kollect.root_schema import schema
class Context(object):
user = AnonymousUser()
class GqlClient(graphene.test.Client):
def execute(self, query, user=None):
context = Context()
if user:
context.user = user
return super().execute(query, context=context)
@pytest.fixture
def gql_client(db):
return GqlClient(schema)
@pytest.fixture
def default_user(db):
user = factories.UserFactory(
id=UUID("449c76aa-ad6a-46a8-b32b-91d965e3f462"),
name="Reel Big Phish",
email="reel.big.phish@example.com",
)
user.set_password("phish.reel.big")
user.save()
return user
@pytest.fixture
def default_collection(db, default_user):
return factories.CollectionFactory.create(
id=UUID("6960436f-53cd-4d00-bd5b-a293349e7d1f"),
name="My Games",
created_by=default_user,
public=False,
)
@pytest.fixture
def default_item(db, default_collection, default_user):
return factories.ItemFactory.create(
id=UUID("76111b88-301b-4620-9c93-7c6d28f0987b"),
name="Unsettlers of Qatan",
collection=default_collection,
created_by=default_user,
)
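# A minimal sketch of how these fixtures compose in a test; the query string
# below is hypothetical and the real schema's field names may differ:
#
# def test_collections_query(gql_client, default_user, default_collection):
#     result = gql_client.execute("{ collections { name } }", user=default_user)
#     assert not result.get("errors")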
|
[
"dcramer@gmail.com"
] |
dcramer@gmail.com
|
d6d3ce3448268ec9fb1c6f812130ef47550b2050
|
bd8532378ad2a61240faaa7be8ef44c60c055a2a
|
/rabona/data/leagues/Major League Soccer/New York Red Bulls/New York Red Bulls.py
|
885e5775e7b3d9f6d86b50041a7d6059bcdf4f20
|
[] |
no_license
|
nosoyyo/rabona
|
278a9dfe158e342261343b211fb39b911e993803
|
b0af3ab5806675fbf81b038633a74943118a67bb
|
refs/heads/master
| 2020-03-16T06:56:55.277293
| 2018-05-30T11:45:51
| 2018-05-30T11:45:51
| 132,565,989
| 2
| 1
| null | 2018-05-30T11:45:52
| 2018-05-08T06:44:11
|
Python
|
UTF-8
|
Python
| false
| false
| 11,087
|
py
|
club_info = {'club_url': 'https://www.futbin.com///18/leagues/Major%20League%20Soccer?page=1&club=689', 'club_logo': 'https://cdn.futbin.com/content/fifa18/img/clubs/689.png', 'club_name': 'New York Red Bulls'}
players = {}
players['Wright-Phillips'] = {'player_url': 'https://www.futbin.com//18/player/18664/Wright-Phillips', 'player_name': 'Wright-Phillips', 'player_rating': '81', 'player_shortname': 'Wright-Phillips', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/164464.png?v=2'}
players['Wright-Phillips'] = {'player_url': 'https://www.futbin.com//18/player/1713/Bradley Wright-Phillips', 'player_name': 'Bradley Wright-Phillips', 'player_rating': '75', 'player_shortname': 'Wright-Phillips', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/14.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/164464.png?v=2'}
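# NOTE: the key 'Wright-Phillips' is assigned twice above, so the second
# (75-rated) entry overwrites the first (81-rated) card in the dict.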
players['Kljestan'] = {'player_url': 'https://www.futbin.com//18/player/2229/Sacha Kljestan', 'player_name': 'Sacha Kljestan', 'player_rating': '74', 'player_shortname': 'Kljestan', 'player_position': 'CAM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/176062.png?v=2'}
players['Robles'] = {'player_url': 'https://www.futbin.com//18/player/3304/Luis Robles', 'player_name': 'Luis Robles', 'player_rating': '72', 'player_shortname': 'Robles', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/179820.png?v=2'}
players['Romero Gamarra'] = {'player_url': 'https://www.futbin.com//18/player/18418/Romero Gamarra', 'player_name': 'Romero Gamarra', 'player_rating': '72', 'player_shortname': 'Romero Gamarra', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/52.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/226376.png?v=2'}
players['Collin'] = {'player_url': 'https://www.futbin.com//18/player/3336/Aurélien Collin', 'player_name': 'Aurélien Collin', 'player_rating': '72', 'player_shortname': 'Collin', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/18.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/184664.png?v=2'}
players['Lawrence'] = {'player_url': 'https://www.futbin.com//18/player/4344/Kemar Lawrence', 'player_name': 'Kemar Lawrence', 'player_rating': '71', 'player_shortname': 'Lawrence', 'player_position': 'LB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/82.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/220209.png?v=2'}
players['Royer'] = {'player_url': 'https://www.futbin.com//18/player/4101/Daniel Royer', 'player_name': 'Daniel Royer', 'player_rating': '71', 'player_shortname': 'Royer', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/4.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/199729.png?v=2'}
players['Parker'] = {'player_url': 'https://www.futbin.com//18/player/18720/Tim Parker', 'player_name': 'Tim Parker', 'player_rating': '70', 'player_shortname': 'Parker', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/226803.png?v=2'}
players['Perrinelle'] = {'player_url': 'https://www.futbin.com//18/player/4579/Damien Perrinelle', 'player_name': 'Damien Perrinelle', 'player_rating': '70', 'player_shortname': 'Perrinelle', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/18.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/168283.png?v=2'}
players['Felipe'] = {'player_url': 'https://www.futbin.com//18/player/4928/Felipe', 'player_name': 'Felipe', 'player_rating': '70', 'player_shortname': 'Felipe', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/54.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/207465.png?v=2'}
players['Verón'] = {'player_url': 'https://www.futbin.com//18/player/5939/Gonzalo Verón', 'player_name': 'Gonzalo Verón', 'player_rating': '69', 'player_shortname': 'Verón', 'player_position': 'RM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/52.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/215111.png?v=2'}
players['Baah'] = {'player_url': 'https://www.futbin.com//18/player/6914/Gideon Baah', 'player_name': 'Gideon Baah', 'player_rating': '68', 'player_shortname': 'Baah', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/117.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/232761.png?v=2'}
players['Keita'] = {'player_url': 'https://www.futbin.com//18/player/7604/Muhamed Keita', 'player_name': 'Muhamed Keita', 'player_rating': '67', 'player_shortname': 'Keita', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/36.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/183151.png?v=2'}
players['Rivas'] = {'player_url': 'https://www.futbin.com//18/player/16434/Carlos Rivas', 'player_name': 'Carlos Rivas', 'player_rating': '67', 'player_shortname': 'Rivas', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/56.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/220934.png?v=2'}
players['Grella'] = {'player_url': 'https://www.futbin.com//18/player/7682/Mike Grella', 'player_name': 'Mike Grella', 'player_rating': '67', 'player_shortname': 'Grella', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/190619.png?v=2'}
players['Redding'] = {'player_url': 'https://www.futbin.com//18/player/18419/Tommy Redding', 'player_name': 'Tommy Redding', 'player_rating': '67', 'player_shortname': 'Redding', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/222338.png?v=2'}
players['Adams'] = {'player_url': 'https://www.futbin.com//18/player/8826/Tyler Adams', 'player_name': 'Tyler Adams', 'player_rating': '66', 'player_shortname': 'Adams', 'player_position': 'CDM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/232999.png?v=2'}
players['Long'] = {'player_url': 'https://www.futbin.com//18/player/8642/Aaron Long', 'player_name': 'Aaron Long', 'player_rating': '66', 'player_shortname': 'Long', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/222123.png?v=2'}
players['Lade'] = {'player_url': 'https://www.futbin.com//18/player/8337/Connor Lade', 'player_name': 'Connor Lade', 'player_rating': '66', 'player_shortname': 'Lade', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/206603.png?v=2'}
players['Davis'] = {'player_url': 'https://www.futbin.com//18/player/8710/Sean Davis', 'player_name': 'Sean Davis', 'player_rating': '66', 'player_shortname': 'Davis', 'player_position': 'CM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/226273.png?v=2'}
players['Meara'] = {'player_url': 'https://www.futbin.com//18/player/9224/Ryan Meara', 'player_name': 'Ryan Meara', 'player_rating': '65', 'player_shortname': 'Meara', 'player_position': 'GK', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/25.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/207610.png?v=2'}
players['Escobar'] = {'player_url': 'https://www.futbin.com//18/player/9761/Fidel Escobar', 'player_name': 'Fidel Escobar', 'player_rating': '65', 'player_shortname': 'Escobar', 'player_position': 'CB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/87.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/240196.png?v=2'}
players['Zizzo'] = {'player_url': 'https://www.futbin.com//18/player/9015/Sal Zizzo', 'player_name': 'Sal Zizzo', 'player_rating': '65', 'player_shortname': 'Zizzo', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/184065.png?v=2'}
players['Muyl'] = {'player_url': 'https://www.futbin.com//18/player/10543/Alex Muyl', 'player_name': 'Alex Muyl', 'player_rating': '64', 'player_shortname': 'Muyl', 'player_position': 'RM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/231886.png?v=2'}
players['Murillo'] = {'player_url': 'https://www.futbin.com//18/player/10679/Michael Murillo', 'player_name': 'Michael Murillo', 'player_rating': '64', 'player_shortname': 'Murillo', 'player_position': 'RB', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/87.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/237841.png?v=2'}
players['Duka'] = {'player_url': 'https://www.futbin.com//18/player/10866/Dilly Duka', 'player_name': 'Dilly Duka', 'player_rating': '63', 'player_shortname': 'Duka', 'player_position': 'LM', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/192548.png?v=2'}
players['Etienne Jr.'] = {'player_url': 'https://www.futbin.com//18/player/11522/Derrick Etienne Jr.', 'player_name': 'Derrick Etienne Jr.', 'player_rating': '62', 'player_shortname': 'Etienne Jr.', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/80.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/166539.png?v=2'}
players['Lewis'] = {'player_url': 'https://www.futbin.com//18/player/12866/Zeiko Lewis', 'player_name': 'Zeiko Lewis', 'player_rating': '60', 'player_shortname': 'Lewis', 'player_position': 'RW', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/68.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/218789.png?v=2'}
players['Allen'] = {'player_url': 'https://www.futbin.com//18/player/16224/Brandon Allen', 'player_name': 'Brandon Allen', 'player_rating': '60', 'player_shortname': 'Allen', 'player_position': 'ST', 'player_nation': 'https://cdn.futbin.com/content/fifa18/img/nation/95.png', 'player_photo': 'https://cdn.futbin.com/content/fifa18/img/players/231877.png?v=2'}
|
[
"oyyoson@gmail.com"
] |
oyyoson@gmail.com
|
7ae0bb18328addbd924019da293a8f575931758d
|
55647a80c8b412af9df0ba3f50595cc2f29c25e6
|
/res/scripts/client/bwobsolete_helpers/PyGUI/Console.py
|
5192451643ffdf6abcc8a58fe7a39d63cf12f32a
|
[] |
no_license
|
cnsuhao/WOT-0.9.17-CT
|
0035eb6070fb4fab8d8ee9f8bbc676c10d511cfb
|
d1f932d8cabaf8aa21708622e87f83c8d24d6451
|
refs/heads/master
| 2021-06-08T18:11:07.039293
| 2016-11-19T19:12:37
| 2016-11-19T19:12:37
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 6,546
|
py
|
# 2016.11.19 19:47:46 Central Europe (standard time)
# Embedded file name: scripts/client/bwobsolete_helpers/PyGUI/Console.py
import BigWorld, GUI
import Keys
import math
from Listeners import registerDeviceListener
from PyGUIBase import PyGUIBase
MAX_HISTORY_ENTRIES = 50
class Console(PyGUIBase):
factoryString = 'PyGUI.Console'
def __init__(self, component = None):
PyGUIBase.__init__(self, component)
self.__history = []
self.__historyShown = -1
        if component is None:
self.component = GUI.Window('system/maps/col_white.bmp')
self.component.colour = (0, 0, 0, 255)
self.component.materialFX = 'SOLID'
self.component.height = 0.75
self.component.width = 1.5
self.component.addChild(ScrollableText().component, 'buffer')
self.component.buffer.colour = (0, 0, 0, 0)
self.component.buffer.widthMode = 'CLIP'
self.component.buffer.width = 2.0
self.component.buffer.height = 1.8
self.component.buffer.verticalAnchor = 'TOP'
self.component.buffer.verticalPositionMode = 'CLIP'
self.component.buffer.position.y = 1.0
self.component.addChild(EditField().component, 'editField')
self.component.editField.colour = (64, 64, 64, 255)
self.component.editField.verticalPositionMode = 'CLIP'
self.component.editField.verticalAnchor = 'BOTTOM'
self.component.editField.position.y = -1.0
self.component.editField.height = 0.2
self.component.editField.widthMode = 'CLIP'
self.component.editField.width = 2.0
self.component.script = self
self.onBound()
registerDeviceListener(self)
return
def enableEditField(self, state):
self.component.editField.script.setEnabled(state)
def clear(self):
self.component.buffer.script.clear()
def editFieldChangeFocus(self, editField, state):
try:
languageIndicator = self.component.languageIndicator
languageIndicator.visible = state and editField.enabled
except AttributeError:
pass
def _onEnterText(self, text):
self.component.editField.script.setText('')
if len(text) > 0:
self._insertHistory(text)
self.handleConsoleInput(text)
def _onEscape(self):
self.handleEscapeKey()
def handleConsoleInput(self, msg):
pass
def handleEscapeKey(self):
pass
def getMaxLines(self):
return self.component.buffer.script.getMaxLines()
    def setMaxLines(self, maxLines):
        # the matching getter reads from buffer, so set the limit there as well
        self.component.buffer.script.setMaxLines(maxLines)
def appendLine(self, msg, colour):
self.component.buffer.script.appendLine(msg, colour)
def setEditText(self, text):
self.component.editField.script.setText(text)
def getEditText(self):
return self.component.editField.script.getText()
def fini(self):
if self.editable:
self.editCallback(None)
self.active(False)
return
def enableEdit(self):
self.component.editField.script.setKeyFocus(True)
def disableEdit(self):
self.component.editField.script.setKeyFocus(False)
def handleEditFieldKeyEvent(self, event):
handled = False
if event.isKeyDown():
if event.key == Keys.KEY_PGDN:
self.component.buffer.script.scrollDown()
handled = True
elif event.key == Keys.KEY_PGUP:
self.component.buffer.script.scrollUp()
handled = True
elif event.key == Keys.KEY_UPARROW:
editText = self.getEditText()
if len(self.__history) > 0:
if self.__historyShown == -1:
self.__history.insert(0, editText)
self.__historyShown = 1
else:
if len(editText) > 0:
self.__history[self.__historyShown] = editText
self.__historyShown += 1
self._showHistory()
handled = True
elif event.key == Keys.KEY_DOWNARROW:
editText = self.getEditText()
if len(self.__history) > 0:
if self.__historyShown == -1:
self.__history.insert(0, editText)
self.__historyShown = len(self.__history) - 1
else:
if len(editText) > 0:
self.__history[self.__historyShown] = editText
self.__historyShown -= 1
self._showHistory()
handled = True
return handled
def _insertHistory(self, s):
if len(s) > 0:
if len(self.__history) > 0 and self.__historyShown != -1:
self.__history[0] = s
else:
self.__history.insert(0, s)
elif len(self.__history) > 0 and len(self.__history[0]) == 0:
self.__history.pop(0)
if len(self.__history) > MAX_HISTORY_ENTRIES:
self.__history.pop()
self.__historyShown = -1
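    # _showHistory wraps the history index at both ends, then copies the
    # selected entry into the edit field.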
def _showHistory(self):
if self.__historyShown < 0:
self.__historyShown = len(self.__history) - 1
elif self.__historyShown == len(self.__history):
self.__historyShown = 0
self.setEditText(self.__history[self.__historyShown])
def onBound(self):
PyGUIBase.onBound(self)
self.component.editField.script.onBound()
self.component.editField.script.onEnter = self._onEnterText
self.component.editField.script.onEscape = self._onEscape
self.component.editField.script.setExternalKeyEventHandler(self.handleEditFieldKeyEvent)
def onRecreateDevice(self):
self.component.editField.script.onRecreateDevice()
self.component.editField.script.fitVertically()
self.component.editField.heightMode = 'CLIP'
self.component.buffer.heightMode = 'CLIP'
self.component.buffer.height = 2.0 - self.component.editField.height
def isShowing(self):
return self.alphaShader.value > 0
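# A concrete console typically subclasses Console and overrides the input
# hook, e.g. (an illustrative sketch, not part of the original module):
#
# class EchoConsole(Console):
#     def handleConsoleInput(self, msg):
#         self.appendLine(msg, (255, 255, 255, 255))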
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\bwobsolete_helpers\PyGUI\Console.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.11.19 19:47:46 Central Europe (standard time)
|
[
"info@webium.sk"
] |
info@webium.sk
|
f21631c03270cf938b92455a67e113db9b7c7471
|
1b9ffc6033c810959d739e0389f490f1573bab2a
|
/py_functions/tensorflow_tf/tf_train_Saver.py
|
3a7620d51f3af371511326518f43a73dc1236ba8
|
[] |
no_license
|
VicnentX/MachineLearning_PorjectsAndNotes
|
7867142fee044d49ca03d6177fa50e199316ea5f
|
9eb29e47a67d499da0cd880664ae660ff31cbcad
|
refs/heads/master
| 2020-05-15T07:42:54.748552
| 2019-12-13T05:31:15
| 2019-12-13T05:31:15
| 182,145,261
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,551
|
py
|
# https://blog.csdn.net/mzpmzk/article/details/78647699
import tensorflow as tf
import os
import numpy as np
def myregression():
"""
    Implement a simple linear regression prediction from scratch
    :return:
    """
    # 1. Prepare the data: 1 feature, x is [100, 1], y is [100]; 100 samples
    x = tf.random_normal([100, 1], mean=1.75, stddev=0.5, name="x_data")
    # Matrix multiplication requires 2-D tensors
    y_true = tf.matmul(x, [[0.7]]) + 0.8
    # y_true = x * 0.7 + 0.8
    # 2. Build the linear regression model: 1 feature, one weight, one bias; y = x w + b
    # Start w and b at random values, compute the loss, then optimize from the current state
    # !!!!!!!!! They must be defined as Variables to be optimizable
    # trainable defaults to True; if set to False the value can no longer be changed
    weight = tf.Variable(tf.random_normal((1,1), mean=0.0, stddev=1.0), name="w", trainable=True)
    bias = tf.Variable(0.0, name="b")
    y_predict = tf.matmul(x, weight) + bias
    # 3. Build the loss function: mean squared error
    error = tf.square(y_predict - y_true)
    loss = tf.reduce_mean(error)
    # 4. Gradient descent to minimize the loss
    # The learning rate is chosen between 0 and 1
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    # 5. Since there are Variables, an op is needed to initialize them
    init_op = tf.global_variables_initializer()
    '''
    # Collect tensors/variables; the collection is set up before the session
    # loss is collected below; since it is a single number it is a scalar
    # The summaries are then merged and written to the event file
    '''
    tf.summary.scalar("losses", loss) # "losses" is the name displayed in the dashboard
    tf.summary.histogram("weights", weight) # weight is 2-D, so use a histogram
    # Merge the collected summaries for the event file: define the merge op
    merged = tf.summary.merge_all()
    """
    Define an instance that saves the model
    """
    saver = tf.train.Saver()
    # open session
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(init_op)
        # Print the initial random w and b
        print(f"initial w : {weight.eval()} , b : {bias.eval()}")
        # Create the event file
        writer = tf.summary.FileWriter("/Users/Vincent_Xia/PycharmProjects/MachineLearningProjects&Notes/py_functions/tensorflow_tf/", graph=sess.graph)
        """
        When retraining, resume from the point of the previous save:
        load the model and overwrite its randomly initialized parameters
        with the results of the last training run
        """
        if os.path.exists("/Users/Vincent_Xia/PycharmProjects/MachineLearningProjects&Notes/py_functions/tensorflow_tf/checkpoint/checkpoint"):
            saver.restore(sess, "/Users/Vincent_Xia/PycharmProjects/MachineLearningProjects&Notes/py_functions/tensorflow_tf/checkpoint/model_for_w&b")
        # The optimization loop starts here:
        for i in range(1000):
            # !!!!! Run one optimization step (note: each run performs a single update)
            sess.run(train_op)
            # Run the merged summary op
            summary = sess.run(merged)
            # Write each step's data to the event file
            writer.add_summary(summary, i) # write the i-th value
            # Print the optimized w and b
            print(f"after step {i + 1}: w : {weight.eval()} , b : {bias.eval()}")
        saver.save(sess, "/Users/Vincent_Xia/PycharmProjects/MachineLearningProjects&Notes/py_functions/tensorflow_tf/checkpoint/model_for_w&b")
        # This run's data file is model_for_w&b.data-00000-of-00001
    return None
if __name__ == "__main__":
myregression()
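# To inspect the collected summaries, TensorBoard can be pointed at the event
# file directory written above (shell sketch, same path as in the code):
#   tensorboard --logdir "/Users/Vincent_Xia/PycharmProjects/MachineLearningProjects&Notes/py_functions/tensorflow_tf/"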
|
[
"chunqiu1xia@gmail.com"
] |
chunqiu1xia@gmail.com
|
a33717cf057c1911e2532a76d8657afcd2dd6faa
|
e96651ad687a4290686d9f50b531fc13b9f0edd5
|
/glove_test.py
|
0bf80f0b9923d867d79d049e80c732e1b2f26418
|
[] |
no_license
|
KevinFang97/ano
|
50dc74b79e7956d5a224266b6ea43ed1ca80d276
|
125db6881ff267edc0ceee13cdd3bef697e97ebe
|
refs/heads/master
| 2021-09-01T14:34:22.020179
| 2017-12-27T13:42:38
| 2017-12-27T13:42:38
| 112,478,602
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
import numpy as np
def loadGloveModel(gloveFile):
print("Loading Glove Model")
f = open(gloveFile,'r')
model = {}
    line_count = 0
max_line = 10000 #for test
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
line_count += 1
if line_count%1000 == 0:
print("Total lines read = %dk" % (line_count/1000))
if line_count > max_line:
break
    f.close()
    print("Done.",len(model)," words loaded!")
return model
#if __name__ == "__main__":
# gloveFile = 'glove.6B/glove.6B.100d.txt'
# embedder = loadGloveModel(gloveFile)
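# Example use of the returned embeddings (a sketch; assumes the GloVe file is
# present and both words occur within the first max_line lines):
#
# model = loadGloveModel('glove.6B/glove.6B.100d.txt')
# v1, v2 = model['king'], model['queen']
# cos = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
# print("cosine(king, queen) = %.3f" % cos)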
|
[
"noreply@github.com"
] |
KevinFang97.noreply@github.com
|
031fb5c39a5ed811e9c30aa59fc0a84838d218f8
|
39f0edc7aa402da0dba9ccf5663a6d13e8dfefc1
|
/controller.py
|
492fd51e9ba9162c46e49910598d7c0d7e3133b6
|
[] |
no_license
|
lljuniorll/desafio-tv-globo
|
9718eee01c99b196d2c27829ac307a2281cc7147
|
2836f4998bd47bb05f6ec395d6709f14e7c88cbd
|
refs/heads/master
| 2020-04-24T00:14:58.128604
| 2019-02-20T01:23:41
| 2019-02-20T01:23:41
| 171,560,659
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,365
|
py
|
from model import CutInstruction, CutInstructionSchema, CutJob, CutJobSchema, CutFile, CutFileSchema, session
import datetime
import requests
import os
import re
import constants as cons
cut_instruction_schema = CutInstructionSchema()
cut_file_schema = CutFileSchema()
cut_job_schema = CutJobSchema()
class File:
def __init__(self, file):
self._file = file
@property
def file(self):
return self._file
@property
def first_line_to_read(self):
return self._first_line_to_read
def run(self):
file_to_read = open(self.file, "r")
head, tail = os.path.split(self.file)
filename_date_and_sequence = self.filename_date_and_sequence(tail)
line_number = 1
start_line = self.number_last_line(filename_date_and_sequence['filename_date'])
for line in file_to_read:
if line_number > start_line:
formatted_line = self.extract_line_data(line)
                show = TvShow(formatted_line).save()  # save the cut instruction to the database
                # check whether the cut instruction is longer than 30 seconds
                if TvShow.qualify_video_by_duration(show.duration):
                    # send the cut request to the external API and receive the cut job ID
path = f"{show.id}_{str(show.reconcile_key).replace(' ', '_').strip().lower()}.mp4"
job_external_id = ApiCut().send(show.start_time, show.end_time, path)
job_cut = JobCut()
job_cut.save(job_external_id, path, show)
line_number += 1
        file_to_read.close()  # close the file
        # save the file information to the database
self.log_file_processing(tail, line_number-1, filename_date_and_sequence['filename_date'],
filename_date_and_sequence['filename_sequence'])
@staticmethod
def search_filename(filename):
file = session.query(CutFile).filter_by(filename=filename).first()
result = cut_file_schema.dump(file).data
return result
@staticmethod
def valide_filename(filename):
regex = '[0-9]{8}(?:_)[0-9]{6}(.txt)$'
return True if re.match(regex, filename, flags=0) else False
@staticmethod
def filename_date_and_sequence(filename):
result = {'filename_date': filename[:8], 'filename_sequence': filename[9:15]}
return result
@staticmethod
def extract_line_data(line):
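        # Fixed-width layout assumed by the slices below: start_time at
        # columns 5-28, end_time at 29-51, title at 106-138, duration at
        # 184-195 and reconcile_key at 279-311 (0-based, end-exclusive).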
formatted_line = {'start_time': line[5:29].strip(), 'end_time': line[29:52].strip(),
'title': line[106:139].strip(), 'duration': line[184:196].strip(),
'reconcile_key': line[279:312].strip()}
return formatted_line
@staticmethod
def filename_sequence_date():
consulta = session.query(CutFile).order_by(CutFile.id.desc()).first()
result = cut_file_schema.dump(consulta).data
return int(result['last_line']) if result else 8
@staticmethod
def number_last_line(filename_date):
consulta = session.query(CutFile).filter_by(filename_date=filename_date).order_by(CutFile.filename_sequence.desc()).first()
result = cut_file_schema.dump(consulta).data
return int(result['last_line']) if result else 8
@staticmethod
def log_file_processing(filename, last_line, filename_date, filename_sequence):
try:
session.add(CutFile(filename, last_line, filename_date, filename_sequence))
session.commit()
return True
except:
return False
@property
def list(self):
files = session.query(CutFile).all()
result = cut_file_schema.dump(files, many=True).data
return result
class TvShow:
def __init__(self, instruction):
self._instruction = instruction
@property
def instruction(self):
return self._instruction
    # TODO: clean up the bare try/except below
def save(self):
try:
record = CutInstruction(self.instruction['start_time'], self.instruction['end_time'],
self.instruction['title'], self.instruction['duration'],
self.instruction['reconcile_key'])
session.add(record)
session.commit()
return record
except:
            print(f'Error while trying to save {self.instruction}')
return False
@property
def list(self):
contato = session.query(CutInstruction).all()
result = cut_instruction_schema.dump(contato, many=True).data
return result
@staticmethod
def qualify_video_by_duration(duration):
duration = duration[:-3]
duration = datetime.datetime.strptime(duration, '%H:%M:%S')
interval = datetime.datetime.strptime(cons.DURATION_MIN, '%H:%M:%S')
return True if duration > interval else False
class JobCut:
@staticmethod
def save(job_external_id, path, cut):
try:
session.add(CutJob(job_external_id, path, cut))
session.commit()
return True
except:
return False
@staticmethod
def update_status(job_external_id, status):
job = session.query(CutJob).filter_by(job_external_id=job_external_id).first()
job.status = status
result = cut_job_schema.dump(job).data
session.commit()
return result
@staticmethod
def list_jobs_in_progress():
job = session.query(CutJob).filter_by(status=cons.STATUS_IN_PROGRESS).all()
result = cut_job_schema.dump(job, many=True).data
return result
@staticmethod
def list_jobs_completed():
job = session.query(CutJob).filter_by(status=cons.STATUS_COMPLETED).all()
result = cut_job_schema.dump(job, many=True).data
return result
@staticmethod
def list_all_jobs():
jobs = session.query(CutJob).all()
result = cut_job_schema.dump(jobs, many=True).data
return result
class ApiCut:
@staticmethod
def send(start_time, end_time, path):
response = requests.post(cons.URL_API_CUT, json={'start_time': start_time,
'end_time': end_time, 'path': path})
if response.status_code == 200:
response = response.json()
return response['job_id']
else:
return None
@staticmethod
def status(job_id):
response = requests.get(f'{cons.URL_API_STATUS}/{job_id}')
if response.status_code == 200:
response = response.json()
if response['status'] == cons.STATUS_COMPLETED:
return True
else:
return False
else:
return False
class ApiGloboPlay:
def __init__(self, title, duration, path):
self._title = title
self._duration = duration
self._path = path
def send(self):
response = requests.post(cons.URL_API_DELIVERY, json={'title': self._title,
'duration': self._duration, 'path': self._path})
if response.status_code == 200:
response = response.json()
return True
else:
return False
|
[
"sergiotoledo@bconn.com.br"
] |
sergiotoledo@bconn.com.br
|
41a14c0e31f59ee7256eda5a6ce03b147a61303d
|
7dbe0daf56af3752498c0b70ebbad2e2fc9b11e4
|
/targetshare/models/relational/proximity_model.py
|
d4d055131e12c0252134f85bb76028772a633909
|
[] |
no_license
|
edgeflip/edgeflip
|
db737518ff9a6f377f3dfab16f27572a5e68c1aa
|
bebd10d910c87dabc0680692684a3e551e92dd2a
|
refs/heads/master
| 2020-04-09T16:41:15.815531
| 2015-05-17T22:14:42
| 2015-05-17T22:14:42
| 7,248,450
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 560
|
py
|
from django.db import models
class ProximityModel(models.Model):
proximity_model_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=256, null=True)
description = models.CharField(max_length=1024, null=True)
is_deleted = models.BooleanField(default=False)
create_dt = models.DateTimeField(auto_now_add=True)
delete_dt = models.DateTimeField(null=True)
def __unicode__(self):
return u'%s' % self.name
class Meta(object):
app_label = 'targetshare'
db_table = 'proximity_models'
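# Typical ORM usage (a sketch; assumes the 'targetshare' app and its
# 'proximity_models' table are migrated):
#
# pm = ProximityModel.objects.create(name='px3', description='3-degree proximity')
# live = ProximityModel.objects.filter(is_deleted=False)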
|
[
"jesse@edgeflip.com"
] |
jesse@edgeflip.com
|
42af8f467697b652ec040251c1e83cae85438dae
|
df8404f67042f8f27251d3d43804867408ec756d
|
/qps-analyse.py
|
dfef433175174e14bed7a977a074846f3f3cc7bd
|
[
"Apache-2.0"
] |
permissive
|
fcelda/dns-tcp-stats
|
97b4d7e28e0aa118bf81db50139a1c1d5faf2bb8
|
6b38596fed480b535aa569a319952c589a3c5a23
|
refs/heads/master
| 2020-12-30T13:28:49.069171
| 2017-05-14T09:59:15
| 2017-05-14T09:59:15
| 91,223,356
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,749
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import csv
import json
import collections
import ipaddress
def split_value(row, name, map_cb=None):
value = row.get(name, "")
values = value.split(",") if value != "" else []
if map_cb is None:
return values
else:
return [map_cb(v) for v in values]
def decode_row(row):
if row["tcp.stream"] == "":
return
client = (row["ip.src"] or row["ipv6.src"], int(row["tcp.srcport"]))
server = (row["ip.dst"] or row["ipv6.dst"], int(row["tcp.dstport"]))
if client[1] == 53:
client, server = server, client
return {
"flow": int(row["tcp.stream"]),
"time": float(row["frame.time_relative"]),
"client": client,
"server": server,
"dns_id": split_value(row, "dns.id"),
"dns_query": split_value(row, "dns.flags.response", lambda flag: flag == "0"),
"dns_response": split_value(row, "dns.flags.response", lambda flag: flag == "1"),
"dns_qtype": split_value(row, "dns.qry.type", lambda type: int(type)),
}
def private_ip(ip):
return ipaddress.ip_address(unicode(ip)).is_private
packets = []
servers = set()
with open(sys.argv[1]) as cvsfile:
reader = csv.DictReader(cvsfile, delimiter="\t", strict=True)
for row in reader:
packet = decode_row(row)
if not packet:
continue
if private_ip(packet["client"][0]) or private_ip(packet["server"][0]):
continue
packets.append(packet)
servers.add(packet["server"][0])
# count flows
flows = {}
for packet in packets:
# skip server initiated connections
if packet["client"][0] in servers:
continue
flows.setdefault(packet["flow"], {"queries": 0, "client": packet["client"], "qtypes": set()})
flow = flows[packet["flow"]]
flow["queries"] += len([q for q in packet["dns_query"] if q])
flow["qtypes"] |= set(packet["dns_qtype"])
# delete flows with unassigned types, AXFR/IXFR, or ANY
def standard_type(type):
return 1 <= type and type <= 258 and type not in [251, 252, 255]
flows = dict((k, v) for (k, v) in flows.items() if all(standard_type(t) for t in v["qtypes"]))
# histogram number of queries -> number of streams
histogram = {}
for flow, data in flows.items():
queries = data["queries"]
histogram.setdefault(queries, 0)
histogram[queries] += 1
# interesting clients
clients = {}
for flow, data in flows.items():
queries = data["queries"]
if queries > 1:
ip = data["client"][0]
clients.setdefault(ip, {})
clients[ip].setdefault(queries, 0)
clients[ip][queries] += 1
print(json.dumps(dict(histogram=histogram, clients=clients), indent=2))
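# Usage sketch: ./qps-analyse.py capture.tsv, where capture.tsv is a
# tab-separated tshark field export with a header row, e.g. (hypothetical
# capture file):
#   tshark -r dump.pcap -T fields -E header=y -E separator=/t \
#     -e tcp.stream -e frame.time_relative -e ip.src -e ipv6.src -e ip.dst \
#     -e ipv6.dst -e tcp.srcport -e tcp.dstport -e dns.id \
#     -e dns.flags.response -e dns.qry.type > capture.tsv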
|
[
"jv@fcelda.cz"
] |
jv@fcelda.cz
|
7e28279feb938861c7db7ba7ea03dc38206b752f
|
47b38cc1fc3a032f60d772e0ffcbe556e2986b8f
|
/svn_connector/svn_conn.py
|
cbcff5b3ce4903efdbb8000954d71badbdfe8fca
|
[
"Apache-2.0"
] |
permissive
|
kant/svn-connector-for-storediq
|
64ad0f5a2f5f79b6e76e577c23406c00075ad7d9
|
7c0f24258b2fe06e2c41e7cb62c8c8e812b5d30a
|
refs/heads/master
| 2020-03-19T12:22:55.447100
| 2018-06-07T12:51:42
| 2018-06-07T12:51:42
| 136,513,873
| 0
| 0
|
Apache-2.0
| 2018-06-07T18:05:29
| 2018-06-07T18:05:28
| null |
UTF-8
|
Python
| false
| false
| 32,027
|
py
|
'''
#------------------------------------------------------------------------------------------
# DISCLAIMER OF WARRANTIES.
# The following code is sample code created by IBM Corporation. IBM grants you a
# nonexclusive copyright license to use this sample code example to generate similar
# function tailored to your own specific needs. This sample code is not part of any
# standard IBM product and is provided to you solely for the purpose of assisting you
# in the development of your applications. This example has not been thoroughly tested
# under all conditions. IBM, therefore cannot guarantee nor may you imply reliability,
# serviceability, or function of these programs. The code is provided "AS IS", without
# warranty of any kind. IBM shall not be liable for any damages arising out of your or
# any other parties use of the sample code, even if IBM has been advised of the possibility
# of such damages. If you do not agree with these terms, do not use the sample code.
# Licensed Materials - Property of IBM
# 5725-M86
# Copyright IBM Corp. 2017 All Rights Reserved.
# US Government Users Restricted Rights - Use, duplication or disclosure restricted by
# GSA ADP Schedule Contract with IBM Corp.
#------------------------------------------------------------------------------------------
'''
import os, commands
import traceback
import time
import re
import types
from stat import *
from siq_connector.templateconnection import TemplateConnection, TemplatePropertyNames
from siq_connector.templateexceptions import TemplateException, TemplateDirTooLargeException
from siq_connector.templateexceptions import TemplateOtherException, Template_Err
from siq_connector.templateexceptions import TemplateNotConnectedException
from siq_connector.templateexceptions import TemplateFileNotFoundException
from siq_connector.templateexceptions import TemplateInvalidPathException
from siq_connector.templateexceptions import TemplateConnectionError
from siq_connector.templateexceptions import Template_SSL_Exception
from siq_connector.node import Node
import requests
from requests.exceptions import SSLError, ConnectionError
import logging
import pysvn
'''
A simple file handle class to maintain state data for file reads and writes.
'''
class Handle(object):
def __init__(self, path, mode, size=0, offset=0, url=None):
self.path = path
self.mode = mode
self.size = size
self.offset = offset
self.url = url
self.gen = None
self.attrMap = None
self.writeData = None # write data bytearray
self.writeBufCount = 0 # write buffer current size
self.writeInputBufMax = 125 # max number of 1MB buffers before Conns write
self.response = None
self.os_handle = None
def __del__(self):
'''
Clean up stale OS handle, if any
'''
if self.os_handle:
os.close(self.os_handle)
self.os_handle = None
def getOsHandle(self):
return self.os_handle
def setOsHandle(self, handle):
self.os_handle = handle
class TSvnConnector(TemplateConnection):
# Enable for local logging. Use appropriate log file path
_mylogger = logging.getLogger("SvnConnector")
_mylogger.setLevel(logging.DEBUG)
_mylogger.addHandler(logging.FileHandler("/deepfs/config/sampleconnector.log"))
DUPLICATE_CHECK_FIELDS = ['share']
# ------------------------------------------------------------------------
def __init__(self, serverName, userName, userPwd, options, serial, mountPoint):
"""
Initialize the SvnConnector connection.
"""
self._mylogger.info('SvnConnector.__init__(): servname=%s, uname=%s, pwd=%s, options=%s, serial=%s, mountPoint=%s' % (str(serverName), str(userName), str(userPwd), str(options), str(serial), str(mountPoint)))
# Determine the maximum size of a page of items to return per call during harvest listdir
self._listdir_page_size = 500
#---------------------------------------------------
# serverName: <host-IP> Example: Server: '9.30.52.63' or 'localhost'
# Option String: Supported options: multiple options separated by ';'
# <share>: Example: 'share=/mnt/demo-A'
# <mount>: Example: 'mount=/tmp/my_mount'
# - If mount is absent, value of mountPoint is used.
#---------------------------------------------------
self._server_name = serverName
self._userName = userName
self._userPwd = userPwd
self._options = options
self._serial = serial
self._initial_dir = None
self._start_dir = None
self._end_dir = None
self._top_level = True
self._is_connect = True
self._volume_register = False
# self._share = options.get('share', None)
# if not self._share:
# # self._mylogger.info("SvnConnector.__init__(): servname=%s, uname=%s, options=%s: 'share' missing in options" % (str(serverName), str(userName), str(options)))
# # return
self._mount_base = options.get('mount', None)
if not self._mount_base:
self._mount_base = mountPoint
if self._mount_base.endswith('/'):
sep = ''
else:
sep = '/'
self._mp = self._mount_base + sep
# self._mylogger.info("SvnConnector.__init__(): _mp=%s" % (str(self._mp)))
# Default parameters set to not require a valid server certificate.
# TODO: Certificate verification for NFS mount (NFS over SSH tunnel?)
# ------------------------------------------------------------------------
@classmethod
def get_attribute_def(cls):
'''
Return array of custom attributes to be registered with StoredIQ later
'''
import tsvn_connector.sample_attributes as attr
return attr.attributes
# ------------------------------------------------------------------------
@classmethod
def get_fs_name(cls):
'''
Provide the name of this connector to StoredIQ File Manager
'''
from tsvn_connector.sample_attributes import fs_name
return fs_name
# ------------------------------------------------------------------------
def _fqn(self, path):
'''
_fqn: get fully qualified name for the given path
Object paths passed down from StoredIQ are relative to the data source 'mount'
'''
fpath = path
# self._mylogger.info('1) SvnConnector.__fqn(): path=%s' % str(fpath))
# self._mylogger.info('2) SvnConnector.__fqn(): self._mp=%s' % str(self._mp))
if not fpath.startswith('/'):
fpath = self._mp + path
return fpath
# ------------------------------------------------------------------------
def _get_login(self, realm, user, may_save ):
'''
This is get login call back function.
'''
return True, self._userName, self._userPwd, False
def ssl_server_trust_prompt(self, trust_dict ):
'''
This is ssl server trust prompt.
'''
return True, 1, False
def connect(self):
"""
Connect to the server containing the share.
"""
self._mylogger.info('>>>>>>>>>>Connect<<<<<<<<<')
self._top_level = True
# self._mylogger.info('SvnConnector.connect(): serverName: %s, userName: %s' % (self._server_name, self._userName))
if not self._mp:
# self._mylogger.info('SvnConnector.connect(): Unknown Mount-point: serverName: %s, share: %s' % (self._server_name, self._share))
return False
# Create mount point path, if doesn't exist
if not os.path.ismount(self._mp):
if not os.path.exists(self._mp):
os.makedirs(self._mp)
else:
# self._mylogger.info('SvnConnector.connect(): %s, already mounted' % (self._mp))
# TODO: Unmount only if mounted. Ignore error for now.
return True
self._top_level = True
mount_point = '/deepfs/{0}/svn'.format(self._server_name)
if not os.path.exists(mount_point):
self._mylogger.info('>>>>>>>>>>Connect<<<<<<<<< mount_point: %s'% mount_point)
os.makedirs(mount_point)
cmd = "/bin/mount --bind {0} {1}".format(mount_point, self._fqn(self._mp))
self._mylogger.info('>>>>>>>>>>Connect<<<<<<<<< bind: %s'% cmd)
errorcode, output = commands.getstatusoutput(cmd )
if errorcode:
            emsg = 'mount %s failed, status=%s, msg=%s' % (self._mp, str(errorcode), output)
            raise TemplateInvalidPathException(emsg)
return True
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
def disconnect(self):
"""
Disconnect from the Connections server.
"""
self._mylogger.info('SvnConnector.disconnect:')
if self._mp:
errorcode, output = commands.getstatusoutput('/bin/umount %s' % self._mp)
self._mylogger.info('SvnConnector.disconnect(): umount %s, status=%s, output=%s' % ( self._mp, str(errorcode), output))
self._mp = None
return True
# ------------------------------------------------------------------------
def shutdown(self):
"""
Does a destroy of the Connections server and cleans up all resources.
"""
self._mylogger.info('SvnConnector.shutdown:')
self.disconnect()
return True
# ------------------------------------------------------------------------
def _get_extras(self, path):
'''
# Build primary attributes for the node given by path into extras.
# For Sample_connector, we choose to provide information in the primary attribute
# in addition to the standard (file system metadata) information.
# In essence, the value (a dictionary) of 'primary_attributes' contains just one key/value pair.
# The format of 'primary_attributes' that is added to 'extras' is as shown below:
# { 'primary_attributes' : { 'SampleConnObjFileType' : <File contents type> }}
# Note that file contents type is retrieved with the help of 'file' command.
'''
# Get content type if not a directory
extras = {}
fqn = self._fqn(path)
if not self.is_dir(fqn):
errorcode, output = commands.getstatusoutput("/usr/bin/file %s" % fqn)
ctype = 'Unknown content type'
if not errorcode:
# Translate output string to nice looking mime type
if 'ASCII text' in output:
ctype = 'text/plain'
elif 'ASCII C program text' in output:
ctype = 'text/C program'
elif 'executable for python' in output:
ctype = 'text/x-python'
elif 'XML' in output:
ctype = 'text/xml'
elif 'executable,' in output:
ctype = 'binary/executable'
extras['primary_attributes'] = {'SampleConnObjFileType' : ctype}
return extras
# ------------------------------------------------------------------------
def is_read_only(self, path):
"""
Checks and returns appropriate read/write permissions on the path.
"""
#self._mylogger.info('SvnConnector.isReadOnly(): path=%s' % path)
st = self.lstat(path)
mode = st[ST_MODE]
rmask = (mode & (S_IRUSR | S_IRGRP | S_IROTH))
wmask = (mode & (S_IWUSR | S_IWGRP | S_IWOTH))
if ((rmask == (S_IRUSR | S_IRGRP | S_IROTH)) and wmask == 0):
return True
return False
def get_unsupported_characters(self):
return ['\\', ':', '*', '?', '"', '<', '>', '|']
# ------------------------------------------------------------------------
def create_checkout(self, path):
svn_client = pysvn.Client()
svn_client.callback_get_login = self._get_login
svn_client.callback_ssl_server_trust_prompt = self.ssl_server_trust_prompt
mount_point = '/deepfs/{0}/svn'.format(str(self._server_name))
local_checkout = '{0}/{1}'.format(mount_point, path)
full_url = 'https://{0}/svn/{1}'.format(self._server_name, path)
#self._mylogger.info('SvnConnector.checkout_mount(): full_url: %s local_checkout: %s' % (full_url, local_checkout))
#self._mylogger.info('SvnConnector.checkout_mount(): CheckOUT Started')
svn_client.checkout(full_url, local_checkout, recurse=True)
#self._mylogger.info('SvnConnector.checkout_mount(): CheckOUT Complete')
# Build mount command w/ arguments
# mount --bind afiz /deepfs/imports/template/svn
cmd = "/bin/mount --bind {0} {1}".format(mount_point, self._fqn(self._mp))
errorcode, output = commands.getstatusoutput(cmd )
self._mylogger.info('SvnConnector.checkout_mount(): cmd=%s, status=%s' % ( cmd, str(errorcode)))
if errorcode:
            emsg = 'mount %s failed, status=%s, msg=%s' % (self._mp, str(errorcode), output)
            raise TemplateInvalidPathException(emsg)
def start_checkout(self, path):
mount_point = '/deepfs/{0}/svn/{1}'.format(self._server_name, path)
if self._volume_register:
if not os.path.exists(mount_point):
os.makedirs(mount_point)
def list_dir(self,node):
'''
Lists the files and directories in the specified directory.
'''
self._mylogger.info('SvnConnector.list_dir(): Entered >>>>>>>>>>>>LD')
path = node.path
extras = node.extras
if self._is_connect:
self.create_checkout(path)
self._is_connect = False
self._mylogger.info('SvnConnector.list_dir(): path=%s, extras=%s' % (path, str(extras)))
if self._top_level:
# We're at top level during traversal. Pick user-specified initial-directory
self._initial_dir = path
self._top_level = False
try:
# With OS help, retrieve child objects of path
self._mylogger.info('1) SvnConnector.list_dir(): path=%s' % (str(self._fqn(path))))
dir_entries = os.listdir(self._fqn(path))
self._mylogger.info('2) SvnConnector.list_dir(): dir_entries=%s' % (str(dir_entries)))
except IOError as e:
#self._mylogger.info('SvnConnector.list_dir(): Error %s, path=%s, extras=%s' % (str(e), path, str(extras)))
            raise IOError('SvnConnector.list_dir(): Error "%s" on "%s"' % (e, path))
self.retlist = []
# When node is top level directory
if not dir_entries:
self._mylogger.info('SvnConnector.list_dir(): node_path=%s' % (str(node.path)))
return self.list_dir_next(node)
for item in dir_entries:
#self._mylogger.info('SvnConnector.list_dir(): path=%s, ini_dir=%s, item=%s' % (path, self._initial_dir, item))
if path: # TODO: optimize by pulling fixed code outside loop
# Construct item_path (relative to mount point) to be returned to File Manager
# and lstat_path to obtain lstat properties using OS call
item_path = lstat_path = path + '/' + item
                if self._initial_dir:
                    # Don't include initial directory in return elements
                    # (str.lstrip() strips a character set, not a prefix, so strip it explicitly)
                    if item_path.startswith(self._initial_dir):
                        item_path = item_path[len(self._initial_dir):]
                    if item_path.startswith('/'):
                        item_path = item_path[1:]
else:
lstat_path = item_path = item
#self._mylogger.info('SvnConnector.list_dir(): item_path=%s, lstat_path=%s' % (item_path, lstat_path))
new_node = Node(item_path, self._get_extras(lstat_path))
new_node.set_lstat(self.lstat(lstat_path))
self.retlist.append(new_node)
#self._mylogger.info('SvnConnector.list_dir(): path=%s, extras=%s' % (path, str(extras)))
return self.list_dir_next(node)
# ------------------------------------------------------------------------
def list_dir_next(self, node):
'''
Retrieve the next 'count' items from the current list_dir.
'''
#self._mylogger.info('SvnConnector.list_dir_next(): path=%s, extras=%s' % (node.path, str(node.get_extras())))
self._listdir_page_size = len(self.retlist) + 1
return self.retlist
# ------------------------------------------------------------------------
def lstat(self, path, extras={}):
'''
Get the attributes for the specified file.
        If extras has lstat, return it. For top-level dirs, return lstat.
        Otherwise return None.
'''
self._mylogger.info("SvnConnector.lstat(): path=%s, extras=%s" % (path, str(extras)))
try:
return tuple(os.lstat(self._fqn(path)))
except Exception, e:
self._mylogger.info("SvnConnector.lstat(): Exception, %s on path=%s" % (e, path))
raise IOError(e)
# ------------------------------------------------------------------------
def lstat_extras(self, path, extras={}):
'''
Get the file system and the extra attributes for the specified file.
'''
#self._mylogger.info('SvnConnector.lstat_extras(): path=%s, extras=%s' % (path, str(extras)))
return (self.lstat(path, extras), self.get_node(path, extras))
# ------------------------------------------------------------------------
def create_file(self, path, size, attrMap):
"""
Create a file: If the file cannot be created, IOError is raised.
Returns Handle if successful.
TODO: Spec for attr dictionaries
:param path: fully qualified file name including path
:type path: str
:returns: an OSfile handle object.
"""
fileHandle = Handle(path, TemplateConnection.FILE_WRITE, url=None)
#self._mylogger.info('SvnConnector.create_file(): path=%s, size=%s, map=%s' % (path, str(size), str(attrMap)))
try:
fileHandle.setOsHandle(os.open(self._fqn(path), os.O_RDWR|os.O_CREAT))
except Exception, e:
raise IOError(e)
return fileHandle
# ------------------------------------------------------------------------
def _doesFileExist(self, requestPath):
'''
_doesFileExist(): Check if file w/ given path exists
'''
#self._mylogger.info("SvnConnector._doesFileExist(): requestPath=%s" % requestPath)
try:
self.lstat(requestPath)
return True
except Exception, e:
return False
# ------------------------------------------------------------------------
def _truncateFile(self, path):
'''
_truncateFile(): Truncate file size w/ given path to zero
'''
#self._mylogger.info("SvnConnector._truncateFile(): path=%s" % path)
try:
return os.open(self._fqn(path), os.O_RDWR|os.O_TRUNC)
except Exception, e:
#self._mylogger.info("SvnConnector._truncateFile(): Exception, %s on path=%s" % (e, path))
raise IOError(e)
# ------------------------------------------------------------------------
def open_file(self, path, mode=TemplateConnection.FILE_READ):
"""
Open a file:
If the file cannot be opened, IOError is raised.
Only support modes, FILE_READ and FILE_WRITE.
An existing file open for FILE_WRITE will truncate the file.
If mode is omitted, it defaults to FILE_READ
If mode is FILE_READ and file does not exist, IOError is raised
:param path: fully qualified file name including path
:type path: str
:returns: a file handle object.
"""
fileHandle = Handle(path, mode, url=None)
#self._mylogger.info('SvnConnector.open_file(): path=%s, mode=%s' % (path, str(mode)))
if mode==TemplateConnection.FILE_READ:
if not self._doesFileExist(path):
raise IOError("SvnConnector.open_file: The file, %s cannot be opened." % (path))
else:
try:
fileHandle.setOsHandle(os.open(self._fqn(path), os.O_RDONLY))
#self._mylogger.info("SvnConnector.open_file: The file, %s opened successfully." % (path))
except Exception, e:
#self._mylogger.info("SvnConnector.open_file: Error %s opening %s." % (e, path))
raise IOError(e)
elif mode==TemplateConnection.FILE_WRITE:
if not self._doesFileExist(path):
#self._mylogger.info("SvnConnector.open_file: File '%s' doesn't exist, create it." % (path))
try:
fileHandle.setOsHandle(os.open(self._fqn(path), os.O_RDWR|os.O_CREAT))
except Exception, e:
raise IOError(e)
else:
#self._mylogger.info("SvnConnector.open_file: File '%s' exists, truncate it." % (path))
try:
fileHandle.setOsHandle(self._truncateFile(path))
except Exception, e:
raise IOError(e)
else:
#self._mylogger.info("SvnConnector.open_file:Open mode %s is not supported." % (mode))
raise IOError("SvnConnector.open_file:Open mode %s is not supported." % (mode))
return fileHandle
# ------------------------------------------------------------------------
def write_file(self, path, fileHandle, filebuf):
"""
Write to a file:
Write the buffer passed in to the file handle that is passed in.
Return the number of bytes of written.
"""
#self._mylogger.info("SvnConnector.write_file(): path=%s" % (path))
if not fileHandle or not fileHandle.getOsHandle():
raise IOError("SvnConnector.write_file: File %s, not open." % (path))
if (fileHandle.mode != TemplateConnection.FILE_WRITE) :
#self._mylogger.info("SvnConnector.write_file: file: '%s' is in mode: %s, write is not allowed" % (path, fileHandle.mode))
raise IOError("SvnConnector.write_file: file: '%s' is in mode: %s, write is not allowed" % (path, fileHandle.mode))
w = count = 0
if (filebuf != None):
count = len(filebuf)
try:
w = os.write(fileHandle.getOsHandle(), filebuf)
except Exception, e:
raise IOError("SvnConnector.write_file: Error %s on '%s'." % (e, path))
# TODO: Should short writes be reported?
#self._mylogger.info('SvnConnector.write: input buffer count: %s' % (count))
return w
# ------------------------------------------------------------------------
def read_file(self, path, fileHandle, readSize):
"""
Read from a file:
Read <readsize> bytes from the file (less if the read hits EOF before obtaining size bytes). Reads from the current
position of the file handle or the specified offset.
If readSize is > 0 this number of bytes will be read, less if it hits EOF.
If readSize is = 0 then a 0 length buffer will be returned
If readSize is < 0 the whole file will be read from the offset to the end
If the readOffset in the fileHandle is at or past the EOF, a 0 length buffer is returned.
returns a buffer containing the bytes that were read.
"""
buf = None
readfileURL = None
#self._mylogger.info("SvnConnector.read_file(): path=%s, size=%s" % (path, readSize))
if not fileHandle.getOsHandle():
raise IOError("SvnConnector.read_file: File %s, not open." % (path))
try:
buf = os.read(fileHandle.getOsHandle(), readSize)
except Exception, e:
raise IOError("SvnConnector.read_file: Error %s on '%s'." % (e, path))
return buf
# ------------------------------------------------------------------------
def close_file(self, fileHandle):
"""
Close a file:
        Closes the file represented by the file handle. Will return the stat and stat extras if they are available at the
        time of close. If these are not available, they will be retrieved from the repository as part of finishing write
        operations. These are used to fill the audit and attribute information. If the stats are not available, an empty
        dictionary is returned.
"""
#self._mylogger.info('SvnConnector.close: done')
if not fileHandle.getOsHandle():
raise IOError("SvnConnector.close: file not open.")
try:
buf = os.close(fileHandle.getOsHandle())
fileHandle.setOsHandle(None)
except Exception, e:
raise IOError("SvnConnector.close: Error %s." % (e))
return {}
# ------------------------------------------------------------------------
def get_audit_attr_names(self, path):
'''
Return a list of attribute names to be included in the audit. When audit is needed,
StoredIQ will look for these names in the lstat extras. If the name is found the
value will be included in the audit information. If the value is not found in extras,
then a blank value will be shown.
If there are no attributes to audit besides the standard file system attributes,
this method should return an empty list.
For now, will add the content address, block size, and storage policy to the audit.
Can assess whether others are needed.
'''
return []
# ------------------------------------------------------------------------
def setTimestamps(self, path, mtime, atime):
'''
Set the timestamp attributes for the specified file.
'''
#self._mylogger.info('SvnConnector.setTimestamps path: >%s< ' % (path))
try:
os.utime(self._fqn(path), (mtime, atime))
except Exception, e:
            raise IOError("SvnConnector.setTimestamps: Error %s." % (e))
return True
# ------------------------------------------------------------------------
def makedirs(self, path):
'''
Make directory
'''
#self._mylogger.info('SvnConnector.makedirs(): path->%s< ' % (path))
try:
os.makedirs(self._fqn(path))
except Exception, e:
raise IOError("SvnConnector.makedirs: Error %s." % (e))
return True
# ------------------------------------------------------------------------
def rmdir(self, path):
'''
Remove directory
'''
#self._mylogger.info('SvnConnector.rmdir(): path->%s< ' % (path))
try:
p = path
if not path.startswith('/'):
p = self._fqn(path)
os.rmdir(p)
except Exception, e:
raise IOError("SvnConnector.rmdir: Error %s." % (e))
return True
# ------------------------------------------------------------------------
def unlink(self, path):
'''
Deletes a file.
'''
#self._mylogger.info('SvnConnector.unlink: path->%s< ' % (path))
try:
p = path
if not path.startswith('/'):
p = self._fqn(path)
os.unlink(p)
except Exception, e:
#self._mylogger.info("SvnConnector.unlink(): Exception, %s on path=%s" % (e, path))
raise IOError("SvnConnector.unlink: Error %s." % (e))
return True
# ------------------------------------------------------------------------
def get_list_dir_page_size(self):
'''
Returns the page size that will be used when calling list_dir(). Should return a value < 1 if
there is no paging. By default this returns 0.
'''
return self._listdir_page_size
# ------------------------------------------------------------------------
def validate_directories_in_filemgr(self):
'''
Validate all three paths
'''
return True
# ------------------------------------------------------------------------
def validate_directories(self, path, startDir, endDir):
'''
Validates that the initial directory, the start directory, and the end directory are
valid for a volume definition. If not specified in the configuration, these directories
        may be set to None. It is up to the file manager implementation to determine whether
None values are valid.
Returns a list of error messages found when checking the directories. Returns
        None or an empty collection if all of the directories are valid. If any exception
is raised by the implementation, it is assumed the directories are not valid.
'''
#self._mylogger.info("SvnConnector.validate_directories(): path >%s< startDir: >%s< endDir: >%s<" % (path, startDir, endDir))
if not self._volume_register:
self._volume_register = True
self.start_checkout(path)
errlist = []
if not self.is_dir(self._fqn(path)):
errlist.append("path '%s' is not a valid directory." % path)
else:
self._initial_dir = path
if startDir and not self.is_dir(self._fqn(path + '/' + startDir)):
errlist.append("startDir '%s' is not a valid directory." % startDir)
else:
self._start_dir = startDir
if endDir and not self.is_dir(self._fqn(path + '/' + endDir)):
errlist.append("endDir '%s' is not a valid directory." % endDir)
else:
self._end_dir = endDir
#self._mylogger.info("SvnConnector.validate_directories(): idir=%s, sdir=%s, edir=%s" % (self._initial_dir, self._start_dir, self._end_dir))
return errlist
# ------------------------------------------------------------------------
def verify_mount_in_filemgr(self):
'''
Connections validates mounts to avoid costly calls to list_dir()
'''
#self._mylogger.info("SvnConnector.verify_mount_in_filemgr():")
return True
# ------------------------------------------------------------------------
def verify_mount(self, path='', includeDirs=None):
'''
This validates that the path exists to determine readability. It calls
isReadOnly to determine writability, which is currently not implemented.
It does not validate the includeDirs as this requires a list_dir, which may
be inefficient.
'''
verified = True
errlist = []
readable = False
writable = False
#self._mylogger.info('SvnConnector.verify_mount(): path=%s, includeDirs=%s' % (path, includeDirs))
try:
readable = self.is_dir(path)
except Exception, err:
#self._mylogger.info("SvnConnector.verify_mount(): %s failed checking path. (%s)" % (path, err))
errlist.append(str(err))
verified = False
try:
writeErrlist, writable, deleted = self._verify_write(path)
errlist.extend(writeErrlist)
except Exception, err:
#self._mylogger.info("SvnConnector.verify_mount(): %s failed verifying write. (%s)" % (path, err))
errlist.append(str(err))
return (verified, errlist, readable, writable)
if __name__ == '__main__':
print 'sample_conn.main(): started'
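# A hypothetical usage sketch (an addition; the constructor and the rest of
# the class live in an earlier part of this file, so the names below are
# illustrative only). The helpers above raise IOError on failure and return
# True on success:
#
#     conn = SvnConnector()
#     errors = conn.validate_directories('/data', 'incoming', 'done')
#     if not errors:
#         conn.makedirs('/data/incoming/batch1')
#         conn.unlink('/data/incoming/batch1/stale.tmp')
#         conn.rmdir('/data/incoming/batch1')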
# [end of file; author: kant.noreply@github.com]

# ---- file: /log_settings.py (repo: zaharay/json_parser, no license, Python) ----
"""
log_settings.py
Модуль настроек логгеров:
simple_logger - логгер для сообщений 'Старт' и 'Стоп'
app_logger - логгер для сообщений с приоритетом от DEBUG и выше
"""
import logging
class MegaHandler(logging.Handler):
def __init__(self, filename):
logging.Handler.__init__(self)
self.filename = filename
def emit(self, record):
message = self.format(record)
with open(self.filename, 'a', encoding='utf-8') as file:
file.write(message + '\n')
logger_config = {
'version': 1,
    'disable_existing_loggers': False,  # disable the other loggers
    'formatters': {  # formatters
        'simple_format': {  # used when the program starts and stops
'format': '-------------------- {message}: {asctime} --------------------',
'style': '{',
'datefmt': '%d-%m-%Y %H:%M:%S'
},
        'std_format': {  # for all other messages
# 'format': '{asctime}.{msecs:0<3.0f} - {levelname} - {name} - {module}:{funcName}:{lineno} - {message}',
'format': '{asctime}.{msecs:0<3.0f} - {levelname} - {module}:{funcName}:{lineno} - {message}',
'style': '{',
'datefmt': '%d-%m-%Y %H:%M:%S'
}
},
    'handlers': {  # handlers
'simple_console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple_format'
},
'simple_file': {
            '()': MegaHandler,  # a MegaHandler instance
'level': 'DEBUG',
'filename': 'debug.log',
'formatter': 'simple_format'
},
'std_console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'std_format'
},
'std_file': {
            '()': MegaHandler,  # a MegaHandler instance
'level': 'DEBUG',
'filename': 'debug.log',
'formatter': 'std_format'
}
},
    'loggers': {  # loggers
'app_logger': {
'level': 'DEBUG',
'handlers': ['std_console', 'std_file']
#'propagate': False
},
'simple_logger': {
'level': 'DEBUG',
'handlers': ['simple_console', 'simple_file']
#'propagate': False
}
},
# 'filters': {},
# 'root': {} # '': {}
# 'incremental': True
}
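# A minimal usage sketch (an addition, not part of the original module): the
# dict above is designed to be consumed by the standard logging.config.dictConfig.
if __name__ == '__main__':
    import logging.config
    logging.config.dictConfig(logger_config)
    logging.getLogger('simple_logger').debug('Start')
    logging.getLogger('app_logger').debug('processing item 42')
    logging.getLogger('simple_logger').debug('Stop')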
# [end of file; author: zaharay@yandex.ru]

# ---- file: /future_value.py (repo: 22zagiva/1.2-exercises, no license, Python) ----
#!/usr/bin/env python3
# display a welcome message
print("Welcome to the Future Value Calculator")
print()
choice = "y"
while choice.lower() == "y":
# initialize variables
monthly_investment = 0
yearly_interest_rate = 0
years = 0
monthly_interest_amount = 0
while monthly_investment < 1:
monthly_investment = float(input("Enter monthly investment:\t"))
if monthly_investment > 0:
break
else:
print("Entry must be greater than 0. Please try again.")
while yearly_interest_rate <= 0 or yearly_interest_rate > 15:
yearly_interest_rate = float(input("Enter yearly interest rate:\t"))
if yearly_interest_rate > 0 and yearly_interest_rate <= 15:
break
else:
print("Entry must be greater than 0 and less than or equal to 15")
while years < 1 or years > 50:
years = int(input("Enter number of years:\t\t"))
if years > 0 and years <= 50:
break
else:
print("Entry must be greater than 0 and less than or equal to 50")
# convert yearly values to monthly values
monthly_interest_rate = yearly_interest_rate / 12 / 100
months = 12
future_value = 0
    # calculate the future value month by month and display it year by year
print()
years_count = 0
for i in range(years):
for i in range(months):
future_value += monthly_investment
monthly_interest_amount = future_value * monthly_interest_rate
future_value += monthly_interest_amount
years_count += 1
print("year = ", years_count, "Future Value = "+ str(round(future_value, 2)))
# see if the user wants to continue
choice = input("Continue (y/n)? ")
print()
print("Bye!")
# [end of file; author: 22zagiva.noreply@github.com]

# ---- file: /Chapter1/First_Last.py (repo: Anshomyous/Python-practise, no license, Python) ----
n = int(input("Enter the number: "))
last_digit = n % 10
first_digit = n
while first_digit >= 10:
    first_digit //= 10
print("The sum of the first & last digits is:", first_digit + last_digit)
# [end of file; author: sohamr@infinite.com]

# ---- file: /website/testFromRemoteRepo/_bsch3398/museum/python/django/utils/simplejson/decoder.py (repo: mpetyx/mpetyx.com, MIT license, Python) ----
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from django.utils.simplejson.scanner import make_scanner
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
raise ValueError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
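# A small usage sketch (an addition; the input is illustrative): scanning
# starts just past the opening quote and returns the decoded string together
# with the index one past the closing quote, e.g.
#
#     decoded, end = py_scanstring('"hello\\nworld" tail', 1)
#     # decoded == u'hello\nworld', end == 14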
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
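# A minimal usage sketch (an addition, not part of the original module):
#
#     decoder = JSONDecoder()
#     decoder.decode('{"a": [1, 2.5, null, true]}')
#     # -> {u'a': [1, 2.5, None, True]}
#     obj, end = decoder.raw_decode('{"a": 1} trailing data')
#     # -> obj == {u'a': 1}, end == 8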
# [end of file; author: mpetyx@gmail.com]

# ---- file: /010/python/010.py (repo: Chippers255/ProjectEuler, no license, Python) ----
# Project Euler Problem # 10
def isPrime(n):
# 0 and 1 are not prime numbers
if n < 2:
return False
# Mark 2 as a prime number
if n == 2:
return True
    # Any remaining even number is not prime (checked with a bitwise and)
if not n & 1:
return False
    # The range starts at 3 and only needs to go up to the square root of n,
    # checking odd divisors only
    for x in range(3, int(n**0.5) + 1, 2):  # range requires integer bounds
if n % x == 0:
return False
return True
count = 0
for i in range(0,2000001):
if isPrime(i):
count += i
if i%100000 == 0:
        print (i * 100.0 / 2000000)  # float math so the progress is not truncated to 0
print count
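# A faster alternative (an addition, not part of the original file): a Sieve
# of Eratosthenes computes the same sum in O(n log log n) instead of trial
# division per candidate.
def prime_sum_sieve(limit):
    # is_prime[i] stays True while i is still a prime candidate
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for multiple in range(p * p, limit, p):
                is_prime[multiple] = False
    return sum(i for i, prime in enumerate(is_prime) if prime)

# prime_sum_sieve(2000000) == 142913828922, the Project Euler 10 answer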
# [end of file; author: tn90ca@gmail.com]

# ---- file: /blogcrawler/blogspider/spiders/bspider_gamma.py (repo: jezeelmohd/blogcrawler, no license, Python) ----
import re
import csv
import time
from items import BlogItem
from fuzzywuzzy import fuzz
from nltk import clean_html
from scrapy.item import Item
from urlparse import urlparse
from scrapy.http import Request
from scrapy.selector import Selector
from collections import OrderedDict, Counter
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
class MySpider(CrawlSpider):
name = 'blogspider'
DENY_PATTERNS = re.compile('(\/tag)|(\/search\/)|(\/category\/)|(\?tag\=)|(\/search\?max-results=)|(_archive\.html)|\
(\/search\?by-date=)|(\?showComment=)|(\?shared=)|(\?msg=)|(\?replytocom=)|(\/author\/)|(\/\d{4}\/\d{2}\/$)|(\/page\/)|\
([\/]?\?page=)|(\/\?pfstyle=)|(\/\d{4}\/\d{2}\/\d{2}\/$)|(\/archive\/)|(\/comment-page-1\/)|(\/\?attachment_id=)|\
(\/\?adv_search=)|(\?redirect_to=)|(com\/\d{4}\/\d{2}$)')
def __init__(self, blogurl='http://mycottoncreations.blogspot.com',blogid='7777'):
self.start_urls = [blogurl,]
self.allowed_domains = [self.get_domain(dom) for dom in self.start_urls]
self.blog_ids = {blogurl: blogid}
self.blog_id = blogid
self.rules = (
Rule(
SgmlLinkExtractor(allow_domains=self.allowed_domains,deny_domains=['facebook.com','google.com','twitter.com','pintrest.com'],unique=True),
process_request='add_meta',follow=True, callback='parse_item'),
)
super(MySpider, self).__init__()
def get_domain(self, blog):
blog = blog.replace('http://', '').replace('https://', '').replace('www.', '').split('/')[0]
return blog
def add_meta(self, request):
request.meta['id'] = self.blog_id
if not self.DENY_PATTERNS.search(request.url):
return request
def parse_item(self, response):
sel = Selector(response)
date = self.post_date_extract(sel,response)
title,title_xpath = self.post_title_extract(sel, response)
if title and title_xpath:
post_text = self.find_post_text(title_xpath,sel)
title = ' '.join(title.split()).replace('&','&') if title else None
text = ' '.join(post_text.split()).replace('&','&') if post_text else None
date = date if date else None
base_url = urlparse(response.url)
item = BlogItem(blog_url=base_url.netloc,
post_url=response.url,
post_date=date,
post_title=title,
post_text=text,
blog_id=response.meta['id'],
)
if not self.DENY_PATTERNS.search(item['post_url']) and item['post_text']:
yield item
def post_date_extract(self,sel,response):
date=None
date_meta = sel.xpath('//meta[contains(@property,"article:published_time")]/@content').extract()
if date_meta:
date=date_meta[0] if date_meta else None
if not date:
date_title = re.compile('\d+\/\d+\/\d+').findall(response.url)
date=date_title[0] if date_title else None
if not date:
date_span_xpath = ' '.join(sel.xpath('//span/text()').extract())
date_span = re.compile('\w+ \d+, \d+').findall(date_span_xpath)
date = date_span[0] if date_span else None
if not date:
date_text_xpath = ' '.join(sel.xpath('//text()').extract())
date_text = re.compile('\w+ \d{2} \d{4}').findall(date_text_xpath)
if not date_text:
date_text = re.compile('\w+ \d+, \d{4}').findall(date_text_xpath)
if not date_text:
date_text = re.compile('\d+\/\d+\/\d+').findall(date_text_xpath)
if not date_text:
date_text = re.compile('\d+\.\d+\.\d+').findall(date_text_xpath)
date = date_text[0] if date_text else None
return date
def post_title_extract(self,sel,response):
title = None
title_score = 0
slug_score = 0
title_xpath = None
blog=self.get_domain(response.url)
slug = response.url.split('/')[-1] or response.url.split('/')[-2]
        # rstrip('.html') would strip any trailing '.','h','t','m','l' chars,
        # so remove the suffix explicitly instead
        slug = slug.replace('-', ' ')
        if slug.endswith('.html'):
            slug = slug[:-5]
head_title = sel.xpath('//title/text()').extract()
head_title = head_title[0] if head_title else ''
if '|' in head_title:
pos=[head_title.split('|')[0],head_title.split('|')[-1]]
word = pos[0] if fuzz.partial_ratio(pos[0],blog)>fuzz.partial_ratio(pos[-1],blog) else pos[-1]
head_title_clean = head_title.replace(word,'').replace('|','')
else:
head_title_clean = head_title
text_to_remove = sel.xpath('//link[@rel="alternate"]/@title').extract()
if text_to_remove and head_title:
words = (' '.join(text_to_remove)+head_title).split()
if Counter(words).most_common(3):
for wor in Counter(words).most_common(3):
head_title_clean = head_title_clean.replace(wor[0],'')
[h1,h1a,h2,h2a,h3,h3a]=["//h1","//h1/a","//h2","//h2/a","//h3","//h3/a"]
head_xpaths = [h1a,h1,h2a,h2,h3a,h3]
title_lists = [sel.xpath(head+'//text()').extract() for head in head_xpaths]
title_dict = OrderedDict(zip(head_xpaths,title_lists))
for title_xpaths,title_list in title_dict.iteritems():
if title_list:
for titles in title_list:
                    # prevent a single word from outscoring a full title
if titles.count(' ')>0 or head_title_clean.count(' ')<1:
title_ratio = fuzz.partial_token_sort_ratio(titles,head_title_clean)
if title_ratio>title_score:
title_score = title_ratio
title = titles
title_xpath = title_xpaths
if title_score==100 and title.count(' ')>0:
break
                    # fall back to the slug ratio when the head-title match is weak
slug_ratio = fuzz.partial_ratio(titles.lower(),slug)
if slug_ratio>80:
slug_score = slug_ratio
title = titles
title_xpath = title_xpaths
if slug_score==100:
break
if slug_score==100:
break
if title_score==100:
break
if title_score<51 and slug_score<81:
title = head_title_clean
return title,title_xpath
def post_text_extract(self,sel,post_xpaths):
sel = sel
post_xpaths = post_xpaths
div_len = 0
div_html = ''
div_text = ''
post_text = ''
for post_xpath in post_xpaths:
div_html = sel.xpath(post_xpath).extract()
div_text = clean_html(' '.join(div_html))
if len(div_text) > div_len:
if len(re.compile('\w+ \d+,.\d+').findall(div_html[0])) > 10:
continue
else:
post_text = ' '.join(div_text.split())
div_len = len(div_text)
return post_text,div_len
def find_post_text(self,title_xpath,sel):
post_xpaths1 = [title_xpath + "/following-sibling::div[1]", title_xpath +
"/following-sibling::div[2]", title_xpath + "/following-sibling::div[3]"]
post_xpaths2 = [title_xpath + "/../following-sibling::div[1]", title_xpath +
"/../following-sibling::div[2]", title_xpath + "/../following-sibling::div[3]"]
post_xpaths3 = [title_xpath + "/../../following-sibling::div[1]", title_xpath +
"/following-sibling::div[2]", title_xpath + "/following-sibling::div[3]"]
post_text1,post_len1 = self.post_text_extract(sel,post_xpaths1)
post_text2,post_len2 = self.post_text_extract(sel,post_xpaths2)
post_text3,post_len3 = self.post_text_extract(sel,post_xpaths3)
pos = [' '.join(post_text1.split()),' '.join(post_text2.split())]
post_text = max(pos,key=lambda p:len(p))
if len(post_text3)>len(post_text):
if post_text3.lower().count('comments')<=post_text.lower().count('comments'):
post_text = post_text3
p_post=sel.xpath('//h1/../p')
if title_xpath =='//h1':
if len(p_post)>=5:
post_text = clean_html(' '.join(p_post.xpath('//text()').extract())).strip()
return post_text
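# A hypothetical invocation sketch (an addition): Scrapy forwards -a
# command-line arguments to the spider's __init__, so a crawl could be
# started with, for example:
#
#     scrapy crawl blogspider -a blogurl=http://example.blogspot.com -a blogid=1234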
# [end of file; author: jezeel@quadloops.com]

# ---- file: /test_calculator.py (repo: songx2-xy/CalculatorAppCI, no license, Python) ----
#! /usr/bin/env python3
# coding=utf-8
"""
Unit testing the calculator app
"""
import calculator
class TestCalculator:
def test_add(self):
assert 5 == calculator.add(1, 4)
def test_subtract(self):
assert 2 == calculator.subtract(5, 3)
def test_multiply(self):
assert 2 == calculator.multiply(1, 2)
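# A sketch of the module under test (hypothetical; the real calculator.py is
# not included in this dump), plus the assumed pytest invocation:
#
#     # calculator.py
#     def add(a, b): return a + b
#     def subtract(a, b): return a - b
#     def multiply(a, b): return a * b
#
# Run with: pytest test_calculator.py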
# [end of file; author: songx2@oregonstate.edu]

# ---- file: /Hlt/Hlt/Hlt2Lines/python/Hlt2Lines/CaloTest/Lines.py (repo: kidaak/lhcbsoft, no license, Python) ----
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# @file Hlt2CaloTest.py
# @author Albert Puig (albert.puig@cern.ch)
# @date 19.12.2014
# =============================================================================
"""Test for Calo reconstruction for HLT2."""
from Hlt2Lines.Utilities.Hlt2LinesConfigurableUser import Hlt2LinesConfigurableUser
class CaloTestLines(Hlt2LinesConfigurableUser) :
__slots__ = { 'UseFullCalo' : True,
# Postscale
'Postscale' : {'Hlt2CaloTest' : 0.0}
}
def __apply_configuration__(self) :
from HltLine.HltLine import Hlt2Line
from Stages import Photons
stages = {'Photons' : [Photons(self.getProp('UseFullCalo'))]}
line = Hlt2Line('CaloTest',
prescale = self.prescale,
algos = self.algorithms(stages)['Photons'],
postscale = self.postscale)
|
[
"raaij@4525493e-7705-40b1-a816-d608a930855b"
] |
raaij@4525493e-7705-40b1-a816-d608a930855b
|
27c0fe609b593d75a9d9c00780603d84a1d71159
|
f252214c417a1d0d7735125c09d5651f6fca0880
|
/scrapper/core.py
|
538bd9d83539d8e89a5b3da9efd675d4848a8cb3
|
[
"MIT"
] |
permissive
|
jakubmatyszewski/nofluffscrapper
|
ac039ff0bb3e46529014dcf28fe19de569ca8c82
|
f7dc1b426bccf035eef6637d5621c235834b62ef
|
refs/heads/master
| 2021-07-14T15:21:57.159010
| 2020-09-17T22:00:09
| 2020-09-17T22:00:09
| 247,995,505
| 0
| 0
|
MIT
| 2021-06-02T02:51:30
| 2020-03-17T14:47:32
|
Python
|
UTF-8
|
Python
| false
| false
| 7,207
|
py
|
import time
import json
import redis
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
MAX_WAIT = 60
URL = 'https://nofluffjobs.com/'
redis_client = redis.Redis(host='redis',
charset="utf-8",
decode_responses=True)
def wait(fn):
def modified_fn(*args, **kwargs):
start_time = time.time()
while True:
try:
return fn(*args, **kwargs)
except Exception as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(1)
return modified_fn
class Scrapper:
def __init__(self):
self.not_specified = []
self.report = []
self.offers = []
self.current_page = 1
self.now = datetime.now().strftime("%d/%m/%Y-%H:%M:%S")
self.open_browser()
self.set_language_to_english()
@wait
def open_browser(self):
self.driver = webdriver.Remote(
"http://selenium:4444/wd/hub",
DesiredCapabilities.FIREFOX)
self.driver.get(URL)
def close_browser(self):
self.driver.quit()
@wait
def wait_for(self, fn):
return fn()
def set_language_to_english(self):
current_language = self.driver.find_element_by_class_name(
'language-picker__lang-selected')
if current_language.text != "English":
current_language.click()
flags = self.driver.find_elements_by_class_name('language-picker__flag')
for flag in flags:
if flag.get_attribute('src').endswith("EN.svg"):
flag.click()
break
def apply_button(self):
[button for button in self.driver.find_elements_by_class_name('btn-link')
if button.text == 'Apply'][0].click()
def get_filters_done(self,
locations=[],
seniority=[],
categories=[]
):
self.filter_config = {'location': locations,
'category': categories,
'seniority': seniority}
filters = self.driver.find_elements_by_class_name('filter-name')
for filtr in filters:
if filtr.text == 'Location':
location = filtr
elif filtr.text == 'Category':
category = filtr
elif filtr.text == 'More':
more = filtr
location.click()
for button in self.driver.find_elements_by_class_name('filters-btn'):
if button.text in locations:
button.click()
self.apply_button()
category.click()
for button in self.driver.find_elements_by_class_name('filters-btn'):
if button.text in categories:
button.click()
self.apply_button()
more.click()
for level in seniority:
self.driver.find_element_by_xpath(f"//label[@for='{level.lower()}']").click()
self.apply_button()
@wait
def check_if_in_offer(self):
crumbs = self.driver.find_elements_by_tag_name('nfj-posting-breadcrumbs')
assert len(crumbs) > 0
@wait
def check_if_on_list_view(self):
jobs = self.driver.find_elements_by_class_name('posting-title__position')
assert len(jobs) > 0
@wait
def get_requirements(self):
reqs = []
for re in self.driver.find_elements_by_tag_name('nfj-posting-requirements'):
reqs += [button.text.lower()
for button in re.find_elements_by_tag_name('button')]
return reqs
@wait
def get_description(self):
description = self.driver.find_element_by_class_name('posting-details-description')
position = description.find_element_by_tag_name('h1').text
        try:
            company = description.find_element_by_class_name('company-name').text
        except Exception:
            # fall back to the first <dd> when no company-name element exists
            company = description.find_element_by_tag_name('dd').text
_url = self.driver.current_url
salary = self.driver.find_element_by_tag_name('nfj-posting-salaries').text
return position, company, _url, salary
def check_if_i_am_suited(self, stack, nonos=[]):
reqs = self.get_requirements()
for i, req in enumerate(reqs):
if req in stack:
reqs[i] = 1
elif req in nonos:
return False
else:
self.not_specified.append(req)
reqs[i] = 0
rate = sum(reqs) / len(reqs)
if rate > 0.4:
position, company, _url, salary = self.get_description()
# -- email only --
description = f'{position} @ {company}\n{_url}\n{salary}'
self.report.append(f"{description}\nSuited in {rate:.2f}/1.\n\n")
# --/ email only --
self.offers.append({'position': position,
'company': company,
'url': _url,
'salary': salary})
return False
def check_offers(self, stack, nonos):
is_it_last_page = False
while is_it_last_page is False:
time.sleep(1)
offers = self.driver.find_elements_by_class_name('posting-list-item')
for i in range(len(offers)):
self.check_if_on_list_view()
link = offers[i].get_property('href')
self.driver.execute_script(
f"window.scrollTo(0, {offers[i].rect['y']-200})")
self.driver.execute_script(f"window.open('{link}');")
self.driver.switch_to.window(self.driver.window_handles[1])
self.check_if_in_offer()
time.sleep(0.5)
self.check_if_i_am_suited(stack, nonos)
self.driver.close()
self.driver.switch_to.window(self.driver.window_handles[0])
self.driver.execute_script(
f"window.scrollTo(0, document.body.scrollHeight)")
time.sleep(0.5)
is_it_last_page = self.page_flipping()
for skill in self.not_specified:
redis_client.set(f'skillproposal:{skill}', skill)
result_data = {'filters': self.filter_config, 'offers': self.offers}
redis_client.set(f'result:{self.now}', json.dumps(result_data))
def page_flipping(self):
try:
self.current_page += 1
last_page = self.driver.find_elements_by_class_name('page-link')[-2]
if self.current_page < int(last_page.text):
page_link = self.driver.find_element_by_xpath(f"//*[contains(text(), '{self.current_page}') and @class='page-link']")
self.driver.execute_script(
f"window.scrollTo(0, {page_link.rect['y']-200})")
page_link.click()
return False
except Exception as e:
print(e)
print('Just one page available.', flush=True)
return True
return True
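# A hypothetical driver sketch (an addition; the filter values and skills are
# illustrative, and the selenium/redis hosts used by the module are assumed
# reachable):
if __name__ == '__main__':
    scrapper = Scrapper()
    scrapper.get_filters_done(locations=['Remote'],
                              seniority=['Senior'],
                              categories=['Backend'])
    scrapper.check_offers(stack=['python', 'django'], nonos=['php'])
    scrapper.close_browser()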
# [end of file; author: jakubmatyszewski1@gmail.com]