blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73e2a31cf8dd068edcc1630093cac0255bfb04e9 | 8da59482eb565f760685901e8442274ab6405292 | /institute/secretary/models.py | c8a8822c1d0afc1fb37847962076a95cf34ed9dd | [] | no_license | HNEhsan/uni-project-django | e3feb30ad61c2e89706c4e3c276408286e4ac323 | 642bd523af3ede2b642e406570085a74bce2eb36 | refs/heads/master | 2023-08-17T02:58:00.494964 | 2021-09-20T02:50:40 | 2021-09-20T02:50:40 | 401,089,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | from django.db import models
#from teacher.models import Teacher
# Create your models here.
# Personal record of a secretary, keyed by their national ID code.
class SecretaryRegister(models.Model):
    NationalCode = models.CharField(max_length=10, primary_key=True)  # 10-digit national ID, used as PK
    Name = models.CharField(max_length=50)
    FamilyName = models.CharField(max_length=100)
    Age = models.IntegerField()
    Tele = models.CharField(max_length=8)  # landline number — presumably local format; TODO confirm length
    Phone = models.CharField(max_length=11)  # mobile number
    Address = models.CharField(max_length=250)
# An academic term with its start/end dates.
class Term(models.Model):
    TermId = models.CharField(max_length=20, primary_key=True)
    StartDay = models.DateTimeField('start term day')
    EndDay = models.DateTimeField('end term day')
    DateNumberInWeek = models.IntegerField(default=1)  # presumably sessions per week — TODO confirm
    Description = models.CharField(max_length=200)
# A course offered within a term; the Teacher foreign key is currently disabled
# (its import at the top of the file is commented out as well).
class Lesson(models.Model):
    LessonId = models.CharField(max_length=20, primary_key=True)
    #TermId
    TermId = models.ForeignKey(Term, on_delete=models.CASCADE)
    #TeacherId
    #TeacherId = models.ForeignKey(Teacher, on_delete=models.CASCADE)
    Name = models.CharField(max_length=100)
    Studentnumber = models.IntegerField(default=0)  # enrolled-student count
    ClassNumber = models.CharField(max_length=5)  # room identifier
    DateTime = models.DateTimeField('Day and Time start class')
# Site login credentials for students.
# NOTE(review): password stored in plain text — should be hashed (e.g. Django auth).
class StudentSiteRegister(models.Model):
    Username = models.CharField(max_length=20, primary_key=True)
    Password = models.CharField(max_length=20)
# Site login credentials for teachers.
# NOTE(review): password stored in plain text — should be hashed (e.g. Django auth).
class TeacherSiteRegister(models.Model):
    Username = models.CharField(max_length=20, primary_key=True)
    Password = models.CharField(max_length=20)
# Site login credentials for secretaries.
# NOTE(review): password stored in plain text — should be hashed (e.g. Django auth).
class SecretarySiteRegister(models.Model):
    Username = models.CharField(max_length=20, primary_key=True)
    Password = models.CharField(max_length=20)
| [
"ehsan.hosseinnejad77@gmail.com"
] | ehsan.hosseinnejad77@gmail.com |
ae06a39f9884a0f2050f91162f583b508eac6f60 | 6decd6f110b2a62c580e6bdc49ac2e9d354deb5b | /apis/urls.py | 84a374ff9f440e0dc1f81337f6c119e83109a00f | [] | no_license | ruan1998/djtest | 85e2f94d2cd243516fd156ad12355a2b8cba875c | 60b6655209fd2859e74ba2082cd1c872853612fd | refs/heads/master | 2020-07-31T09:03:39.245776 | 2019-09-24T09:14:58 | 2019-09-24T09:14:58 | 210,554,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from django.urls import path
from .views import movie, menu, image
# Route table for the API app; commented-out entries are disabled endpoints.
urlpatterns = [
    # path('', team.movie),
    path('menu', menu.get_menu),  # function-based view
    path('movie', movie.MovieView.as_view()),  # class-based view
    # path('image', image.image),
    # path('imagetext', image.image_text),
    path('image', image.ImageView.as_view()),
]
| [
"1533724109@qq.com"
] | 1533724109@qq.com |
250b58f3dc7355770eed426d5376486157536409 | 8d109aa989c63e4d6f10be64233415ec62534ddf | /src/utils/config.py | a65f5878fb7f0ca9f53b481b781bcbca3eb98c04 | [] | no_license | N3TC4T/xrpl-arangodb-importer | f5967e2c5f6c9f562f2332ab62c8c2d82b2988f4 | 66760a9507729f77cf5fd1ffee8e857c0447e832 | refs/heads/master | 2022-07-06T19:45:35.071704 | 2022-06-20T19:55:07 | 2022-06-20T19:55:07 | 222,242,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import json
class Config(object):
    """Thin wrapper around the JSON configuration file ``config.json``.

    The whole file is parsed once at construction time and kept in
    ``self.config`` as a nested dict (module name -> item name -> value).
    """

    def __init__(self):
        # Loads from the current working directory; raises FileNotFoundError
        # or json.JSONDecodeError on a missing/invalid file.
        with open('config.json') as config_file:
            self.config = json.load(config_file)

    def get_config_itme(self, cm='conf_module', ci='conf_item'):
        """Return config item *ci* from module *cm*.

        Kept under its original (misspelled) name for backward
        compatibility; prefer :meth:`get_config_item`.
        """
        return self.config[cm][ci]

    def get_config_item(self, cm='conf_module', ci='conf_item'):
        """Correctly-spelled alias of :meth:`get_config_itme`."""
        return self.config[cm][ci]
| [
"netcat.av@gmail.com"
] | netcat.av@gmail.com |
240f780193b6ee30893bd3e9a30204ee41ce0351 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/330/usersdata/302/93464/submittedfiles/lista1.py | 4313e851ecf6723c1825fd854fb9da93e0c1e682 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # -*- coding: utf-8 -*-
# Read n floating-point values, then print the sum of those whose
# value modulo 2 is non-zero (the "odd" entries).
n = int(input('Digite a quantidade de valores da matriz: '))
a = [float(input('Digite a%d:' % (i + 1))) for i in range(n)]
s = sum(v for v in a if v % 2 != 0)
print(s)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
e6690ed5e365578120df0979b1250bc26f56874c | fe05003912a82f88d3688327d4f663e07657bf95 | /3.7_general_balanced_paranthesis.py | 6abc955f5ebcf6489102fb02749cae89958b60a0 | [] | no_license | pjyi2147/python_algorithm | a3e27cb1820316dec5b9d4dcacdfaf5d1d607b26 | ba99fe9217de6c50fef41a953b46888daef71af4 | refs/heads/master | 2022-11-05T05:08:12.625265 | 2017-03-15T16:50:19 | 2017-03-15T16:50:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | #3.7 general balanced paranthesis
from pythonds.basic.stack import Stack
def parChecker(sampleString):
    """Return True if every (, [ and { in sampleString is closed by the
    matching bracket in the correct order.

    Drop-in replacement for the pythonds ``Stack``-based version: a plain
    list serves as the stack, removing the third-party dependency. A
    non-opening character with nothing on the stack, or one that does not
    pair with the current top, makes the string unbalanced (the original
    raised ValueError on non-bracket characters; this returns False).
    """
    pairs = {')': '(', ']': '[', '}': '{'}  # closer -> expected opener
    stack = []
    for ch in sampleString:
        if ch in '([{':
            stack.append(ch)
        elif not stack or stack.pop() != pairs.get(ch):
            # Closing symbol with nothing open, or a mismatched pair.
            return False
    # Balanced only if every opener was consumed.
    return not stack
def matches(open, close):
    """True when *close* is the closing bracket paired with *open*.

    Raises ValueError (as before) if either argument is not a bracket.
    """
    return '([{'.index(open) == ')]}'.index(close)
# Quick smoke tests: a balanced string (True) and an unbalanced one (False).
print(parChecker('[]()()()'))
print(parChecker('[][][[[[[]][}}}}[{}{}[}()(){}{}{}'))
| [
"yjs990427@gmail.com"
] | yjs990427@gmail.com |
176377d49320c3c35018ed1803f1841dc4517e2a | af3f5afe7d14a731d999a06c0de64647d9d5227d | /followBackBot.py | 33971b78f4e2daaff5c50ad1a9016abedd583a2e | [] | no_license | camillezajac/twitterBots | 13249f31e098b2c23abc8e9fc13bf8e23000f2c5 | f50b5356692a7b3a9d7d047bbfce0196115f0f7e | refs/heads/main | 2023-02-14T16:16:56.258189 | 2021-01-05T04:55:21 | 2021-01-05T04:55:21 | 326,890,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | import tweepy
import time  # NOTE(review): imported but never used in this script

# authenticate API
# Placeholder Twitter API credentials — never commit real secrets to source
# control; load them from environment variables or a config file instead.
consumer_key = "your code here"
consumer_secret = "your code here"
access_token = "your code here"
access_token_secret = "your code here"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# wait_on_rate_limit=True makes tweepy sleep through Twitter rate-limit windows.
api = tweepy.API(auth, wait_on_rate_limit=True)
# follow users who follow you
# Iterate over every follower and follow back anyone not already followed.
for follower in tweepy.Cursor(api.followers).items():
    if not follower.following:
        follower.follow()
"noreply@github.com"
] | camillezajac.noreply@github.com |
5e15cffbb4eeed4229b5074ed1dbb6bcecd5d526 | bf270288391035094d8afeeaae5a0b8c284b85d1 | /app/user.py | f918ab95ae46098f7ecdf00585be3c7b09c0fa62 | [] | no_license | jacobpennels/Telepatholodroid | 29201e40d55be292556b19b5a3c822591da7a39a | f39d330998779cdc6a11a0c907713fa75400490e | refs/heads/master | 2021-09-09T12:13:12.726376 | 2018-03-16T00:54:54 | 2018-03-16T00:54:54 | 105,992,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from app import database_connector
class User:
    """Minimal user record implementing the Flask-Login user protocol."""

    def __init__(self, title, fname, lname, email, date_joined, institute, country, user_id):
        self.user_id = user_id
        self.title = title
        self.fname = fname
        self.lname = lname
        self.email = email
        self.date_joined = date_joined
        self.institute = institute
        self.country = country
        # Session-state flags consulted by Flask-Login.
        self.authenticated = False
        self.active = False

    # --- Flask-Login protocol methods ---
    def is_authenticated(self):
        """Whether the user has provided valid credentials this session."""
        return self.authenticated

    def is_active(self):
        """Whether the account is currently enabled."""
        return self.active

    def is_anonymous(self):
        """Anonymous users are not supported."""
        return False

    def get_id(self):
        """Identifier Flask-Login stores in the session cookie."""
        return self.user_id
# Class methods
| [
"jaykoknight@btinternet.com"
] | jaykoknight@btinternet.com |
6d4d477a22b165e5e1c2d696e298c7041172de85 | 8964f146366a7d3c100accb48b26585057841aec | /Test Cases/equiv.py | 3af45f5e939d8b0684adad32e69a57a2309de9a5 | [
"MIT"
] | permissive | Function-0/SQL-Database-Python-Clone | 83b817327d64cd5e2d8287e50070e8c6b967fed6 | 07fa6a63aa85459127c47f7056b49e4b8cf0abad | refs/heads/master | 2020-05-17T16:45:33.401317 | 2019-04-27T22:51:19 | 2019-04-27T22:51:19 | 183,828,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py | # k and smallest_index were swapped in key i
# After swapping positions k and smallest_index in key i, mirror that swap
# in every key from position `future` onward so the table rows stay aligned.
def propagate(t, keys, future, k, smallest_index):
    for key in keys[future:]:
        column = t[key]
        column[k], column[smallest_index] = column[smallest_index], column[k]
def find_min(L, i, j):
    '''(list, int, int) -> int
    Return the index of the smallest item in L[i:j].
    '''
    window = range(i, j)
    if not window:
        # Degenerate slice: fall back to i, matching the manual-scan version.
        return i
    # min() returns the first index achieving the smallest value, which
    # matches the strict '<' comparison of the original loop.
    return min(window, key=L.__getitem__)
def selection_sort(L, i, j, t, keys, future):
    '''(list, int, int) -> NoneType
    Sort the elements of L[i:j] in non-descending order in place,
    propagating every swap to the keys listed after `future` so the
    table rows stay aligned.
    '''
    for pos in range(i, j):
        target = find_min(L, pos, j)
        L[pos], L[target] = L[target], L[pos]
        propagate(t, keys, future, pos, target)
def sort_part(t, keys, lower, upper, i):
    # Sort one bucket [lower, upper) of column keys[i], keeping later keys in step.
    column = t[keys[i]]
    selection_sort(column, lower, upper, t, keys, i + 1)
def sort_key(t, keys, buckets, i):
    # Each entry of `buckets` is the exclusive upper bound of a row range
    # still tied on previously sorted keys; sort each range independently.
    bounds = [0] + list(buckets)
    for lower, upper in zip(bounds, bounds[1:]):
        sort_part(t, keys, lower, upper, i)
def sort_table(t, keys):
    '''Sort a table by its first column, then further sort
    on second column etc.'''
    num_rows = len(t[keys[0]])
    # `buckets` holds exclusive upper bounds of row ranges that are still
    # tied on all keys sorted so far; initially one bucket covers all rows.
    buckets = [num_rows]
    for i in range(len(keys)):
        sort_key(t, keys, buckets, i)
        # Re-derive the buckets: split each range wherever adjacent values
        # differ in the column that was just sorted.
        newbuckets = []
        lst = t[keys[i]]
        lower = 0
        for bucket in buckets:
            upper = bucket
            # Fix: use a distinct loop variable here; the original reused
            # `i`, shadowing the key index inside the outer loop body.
            for row in range(lower, upper - 1):
                if lst[row] != lst[row + 1]:
                    newbuckets.append(row + 1)
            newbuckets.append(upper)
            lower = upper
        buckets = newbuckets
def equiv_tables(t1, t2):
    '''Return True if tables t1 and t2 are equivalent.'''
    keys1 = sorted(t1.keys())
    keys2 = sorted(t2.keys())
    if keys1 != keys2:
        return False
    # Same schema: canonicalise the row order of both tables (note this
    # sorts t1 and t2 in place), then compare column by column.
    sort_table(t1, keys1)
    sort_table(t2, keys1)
    return all(t1[k] == t2[k] for k in keys1)
# Ad-hoc manual check: canonicalise a sample table in place.
t1 = {'a': ['b', 'c', 'q', 'b', 'c', 'q'], 'x': ['y', 'z', 'r', 'y', 'z', 'r'],
'd': ['e', 'e', 'e', 'f', 'f', 'f'], 'u': ['v', 'v', 'v', 'w', 'w', 'w']}
t1keys = list(t1.keys())
sort_table(t1, t1keys)
| [
"noreply@github.com"
] | Function-0.noreply@github.com |
1f2e16db73f4d88295d4d2a95488a10a5cd5ab5d | ee2ae98f9bafd0f539f36abca3f7133959cd0962 | /backend/nameless_frost_27639/wsgi.py | 9f29a3e855af429e370d4fe8a99cfa5f83f2a19b | [] | no_license | crowdbotics-apps/nameless-frost-27639 | c036fc2f078541882301de65d643ecb40c9f2492 | 4c23611555f15af46259675791bf87fbcd9bb4b8 | refs/heads/master | 2023-05-06T18:13:26.553269 | 2021-06-01T03:16:58 | 2021-06-01T03:16:58 | 372,688,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for nameless_frost_27639 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nameless_frost_27639.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
67d903636f1c75e3b4e45403548c442f248de85d | 8c4beb1c5263d3771b3dd69783cd274dee1900a6 | /lucky78/urls.py | 6bbb5f30a31888e734e21dd32ba7f0f094838245 | [] | no_license | codervince/lucky78 | 6523a141ff21ac50d59cd1407a8fcd7a435d9b8d | 53b6c21760021ef1fe82666df659553b3ad1514a | refs/heads/master | 2021-01-19T18:25:25.730866 | 2016-03-30T10:02:34 | 2016-03-30T10:02:34 | 55,052,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | """Lucky78 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
    url( r'^admin/', include( admin.site.urls ) ),
    url( r'^accounts/', include( 'allauth.urls' ) ),
    # NOTE(review): r'^' matches every path; later patterns are only tried
    # when nothing inside app.urls matches — confirm 'funds/' is reachable,
    # or move the catch-all include to the end of the list.
    url( r'^', include( 'app.urls', namespace = 'app' ) ),
    url( r'^funds/', include( 'funds.urls', namespace = 'funds' ) ),
]
| [
"publicvince102@gmail.com"
] | publicvince102@gmail.com |
65813f60c0b619ba70727345d3a100f11aa36a18 | 8da7d35bdeacbd821b9df4189d49afe401dd51a3 | /DISClib/DataStructures/adjlist.py | 726e96bc89c22b81149ba5e358b90f4ddeef2d8a | [] | no_license | EDA-SEC-02-EQUIPO-7/EDA-2020-20-RETO-3-SEC-02-GRUPO-07 | 657aa7d77a808191f23ff7cb055f2e7fe28c50c0 | 8d74409e21c025b10a9f979a5ddcd86a96f25b66 | refs/heads/master | 2022-12-31T12:30:56.424380 | 2020-10-23T23:43:02 | 2020-10-23T23:43:02 | 298,278,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,303 | py | """
* Copyright 2020, Departamento de sistemas y Computación,
* Universidad de Los Andes
*
* Desarrollado para el curso ISIS1225 - Estructuras de Datos y Algoritmos
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Contribución de:
*
* Dario Correal
*
"""
import config
from DISClib.ADT import map as map
from DISClib.DataStructures import liststructure as lt
from DISClib.DataStructures import listiterator as it
from DISClib.DataStructures import edge as e
from DISClib.Utils import error as error
assert config
"""
Este código está basado en las implementaciones propuestas en:
- Algorithms, 4th Edition. R. Sedgewick
- Data Structures and Algorithms in Java, 6th Edition. Michael Goodrich
"""
def newGraph(size, cmpfunction, directed):
    """
    Create an empty adjacency-list graph. Vertices are kept in a
    linear-probing map.

    Args:
        size: expected initial number of vertices
        cmpfunction: key-comparison function for the maps
        directed: True to create a digraph
    Returns:
        The new graph structure
    Raises:
        Exception
    """
    try:
        graph = {
            'vertices': map.newMap(numelements=size,
                                   maptype='PROBING',
                                   comparefunction=cmpfunction),
            'edges': 0,
            'type': 'ADJ_LIST',
            'cmpfunction': cmpfunction,
            'directed': directed,
            'indegree': None,
        }
        if directed:
            # Digraphs additionally track per-vertex in-degree.
            graph['indegree'] = map.newMap(numelements=size,
                                           maptype='PROBING',
                                           comparefunction=cmpfunction)
        return graph
    except Exception as exp:
        error.reraise(exp, 'ajlist:newgraph')
def insertVertex(graph, vertex):
    """
    Insert `vertex` into the graph with an empty adjacency list.

    Args:
        graph: the graph to operate on
        vertex: the vertex to add
    Returns:
        The updated graph
    Raises:
        Exception
    """
    try:
        map.put(graph['vertices'], vertex, lt.newList())
        if graph['directed']:
            # New vertices start with no incoming edges.
            map.put(graph['indegree'], vertex, 0)
        return graph
    except Exception as exp:
        error.reraise(exp, 'ajlist:insertvertex')
def removeVertex(graph, vertex):
    """
    Remove `vertex` from the graph.

    NOTE: not implemented — calling this is currently a silent no-op.

    Args:
        graph: the graph to operate on
        vertex: the vertex to remove
    Returns:
        A new graph
    Raises:
        Exception
    """
    # TODO
    pass
def numVertex(graph):
    """
    Return the number of vertices in the graph.

    Args:
        graph: the graph to operate on
    Returns:
        The vertex count
    Raises:
        Exception
    """
    try:
        return map.size(graph['vertices'])
    except Exception as exp:
        # NOTE(review): error tag 'numtvertex' looks like a typo for 'numvertex'.
        error.reraise(exp, 'ajlist:numtvertex')
def numEdges(graph):
    """
    Return the number of edges in the graph.

    Args:
        graph: the graph to operate on
    Returns:
        The edge count
    Raises:
        Exception
    """
    try:
        return (graph['edges'])
    except Exception as exp:
        error.reraise(exp, 'ajlist:numedges')
def vertices(graph):
    """
    Return a list with every vertex of the graph.

    Args:
        graph: the graph to operate on
    Returns:
        A list of the graph's vertices
    Raises:
        Exception
    """
    try:
        lstmap = map.keySet(graph['vertices'])
        return lstmap
    except Exception as exp:
        error.reraise(exp, 'ajlist:vertices')
def edges(graph):
    """
    Return a list with every edge of the graph.

    Args:
        graph: the graph to operate on
    Returns:
        A list of edges
    Raises:
        Exception
    """
    try:
        # Walk every adjacency list and collect the edges.
        lstmap = map.valueSet(graph['vertices'])
        itervertex = it.newIterator(lstmap)
        lstresp = lt.newList('SINGLE_LINKED', e.compareedges)
        while it.hasNext(itervertex):
            lstedge = it.next(itervertex)
            iteredge = it.newIterator(lstedge)
            while (it.hasNext(iteredge)):
                edge = it.next(iteredge)
                if (graph['directed']):
                    lt.addLast(lstresp, edge)
                # Undirected edges live in both endpoints' lists, so only
                # add the edge if it has not been collected yet.
                elif (not lt.isPresent(lstresp, edge, )):
                    lt.addLast(lstresp, edge)
        return lstresp
    except Exception as exp:
        error.reraise(exp, 'ajlist:edges')
def degree(graph, vertex):
    """
    Return the number of edges attached to `vertex` (its degree).

    Args:
        graph: the graph to operate on
        vertex: the vertex of interest
    Returns:
        The vertex degree
    Raises:
        Exception
    """
    try:
        # The degree is simply the length of the adjacency list.
        return lt.size(map.get(graph['vertices'], vertex)['value'])
    except Exception as exp:
        error.reraise(exp, 'ajlist:degree')
def indegree(graph, vertex):
    """
    Return the number of edges arriving at `vertex`.
    Always 0 for undirected graphs, which do not track in-degree.

    Args:
        graph: the graph to operate on
        vertex: the vertex of interest
    Returns:
        The in-degree
    Raises:
        Exception
    """
    try:
        if not graph['directed']:
            return 0
        entry = map.get(graph['indegree'], vertex)
        return entry['value']
    except Exception as exp:
        error.reraise(exp, 'ajlist:indegree')
def outdegree(graph, vertex):
    """
    Return the number of edges leaving `vertex`.
    Always 0 for undirected graphs.

    Args:
        graph: the graph to operate on
        vertex: the vertex of interest
    Returns:
        The out-degree
    Raises:
        Exception
    """
    try:
        if not graph['directed']:
            return 0
        # For a digraph, the adjacency list holds only outgoing edges.
        entry = map.get(graph['vertices'], vertex)
        return lt.size(entry['value'])
    except Exception as exp:
        error.reraise(exp, 'ajlist:outdegree')
def getEdge(graph, vertexa, vertexb):
    """
    Return the edge connecting vertexa ---- vertexb, or None if absent.
    Scans vertexa's adjacency list and checks both endpoints of each edge.

    Args:
        graph: the graph to operate on
        vertexa: start vertex
        vertexb: destination vertex
    Returns:
        The matching edge, or None
    Raises:
        Exception
    """
    try:
        element = map.get(graph['vertices'], vertexa)
        lst = element['value']
        itvertex = it.newIterator(lst)
        while (it.hasNext(itvertex)):
            edge = it.next(itvertex)
            # An edge matches when one endpoint is vertexa and the other vertexb.
            if (e.either(edge) == vertexa or
                    (e.other(edge, e.either(edge)) == vertexa)):
                if (e.either(edge) == vertexb or
                        (e.other(edge, e.either(edge)) == vertexb)):
                    return edge
        return None
    except Exception as exp:
        error.reraise(exp, 'ajlist:getedge')
def containsVertex(graph, vertex):
    """
    Return True if `vertex` is present in the graph.

    Args:
        graph: the graph to operate on
        vertex: the vertex to look for
    Returns:
        True when the vertex exists
    Raises:
        Exception
    """
    try:
        return map.get(graph['vertices'], vertex) is not None
    except Exception as exp:
        error.reraise(exp, 'ajlist:containsvertex')
def addEdge(graph, vertexa, vertexb, weight=0):
    """
    Add an edge between vertexa ---- vertexb with weight `weight`.
    For an undirected graph the same edge object is appended to both
    endpoints' adjacency lists; for a digraph only vertexa --> vertexb
    is stored and vertexb's in-degree counter is incremented.

    Args:
        graph: the graph to operate on
        vertexa: start vertex
        vertexb: destination vertex
        weight: edge weight
    Returns:
        The updated graph
    Raises:
        Exception
    """
    try:
        # Create the edge object
        edge = e.newEdge(vertexa, vertexb, weight)
        # Fetch each endpoint's adjacency list and append the edge
        entrya = map.get(graph['vertices'], vertexa)
        lt.addLast(entrya['value'], edge)
        if (not graph['directed']):
            entryb = map.get(graph['vertices'], vertexb)
            lt.addLast(entryb['value'], edge)
        else:
            # Digraph: only the destination's in-degree changes.
            degree = map.get(graph['indegree'], vertexb)
            map.put(graph['indegree'], vertexb, degree['value']+1)
        graph['edges'] += 1
        return graph
    except Exception as exp:
        error.reraise(exp, 'ajlist:addedge')
def adjacents(graph, vertex):
    """
    Return a list of all vertices adjacent to `vertex`.

    Args:
        graph: the graph to operate on
        vertex: the vertex of interest
    Returns:
        A list of adjacent vertices
    Raises:
        Exception
    """
    try:
        lstedges = map.get(graph['vertices'], vertex)['value']
        lstresp = lt.newList()
        iterator = it.newIterator(lstedges)
        while it.hasNext(iterator):
            edge = it.next(iterator)
            first = e.either(edge)
            # Append whichever endpoint of the edge is not `vertex`.
            if first == vertex:
                lt.addLast(lstresp, e.other(edge, first))
            else:
                lt.addLast(lstresp, first)
        return lstresp
    except Exception as exp:
        error.reraise(exp, 'ajlist:adjacents')
def adjacentEdges(graph, vertex):
    """
    Return the list of edges incident to `vertex` (its adjacency list).

    Args:
        graph: the graph to operate on
        vertex: the vertex of interest
    Returns:
        The vertex's adjacency list
    Raises:
        Exception
    """
    try:
        return map.get(graph['vertices'], vertex)['value']
    except Exception as exp:
        error.reraise(exp, 'ajlist:adjacentEdges')
| [
"noreply@github.com"
] | EDA-SEC-02-EQUIPO-7.noreply@github.com |
3f2081334c3ed8057b4710a18a3be0ae909fa1fc | b86d418e9389350e772eda1d8a493bf3c2ff5661 | /setup.py | 13624931abc30f672c288d66ed773e4ded6f25e6 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | dpaysan/unet_nuclei_segmentation | 161085eb0b7fe24af96f565bfd2d05f2cbe22896 | df0f656ecf511aa39c411f897ab99cfa6c119c7b | refs/heads/master | 2023-08-25T14:46:21.530617 | 2023-08-07T12:33:52 | 2023-08-07T12:33:52 | 369,880,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | from setuptools import setup, find_packages
from codecs import open
from os import path
# Single-source package version.
__version__ = '0.0.1'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    all_reqs = f.read().split('\n')
# Plain requirements go to install_requires; VCS ('git+') links are exposed
# as dependency_links with the 'git+' prefix stripped.
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]
setup(
    name='src',
    version=__version__,
    description='a stand-alone version of unet segmentation of fluorescent nuclei based on the code and trained Unet from the CellProfiler ClassifyPixels-Unet plugin',
    long_description=long_description,
    url='https://github.com/VolkerH/unet_nuclei',
    download_url='https://github.com/VolkerH/unet_nuclei/tarball/' + __version__,
    license='unknown/to be determined',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 3',
    ],
    keywords='',
    packages=find_packages(exclude=['docs', 'tests*']),
    include_package_data=True,
    author='Volker Hilsenstein',
    install_requires=install_requires,
    dependency_links=dependency_links,
    author_email='volker.hilsenstein@gmail.com'
)
| [
"paysand@student.ethz.ch"
] | paysand@student.ethz.ch |
5b938a6b377cb776257a75168249dbc08c67cdca | 441c3c55870e00c4a0d2eb1e5a051b6483d116db | /build/omni_nav/catkin_generated/pkg.develspace.context.pc.py | 58fb1b18d04d6de08fb3ca23d744bafe8d503815 | [] | no_license | h26m48/omni_robo | 819348e953d5be9d7da6c9fded55facd2e5e457e | e5fc91a3180198854e306bc1f45f0df345a7929c | refs/heads/master | 2020-04-19T15:41:05.278791 | 2019-01-30T05:39:27 | 2019-01-30T05:39:27 | 168,281,573 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Build-space constants for the `omni_nav` catkin package. This file is
# auto-generated by CMake; manual edits are normally overwritten.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []  # no exported include dirs
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')  # no catkin dependencies
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []  # no exported libraries
PROJECT_NAME = "omni_nav"
PROJECT_SPACE_DIR = "/home/uchida/omni_robo/devel"
PROJECT_VERSION = "0.0.0"
| [
"888aetuzak215@gmail.com"
] | 888aetuzak215@gmail.com |
e418874c00457ba2e34cdf5d102a6937e90fd320 | 2d8ad9574e44ba2b9db2c0c7c95273c161a0c20d | /make_time_for_TASR.py | 438643b27d715c90a85f100a44e0f69c810baffa | [] | no_license | Starboomboom37183/Recommendation-System | 2c8aa7d8f94a9a386ac64f6b72bbb024c7dfdbfc | a481bb799e44035186b0d3544d3ffa5091439763 | refs/heads/master | 2020-03-28T20:31:55.878743 | 2018-09-17T06:54:07 | 2018-09-17T06:54:07 | 149,079,335 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,111 | py | # !/usr/bin/python
# _*_ coding:utf-8 _*_
from get_mashup import MSSSQL
import numpy as np
import datetime
def transfer_time(s):
    """Parse a space-separated date string of the form
    'Www DD Mon YYYY HH:MM:SS' (weekday, day, English month
    abbreviation, year, time) into a datetime object.

    Replaces the original 12-branch if/elif month ladder with a lookup
    table. Raises KeyError on an unknown month abbreviation (the
    original silently built an unparseable string and failed later in
    strptime instead). Works on both Python 2 and 3.
    """
    months = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
              'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
    parts = s.split(' ')
    # parts[3] = year, parts[2] = month abbreviation, parts[1] = day,
    # parts[4] = HH:MM:SS — reassemble as an ISO-like string for strptime.
    normalized = '%s-%d-%s %s' % (parts[3], months[parts[2]], parts[1], parts[4])
    return datetime.datetime.strptime(normalized, '%Y-%m-%d %H:%M:%S')
# Seed every known user and web service with a far-future sentinel timestamp;
# the true earliest-seen time is filled in from the watchlist table below.
input1 = open('User.txt')
t_user = {}
for line in input1:
    s = line.strip('\r\n').split(' ')
    t_user[s[0]] = datetime.datetime.strptime('2017-09-03 17:00:02','%Y-%m-%d %H:%M:%S')
input2 = open('webservice.txt')
t_s = {}
for line in input2:
    s = line.strip('\r\n').split(' ')
    t_s[s[0]] = datetime.datetime.strptime('2017-09-03 17:00:02','%Y-%m-%d %H:%M:%S')
input1.close()
input2.close()
ms = MSSSQL(host="172.28.37.29", user="sa", pwd="wy9756784750", db="pweb")
resList = ms.ExeQuery("select title,updated from dbo.userinfo")
c = 0
for title,updated in resList:
if t_user.has_key(title):
updated = updated.replace('T',' ')
updated = updated.replace('Z', '')
d2 = datetime.datetime.strptime(updated, '%Y-%m-%d %H:%M:%S')
t_user[title] = d2
input3 = open('final_file.txt')
d = datetime.datetime.strptime('2005-09-03 17:00:02','%Y-%m-%d %H:%M:%S')
print d
set_time = {}
c = 0
for line in input3:
s = line.strip('\r\n').split(' ')
t = d + datetime.timedelta(seconds=int(s[2]))
ut = (t-t_user[s[0]]).days
if ut<0:
ut = 0
'''
# Map alternate service names to their canonical name.
input5 = open('name_replace.txt')
rep = {}
for line in input5:
    s = line.strip('\r\n').split(' ')
    rep[s[1]] = s[0]
# Scan the watchlist and record, for each service and each user, the
# earliest time they appear. NOTE(review): Python 2 code (has_key, print
# statement) with hard-coded DB credentials — should come from config.
ms = MSSSQL(host="172.28.37.29", user="sa", pwd="wy9756784750", db="pweb")
resList = ms.ExeQuery("select * from dbo.watchlist")
c = 0
for wsname,url,id,date in resList:
    # The service name is the last URL segment, minus a '/comments' suffix.
    str_array = url.split('/')
    url = str_array[-1]
    url = url.replace('%2Fcomments', '')
    if rep.has_key(url):
        url = rep[url]
    t = transfer_time(date)
    if t_s.has_key(url):
        if t<t_s[url]:
            t_s[url] = t
    if t_user.has_key(wsname):
        if t<t_user[wsname]:
            t_user[wsname] = t
##print t_user
##print t_s
# Re-read the interaction file and append, to each line, the number of days
# between the interaction time (epoch-offset seconds from 2005-09-03) and
# the user's / service's first appearance.
input3 = open('final_file.txt')
d = datetime.datetime.strptime('2005-09-03 17:00:02','%Y-%m-%d %H:%M:%S')
print d
set_time = {}
c = 0
output = open('final_file_1.txt','w')
for line in input3:
    s = line.strip('\r\n').split(' ')
    t = d + datetime.timedelta(seconds=int(s[2]))
    ut = (t-t_user[s[0]]).days
    st = (t-t_s[s[1]]).days
    ##print ut,st
    '''
    if ut == 0:
        print s[0],s[1],t_user[s[0]],t
    '''
    line = line.strip('\r\n')+' '+str(ut)+' '+ str(st)+'\r\n'
    output.write(line)
| [
"1558448539@qq.com"
] | 1558448539@qq.com |
a26e683d363dea336a27a40943d9620d10297ab6 | 8ff7130f77e2e76c32c45b3d6df53b3586499a58 | /autocomplete.py | 0e2e87bc3cddcc1e562897850a9859512cced334 | [] | no_license | CasperEriksen/spt-gro | 630f05029bf0fb6f39b48e2352050ed94386bee6 | 14a6f92f14da86673b7c5cb0817c1596b261d693 | refs/heads/master | 2020-03-24T13:15:40.370863 | 2018-07-29T07:35:18 | 2018-07-29T07:35:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,983 | py | #!/bin/usr/python
# coding: utf-8
import re
from ordbog import dictionaries
from itertools import compress
import dictionary
import os
import argparse
def relevance_sort(search_results, search_term):
    """Order results so entries whose text matches search_term (as a regex,
    anchored at the start) come first; within each group, alphabetically."""
    keyed = sorted((not bool(re.match(search_term, text)), text)
                   for text in search_results)
    return [text for _, text in keyed]
def find_recommendations(dic, search_terms, directions, tables, language):
    """Look up search_terms (last term treated as a prefix via the SQL
    LIKE wildcard '%') and collect headword strings from the results.

    dic        -- dictionary object exposing lookup(terms, language)
    directions -- [(direction_key, display_name), ...]
    tables     -- [(table_key, display_name), ...]
    Returns the headwords (text of the first <h2>, else <h3>, in each
    entry) in encounter order, with consecutive duplicates removed.

    Fixes vs. the original: the unused byte-count variable is gone, and
    the wildcard appended to the caller's list is now restored even if
    the lookup raises.
    """
    # Temporarily append the SQL wildcard to the last term for a prefix search.
    search_terms[-1] += "%"
    try:
        search_results = dic.lookup(search_terms, language)
    finally:
        # Always restore the caller's list, even when the lookup fails.
        search_terms[-1] = search_terms[-1][:-1]
    matches = ['000']  # sentinel so matches[-1] is always comparable
    for d, _d_name in directions:
        for t, _t_name in tables:
            for entry in search_results[d][t]:
                term = re.search(r'<h2>(.+?)<', entry)
                if not term:
                    term = re.search(r'<h3>(.+?)<', entry)
                if term:
                    headword = term.group(1).rstrip()
                    if headword != matches[-1]:
                        matches.append(headword)
    return matches[1:]  # drop the sentinel
def tab(dic, search_terms, directions, tables, language):
    """Return (common prefix, recommendations) for tab completion.

    The prefix is the longest common prefix of the matches that start
    with the joined query (or of all matches when none/all do); the
    recommendation list puts query-prefixed matches first, each group
    sorted alphabetically.
    """
    matches = find_recommendations(dic, search_terms, directions, tables, language)
    query = " ".join(search_terms)
    starts = [m.startswith(query) for m in matches]
    if len(starts) < 2 or all(starts):
        prefix = os.path.commonprefix(matches)
    else:
        prefix = os.path.commonprefix([m for m, keep in zip(matches, starts) if keep])
    # Sorting on (not starts-with-query, text) floats prefixed matches to
    # the front and alphabetizes within each group.
    ordered = sorted(zip((not flag for flag in starts), matches))
    return prefix, [m for _, m in ordered]
def main():
    """CLI entry point: parse arguments, run the tab-completion lookup and
    write the query, common prefix and candidate terms to tabterms.txt."""
    parser = argparse.ArgumentParser()
    parser.add_argument('search_terms', nargs="+")
    parser.add_argument('-l', '--lang', nargs='?',
                        default='en', dest='lang', choices=dictionaries.keys(),
                        help=u'language (enda, daen, de, fr)')
    parser.add_argument('-t', '--trans', nargs='?', default=0, dest="translate",
                        choices=['0', '1', '2'], help='0: from Danish, 1: to Danish, 2: both ways')
    args = parser.parse_args()
    translate = int(args.translate)
    language = args.lang
    search_terms = [term.lstrip(' ').rstrip(' ') for term in args.search_terms]
    language_name = dictionaries[language]['name']
    # Both lookup directions by default; prune to one depending on the
    # dictionary's capabilities ('doubflag') and the requested direction.
    directions = [('fromDanish', 'Dansk-%s' % language_name), ('toDanish', '%s-Dansk' % language_name)]
    if dictionaries[language]['doubflag'] < 2 or translate == 0:
        del directions[1]
    elif translate == 1:
        del directions[0]
    dic = dictionary.Dictionary(dictionaries)
    # Multi-word queries additionally search the collocations table.
    tables = [('lookup', 'Artikler')]
    if len(search_terms) > 1:
        tables.append(('collocation_lookup', 'Ordforbindelser'))
    prefix, tab_terms = tab(dic, search_terms, directions, tables, language)
    # Fallback: a single word with no article hits may still match collocations.
    if not tab_terms and len(search_terms) == 1:
        tables = [('collocation_lookup', 'Ordforbindelser')]
        prefix, tab_terms = tab(dic, search_terms, directions, tables, language)
    # Output format: query line, prefix line, then one candidate per line.
    ofile = open('tabterms.txt', 'w')
    ofile.write("%s\n" % " ".join(search_terms))
    ofile.write("%s\n" % prefix)
    if len(tab_terms) >= 1:
        for term in tab_terms:
            ofile.write("%s\n" % (term))
    else:
        ofile.write("")
    ofile.close()
main()
| [
"Casper.Eriksen@skat.dk"
] | Casper.Eriksen@skat.dk |
deb52afb2b156411e5c1de2b5ca0c5721770e6c9 | 85225916d51fb34d050c414e6a83687ff70d51cb | /listComprehension.py | 0b5fe24109779aaa0019ea64db411c8426453025 | [] | no_license | dip007/pythonBeginners_SS | ff31e3df86e09681dc6e90e111d450e123ae33d4 | 5361f9e5c305c946d65233de28ac501db6de50fd | refs/heads/master | 2022-04-26T08:21:46.825584 | 2020-04-19T12:31:37 | 2020-04-19T12:31:37 | 256,993,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | x,y,z,n = (int(input()) for i in range(4))
# Print every triple [a, b, c] with 0<=a<=x, 0<=b<=y, 0<=c<=z whose sum is
# not n, in lexicographic order (x, y, z, n are read on the line above).
print([[a,b,c] for a in range(0,x+1) for b in range(0,y+1) for c in range(0,z+1) if a+b+c!=n])
#Input and output: This prints the details in lexicographic order.
# 1
# 1
# 1
# 2
# [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]]
# Equivalent explicit-loop version kept for reference:
# x=int(input())
# y=int(input())
# z=int(input())
# n=int(input())
# p=0
# ar=[]
# for i in range(x+1):
# for j in range(y+1):
# for k in range(z+1):
# if (i+j+k)!=n:
# ar.append([])
# ar[p]=[i,j,k]
# p=p+1
# print (ar,end='')
"subhradipsaha5@gmail.com"
] | subhradipsaha5@gmail.com |
175a846eb57f6a81839959da759bc35c3aa67727 | 18dc5cbaeb9baa167d3feee027588879468ddf8a | /MySQL-AutoXtraBackup/partial_recovery/partial.py | bbbc83a891c6d44f6f754a2eb663aa79d975b050 | [
"MIT"
] | permissive | dilipsingh556/test | 78ccceefe4faad1976ab6fe49296e32af15a68bb | 40261df2374497f5efe4878af490bbc41922ff0b | refs/heads/master | 2020-05-09T03:52:06.050856 | 2019-04-12T10:13:54 | 2019-04-12T10:13:54 | 180,982,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,508 | py | import os
import shutil
import subprocess
from general_conf.generalops import GeneralClass
import re
from general_conf import check_env
import sys
import logging
logger = logging.getLogger(__name__)
class PartialRecovery(GeneralClass):
    """Restore a single InnoDB table from an XtraBackup full backup.

    Uses the "Transportable Tablespace" workflow (MySQL >= 5.6): discard the
    table's tablespace, copy the backed-up .ibd file into the datadir, then
    import it again.  All MySQL interaction goes through the command line
    client via subprocess.

    SECURITY NOTE(review): SQL statements and shell commands are built with
    %-interpolation of interactive input (database/table names) and executed
    through subprocess.getstatusoutput(), i.e. through a shell; the MySQL
    password is also placed on the command line.  Safe only for trusted,
    interactive admin use -- never expose this to untrusted input.
    """
    def __init__(self, config='/etc/bck.conf'):
        # Load general backup options from the config file and make sure the
        # mysqlfrm utility (needed to recover CREATE TABLE from .frm) exists.
        self.conf = config
        GeneralClass.__init__(self, self.conf)
        if shutil.which('mysqlfrm') is None:
            logger.critical("Could not find mysqlfrm! Please install it or check if it is in PATH")
            raise RuntimeError("Could not find mysqlfrm! Please install it or check if it is in PATH")
    def create_mysql_client_command(self, statement):
        """Build a mysql client command line that executes *statement*.

        Connects via socket when ``mysql_socket`` is configured, otherwise
        via host/port.  Connection attributes (self.mysql, self.mycnf, ...)
        come from GeneralClass/config.
        """
        command_connection = '{} --defaults-file={} -u{} --password={}'
        command_execute = ' -e "{}"'
        if hasattr(self, 'mysql_socket'):
            command_connection += ' --socket={}'
            command_connection += command_execute
            new_command = command_connection.format(
                self.mysql,
                self.mycnf,
                self.mysql_user,
                self.mysql_password,
                self.mysql_socket,
                statement)
            return new_command
        else:
            command_connection += ' --host={} --port={}'
            command_connection += command_execute
            new_command = command_connection.format(
                self.mysql,
                self.mycnf,
                self.mysql_user,
                self.mysql_password,
                self.mysql_host,
                self.mysql_port,
                statement)
            return new_command
    def check_innodb_file_per_table(self):
        """
        Function for checking MySQL innodb_file_per_table option.
        It is needed for "Transportable Tablespace" concept.
        :return: True/False
        """
        statement = "select @@global.innodb_file_per_table"
        run_command = self.create_mysql_client_command(statement=statement)
        logger.debug("Checking if innodb_file_per_table is enabled")
        status, output = subprocess.getstatusoutput(run_command)
        # NOTE(review): output[-1] is the LAST CHARACTER of the client
        # output; this assumes the query result ('0'/'1') ends the output.
        if status == 0 and int(output[-1]) == 1:
            logger.debug("OK: innodb_file_per_table is enabled!")
            return True
        elif status == 0 and int(output[-1]) == 0:
            logger.debug("OK: innodb_file_per_table is disabled!")
            return False
        else:
            logger.error("FAILED: InnoDB file per-table Check")
            logger.error(output)
            raise RuntimeError("FAILED: InnoDB file per-table Check")
    def check_mysql_version(self):
        """
        Function for checking MySQL version.
        Version must be >= 5.6 for using "Transportable Tablespace" concept.
        :return: True/False
        """
        statement = "select @@version"
        run_command = self.create_mysql_client_command(statement=statement)
        logger.debug("Checking MySQL version")
        status, output = subprocess.getstatusoutput(run_command)
        # NOTE(review): a substring check -- only versions whose string
        # contains '5.6' or '5.7' are accepted (8.0 would be rejected).
        if status == 0 and ('5.6' in output):
            logger.debug("You have correct version of MySQL")
            return True
        elif status == 0 and ('5.7' in output):
            logger.debug("You have correct version of MySQL")
            return True
        elif status == 0 and ('5.7' not in output) and ('5.6' not in output):
            logger.error("Your MySQL server is not supported. MySQL version must be >= 5.6")
            raise RuntimeError("Your MySQL server is not supported. MySQL version must be >= 5.6")
        else:
            logger.error("FAILED: MySQL version check")
            logger.error(output)
            raise RuntimeError("FAILED: MySQL version check")
    def check_database_exists_on_mysql(self, database_name):
        """
        Function check if this database already exists in MySQL Server.(.frm and .ibd files are exist)
        In other words database is not dropped. If there is no such database, there is an input for creation.
        :param database_name: Specified database name
        :return: True/False
        """
        statement = "SELECT count(*) FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = '%s'" % database_name
        run_command = self.create_mysql_client_command(statement=statement)
        logger.debug("Checking if database exists in MySQL")
        status, output = subprocess.getstatusoutput(run_command)
        if status == 0 and int(output[-1]) == 1:
            logger.debug("Database exists!")
            return True
        if status == 0 and int(output[-1]) == 0:
            logger.debug("There is no such database!")
            logger.debug("Create Specified Database in MySQL Server, before restoring single table")
            # Interactive prompt: offer to create the missing database.
            answer = input("We can create it for you do you want? (yes/no): ")
            if answer == 'yes':
                create_db = "create database %s" % database_name
                run_command = self.create_mysql_client_command(statement=create_db)
                logger.debug("Creating specified database")
                status, output = subprocess.getstatusoutput(run_command)
                if status == 0:
                    logger.debug("OK: {} database created".format(database_name))
                    return True
                else:
                    logger.error("FAILED: to create database!")
                    logger.error(output)
                    raise RuntimeError("FAILED: to create database!")
            else: # if you type non-yes word
                logger.error("Exited!")
                return False
        else:
            logger.error("FAILED: Check for database")
            logger.error(output)
            raise RuntimeError("FAILED: Check for database")
    def check_table_exists_on_mysql(
            self,
            path_to_frm_file,
            database_name,
            table_name):
        """
        Function to check if table exists on MySQL.
        If it is dropped, we will try to extract table create statement from .frm file from backup file.
        :param path_to_frm_file: Path for .frm file
        :param database_name: Specified database name
        :param table_name: Specified table name
        :return: True/False
        """
        statement = "select count(*) from INFORMATION_SCHEMA.tables " \
                    "where table_schema = '%s'" \
                    "and table_name = '%s'" % (database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        logger.debug("Checking if table exists in MySQL Server")
        status, output = subprocess.getstatusoutput(run_command)
        if status == 0 and int(output[-1]) == 1:
            logger.debug("Table exists in MySQL Server.")
            return True
        elif status == 0 and int(output[-1]) == 0:
            logger.debug("Table does not exist in MySQL Server.")
            logger.debug("You can not restore table, with not existing tablespace file(.ibd)!")
            logger.debug("We will try to extract table create statement from .frm file, from backup folder")
            create = self.run_mysqlfrm_utility(path_to_frm_file=path_to_frm_file)
            # Pull the CREATE TABLE ... ENGINE=... statement out of the
            # mysqlfrm diagnostic output and replay it (backticks stripped).
            regex = re.compile(r'((\n)CREATE((?!#).)*ENGINE=\w+)', re.DOTALL)
            matches = [m.groups() for m in regex.finditer(create)]
            for m in matches:
                create_table = m[0]
                new_create_table = create_table.replace("`", "")
                run_command = self.create_mysql_client_command(statement=new_create_table)
                status, output = subprocess.getstatusoutput(run_command)
                if status == 0:
                    logger.debug("Table Created from .frm file!")
                    return True
                else:
                    logger.error("Failed to create table from .frm file!")
                    logger.error(output)
                    raise RuntimeError("Failed to create table from .frm file!")
        else:
            logger.error("FAILED: Check if table exists")
            logger.error(output)
            raise RuntimeError("FAILED: Check if table exists")
    @staticmethod
    def run_mysqlfrm_utility(path_to_frm_file):
        """Run mysqlfrm --diagnostic on a .frm file and return its output."""
        command = '/usr/bin/mysqlfrm --diagnostic %s' % path_to_frm_file
        logger.debug("Running mysqlfrm tool")
        status, output = subprocess.getstatusoutput(command)
        if status == 0:
            logger.debug("OK: Success to run mysqlfrm")
            return output
        else:
            logger.error("FAILED: run mysqlfrm")
            logger.error(output)
            raise RuntimeError("FAILED: run mysqlfrm")
    def get_table_ibd_file(self, database_name, table_name):
        """
        Locate backed up database and table.
        Exactly we are looking for .ibd file.
        .ibd file is a tablespace file where table data located.
        :param database_name: Specified database name
        :param table_name: Specified table name
        :return .ibd file full path / False if not exists
        """
        database_dir_list = []
        database_objects_full_path = []
        find_objects_full_path = []
        table_dir_list = []
        # Look for all files in database directory
        for i in os.listdir(self.full_dir):
            for x in os.listdir(self.full_dir + "/" + i):
                if os.path.isdir(
                        self.full_dir +
                        "/" +
                        i +
                        "/" +
                        x) and x == database_name:
                    for z in os.listdir(self.full_dir + "/" + i + "/" + x):
                        database_dir_list.append(z)
                        database_objects_full_path.append(
                            self.full_dir + "/" + i + "/" + x + "/" + z)
        # If database directory exists find already provided table in database
        # directory
        if len(database_dir_list) > 0:
            for i in database_dir_list:
                base_file = os.path.splitext(i)[0]
                ext = os.path.splitext(i)[1]
                if table_name == base_file:
                    table_dir_list.append(i)
        # If table name from input is valid and it is located in database
        # directory return .ibd file name
        if len(database_dir_list) > 0 and len(
                table_dir_list) == 2: # Why 2? because every InnoDB table must have .frm and .ibd file
            for i in table_dir_list:
                ext = os.path.splitext(i)[1]
                if ext == '.ibd':
                    for a in database_objects_full_path:
                        if i in a:
                            find_objects_full_path.append(a)
            # Return the first matching .ibd path found.
            if len(find_objects_full_path) > 0:
                for x in find_objects_full_path:
                    return x
        else:
            logger.error("Sorry, There is no such Database or Table in backup directory")
            logger.error("Or maybe table storage engine is not InnoDB")
            raise RuntimeError("Sorry, There is no such Database or Table in backup directory "
                               "Or maybe table storage engine is not InnoDB ")
    def lock_table(self, database_name, table_name):
        """Take a WRITE lock on the table for the duration of the restore."""
        # Executing lock tables write on specified table
        statement = "LOCK TABLES %s.%s WRITE" % (database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.debug("Applying write lock!")
        if status == 0:
            logger.debug("OK: Table is locked")
            return True
        else:
            logger.error("FAILED: to LOCK!")
            logger.error(output)
            raise RuntimeError("FAILED: to LOCK!")
    def alter_tablespace(self, database_name, table_name):
        """Discard the table's current tablespace so a new .ibd can be imported."""
        # Running alter table discard tablespace here
        statement = "ALTER TABLE %s.%s DISCARD TABLESPACE" % (
            database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.debug("Discarding tablespace")
        if status == 0:
            logger.debug("OK: Tablespace discarded successfully")
            return True
        else:
            logger.error("FAILED: discard tablespace!")
            logger.error(output)
            raise RuntimeError("FAILED: discard tablespace!")
    @staticmethod
    def copy_ibd_file_back(path_of_ibd_file, path_to_mysql_database_dir):
        """Copy the backed-up .ibd file into the MySQL database directory."""
        # Copy .ibd file back
        try:
            logger.debug("OK: Copying .ibd file back")
            shutil.copy(path_of_ibd_file, path_to_mysql_database_dir)
            return True
        except Exception as err:
            logger.error("FAILED: copy .ibd file back")
            logger.error(err)
            raise RuntimeError("FAILED: copy .ibd file back")
    def give_chown(self, path_to_mysql_database_dir):
        """Fix ownership of the copied file (self.chown_command from config)."""
        # run chown command
        comm = '%s %s' % (self.chown_command, path_to_mysql_database_dir)
        status, output = subprocess.getstatusoutput(comm)
        logger.debug("Running chown command!")
        if status == 0:
            logger.debug("OK: Chown command completed")
            return True
        else:
            logger.error("FAILED: Chown Command")
            raise RuntimeError("FAILED: Chown Command")
    def import_tablespace(self, database_name, table_name):
        """Import the freshly copied tablespace into the table."""
        # Running alter table import tablespace
        statement = "ALTER TABLE %s.%s IMPORT TABLESPACE" % (
            database_name, table_name)
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.debug("Importing Tablespace!")
        if status == 0:
            logger.debug("OK: Tablespace imported")
            return True
        else:
            logger.error("FAILED: Tablespace import")
            logger.error(output)
            raise RuntimeError("FAILED: Tablespace import")
    def unlock_tables(self):
        """Release the table locks taken by lock_table()."""
        # Run unlock tables command
        statement = "unlock tables"
        run_command = self.create_mysql_client_command(statement=statement)
        status, output = subprocess.getstatusoutput(run_command)
        logger.debug("Unlocking tables!")
        if status == 0:
            logger.debug("OK: Unlocked!")
            return True
        else:
            logger.error("FAILED: Unlocking")
            logger.error(output)
            raise RuntimeError("FAILED: Unlocking")
    def final_actions(self):
        """Interactive driver: prompt for database/table and run the full
        restore pipeline (checks, lock, discard, copy, chown, import, unlock).
        """
        # Type Database name of table which you want to restore
        database_name = input("Type Database name: ")
        # Type name of table which you want to restore
        table_name = input("Type Table name: ")
        path = self.get_table_ibd_file(
            database_name=database_name,
            table_name=table_name)
        path_to_mysql_datadir = self.datadir + "/" + database_name
        if path:
            # Sibling .frm path: swap the 'ibd' suffix for 'frm'.
            path_to_frm_file = path[:-3] + 'frm'
        obj_check_env = check_env.CheckEnv(self.conf)
        if path:
            try:
                obj_check_env.check_mysql_uptime()
                self.check_innodb_file_per_table()
                self.check_mysql_version()
                self.check_database_exists_on_mysql(
                    database_name=database_name)
                self.check_table_exists_on_mysql(
                    path_to_frm_file=path_to_frm_file,
                    database_name=database_name,
                    table_name=table_name)
                self.lock_table(database_name=database_name, table_name=table_name)
                self.alter_tablespace(database_name=database_name, table_name=table_name)
                self.copy_ibd_file_back(path_of_ibd_file=path, path_to_mysql_database_dir=path_to_mysql_datadir)
                self.give_chown(path_to_mysql_database_dir=path_to_mysql_datadir)
                self.import_tablespace(database_name=database_name, table_name=table_name)
                self.unlock_tables()
            except Exception as err:
                logger.error("FAILED: Table is not recovered")
                logger.error(err)
                raise RuntimeError("FAILED: Table is not recovered")
            else:
                # try/except/ELSE: reached only when every step succeeded.
                logger.debug("OK: Table Recovered! ...")
                return True
| [
"dilip_singh556@yahoo.com"
] | dilip_singh556@yahoo.com |
819b4d9d34967f31d726f790217eb7114031bffe | dc9a84e39b1cb7e4247e1346fd852da3c2442dd0 | /back/node_modules/bcrypt/build/config.gypi | 411d3a17789f08bd930ec2ab2a00b3be2c9a288c | [
"MIT"
] | permissive | Mamisoa26/ProjetNirina | 1685f0d901a1b9363f74fb0c1ddc6af96057fa83 | 93f475ce03644f2b99fb4fbe3cd4cb2f7b7ec93f | refs/heads/master | 2023-01-03T11:33:46.585386 | 2019-08-30T11:47:57 | 2019-08-30T11:47:57 | 205,341,400 | 0 | 0 | null | 2022-12-22T12:29:48 | 2019-08-30T08:40:04 | JavaScript | UTF-8 | Python | false | false | 5,777 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"gas_version": "2.27",
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"is_debug": 0,
"llvm_version": 0,
"napi_build_version": "0",
"node_byteorder": "little",
"node_code_cache_path": "yes",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "so.72",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "/home/dev/.node-gyp/12.6.0",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/home/dev/Bureau/ecommerce/back/node_modules/bcrypt/lib/binding/bcrypt_lib.node",
"module_name": "bcrypt_lib",
"module_path": "/home/dev/Bureau/ecommerce/back/node_modules/bcrypt/lib/binding",
"napi_version": "4",
"node_abi_napi": "napi",
"node_napi_label": "node-v72",
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/6.9.0 node/v12.6.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"noproxy": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/dev/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"preid": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"audit": "true",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"prefix": "/usr",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/dev/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"audit_level": "low",
"prefer_offline": "",
"color": "true",
"sign_git_commit": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0022",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"before": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"update_notifier": "true",
"auth_type": "legacy",
"node_version": "12.6.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/dev/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"rakotonindrinasandiarivelo12@gmail.com"
] | rakotonindrinasandiarivelo12@gmail.com |
e34b734043606ce4916c9eacceca3ecfd10d6f39 | c7c4588d2ffec63b877daf192e447ab836b75c5d | /day-09/script2.py | af01e5514b20dd31ce115827e3c1e8cbcde9aa8d | [] | no_license | mahrtynas/advent-of-code-2020 | 6ad907689ff185086fbd107a36babfb12f282706 | f8d4506a42c61689f8076c7bc5e98732fdafcb2d | refs/heads/master | 2023-02-06T13:57:04.941123 | 2020-12-22T09:23:11 | 2020-12-22T09:23:11 | 318,467,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | def is_valid_target(values, target):
for i in range(len(values) - 1):
d = set([target - x for x in values[:i] + values[(i + 1):]])
if values[i] in d:
return True
print("Target %s can not be achieved" % target)
return False
def find_contiguous_set(values, target):
    """Find a contiguous run in *values* that sums to *target* (AoC day 9, part 2).

    Repeatedly pops the last element and scans backwards from the new tail,
    so each outer iteration tests contiguous runs that END at the popped
    element.  Only runs of at least two numbers are considered (the sum check
    happens after a second element is added).  Prints the run and max+min of
    it, returns None.  *values* is consumed destructively; raises IndexError
    once the candidates are exhausted without a match.
    """
    while True:
        v = values.pop(-1)
        t = v  # running sum of the candidate run ending at v
        i = 0  # how far back from the current tail the run extends
        while t <= target:
            t += values[(-1-i)]
            if t == target:
                # The run is the scanned tail slice plus the popped element.
                d = set(values[(-1-i):] + [v])
                print("Set %s sums to %s" % (d, target))
                print("Max: %s, min: %s, sum = %s" % (max(d), min(d), max(d) + min(d)))
                return
            i += 1
def main():
    """Solve AoC 2020 day 9: locate the first number that is not a sum of
    two of the previous 25, then print the contiguous run summing to it."""
    with open("input.txt") as f:
        numbers = [int(x) for x in f.read().splitlines()]
    preamble_size = 25
    # BUG fix: the range previously stopped at len(numbers) - 1, so the very
    # last number of the input was never validated.
    for i in range(preamble_size, len(numbers)):
        target = numbers[i]
        values = numbers[(i - preamble_size):i]
        if not is_valid_target(values, target):
            find_contiguous_set(numbers[:i], target)


if __name__ == "__main__":
    main()
"mseskaitis@gmail.com"
] | mseskaitis@gmail.com |
fdb5faa68cac82c0f3d4f68e997ab2f4251ca55d | 0f44013e993727e94a52d3f21e6c934b3243bda4 | /src/hello_wx.py | 35fb1c048c06dac8f52e584fdcb12b1b618a2edd | [
"MIT"
] | permissive | davidfstr/Python-in-Mac-App-Store | bf336e43ec5dbdfe942ad5a4c9e2bb84f32abe00 | 1bdbb928bd8a4c7e398bc6916485e3dfc2f03825 | refs/heads/master | 2021-06-07T21:49:03.462443 | 2021-04-23T14:07:35 | 2021-04-23T14:07:35 | 21,087,030 | 93 | 23 | null | 2015-04-09T04:56:14 | 2014-06-22T06:14:24 | Shell | UTF-8 | Python | false | false | 223 | py | import wx
def main():
    """Show a modal single-choice color picker (wxPython demo)."""
    # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython
    # (plain wx.App is the replacement) -- confirm the targeted wx version.
    app = wx.PySimpleApp()
    colors = ['Red', 'Blue', 'Green', 'Pink', 'White']
    dialog = wx.SingleChoiceDialog(
        None, 'Pick something...', 'Pick a Color', colors)
    dialog.ShowModal()  # blocks until the user dismisses the dialog
| [
"davidfstr@gmail.com"
] | davidfstr@gmail.com |
459098cdcbc4db3bb0ec45f2ae780ce551e04a9b | 36bbcb179e64185125998b8b2a7bd00b4c54073e | /template.py | 3e385e3d279d583082b86c44e61eba98ecd1cb18 | [] | no_license | savanto/prob | 4edde98caa90bb0861c97f5ec27467fe062caca1 | 585cb598bf2c49cfdc7a7a54b36d1abed5ae026a | refs/heads/master | 2020-05-02T21:02:53.400615 | 2014-10-21T04:19:56 | 2014-10-21T04:19:56 | 19,366,198 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | #!/usr/bin/env python
"""
PROBLEM
Problem PROBLEM
"""
| [
"savanto@gmail.com"
] | savanto@gmail.com |
31563306cb26a940817b59ba8a33317d20c54344 | 4c995a7626134308b6f900f70e4aac799a4379ff | /home/admin.py | eaa5825110f336ef117f618b8d7676cd0c9e69bd | [] | no_license | leventerevesz/irrigation-server | 3c6a25d36e5ccd2bc4547ce55e0a6be72533504d | 9efd022b6dda81e4088dd78036d652cd88d8214a | refs/heads/master | 2022-12-16T16:00:06.359359 | 2019-12-05T21:13:16 | 2019-12-05T21:13:16 | 219,180,821 | 0 | 0 | null | 2022-12-08T06:59:35 | 2019-11-02T16:26:20 | Python | UTF-8 | Python | false | false | 211 | py | from django.contrib import admin
from .models import Settings, Log
class LogAdmin(admin.ModelAdmin):
    """Django admin config for Log entries: list message and timestamp columns."""
    list_display = ('message', 'datetime')
# Expose both models in the Django admin site.
admin.site.register(Settings)
admin.site.register(Log, LogAdmin) | [
"levete.revesz@gmail.com"
] | levete.revesz@gmail.com |
2de17c60a0220cf1f1c8838295b87f451491456f | bfe6c95fa8a2aae3c3998bd59555583fed72900a | /summaryRanges.py | 731e8b9f2fedab96a943f3eef675bae077229713 | [] | no_license | zzz136454872/leetcode | f9534016388a1ba010599f4771c08a55748694b2 | b5ea6c21bff317884bdb3d7e873aa159b8c30215 | refs/heads/master | 2023-09-01T17:26:57.624117 | 2023-08-29T03:18:56 | 2023-08-29T03:18:56 | 240,464,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py | from typing import *
# Not sure which problem variant this one solves
class Solution1:
    def summaryRanges(self, nums: List[int]) -> List[str]:
        """LeetCode 228: collapse a sorted list of unique ints into range strings.

        e.g. [0, 1, 2, 4, 5, 7] -> ["0->2", "4->5", "7"]; [] -> [].

        Improvement over the original: the "flush current run" logic was
        duplicated (once in the loop, once after it) and the empty list was
        special-cased; a single run-detection loop handles all of it.
        """
        out = []
        i = 0
        n = len(nums)
        while i < n:
            start = i
            # Extend the run while consecutive values differ by exactly 1.
            while i + 1 < n and nums[i + 1] == nums[i] + 1:
                i += 1
            if start == i:
                out.append(str(nums[start]))
            else:
                out.append(str(nums[start]) + '->' + str(nums[i]))
            i += 1
        return out
# nums = [0,1,2,4,5,7]
# nums = [0,2,3,4,6,8,9]
# sl=Solution()
# print(sl.summaryRanges(nums))
# Turn a stream of integers into a set of disjoint intervals (LeetCode 352)
class SummaryRanges:
    """LeetCode 352: maintain a sorted list of disjoint [start, end] intervals
    as integers arrive one by one.

    self.log keeps two sentinel intervals at the ends ([-2, -2] and
    [123456, 123456]) so the binary search never falls off either edge.
    NOTE(review): this assumes every inserted value lies strictly between the
    sentinels; a value adjacent to a sentinel (e.g. >= 123455 or <= -1) would
    merge with it -- fine for the original LeetCode constraints, but confirm
    before reusing.
    """
    def __init__(self):
        # [left sentinel, right sentinel]; real intervals go in between.
        self.log = [[-2, -2], [123456, 123456]]
    def addNum(self, val: int) -> None:
        """Insert *val*, extending/merging neighbouring intervals as needed."""
        # print('before', self.log)
        left = 0
        right = len(self.log) - 1
        # Binary search: afterwards `left` indexes the first interval whose
        # start is strictly greater than val.
        while left <= right:
            mid = (left + right) // 2
            if val >= self.log[mid][0]:
                left = mid + 1
            else:
                right = mid - 1
        # print('search', left)
        if val <= self.log[left - 1][1]:
            return  # already covered by the preceding interval
        if val == self.log[left - 1][1] + 1:
            # Extends the preceding interval upward by one ...
            self.log[left - 1][1] += 1
            if self.log[left - 1][1] == self.log[left][0] - 1:
                # ... and now touches the next interval: merge the two.
                self.log[left - 1][1] = self.log[left][1]
                del self.log[left]
            return
        if val == self.log[left][0] - 1:
            # Extends the following interval downward by one.
            self.log[left][0] -= 1
            return
        self.log.insert(left, [val, val])  # isolated value: brand-new interval
    def getIntervals(self) -> List[List[int]]:
        """Return the real intervals, excluding the two sentinels."""
        return self.log[1:len(self.log) - 1]
# Your SummaryRanges object will be instantiated and called as such:
# obj = SummaryRanges()
# obj.addNum(val)
# param_2 = obj.getIntervals()
# Scripted LeetCode-style scenario: method names in opList are invoked in
# order with the matching argument lists from dataList (index-aligned).
opList = [
    "SummaryRanges", "addNum", "getIntervals", "addNum", "getIntervals",
    "addNum", "getIntervals", "addNum", "getIntervals", "addNum",
    "getIntervals"
]
dataList = [[], [1], [], [3], [], [7], [], [2], [], [6], []]
# opList=["SummaryRanges", "addNum", "addNum", "addNum", "getIntervals"]
# dataList=[[], [1], [3], [2], []]
class Tester:
    """Drive an op/arg scenario against the class named in opList[0].

    SECURITY NOTE: eval() resolves the class name from opList; this is only
    acceptable because opList is hardcoded in this file -- never feed this
    harness untrusted input.
    """
    def __init__(self, opList, dataList):
        testedClass = eval(opList[0])
        testedInstance = testedClass(*dataList[0])
        for i in range(1, len(opList)):
            print(opList[i], dataList[i])
            if not dataList[i]:
                print(getattr(testedInstance, opList[i])())
            else:
                print(getattr(testedInstance, opList[i])(*dataList[i]))
# Run the scripted scenario defined by opList/dataList above.
Tester(opList, dataList)
| [
"zzz136454872@163.com"
] | zzz136454872@163.com |
63f8481fd944a0952460cc2bac048562bb372bd0 | e7282ef96d0d3ebbe86311f4b3d1db12c1dcf584 | /fio_csv.py | 5c8ca6ae29274c5b8af6b2e3e5300da3f9a90b37 | [] | no_license | Saltflow/fio_helper | 31c67bbfe51ad522d2702879df056d4e7fe7e122 | 8ab80c14436493e2a63830622a3f8c43354c33fc | refs/heads/master | 2023-04-14T18:30:24.825766 | 2021-04-20T12:00:14 | 2021-04-20T12:00:14 | 321,084,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,011 | py | #!/usr/bin/env python3
import argparse
import json
import pprint
import os
import re
import sys
import pandas
import numpy as np
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def get_arg_parser():
    """Build the command line parser for the fio-to-xlsx converter.

    Returns an argparse.ArgumentParser with: positional ``path``, boolean
    ``--bs``/``--size`` value flags, ``-d/--dir`` directory mode, and
    ``-o/--output`` xlsx base name.
    """
    # BUG fix: the original used type=bool, which treats EVERY non-empty
    # string (including "false" and "0") as True; parse truthy spellings
    # explicitly instead while keeping the value-taking CLI shape.
    def str2bool(value):
        return value.lower() in ('1', 'true', 'yes', 'y', 'on')
    p = argparse.ArgumentParser(
        description='Create graphs from various fio json outputs')
    p.add_argument('path', help='Source path for fio output')
    p.add_argument("--bs", "-b", help="list blocksize as variable",
                   type=str2bool, default=False)
    p.add_argument("--size", "-s", help="list size as variable",
                   type=str2bool, default=False)
    p.add_argument(
        '-d',
        '--dir', action="store_true",
        help='Read output files from a directory and consider files to be of the same run')
    p.add_argument('-o', '--output', help='output file name for xlsx',
                   default='file')
    return p
def parse_keys(key):
    """Split a fio job label of the form 'name=value' into (name, int(value))."""
    fields = key.split('=')
    name = fields[0]
    value = int(fields[1])
    return name, value
class FioResults(object):
    """Aggregate one or more fio JSON result files and export them to xlsx.

    Two parsing modes: a single result file, or a directory whose files are
    all considered part of the same run (``args.dir``).

    Fixes over the original:
    - ``self.meta is {}`` compared identity and was always False (empty meta
      then crashed with IndexError); now uses truthiness.
    - ``job['error'] is not 0`` compared identity, not value.
    - the cache-staleness check compared the literal string ``'name'``
      against ``test['name']``, defeating the cache entirely.
    - ``DataFrame.append`` (removed in pandas 2.0) replaced by
      ``pandas.concat``.
    """

    def __init__(self, args):
        self.b_width = 0.15  # kept for compatibility; unused in this class
        self.args = args
        self.data = {
            'results': [],
            'directory': self.args.dir
        }
        # Per-test aggregation cache; cache['name'] records which test it holds.
        self.cache = {}
        # Per-jobname metadata collected by _aggregate_data().
        self.meta = {}

    @property
    def num_clients(self):
        """Number of client hosts recorded for the first job name (0 if none)."""
        if not self.meta:
            return 0
        # TODO fix dirty hack
        k = list(self.meta.keys())[0]
        return len(self.meta[k]['clients'])

    @property
    def num_threads(self):
        """Jobs-per-client for the first job name (0 when nothing aggregated)."""
        if not self.meta:
            return 0
        # TODO fix dirty hack
        # NOTE(review): 'clients' is never populated (the append in
        # _aggregate_data is commented out), so this divides by zero -- confirm.
        k = list(self.meta.keys())[0]
        return self.meta[k]['count'] / len(self.meta[k]['clients'])

    def parse_data(self):
        """Load result file(s) according to the configured mode."""
        if self.args.dir:
            self._parse_dir()
        else:
            self._parse_file(self.args.path)

    def _parse_dir(self):
        """Parse every regular file in args.path as part of the same run."""
        for f in os.listdir(self.args.path):
            path = '{}/{}'.format(self.args.path, f)
            if os.path.isfile(path):
                self._parse_file(path, os.path.basename(path))

    def _parse_file(self, path, file='default'):
        """Parse one fio JSON output file; silently skip non-JSON content."""
        with open(path) as file_:
            try:
                d = json.load(file_)
            except ValueError:
                print('IGNORING file {}, contains no valid JSON'.format(path))
            else:
                self.data['results'].append({'name': file, 'stats': d})

    def _aggregate_data(self, test):
        """Aggregate one test's per-job stats into DataFrames in self.cache.

        Populates cache keys 'bw', 'iops', 'lat_dist', 'meta_clients' and
        'name', plus per-jobname entries in self.meta.
        """
        if not self.data['results']:
            print('ERROR...no data found.')
            sys.exit()
        d = {}
        result = test['stats']
        self.cache['name'] = test['name']
        # fio emits 'jobs' for local runs and 'client_stats' in
        # client/server mode.
        if 'jobs' in result:
            result_key = 'jobs'
        elif 'client_stats' in result:
            result_key = 'client_stats'
        else:
            raise KeyError(
                "fio output contains neither 'jobs' nor 'client_stats'")
        for job in result[result_key]:
            # Skip the synthetic 'All clients' summary entry if present.
            if job['jobname'] == 'All clients':
                continue
            # BUG fix: `is not 0` compared object identity, not value.
            if job['error'] != 0:
                print('job {} reported an error...skipping'.format(
                    job['jobname']
                ))
                continue
            # Accumulate per-jobname totals across all clients/threads.
            entry = d.setdefault(job['jobname'], {
                'read': 0, 'write': 0,
                'r_iops': 0, 'w_iops': 0,
                'lat_us': {}, 'lat_ms': {},
                'clients': [], 'options': {},
                'count': 0})
            entry['options'] = job['job options']
            entry['count'] += 1
            # if job['hostname'] not in entry['clients']:
            #     entry['clients'].append(job['hostname'])
            entry['read'] += job['read']['bw']
            entry['write'] += job['write']['bw']
            entry['r_iops'] += job['read']['iops']
            entry['w_iops'] += job['write']['iops']
            for k, v in job['latency_us'].items():
                entry['lat_us'][k] = entry['lat_us'].get(k, 0) + v
            for k, v in job['latency_ms'].items():
                entry['lat_ms'][k] = entry['lat_ms'].get(k, 0) + v
        # Job names look like 'bs=4096'; the key part names a column.
        var = ""
        for key in d:
            var = parse_keys(key)[0]
            break
        self.cache['bw'] = pandas.DataFrame(data={
            'name': list(d.keys()),
            var: [parse_keys(k)[1] for k in d.keys()],
            'read': [v['read'] for v in d.values()],
            'write': [v['write'] for v in d.values()]})
        self.cache['iops'] = pandas.DataFrame(data={
            'name': list(d.keys()),
            var: [parse_keys(k)[1] for k in d.keys()],
            'read': [v['r_iops'] for v in d.values()],
            'write': [v['w_iops'] for v in d.values()]})
        # Latency distribution: us buckets followed by ms buckets scaled to us.
        first = d[next(iter(d))]
        lat_data = {'lats': list(first['lat_us'].keys())
                    + [k + '000' for k in first['lat_ms'].keys()]}
        self.cache['meta_clients'] = {k: v['count'] for k, v in d.items()}
        for name in d.keys():
            count = d[name]['count']
            c = [v / count for v in d[name]['lat_us'].values()]
            c += [v / count for v in d[name]['lat_ms'].values()]
            lat_data[name] = c
        self.cache['lat_dist'] = pandas.DataFrame(data=lat_data)
        # Collect some metadata about the jobs.
        for name in d.keys():
            self.meta[name] = {
                'count': d[name]['count'],
                'clients': d[name]['clients'],
            }

    def get_aggregate_bw(self, test):
        """Return the bandwidth DataFrame for *test*, recomputing on cache miss."""
        # BUG fix: the original compared the literal 'name' to test['name'],
        # which re-aggregated on every call; compare the cached test name.
        if 'bw' not in self.cache or self.cache.get('name') != test['name']:
            self._aggregate_data(test)
        return self.cache['bw']

    def get_aggregate_iops(self, test):
        """Return the IOPS DataFrame for *test*, recomputing on cache miss."""
        if 'iops' not in self.cache or self.cache.get('name') != test['name']:
            self._aggregate_data(test)
        return self.cache['iops']

    def get_aggregate_lat_dist(self, test):
        """Return the latency-distribution DataFrame for *test*."""
        if 'lat_dist' not in self.cache or self.cache.get('name') != test['name']:
            self._aggregate_data(test)
        return self.cache['lat_dist']

    def print_(self):
        """Aggregate every parsed test and write them to <output>.xlsx."""
        bw_frames = []
        iops_frames = []
        sep_lat = []
        for test in self.data['results']:
            print('aggregate iops')
            ag_iops = self.get_aggregate_iops(test)
            ag_iops.insert(0, 'way', attach_name(test['name'], ag_iops.shape[0]))
            print('aggregate bandwidth')
            ag_bw = self.get_aggregate_bw(test)
            ag_bw.insert(0, 'way', attach_name(test['name'], ag_bw.shape[0]))
            sep_lat.append(self.get_aggregate_lat_dist(test))
            bw_frames.append(ag_bw)
            iops_frames.append(ag_iops)
        if not bw_frames:
            print('ERROR...no parsed results to export.')
            return
        # pandas.concat replaces the DataFrame.append API removed in 2.0.
        mergedbw = pandas.concat(bw_frames)
        mergediops = pandas.concat(iops_frames)
        with pandas.ExcelWriter(self.args.output + ".xlsx") as writer:
            mergedbw.to_excel(writer, sheet_name='bw')
            mergediops.to_excel(writer, sheet_name='iops')
            for i, lat in enumerate(sep_lat):
                lat.to_excel(writer, self.data['results'][i]['name'] + "_lat_dist")
def attach_name(name, number):
    """Return a length-*number* column: *name* first, then empty strings.

    The blanks keep each run's name on only the first row of its block in
    the Excel sheet (convenient for building Excel graphs).  For
    ``number <= 1`` the result is just ``[name]``, matching the original
    append-loop behavior.
    """
    # Idiom: build the list directly instead of looping append().
    return [name] + [''] * (number - 1)
def get_workers(val):
    # Placeholder: always returns 0.  Presumably meant to extract a worker
    # count from *val* -- TODO implement or remove.
    return 0
def get_bs(val):
    # Placeholder: always returns 0.  Presumably meant to extract a block
    # size from *val* -- TODO implement or remove.
    return 0
def get_op(val):
    """Return the substring after the last '_' in *val* (or *val* itself
    when it contains no underscore)."""
    return val.rsplit('_', 1)[-1]
def get_fio(path):
    """Build a FioResults over *path* in directory mode with output base 'graphs'."""
    return FioResults(argparse.Namespace(dir=True, path=path, output='graphs'))
def main():
a_parser = get_arg_parser()
args = a_parser.parse_args()
if args.dir:
if not os.path.isdir(args.path):
raise a_parser.ArgumentError(('-d was passed but path is not a ',
'directory'))
else:
if os.path.isdir(args.path):
raise a_parser.ArgumentError(('-d was not passed but path is a ',
'directory'))
results = FioResults(args)
results.parse_data()
results.print_()
if __name__ == "__main__":
main()
| [
"2017302580274@whu.edu.cn"
] | 2017302580274@whu.edu.cn |
b3fbab07b998e1fee199eb0830317781fa2a95f5 | 4f7f2adf35772020adfbc994cdff8a756fcc4497 | /src/machine_learning/training_process/classifier_keras.py | 57463798ddda704cd5c5201e922f72be5f867bc1 | [] | no_license | gprolog/machine_learning_tools | 6b84e15fe3ffcb55df24b33c9ea394936f9b74fc | e343318829f7e9eb436c6269818ba0344675962c | refs/heads/master | 2020-03-15T05:11:13.318779 | 2018-05-02T03:57:11 | 2018-05-02T03:57:11 | 131,983,521 | 1 | 0 | null | 2018-05-03T11:09:06 | 2018-05-03T11:09:06 | null | UTF-8 | Python | false | false | 4,688 | py | from __future__ import print_function
import os, json
from classifier_base import *
import keras
from keras.models import *
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from sklearn.preprocessing import OneHotEncoder
from keras.wrappers.scikit_learn import *
from sklearn.datasets import load_svmlight_file
import numpy as np
from scipy.sparse import csr_matrix
def csr_matrix_to_ndarray(m):
return csr_matrix(m, dtype=np.float32).toarray()
class TMSAKerasTrainer(TrainerInterface):
def __init__(self, config):
print('TMSAKerasTrainer init')
super(self.__class__, self).__init__()
self.config_ = config
self.input_dim_ = config['model']['keras']['input_dim']
self.num_classes_ = config['model']['keras']['num_classes']
self.batch_size_ = config['model']['keras']['batch_size']
self.epochs_ = config['model']['keras']['epochs']
def train(self):
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(self.y_, 2)
# enc = OneHotEncoder(sparse=False) # Key here is sparse=False!
# y_train = enc.fit_transform(self.y_.reshape((self.y_.shape[0]), 1))
# y = np.array(self.y_)
self.model_ = Sequential()
#self.model_.add(Dense(10240, activation='relu', input_dim=self.input_dim_))
#self.model_.add(Dropout(0.2))
#self.model_.add(Dense(1024, activation='relu'))
#self.model_.add(Dropout(0.2))
#self.model_.add(Dense(512, activation='relu'))
self.model_.add(Dense(512, activation='relu', input_dim=self.input_dim_))
self.model_.add(Dropout(0.2))
self.model_.add(Dense(64, activation='relu'))
self.model_.add(Dropout(0.2))
self.model_.add(Dense(2, activation='softmax'))
#self.model_.add(Dense(2, activation='sigmoid'))
self.model_.summary()
self.model_.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
X_array = csr_matrix_to_ndarray(self.X_)
history = self.model_.fit(X_array, y_train,
batch_size=self.batch_size_,
epochs=self.epochs_,
verbose=1,
validation_data=(X_array, y_train))
#print history
def save_model(self, model_path = None):
if model_path:
print('get model path from parameter')
else:
model_path = os.path.join(self.config_['filesystem']['mid_folder'], \
self.config_['model']['keras']['model_name'])
print('get model path from config')
dir_path, file_name = os.path.split(model_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
self.model_.save(model_path)
return model_path
class TMSAKerasClassifier(ClassifierInterface):
def __init__(self, config):
super(self.__class__, self).__init__(config)
self.config_ = config
self.model_path_ = ''
self.model_ = None
self.num_classes_ = config['model']['keras']['num_classes']
def load_model(self, model_path = None):
if model_path and os.path.exists(model_path):
print('Load model from parameter: {}'.format(model_path))
self.model_path_ = model_path
else:
self.model_path_ = os.path.join(self.config_['filesystem']['mid_folder'], \
self.config_['model']['keras']['model_name'])
print('Load model from config: {}'.format(self.model_path_))
self.model_ = load_model(self.model_path_)
def score(self, X, y):
self.clear_score()
X_array = csr_matrix_to_ndarray(X)
y_test = keras.utils.to_categorical(y, self.num_classes_)
score = self.model_.evaluate(X_array, y_test, verbose=0)
probabilities = self.model_.predict(X_array)
nor_prob_list = probabilities[:,0]
index = 0
for prob in nor_prob_list:
if prob >= 0.5 and y[index] == 1:
self.fn_ += 1
self.fnl_.append(index)
elif prob < 0.5 and y[index] == 1:
self.tp_ += 1
self.tpl_.append(index)
elif prob >= 0.5 and y[index] == 0:
self.tn_ += 1
self.tnl_.append(index)
else:
self.fp_ += 1
self.fpl_.append(index)
index += 1
return self.tpl_, self.tnl_, self.fpl_, self.fnl_, probabilities[:,1]
| [
"chaoying_liu@trendmicro.com"
] | chaoying_liu@trendmicro.com |
1b150b237cca8fca96f6077ef567407c69d49e5d | f8e99e4118a7da8720ba28d57b4e96ee97ed7a39 | /Array/78.py3 | 9008a51db481d2467f6eab6106410a9ac0913e20 | [] | no_license | MinhNNguyen/Python-Practice-Leetcode | 46cdef7642e0dd22afbb5c459862c5b8c22ed6d8 | e8de6631b461aa8cdc63562e532f81e3b2e2179a | refs/heads/master | 2020-05-07T00:54:53.362626 | 2019-05-01T07:11:37 | 2019-05-01T07:11:37 | 180,249,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py3 | #Iterative approach
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
res = [[]]
for n in nums:
for i in range(len(res)):
res.append(res[i]+[n])
return res
| [
"robertnguyen@roberts-mbp.dsldevice.lan"
] | robertnguyen@roberts-mbp.dsldevice.lan |
f5c377a7d7c1af9461bd60efb9fb4ac6fc8cfe35 | 7b8b11e6fc8b9f36e951c34ca95a835ade290f21 | /InterviewBit/TwoPointers/MaxContinuousSeriesOf1s.py | daeb356ea8802d1c1483b882e8d9b56b0596ad6c | [] | no_license | alexander-travov/algo | 7627ba3f22c520108b241f7a0bbc777277965680 | d2b22994a65b796009b48240c0397d51b8982432 | refs/heads/master | 2020-05-30T07:02:07.784023 | 2019-09-14T10:15:47 | 2019-09-14T10:15:47 | 189,592,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | """
Max Continuous Series of 1s
===========================
You are given with an array of 1s and 0s. And you are given with an integer M, which signifies number of flips allowed.
Find the position of zeros which when flipped will produce maximum continuous series of 1s.
For this problem, return the indices of maximum continuous series of 1s in order.
Example:
Input :
Array = {1 1 0 1 1 0 0 1 1 1 }
M = 1
Output :
[0, 1, 2, 3, 4]
"""
from __future__ import print_function
def max_1series(a, max_num_zeros):
l = 0
r = -1
num_zeros = 0
max_len = -1
while True:
if num_zeros <= max_num_zeros:
max_len = max(max_len, r-l+1)
r += 1
if r == len(a):
break
elif a[r] == 0:
num_zeros += 1
else:
l += 1
if a[l-1] == 0:
num_zeros -= 1
return max_len
print(max_1series([0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0], 2))
| [
"alexander.travov@gmail.com"
] | alexander.travov@gmail.com |
b17a03c0eb64efe0d2a2b156f6671af4fbcc869b | f6524cb86952fbc15505febdef451ab1a0d062ed | /arduino-experiments/dumper/decode.py | 1fb5e94343937c2ac48aee473a91503564f8ade8 | [] | no_license | Zonde/ZondeESP32 | efd7c65d90030dd051be7be5e1d84094073d6aac | 8059aab2886734135b19542837ecfb69c30c1ac8 | refs/heads/master | 2021-09-10T15:25:12.881472 | 2018-03-28T13:53:35 | 2018-03-28T13:53:35 | 124,524,502 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | """
Partial decoder for 802.11 payloads, decodes all files in ./dumps/ as raw binary payload data
Author: Daan de Graaf
"""
import sys
from os import listdir
from os.path import join
def analyse_packet(path):
data = open(path, 'rb').read()
type = (data[0] & 0b00001100) >> 2
if type == 0b00:
print("Type: Management")
elif type == 0b01:
print("Type: Control")
sys.exit(1)
else:
print("Type: UNKNOWN")
sys.exit(1)
subtype = (data[0] & 0b11110000) >> 4
if subtype == 0b0100:
print("Subtype: Probe request")
else:
print("Subtype: Unknown")
sys.exit(1)
def fmt_addr(addr):
return ':'.join('{:02x}'.format(part) for part in addr)
receiver = data[4:10]
print("Receiver: {}".format(fmt_addr(receiver)))
transmitter = data[10:16]
print("Transmitter: {}".format(fmt_addr(transmitter)))
bssid = data[16:22]
print("BSSID: {}".format(fmt_addr(bssid)))
# Everything besides the frame body is 34 bytes, -4 bytes FCS at the end means the body should start at byte 30. But probe request doesn't use address 4, so the body starts at byte 24
body = data[24:-4]
i = 0
while i < len(body):
elem_id = body[i]
length = body[i+1]
if elem_id == 0:
print("SSID element found, length: {}".format(length))
if length > 0:
ssid = body[i+2:i+2+length]
print("SSID: {}".format(ssid))
else:
pass
#print("Unknown element of length {} found".format(length))
i += 2 + length
print()
dumps = listdir('dumps')
for dump in dumps:
path = join('dumps', dump)
analyse_packet(path)
| [
"daandegraaf9@gmail.com"
] | daandegraaf9@gmail.com |
208ff0e812a9c70c6668056dec437805ca6421a5 | f7c29c49733f3d0721bc423f15a009c163bde8bd | /Crawler/Deep in Python Web Crawler/Chapter_6/6-2_link.py | 672ed4919069873fb45794c185eee060926463c0 | [] | no_license | Cylra/py3Learn | 036665ba73b09fdf852a3149603ac1b2da18d92c | 7fac7c548f2183b636ef8d6336e2499e5ceb63a1 | refs/heads/master | 2021-06-20T11:12:02.732047 | 2020-03-31T14:40:58 | 2020-03-31T14:40:58 | 100,771,707 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | #! /usr/bin/python3
import urllib.request
import re
def getLink(url):
headers = ("User-Agent", "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
urllib.request.install_opener(opener)
html1 = urllib.request.urlopen(url)
data = str(html1.read())
#构建获取链接的正则表达式
pat = '(https?://[^\s)";]+\.(\w|/)*)'
links = re.compile(pat).findall(data)
#print(links)
#去除重复元素
links = list(set(links))
return links
#要爬取的网页链接
url = "http://blog.csdn.net/"
linkList = getLink(url)
for link in linkList:
print(link[0]) | [
"longyu998127@gmail.com"
] | longyu998127@gmail.com |
ccbad334062bd86d13f5945c58071f6f3365eccb | 78ec82174f57653ac628c5bdc69e3a94f19824be | /evennia/server/portal/grapevine.py | 76447726de944882c9bdcfb67ca1289233917652 | [
"BSD-3-Clause"
] | permissive | fermuch/evennia | ad6813202ec40fa479f9eb7860ea6aeec36860be | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | refs/heads/master | 2020-07-05T21:44:01.206747 | 2019-08-16T19:15:27 | 2019-08-16T19:15:27 | 202,788,779 | 0 | 0 | NOASSERTION | 2019-08-16T19:39:35 | 2019-08-16T19:39:35 | null | UTF-8 | Python | false | false | 11,514 | py | """
Grapevine network connection
This is an implementation of the Grapevine Websocket protocol v 1.0.0 as
outlined here: https://grapevine.haus/docs
This will allow the linked game to transfer status as well as connects
the grapevine client to in-game channels.
"""
import json
from twisted.internet import protocol
from django.conf import settings
from evennia.server.session import Session
from evennia.utils import get_evennia_version
from evennia.utils.logger import log_info, log_err
from autobahn.twisted.websocket import (
WebSocketClientProtocol, WebSocketClientFactory, connectWS)
# There is only one at this time
GRAPEVINE_URI = "wss://grapevine.haus/socket"
GRAPEVINE_CLIENT_ID = settings.GRAPEVINE_CLIENT_ID
GRAPEVINE_CLIENT_SECRET = settings.GRAPEVINE_CLIENT_SECRET
GRAPEVINE_CHANNELS = settings.GRAPEVINE_CHANNELS
# defined error codes
CLOSE_NORMAL = 1000
GRAPEVINE_AUTH_ERROR = 4000
GRAPEVINE_HEARTBEAT_FAILURE = 4001
class RestartingWebsocketServerFactory(WebSocketClientFactory,
protocol.ReconnectingClientFactory):
"""
A variant of the websocket-factory that auto-reconnects.
"""
initialDelay = 1
factor = 1.5
maxDelay = 60
def __init__(self, sessionhandler, *args, **kwargs):
self.uid = kwargs.pop('uid')
self.channel = kwargs.pop('grapevine_channel')
self.sessionhandler = sessionhandler
# self.noisy = False
self.port = None
self.bot = None
WebSocketClientFactory.__init__(self, GRAPEVINE_URI, *args, **kwargs)
def buildProtocol(self, addr):
"""
Build new instance of protocol
Args:
addr (str): Not used, using factory/settings data
"""
protocol = GrapevineClient()
protocol.factory = self
protocol.channel = self.channel
protocol.sessionhandler = self.sessionhandler
return protocol
def startedConnecting(self, connector):
"""
Tracks reconnections for debugging.
Args:
connector (Connector): Represents the connection.
"""
log_info("(re)connecting to grapevine channel '%s'" % self.channel)
def clientConnectionFailed(self, connector, reason):
"""
Called when Client failed to connect.
Args:
connector (Connection): Represents the connection.
reason (str): The reason for the failure.
"""
protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionLost(self, connector, reason):
"""
Called when Client loses connection.
Args:
connector (Connection): Represents the connection.
reason (str): The reason for the failure.
"""
if not (self.bot or (self.bot and self.bot.stopping)):
self.retry(connector)
def reconnect(self):
"""
Force a reconnection of the bot protocol. This requires
de-registering the session and then reattaching a new one,
otherwise you end up with an ever growing number of bot
sessions.
"""
self.bot.stopping = True
self.bot.transport.loseConnection()
self.sessionhandler.server_disconnect(self.bot)
self.start()
def start(self):
"Connect protocol to remote server"
try:
from twisted.internet import ssl
except ImportError:
log_err("To use Grapevine, The PyOpenSSL module must be installed.")
else:
context_factory = ssl.ClientContextFactory() if self.isSecure else None
connectWS(self, context_factory)
# service.name = "websocket/grapevine"
# self.sessionhandler.portal.services.addService(service)
class GrapevineClient(WebSocketClientProtocol, Session):
"""
Implements the grapevine client
"""
def __init__(self):
WebSocketClientProtocol.__init__(self)
Session.__init__(self)
self.restart_downtime = None
def at_login(self):
pass
def onOpen(self):
"""
Called when connection is established.
"""
self.restart_downtime = None
self.restart_task = None
self.stopping = False
self.factory.bot = self
self.init_session("grapevine", GRAPEVINE_URI, self.factory.sessionhandler)
self.uid = int(self.factory.uid)
self.logged_in = True
self.sessionhandler.connect(self)
self.send_authenticate()
def onMessage(self, payload, isBinary):
"""
Callback fired when a complete WebSocket message was received.
Args:
payload (bytes): The WebSocket message received.
isBinary (bool): Flag indicating whether payload is binary or
UTF-8 encoded text.
"""
if not isBinary:
data = json.loads(str(payload, 'utf-8'))
self.data_in(data=data)
self.retry_task = None
def onClose(self, wasClean, code=None, reason=None):
"""
This is executed when the connection is lost for whatever
reason. it can also be called directly, from the disconnect
method.
Args:
wasClean (bool): ``True`` if the WebSocket was closed cleanly.
code (int or None): Close status as sent by the WebSocket peer.
reason (str or None): Close reason as sent by the WebSocket peer.
"""
self.disconnect(reason)
if code == GRAPEVINE_HEARTBEAT_FAILURE:
log_err("Grapevine connection lost (Heartbeat error)")
elif code == GRAPEVINE_AUTH_ERROR:
log_err("Grapevine connection lost (Auth error)")
elif self.restart_downtime:
# server previously warned us about downtime and told us to be
# ready to reconnect.
log_info("Grapevine connection lost (Server restart).")
def _send_json(self, data):
"""
Send (json-) data to client.
Args:
data (str): Text to send.
"""
return self.sendMessage(json.dumps(data).encode('utf-8'))
def disconnect(self, reason=None):
"""
Generic hook for the engine to call in order to
disconnect this protocol.
Args:
reason (str or None): Motivation for the disconnection.
"""
self.sessionhandler.disconnect(self)
# autobahn-python: 1000 for a normal close, 3000-4999 for app. specific,
# in case anyone wants to expose this functionality later.
#
# sendClose() under autobahn/websocket/interfaces.py
self.sendClose(CLOSE_NORMAL, reason)
# send_* method are automatically callable through .msg(heartbeat={}) etc
def send_authenticate(self, *args, **kwargs):
"""
Send grapevine authentication. This should be send immediately upon connection.
"""
data = {
"event": "authenticate",
"payload": {
"client_id": GRAPEVINE_CLIENT_ID,
"client_secret": GRAPEVINE_CLIENT_SECRET,
"supports": ["channels"],
"channels": GRAPEVINE_CHANNELS,
"version": "1.0.0",
"user_agent": get_evennia_version('pretty')
}
}
# override on-the-fly
data.update(kwargs)
self._send_json(data)
def send_heartbeat(self, *args, **kwargs):
"""
Send heartbeat to remote grapevine server.
"""
# pass along all connected players
data = {
"event": "heartbeat",
"payload": {
}
}
sessions = self.sessionhandler.get_sessions(include_unloggedin=False)
data['payload']['players'] = [sess.account.key for sess in sessions
if hasattr(sess, "account")]
self._send_json(data)
def send_subscribe(self, channelname, *args, **kwargs):
"""
Subscribe to new grapevine channel
Use with session.msg(subscribe="channelname")
"""
data = {
"event": "channels/subscribe",
"payload": {
"channel": channelname
}
}
self._send_json(data)
def send_unsubscribe(self, channelname, *args, **kwargs):
"""
Un-subscribe to a grapevine channel
Use with session.msg(unsubscribe="channelname")
"""
data = {
"event": "channels/unsubscribe",
"payload": {
"channel": channelname
}
}
self._send_json(data)
def send_channel(self, text, channel, sender, *args, **kwargs):
"""
Send text type Evennia -> grapevine
This is the channels/send message type
Use with session.msg(channel=(message, channel, sender))
"""
data = {
"event": "channels/send",
"payload": {
"message": text,
"channel": channel,
"name": sender
}
}
self._send_json(data)
def send_default(self, *args, **kwargs):
"""
Ignore other outputfuncs
"""
pass
def data_in(self, data, **kwargs):
"""
Send data grapevine -> Evennia
Kwargs:
data (dict): Converted json data.
"""
event = data['event']
if event == "authenticate":
# server replies to our auth handshake
if data['status'] != "success":
log_err("Grapevine authentication failed.")
self.disconnect()
else:
log_info("Connected and authenticated to Grapevine network.")
elif event == "heartbeat":
# server sends heartbeat - we have to send one back
self.send_heartbeat()
elif event == "restart":
# set the expected downtime
self.restart_downtime = data['payload']['downtime']
elif event == "channels/subscribe":
# subscription verification
if data.get('status', 'success') == "failure":
err = data.get("error", "N/A")
self.sessionhandler.data_in(bot_data_in=((f"Grapevine error: {err}"),
{'event': event}))
elif event == "channels/unsubscribe":
# unsubscribe-verification
pass
elif event == "channels/broadcast":
# incoming broadcast from network
payload = data["payload"]
print("channels/broadcast:", payload['channel'], self.channel)
if str(payload['channel']) != self.channel:
# only echo from channels this particular bot actually listens to
return
else:
# correct channel
self.sessionhandler.data_in(
self, bot_data_in=(
str(payload['message'],),
{"event": event,
"grapevine_channel": str(payload['channel']),
"sender": str(payload['name']),
"game": str(payload['game'])}))
elif event == "channels/send":
pass
else:
self.sessionhandler.data_in(self, bot_data_in=("", kwargs))
| [
"griatch@gmail.com"
] | griatch@gmail.com |
ca50249a8419ecd331683f942b8fa6a2d35ae5f8 | 1951778835329f7a5cea0d64b54db213ebcf2112 | /DGY/DGY/urls.py | a8a853bcaa310f46abc3ebbb167c176f644af162 | [] | no_license | DingGY/opfwebsystem | 27c6cd42f59453c20d263de2540a00cf32dfc285 | 574d8c2f7f1307aeb7b1cc72d2851b4f5c4b107c | refs/heads/master | 2021-09-06T18:02:38.919365 | 2018-02-09T11:29:16 | 2018-02-09T11:29:16 | 111,661,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | """DGY URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from bug_manage.views import *
from production_record.views import *
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', login_view),
url(r'^test/$', test),
url(r'^login/$', login_view),
url(r'^websocket/webcomm/$', web_comm),
url(r'^bug_manage/index/$', boot),
url(r'^bug_manage/manage/$', bug_manage),
url(r'^bug_manage/userhelp/$', bug_help),
url(r'^bug_manage/download/$', bug_download),
url(r'^localclient/$', local_client),
url(r'^ajax/set_config/$', set_config),
url(r'^ajax/step/(.+)/$', step_action),
url(r'^ajax/task/(.+)/$', task_action),
url(r'^ajax/check/$', login_check),
url(r'^ajax/signup/$', login_signup),
url(r'^ajax/func/(.+)/$', func_action),
url(r'^ajax/index/get_task/$', get_task_info),
url(r'^production_record/record/$', MeterCodeList.as_view()),
url(r'^production_record/clientrecord/$', save_production_info),
url(r'^production_record/find/$', MeterCodeFindList.as_view()),
]
| [
"iamdgy@qq.com"
] | iamdgy@qq.com |
f6202d23842644bd25eeb7b2059c3bbadd673813 | 148ecaf700da298e947808154b29b548b662b595 | /HW5/question1.py | 74178509c4da0808a26434cbaf406830fbad20ff | [] | no_license | anliec/CV_homeworks | 0fb0274302a95747dc1c9697d4dbc113624e564f | a73d9ec3a545e717db42e6f1fa61ec08d13f26b2 | refs/heads/master | 2021-04-06T19:41:08.289842 | 2018-04-20T07:51:00 | 2018-04-20T07:51:00 | 125,178,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,981 | py | import cv2
import numpy as np
from scipy.signal import convolve2d
from math import floor
def int_img_to_unint8(img):
max_value = np.max(np.abs(img))
min_value = np.min(img)
if min_value < 0.0:
return np.array(img * 127 / max_value + 127, dtype=np.uint8)
else:
return np.array(img * 255 / max_value, dtype=np.uint8)
def compute_gradient(img: np.ndarray, direction: str):
ret = np.zeros(img.shape)
if direction == "X":
imgA = np.array(img[:-1, :], dtype=np.int32)
imgB = np.array(img[1:, :], dtype=np.int32)
ret[:-1, :] = imgA - imgB
elif direction == "Y":
imgA = np.array(img[:, :-1], dtype=np.int32)
imgB = np.array(img[:, 1:], dtype=np.int32)
ret[:, :-1] = imgA - imgB
else:
raise ValueError("direction argument is expected to be 'X' or 'Y' only")
return ret
def harris_transform(img: np.ndarray, win_size: int, alpha=1.0):
i_x = compute_gradient(img, 'X')
i_y = compute_gradient(img, 'Y')
# reshape both gradient so they have the same size
xx, xy = i_x.shape
yx, yy = i_y.shape
i_x = i_x[:min(xx, yx), :min(xy, yy)]
i_y = i_y[:min(xx, yx), :min(xy, yy)]
win = np.ones(shape=(win_size, win_size))
i_xx_sum = convolve2d(np.power(i_x, 2), win, 'valid')
i_yy_sum = convolve2d(np.power(i_y, 2), win, 'valid')
i_xy_sum = convolve2d(i_x * i_y, win, 'valid')
return i_xx_sum * i_yy_sum - i_xy_sum**2 + alpha * (i_xx_sum + i_yy_sum), i_x, i_y
def harris_filtering(harris_r: np.ndarray, number_of_points: int, win_size=5):
r = harris_r.copy()
r_w, r_h = r.shape
r_filtered = np.zeros(shape=r.shape)
win_radius = int(floor((win_size - 1) / 2))
for i in range(number_of_points):
p = np.argmax(r)
p_x, p_y = p // r_h, p % r_h
# r_filtered[p_x, p_y] = r[p_x, p_y]
r_filtered[p_x, p_y] = 1
r[max(0, p_x - win_radius):p_x + win_radius + 1, max(0, p_y - win_radius):p_y + win_radius + 1] = 0.0
return r_filtered
if __name__ == '__main__':
trans_a = cv2.imread("subject/transA.jpg", cv2.IMREAD_GRAYSCALE)
trans_b = cv2.imread("subject/transB.jpg", cv2.IMREAD_GRAYSCALE)
sim_a = cv2.imread("subject/simA.jpg", cv2.IMREAD_GRAYSCALE)
sim_b = cv2.imread("subject/simB.jpg", cv2.IMREAD_GRAYSCALE)
print("Question 1.1")
trans_shape_x, trans_shape_y = trans_a.shape
disp = np.zeros(shape=(trans_shape_x, trans_shape_y * 2))
disp[1:, :trans_shape_y] = compute_gradient(trans_a, 'X')
disp[:, trans_shape_y + 1:] = compute_gradient(trans_a, 'Y')
cv2.imwrite("Images/ps4-1-1-a.png", int_img_to_unint8(disp))
sim_shape_x, sim_shape_y = sim_a.shape
disp = np.zeros(shape=(sim_shape_x, sim_shape_y * 2))
disp[1:, :sim_shape_y] = compute_gradient(sim_a, 'X')
disp[:, sim_shape_y + 1:] = compute_gradient(sim_a, 'Y')
cv2.imwrite("Images/ps4-1-1-b.png", int_img_to_unint8(disp))
print("Question 1.2 and 1.3")
win_size = 10
number_of_point = 200
r, _, _ = harris_transform(trans_a, win_size)
cv2.imwrite("Images/ps4-1-2-a.png", int_img_to_unint8(r))
r_f = harris_filtering(r, number_of_point, win_size*2)
cv2.imwrite("Images/ps4-1-3-a.png", int_img_to_unint8(r_f))
r, _, _ = harris_transform(trans_b, win_size)
cv2.imwrite("Images/ps4-1-2-b.png", int_img_to_unint8(r))
r_f = harris_filtering(r, number_of_point, win_size*2)
cv2.imwrite("Images/ps4-1-3-b.png", int_img_to_unint8(r_f))
r, _, _ = harris_transform(sim_a, win_size)
cv2.imwrite("Images/ps4-1-2-c.png", int_img_to_unint8(r))
r_f = harris_filtering(r, number_of_point, win_size*2)
cv2.imwrite("Images/ps4-1-3-c.png", int_img_to_unint8(r_f))
r, _, _ = harris_transform(sim_b, win_size)
cv2.imwrite("Images/ps4-1-2-d.png", int_img_to_unint8(r))
r_f = harris_filtering(r, number_of_point, win_size*2)
cv2.imwrite("Images/ps4-1-3-d.png", int_img_to_unint8(r_f))
| [
"nicolas.six@pau.fr"
] | nicolas.six@pau.fr |
43f6940106543f569c4ec219144e4e15cecbb70a | caeb1420025ccbdb33c13b02f60e0bb2a5d6f69c | /blog/migrations/0001_initial.py | e8c09f240a26fcb3a52820c438169998bbb37a16 | [] | no_license | isactenofora/my-first-blog | 9f3cbb4789eb94a3bc30776f787374b2e2d757ae | c7f5c8f7053ec845b79e62e95530cf702a7a552a | refs/heads/master | 2020-04-07T02:47:08.183392 | 2018-11-25T16:01:10 | 2018-11-25T16:01:10 | 157,989,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | # Generated by Django 2.0.9 on 2018-11-17 12:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"isaclemente4@gmail.com"
] | isaclemente4@gmail.com |
fd52d4cc190f636ed426bc57cc6e3ee1e6e13018 | d1da1bfc310cb428152a9d4e41509b08ee4a6846 | /sum.py | ad49dc34b7f622a868e159ca638237e3394ff94c | [] | no_license | imrajashish/python | 3c9c3a2b3cdd939741fc062ca52df6e3a40d5a45 | 898032f9acb707e0cb0ad40b6b6f2a2406610893 | refs/heads/master | 2022-12-07T12:07:08.500570 | 2020-09-04T15:21:26 | 2020-09-04T15:21:26 | 283,292,211 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | '''num1=input("Enter the number: "))
num2=input("Enter the second number: "))
sum= float(num2)+float(num1)
print("Sum of {0} and {1} is {2}" .format(num1, num2, sum))'''
#print(sum)
def factorial(n):
# single line to find factorial
return 1 if (n==1 or n==0) else n * factorial(n - 1);
# Driver Code
num = input("enter the number");
print("Factorial of",num,"is",
factorial(num))
| [
"imrajashish07@gmail.com"
] | imrajashish07@gmail.com |
f2f6a6f9e5d23d65d8fd1be5c5e14bff2763d1d1 | 6edd5a50f07843de18175c04796348f7fdc4f74d | /Python/lis_annotation.py | 800448c84c36fb068f1d50253ec6fbeaae2d1d23 | [] | no_license | rogergranada/_utilsdev | 4f14a1e910103c33e3a8e820bb3e55483bd27e69 | 977a8d98a6934b9354ec233da6e0ef31621282f3 | refs/heads/master | 2021-06-01T17:07:14.773949 | 2020-10-22T22:06:43 | 2020-10-22T22:06:43 | 124,608,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,084 | py | #!/usr/bin/env python
# coding: utf-8
import sys
import argparse
from os.path import join, dirname
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def fix_boxes(x, y, w, h, valx=0, valy=0, mode=None):
""" Fix bounding box coordinates when they are out of the image """
x, y, w, h = map(int, (x, y, w, h))
if x < valx:
w += x
x = 0
if y < valy:
h += y
y = 0
if mode == 'person':
if y != 0: y = 0
if y+h < 120: h = 114
if x+w > 257: w = 257-x
if y+h > 257: h = 257-y
return x, y, w, h
def fix_bbox_file(input, output=None):
if not output:
output = join(dirname(input), 'fixed_bboxes.txt')
with open(input) as fin, open(output, 'w') as fout:
for i, line in enumerate(fin):
if i == 0 or line.startswith('---') or line.startswith('Modified'):
fout.write(line)
continue #header
# 86 \t person \t (0,51,49,64) \t 0 \t /home/roger/KSCGR/data1/boild-egg/rgb256/86.jpg
arr = line.strip().split('\t')
if not arr[2].startswith('(-,'):
x, y, w, h = eval(arr[2])
x, y, w, h = fix_boxes(x, y, w, h, valx=0, valy=0)
fout.write('%s\t%s\t(%d,%d,%d,%d)\t%s\t%s\n' % (arr[0], arr[1], x, y, w, h, arr[3], arr[4]))
else:
fout.write(line)
def load_dic_file(input):
""" Return dictionaries for bounding boxes to keep and bounding boxes to delete """
with open(input) as fin:
dkeep = {}
dremove = {}
for i, line in enumerate(fin):
arr =line.strip().split(';')
id = int(arr[0][:-1])
operation = arr[0][-1]
idframes = arr[1].split(',')
last = -1
for pair in idframes:
ids = map(int, pair.split('-'))
if len(ids) != 2:
logger.error('Pair of frames is not correct: {} [LINE: {}]'.format(ids, i))
sys.exit(0)
id_start, id_end = ids
if id_start > id_end:
logger.error('Start frame is greater than end frame: ({} : {}) [LINE: {}]'.format(ids[0], ids[1], i))
sys.exit(0)
if id_start >= last:
last = id_end
else:
logger.error('Start frame is lesser than the previous frame: {} - {}'.format(id_end, last))
sys.exit(0)
if operation == '+':
if dkeep.has_key(id):
dkeep[id]['start'].append(id_start)
dkeep[id]['end'].append(id_end)
else:
dkeep[id] = {'record': False, 'data': None, 'start': [id_start], 'end': [id_end]}
elif operation == '-':
if dremove.has_key(id):
dremove[id].append((id_start, id_end))
else:
dremove[id] = [(id_start, id_end)]
else:
logger.error('There is not an operation: {} [LINE: {}]'.format(operation, i))
sys.exit(0)
return dkeep, dremove
def coordinates_objects(file_input, file_frames, output=None):
    """Normalize per-frame bounding boxes using keep/remove frame ranges.

    ``file_frames`` is parsed by ``load_dic_file`` into two dictionaries:
      * dobj: id_obj -> {'record', 'data', 'start', 'end'} -- while a frame is
        inside a [start, end] range, the box captured at the start frame is
        frozen and repeated instead of the per-frame detection.
      * drem: id_obj -> [(start, end), ...] -- detections of that object
        falling inside any of these ranges are dropped entirely.
    Writes the result to ``output`` (defaults to ``person_norm.txt`` next to
    ``file_input``). Python 2 code (print statement, dict.has_key).
    """
    if not output:
        output = join(dirname(file_input), 'person_norm.txt')
    dobj, drem = load_dic_file(file_frames)
    # Debug dump of the removal ranges.
    for k in drem: print k, drem[k]
    last_idfr = -1
    recorded_idfr = False  # True once a line has been written for last_idfr
    with open(file_input) as fin, open(output, 'w') as fout:
        for i, line in enumerate(fin):
            if i == 0 or line.startswith('---') or line.startswith('Modified'):
                fout.write(line)
                continue #header
            # 86 \t person \t (0,51,49,64) \t 0 \t /home/roger/KSCGR/data1/boild-egg/rgb256/86.jpg
            arr = line.strip().split('\t')
            idfr = int(arr[0])
            if idfr != last_idfr:
                # Frame changed: if nothing was emitted for the previous frame
                # (e.g. all its detections were removed), write a placeholder
                # 'None' line so the output keeps one entry per frame.
                if idfr > 0 and not recorded_idfr:
                    last_path = join(dirname(arr[4]), str(last_idfr)+'.jpg')
                    fout.write('%d\tNone\t(-,-,-,-)\tNone\t%s\n' % (last_idfr, last_path))
                last_idfr = idfr
                recorded_idfr = False
            if not arr[2].startswith('(-,'):
                obj = arr[1]
                x, y, w, h = eval(arr[2])  # bbox tuple literal; trusted annotation input only
                x, y, w, h = fix_boxes(x, y, w, h, valx=0, valy=0, mode=obj)
                idobj = int(arr[3])
                # Drop this detection if the frame falls in any removal range
                # registered for this object id.
                if drem.has_key(idobj):
                    remove = False
                    for j in range(len(drem[idobj])):
                        id_start, id_end = drem[idobj][j]
                        if idfr >= id_start and idfr <= id_end:
                            remove = True
                    if remove: continue
                #if idfr == 1952:
                #    print '\n',idobj, drem[idobj]
                if not dobj.has_key(idobj):
                    #print('Dictionary does not contain object: {} - {}'.format(obj, idfr))
                    logger.debug('Dictionary does not contain object: {}'.format(obj))
                    # NOTE(review): recorded_idfr is not set on this branch, so a
                    # frame whose only detections hit this path still gets a
                    # 'None' placeholder later -- confirm this is intended.
                    fout.write('%d\t%s\t(%d,%d,%d,%d)\t%s\t%s\n' % (idfr, arr[1], x, y, w, h, arr[3], arr[4]))
                else:
                    # Entering a keep range: freeze the current box.
                    if idfr in dobj[idobj]['start']:
                        dobj[idobj]['record'] = True
                        dobj[idobj]['data'] = (x, y, w, h)
                    if dobj[idobj]['record']:
                        # Inside a keep range: repeat the frozen box.
                        stored_x, stored_y, stored_w, stored_h = dobj[idobj]['data']
                        fout.write('%d\t%s\t(%d,%d,%d,%d)\t%s\t%s\n' % (idfr, arr[1], stored_x, stored_y, stored_w, stored_h, arr[3], arr[4]))
                    else:
                        fout.write('%d\t%s\t(%d,%d,%d,%d)\t%s\t%s\n' % (idfr, arr[1], x, y, w, h, arr[3], arr[4]))
                    recorded_idfr = True
                    # Leaving a keep range: unfreeze.
                    if idfr in dobj[idobj]['end']:
                        dobj[idobj]['record'] = False
                        dobj[idobj]['data'] = None
            else:
                fout.write(line)
def remove_negative_file(input, output=None, size=256):
    """Clamp bounding boxes in an annotation file to the [0, size) image frame.

    Each data line has the form:
        idfr \t label \t (x,y,w,h) \t id_obj \t path
    Negative x/y are raised to 0 and w/h are shrunk so the box stays inside a
    ``size`` x ``size`` image (default 256, matching the rgb256 frames).
    Lines with an empty box ``(-,-,-,-)`` and header lines are copied verbatim.
    Writes to ``output`` (defaults to ``bbox_nonneg.txt`` next to ``input``).
    """
    if not output:
        output = join(dirname(input), 'bbox_nonneg.txt')
    with open(input) as fin, open(output, 'w') as fout:
        for i, line in enumerate(fin):
            if i == 0 or line.startswith('---') or line.startswith('Modified'):
                fout.write(line)
                continue  # header
            # 86 \t person \t (0,51,49,64) \t 0 \t /home/roger/KSCGR/data1/boild-egg/rgb256/86.jpg
            arr = line.strip().split('\t')
            idfr = int(arr[0])
            if not arr[2].startswith('(-,'):
                x, y, w, h = eval(arr[2])  # bbox tuple literal; trusted annotation input only
                if x < 0: x = 0
                if y < 0: y = 0
                if x + w > size: w = size - x
                if y + h > size: h = size - y
                # NOTE(review): the original object id column (arr[3]) is
                # discarded and replaced by a literal 0 -- presumably this file
                # holds a single object class; confirm before reusing.
                fout.write('%d\t%s\t(%d,%d,%d,%d)\t0\t%s\n' % (idfr, arr[1], x, y, w, h, arr[4]))
            else:
                fout.write(line)
def generate_relations(input, output=None):
    """ Create file containing relations. Input file has the form:
        idfr_start-idfr_end-id_obj1-id_relation-id_obj2
    """
    if not output:
        output = join(dirname(input), 'relations.txt')
    # Object-id and relation-id vocabularies for this dataset.
    obj_names = {
        0: 'person',
        1: 'bowl',
        2: 'ham',
        3: 'egg',
        4: 'knife',
        5: 'cutting board',
        6: 'oil bottle',
        7: 'frying pan',
        8: 'hashi',
        9: 'saltshaker',
        10: 'spoon',
        11: 'glass',
        12: 'egg ham',
        13: 'pan lid',
        14: 'turner',
        15: 'plate',
        16: 'beaten egg',
        17: 'egg crepe',
        18: 'boild egg',
        19: 'pan handler',
        20: 'pan',
        21: 'ham egg',
        22: 'milk carton',
        23: 'omelette'
    }
    rel_names = {
        0: 'hold',
        1: 'in',
        2: 'on',
        3: 'cut',
        4: 'move'
    }
    # Parse every "start-end-subj-rel-obj" line, tracking the last frame used.
    triples = []
    last_frame = 0
    with open(input) as fin:
        for num, raw in enumerate(fin):
            first, final, subj, rel, obj = [int(tok) for tok in raw.strip().split('-')]
            if first >= final:
                logger.error('Start is greater than end frame: {} - {} [LINE: {}]'.format(first, final, num))
                sys.exit(0)
            triples.append((first, final, subj, rel, obj))
            last_frame = max(last_frame, final)
    last_frame += 1
    # Expand the ranges: one output line per (frame, relation) pair, or a
    # 'None' placeholder for frames with no active relation.
    with open(output, 'w') as fout:
        fout.write('Frame\tSubject\tRelation\tObject\n')
        for frame in range(last_frame):
            active = [t for t in triples if t[0] <= frame <= t[1]]
            if active:
                for _, _, subj, rel, obj in active:
                    fout.write('%d\t%s\t%s\t%s\n' % (frame, obj_names[subj], rel_names[rel], obj_names[obj]))
            else:
                fout.write('%d\tNone\tNone\tNone\n' % frame)
def merge_objects_person(input_object, input_person, output=None):
    """Merge per-frame object annotations into the person annotation file.

    Reads ``input_object`` (idfr \t label \t bbox \t id_obj \t path) and
    groups its lines by frame, then walks ``input_person`` writing, for each
    frame: the object lines of that frame, followed by the person line rebuilt
    with a fresh id (max existing object id + 1). Person lines with an empty
    box ``(-,-,-,-)`` are copied only when the frame has no object lines.
    Writes to ``output`` (defaults to ``person_objects.txt`` next to
    ``input_object``).
    """
    if not output:
        output = join(dirname(input_object), 'person_objects.txt')
    frame_lines, obj_names = {}, {}
    with open(input_object) as fo:
        for line in fo:
            if line.startswith('Frame') or line.startswith('---') or line.startswith('Modified'):
                continue  # header
            # 86 \t person \t (0,51,49,64) \t 0 \t /home/roger/KSCGR/data1/boild-egg/rgb256/86.jpg
            arr = line.strip().split('\t')
            obj_names[int(arr[3])] = arr[1]
            frame_lines.setdefault(int(arr[0]), []).append(line)
    # The merged person entries get an id just above the existing object ids.
    # (Raises ValueError on an empty object file -- no ids to extend.)
    id_person = max(obj_names) + 1
    with open(input_person) as fp, open(output, 'w') as fout:
        for line in fp:
            if line.startswith('Frame') or line.startswith('---') or line.startswith('Modified'):
                fout.write(line)
                continue  # header
            arr = line.strip().split('\t')
            idfr = int(arr[0])
            recorded = False
            for obj_line in frame_lines.get(idfr, ()):
                fout.write(obj_line)
                recorded = True
            if not arr[2].startswith('(-,'):
                fout.write('%d\tperson\t%s\t%d\t%s\n' % (idfr, arr[2], id_person, arr[4]))
                recorded = True
            if not recorded:
                fout.write(line)
def change_name_object(input, output=None):
    """One-off relabeling pass for a specific recording (marked for removal).

    Rewrites annotation lines so the egg is labeled 'beaten egg' (object id
    14) in the frame ranges where it has been beaten:
      * 'egg' lines at frame >= 3757 get the fixed box (65,126,33,25);
      * 'omelette' lines at frame <= 5060 keep their box but are relabeled.
    All other lines (headers, empty boxes, other objects/frames) are copied
    verbatim. Writes to ``output`` (defaults to ``object_beaten.txt`` next to
    ``input``).
    """
    # Remove this function -- dataset-specific hack kept for reproducibility.
    if not output:
        output = join(dirname(input), 'object_beaten.txt')
    with open(input) as fin, open(output, 'w') as fout:
        for i, line in enumerate(fin):
            if i == 0 or line.startswith('---') or line.startswith('Modified'):
                fout.write(line)
                continue  # header
            # 86 \t person \t (0,51,49,64) \t 0 \t /home/roger/KSCGR/data1/boild-egg/rgb256/86.jpg
            arr = line.strip().split('\t')
            idfr = int(arr[0])
            if not arr[2].startswith('(-,'):
                obj = arr[1]
                if obj == 'egg' and idfr >= 3757:
                    fout.write('%d\tbeaten egg\t(65,126,33,25)\t14\t%s\n' % (idfr, arr[4]))
                elif obj == 'omelette' and idfr <= 5060:
                    fout.write('%d\tbeaten egg\t%s\t14\t%s\n' % (idfr, arr[2], arr[4]))
                else:
                    fout.write(line)
            else:
                fout.write(line)
if __name__ == '__main__':
    # CLI driver. The commented-out calls are alternative one-off passes over
    # the annotation files; exactly one is active at a time. Passes that take
    # two files also need the 'input_2' argument re-enabled below.
    parser = argparse.ArgumentParser()
    parser.add_argument('input_1', metavar='input_object', help='Plain text file')
    #parser.add_argument('input_2', metavar='input_person', help='Plain text file')
    args = parser.parse_args()
    #fix_bbox_file(args.input_1)
    #coordinates_objects(args.input_1, args.input_2)
    #remove_negative_file(args.input_1)
    generate_relations(args.input_1)
    #merge_objects_person(args.input_1, args.input_2)
    #change_name_object(args.input_1)
| [
"roger.leitzke@gmail.com"
] | roger.leitzke@gmail.com |
7baba143bd8212ef69fad25fddf79a9c52285611 | f5f88d090ef7dd5c396569e3747b4944962741e7 | /OLC2_Proyecto 1_201503958/parsetab.py | 6bc4736290ab19b863948fcc5c903b3626a72fde | [] | no_license | diemorales96/OLC2_Proyecto1_201503958 | 2c6b5862178c5bdabba2f7c76901c0e91dbf1248 | f7fce6cb116c03b4c83e1167ec1bc9169ded67ce | refs/heads/master | 2023-08-05T10:46:43.334467 | 2021-09-23T05:31:22 | 2021-09-23T05:31:22 | 397,853,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39,945 | py |
# parsetab.py
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = '3.10'
_lr_method = 'LALR'
_lr_signature = 'leftORleftANDrightUNOTleftMENORQUEMAYORQUEIGUALIGUALDIFERENTEMENORIGUALMAYORIGUALleftMASMENOSleftDIVPORMODnonassocPOTrightUMENOSAND CADENA CARACTER COMA CORA CORC DECIMAL DIFERENTE DIV DOBLEDPUNTOS DOSPUNTOS ENTERO ID IGUAL IGUALIGUAL MAS MAYORIGUAL MAYORQUE MENORIGUAL MENORQUE MENOS MOD NOT OR PARA PARC POR POT PUNTOCOMA RARRAY RBOOLEAN RBREAK RCHAR RCONTINUE RDOUBLE RELSE RELSEIF REND RFALSE RFOR RFUNCTION RGLOBAL RIF RIN RINT RLENGTH RLOCAL RLOG RNULO RPARSE RPRINT RPRINTLN RRETURN RSTRING RTRUE RTRUNCATE RWHILEinit : instruccionesinstrucciones : instrucciones instruccioninstrucciones : instruccioninstruccion : imprimir_instr finins\n | imprimir_ins finins\n | declaracion finins\n | if_instr finins\n | while_instr finins\n | for_instr finins\n | break_instr finins\n | continue_instr finins\n | funcion_instr finins\n | return_instr finins\n | llamada_instr finins\n finins : PUNTOCOMAinstruccion : error PUNTOCOMAimprimir_instr : RPRINTLN PARA contenidos_print PARCimprimir_ins : RPRINT PARA contenidos_print PARCcontenidos_print : contenidos_print COMA valores_printcontenidos_print : valores_printvalores_print : expresion declaracion : declaracion_instr_completa\n | declaracion_instr_simple\n | decla_arr\n | acces\n declaracion_instr_simple : ID IGUAL expresion\n declaracion_instr_completa : ID IGUAL expresion DOBLEDPUNTOS tipo\n if_instr : RIF expresion instrucciones REND\n if_instr : RIF expresion instrucciones RELSE instrucciones REND\n if_instr : RIF expresion instrucciones elseif_instr RELSE instrucciones REND\n if_instr : RIF expresion instrucciones elseif_instr REND\n elseif_instr : elseif_instr elseif_instruction\n elseif_instr : elseif_instruction\n elseif_instruction : RELSEIF expresion instrucciones \n while_instr : RWHILE expresion instrucciones REND\n for_instr : RFOR ID RIN expresion DOSPUNTOS expresion instrucciones REND\n for_instr : RFOR ID RIN expresion instrucciones REND\n break_instr : RBREAK\n continue_instr : RCONTINUE\n 
funcion_instr : RFUNCTION ID PARA parametros PARC instrucciones RENDfuncion_instr : RFUNCTION ID PARA PARC instrucciones RENDparametros : parametros COMA parametroparametros : parametroparametro : ID DOBLEDPUNTOS tiporeturn_instr : RRETURN expresionllamada_instr : ID PARA PARCllamada_instr : ID PARA parametros_llamada PARCparametros_llamada : parametros_llamada COMA parametro_llamadaparametros_llamada : parametro_llamadaparametro_llamada : expresiondeclaracion : RGLOBAL ID IGUAL expresiondeclaracion : RGLOBAL IDdeclaracion : RLOCAL ID IGUAL expresiondeclaracion : RLOCAL IDdecla_arr : ID IGUAL elementelement : element CORA elementos CORCelement : CORA elementos CORCelementos : elementos COMA elementos2elementos : elementos2elementos2 : expresionexpresion : CORA elementos CORCacces : ID corchetes IGUAL expresioncorchetes : corchetes CORA expre CORCcorchetes : CORA expre CORCexpre : expresionexpresion : ID recursrecurs : recurs CORA unico CORCrecurs : CORA unico CORCunico : expresiontipo : RINT\n | RDOUBLE\n | RCHAR\n | RSTRING\n | RBOOLEAN \n | RARRAY\n expresion : expresion MAS expresion\n | expresion MENOS expresion\n | expresion POR expresion\n | expresion DIV expresion\n | expresion POT expresion\n | expresion MOD expresion\n | expresion MENORQUE expresion\n | expresion MAYORQUE expresion\n | expresion MENORIGUAL expresion\n | expresion MAYORIGUAL expresion\n | expresion IGUALIGUAL expresion\n | expresion DIFERENTE expresion\n | expresion OR expresion\n | expresion AND expresion\n \n expresion : MENOS expresion %prec UMENOS\n | NOT expresion %prec UNOT \n \n expresion : PARA expresion PARC\n expresion : llamada_instrexpresion : IDexpresion : ENTEROexpresion : DECIMALexpresion : CADENAexpresion : CARACTERexpresion : RTRUEexpresion : RFALSEexpresion : RPARSE PARA tipo COMA expresion PARC\n expresion : RTRUNCATE PARA RINT COMA expresion PARCexpresion : RLOG PARA expresion COMA expresion PARCexpresion : RLENGTH PARA ID PARC'
_lr_action_items = {'error':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[15,15,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,15,-94,-93,-95,-96,-97,-98,-99,-100,15,-46,15,-66,-90,-91,15,-47,15,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,15,15,15,15,15,-68,-104,15,15,15,15,15,-67,15,15,-101,-102,-103,15,]),'RPRINTLN':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[16,16,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,16,-94,-93,-95,-96,-97,-98,-99,-100,16,-46,16,-66,-90,-91,16,-47,16,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,16,16,16,16,16,-68,-104,16,16,16,16,16,-67,16,16,-101,-102,-103,16,]),'RPRINT':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[17,17,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,17,-94,-93,-95,-96,-97,-98,-99,-100,17,-46,17,-66,-90,-91,17,-47,17,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,17,17,17,17,17,-68,-104,17,17,17,17,17,-67,17,17,-101,-102,-103,17,]),'RGLOBAL':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[22,22,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,22,-94,-93,-95,-96,-97,-98,-99,-100,22,-46,22,-66,-90,-91,22,-47,22,-76,-77,-78,-79,-80,-81,
-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,22,22,22,22,22,-68,-104,22,22,22,22,22,-67,22,22,-101,-102,-103,22,]),'RLOCAL':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[24,24,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,24,-94,-93,-95,-96,-97,-98,-99,-100,24,-46,24,-66,-90,-91,24,-47,24,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,24,24,24,24,24,-68,-104,24,24,24,24,24,-67,24,24,-101,-102,-103,24,]),'RIF':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[25,25,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,25,-94,-93,-95,-96,-97,-98,-99,-100,25,-46,25,-66,-90,-91,25,-47,25,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,25,25,25,25,25,-68,-104,25,25,25,25,25,-67,25,25,-101,-102,-103,25,]),'RWHILE':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[26,26,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,26,-94,-93,-95,-96,-97,-98,-99,-100,26,-46,26,-66,-90,-91,26,-47,26,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,26,26,26,26,26,-68,-104,26,26,26,26,26,-67,26,26,-101,-102,-103,26,]),'RFOR':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[27,27,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,27,-94,-9
3,-95,-96,-97,-98,-99,-100,27,-46,27,-66,-90,-91,27,-47,27,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,27,27,27,27,27,-68,-104,27,27,27,27,27,-67,27,27,-101,-102,-103,27,]),'RBREAK':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[28,28,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,28,-94,-93,-95,-96,-97,-98,-99,-100,28,-46,28,-66,-90,-91,28,-47,28,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,28,28,28,28,28,-68,-104,28,28,28,28,28,-67,28,28,-101,-102,-103,28,]),'RCONTINUE':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[29,29,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,29,-94,-93,-95,-96,-97,-98,-99,-100,29,-46,29,-66,-90,-91,29,-47,29,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,29,29,29,29,29,-68,-104,29,29,29,29,29,-67,29,29,-101,-102,-103,29,]),'RFUNCTION':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204,208,211,215,216,217,218,],[30,30,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,30,-94,-93,-95,-96,-97,-98,-99,-100,30,-46,30,-66,-90,-91,30,-47,30,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,30,30,30,30,30,-68,-104,30,30,30,30,30,-67,30,30,-101,-102,-103,30,]),'RRETURN':([0,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,54,56,60,61,62,63,64,65,66,71,80,92,110,112,113,119,126,136,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,174,182,183,186,189,193,195,197,199,202,203,204
,208,211,215,216,217,218,],[31,31,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,31,-94,-93,-95,-96,-97,-98,-99,-100,31,-46,31,-66,-90,-91,31,-47,31,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,31,31,31,31,31,-68,-104,31,31,31,31,31,-67,31,31,-101,-102,-103,31,]),'ID':([0,2,3,22,24,25,26,27,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,49,50,52,54,55,56,57,58,59,60,61,62,63,64,65,66,71,79,80,86,87,88,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,110,111,112,113,117,118,119,120,121,123,126,127,129,136,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,159,171,174,182,183,186,189,190,191,192,193,194,195,197,198,199,202,203,204,208,211,215,216,217,218,],[23,23,-3,48,53,56,56,72,73,56,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,56,56,56,56,56,23,56,-94,56,56,56,-93,-95,-96,-97,-98,-99,-100,23,56,-46,56,56,56,56,23,56,56,56,56,56,56,56,56,56,56,56,56,56,56,-66,56,-90,-91,56,169,23,56,172,56,-47,56,56,23,56,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,56,56,-92,23,23,23,23,23,-68,56,56,56,-104,56,23,23,172,23,23,23,-67,23,23,-101,-102,-103,23,]),'$end':([1,2,3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,],[0,-1,-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,]),'REND':([3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,92,119,137,138,182,185,195,199,202,203,211,218,],[-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,135,170,184,-33,201,-32,209,213,214,-34,219,220,]),'RELSE':([3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,92,137,138,185,203,],[-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,136,183,-33,-32,-34,]),'RELSEIF':([3,32,33,34,35,36,37,38,39,40,41,42,43,44,45,92,137,138,185,203,],[-3,-2,-4,-15,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-16,139,139,-33,-32,-34,]),'PUNTOCOMA':([4,5,6,7,8,9,10,11,12,13,14,15,18,19,20,21,28,29,48,53,56,60,61,62,63,64,65,66,74,80,84,85,110,112,113,122,124,125,126,131,134,135,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,161,162,163,164,165,166,
170,178,180,184,189,193,200,201,204,209,213,214,215,216,217,219,220,],[34,34,34,34,34,34,34,34,34,34,34,45,-22,-23,-24,-25,-38,-39,-52,-54,-94,-93,-95,-96,-97,-98,-99,-100,-45,-46,-26,-55,-66,-90,-91,-17,-18,-51,-47,-62,-53,-28,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,-70,-71,-72,-73,-74,-75,-35,-27,-57,-31,-68,-104,-56,-29,-67,-37,-41,-30,-101,-102,-103,-40,-36,]),'PARA':([16,17,23,25,26,31,46,47,49,50,52,55,56,57,58,59,67,68,69,70,73,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[46,47,49,59,59,59,59,59,59,59,59,59,49,59,59,59,115,116,117,118,121,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,59,]),'IGUAL':([23,48,51,53,133,181,],[50,79,87,91,-64,-63,]),'CORA':([23,25,26,31,46,47,49,50,51,52,55,56,57,58,59,79,85,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,110,111,117,120,123,127,129,133,139,155,156,180,181,189,190,191,192,194,200,204,],[52,55,55,55,55,55,55,86,88,55,55,111,55,55,55,55,129,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,55,156,55,55,55,55,55,55,-64,55,55,55,-57,-63,-68,55,55,55,55,-56,-67,]),'MENOS':([25,26,31,46,47,49,50,52,54,55,56,57,58,59,60,61,62,63,64,65,66,71,74,77,79,80,83,84,86,87,88,90,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,109,110,111,112,113,114,117,120,123,125,126,127,129,131,134,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,158,159,168,171,180,186,189,190,191,192,193,194,204,205,206,207,208,215,216,217,],[57,57,57,57,57,57,57,57,94,57,-94,57,57,57,-93,-95,-96,-97,-98,-99,-100,94,94,94,57,-46,94,94,57,57,57,94,57,57,57,57,57,57,57,57,57,57,57,57,57,57,57,94,-66,57,-90,94,94,57,57,57,94,-47,57,57,94,94,57,-76,-77,-78,-79,-80,-81,94,94,94,94,94,94,94,94,-61,57,57,94,-92,94,94,-61,94,-68,57,57,57,-104,57,-67,94,94,94,94,-101,-102,-103,]),'NOT':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,10
6,111,117,120,123,127,129,139,155,156,190,191,192,194,],[58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,58,]),'ENTERO':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,61,]),'DECIMAL':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,62,]),'CADENA':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,]),'CARACTER':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,]),'RTRUE':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,65,]),'RFALSE':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,66,]),
'RPARSE':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,67,]),'RTRUNCATE':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,68,]),'RLOG':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,69,]),'RLENGTH':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,70,]),'PARC':([49,56,60,61,62,63,64,65,66,75,76,77,78,80,81,82,83,110,112,113,114,121,126,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,161,162,163,164,165,166,169,173,175,176,177,189,193,204,205,206,207,210,212,215,216,217,],[80,-94,-93,-95,-96,-97,-98,-99,-100,122,-20,-21,124,-46,126,-49,-50,-66,-90,-91,159,174,-47,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,-70,-71,-72,-73,-74,-75,193,197,-43,-19,-48,-68,-104,-67,215,216,217,-44,-42,-101,-102,-103,]),'MAS':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[93,-94,-93,-95,-96,-97,-98,-99,-100,93,93,93,-46,93,93,93,93,-66,-90,93,93,93,-47,93,
93,-76,-77,-78,-79,-80,-81,93,93,93,93,93,93,93,93,-61,93,-92,93,93,-61,93,-68,-104,-67,93,93,93,93,-101,-102,-103,]),'POR':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[95,-94,-93,-95,-96,-97,-98,-99,-100,95,95,95,-46,95,95,95,95,-66,-90,95,95,95,-47,95,95,95,95,-78,-79,-80,-81,95,95,95,95,95,95,95,95,-61,95,-92,95,95,-61,95,-68,-104,-67,95,95,95,95,-101,-102,-103,]),'DIV':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[96,-94,-93,-95,-96,-97,-98,-99,-100,96,96,96,-46,96,96,96,96,-66,-90,96,96,96,-47,96,96,96,96,-78,-79,-80,-81,96,96,96,96,96,96,96,96,-61,96,-92,96,96,-61,96,-68,-104,-67,96,96,96,96,-101,-102,-103,]),'POT':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[97,-94,-93,-95,-96,-97,-98,-99,-100,97,97,97,-46,97,97,97,97,-66,-90,97,97,97,-47,97,97,97,97,97,97,None,97,97,97,97,97,97,97,97,97,-61,97,-92,97,97,-61,97,-68,-104,-67,97,97,97,97,-101,-102,-103,]),'MOD':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[98,-94,-93,-95,-96,-97,-98,-99,-100,98,98,98,-46,98,98,98,98,-66,-90,98,98,98,-47,98,98,98,98,-78,-79,-80,-81,98,98,98,98,98,98,98,98,-61,98,-92,98,98,-61,98,-68,-104,-67,98,98,98,98,-101,-102,-103,]),'MENORQUE':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207
,208,215,216,217,],[99,-94,-93,-95,-96,-97,-98,-99,-100,99,99,99,-46,99,99,99,99,-66,-90,99,99,99,-47,99,99,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,99,99,-61,99,-92,99,99,-61,99,-68,-104,-67,99,99,99,99,-101,-102,-103,]),'MAYORQUE':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[100,-94,-93,-95,-96,-97,-98,-99,-100,100,100,100,-46,100,100,100,100,-66,-90,100,100,100,-47,100,100,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,100,100,-61,100,-92,100,100,-61,100,-68,-104,-67,100,100,100,100,-101,-102,-103,]),'MENORIGUAL':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[101,-94,-93,-95,-96,-97,-98,-99,-100,101,101,101,-46,101,101,101,101,-66,-90,101,101,101,-47,101,101,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,101,101,-61,101,-92,101,101,-61,101,-68,-104,-67,101,101,101,101,-101,-102,-103,]),'MAYORIGUAL':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[102,-94,-93,-95,-96,-97,-98,-99,-100,102,102,102,-46,102,102,102,102,-66,-90,102,102,102,-47,102,102,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,102,102,-61,102,-92,102,102,-61,102,-68,-104,-67,102,102,102,102,-101,-102,-103,]),'IGUALIGUAL':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[103,-94,-93,-95,-96,-97,-98,-99,-100,103,103,103,-46,103,103,103,103,-66,-90,103,103,103,-47,103,103,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,103,103,-61,103,-92,103,
103,-61,103,-68,-104,-67,103,103,103,103,-101,-102,-103,]),'DIFERENTE':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[104,-94,-93,-95,-96,-97,-98,-99,-100,104,104,104,-46,104,104,104,104,-66,-90,104,104,104,-47,104,104,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,104,104,-61,104,-92,104,104,-61,104,-68,-104,-67,104,104,104,104,-101,-102,-103,]),'OR':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[105,-94,-93,-95,-96,-97,-98,-99,-100,105,105,105,-46,105,105,105,105,-66,-90,-91,105,105,-47,105,105,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,105,-92,105,105,-61,105,-68,-104,-67,105,105,105,105,-101,-102,-103,]),'AND':([54,56,60,61,62,63,64,65,66,71,74,77,80,83,84,90,109,110,112,113,114,125,126,131,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,158,159,168,171,180,186,189,193,204,205,206,207,208,215,216,217,],[106,-94,-93,-95,-96,-97,-98,-99,-100,106,106,106,-46,106,106,106,106,-66,-90,-91,106,106,-47,106,106,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,106,-89,-61,106,-92,106,106,-61,106,-68,-104,-67,106,106,106,106,-101,-102,-103,]),'COMA':([56,60,61,62,63,64,65,66,75,76,77,78,80,81,82,83,107,108,109,110,112,113,126,130,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,160,161,162,163,164,165,166,167,168,173,175,176,177,179,187,189,193,204,210,212,215,216,217,],[-94,-93,-95,-96,-97,-98,-99,-100,123,-20,-21,123,-46,127,-49,-50,155,-59,-60,-66,-90,-91,-47,155,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,190,-70,-71,-72,-73,-74,-75,191,192,198,-43,-19,-48,155,-58,-68,-104,-67,-44,-42,-101,-102,-103,]),'DOBLEDPUNTOS':([56,60,61,62,63,64,65,66,80,84,110,112,113,126,140,141,142,
143,144,145,146,147,148,149,150,151,152,153,154,159,172,180,189,193,204,215,216,217,],[-94,-93,-95,-96,-97,-98,-99,-100,-46,128,-66,-90,-91,-47,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,196,-61,-68,-104,-67,-101,-102,-103,]),'CORC':([56,60,61,62,63,64,65,66,80,89,90,107,108,109,110,112,113,126,130,132,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,157,158,159,179,187,188,189,193,204,215,216,217,],[-94,-93,-95,-96,-97,-98,-99,-100,-46,133,-65,154,-59,-60,-66,-90,-91,-47,180,181,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,189,-69,-92,200,-58,204,-68,-104,-67,-101,-102,-103,]),'DOSPUNTOS':([56,60,61,62,63,64,65,66,80,110,112,113,126,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,159,171,189,193,204,215,216,217,],[-94,-93,-95,-96,-97,-98,-99,-100,-46,-66,-90,-91,-47,-76,-77,-78,-79,-80,-81,-82,-83,-84,-85,-86,-87,-88,-89,-61,-92,194,-68,-104,-67,-101,-102,-103,]),'RIN':([72,],[120,]),'RINT':([115,116,128,196,],[161,167,161,161,]),'RDOUBLE':([115,128,196,],[162,162,162,]),'RCHAR':([115,128,196,],[163,163,163,]),'RSTRING':([115,128,196,],[164,164,164,]),'RBOOLEAN':([115,128,196,],[165,165,165,]),'RARRAY':([115,128,196,],[166,166,166,]),}
# Expand PLY's compact per-token table (token -> (state list, action list))
# into the nested _lr_action[state][token] = action form the parser looks up.
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
# Drop the compact form once the expanded table is built.
del _lr_action_items
_lr_goto_items = {'init':([0,],[1,]),'instrucciones':([0,54,71,136,171,174,183,186,197,208,],[2,92,119,182,195,199,202,203,211,218,]),'instruccion':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[3,32,3,3,32,32,3,3,3,32,3,3,32,3,32,32,32,3,32,32,]),'imprimir_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,]),'imprimir_ins':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,]),'declaracion':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,]),'if_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,]),'while_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,]),'for_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,]),'break_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,]),'continue_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,11,]),'funcion_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,]),'return_instr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,]),'llamada_instr':([0,2,25,26,31,46,47,49,50,52,54,55,57,58,59,71,79,86,87,88,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,119,120,123,127,129,136,139,155,156,171,174,182,183,186,190,191,192,194,195,197,199,202,203,208,211,218,],[14,14,60,60,60,60,60,60,60,60,14,60,60,6
0,60,14,60,60,60,60,60,14,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,60,14,60,60,60,60,14,60,60,60,14,14,14,14,14,60,60,60,60,14,14,14,14,14,14,14,14,]),'declaracion_instr_completa':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,]),'declaracion_instr_simple':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,]),'decla_arr':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,]),'acces':([0,2,54,71,92,119,136,171,174,182,183,186,195,197,199,202,203,208,211,218,],[21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,21,]),'finins':([4,5,6,7,8,9,10,11,12,13,14,],[33,35,36,37,38,39,40,41,42,43,44,]),'corchetes':([23,],[51,]),'expresion':([25,26,31,46,47,49,50,52,55,57,58,59,79,86,87,88,91,93,94,95,96,97,98,99,100,101,102,103,104,105,106,111,117,120,123,127,129,139,155,156,190,191,192,194,],[54,71,74,77,77,83,84,90,109,112,113,114,125,109,131,90,134,140,141,142,143,144,145,146,147,148,149,150,151,152,153,158,168,171,77,83,109,186,109,158,205,206,207,208,]),'contenidos_print':([46,47,],[75,78,]),'valores_print':([46,47,123,],[76,76,176,]),'parametros_llamada':([49,],[81,]),'parametro_llamada':([49,127,],[82,177,]),'element':([50,],[85,]),'expre':([52,88,],[89,132,]),'elementos':([55,86,129,],[107,130,179,]),'elementos2':([55,86,129,155,],[108,108,108,187,]),'recurs':([56,],[110,]),'elseif_instr':([92,],[137,]),'elseif_instruction':([92,137,],[138,185,]),'unico':([111,156,],[157,188,]),'tipo':([115,128,196,],[160,178,210,]),'parametros':([121,],[173,]),'parametro':([121,198,],[175,212,]),}
# Same expansion as above, for the GOTO table: nonterminal -> (states, gotos)
# becomes the nested _lr_goto[state][nonterminal] = next_state mapping.
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = {}
        _lr_goto[_x][_k] = _y
# Drop the compact form once the expanded table is built.
del _lr_goto_items
_lr_productions = [
("S' -> init","S'",1,None,None,None),
('init -> instrucciones','init',1,'p_init','grammar.py',213),
('instrucciones -> instrucciones instruccion','instrucciones',2,'p_instrucciones_instrucciones_instruccion','grammar.py',217),
('instrucciones -> instruccion','instrucciones',1,'p_instrucciones_instruccion','grammar.py',223),
('instruccion -> imprimir_instr finins','instruccion',2,'p_instruccion','grammar.py',230),
('instruccion -> imprimir_ins finins','instruccion',2,'p_instruccion','grammar.py',231),
('instruccion -> declaracion finins','instruccion',2,'p_instruccion','grammar.py',232),
('instruccion -> if_instr finins','instruccion',2,'p_instruccion','grammar.py',233),
('instruccion -> while_instr finins','instruccion',2,'p_instruccion','grammar.py',234),
('instruccion -> for_instr finins','instruccion',2,'p_instruccion','grammar.py',235),
('instruccion -> break_instr finins','instruccion',2,'p_instruccion','grammar.py',236),
('instruccion -> continue_instr finins','instruccion',2,'p_instruccion','grammar.py',237),
('instruccion -> funcion_instr finins','instruccion',2,'p_instruccion','grammar.py',238),
('instruccion -> return_instr finins','instruccion',2,'p_instruccion','grammar.py',239),
('instruccion -> llamada_instr finins','instruccion',2,'p_instruccion','grammar.py',240),
('finins -> PUNTOCOMA','finins',1,'p_finins','grammar.py',245),
('instruccion -> error PUNTOCOMA','instruccion',2,'p_instruccion_error','grammar.py',249),
('imprimir_instr -> RPRINTLN PARA contenidos_print PARC','imprimir_instr',4,'p_imprimir','grammar.py',254),
('imprimir_ins -> RPRINT PARA contenidos_print PARC','imprimir_ins',4,'p_imprimir2','grammar.py',258),
('contenidos_print -> contenidos_print COMA valores_print','contenidos_print',3,'p_contenidos_print','grammar.py',262),
('contenidos_print -> valores_print','contenidos_print',1,'p_lista_contenidos_print','grammar.py',267),
('valores_print -> expresion','valores_print',1,'p_valores_print','grammar.py',271),
('declaracion -> declaracion_instr_completa','declaracion',1,'p_declaracion','grammar.py',276),
('declaracion -> declaracion_instr_simple','declaracion',1,'p_declaracion','grammar.py',277),
('declaracion -> decla_arr','declaracion',1,'p_declaracion','grammar.py',278),
('declaracion -> acces','declaracion',1,'p_declaracion','grammar.py',279),
('declaracion_instr_simple -> ID IGUAL expresion','declaracion_instr_simple',3,'p_declaracion_simple','grammar.py',285),
('declaracion_instr_completa -> ID IGUAL expresion DOBLEDPUNTOS tipo','declaracion_instr_completa',5,'p_declaracion_completa','grammar.py',290),
('if_instr -> RIF expresion instrucciones REND','if_instr',4,'p_if1','grammar.py',297),
('if_instr -> RIF expresion instrucciones RELSE instrucciones REND','if_instr',6,'p_if2','grammar.py',303),
('if_instr -> RIF expresion instrucciones elseif_instr RELSE instrucciones REND','if_instr',7,'p_if3','grammar.py',309),
('if_instr -> RIF expresion instrucciones elseif_instr REND','if_instr',5,'p_if4','grammar.py',314),
('elseif_instr -> elseif_instr elseif_instruction','elseif_instr',2,'p_elsif','grammar.py',320),
('elseif_instr -> elseif_instruction','elseif_instr',1,'p_elseif_instr','grammar.py',327),
('elseif_instruction -> RELSEIF expresion instrucciones','elseif_instruction',3,'p_elseif_instruction','grammar.py',335),
('while_instr -> RWHILE expresion instrucciones REND','while_instr',4,'p_while','grammar.py',342),
('for_instr -> RFOR ID RIN expresion DOSPUNTOS expresion instrucciones REND','for_instr',8,'p_for','grammar.py',347),
('for_instr -> RFOR ID RIN expresion instrucciones REND','for_instr',6,'p_for2','grammar.py',352),
('break_instr -> RBREAK','break_instr',1,'p_break','grammar.py',359),
('continue_instr -> RCONTINUE','continue_instr',1,'p_continue','grammar.py',365),
('funcion_instr -> RFUNCTION ID PARA parametros PARC instrucciones REND','funcion_instr',7,'p_funcion1','grammar.py',371),
('funcion_instr -> RFUNCTION ID PARA PARC instrucciones REND','funcion_instr',6,'p_funcion_2','grammar.py',375),
('parametros -> parametros COMA parametro','parametros',3,'p_parametros_1','grammar.py',379),
('parametros -> parametro','parametros',1,'p_parametros_2','grammar.py',384),
('parametro -> ID DOBLEDPUNTOS tipo','parametro',3,'p_parametro','grammar.py',388),
('return_instr -> RRETURN expresion','return_instr',2,'p_return','grammar.py',392),
('llamada_instr -> ID PARA PARC','llamada_instr',3,'p_llamada1','grammar.py',396),
('llamada_instr -> ID PARA parametros_llamada PARC','llamada_instr',4,'p_llamada2','grammar.py',400),
('parametros_llamada -> parametros_llamada COMA parametro_llamada','parametros_llamada',3,'p_parametrosLL_1','grammar.py',404),
('parametros_llamada -> parametro_llamada','parametros_llamada',1,'p_parametrosLL_2','grammar.py',409),
('parametro_llamada -> expresion','parametro_llamada',1,'p_parametroLL','grammar.py',413),
('declaracion -> RGLOBAL ID IGUAL expresion','declaracion',4,'p_declaracion2','grammar.py',418),
('declaracion -> RGLOBAL ID','declaracion',2,'p_declaracion2_global','grammar.py',422),
('declaracion -> RLOCAL ID IGUAL expresion','declaracion',4,'p_declaracion_local','grammar.py',426),
('declaracion -> RLOCAL ID','declaracion',2,'p_declaracion_local2','grammar.py',430),
('decla_arr -> ID IGUAL element','decla_arr',3,'p_decla_array','grammar.py',435),
('element -> element CORA elementos CORC','element',4,'p_elemen','grammar.py',439),
('element -> CORA elementos CORC','element',3,'p_elemen2','grammar.py',444),
('elementos -> elementos COMA elementos2','elementos',3,'p_elementos','grammar.py',448),
('elementos -> elementos2','elementos',1,'p_elementos2','grammar.py',453),
('elementos2 -> expresion','elementos2',1,'p_elementos3','grammar.py',457),
('expresion -> CORA elementos CORC','expresion',3,'p_expresionarray','grammar.py',461),
('acces -> ID corchetes IGUAL expresion','acces',4,'p_accesarr','grammar.py',465),
('corchetes -> corchetes CORA expre CORC','corchetes',4,'p_corchetes','grammar.py',469),
('corchetes -> CORA expre CORC','corchetes',3,'p_corchetes2','grammar.py',474),
('expre -> expresion','expre',1,'p_corchetes3','grammar.py',478),
('expresion -> ID recurs','expresion',2,'p_acceso_array','grammar.py',482),
('recurs -> recurs CORA unico CORC','recurs',4,'p_acceso_array2','grammar.py',485),
('recurs -> CORA unico CORC','recurs',3,'p_acceso_array3','grammar.py',489),
('unico -> expresion','unico',1,'p_acceso_array4','grammar.py',493),
('tipo -> RINT','tipo',1,'p_tipo','grammar.py',498),
('tipo -> RDOUBLE','tipo',1,'p_tipo','grammar.py',499),
('tipo -> RCHAR','tipo',1,'p_tipo','grammar.py',500),
('tipo -> RSTRING','tipo',1,'p_tipo','grammar.py',501),
('tipo -> RBOOLEAN','tipo',1,'p_tipo','grammar.py',502),
('tipo -> RARRAY','tipo',1,'p_tipo','grammar.py',503),
('expresion -> expresion MAS expresion','expresion',3,'p_expresion_binaria','grammar.py',521),
('expresion -> expresion MENOS expresion','expresion',3,'p_expresion_binaria','grammar.py',522),
('expresion -> expresion POR expresion','expresion',3,'p_expresion_binaria','grammar.py',523),
('expresion -> expresion DIV expresion','expresion',3,'p_expresion_binaria','grammar.py',524),
('expresion -> expresion POT expresion','expresion',3,'p_expresion_binaria','grammar.py',525),
('expresion -> expresion MOD expresion','expresion',3,'p_expresion_binaria','grammar.py',526),
('expresion -> expresion MENORQUE expresion','expresion',3,'p_expresion_binaria','grammar.py',527),
('expresion -> expresion MAYORQUE expresion','expresion',3,'p_expresion_binaria','grammar.py',528),
('expresion -> expresion MENORIGUAL expresion','expresion',3,'p_expresion_binaria','grammar.py',529),
('expresion -> expresion MAYORIGUAL expresion','expresion',3,'p_expresion_binaria','grammar.py',530),
('expresion -> expresion IGUALIGUAL expresion','expresion',3,'p_expresion_binaria','grammar.py',531),
('expresion -> expresion DIFERENTE expresion','expresion',3,'p_expresion_binaria','grammar.py',532),
('expresion -> expresion OR expresion','expresion',3,'p_expresion_binaria','grammar.py',533),
('expresion -> expresion AND expresion','expresion',3,'p_expresion_binaria','grammar.py',534),
('expresion -> MENOS expresion','expresion',2,'p_expresion_unaria','grammar.py',568),
('expresion -> NOT expresion','expresion',2,'p_expresion_unaria','grammar.py',569),
('expresion -> PARA expresion PARC','expresion',3,'p_expresion_agrupacion','grammar.py',578),
('expresion -> llamada_instr','expresion',1,'p_expresion_llamada','grammar.py',583),
('expresion -> ID','expresion',1,'p_expresion_identificador','grammar.py',588),
('expresion -> ENTERO','expresion',1,'p_expresion_entero','grammar.py',592),
('expresion -> DECIMAL','expresion',1,'p_primitivo_decimal','grammar.py',596),
('expresion -> CADENA','expresion',1,'p_primitivo_cadena','grammar.py',600),
('expresion -> CARACTER','expresion',1,'p_primitivo_caracter','grammar.py',604),
('expresion -> RTRUE','expresion',1,'p_primitivo_true','grammar.py',608),
('expresion -> RFALSE','expresion',1,'p_primitivo_false','grammar.py',612),
('expresion -> RPARSE PARA tipo COMA expresion PARC','expresion',6,'p_expresion_cast','grammar.py',616),
('expresion -> RTRUNCATE PARA RINT COMA expresion PARC','expresion',6,'p_expresion_truncate','grammar.py',621),
('expresion -> RLOG PARA expresion COMA expresion PARC','expresion',6,'p_expresion_log','grammar.py',625),
('expresion -> RLENGTH PARA ID PARC','expresion',4,'p_expresion_length','grammar.py',633),
]
| [
"diemorab@gmail.com"
] | diemorab@gmail.com |
43cf9f14d261bdd474a55dfba77df37cf2ca3d8b | 5475ca8d4ad2b0b68135d3ea70f32c1c55d21b65 | /docs/conf.py | 421f39f323231dc7f3b6eef5a13dc6328d3f70e8 | [
"MIT"
] | permissive | instacart/lore | e5c9aa49439a635ba80d66ecf5d76dc763e88308 | a14f65a96d0ea2513a35e424b4e16d948115b89c | refs/heads/master | 2023-05-25T08:09:53.463945 | 2022-09-27T19:41:48 | 2022-09-27T19:41:48 | 107,602,547 | 1,578 | 139 | MIT | 2023-05-13T02:26:19 | 2017-10-19T21:51:45 | Python | UTF-8 | Python | false | false | 6,191 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Set before 'import lore': presumably read by lore at import time to skip
# its normal environment initialization -- TODO confirm against lore.env.
sys.lore_no_env = True
# Make the package under ../ importable so autodoc can introspect it.
sys.path.insert(0, os.path.abspath('../'))
import lore
# -- Scrub lore.env information ----------------------------------------------
# Replace machine-specific lore.env values with neutral placeholders so the
# generated documentation does not leak local hostnames or paths.
version_info = [sys.version_info[0], sys.version_info[1], sys.version_info[2]]
lore.env.HOST = 'localhost'
lore.env.PYTHON_VERSION = '.'.join([str(i) for i in version_info])
lore.env.PYTHON_VERSION_INFO = version_info
lore.env.ROOT = '.'
lore.env.DATA_DIR = './data'
lore.env.WORK_DIR = '.'
lore.env.MODELS_DIR = './models'
lore.env.LIB_DIR = './libs'
lore.env.ENV_FILE = './.env'
lore.env.HOME = '/home/User'
lore.env.TESTS_DIR = './tests'
lore.env.LOG_DIR = './logs'
lore.env.JUPYTER_KERNEL_PATH = '/'
lore.env.REQUIREMENTS = './requirements.txt'
# -- Project information -----------------------------------------------------
project = 'Lore'
copyright = '2018, Instacart'
author = 'Montana Low and Jeremy Stanley'
# The short X.Y version.
# BUG FIX: the original sliced [0:1], which kept only the major component
# ("X") and contradicted this comment; take the first two components.
version = '.'.join(lore.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags
release = lore.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5.0 expects a string such as 'en' here; None is
# the legacy default -- confirm against the pinned Sphinx version.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. Requires the third-party 'sphinx_rtd_theme'
# package to be installed at build time.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Loredoc'
# -- Options for LaTeX output ------------------------------------------------
# All LaTeX options left at their defaults; the commented keys document the
# available knobs.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Lore.tex', 'Lore Documentation',
     'Montana Low and Jeremy Stanley', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'lore', 'Lore Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Lore', 'Lore Documentation',
     author, 'Lore', 'Machine Learning Framework for Data Scientists by Engineers',
     'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
# The legacy "URL: None" form means: fetch objects.inv from that URL.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"noreply@github.com"
] | instacart.noreply@github.com |
4de4d92cd87c66f2d7af6124d092c332f4542ba7 | 3050e5b0a65d963ca4d96f77ff7f02947b083dfc | /bts/dataset.py | 081982f8d8894a4e7c82bc56f571c18c40fdc446 | [
"MIT"
] | permissive | NajusAnaxi/UNet-based-for-Brain-Tumor-Segmentation | b6417b8cd41151b9a0990f6f20bcf5d78e3ef84d | 24ca4432873f145ad33810f40c851ac10bf030fa | refs/heads/main | 2023-08-23T20:03:01.077884 | 2021-10-21T10:33:39 | 2021-10-21T10:33:39 | 416,876,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,907 | py | from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from PIL import Image
import os
import random
class TumorDataset(Dataset):
    """Brain-tumor segmentation dataset (torch.utils.data.Dataset).

    Expects ``root_dir`` to contain a flat set of PNG pairs named
    ``<index>.png`` (scan image) and ``<index>_mask.png`` (segmentation
    mask) for every integer sample index.
    """

    def __init__(self, root_dir, transform=True, DEBUG=False):
        """Constructor for the TumorDataset class.

        Parameters:
            root_dir(str): Directory with all the images.
            transform(bool): Flag to apply random image augmentations.
            DEBUG(bool): Print which augmentations are applied.

        Returns: None
        """
        self.root_dir = root_dir
        # Optional augmentations; each is applied to image and mask
        # together so the pair stays spatially aligned.
        self.transform = {'hflip': TF.hflip,
                          'vflip': TF.vflip,
                          'rotate': TF.rotate}
        # Always applied: collapse to a single channel, resize to 512x512.
        self.default_transformation = transforms.Compose([
            transforms.Grayscale(),
            transforms.Resize((512, 512))
        ])
        self.DEBUG = DEBUG
        if not transform:
            self.transform = None

    def __getitem__(self, index):
        """Support dataset[index] indexing.

        Parameters:
            index(int): Index of the dataset sample.

        Returns:
            sample(dict): Contains the index, image and mask tensors.
                'index': Index of the image.
                'image': Tumor image as a torch.Tensor.
                'mask' : Mask image as a torch.Tensor.
        """
        image_name = os.path.join(self.root_dir, str(index)+'.png')
        mask_name = os.path.join(self.root_dir, str(index)+'_mask.png')

        image = Image.open(image_name)
        mask = Image.open(mask_name)

        image = self.default_transformation(image)
        mask = self.default_transformation(mask)

        # Random augmentations (if enabled) before tensor conversion.
        if self.transform:
            image, mask = self._random_transform(image, mask)

        image = TF.to_tensor(image)
        mask = TF.to_tensor(mask)

        sample = {'index': int(index), 'image': image, 'mask': mask}
        return sample

    def _random_transform(self, image, mask):
        """Apply the configured augmentations in random order.

        Each augmentation fires with probability 0.5 and is applied to the
        image and the mask identically.
        """
        remaining = list(self.transform)
        for _ in range(len(remaining)):
            choice_key = random.choice(remaining)
            if self.DEBUG:
                print(f'Transform choose: {choice_key}')
            # Fair coin flip. (The original compared random.randint(0, 1)
            # against 0.5 -- an obscure way to express the same 50% chance.)
            if random.random() >= 0.5:
                if self.DEBUG:
                    print(f'\tApplying transformation: {choice_key}')
                if choice_key == 'rotate':
                    rotation = random.randint(15, 75)
                    if self.DEBUG:
                        print(f'\t\tRotation by: {rotation}')
                    image = self.transform[choice_key](image, rotation)
                    mask = self.transform[choice_key](mask, rotation)
                else:
                    image = self.transform[choice_key](image)
                    mask = self.transform[choice_key](mask)
            remaining.remove(choice_key)
        return image, mask

    def __len__(self):
        """Return the number of (image, mask) pairs in the dataset."""
        total_files = len(os.listdir(self.root_dir))
        if total_files % 2 != 0:
            # The original used 'assert' here, which is silently stripped
            # when Python runs with -O; raise explicitly instead.
            raise ValueError('Part of dataset is missing!\n'
                             'Number of tumor and mask images are not same.')
        return total_files // 2
| [
"noreply@github.com"
] | NajusAnaxi.noreply@github.com |
a85acdc9c3cedf35cd064df7cde294c82fcfb9cf | 966ea314bcd64f40bfaea457f914fcedbe26426a | /April-week3/teststdin.py | 5a306710441cfe3a30ed647f4c6c96989f39a683 | [] | no_license | vandanasen/Python-Projects | 30caa85cf87ba712e1307b0441fed2d7fa9298a0 | 9b24a9f6af0374bb0d6a3a15c05099f49edfd581 | refs/heads/master | 2020-03-26T00:26:06.067905 | 2019-03-11T22:58:25 | 2019-03-11T22:58:25 | 144,320,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | #Import System module
# Demonstrates the three standard streams: stdin, stdout and stderr.
import sys

print("Enter a number")
# Read one line from standard input and parse it as an integer.
number = int(sys.stdin.readline())
print(number)
# Write directly to the standard output stream.
sys.stdout.write('This is stdout text\n')
# Write directly to the standard error stream.
sys.stderr.write('This is a stderror\n')
| [
"vandy_senthil@yahoo.com"
] | vandy_senthil@yahoo.com |
33f432b772d2ca4608fdedeb8ab534706c8456e7 | 088508fbfdbe9e0d6dbc780d1eb376f5536d5a91 | /strava/__init__.py | 994dcf36c08397d445c5f5a0ece13851b7b81edc | [
"MIT"
] | permissive | lincolwn/python-strava | 6706a88bd499c647f0d9a55d0174e1d5bff2480a | eb642c743b93a336a027b38a06cf842dc0464bed | refs/heads/master | 2021-06-13T06:49:48.238317 | 2020-11-18T17:44:54 | 2020-11-18T17:44:54 | 200,271,701 | 1 | 2 | MIT | 2021-06-10T19:15:09 | 2019-08-02T17:13:52 | Python | UTF-8 | Python | false | false | 82 | py | __title__ = 'Strava Client'
# Package metadata exposed as module-level dunder attributes.
__version__ = '0.1.8'
__author__ = 'Lincolwn Martins'
| [
"lincolwn@gmail.com"
] | lincolwn@gmail.com |
327bcf4869247752f51e656a3f087e131c62d8b2 | 25488c9c54ac2048f75bbdc3fc728cd5764da3e2 | /working_tfidfmodel.py | 8a9efa1ba0e6ceeed17a9098756c58408786cde3 | [] | no_license | gputti/proj1 | 0ce1c41aedbc21ee0b29f97c3108989ea7c4b063 | c1b6e5ba3b8c4282a2467567e1ddf190866f745c | refs/heads/master | 2021-09-15T10:35:54.095788 | 2018-05-30T19:56:32 | 2018-05-30T19:56:32 | 110,478,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,812 | py | ################################################
# Date: 01/02/2018
# this is working as of date.
# This uses TF-IDF model building.
#
################################################
import sys, getopt
import tempfile
import logging
import tempfile
from os import walk
from gensim import corpora, models, similarities
from collections import defaultdict
from pprint import pprint # pretty-printer
import nltk
from nltk.tokenize import word_tokenize
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
############ FUNCTIONS BELOW ############
def getLinesArrayFromFile(filename):
    """Read *filename* and return its lines stripped of surrounding
    whitespace, keeping only lines longer than 20 characters.

    Short lines (headings, blanks, page numbers) carry no useful sentence
    content for similarity matching, so they are dropped.
    """
    # 'with' closes the file automatically; the original also called
    # f.close() inside the block (redundant) and carried several blocks of
    # dead, commented-out code.
    with open(filename) as f:
        stripped = (line.strip() for line in f)
        # len(s) > 20 already implies the line is non-empty, so the
        # original's extra truthiness check is unnecessary.
        return [s for s in stripped if len(s) > 20]
def getAllFilesAsList(dirpath):
    """Recursively collect the names of all non-hidden files under *dirpath*.

    Returns bare file names (no directory components); dot-files are skipped.
    """
    # BUG FIX: the original walked the module-level 'docpath' global and
    # shadowed its own 'dirpath' parameter in the loop target, so the
    # argument passed by the caller was silently ignored.
    files = []
    for _root, _dirs, filenames in walk(dirpath):
        files.extend(fn for fn in filenames if not fn.startswith('.'))
    return files
def printUniqueWords(texts):
    """Print the number of distinct non-blank tokens across all documents.

    *texts* is a list of token lists (one list per document).
    """
    # BUG FIX: the original accumulated every non-blank token into a list,
    # so duplicates were counted and the reported "unique" total was wrong.
    # A set comprehension deduplicates while skipping blank tokens.
    uniwords = {word for text in texts for word in text if word.strip()}
    print('total unique words: ', len(uniwords))
def filterLessFrequentWords(texts, ii):
    """Drop tokens whose corpus-wide frequency is not greater than *ii*.

    Blank/whitespace-only tokens are never counted, so they are filtered
    out whenever *ii* >= 0. Returns a new list of token lists; *texts*
    itself is not modified.
    """
    # First pass: count every non-blank token across all documents.
    frequency = {}
    for document in texts:
        for token in document:
            if token.strip():
                frequency[token] = frequency.get(token, 0) + 1
    # Second pass: keep only tokens seen more than *ii* times (uncounted
    # tokens default to 0, matching the original defaultdict behaviour).
    return [[token for token in document if frequency.get(token, 0) > ii]
            for document in texts]
############ END OF FUNCTIONS ############
# --- Main pipeline: build a TF-IDF similarity index over ./docs -----------
# NOTE: this script is Python 2 (bare 'print' statements, raw_input below).
meta_docpath = "meta_docs"
docpath = "docs"
documents = []
files = getAllFilesAsList(docpath)
print("reading following files:")
print(files)
# Each non-trivial line of each file becomes one "document" for retrieval.
for file in files:
    documents.extend(getLinesArrayFromFile(docpath+"/" +file))
print("total no of documents: ", len(documents))
# Tokenize: lowercase, whitespace-split, drop English stopwords.
stoplist = nltk.corpus.stopwords.words('english')
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in documents]
# Map each token to an integer id.
dictionary = corpora.Dictionary(texts)
# store the dictionary, for future reference
dictionary.save(meta_docpath + '/gopi.dict')
print "no of words in dictionary: " , len(dictionary)
corpus = [dictionary.doc2bow(text) for text in texts] # bow = bag of words
corpora.MmCorpus.serialize(meta_docpath + '/gopi.mm', corpus) # store to disk, for later use
# Weight the bag-of-words corpus by TF-IDF and build a similarity index.
tf_idf = models.TfidfModel(corpus)
print(tf_idf)
sims = similarities.Similarity('./meta_docs/',tf_idf[corpus], num_features=len(dictionary))
print(type(sims))
# Interactive query loop: transform the question through the same
# tokenize -> bag-of-words -> TF-IDF pipeline, then rank all documents.
new_doc = ""
while True: # infinite loop
    new_doc = raw_input("\nEnter the question(q to quit): ")
    if new_doc == "q":
        break # stops the loop
    query_doc = [w.lower() for w in word_tokenize(new_doc)]
    query_doc_bow = dictionary.doc2bow(query_doc)
    query_doc_tf_idf = tf_idf[query_doc_bow]
    results = sims[query_doc_tf_idf]
    # Sort document indices by descending similarity score.
    results = sorted(enumerate(results), key=lambda item: -item[1] )
    # Show the top three matches.
    for index, item in enumerate(results):
        if index < 3:
            print '#',index, ')',item[1]," - ",documents[item[0]]
    print
print('DONE')
print('~')
| [
"noreply@github.com"
] | gputti.noreply@github.com |
b850ab3c55f599b7beb9478e17b8427f431b5535 | 1d5808310d163a101b958c2a1abd1a0f5991bab4 | /newbot.py | 50c7b55c50f1b605247addd6bf9ba5c0cbe3fb8b | [] | no_license | sachmo99/Chatbot | c16af64bd0f4fac1a08b0d0c84ae6986a70c48bb | bfece13b1459b45ddea49a4f7ab8714a5287a404 | refs/heads/master | 2021-07-13T01:26:27.004902 | 2017-10-15T18:12:04 | 2017-10-15T18:12:04 | 107,031,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from __future__ import unicode_literals
from hackathon import *
lastUpdateID = None
while True:
updates = getUpdates(lastUpdateID)
if len(updates["result"]) > 0:
lastUpdateID = getLatestUpdateID(updates) + 1
for update in updates["result"]:
message,who = extract(update)
try:
print "from", who, "message", message
except:
pass
reply = process(message)
sendMessage(reply,who)
print "reply sent" | [
"noreply@github.com"
] | sachmo99.noreply@github.com |
6825f758821f16bfda3455641993ecc7b49aac55 | 07504838d12c6328da093dce3726e8ed096cecdb | /pylon/resources/properties/regName.py | e07a63dcddcc3f32ae30609575ccc6655d8246e0 | [] | no_license | lcoppa/fiat-lux | 9caaa7f3105e692a149fdd384ec590676f06bf00 | 7c166bcc08768da67c241078b397570de159e240 | refs/heads/master | 2020-04-04T02:47:19.917668 | 2013-10-10T10:22:51 | 2013-10-10T10:22:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | """regName standard property type, originally defined in resource file set
standard 00:00:00:00:00:00:00:00-0."""
# Copyright (C) 2013 Echelon Corporation. All Rights Reserved.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software" to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# This file is generated from device resource files using an automated
# database to source code conversion process. Grammar and punctuation within
# the embedded documentation may not be correct, as this data is gathered and
# combined from several sources. The machine-generated code may not meet
# compliance with PEP-8 and PEP-257 recommendations at all times.
# Generated at 23-Sep-2013 09:14.
import pylon.resources.datapoints.str_asc
from pylon.resources.standard import standard
class regName(pylon.resources.datapoints.str_asc.str_asc):
    """regName standard property type.

    Register name: the name of a utility data logger register device.
    Inherits the ASCII-string datapoint type ``str_asc``.
    """

    def __init__(self):
        super().__init__(
        )
        # Default value: an all-zero (empty) string buffer.
        self._default_bytes = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
                              b'\x00\x00\x00\x00\x00\x00\x00\x00'
        # Name as defined in the standard resource file set.
        self._original_name = 'SCPTregName'
        # Standard property scope 0, key 163.
        self._property_scope, self._property_key = 0, 163
        # Add this property type to the 'standard' resource set.
        self._definition = standard.add(self)
self._definition = standard.add(self)
if __name__ == '__main__':
    # Smoke test: constructing the property type exercises __init__, which
    # also adds it to the 'standard' resource set via standard.add().
    item = regName()
    pass
| [
"lcoppa@rocketmail.com"
] | lcoppa@rocketmail.com |
cbf50215b26fbe9f380591f173308297e88f2266 | 13800b7827598e76428a335559b7bf11867ec2f0 | /python/ccxt/ascendex.py | 56a109c86e3224ca20517a5090a88d97352d3dcc | [
"MIT"
] | permissive | ccxt/ccxt | b40a0466f5c430a3c0c6026552ae697aa80ba6c6 | e4065f6a490e6fc4dd7a72b375428b2faa570668 | refs/heads/master | 2023-09-04T03:41:29.787733 | 2023-09-03T19:25:57 | 2023-09-03T19:25:57 | 91,253,698 | 30,798 | 8,190 | MIT | 2023-09-14T21:59:09 | 2017-05-14T15:41:56 | Python | UTF-8 | Python | false | false | 132,963 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.abstract.ascendex import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ascendex(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(ascendex, self).describe(), {
'id': 'ascendex',
'name': 'AscendEX',
'countries': ['SG'], # Singapore
# 8 requests per minute = 0.13333 per second => rateLimit = 750
# testing 400 works
'rateLimit': 400,
'certified': False,
'pro': True,
# new metainfo interface
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': False,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createPostOnlyOrder': True,
'createReduceOnlyOrder': True,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDepositAddresses': False,
'fetchDepositAddressesByNetwork': False,
'fetchDeposits': True,
'fetchDepositsWithdrawals': True,
'fetchDepositWithdrawFee': 'emulated',
'fetchDepositWithdrawFees': True,
'fetchFundingHistory': False,
'fetchFundingRate': 'emulated',
'fetchFundingRateHistory': False,
'fetchFundingRates': True,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': True,
'fetchMarginMode': False,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchPosition': False,
'fetchPositionMode': False,
'fetchPositions': True,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransactionFee': False,
'fetchTransactionFees': False,
'fetchTransactions': 'emulated',
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawal': False,
'fetchWithdrawals': True,
'reduceMargin': True,
'setLeverage': True,
'setMarginMode': True,
'setPositionMode': False,
'transfer': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': '1d',
'1w': '1w',
'1M': '1m',
},
'version': 'v2',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/112027508-47984600-8b48-11eb-9e17-d26459cc36c6.jpg',
'api': {
'rest': 'https://ascendex.com',
},
'test': {
'rest': 'https://api-test.ascendex-sandbox.com',
},
'www': 'https://ascendex.com',
'doc': [
'https://ascendex.github.io/ascendex-pro-api/#ascendex-pro-api-documentation',
],
'fees': 'https://ascendex.com/en/feerate/transactionfee-traderate',
'referral': {
'url': 'https://ascendex.com/en-us/register?inviteCode=EL6BXBQM',
'discount': 0.25,
},
},
'api': {
'v1': {
'public': {
'get': {
'assets': 1,
'products': 1,
'ticker': 1,
'barhist/info': 1,
'barhist': 1,
'depth': 1,
'trades': 1,
'cash/assets': 1, # not documented
'cash/products': 1, # not documented
'margin/assets': 1, # not documented
'margin/products': 1, # not documented
'futures/collateral': 1,
'futures/contracts': 1,
'futures/ref-px': 1,
'futures/market-data': 1,
'futures/funding-rates': 1,
'risk-limit-info': 1,
'exchange-info': 1,
},
},
'private': {
'get': {
'info': 1,
'wallet/transactions': 1,
'wallet/deposit/address': 1, # not documented
'data/balance/snapshot': 1,
'data/balance/history': 1,
},
'accountCategory': {
'get': {
'balance': 1,
'order/open': 1,
'order/status': 1,
'order/hist/current': 1,
'risk': 1,
},
'post': {
'order': 1,
'order/batch': 1,
},
'delete': {
'order': 1,
'order/all': 1,
'order/batch': 1,
},
},
'accountGroup': {
'get': {
'cash/balance': 1,
'margin/balance': 1,
'margin/risk': 1,
'futures/collateral-balance': 1,
'futures/position': 1,
'futures/risk': 1,
'futures/funding-payments': 1,
'order/hist': 1,
'spot/fee': 1,
},
'post': {
'transfer': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
},
},
},
},
'v2': {
'public': {
'get': {
'assets': 1,
'futures/contract': 1,
'futures/collateral': 1,
'futures/pricing-data': 1,
'futures/ticker': 1,
'risk-limit-info': 1,
},
},
'private': {
'data': {
'get': {
'order/hist': 1,
},
},
'get': {
'account/info': 1,
},
'accountGroup': {
'get': {
'order/hist': 1,
'futures/position': 1,
'futures/free-margin': 1,
'futures/order/hist/current': 1,
'futures/order/open': 1,
'futures/order/status': 1,
},
'post': {
'futures/isolated-position-margin': 1,
'futures/margin-type': 1,
'futures/leverage': 1,
'futures/transfer/deposit': 1,
'futures/transfer/withdraw': 1,
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/open': 1,
'subuser/subuser-transfer': 1,
'subuser/subuser-transfer-hist': 1,
},
'delete': {
'futures/order': 1,
'futures/order/batch': 1,
'futures/order/all': 1,
},
},
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
},
'precisionMode': TICK_SIZE,
'options': {
'account-category': 'cash', # 'cash', 'margin', 'futures' # obsolete
'account-group': None,
'fetchClosedOrders': {
'method': 'v2PrivateDataGetOrderHist', # 'v1PrivateAccountGroupGetAccountCategoryOrderHistCurrent'
},
'defaultType': 'spot', # 'spot', 'margin', 'swap'
'accountsByType': {
'spot': 'cash',
'swap': 'futures',
'future': 'futures',
'margin': 'margin',
},
'transfer': {
'fillResponseFromRequest': True,
},
'networks': {
'BSC': 'BEP20(BSC)',
'ARB': 'arbitrum',
'SOL': 'Solana',
'AVAX': 'avalanche C chain',
'OMNI': 'Omni',
},
'networksById': {
'BEP20(BSC)': 'BSC',
'arbitrum': 'ARB',
'Solana': 'SOL',
'avalanche C chain': 'AVAX',
'Omni': 'OMNI',
},
},
'exceptions': {
'exact': {
# not documented
'1900': BadRequest, # {"code":1900,"message":"Invalid Http Request Input"}
'2100': AuthenticationError, # {"code":2100,"message":"ApiKeyFailure"}
'5002': BadSymbol, # {"code":5002,"message":"Invalid Symbol"}
'6001': BadSymbol, # {"code":6001,"message":"Trading is disabled on symbol."}
'6010': InsufficientFunds, # {'code': 6010, 'message': 'Not enough balance.'}
'60060': InvalidOrder, # {'code': 60060, 'message': 'The order is already filled or canceled.'}
'600503': InvalidOrder, # {"code":600503,"message":"Notional is too small."}
# documented
'100001': BadRequest, # INVALID_HTTP_INPUT Http request is invalid
'100002': BadRequest, # DATA_NOT_AVAILABLE Some required data is missing
'100003': BadRequest, # KEY_CONFLICT The same key exists already
'100004': BadRequest, # INVALID_REQUEST_DATA The HTTP request contains invalid field or argument
'100005': BadRequest, # INVALID_WS_REQUEST_DATA Websocket request contains invalid field or argument
'100006': BadRequest, # INVALID_ARGUMENT The arugment is invalid
'100007': BadRequest, # ENCRYPTION_ERROR Something wrong with data encryption
'100008': BadSymbol, # SYMBOL_ERROR Symbol does not exist or not valid for the request
'100009': AuthenticationError, # AUTHORIZATION_NEEDED Authorization is require for the API access or request
'100010': BadRequest, # INVALID_OPERATION The action is invalid or not allowed for the account
'100011': BadRequest, # INVALID_TIMESTAMP Not a valid timestamp
'100012': BadRequest, # INVALID_STR_FORMAT str format does not
'100013': BadRequest, # INVALID_NUM_FORMAT Invalid number input
'100101': ExchangeError, # UNKNOWN_ERROR Some unknown error
'150001': BadRequest, # INVALID_JSON_FORMAT Require a valid json object
'200001': AuthenticationError, # AUTHENTICATION_FAILED Authorization failed
'200002': ExchangeError, # TOO_MANY_ATTEMPTS Tried and failed too many times
'200003': ExchangeError, # ACCOUNT_NOT_FOUND Account not exist
'200004': ExchangeError, # ACCOUNT_NOT_SETUP Account not setup properly
'200005': ExchangeError, # ACCOUNT_ALREADY_EXIST Account already exist
'200006': ExchangeError, # ACCOUNT_ERROR Some error related with error
'200007': ExchangeError, # CODE_NOT_FOUND
'200008': ExchangeError, # CODE_EXPIRED Code expired
'200009': ExchangeError, # CODE_MISMATCH Code does not match
'200010': AuthenticationError, # PASSWORD_ERROR Wrong assword
'200011': ExchangeError, # CODE_GEN_FAILED Do not generate required code promptly
'200012': ExchangeError, # FAKE_COKE_VERIFY
'200013': ExchangeError, # SECURITY_ALERT Provide security alert message
'200014': PermissionDenied, # RESTRICTED_ACCOUNT Account is restricted for certain activity, such, or withdraw.
'200015': PermissionDenied, # PERMISSION_DENIED No enough permission for the operation
'300001': InvalidOrder, # INVALID_PRICE Order price is invalid
'300002': InvalidOrder, # INVALID_QTY Order size is invalid
'300003': InvalidOrder, # INVALID_SIDE Order side is invalid
'300004': InvalidOrder, # INVALID_NOTIONAL Notional is too small or too large
'300005': InvalidOrder, # INVALID_TYPE Order typs is invalid
'300006': InvalidOrder, # INVALID_ORDER_ID Order id is invalid
'300007': InvalidOrder, # INVALID_TIME_IN_FORCE Time In Force in order request is invalid
'300008': InvalidOrder, # INVALID_ORDER_PARAMETER Some order parameter is invalid
'300009': InvalidOrder, # TRADING_VIOLATION Trading violation on account or asset
'300011': InsufficientFunds, # INVALID_BALANCE No enough account or asset balance for the trading
'300012': BadSymbol, # INVALID_PRODUCT Not a valid product supported by exchange
'300013': InvalidOrder, # INVALID_BATCH_ORDER Some or all orders are invalid in batch order request
'300014': InvalidOrder, # {"code":300014,"message":"Order price doesn't conform to the required tick size: 0.1","reason":"TICK_SIZE_VIOLATION"}
'300020': InvalidOrder, # TRADING_RESTRICTED There is some trading restriction on account or asset
'300021': InvalidOrder, # TRADING_DISABLED Trading is disabled on account or asset
'300031': InvalidOrder, # NO_MARKET_PRICE No market price for market type order trading
'310001': InsufficientFunds, # INVALID_MARGIN_BALANCE No enough margin balance
'310002': InvalidOrder, # INVALID_MARGIN_ACCOUNT Not a valid account for margin trading
'310003': InvalidOrder, # MARGIN_TOO_RISKY Leverage is too high
'310004': BadSymbol, # INVALID_MARGIN_ASSET This asset does not support margin trading
'310005': InvalidOrder, # INVALID_REFERENCE_PRICE There is no valid reference price
'510001': ExchangeError, # SERVER_ERROR Something wrong with server.
'900001': ExchangeError, # HUMAN_CHALLENGE Human change do not pass
},
'broad': {},
},
'commonCurrencies': {
'BOND': 'BONDED',
'BTCBEAR': 'BEAR',
'BTCBULL': 'BULL',
'BYN': 'BeyondFi',
'PLN': 'Pollen',
},
})
def get_account(self, params={}):
    """
    Return the capitalized name of the current (or explicitly requested) sub-account.

    :param dict [params]: may contain an 'account' entry overriding the exchange-wide option
    :returns str: the account name with its first letter capitalized, e.g. 'Cash'
    """
    # Bug fix: the options dict built in describe() does not define an
    # 'account' key, so the original self.options['account'] raised KeyError
    # whenever the caller did not pass params['account'].  Use a safe lookup
    # instead; 'cash' mirrors the 'account-category' default in the options.
    account = self.safe_value(params, 'account', self.safe_string(self.options, 'account', 'cash'))
    lowercaseAccount = account.lower()
    return self.capitalize(lowercaseAccount)
def fetch_currencies(self, params={}):
    """
    fetches all available currencies on an exchange
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: an associative dictionary of currencies
    """
    # Three endpoints contribute per-asset metadata, each answering with
    # {"code":0,"data":[{"assetCode":...,"nativeScale":...,"withdrawFee"/"withdrawalFee":...,
    #  "minWithdrawalAmt":...,"status"/"statusCode":"Normal",...},...]}
    # Margin rows additionally carry "borrowAssetCode"/"interestAssetCode".
    assets = self.v1PublicGetAssets(params)
    margin = self.v1PublicGetMarginAssets(params)
    cash = self.v1PublicGetCashAssets(params)
    # merge the three lists keyed by assetCode; later sources override earlier ones
    # (order matters: assets <- margin <- cash, same as the original implementation)
    mergedById = self.deep_extend(
        self.index_by(self.safe_value(assets, 'data', []), 'assetCode'),
        self.index_by(self.safe_value(margin, 'data', []), 'assetCode'),
        self.index_by(self.safe_value(cash, 'data', []), 'assetCode'),
    )
    result = {}
    for currencyId in list(mergedById.keys()):
        entry = mergedById[currencyId]
        code = self.safe_currency_code(currencyId)
        scale = self.safe_string_2(entry, 'precisionScale', 'nativeScale')
        precision = self.parse_number(self.parse_precision(scale))
        result[code] = {
            'id': currencyId,
            'code': code,
            'info': entry,
            'type': None,
            # only margin-capable assets carry a borrowAssetCode
            'margin': ('borrowAssetCode' in entry),
            'name': self.safe_string(entry, 'assetName'),
            'active': (self.safe_string_2(entry, 'status', 'statusCode') == 'Normal'),
            'deposit': None,
            'withdraw': None,
            'fee': self.safe_number_2(entry, 'withdrawFee', 'withdrawalFee'),
            'precision': precision,
            'limits': {
                'amount': {
                    'min': precision,
                    'max': None,
                },
                'withdraw': {
                    'min': self.safe_number(entry, 'minWithdrawalAmt'),
                    'max': None,
                },
            },
            'networks': {},
        }
    return result
def fetch_markets(self, params={}):
    """
    retrieves data on all markets for ascendex
    :param dict [params]: extra parameters specific to the exchange api endpoint
    :returns dict[]: an array of objects representing market data
    """
    # Three endpoints contribute market metadata:
    #   v1 products         -> generic trading rules per symbol(tickSize, lotSize, marginTradable, ...)
    #   v1 cash/products    -> spot products(minQty/maxQty, min/maxNotional, statusCode, domain, ...)
    #   v2 futures/contract -> perpetuals(settlementAsset, underlying, priceFilter, lotSizeFilter, ...)
    products = self.v1PublicGetProducts(params)
    cash = self.v1PublicGetCashProducts(params)
    perpetuals = self.v2PublicGetFuturesContract(params)
    productsById = self.index_by(self.safe_value(products, 'data', []), 'symbol')
    spotAndSwap = self.array_concat(self.safe_value(cash, 'data', []), self.safe_value(perpetuals, 'data', []))
    # merge keyed by symbol; cash/perpetual entries override the generic product entries
    dataById = self.deep_extend(productsById, self.index_by(spotAndSwap, 'symbol'))
    result = []
    for marketId in list(dataById.keys()):
        entry = dataById[marketId]
        settleId = self.safe_value(entry, 'settlementAsset')
        settle = self.safe_currency_code(settleId)
        status = self.safe_string(entry, 'status')
        domain = self.safe_string(entry, 'domain')
        # leveraged-ETF products are excluded even when their status is tradeable
        active = ((status == 'Normal') or (status == 'InternalTrading')) and (domain != 'LeveragedETF')
        spot = settle is None  # only futures contracts carry a settlementAsset
        swap = not spot
        linear = True if swap else None
        minQty = self.safe_number(entry, 'minQty')
        maxQty = self.safe_number(entry, 'maxQty')
        minPrice = self.safe_number(entry, 'tickSize')
        maxPrice = None
        # perpetual ids look like 'BTC-PERP'; the base/quote pair lives in 'underlying'
        underlying = self.safe_string_2(entry, 'underlying', 'symbol')
        parts = underlying.split('/')
        baseId = self.safe_string(parts, 0)
        quoteId = self.safe_string(parts, 1)
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote
        if swap:
            # contracts carry their own nested qty/price filters
            lotSizeFilter = self.safe_value(entry, 'lotSizeFilter')
            minQty = self.safe_number(lotSizeFilter, 'minQty')
            maxQty = self.safe_number(lotSizeFilter, 'maxQty')
            priceFilter = self.safe_value(entry, 'priceFilter')
            minPrice = self.safe_number(priceFilter, 'minPrice')
            maxPrice = self.safe_number(priceFilter, 'maxPrice')
            symbol = base + '/' + quote + ':' + settle
        fee = self.safe_number(entry, 'commissionReserveRate')
        marginTradable = self.safe_value(entry, 'marginTradable', False)
        result.append({
            'id': marketId,
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'settle': settle,
            'baseId': baseId,
            'quoteId': quoteId,
            'settleId': settleId,
            'type': 'swap' if swap else 'spot',
            'spot': spot,
            'margin': marginTradable if spot else None,
            'swap': swap,
            'future': False,
            'option': False,
            'active': active,
            'contract': swap,
            'linear': linear,
            'inverse': not linear if swap else None,
            'taker': fee,
            'maker': fee,
            'contractSize': self.parse_number('1') if swap else None,
            'expiry': None,
            'expiryDatetime': None,
            'strike': None,
            'optionType': None,
            'precision': {
                'amount': self.safe_number(entry, 'lotSize'),
                'price': self.safe_number(entry, 'tickSize'),
            },
            'limits': {
                'leverage': {
                    'min': None,
                    'max': None,
                },
                'amount': {
                    'min': minQty,
                    'max': maxQty,
                },
                'price': {
                    'min': minPrice,
                    'max': maxPrice,
                },
                'cost': {
                    'min': self.safe_number(entry, 'minNotional'),
                    'max': self.safe_number(entry, 'maxNotional'),
                },
            },
            'info': entry,
        })
    return result
def fetch_time(self, params={}):
    """
    fetches the current integer timestamp in milliseconds from the ascendex server
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns int: the current integer timestamp in milliseconds from the ascendex server
    """
    # the endpoint echoes our requestTime and reports when the server received it:
    # {"code":0,"data":{"requestTimeEcho":...,"requestReceiveAt":...,"latency":...}}
    request = {
        'requestTime': self.milliseconds(),
    }
    response = self.v1PublicGetExchangeInfo(self.extend(request, params))
    data = self.safe_value(response, 'data')
    # the server-side receive timestamp is the server's notion of "now"
    return self.safe_integer(data, 'requestReceiveAt')
def fetch_accounts(self, params={}):
    """
    fetch all the accounts associated with a profile
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a dictionary of `account structures <https://github.com/ccxt/ccxt/wiki/Manual#account-structure>` indexed by the account type
    """
    # the account group is cached in self.options after the first lookup;
    # when cached, no request is made and 'info' is None
    accountGroup = self.safe_string(self.options, 'account-group')
    response = None
    if accountGroup is None:
        response = self.v1PrivateGetInfo(params)
        # {"code":0,"data":{"accountGroup":8,"cashAccount":[...],"marginAccount":[...],
        #  "futuresAccount":[...],"userUID":"U...",...}}
        data = self.safe_value(response, 'data', {})
        accountGroup = self.safe_string(data, 'accountGroup')
        self.options['account-group'] = accountGroup
    account = {
        'id': accountGroup,
        'type': None,
        'currency': None,
        'info': response,
    }
    return [account]
def parse_balance(self, response):
    """Convert a cash-account balance response into a unified balance structure."""
    # rows look like {'asset': 'BCHSV', 'totalBalance': '...', 'availableBalance': '...'}
    now = self.milliseconds()
    result = {
        'info': response,
        'timestamp': now,
        'datetime': self.iso8601(now),
    }
    rows = self.safe_value(response, 'data', [])
    for row in rows:
        code = self.safe_currency_code(self.safe_string(row, 'asset'))
        account = self.account()
        account['free'] = self.safe_string(row, 'availableBalance')
        account['total'] = self.safe_string(row, 'totalBalance')
        result[code] = account
    return self.safe_balance(result)
def parse_margin_balance(self, response):
    """Convert a margin-account balance response into a unified balance structure."""
    # rows additionally carry 'borrowed' and 'interest', which together make up the debt
    now = self.milliseconds()
    result = {
        'info': response,
        'timestamp': now,
        'datetime': self.iso8601(now),
    }
    rows = self.safe_value(response, 'data', [])
    for row in rows:
        code = self.safe_currency_code(self.safe_string(row, 'asset'))
        account = self.account()
        account['free'] = self.safe_string(row, 'availableBalance')
        account['total'] = self.safe_string(row, 'totalBalance')
        borrowed = self.safe_string(row, 'borrowed')
        interest = self.safe_string(row, 'interest')
        account['debt'] = Precise.string_add(borrowed, interest)
        result[code] = account
    return self.safe_balance(result)
def parse_swap_balance(self, response):
    """Convert a futures-account position response into a unified balance structure."""
    # the futures endpoint nests balances under data.collaterals:
    # {'asset': 'USDT', 'balance': '...', 'referencePrice': '...', 'discountFactor': '...'}
    now = self.milliseconds()
    result = {
        'info': response,
        'timestamp': now,
        'datetime': self.iso8601(now),
    }
    data = self.safe_value(response, 'data', {})
    for collateral in self.safe_value(data, 'collaterals', []):
        code = self.safe_currency_code(self.safe_string(collateral, 'asset'))
        account = self.account()
        # only the total is reported; free/used are left for safe_balance to infer
        account['total'] = self.safe_string(collateral, 'balance')
        result[code] = account
    return self.safe_balance(result)
def fetch_balance(self, params={}):
    """
    query for balance and get the amount of funds available for trading or funds locked in orders
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
    """
    self.load_markets()
    self.load_accounts()
    marketType, query = self.handle_market_type_and_params('fetchBalance', None, params)
    # a truthy 'margin' param forces the margin account regardless of marketType
    if self.safe_value(params, 'margin', False):
        marketType = 'margin'
    # NOTE(review): 'margin' is omitted from params but not from query, so it may
    # still be forwarded to the endpoint with the request — confirm against upstream
    params = self.omit(params, 'margin')
    options = self.safe_value(self.options, 'fetchBalance', {})
    accountsByType = self.safe_value(self.options, 'accountsByType', {})
    accountCategory = self.safe_string(accountsByType, marketType, 'cash')
    account = self.safe_value(self.accounts, 0, {})
    accountGroup = self.safe_string(account, 'id')
    request = {
        'account-group': accountGroup,
    }
    # spot and margin share the account-category balance endpoint;
    # swap balances come back embedded in the futures position response
    defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetBalance')
    method = self.get_supported_mapping(marketType, {
        'spot': defaultMethod,
        'margin': defaultMethod,
        'swap': 'v2PrivateAccountGroupGetFuturesPosition',
    })
    if (accountCategory == 'cash') or (accountCategory == 'margin'):
        request['account-category'] = accountCategory
    response = getattr(self, method)(self.extend(request, query))
    # cash/margin: {'code': 0, 'data': [{'asset': ..., 'totalBalance': ..., 'availableBalance': ..., <'borrowed'/'interest' for margin>}]}
    # swap:       {'code': 0, 'data': {'collaterals': [{'asset': ..., 'balance': ..., ...}], ...}}
    if marketType == 'swap':
        return self.parse_swap_balance(response)
    if marketType == 'margin':
        return self.parse_margin_balance(response)
    return self.parse_balance(response)
def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
    """
    fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
    :param str symbol: unified symbol of the market to fetch the order book for
    :param int [limit]: the maximum amount of order book entries to return(accepted for interface
        compatibility but not forwarded to the endpoint)
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
    }
    response = self.v1PublicGetDepth(self.extend(request, params))
    # the snapshot is double-nested:
    # {"code":0,"data":{"m":"depth-snapshot","symbol":...,
    #   "data":{"ts":...,"seqnum":...,"asks":[[price,size],...],"bids":[[price,size],...]}}}
    outer = self.safe_value(response, 'data', {})
    orderbook = self.safe_value(outer, 'data', {})
    timestamp = self.safe_integer(orderbook, 'ts')
    result = self.parse_order_book(orderbook, symbol, timestamp)
    # expose the sequence number so websocket deltas can be applied on top
    result['nonce'] = self.safe_integer(orderbook, 'seqnum')
    return result
def parse_ticker(self, ticker, market=None):
    """Convert a raw ascendex ticker into a unified ticker structure."""
    # {"symbol":"QTUM/BTC","open":"...","close":"...","high":"...","low":"...",
    #  "volume":"...","ask":[price,size],"bid":[price,size],"type":"spot"}
    marketId = self.safe_string(ticker, 'symbol')
    type = self.safe_string(ticker, 'type')
    # spot ids use a '/' delimiter('QTUM/BTC'); derivative ids do not('BTC-PERP')
    delimiter = '/' if (type == 'spot') else None
    symbol = self.safe_symbol(marketId, market, delimiter)
    bid = self.safe_value(ticker, 'bid', [])
    ask = self.safe_value(ticker, 'ask', [])
    lastPrice = self.safe_string(ticker, 'close')
    return self.safe_ticker({
        'symbol': symbol,
        'timestamp': None,  # the endpoint does not report a timestamp
        'datetime': None,
        'high': self.safe_string(ticker, 'high'),
        'low': self.safe_string(ticker, 'low'),
        'bid': self.safe_string(bid, 0),
        'bidVolume': self.safe_string(bid, 1),
        'ask': self.safe_string(ask, 0),
        'askVolume': self.safe_string(ask, 1),
        'vwap': None,
        'open': self.safe_string(ticker, 'open'),
        'close': lastPrice,
        'last': lastPrice,
        'previousClose': None,  # previous day close
        'change': None,
        'percentage': None,
        'average': None,
        'baseVolume': self.safe_string(ticker, 'volume'),
        'quoteVolume': None,
        'info': ticker,
    }, market)
def fetch_ticker(self, symbol: str, params={}):
    """
    fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
    :param str symbol: unified symbol of the market to fetch the ticker for
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
    }
    response = self.v1PublicGetTicker(self.extend(request, params))
    # {"code":0,"data":{"symbol":"BTC-PERP","open":...,"close":...,"high":...,"low":...,
    #  "volume":...,"ask":[...],"bid":[...],"type":"derivatives"|"spot"}}
    data = self.safe_value(response, 'data', {})
    return self.parse_ticker(data, market)
def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
    """
    fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
    see https://ascendex.github.io/ascendex-pro-api/#ticker
    see https://ascendex.github.io/ascendex-futures-pro-api-v2/#ticker
    :param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
    """
    self.load_markets()
    request = {}
    market = None
    if symbols is not None:
        # the first requested symbol decides which endpoint(spot vs futures) is used
        firstSymbol = self.safe_value(symbols, 0)
        market = self.market(firstSymbol)
        request['symbol'] = ','.join(self.market_ids(symbols))
    type, params = self.handle_market_type_and_params('fetchTickers', market, params)
    if type == 'spot':
        response = self.v1PublicGetTicker(self.extend(request, params))
    else:
        response = self.v2PublicGetFuturesTicker(self.extend(request, params))
    # {"code":0,"data":[{"symbol":"QTUM/BTC","open":...,"close":...,"high":...,"low":...,
    #  "volume":...,"ask":[...],"bid":[...],"type":"spot"},...]}
    data = self.safe_value(response, 'data', [])
    # a single-symbol query answers with a bare object instead of a list
    tickers = data if isinstance(data, list) else [data]
    return self.parse_tickers(tickers, symbols)
def parse_ohlcv(self, ohlcv, market=None):
    """Convert one raw bar into the unified [timestamp, open, high, low, close, volume] list."""
    # raw bar: {"m":"bar","s":"BTC/USDT",
    #           "data":{"i":"1","ts":1590228000000,"o":"...","c":"...","h":"...","l":"...","v":"..."}}
    bar = self.safe_value(ohlcv, 'data', {})
    return [
        self.safe_integer(bar, 'ts'),
        self.safe_number(bar, 'o'),
        self.safe_number(bar, 'h'),
        self.safe_number(bar, 'l'),
        self.safe_number(bar, 'c'),
        self.safe_number(bar, 'v'),
    ]
def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
    """
    fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
    :param str symbol: unified symbol of the market to fetch OHLCV data for
    :param str timeframe: the length of time each candle represents
    :param int [since]: timestamp in ms of the earliest candle to fetch
    :param int [limit]: the maximum amount of candles to fetch
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns int[][]: A list of candles ordered, open, high, low, close, volume
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
        'interval': self.safe_string(self.timeframes, timeframe, timeframe),
    }
    # without since and limit the exchange only returns the single latest candle
    duration = self.parse_timeframe(timeframe)
    options = self.safe_value(self.options, 'fetchOHLCV', {})
    defaultLimit = self.safe_integer(options, 'limit', 500)
    if since is not None:
        request['from'] = since
        limit = defaultLimit if (limit is None) else min(limit, defaultLimit)
        # window end = since + limit candles worth of milliseconds, plus 1 ms
        request['to'] = self.sum(since, limit * duration * 1000, 1)
    elif limit is not None:
        request['n'] = limit  # max 500
    response = self.v1PublicGetBarhist(self.extend(request, params))
    # {"code":0,"data":[{"m":"bar","s":"BTC/USDT","data":{"i":"1","ts":...,"o":...,"c":...,"h":...,"l":...,"v":...}},...]}
    data = self.safe_value(response, 'data', [])
    return self.parse_ohlcvs(data, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
    """Convert a raw public trade into a unified trade structure."""
    # public fetchTrades row:
    # {"p":"9128.5","q":"0.0030","ts":1590229002385,"bm":false,"seqnum":...}
    # "bm" is True when the buyer was the market maker; it is the only hint of the
    # taker side for public trades(buyer-is-maker => the taker sold)
    timestamp = self.safe_integer(trade, 'ts')
    price = self.safe_string_2(trade, 'price', 'p')
    amount = self.safe_string(trade, 'q')
    buyerIsMaker = self.safe_value(trade, 'bm', False)
    market = self.safe_market(None, market)
    return self.safe_trade({
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': market['symbol'],
        'id': None,
        'order': None,
        'type': None,
        'takerOrMaker': None,
        'side': 'sell' if buyerIsMaker else 'buy',
        'price': price,
        'amount': amount,
        'cost': None,
        'fee': None,
    }, market)
def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
    """
    get the list of most recent trades for a particular symbol
    see https://ascendex.github.io/ascendex-pro-api/#market-trades
    :param str symbol: unified symbol of the market to fetch trades for
    :param int [since]: timestamp in ms of the earliest trade to fetch(not forwarded to the endpoint;
        applied client-side by parse_trades)
    :param int [limit]: the maximum amount of trades to fetch
    :param dict [params]: extra parameters specific to the ascendex api endpoint
    :returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
    """
    self.load_markets()
    market = self.market(symbol)
    request = {
        'symbol': market['id'],
    }
    if limit is not None:
        request['n'] = limit  # max 100
    response = self.v1PublicGetTrades(self.extend(request, params))
    # {"code":0,"data":{"m":"trades","symbol":"BTC-PERP",
    #  "data":[{"p":"9128.5","q":"0.0030","ts":...,"bm":false,"seqnum":...},...]}}
    records = self.safe_value(response, 'data', [])
    trades = self.safe_value(records, 'data', [])
    return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
    """Map an ascendex order status string to a unified ccxt status; unknown values pass through unchanged."""
    return self.safe_string({
        'PendingNew': 'open',
        'New': 'open',
        'PartiallyFilled': 'open',
        'Filled': 'closed',
        'Canceled': 'canceled',
        'Rejected': 'rejected',
    }, status, status)
def parse_order(self, order, market=None):
    """
    Convert a raw ascendex order(from createOrder, fetchOrder, fetchOpenOrders or
    fetchClosedOrders, spot or futures) into a unified order structure.

    The raw shapes differ per endpoint; this parser probes the union of their fields:
    - createOrder(spot) returns only {"id", "orderId", "orderType", "symbol", "timestamp"}
    - futures/history rows additionally carry price/orderQty/side/status/stopPrice/execInst,
      fills via avgPx + cumFilledQty(or cumQty/fillQty), fees via cumFee/fee + feeAsset,
      and times via sendingTime/lastExecTime
    """
    status = self.parse_order_status(self.safe_string(order, 'status'))
    marketId = self.safe_string(order, 'symbol')
    symbol = self.safe_symbol(marketId, market, '/')
    timestamp = self.safe_integer_2(order, 'timestamp', 'sendingTime')
    lastTradeTimestamp = self.safe_integer(order, 'lastExecTime')
    if timestamp is None:
        # fall back to the last execution time when no creation time is reported
        timestamp = lastTradeTimestamp
    price = self.safe_string(order, 'price')
    amount = self.safe_string(order, 'orderQty')
    average = self.safe_string(order, 'avgPx')
    filled = self.safe_string_n(order, ['cumFilledQty', 'cumQty', 'fillQty'])
    id = self.safe_string(order, 'orderId')
    # 'id' carries the client order id; an empty string means it was not set
    clientOrderId = self.safe_string(order, 'id')
    if (clientOrderId is not None) and (len(clientOrderId) < 1):
        clientOrderId = None
    # StopLimit/StopMarket collapse to their plain counterparts;
    # the trigger itself is surfaced via stopPrice/triggerPrice
    type = self.safe_string_lower(order, 'orderType')
    if type == 'stoplimit':
        type = 'limit'
    elif type == 'stopmarket':
        type = 'market'
    side = self.safe_string_lower(order, 'side')
    fee = None
    feeCost = self.safe_number_2(order, 'cumFee', 'fee')
    if feeCost is not None:
        feeCurrencyCode = self.safe_currency_code(self.safe_string(order, 'feeAsset'))
        fee = {
            'cost': feeCost,
            'currency': feeCurrencyCode,
        }
    stopPrice = self.safe_number(order, 'stopPrice')
    # execInst encodes both flags: 'Post' for post-only, 'reduceOnly' for reduce-only
    execInst = self.safe_string(order, 'execInst')
    reduceOnly = True if (execInst == 'reduceOnly') else None
    postOnly = True if (execInst == 'Post') else None
    return self.safe_order({
        'info': order,
        'id': id,
        'clientOrderId': clientOrderId,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': lastTradeTimestamp,
        'symbol': symbol,
        'type': type,
        'timeInForce': None,
        'postOnly': postOnly,
        'reduceOnly': reduceOnly,
        'side': side,
        'price': price,
        'stopPrice': stopPrice,
        'triggerPrice': stopPrice,
        'amount': amount,
        'cost': None,
        'average': average,
        'filled': filled,
        'remaining': None,
        'status': status,
        'fee': fee,
        'trades': None,
    }, market)
def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `fee structures <https://github.com/ccxt/ccxt/wiki/Manual#fee-structure>` indexed by market symbols
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
response = self.v1PrivateAccountGroupGetSpotFee(self.extend(request, params))
#
# {
# code: '0',
# data: {
# domain: 'spot',
# userUID: 'U1479576458',
# vipLevel: '0',
# fees: [
# {symbol: 'HT/USDT', fee: {taker: '0.001', maker: '0.001'}},
# {symbol: 'LAMB/BTC', fee: {taker: '0.002', maker: '0.002'}},
# {symbol: 'STOS/USDT', fee: {taker: '0.002', maker: '0.002'}},
# ...
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
fees = self.safe_value(data, 'fees', [])
result = {}
for i in range(0, len(fees)):
fee = fees[i]
marketId = self.safe_string(fee, 'symbol')
symbol = self.safe_symbol(marketId, None, '/')
takerMaker = self.safe_value(fee, 'fee', {})
result[symbol] = {
'info': fee,
'symbol': symbol,
'maker': self.safe_number(takerMaker, 'maker'),
'taker': self.safe_number(takerMaker, 'taker'),
}
return result
    def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
        """
        Create an order on the exchange
        :param str symbol: Unified CCXT market symbol
        :param str type: "limit" or "market"
        :param str side: "buy" or "sell"
        :param float amount: the amount of currency to trade
        :param float [price]: *ignored in "market" orders* the price at which the order is to be fulfilled at in units of the quote currency
        :param dict [params]: Extra parameters specific to the exchange API endpoint
        :param str [params.timeInForce]: "GTC", "IOC", "FOK", or "PO"
        :param bool [params.postOnly]: True or False
        :param float [params.stopPrice]: The price at which a trigger order is triggered at
        :returns: `An order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
        """
        self.load_markets()
        self.load_accounts()
        market = self.market(symbol)
        marketType = None
        marketType, params = self.handle_market_type_and_params('createOrder', market, params)
        options = self.safe_value(self.options, 'createOrder', {})
        # map the unified market type(spot/margin/swap) onto the exchange account category
        accountsByType = self.safe_value(self.options, 'accountsByType', {})
        accountCategory = self.safe_string(accountsByType, marketType, 'cash')
        account = self.safe_value(self.accounts, 0, {})
        accountGroup = self.safe_value(account, 'id')
        clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
        request = {
            'account-group': accountGroup,
            'account-category': accountCategory,
            'symbol': market['id'],
            'time': self.milliseconds(),
            'orderQty': self.amount_to_precision(symbol, amount),
            'orderType': type, # limit, market, stop_market, stop_limit
            'side': side, # buy or sell,
            # 'execInst': # Post for postOnly, ReduceOnly for reduceOnly
            # 'respInst': 'ACK', # ACK, 'ACCEPT, DONE
        }
        # stop_market / stop_limit share the market / limit code paths below
        isMarketOrder = ((type == 'market') or (type == 'stop_market'))
        isLimitOrder = ((type == 'limit') or (type == 'stop_limit'))
        timeInForce = self.safe_string(params, 'timeInForce')
        postOnly = self.is_post_only(isMarketOrder, False, params)
        reduceOnly = self.safe_value(params, 'reduceOnly', False)
        stopPrice = self.safe_value_2(params, 'triggerPrice', 'stopPrice')
        # strip the unified params that were consumed above so they are not sent raw
        params = self.omit(params, ['timeInForce', 'postOnly', 'reduceOnly', 'stopPrice', 'triggerPrice'])
        if reduceOnly:
            # reduceOnly is only meaningful for perpetual contracts
            if marketType != 'swap':
                raise InvalidOrder(self.id + ' createOrder() does not support reduceOnly for ' + marketType + ' orders, reduceOnly orders are supported for perpetuals only')
            request['execInst'] = 'ReduceOnly'
        if isLimitOrder:
            request['orderPrice'] = self.price_to_precision(symbol, price)
        if timeInForce == 'IOC':
            request['timeInForce'] = 'IOC'
        if timeInForce == 'FOK':
            request['timeInForce'] = 'FOK'
        if postOnly:
            request['postOnly'] = True
        if stopPrice is not None:
            # a trigger price rewrites the plain order type into its stop_ variant
            request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
            if isLimitOrder:
                request['orderType'] = 'stop_limit'
            elif isMarketOrder:
                request['orderType'] = 'stop_market'
        if clientOrderId is not None:
            request['id'] = clientOrderId
        # spot/margin use the account-category endpoint, swap uses the futures endpoint
        defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryPostOrder')
        method = self.get_supported_mapping(marketType, {
            'spot': defaultMethod,
            'margin': defaultMethod,
            'swap': 'v2PrivateAccountGroupPostFuturesOrder',
        })
        if method == 'v1PrivateAccountCategoryPostOrder':
            if accountCategory is not None:
                request['category'] = accountCategory
        else:
            request['account-category'] = accountCategory
        response = getattr(self, method)(self.extend(request, params))
        #
        # spot
        #
        # {
        #     "code":0,
        #     "data": {
        #         "accountId":"cshwT8RKojkT1HoaA5UdeimR2SrmHG2I",
        #         "ac":"CASH",
        #         "action":"place-order",
        #         "status":"Ack",
        #         "info": {
        #             "symbol":"TRX/USDT",
        #             "orderType":"StopLimit",
        #             "timestamp":1654290662172,
        #             "id":"",
        #             "orderId":"a1812b6840ddU8191168955av0k6Eyhj"
        #         }
        #     }
        # }
        #
        # swap
        #
        # {
        #     "code":0,
        #     "data": {
        #         "meta": {
        #             "id":"",
        #             "action":"place-order",
        #             "respInst":"ACK"
        #         },
        #         "order": {
        #             "ac":"FUTURES",
        #             "accountId":"futwT8RKojkT1HoaA5UdeimR2SrmHG2I",
        #             "time":1654290969965,
        #             "orderId":"a1812b6cf322U8191168955oJamfTh7b",
        #             "seqNum":-1,
        #             "orderType":"StopLimit",
        #             "side":"Buy",
        #             "symbol":"TRX-PERP",
        #             "price":"0.083",
        #             "orderQty":"1",
        #             "stopPrice":"0.082",
        #             "status":"Ack",
        #             ... more order fields ...
        #         }
        #     }
        # }
        #
        data = self.safe_value(response, 'data', {})
        # the swap payload nests the order under "order", the spot payload under "info"
        order = self.safe_value_2(data, 'order', 'info', {})
        return self.parse_order(order, market)
def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
type, query = self.handle_market_type_and_params('fetchOrder', market, params)
options = self.safe_value(self.options, 'fetchOrder', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'orderId': id,
}
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderStatus')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderStatus',
})
if method == 'v1PrivateAccountCategoryGetOrderStatus':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryGetOrderStatus
#
# {
# "code": 0,
# "accountCategory": "CASH",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "data": [
# {
# "symbol": "BTC/USDT",
# "price": "8131.22",
# "orderQty": "0.00082",
# "orderType": "Market",
# "avgPx": "7392.02",
# "cumFee": "0.005152238",
# "cumFilledQty": "0.00082",
# "errorCode": "",
# "feeAsset": "USDT",
# "lastExecTime": 1575953151764,
# "orderId": "a16eee20b6750866943712zWEDdAjt3",
# "seqNum": 2623469,
# "side": "Buy",
# "status": "Filled",
# "stopPrice": "",
# "execInst": "NULL_VAL"
# }
# ]
# }
#
# AccountGroupGetFuturesOrderStatus
#
# {
# "code": 0,
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "ac": "FUTURES",
# "data": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640247020217,
# "orderId": "r17de65747aeU0711043490bbtcp0cmt",
# "seqNum": 28796162908,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "New",
# "lastExecTime": 1640247020232,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_order(data, market)
def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch open orders for
:param int [limit]: the maximum number of open orders structures to retrieve
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
symbol = market['symbol']
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
type, query = self.handle_market_type_and_params('fetchOpenOrders', market, params)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
}
options = self.safe_value(self.options, 'fetchOpenOrders', {})
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryGetOrderOpen')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderOpen',
})
if method == 'v1PrivateAccountCategoryGetOrderOpen':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryGetOrderOpen
#
# {
# "ac": "CASH",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "code": 0,
# "data": [
# {
# "avgPx": "0", # Average filled price of the order
# "cumFee": "0", # cumulative fee paid for self order
# "cumFilledQty": "0", # cumulative filled quantity
# "errorCode": "", # error code; could be empty
# "feeAsset": "USDT", # fee asset
# "lastExecTime": 1576019723550, # The last execution time of the order
# "orderId": "s16ef21882ea0866943712034f36d83", # server provided orderId
# "orderQty": "0.0083", # order quantity
# "orderType": "Limit", # order type
# "price": "7105", # order price
# "seqNum": 8193258, # sequence number
# "side": "Buy", # order side
# "status": "New", # order status on matching engine
# "stopPrice": "", # only available for stop market and stop limit orders; otherwise empty
# "symbol": "BTC/USDT",
# "execInst": "NULL_VAL" # execution instruction
# },
# ]
# }
#
# AccountGroupGetFuturesOrderOpen
#
# {
# "code": 0,
# "data": [
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640247020217,
# "orderId": "r17de65747aeU0711043490bbtcp0cmt",
# "seqNum": 28796162908,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "New",
# "lastExecTime": 1640247020232,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
if accountCategory == 'futures':
return self.parse_orders(data, market, since, limit)
# a workaround for https://github.com/ccxt/ccxt/issues/7187
orders = []
for i in range(0, len(data)):
order = self.parse_order(data[i], market)
orders.append(order)
return self.filter_by_symbol_since_limit(orders, symbol, since, limit)
def fetch_closed_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetches information on multiple closed orders made by the user
see https://ascendex.github.io/ascendex-pro-api/#list-history-orders-v2
:param str symbol: unified market symbol of the market orders were made in
:param int [since]: the earliest time in ms to fetch orders for
:param int [limit]: the maximum number of orde structures to retrieve
:param dict [params]: extra parameters specific to the ascendex api endpoint
:param int [params.until]: the latest time in ms to fetch orders for
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
# 'category': accountCategory,
# 'symbol': market['id'],
# 'orderType': 'market', # optional, string
# 'side': 'buy', # or 'sell', optional, case insensitive.
# 'status': 'Filled', # "Filled", "Canceled", or "Rejected"
# 'startTime': exchange.milliseconds(),
# 'endTime': exchange.milliseconds(),
# 'page': 1,
# 'pageSize': 100,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type, query = self.handle_market_type_and_params('fetchClosedOrders', market, params)
options = self.safe_value(self.options, 'fetchClosedOrders', {})
defaultMethod = self.safe_string(options, 'method', 'v2PrivateDataGetOrderHist')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupGetFuturesOrderHistCurrent',
})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash') # margin, futures
if method == 'v2PrivateDataGetOrderHist':
request['account'] = accountCategory
if limit is not None:
request['limit'] = limit
else:
request['account-category'] = accountCategory
if limit is not None:
request['pageSize'] = limit
if since is not None:
request['startTime'] = since
until = self.safe_string(params, 'until')
if until is not None:
request['endTime'] = until
response = getattr(self, method)(self.extend(request, query))
#
# accountCategoryGetOrderHistCurrent
#
# {
# "code":0,
# "accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda",
# "ac":"CASH",
# "data":[
# {
# "seqNum":15561826728,
# "orderId":"a17294d305c0U6491137460bethu7kw9",
# "symbol":"ETH/USDT",
# "orderType":"Limit",
# "lastExecTime":1591635618200,
# "price":"200",
# "orderQty":"0.1",
# "side":"Buy",
# "status":"Canceled",
# "avgPx":"0",
# "cumFilledQty":"0",
# "stopPrice":"",
# "errorCode":"",
# "cumFee":"0",
# "feeAsset":"USDT",
# "execInst":"NULL_VAL"
# }
# ]
# }
#
# {
# "code": 0,
# "data": [
# {
# "orderId" : "a173ad938fc3U22666567717788c3b66", # orderId
# "seqNum" : 18777366360, # sequence number
# "accountId" : "cshwSjbpPjSwHmxPdz2CPQVU9mnbzPpt", # accountId
# "symbol" : "BTC/USDT", # symbol
# "orderType" : "Limit", # order type(Limit/Market/StopMarket/StopLimit)
# "side" : "Sell", # order side(Buy/Sell)
# "price" : "11346.77", # order price
# "stopPrice" : "0", # stop price(0 by default)
# "orderQty" : "0.01", # order quantity(in base asset)
# "status" : "Canceled", # order status(Filled/Canceled/Rejected)
# "createTime" : 1596344995793, # order creation time
# "lastExecTime": 1596344996053, # last execution time
# "avgFillPrice": "11346.77", # average filled price
# "fillQty" : "0.01", # filled quantity(in base asset)
# "fee" : "-0.004992579", # cummulative fee. if negative, self value is the commission charged; if possitive, self value is the rebate received.
# "feeAsset" : "USDT" # fee asset
# }
# ]
# }
#
# accountGroupGetFuturesOrderHistCurrent
#
# {
# "code": 0,
# "data": [
# {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640245777002,
# "orderId": "r17de6444fa6U0711043490bbtcpJ2lI",
# "seqNum": 28796124902,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "Canceled",
# "lastExecTime": 1640246574886,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "USDT",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
isArray = isinstance(data, list)
if not isArray:
data = self.safe_value(data, 'data', [])
return self.parse_orders(data, market, since, limit)
def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
self.load_markets()
self.load_accounts()
market = self.market(symbol)
type, query = self.handle_market_type_and_params('cancelOrder', market, params)
options = self.safe_value(self.options, 'cancelOrder', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'symbol': market['id'],
'time': self.milliseconds(),
'id': 'foobar',
}
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrder')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupDeleteFuturesOrder',
})
if method == 'v1PrivateAccountCategoryDeleteOrder':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'id')
if clientOrderId is None:
request['orderId'] = id
else:
request['id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'id'])
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryDeleteOrder
#
# {
# "code": 0,
# "data": {
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "ac": "CASH",
# "action": "cancel-order",
# "status": "Ack",
# "info": {
# "id": "wv8QGquoeamhssvQBeHOHGQCGlcBjj23",
# "orderId": "16e6198afb4s8bXHbAwwoqDo2ebc19dc",
# "orderType": "", # could be empty
# "symbol": "ETH/USDT",
# "timestamp": 1573594877822
# }
# }
# }
#
# AccountGroupDeleteFuturesOrder
#
# {
# "code": 0,
# "data": {
# "meta": {
# "id": "foobar",
# "action": "cancel-order",
# "respInst": "ACK"
# },
# "order": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "time": 1640244480476,
# "orderId": "r17de63086f4U0711043490bbtcpPUF4",
# "seqNum": 28795959269,
# "orderType": "Limit",
# "execInst": "NULL_VAL",
# "side": "Buy",
# "symbol": "BTC-PERP",
# "price": "30000",
# "orderQty": "0.0021",
# "stopPrice": "0",
# "stopBy": "market",
# "status": "New",
# "lastExecTime": 1640244480491,
# "lastQty": "0",
# "lastPx": "0",
# "avgFilledPx": "0",
# "cumFilledQty": "0",
# "fee": "0",
# "cumFee": "0",
# "feeAsset": "BTCPC",
# "errorCode": "",
# "posStopLossPrice": "0",
# "posStopLossTrigger": "market",
# "posTakeProfitPrice": "0",
# "posTakeProfitTrigger": "market",
# "liquidityInd": "n"
# }
# }
# }
#
data = self.safe_value(response, 'data', {})
order = self.safe_value_2(data, 'order', 'info', {})
return self.parse_order(order, market)
def cancel_all_orders(self, symbol: Optional[str] = None, params={}):
"""
cancel all open orders
:param str symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
type, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
options = self.safe_value(self.options, 'cancelAllOrders', {})
accountsByType = self.safe_value(self.options, 'accountsByType', {})
accountCategory = self.safe_string(accountsByType, type, 'cash')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_value(account, 'id')
request = {
'account-group': accountGroup,
'account-category': accountCategory,
'time': self.milliseconds(),
}
if symbol is not None:
request['symbol'] = market['id']
defaultMethod = self.safe_string(options, 'method', 'v1PrivateAccountCategoryDeleteOrderAll')
method = self.get_supported_mapping(type, {
'spot': defaultMethod,
'margin': defaultMethod,
'swap': 'v2PrivateAccountGroupDeleteFuturesOrderAll',
})
if method == 'v1PrivateAccountCategoryDeleteOrderAll':
if accountCategory is not None:
request['category'] = accountCategory
else:
request['account-category'] = accountCategory
response = getattr(self, method)(self.extend(request, query))
#
# AccountCategoryDeleteOrderAll
#
# {
# "code": 0,
# "data": {
# "ac": "CASH",
# "accountId": "cshQtyfq8XLAA9kcf19h8bXHbAwwoqDo",
# "action": "cancel-all",
# "info": {
# "id": "2bmYvi7lyTrneMzpcJcf2D7Pe9V1P9wy",
# "orderId": "",
# "orderType": "NULL_VAL",
# "symbol": "",
# "timestamp": 1574118495462
# },
# "status": "Ack"
# }
# }
#
# AccountGroupDeleteFuturesOrderAll
#
# {
# "code": 0,
# "data": {
# "ac": "FUTURES",
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "action": "cancel-all",
# "info": {
# "symbol":"BTC-PERP"
# }
# }
# }
#
return response
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# destTag: "",
# tagType: "",
# tagId: "",
# chainName: "ERC20",
# numConfirmations: 20,
# withdrawalFee: 1,
# nativeScale: 4,
# tips: []
# }
#
address = self.safe_string(depositAddress, 'address')
tagId = self.safe_string(depositAddress, 'tagId')
tag = self.safe_string(depositAddress, tagId)
self.check_address(address)
code = None if (currency is None) else currency['code']
chainName = self.safe_string(depositAddress, 'chainName')
network = self.safe_network(chainName)
return {
'currency': code,
'address': address,
'tag': tag,
'network': network,
'info': depositAddress,
}
def safe_network(self, networkId):
networksById = {
'TRC20': 'TRC20',
'ERC20': 'ERC20',
'GO20': 'GO20',
'BEP2': 'BEP2',
'BEP20(BSC)': 'BEP20',
'Bitcoin': 'BTC',
'Bitcoin ABC': 'BCH',
'Litecoin': 'LTC',
'Matic Network': 'MATIC',
'Solana': 'SOL',
'xDai': 'STAKE',
'Akash': 'AKT',
}
return self.safe_string(networksById, networkId, networkId)
def fetch_deposit_address(self, code: str, params={}):
"""
fetch the deposit address for a currency associated with self account
:param str code: unified currency code
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: an `address structure <https://github.com/ccxt/ccxt/wiki/Manual#address-structure>`
"""
self.load_markets()
currency = self.currency(code)
chainName = self.safe_string(params, 'chainName')
params = self.omit(params, 'chainName')
request = {
'asset': currency['id'],
}
response = self.v1PrivateGetWalletDepositAddress(self.extend(request, params))
#
# {
# "code":0,
# "data":{
# "asset":"USDT",
# "assetName":"Tether",
# "address":[
# {
# "address":"1N22odLHXnLPCjC8kwBJPTayarr9RtPod6",
# "destTag":"",
# "tagType":"",
# "tagId":"",
# "chainName":"Omni",
# "numConfirmations":3,
# "withdrawalFee":4.7,
# "nativeScale":4,
# "tips":[]
# },
# {
# "address":"0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# "destTag":"",
# "tagType":"",
# "tagId":"",
# "chainName":"ERC20",
# "numConfirmations":20,
# "withdrawalFee":1.0,
# "nativeScale":4,
# "tips":[]
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
addresses = self.safe_value(data, 'address', [])
numAddresses = len(addresses)
address = None
if numAddresses > 1:
addressesByChainName = self.index_by(addresses, 'chainName')
if chainName is None:
chainNames = list(addressesByChainName.keys())
chains = ', '.join(chainNames)
raise ArgumentsRequired(self.id + ' fetchDepositAddress() returned more than one address, a chainName parameter is required, one of ' + chains)
address = self.safe_value(addressesByChainName, chainName, {})
else:
# first address
address = self.safe_value(addresses, 0, {})
result = self.parse_deposit_address(address, currency)
return self.extend(result, {
'info': response,
})
def fetch_deposits(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all deposits made to an account
:param str code: unified currency code
:param int [since]: the earliest time in ms to fetch deposits for
:param int [limit]: the maximum number of deposits structures to retrieve
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
request = {
'txType': 'deposit',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
def fetch_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all withdrawals made from an account
:param str code: unified currency code
:param int [since]: the earliest time in ms to fetch withdrawals for
:param int [limit]: the maximum number of withdrawals structures to retrieve
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict[]: a list of `transaction structures <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
request = {
'txType': 'withdrawal',
}
return self.fetch_transactions(code, since, limit, self.extend(request, params))
    def fetch_deposits_withdrawals(self, code: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
        """
        fetch history of deposits and withdrawals
        :param str [code]: unified currency code for the currency of the deposit/withdrawals, default is None
        :param int [since]: timestamp in ms of the earliest deposit/withdrawal, default is None
        :param int [limit]: max number of deposit/withdrawals to return, default is None
        :param dict [params]: extra parameters specific to the ascendex api endpoint
        :returns dict: a list of `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
        """
        self.load_markets()
        # all request fields are optional; the commented entries document the
        # parameters accepted by the wallet/transactions endpoint
        request = {
            # 'asset': currency['id'],
            # 'page': 1,
            # 'pageSize': 20,
            # 'startTs': self.milliseconds(),
            # 'endTs': self.milliseconds(),
            # 'txType': undefined, # deposit, withdrawal
        }
        currency = None
        if code is not None:
            currency = self.currency(code)
            request['asset'] = currency['id']
        if since is not None:
            request['startTs'] = since
        if limit is not None:
            # the endpoint pages its results; limit maps onto the page size
            request['pageSize'] = limit
        response = self.v1PrivateGetWalletTransactions(self.extend(request, params))
        #
        # {
        #     code: 0,
        #     data: {
        #         data: [
        #             {
        #                 requestId: "wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB",
        #                 time: 1591606166000,
        #                 asset: "USDT",
        #                 transactionType: "deposit",
        #                 amount: "25",
        #                 commission: "0",
        #                 networkTransactionId: "0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce",
        #                 status: "pending",
        #                 numConfirmed: 8,
        #                 numConfirmations: 20,
        #                 destAddress: {address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722"}
        #             }
        #         ],
        #         page: 1,
        #         pageSize: 20,
        #         hasNext: False
        #     }
        # }
        #
        # note the transaction list is nested one level down at data.data
        data = self.safe_value(response, 'data', {})
        transactions = self.safe_value(data, 'data', [])
        return self.parse_transactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'reviewing': 'pending',
'pending': 'pending',
'confirmed': 'ok',
'rejected': 'rejected',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# requestId: "wuzd1Ojsqtz4bCA3UXwtUnnJDmU8PiyB",
# time: 1591606166000,
# asset: "USDT",
# transactionType: "deposit",
# amount: "25",
# commission: "0",
# networkTransactionId: "0xbc4eabdce92f14dbcc01d799a5f8ca1f02f4a3a804b6350ea202be4d3c738fce",
# status: "pending",
# numConfirmed: 8,
# numConfirmations: 20,
# destAddress: {
# address: "0xe7c70b4e73b6b450ee46c3b5c0f5fb127ca55722",
# destTag: "..." # for currencies that have it
# }
# }
#
destAddress = self.safe_value(transaction, 'destAddress', {})
address = self.safe_string(destAddress, 'address')
tag = self.safe_string(destAddress, 'destTag')
timestamp = self.safe_integer(transaction, 'time')
currencyId = self.safe_string(transaction, 'asset')
amountString = self.safe_string(transaction, 'amount')
feeCostString = self.safe_string(transaction, 'commission')
amountString = Precise.string_sub(amountString, feeCostString)
code = self.safe_currency_code(currencyId, currency)
return {
'info': transaction,
'id': self.safe_string(transaction, 'requestId'),
'txid': self.safe_string(transaction, 'networkTransactionId'),
'type': self.safe_string(transaction, 'transactionType'),
'currency': code,
'network': None,
'amount': self.parse_number(amountString),
'status': self.parse_transaction_status(self.safe_string(transaction, 'status')),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressFrom': None,
'addressTo': address,
'tag': tag,
'tagFrom': None,
'tagTo': tag,
'updated': None,
'comment': None,
'fee': {
'currency': code,
'cost': self.parse_number(feeCostString),
'rate': None,
},
}
def fetch_positions(self, symbols: Optional[List[str]] = None, params={}):
"""
fetch all open positions
:param str[]|None symbols: list of unified market symbols
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict[]: a list of `position structure <https://github.com/ccxt/ccxt/wiki/Manual#position-structure>`
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
}
response = self.v2PrivateAccountGroupGetFuturesPosition(self.extend(request, params))
#
# {
# "code": 0,
# "data": {
# "accountId": "fut2ODPhGiY71Pl4vtXnOZ00ssgD7QGn",
# "ac": "FUTURES",
# "collaterals": [
# {
# "asset": "USDT",
# "balance": "44.570287262",
# "referencePrice": "1",
# "discountFactor": "1"
# }
# ],
# "contracts": [
# {
# "symbol": "BTC-PERP",
# "side": "LONG",
# "position": "0.0001",
# "referenceCost": "-3.12277254",
# "unrealizedPnl": "-0.001700233",
# "realizedPnl": "0",
# "avgOpenPrice": "31209",
# "marginType": "isolated",
# "isolatedMargin": "1.654972977",
# "leverage": "2",
# "takeProfitPrice": "0",
# "takeProfitTrigger": "market",
# "stopLossPrice": "0",
# "stopLossTrigger": "market",
# "buyOpenOrderNotional": "0",
# "sellOpenOrderNotional": "0",
# "markPrice": "31210.723063672",
# "indexPrice": "31223.148857925"
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
position = self.safe_value(data, 'contracts', [])
result = []
for i in range(0, len(position)):
result.append(self.parse_position(position[i]))
symbols = self.market_symbols(symbols)
return self.filter_by_array_positions(result, 'symbol', symbols, False)
def parse_position(self, position, market=None):
#
# {
# "symbol": "BTC-PERP",
# "side": "LONG",
# "position": "0.0001",
# "referenceCost": "-3.12277254",
# "unrealizedPnl": "-0.001700233",
# "realizedPnl": "0",
# "avgOpenPrice": "31209",
# "marginType": "isolated",
# "isolatedMargin": "1.654972977",
# "leverage": "2",
# "takeProfitPrice": "0",
# "takeProfitTrigger": "market",
# "stopLossPrice": "0",
# "stopLossTrigger": "market",
# "buyOpenOrderNotional": "0",
# "sellOpenOrderNotional": "0",
# "markPrice": "31210.723063672",
# "indexPrice": "31223.148857925"
# },
#
marketId = self.safe_string(position, 'symbol')
market = self.safe_market(marketId, market)
notional = self.safe_string(position, 'buyOpenOrderNotional')
if Precise.string_eq(notional, '0'):
notional = self.safe_string(position, 'sellOpenOrderNotional')
marginMode = self.safe_string(position, 'marginType')
collateral = None
if marginMode == 'isolated':
collateral = self.safe_string(position, 'isolatedMargin')
return self.safe_position({
'info': position,
'id': None,
'symbol': market['symbol'],
'notional': self.parse_number(notional),
'marginMode': marginMode,
'liquidationPrice': None,
'entryPrice': self.safe_number(position, 'avgOpenPrice'),
'unrealizedPnl': self.safe_number(position, 'unrealizedPnl'),
'percentage': None,
'contracts': self.safe_number(position, 'position'),
'contractSize': self.safe_number(market, 'contractSize'),
'markPrice': self.safe_number(position, 'markPrice'),
'lastPrice': None,
'side': self.safe_string_lower(position, 'side'),
'hedged': None,
'timestamp': None,
'datetime': None,
'lastUpdateTimestamp': None,
'maintenanceMargin': None,
'maintenanceMarginPercentage': None,
'collateral': collateral,
'initialMargin': None,
'initialMarginPercentage': None,
'leverage': self.safe_integer(position, 'leverage'),
'marginRatio': None,
'stopLossPrice': self.safe_number(position, 'stopLossPrice'),
'takeProfitPrice': self.safe_number(position, 'takeProfitPrice'),
})
def parse_funding_rate(self, contract, market=None):
#
# {
# "time": 1640061364830,
# "symbol": "EOS-PERP",
# "markPrice": "3.353854865",
# "indexPrice": "3.3542",
# "openInterest": "14242",
# "fundingRate": "-0.000073026",
# "nextFundingTime": 1640073600000
# }
#
marketId = self.safe_string(contract, 'symbol')
symbol = self.safe_symbol(marketId, market)
currentTime = self.safe_integer(contract, 'time')
nextFundingRate = self.safe_number(contract, 'fundingRate')
nextFundingRateTimestamp = self.safe_integer(contract, 'nextFundingTime')
return {
'info': contract,
'symbol': symbol,
'markPrice': self.safe_number(contract, 'markPrice'),
'indexPrice': self.safe_number(contract, 'indexPrice'),
'interestRate': self.parse_number('0'),
'estimatedSettlePrice': None,
'timestamp': currentTime,
'datetime': self.iso8601(currentTime),
'previousFundingRate': None,
'nextFundingRate': None,
'previousFundingTimestamp': None,
'nextFundingTimestamp': None,
'previousFundingDatetime': None,
'nextFundingDatetime': None,
'fundingRate': nextFundingRate,
'fundingTimestamp': nextFundingRateTimestamp,
'fundingDatetime': self.iso8601(nextFundingRateTimestamp),
}
def fetch_funding_rates(self, symbols: Optional[List[str]] = None, params={}):
"""
fetch the funding rate for multiple markets
:param str[]|None symbols: list of unified market symbols
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `funding rates structures <https://github.com/ccxt/ccxt/wiki/Manual#funding-rates-structure>`, indexe by market symbols
"""
self.load_markets()
symbols = self.market_symbols(symbols)
response = self.v2PublicGetFuturesPricingData(params)
#
# {
# "code": 0,
# "data": {
# "contracts": [
# {
# "time": 1640061364830,
# "symbol": "EOS-PERP",
# "markPrice": "3.353854865",
# "indexPrice": "3.3542",
# "openInterest": "14242",
# "fundingRate": "-0.000073026",
# "nextFundingTime": 1640073600000
# },
# ],
# "collaterals": [
# {
# "asset": "USDTR",
# "referencePrice": "1"
# },
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
contracts = self.safe_value(data, 'contracts', [])
result = self.parse_funding_rates(contracts)
return self.filter_by_array(result, 'symbol', symbols)
def modify_margin_helper(self, symbol: str, amount, type, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
amount = self.amount_to_precision(symbol, amount)
request = {
'account-group': accountGroup,
'symbol': market['id'],
'amount': amount, # positive value for adding margin, negative for reducing
}
response = self.v2PrivateAccountGroupPostFuturesIsolatedPositionMargin(self.extend(request, params))
#
# Can only change margin for perpetual futures isolated margin positions
#
# {
# "code": 0
# }
#
if type == 'reduce':
amount = Precise.string_abs(amount)
return self.extend(self.parse_margin_modification(response, market), {
'amount': self.parse_number(amount),
'type': type,
})
def parse_margin_modification(self, data, market=None):
errorCode = self.safe_string(data, 'code')
status = 'ok' if (errorCode == '0') else 'failed'
return {
'info': data,
'type': None,
'amount': None,
'code': market['quote'],
'symbol': market['symbol'],
'status': status,
}
def reduce_margin(self, symbol: str, amount, params={}):
"""
remove margin from a position
:param str symbol: unified market symbol
:param float amount: the amount of margin to remove
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#reduce-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'reduce', params)
def add_margin(self, symbol: str, amount, params={}):
"""
add margin
:param str symbol: unified market symbol
:param float amount: amount of margin to add
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a `margin structure <https://github.com/ccxt/ccxt/wiki/Manual#add-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'add', params)
def set_leverage(self, leverage, symbol: Optional[str] = None, params={}):
"""
set the level of leverage for a market
:param float leverage: the rate of leverage
:param str symbol: unified market symbol
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: response from the exchange
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
if (leverage < 1) or (leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
self.load_markets()
self.load_accounts()
market = self.market(symbol)
if market['type'] != 'future':
raise BadSymbol(self.id + ' setLeverage() supports futures contracts only')
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
'symbol': market['id'],
'leverage': leverage,
}
return self.v2PrivateAccountGroupPostFuturesLeverage(self.extend(request, params))
def set_margin_mode(self, marginMode, symbol: Optional[str] = None, params={}):
"""
set margin mode to 'cross' or 'isolated'
:param str marginMode: 'cross' or 'isolated'
:param str symbol: unified market symbol
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: response from the exchange
"""
marginMode = marginMode.lower()
if marginMode == 'cross':
marginMode = 'crossed'
if marginMode != 'isolated' and marginMode != 'crossed':
raise BadRequest(self.id + ' setMarginMode() marginMode argument should be isolated or cross')
self.load_markets()
self.load_accounts()
market = self.market(symbol)
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
request = {
'account-group': accountGroup,
'symbol': market['id'],
'marginMode': marginMode,
}
if market['type'] != 'future':
raise BadSymbol(self.id + ' setMarginMode() supports futures contracts only')
return self.v2PrivateAccountGroupPostFuturesMarginType(self.extend(request, params))
def fetch_leverage_tiers(self, symbols: Optional[List[str]] = None, params={}):
"""
retrieve information on the maximum leverage, and maintenance margin for trades of varying trade sizes
:param str[]|None symbols: list of unified market symbols
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a dictionary of `leverage tiers structures <https://github.com/ccxt/ccxt/wiki/Manual#leverage-tiers-structure>`, indexed by market symbols
"""
self.load_markets()
response = self.v2PublicGetFuturesContract(params)
#
# {
# "code":0,
# "data":[
# {
# "symbol":"BTC-PERP",
# "status":"Normal",
# "displayName":"BTCUSDT",
# "settlementAsset":"USDT",
# "underlying":"BTC/USDT",
# "tradingStartTime":1579701600000,
# "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"},
# "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"},
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "marketOrderPriceMarkup":"0.03",
# "marginRequirements":[
# {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"},
# {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"},
# {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"},
# {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"},
# {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"},
# {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"}
# ]
# }
# ]
# }
#
data = self.safe_value(response, 'data')
symbols = self.market_symbols(symbols)
return self.parse_leverage_tiers(data, symbols, 'symbol')
def parse_market_leverage_tiers(self, info, market=None):
"""
:param dict info: Exchange market response for 1 market
:param dict market: CCXT market
"""
#
# {
# "symbol":"BTC-PERP",
# "status":"Normal",
# "displayName":"BTCUSDT",
# "settlementAsset":"USDT",
# "underlying":"BTC/USDT",
# "tradingStartTime":1579701600000,
# "priceFilter":{"minPrice":"1","maxPrice":"1000000","tickSize":"1"},
# "lotSizeFilter":{"minQty":"0.0001","maxQty":"1000000000","lotSize":"0.0001"},
# "commissionType":"Quote",
# "commissionReserveRate":"0.001",
# "marketOrderPriceMarkup":"0.03",
# "marginRequirements":[
# {"positionNotionalLowerBound":"0","positionNotionalUpperBound":"50000","initialMarginRate":"0.01","maintenanceMarginRate":"0.006"},
# {"positionNotionalLowerBound":"50000","positionNotionalUpperBound":"200000","initialMarginRate":"0.02","maintenanceMarginRate":"0.012"},
# {"positionNotionalLowerBound":"200000","positionNotionalUpperBound":"2000000","initialMarginRate":"0.04","maintenanceMarginRate":"0.024"},
# {"positionNotionalLowerBound":"2000000","positionNotionalUpperBound":"20000000","initialMarginRate":"0.1","maintenanceMarginRate":"0.06"},
# {"positionNotionalLowerBound":"20000000","positionNotionalUpperBound":"40000000","initialMarginRate":"0.2","maintenanceMarginRate":"0.12"},
# {"positionNotionalLowerBound":"40000000","positionNotionalUpperBound":"1000000000","initialMarginRate":"0.333333","maintenanceMarginRate":"0.2"}
# ]
# }
#
marginRequirements = self.safe_value(info, 'marginRequirements', [])
id = self.safe_string(info, 'symbol')
market = self.safe_market(id, market)
tiers = []
for i in range(0, len(marginRequirements)):
tier = marginRequirements[i]
initialMarginRate = self.safe_string(tier, 'initialMarginRate')
tiers.append({
'tier': self.sum(i, 1),
'currency': market['quote'],
'minNotional': self.safe_number(tier, 'positionNotionalLowerBound'),
'maxNotional': self.safe_number(tier, 'positionNotionalUpperBound'),
'maintenanceMarginRate': self.safe_number(tier, 'maintenanceMarginRate'),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRate)),
'info': tier,
})
return tiers
def parse_deposit_withdraw_fee(self, fee, currency=None):
#
# {
# "assetCode": "USDT",
# "assetName": "Tether",
# "precisionScale": 9,
# "nativeScale": 4,
# "blockChain": [
# {
# "chainName": "Omni",
# "withdrawFee": "30.0",
# "allowDeposit": True,
# "allowWithdraw": True,
# "minDepositAmt": "0.0",
# "minWithdrawal": "50.0",
# "numConfirmations": 3
# },
# ]
# }
#
blockChains = self.safe_value(fee, 'blockChain', [])
blockChainsLength = len(blockChains)
result = {
'info': fee,
'withdraw': {
'fee': None,
'percentage': None,
},
'deposit': {
'fee': None,
'percentage': None,
},
'networks': {},
}
for i in range(0, blockChainsLength):
blockChain = blockChains[i]
networkId = self.safe_string(blockChain, 'chainName')
currencyCode = self.safe_string(currency, 'code')
networkCode = self.network_id_to_code(networkId, currencyCode)
result['networks'][networkCode] = {
'deposit': {'fee': None, 'percentage': None},
'withdraw': {'fee': self.safe_number(blockChain, 'withdrawFee'), 'percentage': False},
}
if blockChainsLength == 1:
result['withdraw']['fee'] = self.safe_number(blockChain, 'withdrawFee')
result['withdraw']['percentage'] = False
return result
def fetch_deposit_withdraw_fees(self, codes: Optional[List[str]] = None, params={}):
"""
fetch deposit and withdraw fees
see https://ascendex.github.io/ascendex-pro-api/#list-all-assets
:param str[]|None codes: list of unified currency codes
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a list of `fee structures <https://github.com/ccxt/ccxt/wiki/Manual#fee-structure>`
"""
self.load_markets()
response = self.v2PublicGetAssets(params)
data = self.safe_value(response, 'data')
return self.parse_deposit_withdraw_fees(data, codes, 'assetCode')
def transfer(self, code: str, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict [params]: extra parameters specific to the ascendex api endpoint
:returns dict: a `transfer structure <https://github.com/ccxt/ccxt/wiki/Manual#transfer-structure>`
"""
self.load_markets()
self.load_accounts()
account = self.safe_value(self.accounts, 0, {})
accountGroup = self.safe_string(account, 'id')
currency = self.currency(code)
amount = self.currency_to_precision(code, amount)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
if fromId != 'cash' and toId != 'cash':
raise ExchangeError(self.id + ' transfer() only supports direct balance transfer between spot and future, spot and margin')
request = {
'account-group': accountGroup,
'amount': amount,
'asset': currency['id'],
'fromAccount': fromId,
'toAccount': toId,
}
response = self.v1PrivateAccountGroupPostTransfer(self.extend(request, params))
#
# {code: '0'}
#
transferOptions = self.safe_value(self.options, 'transfer', {})
fillResponseFromRequest = self.safe_value(transferOptions, 'fillResponseFromRequest', True)
transfer = self.parse_transfer(response, currency)
if fillResponseFromRequest:
transfer['fromAccount'] = fromAccount
transfer['toAccount'] = toAccount
transfer['amount'] = amount
transfer['currency'] = code
return transfer
def parse_transfer(self, transfer, currency=None):
#
# {code: '0'}
#
status = self.safe_integer(transfer, 'code')
currencyCode = self.safe_currency_code(None, currency)
timestamp = self.milliseconds()
return {
'info': transfer,
'id': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'currency': currencyCode,
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': self.parse_transfer_status(status),
}
def parse_transfer_status(self, status):
if status == 0:
return 'ok'
return 'failed'
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Assemble the final URL, headers and body for a request.

        *api* is a list like [version, access, type], e.g.
        ['v1', 'private', 'accountCategory'].  Private requests are signed
        with HMAC-SHA256 over "timestamp+request", where *request* is a
        shortened endpoint name (not the full URL path) — see the
        reassignments of *request* below, which happen AFTER the URL has
        already been built and affect only the signature payload.
        """
        version = api[0]
        access = api[1]
        type = self.safe_string(api, 2)
        url = ''
        accountCategory = (type == 'accountCategory')
        if accountCategory or (type == 'accountGroup'):
            # account-scoped endpoints are prefixed with the numeric account group
            url += self.implode_params('/{account-group}', params)
            params = self.omit(params, 'account-group')
        request = self.implode_params(path, params)
        url += '/api/pro/'
        if version == 'v2':
            if type == 'data':
                request = 'data/' + version + '/' + request
            else:
                request = version + '/' + request
        else:
            url += version + '/'
            if accountCategory:
                # v1 account-category endpoints embed 'cash/'/'margin/' in the path
                url += self.implode_params('{account-category}/', params)
        params = self.omit(params, 'account-category')
        url += request
        # from here on, *request* is rewritten only to form the signature
        # payload (the "prehash" endpoint name); the URL above is final
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # ((v1 and cash/balance) or margin/balance) — presumably intended as
        # v1 and (cash/balance or margin/balance); harmless in practice since
        # margin/balance only exists under v1, but worth confirming upstream
        if (version == 'v1') and (request == 'cash/balance') or (request == 'margin/balance'):
            request = 'balance'
        if (version == 'v1') and (request == 'spot/fee'):
            request = 'fee'
        if request.find('subuser') >= 0:
            parts = request.split('/')
            request = parts[2]
        params = self.omit(params, self.extract_params(path))
        if access == 'public':
            if params:
                url += '?' + self.urlencode(params)
        else:
            self.check_required_credentials()
            timestamp = str(self.milliseconds())
            # signature payload is "<millis>+<short endpoint name>"
            payload = timestamp + '+' + request
            hmac = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
            headers = {
                'x-auth-key': self.apiKey,
                'x-auth-timestamp': timestamp,
                'x-auth-signature': hmac,
            }
            if method == 'GET':
                if params:
                    url += '?' + self.urlencode(params)
            else:
                headers['Content-Type'] = 'application/json'
                body = self.json(params)
        url = self.urls['api']['rest'] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None # fallback to default error handler
#
# {'code': 6010, 'message': 'Not enough balance.'}
# {'code': 60060, 'message': 'The order is already filled or canceled.'}
# {"code":2100,"message":"ApiKeyFailure"}
# {"code":300001,"message":"Price is too low from market price.","reason":"INVALID_PRICE","accountId":"cshrHKLZCjlZ2ejqkmvIHHtPmLYqdnda","ac":"CASH","action":"place-order","status":"Err","info":{"symbol":"BTC/USDT"}}
#
code = self.safe_string(response, 'code')
message = self.safe_string(response, 'message')
error = (code is not None) and (code != '0')
if error or (message is not None):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
return None
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
da205b21a2b3d4bd14360f4723bfd82fcedc9bb2 | 7e1c4c72038adb45083dca4df339f67cf2d718d7 | /FileManager.py | cb7e5f4e54a6dadea154f736e70765e6cdff4467 | [] | no_license | Tarzan1009/PSI | 9b004e9c87efb7ab1a78d88040dbef7b09ed0c02 | 6a5d0749428c1ca05a6009313638f8bfb42b9882 | refs/heads/master | 2022-03-26T08:32:20.904801 | 2019-11-14T11:39:03 | 2019-11-14T11:39:03 | 212,543,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | class File_manager:
    def __init__(self, file_name):
        # Path of the file this manager reads from and writes to.
        self.file_name = file_name
def read_file(self):
file = open(self.file_name)
text = file.read()
return text
def update_file(self, text_data):
file = open(self.file_name)
file.write(text_data)
file.close()
| [
"piotrek1009@gmail.com"
] | piotrek1009@gmail.com |
7fc40fc99c78e47101169025e25ff3b9e7f056f3 | 0466f4381774f5e87c05f46cae79cdb1da95e7f0 | /venv/Scripts/pip3-script.py | 7c824e70f4999bafd189a8d5f2aa1d5a619ab130 | [] | no_license | DuBaoXing123/git_demo | a56d6906e79471ee69e0abe97a89ab3ccf7106b1 | d3995e085338d011286440a3d82b62770b9ce3c2 | refs/heads/master | 2022-12-03T01:42:22.187759 | 2020-08-21T09:55:09 | 2020-08-21T09:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!E:\soft_demo\django_learn\git_demo\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Setuptools-generated console-script wrapper that launches the 'pip3'
# entry point of the pinned pip distribution.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix that Windows launchers append,
    # so argv[0] matches the plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"m18235876047@163.com"
] | m18235876047@163.com |
d903c99e7bd4a1faaf6618e547c4f50cd83c4216 | 8db2618e65efb303668967142e90f5520b287f2b | /src/python/src/grpc/framework/face/testing/event_invocation_synchronous_event_service_test_case.py | 0f0b0e3d5232a7014fc8be7e6861a516750d7485 | [
"BSD-3-Clause"
] | permissive | huamichaelchen/grpc | c58ddf104f870bc105bf4675040ee72bd4d5fde5 | fa507530590944683a0c672fcd4ea5a54977c903 | refs/heads/master | 2022-02-17T04:21:24.235158 | 2015-03-27T17:10:29 | 2015-03-27T17:10:29 | 33,004,650 | 0 | 0 | NOASSERTION | 2022-02-11T03:24:12 | 2015-03-27T19:13:49 | C | UTF-8 | Python | false | false | 14,959 | py | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A test to verify an implementation of the Face layer of RPC Framework."""
import abc
import unittest
from grpc.framework.face import interfaces
from grpc.framework.face.testing import callback as testing_callback
from grpc.framework.face.testing import control
from grpc.framework.face.testing import coverage
from grpc.framework.face.testing import digest
from grpc.framework.face.testing import stock_service
from grpc.framework.face.testing import test_case
_TIMEOUT = 3
class EventInvocationSynchronousEventServiceTestCase(
test_case.FaceTestCase, coverage.FullCoverage):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must also extend unittest.TestCase.
"""
__metaclass__ = abc.ABCMeta
  def setUp(self):
    """See unittest.TestCase.setUp for full specification.
    Overriding implementations must call this implementation.
    """
    # Control object that lets individual tests pause the servicer (to force
    # expiration) or make it fail (to force SERVICER_FAILURE abortions).
    self.control = control.PauseFailControl()
    # Digest the stock test service into method descriptions, event-style
    # implementations, and per-method sequences of test messages.
    self.digest = digest.digest(
        stock_service.STOCK_TEST_SERVICE, self.control, None)
    self.stub, self.memo = self.set_up_implementation(
        self.digest.name, self.digest.methods,
        self.digest.event_method_implementations, None)
  def tearDown(self):
    """See unittest.TestCase.tearDown for full specification.
    Overriding implementations must call this implementation.
    """
    # Release whatever resources set_up_implementation captured in the memo.
    self.tear_down_implementation(self.memo)
  def testSuccessfulUnaryRequestUnaryResponse(self):
    """Verifies a successful unary-unary RPC via the event-driven stub."""
    for name, test_messages_sequence in (
        self.digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        callback = testing_callback.Callback()
        self.stub.event_value_in_value_out(
            name, request, callback.complete, callback.abort, _TIMEOUT)
        # Wait for the RPC to finish before inspecting the callback.
        callback.block_until_terminated()
        response = callback.response()
        test_messages.verify(request, response, self)
  def testSuccessfulUnaryRequestStreamResponse(self):
    """Verifies a successful unary-stream RPC via the event-driven stub."""
    for name, test_messages_sequence in (
        self.digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        # The callback itself acts as the response consumer here.
        callback = testing_callback.Callback()
        self.stub.event_value_in_stream_out(
            name, request, callback, callback.abort, _TIMEOUT)
        callback.block_until_terminated()
        responses = callback.responses()
        test_messages.verify(request, responses, self)
  def testSuccessfulStreamRequestUnaryResponse(self):
    """Verifies a successful stream-unary RPC via the event-driven stub."""
    for name, test_messages_sequence in (
        self.digest.stream_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        callback = testing_callback.Callback()
        unused_call, request_consumer = self.stub.event_stream_in_value_out(
            name, callback.complete, callback.abort, _TIMEOUT)
        # Feed the full request stream and signal its end.
        for request in requests:
          request_consumer.consume(request)
        request_consumer.terminate()
        callback.block_until_terminated()
        response = callback.response()
        test_messages.verify(requests, response, self)
  def testSuccessfulStreamRequestStreamResponse(self):
    """Verifies a successful stream-stream RPC via the event-driven stub."""
    for name, test_messages_sequence in (
        self.digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        # The callback itself acts as the response consumer here.
        callback = testing_callback.Callback()
        unused_call, request_consumer = self.stub.event_stream_in_stream_out(
            name, callback, callback.abort, _TIMEOUT)
        # Feed the full request stream and signal its end.
        for request in requests:
          request_consumer.consume(request)
        request_consumer.terminate()
        callback.block_until_terminated()
        responses = callback.responses()
        test_messages.verify(requests, responses, self)
  def testSequentialInvocations(self):
    """Verifies that a second RPC can be launched from inside the completion
    callback of a first RPC."""
    # pylint: disable=cell-var-from-loop
    for name, test_messages_sequence in (
        self.digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        first_request = test_messages.request()
        second_request = test_messages.request()
        first_callback = testing_callback.Callback()
        second_callback = testing_callback.Callback()
        def make_second_invocation(first_response):
          # Record the first result, then chain the second RPC from within
          # the first RPC's completion callback.
          first_callback.complete(first_response)
          self.stub.event_value_in_value_out(
              name, second_request, second_callback.complete,
              second_callback.abort, _TIMEOUT)
        self.stub.event_value_in_value_out(
            name, first_request, make_second_invocation, first_callback.abort,
            _TIMEOUT)
        # Waiting on the second callback implies the first also finished.
        second_callback.block_until_terminated()
        first_response = first_callback.response()
        second_response = second_callback.response()
        test_messages.verify(first_request, first_response, self)
        test_messages.verify(second_request, second_response, self)
  def testExpiredUnaryRequestUnaryResponse(self):
    """Verifies that a unary-unary RPC aborts with EXPIRED when the servicer
    is paused past the timeout."""
    for name, test_messages_sequence in (
        self.digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        callback = testing_callback.Callback()
        # Hold the pause while waiting so the servicer cannot respond and
        # the _TIMEOUT deadline must expire.
        with self.control.pause():
          self.stub.event_value_in_value_out(
              name, request, callback.complete, callback.abort, _TIMEOUT)
          callback.block_until_terminated()
        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
  def testExpiredUnaryRequestStreamResponse(self):
    """Verifies that a unary-stream RPC aborts with EXPIRED when the servicer
    is paused past the timeout."""
    for name, test_messages_sequence in (
        self.digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        callback = testing_callback.Callback()
        # Hold the pause while waiting so the servicer cannot respond and
        # the _TIMEOUT deadline must expire.
        with self.control.pause():
          self.stub.event_value_in_stream_out(
              name, request, callback, callback.abort, _TIMEOUT)
          callback.block_until_terminated()
        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
  def testExpiredStreamRequestUnaryResponse(self):
    """Verifies that a stream-unary RPC aborts with EXPIRED when the request
    stream is never fed nor terminated."""
    for name, test_messages_sequence in (
        self.digest.stream_unary_messages_sequences.iteritems()):
      for unused_test_messages in test_messages_sequence:
        callback = testing_callback.Callback()
        # No requests are ever consumed or terminated, so the RPC can only
        # end by exceeding _TIMEOUT.
        self.stub.event_stream_in_value_out(
            name, callback.complete, callback.abort, _TIMEOUT)
        callback.block_until_terminated()
        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
  def testExpiredStreamRequestStreamResponse(self):
    """Verifies that a stream-stream RPC aborts with EXPIRED when the request
    stream is fed but never terminated."""
    for name, test_messages_sequence in (
        self.digest.stream_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        requests = test_messages.requests()
        callback = testing_callback.Callback()
        unused_call, request_consumer = self.stub.event_stream_in_stream_out(
            name, callback, callback.abort, _TIMEOUT)
        # Deliberately omit request_consumer.terminate() so the RPC can only
        # end by exceeding _TIMEOUT.
        for request in requests:
          request_consumer.consume(request)
        callback.block_until_terminated()
        self.assertEqual(interfaces.Abortion.EXPIRED, callback.abortion())
  def testFailedUnaryRequestUnaryResponse(self):
    """Verifies that a unary-unary RPC aborts with SERVICER_FAILURE when the
    servicer raises."""
    for name, test_messages_sequence in (
        self.digest.unary_unary_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        callback = testing_callback.Callback()
        # Keep the servicer failing for the whole lifetime of the RPC.
        with self.control.fail():
          self.stub.event_value_in_value_out(
              name, request, callback.complete, callback.abort, _TIMEOUT)
          callback.block_until_terminated()
        self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
  def testFailedUnaryRequestStreamResponse(self):
    """Verifies that a unary-stream RPC aborts with SERVICER_FAILURE when the
    servicer raises."""
    for name, test_messages_sequence in (
        self.digest.unary_stream_messages_sequences.iteritems()):
      for test_messages in test_messages_sequence:
        request = test_messages.request()
        callback = testing_callback.Callback()
        # Keep the servicer failing for the whole lifetime of the RPC.
        with self.control.fail():
          self.stub.event_value_in_stream_out(
              name, request, callback, callback.abort, _TIMEOUT)
          callback.block_until_terminated()
        self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testFailedStreamRequestUnaryResponse(self):
    """A servicer failure during a stream-unary RPC surfaces as SERVICER_FAILURE."""
    for name, test_messages_sequence in (
            self.digest.stream_unary_messages_sequences.iteritems()):
        for test_messages in test_messages_sequence:
            requests = test_messages.requests()
            callback = testing_callback.Callback()

            # The servicer is failing; the full request stream is still sent
            # and terminated so the RPC runs to its (failed) conclusion.
            with self.control.fail():
                unused_call, request_consumer = self.stub.event_stream_in_value_out(
                    name, callback.complete, callback.abort, _TIMEOUT)
                for request in requests:
                    request_consumer.consume(request)
                request_consumer.terminate()
                callback.block_until_terminated()

            self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testFailedStreamRequestStreamResponse(self):
    """A servicer failure during a stream-stream RPC surfaces as SERVICER_FAILURE."""
    for name, test_messages_sequence in (
            self.digest.stream_stream_messages_sequences.iteritems()):
        for test_messages in test_messages_sequence:
            requests = test_messages.requests()
            callback = testing_callback.Callback()

            # Send and terminate the request stream under injected failure.
            with self.control.fail():
                unused_call, request_consumer = self.stub.event_stream_in_stream_out(
                    name, callback, callback.abort, _TIMEOUT)
                for request in requests:
                    request_consumer.consume(request)
                request_consumer.terminate()
                callback.block_until_terminated()

            self.assertEqual(interfaces.Abortion.SERVICER_FAILURE, callback.abortion())
def testParallelInvocations(self):
    """Two simultaneous unary-unary RPCs must each receive their own correct response."""
    for name, test_messages_sequence in (
            self.digest.unary_unary_messages_sequences.iteritems()):
        for test_messages in test_messages_sequence:
            first_request = test_messages.request()
            first_callback = testing_callback.Callback()
            second_request = test_messages.request()
            second_callback = testing_callback.Callback()

            # Fire both RPCs before waiting on either one.
            self.stub.event_value_in_value_out(
                name, first_request, first_callback.complete, first_callback.abort,
                _TIMEOUT)
            self.stub.event_value_in_value_out(
                name, second_request, second_callback.complete,
                second_callback.abort, _TIMEOUT)
            first_callback.block_until_terminated()
            second_callback.block_until_terminated()

            # Each response must match its own request (no cross-talk).
            first_response = first_callback.response()
            second_response = second_callback.response()
            test_messages.verify(first_request, first_response, self)
            test_messages.verify(second_request, second_response, self)
@unittest.skip('TODO(nathaniel): implement.')
def testWaitingForSomeButNotAllParallelInvocations(self):
    """Placeholder test, skipped via the decorator until implemented."""
    raise NotImplementedError()
def testCancelledUnaryRequestUnaryResponse(self):
    """Cancelling an in-flight unary-unary RPC must abort it with CANCELLED."""
    for name, test_messages_sequence in (
            self.digest.unary_unary_messages_sequences.iteritems()):
        for test_messages in test_messages_sequence:
            request = test_messages.request()
            callback = testing_callback.Callback()

            # Pause the servicer so the RPC is guaranteed to still be in
            # flight when cancel() is issued.
            with self.control.pause():
                call = self.stub.event_value_in_value_out(
                    name, request, callback.complete, callback.abort, _TIMEOUT)
                call.cancel()
                callback.block_until_terminated()

            self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
def testCancelledUnaryRequestStreamResponse(self):
    """Cancelling a unary-stream RPC must abort it with CANCELLED."""
    for name, test_messages_sequence in (
            self.digest.unary_stream_messages_sequences.iteritems()):
        for test_messages in test_messages_sequence:
            request = test_messages.request()
            callback = testing_callback.Callback()

            # Cancel immediately after starting the call.
            call = self.stub.event_value_in_stream_out(
                name, request, callback, callback.abort, _TIMEOUT)
            call.cancel()
            callback.block_until_terminated()

            self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
def testCancelledStreamRequestUnaryResponse(self):
    """Cancelling a stream-unary RPC after sending requests must abort with CANCELLED."""
    for name, test_messages_sequence in (
            self.digest.stream_unary_messages_sequences.iteritems()):
        for test_messages in test_messages_sequence:
            requests = test_messages.requests()
            callback = testing_callback.Callback()

            call, request_consumer = self.stub.event_stream_in_value_out(
                name, callback.complete, callback.abort, _TIMEOUT)
            # Send the whole request stream, but cancel instead of terminating.
            for request in requests:
                request_consumer.consume(request)
            call.cancel()
            callback.block_until_terminated()

            self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
def testCancelledStreamRequestStreamResponse(self):
    """Cancelling a stream-stream RPC before sending anything must abort with CANCELLED."""
    for name, test_messages_sequence in (
            self.digest.stream_stream_messages_sequences.iteritems()):
        for unused_test_messages in test_messages_sequence:
            callback = testing_callback.Callback()

            # No requests are sent at all; cancel right away.
            call, unused_request_consumer = self.stub.event_stream_in_stream_out(
                name, callback, callback.abort, _TIMEOUT)
            call.cancel()
            callback.block_until_terminated()

            self.assertEqual(interfaces.Abortion.CANCELLED, callback.abortion())
| [
"nathaniel@google.com"
] | nathaniel@google.com |
d5e0fd0f6396a0baee88fd21b33d3362ff68e996 | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/addons/io_mesh_ply/export_ply.py | 3a5ef0ae473daaa1a8153aff9d2c3a07022a3b19 | [
"Unlicense",
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause"
] | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 6,382 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
"""
This script exports Stanford PLY files from Blender. It supports normals,
colors, and texture coordinates per face or per vertex.
Only one mesh can be exported at a time.
"""
import bpy
import os
def save_mesh(
        filepath,
        mesh,
        use_normals=True,
        use_uv_coords=True,
        use_colors=True,
):
    """Write *mesh* to *filepath* as an ASCII Stanford PLY file.

    :param filepath: destination path of the .ply file.
    :param mesh: a Blender mesh datablock (bpy.types.Mesh).
    :param use_normals: export normals (per-vertex when smooth, per-face when flat).
    :param use_uv_coords: export UV texture coordinates, if the mesh has any.
    :param use_colors: export vertex colors, if the mesh has any.
    :return: {'FINISHED'} (Blender operator convention).
    """

    def rvec3d(v):
        # Round a 3-vector to 6 decimals so near-identical values deduplicate.
        return round(v[0], 6), round(v[1], 6), round(v[2], 6)

    def rvec2d(v):
        # Round a 2-vector to 6 decimals (same reason as rvec3d).
        return round(v[0], 6), round(v[1], 6)

    file = open(filepath, "w", encoding="utf8", newline="\n")
    fw = file.write

    # Be sure tessellated loop triangles are available!
    if not mesh.loop_triangles and mesh.polygons:
        mesh.calc_loop_triangles()

    has_uv = bool(mesh.uv_layers)
    has_vcol = bool(mesh.vertex_colors)

    # Reconcile what the caller asked for with what the mesh actually has.
    if not has_uv:
        use_uv_coords = False
    if not has_vcol:
        use_colors = False

    if not use_uv_coords:
        has_uv = False
    if not use_colors:
        has_vcol = False

    if has_uv:
        active_uv_layer = mesh.uv_layers.active
        if not active_uv_layer:
            use_uv_coords = False
            has_uv = False
        else:
            active_uv_layer = active_uv_layer.data

    if has_vcol:
        active_col_layer = mesh.vertex_colors.active
        if not active_col_layer:
            use_colors = False
            has_vcol = False
        else:
            active_col_layer = active_col_layer.data

    # in case
    color = uvcoord = uvcoord_key = normal = normal_key = None

    mesh_verts = mesh.vertices  # save a lookup
    ply_verts = []  # list of dictionaries
    # vdict = {} # (index, normal, uv) -> new index
    vdict = [{} for i in range(len(mesh_verts))]
    ply_faces = [[] for f in range(len(mesh.loop_triangles))]
    vert_count = 0

    for i, f in enumerate(mesh.loop_triangles):

        smooth = not use_normals or f.use_smooth
        if not smooth:
            # Flat shading: one normal for the whole triangle.
            normal = f.normal[:]
            normal_key = rvec3d(normal)

        if has_uv:
            uv = [active_uv_layer[l].uv[:] for l in f.loops]
        if has_vcol:
            col = [active_col_layer[l].color[:] for l in f.loops]

        pf = ply_faces[i]
        for j, vidx in enumerate(f.vertices):
            v = mesh_verts[vidx]

            if smooth:
                # Smooth shading: use the per-vertex normal instead.
                normal = v.normal[:]
                normal_key = rvec3d(normal)

            if has_uv:
                uvcoord = uv[j][0], uv[j][1]
                uvcoord_key = rvec2d(uvcoord)

            if has_vcol:
                color = col[j]
                # PLY stores colors as 8-bit unsigned integers.
                color = (
                    int(color[0] * 255.0),
                    int(color[1] * 255.0),
                    int(color[2] * 255.0),
                    int(color[3] * 255.0),
                )
            key = normal_key, uvcoord_key, color

            # Vertex splitting: a mesh vertex is duplicated in the PLY output
            # whenever it is used with a different normal/uv/color combination.
            vdict_local = vdict[vidx]
            pf_vidx = vdict_local.get(key)  # Will be None initially

            if pf_vidx is None:  # same as vdict_local.has_key(key)
                pf_vidx = vdict_local[key] = vert_count
                ply_verts.append((vidx, normal, uvcoord, color))
                vert_count += 1

            pf.append(pf_vidx)

    # --- PLY header ---
    fw("ply\n")
    fw("format ascii 1.0\n")
    fw("comment Created by Blender %s - "
       "www.blender.org, source file: %r\n" %
       (bpy.app.version_string, os.path.basename(bpy.data.filepath)))

    fw("element vertex %d\n" % len(ply_verts))

    fw("property float x\n"
       "property float y\n"
       "property float z\n")

    if use_normals:
        fw("property float nx\n"
           "property float ny\n"
           "property float nz\n")
    if use_uv_coords:
        fw("property float s\n"
           "property float t\n")
    if use_colors:
        fw("property uchar red\n"
           "property uchar green\n"
           "property uchar blue\n"
           "property uchar alpha\n")

    fw("element face %d\n" % len(mesh.loop_triangles))
    fw("property list uchar uint vertex_indices\n")
    fw("end_header\n")

    # --- vertex data, one line per (possibly split) vertex ---
    for i, v in enumerate(ply_verts):
        fw("%.6f %.6f %.6f" % mesh_verts[v[0]].co[:])  # co
        if use_normals:
            fw(" %.6f %.6f %.6f" % v[1])  # no
        if use_uv_coords:
            fw(" %.6f %.6f" % v[2])  # uv
        if use_colors:
            fw(" %u %u %u %u" % v[3])  # col
        fw("\n")

    # --- face data: index lists into the vertex section above ---
    for pf in ply_faces:
        if len(pf) == 3:
            fw("3 %d %d %d\n" % tuple(pf))
        else:
            fw("4 %d %d %d %d\n" % tuple(pf))

    file.close()
    print("writing %r done" % filepath)

    return {'FINISHED'}
def save(
        operator,
        context,
        filepath="",
        use_mesh_modifiers=True,
        use_normals=True,
        use_uv_coords=True,
        use_colors=True,
        global_matrix=None
):
    """Operator entry point: export the active object as a PLY file.

    Optionally applies the object's modifiers and a global transform,
    then delegates the actual file writing to save_mesh().
    """
    obj = context.active_object

    if global_matrix is None:
        from mathutils import Matrix
        global_matrix = Matrix()  # identity: no extra transform

    # Leave edit mode so the mesh data is up to date before evaluation.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    if use_mesh_modifiers and obj.modifiers:
        mesh = obj.to_mesh(context.depsgraph, True)
    else:
        mesh = obj.data.copy()

    if not mesh:
        raise Exception("Error, could not get mesh data from active object")

    # Bake the world transform (and the user-chosen axis conversion) into
    # the exported coordinates.
    mesh.transform(global_matrix @ obj.matrix_world)
    if use_normals:
        mesh.calc_normals()

    ret = save_mesh(filepath, mesh,
                    use_normals=use_normals,
                    use_uv_coords=use_uv_coords,
                    use_colors=use_colors,
                    )

    # Free the temporary mesh datablock created above.
    bpy.data.meshes.remove(mesh)

    return ret
| [
"admin@irradiate.net"
] | admin@irradiate.net |
95c9d7660a00b352864d548655eb1355d7f31051 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Quantization/trend_MovingAverage/cycle_7/ar_/test_artificial_32_Quantization_MovingAverage_7__100.py | 8305723ff3d368d8e41b3fd67b43c64a43216550 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 276 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
af4a3ca0f095c64aec2861072f8df4e2f3cc359f | 87a1e3066bfccadc8ce7695324708a91348d724d | /KataTDD/conjunto.py | a47c2c32642fc6e7814c1d564eed6576dc93330c | [] | no_license | rcallea/KataTDD | 6ae9f45f0b6ab4ee918f5c54e4e22ee083df8eaf | dd6978775439363b6ae62e5a4f0e7b4fa0d7f62a | refs/heads/master | 2022-12-04T08:44:35.721167 | 2020-08-20T23:42:24 | 2020-08-20T23:42:24 | 277,676,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | class Conjunto:
def __init__(self, conjunto):
    """Store the collection of numbers whose statistics will be computed."""
    self.__conjunto=conjunto
def promedio(self):
    """Return the arithmetic mean of the collection, or None when it is empty."""
    valores = self.__conjunto
    if not valores:
        return None
    return sum(valores) / len(valores)
| [
"cr.calle@uniandes.edu.co"
] | cr.calle@uniandes.edu.co |
44694de6f6c4804e25e7793cec7ac706fe05abc3 | a85d1d6a54c8c143d0d64f02b80c54aba78b3a84 | /0928/함수3_지역,전역.py | ebd72e7617e8046ac1ce89eaaf84fadcb2d2ee5c | [] | no_license | w51w/python | 30007548ba19076285954099125f42bc63a3d204 | bc556a520ad0a9d99b5445fc92113c4afa83b4c2 | refs/heads/master | 2023-01-28T17:37:15.344106 | 2020-12-06T14:56:47 | 2020-12-06T15:42:40 | 308,628,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | '''
def my_func(param):
param = "함수 안에서 생성"
print(param)
param ="함수 밖에서 생성"
my_func(param)
print(param)
'''
def my_func():
    # 'global' makes the assignment below rebind the module-level 'param'
    # instead of creating a new function-local variable.
    global param
    param = '함수 안에서 변경'
    print(param)

param='함수 밖에서 생성'
print(param)  # prints the module-level value set just above
my_func()     # rebinds the global and prints the new value
print(param)  # the change made inside my_func() is visible here
"w51w@naver.com"
] | w51w@naver.com |
00a2666812ff6a1b59fbd5d9775be83fddc6d86e | 1bb422172b8775b638957a0360785c1a03e76469 | /FireDevs/wsgi.py | 131bc04795420aea9621cf9ccebc2d27c8619f60 | [] | no_license | hectorarem/UniTest | 5f5fb502d3b23e883b514f4cab914fb3bf0a5d70 | ef7063ba9c57256414cf646f68f9d1b6e22ae88c | refs/heads/main | 2023-07-15T15:31:04.480593 | 2021-08-26T07:28:54 | 2021-08-26T07:28:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for FireDevs project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Select the project's settings module unless the environment already did.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FireDevs.settings')

# Module-level WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"30632801+hectorarem@users.noreply.github.com"
] | 30632801+hectorarem@users.noreply.github.com |
378d547a76b10d0503d9da30eb4190b856416529 | ffd70ccca7105fce363b14a2c60bc8acc227aa07 | /ros/src/rosserial_python/nodes/serial_node.py | 1d466b6f0d303b8d7ab538dae35958b43aedad89 | [
"MIT"
] | permissive | mnbf9rca/simple_robot_arm_with_ros | eaa1658a788b36d685328b91a63a5198e4fcce98 | 2be64c760831bf51428eec6b7fd290001d6600fb | refs/heads/master | 2021-07-14T13:04:41.674141 | 2019-10-15T20:56:58 | 2019-10-15T20:56:58 | 213,063,834 | 5 | 2 | MIT | 2021-07-07T09:56:48 | 2019-10-05T20:03:52 | C++ | UTF-8 | Python | false | false | 3,972 | py | #!/usr/bin/env python
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import rospy
from rosserial_python import SerialClient, RosSerialServer
from serial import SerialException
from time import sleep
import multiprocessing
import sys
if __name__=="__main__":
    rospy.init_node("serial_node")
    rospy.loginfo("ROS Serial Python Node")

    # Connection parameters (private ROS params with defaults).
    port_name = rospy.get_param('~port','/dev/ttyUSB0')
    baud = int(rospy.get_param('~baud','57600'))

    # for systems where pyserial yields errors in the fcntl.ioctl(self.fd, TIOCMBIS, \
    # TIOCM_DTR_str) line, which causes an IOError, when using simulated port
    fix_pyserial_for_test = rospy.get_param('~fix_pyserial_for_test', False)

    # TODO: should these really be global?
    tcp_portnum = int(rospy.get_param('/rosserial_embeddedlinux/tcp_port', '11411'))
    fork_server = rospy.get_param('/rosserial_embeddedlinux/fork_server', False)

    # TODO: do we really want command line params in addition to parameter server params?
    sys.argv = rospy.myargv(argv=sys.argv)
    if len(sys.argv) >= 2 :
        port_name = sys.argv[1]
    if len(sys.argv) == 3 :
        tcp_portnum = int(sys.argv[2])

    if port_name == "tcp" :
        # TCP mode: act as a server and wait for rosserial clients to connect.
        server = RosSerialServer(tcp_portnum, fork_server)
        rospy.loginfo("Waiting for socket connections on port %d" % tcp_portnum)
        try:
            server.listen()
        except KeyboardInterrupt:
            rospy.loginfo("got keyboard interrupt")
        finally:
            # Make sure any forked per-client processes are cleaned up.
            rospy.loginfo("Shutting down")
            for process in multiprocessing.active_children():
                rospy.loginfo("Shutting down process %r", process)
                process.terminate()
                process.join()
            rospy.loginfo("All done")
    else :          # Use serial port
        while not rospy.is_shutdown():
            rospy.loginfo("Connecting to %s at %d baud" % (port_name,baud) )
            try:
                client = SerialClient(port_name, baud, fix_pyserial_for_test=fix_pyserial_for_test)
                client.run()
            except KeyboardInterrupt:
                break
            except SerialException:
                # Device disappeared or failed to open; retry after a pause.
                sleep(1.0)
                continue
            except OSError:
                sleep(1.0)
                continue
| [
"robert.aleck@cynexia.com"
] | robert.aleck@cynexia.com |
f5c302c8fab9cfc3911bce34710d1eb2301119cf | ba9d15e50b88284868c04f516d240491bb2cf581 | /contact/views.py | fbc3d864fadba020fb10009b863c272705e3b6eb | [] | no_license | suhrobkamolov/mysite | 14aadf5dea041d33ced29fba106e2b8e174eb8ed | 55f105a95fbef52926ce77c8f7ecf32c4c0ba223 | refs/heads/master | 2021-05-11T20:34:52.559829 | 2018-02-10T16:46:15 | 2018-02-10T16:46:15 | 117,432,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | from django.shortcuts import render
from django.conf import settings
from django.core.mail import send_mail
from .forms import contactForm
from products.models import Category, Product
def contact(request):
    """Render the contact page and e-mail the visitor's message on a valid POST.

    GET (or invalid POST) re-displays the bound/unbound form; a valid POST
    sends the mail and shows a confirmation message instead of the form.
    """
    categories = Category.objects.all()
    products = Product.objects.all()
    title = 'Feel free to contact us@@'
    # 'request.POST or None' leaves the form unbound on GET requests.
    form = contactForm(request.POST or None)
    confirm_message = None

    if form.is_valid():
        name = form.cleaned_data['name']
        comment = form.cleaned_data['comment']
        subject = 'Message from Mehvar Co'
        message = '%s %s' % (comment, name)
        emailFrom = form.cleaned_data['email']
        emailTo = [settings.EMAIL_HOST_USER]
        # fail_silently=True: delivery errors are swallowed; the user still
        # sees the confirmation below.
        send_mail(subject, message, emailFrom, emailTo, fail_silently=True)
        title = 'Thanks!!!'
        confirm_message = 'We will contact you soon.'
        form = None  # hide the form after a successful submission

    context = {'title': title, 'form': form, 'confirm_message': confirm_message, 'categories': categories, 'products': products, }
    return render(request, 'contact/contact.html', context)
| [
"samiallohjon@gmail.com"
] | samiallohjon@gmail.com |
b2df34bae4ca7b6d93a0dabd395ff329c31172d0 | 27b6ace01668745a4daeba3acf8ff93a5c68fd71 | /raiting/src/algorithm/coodinate_descent.py | 8bf32ff6c6928036ac22f1142a8ddbbc5335f04a | [] | no_license | o-ham5/fuk-food | 42216a8d8ba4cc3d4896c6352dde5174c9aa33d5 | daec364f4ea8f9b0a110369eb1b2606d4cc8c584 | refs/heads/master | 2022-03-27T09:20:06.355461 | 2019-12-27T13:07:23 | 2019-12-27T13:07:23 | 206,522,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | from numpy import mean
from tqdm import tqdm
def coodinate_descent(users, rests):
    """Jointly estimate restaurant scores and per-user rating habits.

    Alternates (coordinate descent, N_ROOP rounds) between:
      * per-user updates: ``bias`` (mean residual of the user's ratings
        versus the current restaurant estimates) and ``variance`` (mean
        squared residual after removing the bias), and
      * per-restaurant updates: ``esti_eval`` as the precision-weighted
        average of de-biased user scores.

    Mutates ``users`` and ``rests`` in place; returns None.
    Assumes user.evals maps rest -> score and rest.user_set lists raters
    — TODO confirm against the callers.
    """
    N_ROOP = 10
    for roop in tqdm(range(N_ROOP)):
        for user in users:
            # update bias: average gap between this user's scores and the
            # current restaurant estimates.
            user.bias = mean(
                [eval_score - rest.esti_eval for rest, eval_score in user.evals.items()]
            )
            # update variance: mean squared residual after bias removal.
            # BUGFIX: the residual must be squared as a whole; the previous
            # code computed eval_score - (esti_eval + bias)**2, which is not
            # a variance and could go negative (hence the 1e-5 clamp below).
            user.variance = mean(
                [(eval_score - (rest.esti_eval + user.bias)) ** 2
                 for rest, eval_score in user.evals.items()]
            )
            # Floor the variance so the 1/variance weights below stay finite.
            user.variance = max(user.variance, 1e-5)
        for rest in rests:
            # update eval: precision-weighted (1/variance) mean of the
            # de-biased scores from every user who rated this restaurant.
            tmp_a = sum((user.evals[rest] - user.bias)/user.variance for user in rest.user_set)
            tmp_b = sum(1/user.variance for user in rest.user_set)
            rest.esti_eval = tmp_a / tmp_b
| [
"n-tateiwa@math.kyushu-u.ac.jp"
] | n-tateiwa@math.kyushu-u.ac.jp |
be9a4323d90dc5b6aaa9152fc4175685fbdcac82 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/224/usersdata/355/112710/submittedfiles/gravitacional.py | ccbd7077d25107bafd633103a3d44cf26cf3d352 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | # -*- coding: utf-8 -*-
import numpy as np
import funcoes
# INPUT
dimensao = int(input('Digite a dimensao das matrizes: '))
matrizA = input('Digite a Matriz A como uma única linha: ')
matrizD = input('Digite a Matriz D como uma única linha: ')
alfa = int(input('Digite o valor de alfa: '))
# PREPARE THE INPUT
T = np.zeros((dimensao,dimensao))
A = np.fromstring(matrizA, sep=' ').reshape(dimensao, dimensao)
d = np.fromstring(matrizD, sep=' ').reshape(dimensao, dimensao)
# start here...
# BEGIN
# NOTE(review): the original indentation was lost in extraction; the loop
# nesting below is a reconstruction — verify against the intended algorithm.
# NOTE(review): 'soma' is never reset, so 'a' holds cumulative column sums
# and 'o' keeps accumulating from a's final total — presumably a bug in the
# gravity-model attraction/emission totals; confirm before relying on output.
a=[]
soma=0
for j in range(0,A.shape[1],1):
    for i in range(0,A.shape[0],1):
        soma=soma+A[i,j]
    a.append(soma)
o=[]
for i in range(0,A.shape[0],1):
    for j in range(0,A.shape[1],1):
        soma=soma+A[i,j]
    o.append(soma)
somat=0
for i in range(0,A.shape[0],1):
    for j in range(0,A.shape[1],1):
        for k in range(0,(A.shape[1])-1,1):
            if d[i,k]!=0:
                somat=(a[k]*(1/d[i,k]))+somat
        if d[i,j]!=0:
            T[i,j]=(o[i]*(a[j]*(1/d[i,j]**alfa))/somat)
# OUTPUT
somatorio = sum(sum(T))
print('%.4f' % somatorio)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
5da9a91cdf3b9f0a9ad1c2ea3a9fb2259797f969 | a5cd49415fd916afe20abc4662cf1096879ec21a | /fabulous/grapefruit.py | 02e3e9e8845dfbc8f9f147ec70272a656ec25b2d | [
"Apache-2.0"
] | permissive | Jacob-Mowat/fabulous | 112b2b737b86ce1abd8fe28c682c5e8ea7314744 | d30e12181b2aae09e442641db8e3f46236fda42b | refs/heads/master | 2020-04-08T05:17:16.696731 | 2018-11-25T17:23:08 | 2018-11-25T17:23:08 | 159,054,542 | 1 | 0 | Apache-2.0 | 2018-11-25T17:01:10 | 2018-11-25T17:01:10 | null | UTF-8 | Python | false | false | 55,618 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 The Fabulous Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
fabulous.grapefruit
~~~~~~~~~~~~~~~~~~~
The grapefruit provides routines for color manipulation.
This module is a bundled version of the grapefruit_ library.
.. _grapefruit: https://github.com/xav/grapefruit
"""
from __future__ import division
import sys
# $Id$
__author__ = 'Xavier Basty <xbasty@gmail.com>'
__version__ = '0.1a3'
# The default white reference, use 2° Standard Observer, D65 (daylight)
_DEFAULT_WREF = (0.95043, 1.00000, 1.08890)
_oneThird = 1.0 / 3
_srgbGammaCorrInv = 0.03928 / 12.92
_sixteenHundredsixteenth = 16.0 / 116
_RybWheel = (
0, 26, 52,
83, 120, 130,
141, 151, 162,
177, 190, 204,
218, 232, 246,
261, 275, 288,
303, 317, 330,
338, 345, 352,
360)
_RgbWheel = (
0, 8, 17,
26, 34, 41,
48, 54, 60,
81, 103, 123,
138, 155, 171,
187, 204, 219,
234, 251, 267,
282, 298, 329,
360)
class Color:
'''Hold a color value.
Example usage:
To create an instance of the grapefruit.Color from RGB values:
>>> from fabulous import grapefruit
>>> r, g, b = 1, 0.5, 0
>>> col = grapefruit.Color.NewFromRgb(r, g, b)
To get the values of the color in another colorspace:
>>> h, s, v = col.hsv
>>> l, a, b = col.lab
To get the complementary of a color:
>>> compl = col.ComplementaryColor(mode='rgb')
>>> print(compl.hsl)
(210.0, 1.0, 0.5)
To directly convert RGB values to their HSL equivalent:
>>> h, s, l = Color.RgbToHsl(r, g, b)
'''
WHITE_REFERENCE = {
'std_A' : (1.09847, 1.00000, 0.35582),
'std_B' : (0.99093, 1.00000, 0.85313),
'std_C' : (0.98071, 1.00000, 1.18225),
'std_D50' : (0.96421, 1.00000, 0.82519),
'std_D55' : (0.95680, 1.00000, 0.92148),
'std_D65' : (0.95043, 1.00000, 1.08890),
'std_D75' : (0.94972, 1.00000, 1.22639),
'std_E' : (1.00000, 1.00000, 1.00000),
'std_F1' : (0.92834, 1.00000, 1.03665),
'std_F2' : (0.99145, 1.00000, 0.67316),
'std_F3' : (1.03753, 1.00000, 0.49861),
'std_F4' : (1.09147, 1.00000, 0.38813),
'std_F5' : (0.90872, 1.00000, 0.98723),
'std_F6' : (0.97309, 1.00000, 0.60191),
'std_F7' : (0.95017, 1.00000, 1.08630),
'std_F8' : (0.96413, 1.00000, 0.82333),
'std_F9' : (1.00365, 1.00000, 0.67868),
'std_F10' : (0.96174, 1.00000, 0.81712),
'std_F11' : (1.00899, 1.00000, 0.64262),
'std_F12' : (1.08046, 1.00000, 0.39228),
'sup_A' : (1.11142, 1.00000, 0.35200),
'sup_B' : (0.99178, 1.00000, 0.84349),
'sup_C' : (0.97286, 1.00000, 1.16145),
'sup_D50' : (0.96721, 1.00000, 0.81428),
'sup_D55' : (0.95797, 1.00000, 0.90925),
'sup_D65' : (0.94810, 1.00000, 1.07305),
'sup_D75' : (0.94417, 1.00000, 1.20643),
'sup_E' : (1.00000, 1.00000, 1.00000),
'sup_F1' : (0.94791, 1.00000, 1.03191),
'sup_F2' : (1.03245, 1.00000, 0.68990),
'sup_F3' : (1.08968, 1.00000, 0.51965),
'sup_F4' : (1.14961, 1.00000, 0.40963),
'sup_F5' : (0.93369, 1.00000, 0.98636),
'sup_F6' : (1.02148, 1.00000, 0.62074),
'sup_F7' : (0.95780, 1.00000, 1.07618),
'sup_F8' : (0.97115, 1.00000, 0.81135),
'sup_F9' : (1.02116, 1.00000, 0.67826),
'sup_F10' : (0.99001, 1.00000, 0.83134),
'sup_F11' : (1.03820, 1.00000, 0.65555),
'sup_F12' : (1.11428, 1.00000, 0.40353)}
NAMED_COLOR = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgreen': '#90ee90',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32'}
def __init__(self, values, mode='rgb', alpha=1.0, wref=_DEFAULT_WREF):
    '''Instantiate a new grapefruit.Color object.

    Parameters:
      :values:
        The values of this color, in the specified representation.
      :mode:
        The representation mode used for values ('rgb' or 'hsl').
      :alpha:
        the alpha value (transparency) of this color.
      :wref:
        The whitepoint reference, default is 2° D65.

    '''
    if not(isinstance(values, tuple)):
        raise TypeError('values must be a tuple')

    # Both the RGB and HSL representations are stored, so conversions are
    # done once at construction time.
    if mode=='rgb':
        self.__rgb = values
        self.__hsl = Color.RgbToHsl(*values)
    elif mode=='hsl':
        self.__hsl = values
        self.__rgb = Color.HslToRgb(*values)
    else:
        raise ValueError('Invalid color mode: ' + mode)

    self.__a = alpha
    self.__wref = wref
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
    # Colors compare equal on their RGB channels plus alpha; the white
    # reference is deliberately ignored. Any 4-item sequence holding the
    # same values also compares equal; anything else is unequal rather
    # than raising (hence the broad TypeError/AttributeError catches).
    try:
      if isinstance(other, Color):
        return (self.__rgb==other.__rgb) and (self.__a==other.__a)
      if len(other) != 4:
        return False
      return list(self.__rgb + (self.__a,)) == list(other)
    except TypeError:
      return False
    except AttributeError:
      return False
def __repr__(self):
    # repr is the plain (r, g, b, a) tuple text, without %g formatting.
    return str(self.__rgb + (self.__a,))

def __str__(self):
    '''A string representation of this grapefruit.Color instance.

    Returns:
      The RGBA representation of this grapefruit.Color instance,
      formatted as '(r, g, b, a)' with %g (trailing zeros dropped).

    '''
    return '(%g, %g, %g, %g)' % (self.__rgb + (self.__a,))
if sys.version_info[0] < 3:
    # Python 2 only: support unicode(color).
    def __unicode__(self):
        '''A unicode string representation of this grapefruit.Color instance.

        Returns:
          The RGBA representation of this grapefruit.Color instance.

        '''
        # BUGFIX: the format string was missing its opening parenthesis
        # ('%g, %g, %g, %g)'), making unicode(color) inconsistent with
        # str(color), which produces '(r, g, b, a)'.
        return unicode('(%g, %g, %g, %g)') % (self.__rgb + (self.__a,))
def __iter__(self):
    # Iterate as (r, g, b, a), so tuple(color) and unpacking work.
    return iter(self.__rgb + (self.__a,))

def __len__(self):
    # A color always exposes exactly four components: r, g, b and alpha.
    return 4
def __GetIsLegal(self):
    # Legal means every component (r, g, b and alpha) lies within [0, 1].
    return all(0.0 <= v <= 1.0 for v in self)

isLegal = property(fget=__GetIsLegal, doc='Boolean indicating whether the color is within the legal gamut.')
def __GetNearestLegal(self):
    # Clamp each of the four components (r, g, b, a) into [0, 1] and build
    # a new color from the result.
    clamped = [min(max(component, 0.0), 1.0) for component in self]
    return Color.NewFromRgb(*clamped)

nearestLegal = property(fget=__GetNearestLegal, doc='The nearest legal color.')
@staticmethod
def RgbToHsl(r, g, b):
'''Convert the color from RGB coordinates to HSL.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (h, s, l) tuple in the range:
h[0...360],
s[0...1],
l[0...1]
>>> Color.RgbToHsl(1, 0.5, 0)
(30.0, 1.0, 0.5)
'''
minVal = min(r, g, b) # min RGB value
maxVal = max(r, g, b) # max RGB value
l = (maxVal + minVal) / 2.0
if minVal==maxVal:
return (0.0, 0.0, l) # achromatic (gray)
d = maxVal - minVal # delta RGB value
if l < 0.5: s = d / (maxVal + minVal)
else: s = d / (2.0 - maxVal - minVal)
dr, dg, db = [(maxVal-val) / d for val in (r, g, b)]
if r==maxVal:
h = db - dg
elif g==maxVal:
h = 2.0 + dr - db
else:
h = 4.0 + dg - dr
h = (h*60.0) % 360.0
return (h, s, l)
@staticmethod
def _HueToRgb(n1, n2, h):
h %= 6.0
if h < 1.0: return n1 + ((n2-n1) * h)
if h < 3.0: return n2
if h < 4.0: return n1 + ((n2-n1) * (4.0 - h))
return n1
@staticmethod
def HslToRgb(h, s, l):
'''Convert the color from HSL coordinates to RGB.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HslToRgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
'''
if s==0: return (l, l, l) # achromatic (gray)
if l<0.5: n2 = l * (1.0 + s)
else: n2 = l+s - (l*s)
n1 = (2.0 * l) - n2
h /= 60.0
hueToRgb = Color._HueToRgb
r = hueToRgb(n1, n2, h + 2)
g = hueToRgb(n1, n2, h)
b = hueToRgb(n1, n2, h - 2)
return (r, g, b)
@staticmethod
def RgbToHsv(r, g, b):
'''Convert the color from RGB coordinates to HSV.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (h, s, v) tuple in the range:
h[0...360],
s[0...1],
v[0...1]
>>> Color.RgbToHsv(1, 0.5, 0)
(30.0, 1.0, 1.0)
'''
v = float(max(r, g, b))
d = v - min(r, g, b)
if d==0: return (0.0, 0.0, v)
s = d / v
dr, dg, db = [(v - val) / d for val in (r, g, b)]
if r==v:
h = db - dg # between yellow & magenta
elif g==v:
h = 2.0 + dr - db # between cyan & yellow
else: # b==v
h = 4.0 + dg - dr # between magenta & cyan
h = (h*60.0) % 360.0
return (h, s, v)
@staticmethod
def HsvToRgb(h, s, v):
'''Convert the color from RGB coordinates to HSV.
Parameters:
:h:
The Hus component value [0...1]
:s:
The Saturation component value [0...1]
:v:
The Value component [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.HslToRgb(30.0, 1.0, 0.5)
(1.0, 0.5, 0.0)
'''
if s==0: return (v, v, v) # achromatic (gray)
h /= 60.0
h = h % 6.0
i = int(h)
f = h - i
if not(i&1): f = 1-f # if i is even
m = v * (1.0 - s)
n = v * (1.0 - (s * f))
if i==0: return (v, n, m)
if i==1: return (n, v, m)
if i==2: return (m, v, n)
if i==3: return (m, n, v)
if i==4: return (n, m, v)
return (v, m, n)
@staticmethod
def RgbToYiq(r, g, b):
'''Convert the color from RGB to YIQ.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % Color.RgbToYiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)'
'''
y = (r * 0.29895808) + (g * 0.58660979) + (b *0.11443213)
i = (r * 0.59590296) - (g * 0.27405705) - (b *0.32184591)
q = (r * 0.21133576) - (g * 0.52263517) + (b *0.31129940)
return (y, i, q)
@staticmethod
def YiqToRgb(y, i, q):
'''Convert the color from YIQ coordinates to RGB.
Parameters:
:y:
Tte Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.YiqToRgb(0.592263, 0.458874, -0.0499818)
'(1, 0.5, 5.442e-07)'
'''
r = y + (i * 0.9562) + (q * 0.6210)
g = y - (i * 0.2717) - (q * 0.6485)
b = y - (i * 1.1053) + (q * 1.7020)
return (r, g, b)
@staticmethod
def RgbToYuv(r, g, b):
'''Convert the color from RGB coordinates to YUV.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, u, v) tuple in the range:
y[0...1],
u[-0.436...0.436],
v[-0.615...0.615]
>>> '(%g, %g, %g)' % Color.RgbToYuv(1, 0.5, 0)
'(0.5925, -0.29156, 0.357505)'
'''
y = (r * 0.29900) + (g * 0.58700) + (b * 0.11400)
u = -(r * 0.14713) - (g * 0.28886) + (b * 0.43600)
v = (r * 0.61500) - (g * 0.51499) - (b * 0.10001)
return (y, u, v)
@staticmethod
def YuvToRgb(y, u, v):
'''Convert the color from YUV coordinates to RGB.
Parameters:
:y:
The Y component value [0...1]
:u:
The U component value [-0.436...0.436]
:v:
The V component value [-0.615...0.615]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.YuvToRgb(0.5925, -0.2916, 0.3575)
'(0.999989, 0.500015, -6.3276e-05)'
'''
r = y + (v * 1.13983)
g = y - (u * 0.39465) - (v * 0.58060)
b = y + (u * 2.03211)
return (r, g, b)
@staticmethod
def RgbToXyz(r, g, b):
'''Convert the color from sRGB to CIE XYZ.
The methods assumes that the RGB coordinates are given in the sRGB
colorspace (D65).
.. note::
Compensation for the sRGB gamma correction is applied before converting.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (x, y, z) tuple in the range:
x[0...1],
y[0...1],
z[0...1]
>>> '(%g, %g, %g)' % Color.RgbToXyz(1, 0.5, 0)
'(0.488941, 0.365682, 0.0448137)'
'''
r, g, b = [((v <= 0.03928) and [v / 12.92] or [((v+0.055) / 1.055) **2.4])[0] for v in (r, g, b)]
x = (r * 0.4124) + (g * 0.3576) + (b * 0.1805)
y = (r * 0.2126) + (g * 0.7152) + (b * 0.0722)
z = (r * 0.0193) + (g * 0.1192) + (b * 0.9505)
return (x, y, z)
@staticmethod
def XyzToRgb(x, y, z):
'''Convert the color from CIE XYZ coordinates to sRGB.
.. note::
Compensation for sRGB gamma correction is applied before converting.
Parameters:
:x:
The X component value [0...1]
:y:
The Y component value [0...1]
:z:
The Z component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.XyzToRgb(0.488941, 0.365682, 0.0448137)
'(1, 0.5, 6.81883e-08)'
'''
r = (x * 3.2406255) - (y * 1.5372080) - (z * 0.4986286)
g = -(x * 0.9689307) + (y * 1.8757561) + (z * 0.0415175)
b = (x * 0.0557101) - (y * 0.2040211) + (z * 1.0569959)
return tuple((((v <= _srgbGammaCorrInv) and [v * 12.92] or [(1.055 * (v ** (1/2.4))) - 0.055])[0] for v in (r, g, b)))
@staticmethod
def XyzToLab(x, y, z, wref=_DEFAULT_WREF):
'''Convert the color from CIE XYZ to CIE L*a*b*.
Parameters:
:x:
The X component value [0...1]
:y:
The Y component value [0...1]
:z:
The Z component value [0...1]
:wref:
The whitepoint reference, default is 2° D65.
Returns:
The color as an (L, a, b) tuple in the range:
L[0...100],
a[-1...1],
b[-1...1]
>>> '(%g, %g, %g)' % Color.XyzToLab(0.488941, 0.365682, 0.0448137)
'(66.9518, 0.43084, 0.739692)'
>>> '(%g, %g, %g)' % Color.XyzToLab(0.488941, 0.365682, 0.0448137, Color.WHITE_REFERENCE['std_D50'])
'(66.9518, 0.411663, 0.67282)'
'''
# White point correction
x /= wref[0]
y /= wref[1]
z /= wref[2]
# Nonlinear distortion and linear transformation
x, y, z = [((v > 0.008856) and [v**_oneThird] or [(7.787 * v) + _sixteenHundredsixteenth])[0] for v in (x, y, z)]
# Vector scaling
l = (116 * y) - 16
a = 5.0 * (x - y)
b = 2.0 * (y - z)
return (l, a, b)
@staticmethod
def LabToXyz(l, a, b, wref=_DEFAULT_WREF):
'''Convert the color from CIE L*a*b* to CIE 1931 XYZ.
Parameters:
:l:
The L component [0...100]
:a:
The a component [-1...1]
:b:
The a component [-1...1]
:wref:
The whitepoint reference, default is 2° D65.
Returns:
The color as an (x, y, z) tuple in the range:
x[0...q],
y[0...1],
z[0...1]
>>> '(%g, %g, %g)' % Color.LabToXyz(66.9518, 0.43084, 0.739692)
'(0.488941, 0.365682, 0.0448137)'
>>> '(%g, %g, %g)' % Color.LabToXyz(66.9518, 0.411663, 0.67282, Color.WHITE_REFERENCE['std_D50'])
'(0.488941, 0.365682, 0.0448138)'
'''
y = (l + 16) / 116
x = (a / 5.0) + y
z = y - (b / 2.0)
return tuple((((v > 0.206893) and [v**3] or [(v - _sixteenHundredsixteenth) / 7.787])[0] * w for v, w in zip((x, y, z), wref)))
@staticmethod
def CmykToCmy(c, m, y, k):
'''Convert the color from CMYK coordinates to CMY.
Parameters:
:c:
The Cyan component value [0...1]
:m:
The Magenta component value [0...1]
:y:
The Yellow component value [0...1]
:k:
The Black component value [0...1]
Returns:
The color as an (c, m, y) tuple in the range:
c[0...1],
m[0...1],
y[0...1]
>>> '(%g, %g, %g)' % Color.CmykToCmy(1, 0.32, 0, 0.5)
'(1, 0.66, 0.5)'
'''
mk = 1-k
return ((c*mk + k), (m*mk + k), (y*mk + k))
@staticmethod
def CmyToCmyk(c, m, y):
'''Convert the color from CMY coordinates to CMYK.
Parameters:
:c:
The Cyan component value [0...1]
:m:
The Magenta component value [0...1]
:y:
The Yellow component value [0...1]
Returns:
The color as an (c, m, y, k) tuple in the range:
c[0...1],
m[0...1],
y[0...1],
k[0...1]
>>> '(%g, %g, %g, %g)' % Color.CmyToCmyk(1, 0.66, 0.5)
'(1, 0.32, 0, 0.5)'
'''
k = min(c, m, y)
if k==1.0: return (0.0, 0.0, 0.0, 1.0)
mk = 1-k
return ((c-k) / mk, (m-k) / mk, (y-k) / mk, k)
@staticmethod
def RgbToCmy(r, g, b):
'''Convert the color from RGB coordinates to CMY.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (c, m, y) tuple in the range:
c[0...1],
m[0...1],
y[0...1]
>>> Color.RgbToCmy(1, 0.5, 0)
(0, 0.5, 1)
'''
return (1-r, 1-g, 1-b)
@staticmethod
def CmyToRgb(c, m, y):
'''Convert the color from CMY coordinates to RGB.
Parameters:
:c:
The Cyan component value [0...1]
:m:
The Magenta component value [0...1]
:y:
The Yellow component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> Color.CmyToRgb(0, 0.5, 1)
(1, 0.5, 0)
'''
return (1-c, 1-m, 1-y)
@staticmethod
def RgbToIntTuple(r, g, b):
'''Convert the color from (r, g, b) to an int tuple.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...255],
g[0...2551],
b[0...2551]
>>> Color.RgbToIntTuple(1, 0.5, 0)
(255, 128, 0)
'''
return tuple(int(round(v*255)) for v in (r, g, b))
@staticmethod
def IntTupleToRgb(intTuple):
'''Convert a tuple of ints to (r, g, b).
Parameters:
The color as an (r, g, b) integer tuple in the range:
r[0...255],
g[0...255],
b[0...255]
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.IntTupleToRgb((255, 128, 0))
'(1, 0.501961, 0)'
'''
return tuple(v / 255 for v in intTuple)
@staticmethod
def RgbToHtml(r, g, b):
'''Convert the color from (r, g, b) to #RRGGBB.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
A CSS string representation of this color (#RRGGBB).
>>> Color.RgbToHtml(1, 0.5, 0)
'#ff8000'
'''
return '#%02x%02x%02x' % tuple((min(round(v*255), 255) for v in (r, g, b)))
@staticmethod
def HtmlToRgb(html):
'''Convert the HTML color to (r, g, b).
Parameters:
:html:
the HTML definition of the color (#RRGGBB or #RGB or a color name).
Returns:
The color as an (r, g, b) tuple in the range:
r[0...1],
g[0...1],
b[0...1]
Throws:
:ValueError:
If html is neither a known color name or a hexadecimal RGB
representation.
>>> '(%g, %g, %g)' % Color.HtmlToRgb('#ff8000')
'(1, 0.501961, 0)'
>>> '(%g, %g, %g)' % Color.HtmlToRgb('ff8000')
'(1, 0.501961, 0)'
>>> '(%g, %g, %g)' % Color.HtmlToRgb('#f60')
'(1, 0.4, 0)'
>>> '(%g, %g, %g)' % Color.HtmlToRgb('f60')
'(1, 0.4, 0)'
>>> '(%g, %g, %g)' % Color.HtmlToRgb('lemonchiffon')
'(1, 0.980392, 0.803922)'
'''
html = html.strip().lower()
if html[0]=='#':
html = html[1:]
elif html in Color.NAMED_COLOR:
html = Color.NAMED_COLOR[html][1:]
if len(html)==6:
rgb = html[:2], html[2:4], html[4:]
elif len(html)==3:
rgb = ['%c%c' % (v,v) for v in html]
else:
raise ValueError('input #%s is not in #RRGGBB format' % html)
return tuple(((int(n, 16) / 255.0) for n in rgb))
@staticmethod
def RgbToPil(r, g, b):
'''Convert the color from RGB to a PIL-compatible integer.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
A PIL compatible integer (0xBBGGRR).
>>> '0x%06x' % Color.RgbToPil(1, 0.5, 0)
'0x0080ff'
'''
r, g, b = [min(int(round(v*255)), 255) for v in (r, g, b)]
return (b << 16) + (g << 8) + r
@staticmethod
def PilToRgb(pil):
'''Convert the color from a PIL-compatible integer to RGB.
Parameters:
pil: a PIL compatible color representation (0xBBGGRR)
Returns:
The color as an (r, g, b) tuple in the range:
the range:
r: [0...1]
g: [0...1]
b: [0...1]
>>> '(%g, %g, %g)' % Color.PilToRgb(0x0080ff)
'(1, 0.501961, 0)'
'''
r = 0xff & pil
g = 0xff & (pil >> 8)
b = 0xff & (pil >> 16)
return tuple((v / 255.0 for v in (r, g, b)))
@staticmethod
def _WebSafeComponent(c, alt=False):
'''Convert a color component to its web safe equivalent.
Parameters:
:c:
The component value [0...1]
:alt:
If True, return the alternative value instead of the nearest one.
Returns:
The web safe equivalent of the component value.
'''
# This sucks, but floating point between 0 and 1 is quite fuzzy...
# So we just change the scale a while to make the equality tests
# work, otherwise it gets wrong at some decimal far to the right.
sc = c * 100.0
# If the color is already safe, return it straight away
d = sc % 20
if d==0: return c
# Get the lower and upper safe values
l = sc - d
u = l + 20
# Return the 'closest' value according to the alt flag
if alt:
if (sc-l) >= (u-sc): return l/100.0
else: return u/100.0
else:
if (sc-l) >= (u-sc): return u/100.0
else: return l/100.0
@staticmethod
def RgbToWebSafe(r, g, b, alt=False):
'''Convert the color from RGB to 'web safe' RGB
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alt:
If True, use the alternative color instead of the nearest one.
Can be used for dithering.
Returns:
The color as an (r, g, b) tuple in the range:
the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.RgbToWebSafe(1, 0.55, 0.0)
'(1, 0.6, 0)'
'''
webSafeComponent = Color._WebSafeComponent
return tuple((webSafeComponent(v, alt) for v in (r, g, b)))
@staticmethod
def RgbToGreyscale(r, g, b):
'''Convert the color from RGB to its greyscale equivalent
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (r, g, b) tuple in the range:
the range:
r[0...1],
g[0...1],
b[0...1]
>>> '(%g, %g, %g)' % Color.RgbToGreyscale(1, 0.8, 0)
'(0.6, 0.6, 0.6)'
'''
v = (r + g + b) / 3.0
return (v, v, v)
@staticmethod
def RgbToRyb(hue):
'''Maps a hue on the RGB color wheel to Itten's RYB wheel.
Parameters:
:hue:
The hue on the RGB color wheel [0...360]
Returns:
An approximation of the corresponding hue on Itten's RYB wheel.
>>> Color.RgbToRyb(15)
26.0
'''
d = hue % 15
i = int(hue / 15)
x0 = _RybWheel[i]
x1 = _RybWheel[i+1]
return x0 + (x1-x0) * d / 15
@staticmethod
def RybToRgb(hue):
'''Maps a hue on Itten's RYB color wheel to the standard RGB wheel.
Parameters:
:hue:
The hue on Itten's RYB color wheel [0...360]
Returns:
An approximation of the corresponding hue on the standard RGB wheel.
>>> Color.RybToRgb(15)
8.0
'''
d = hue % 15
i = int(hue / 15)
x0 = _RgbWheel[i]
x1 = _RgbWheel[i+1]
return x0 + (x1-x0) * d / 15
@staticmethod
def NewFromRgb(r, g, b, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed RGB values.
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.NewFromRgb(1.0, 0.5, 0.0)
(1.0, 0.5, 0.0, 1.0)
>>> Color.NewFromRgb(1.0, 0.5, 0.0, 0.5)
(1.0, 0.5, 0.0, 0.5)
'''
return Color((r, g, b), 'rgb', alpha, wref)
@staticmethod
def NewFromHsl(h, s, l, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed HSL values.
Parameters:
:h:
The Hue component value [0...1]
:s:
The Saturation component value [0...1]
:l:
The Lightness component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5)
(1.0, 0.5, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5, 0.5)
(1.0, 0.5, 0.0, 0.5)
'''
return Color((h, s, l), 'hsl', alpha, wref)
@staticmethod
def NewFromHsv(h, s, v, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed HSV values.
Parameters:
:h:
The Hus component value [0...1]
:s:
The Saturation component value [0...1]
:v:
The Value component [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsv(30, 1, 1)
(1.0, 0.5, 0.0, 1.0)
>>> Color.NewFromHsv(30, 1, 1, 0.5)
(1.0, 0.5, 0.0, 0.5)
'''
h2, s, l = Color.RgbToHsl(*Color.HsvToRgb(h, s, v))
return Color((h, s, l), 'hsl', alpha, wref)
@staticmethod
def NewFromYiq(y, i, q, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed YIQ values.
Parameters:
:y:
The Y component value [0...1]
:i:
The I component value [0...1]
:q:
The Q component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromYiq(0.5922, 0.45885,-0.05))
'(0.999902, 0.499955, -6.6905e-05, 1)'
>>> str(Color.NewFromYiq(0.5922, 0.45885,-0.05, 0.5))
'(0.999902, 0.499955, -6.6905e-05, 0.5)'
'''
return Color(Color.YiqToRgb(y, i, q), 'rgb', alpha, wref)
@staticmethod
def NewFromYuv(y, u, v, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed YUV values.
Parameters:
:y:
The Y component value [0...1]
:u:
The U component value [-0.436...0.436]
:v:
The V component value [-0.615...0.615]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromYuv(0.5925, -0.2916, 0.3575))
'(0.999989, 0.500015, -6.3276e-05, 1)'
>>> str(Color.NewFromYuv(0.5925, -0.2916, 0.3575, 0.5))
'(0.999989, 0.500015, -6.3276e-05, 0.5)'
'''
return Color(Color.YuvToRgb(y, u, v), 'rgb', alpha, wref)
@staticmethod
def NewFromXyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromXyz(0.488941, 0.365682, 0.0448137))
'(1, 0.5, 6.81883e-08, 1)'
>>> str(Color.NewFromXyz(0.488941, 0.365682, 0.0448137, 0.5))
'(1, 0.5, 6.81883e-08, 0.5)'
'''
return Color(Color.XyzToRgb(x, y, z), 'rgb', alpha, wref)
@staticmethod
def NewFromLab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed CIE-LAB values.
Parameters:
:l:
The L component [0...100]
:a:
The a component [-1...1]
:b:
The a component [-1...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692))
'(1, 0.5, 1.09491e-08, 1)'
>>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, wref=Color.WHITE_REFERENCE['std_D50']))
'(1.01238, 0.492011, -0.14311, 1)'
>>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5))
'(1, 0.5, 1.09491e-08, 0.5)'
>>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5, Color.WHITE_REFERENCE['std_D50']))
'(1.01238, 0.492011, -0.14311, 0.5)'
'''
return Color(Color.XyzToRgb(*Color.LabToXyz(l, a, b, wref)), 'rgb', alpha, wref)
@staticmethod
def NewFromCmy(c, m, y, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed CMY values.
Parameters:
:c:
The Cyan component value [0...1]
:m:
The Magenta component value [0...1]
:y:
The Yellow component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.NewFromCmy(0, 0.5, 1)
(1, 0.5, 0, 1.0)
>>> Color.NewFromCmy(0, 0.5, 1, 0.5)
(1, 0.5, 0, 0.5)
'''
return Color(Color.CmyToRgb(c, m, y), 'rgb', alpha, wref)
@staticmethod
def NewFromCmyk(c, m, y, k, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed CMYK values.
Parameters:
:c:
The Cyan component value [0...1]
:m:
The Magenta component value [0...1]
:y:
The Yellow component value [0...1]
:k:
The Black component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromCmyk(1, 0.32, 0, 0.5))
'(0, 0.34, 0.5, 1)'
>>> str(Color.NewFromCmyk(1, 0.32, 0, 0.5, 0.5))
'(0, 0.34, 0.5, 0.5)'
'''
return Color(Color.CmyToRgb(*Color.CmykToCmy(c, m, y, k)), 'rgb', alpha, wref)
@staticmethod
def NewFromHtml(html, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed HTML color definition.
Parameters:
:html:
The HTML definition of the color (#RRGGBB or #RGB or a color name).
:alpha:
The color transparency [0...1], default is opaque.
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromHtml('#ff8000'))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromHtml('ff8000'))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromHtml('#f60'))
'(1, 0.4, 0, 1)'
>>> str(Color.NewFromHtml('f60'))
'(1, 0.4, 0, 1)'
>>> str(Color.NewFromHtml('lemonchiffon'))
'(1, 0.980392, 0.803922, 1)'
>>> str(Color.NewFromHtml('#ff8000', 0.5))
'(1, 0.501961, 0, 0.5)'
'''
return Color(Color.HtmlToRgb(html), 'rgb', alpha, wref)
@staticmethod
def NewFromPil(pil, alpha=1.0, wref=_DEFAULT_WREF):
'''Create a new instance based on the specifed PIL color.
Parameters:
:pil:
A PIL compatible color representation (0xBBGGRR)
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> str(Color.NewFromPil(0x0080ff))
'(1, 0.501961, 0, 1)'
>>> str(Color.NewFromPil(0x0080ff, 0.5))
'(1, 0.501961, 0, 0.5)'
'''
return Color(Color.PilToRgb(pil), 'rgb', alpha, wref)
  # Read-only properties exposing this color in every supported
  # representation. Each pair is a private getter plus a property() wired
  # to it; the getter names are kept because siblings reference some of
  # them directly (e.g. ColorWithWhiteRef calls self.__GetLAB()).
  def __GetAlpha(self):
    return self.__a
  alpha = property(fget=__GetAlpha, doc='The transparency of this color. 0.0 is transparent and 1.0 is fully opaque.')
  def __GetWRef(self):
    return self.__wref
  whiteRef = property(fget=__GetWRef, doc='the white reference point of this color.')
  def __GetRGB(self):
    return self.__rgb
  rgb = property(fget=__GetRGB, doc='The RGB values of this Color.')
  def __GetHue(self):
    return self.__hsl[0]
  hue = property(fget=__GetHue, doc='The hue of this color.')
  def __GetHSL(self):
    return self.__hsl
  hsl = property(fget=__GetHSL, doc='The HSL values of this Color.')
  def __GetHSV(self):
    # Recompute s and v from RGB, but keep the stored HSL hue so the hue
    # is stable for achromatic colors.
    h, s, v = Color.RgbToHsv(*self.__rgb)
    return (self.__hsl[0], s, v)
  hsv = property(fget=__GetHSV, doc='The HSV values of this Color.')
  def __GetYIQ(self):
    return Color.RgbToYiq(*self.__rgb)
  yiq = property(fget=__GetYIQ, doc='The YIQ values of this Color.')
  def __GetYUV(self):
    return Color.RgbToYuv(*self.__rgb)
  yuv = property(fget=__GetYUV, doc='The YUV values of this Color.')
  def __GetXYZ(self):
    return Color.RgbToXyz(*self.__rgb)
  xyz = property(fget=__GetXYZ, doc='The CIE-XYZ values of this Color.')
  def __GetLAB(self):
    # Conversion goes RGB -> XYZ -> L*a*b*, using this color's white point.
    return Color.XyzToLab(wref=self.__wref, *Color.RgbToXyz(*self.__rgb))
  lab = property(fget=__GetLAB, doc='The CIE-LAB values of this Color.')
  def __GetCMY(self):
    return Color.RgbToCmy(*self.__rgb)
  cmy = property(fget=__GetCMY, doc='The CMY values of this Color.')
  def __GetCMYK(self):
    return Color.CmyToCmyk(*Color.RgbToCmy(*self.__rgb))
  cmyk = property(fget=__GetCMYK, doc='The CMYK values of this Color.')
  def __GetIntTuple(self):
    return Color.RgbToIntTuple(*self.__rgb)
  intTuple = property(fget=__GetIntTuple, doc='This Color as a tuple of integers in the range [0...255]')
  def __GetHTML(self):
    return Color.RgbToHtml(*self.__rgb)
  html = property(fget=__GetHTML, doc='This Color as an HTML color definition.')
  def __GetPIL(self):
    return Color.RgbToPil(*self.__rgb)
  pil = property(fget=__GetPIL, doc='This Color as a PIL compatible value.')
  def __GetwebSafe(self):
    return Color.RgbToWebSafe(*self.__rgb)
  webSafe = property(fget=__GetwebSafe, doc='The web safe color nearest to this one (RGB).')
  def __GetGreyscale(self):
    # NOTE(review): uses the public self.rgb while every other getter uses
    # the private self.__rgb — equivalent today, but inconsistent.
    return Color.RgbToGreyscale(*self.rgb)
  greyscale = property(fget=__GetGreyscale, doc='The greyscale equivalent to this color (RGB).')
def ColorWithAlpha(self, alpha):
'''Create a new instance based on this one with a new alpha value.
Parameters:
:alpha:
The transparency of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromRgb(1.0, 0.5, 0.0, 1.0).ColorWithAlpha(0.5)
(1.0, 0.5, 0.0, 0.5)
'''
return Color(self.__rgb, 'rgb', alpha, self.__wref)
def ColorWithWhiteRef(self, wref, labAsRef=False):
'''Create a new instance based on this one with a new white reference.
Parameters:
:wref:
The whitepoint reference.
:labAsRef:
If True, the L*a*b* values of the current instance are used as reference
for the new color; otherwise, the RGB values are used as reference.
Returns:
A grapefruit.Color instance.
>>> c = Color.NewFromRgb(1.0, 0.5, 0.0, 1.0, Color.WHITE_REFERENCE['std_D65'])
>>> c2 = c.ColorWithWhiteRef(Color.WHITE_REFERENCE['sup_D50'])
>>> c2.rgb
(1.0, 0.5, 0.0)
>>> '(%g, %g, %g)' % c2.whiteRef
'(0.96721, 1, 0.81428)'
>>> c2 = c.ColorWithWhiteRef(Color.WHITE_REFERENCE['sup_D50'], labAsRef=True)
>>> '(%g, %g, %g)' % c2.rgb
'(1.01463, 0.490339, -0.148131)'
>>> '(%g, %g, %g)' % c2.whiteRef
'(0.96721, 1, 0.81428)'
>>> '(%g, %g, %g)' % c.lab
'(66.9518, 0.43084, 0.739692)'
>>> '(%g, %g, %g)' % c2.lab
'(66.9518, 0.43084, 0.739693)'
'''
if labAsRef:
l, a, b = self.__GetLAB()
return Color.NewFromLab(l, a, b, self.__a, wref)
else:
return Color(self.__rgb, 'rgb', self.__a, wref)
def ColorWithHue(self, hue):
'''Create a new instance based on this one with a new hue.
Parameters:
:hue:
The hue of the new color [0...360].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithHue(60)
(1.0, 1.0, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithHue(60).hsl
(60, 1, 0.5)
'''
h, s, l = self.__hsl
return Color((hue, s, l), 'hsl', self.__a, self.__wref)
def ColorWithSaturation(self, saturation):
'''Create a new instance based on this one with a new saturation value.
.. note::
The saturation is defined for the HSL mode.
Parameters:
:saturation:
The saturation of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithSaturation(0.5)
(0.75, 0.5, 0.25, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithSaturation(0.5).hsl
(30, 0.5, 0.5)
'''
h, s, l = self.__hsl
return Color((h, saturation, l), 'hsl', self.__a, self.__wref)
def ColorWithLightness(self, lightness):
'''Create a new instance based on this one with a new lightness value.
Parameters:
:lightness:
The lightness of the new color [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25)
(0.5, 0.25, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ColorWithLightness(0.25).hsl
(30, 1, 0.25)
'''
h, s, l = self.__hsl
return Color((h, s, lightness), 'hsl', self.__a, self.__wref)
def DarkerColor(self, level):
'''Create a new instance based on this one but darker.
Parameters:
:level:
The amount by which the color should be darkened to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).DarkerColor(0.25)
(0.5, 0.25, 0.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).DarkerColor(0.25).hsl
(30, 1, 0.25)
'''
h, s, l = self.__hsl
return Color((h, s, max(l - level, 0)), 'hsl', self.__a, self.__wref)
def LighterColor(self, level):
'''Create a new instance based on this one but lighter.
Parameters:
:level:
The amount by which the color should be lightened to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).LighterColor(0.25)
(1.0, 0.75, 0.5, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).LighterColor(0.25).hsl
(30, 1, 0.75)
'''
h, s, l = self.__hsl
return Color((h, s, min(l + level, 1)), 'hsl', self.__a, self.__wref)
def Saturate(self, level):
'''Create a new instance based on this one but more saturated.
Parameters:
:level:
The amount by which the color should be saturated to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 0.5, 0.5).Saturate(0.25)
(0.875, 0.5, 0.125, 1.0)
>>> Color.NewFromHsl(30, 0.5, 0.5).Saturate(0.25).hsl
(30, 0.75, 0.5)
'''
h, s, l = self.__hsl
return Color((h, min(s + level, 1), l), 'hsl', self.__a, self.__wref)
def Desaturate(self, level):
'''Create a new instance based on this one but less saturated.
Parameters:
:level:
The amount by which the color should be desaturated to produce
the new one [0...1].
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 0.5, 0.5).Desaturate(0.25)
(0.625, 0.5, 0.375, 1.0)
>>> Color.NewFromHsl(30, 0.5, 0.5).Desaturate(0.25).hsl
(30, 0.25, 0.5)
'''
h, s, l = self.__hsl
return Color((h, max(s - level, 0), l), 'hsl', self.__a, self.__wref)
def WebSafeDither(self):
'''Return the two websafe colors nearest to this one.
Returns:
A tuple of two grapefruit.Color instances which are the two
web safe colors closest this one.
>>> c = Color.NewFromRgb(1.0, 0.45, 0.0)
>>> c1, c2 = c.WebSafeDither()
>>> str(c1)
'(1, 0.4, 0, 1)'
>>> str(c2)
'(1, 0.6, 0, 1)'
'''
return (
Color(Color.RgbToWebSafe(*self.__rgb), 'rgb', self.__a, self.__wref),
Color(Color.RgbToWebSafe(alt=True, *self.__rgb), 'rgb', self.__a, self.__wref))
def Gradient(self, target, steps=100):
'''Create a list with the gradient colors between this and the other color.
Parameters:
:target:
The grapefruit.Color at the other end of the gradient.
:steps:
The number of gradients steps to create.
Returns:
A list of grapefruit.Color instances.
>>> c1 = Color.NewFromRgb(1.0, 0.0, 0.0, alpha=1)
>>> c2 = Color.NewFromRgb(0.0, 1.0, 0.0, alpha=0)
>>> c1.Gradient(c2, 3)
[(0.75, 0.25, 0.0, 0.75), (0.5, 0.5, 0.0, 0.5), (0.25, 0.75, 0.0, 0.25)]
'''
gradient = []
rgba1 = self.__rgb + (self.__a,)
rgba2 = target.__rgb + (target.__a,)
steps += 1
for n in range(1, steps):
d = 1.0*n/steps
r = (rgba1[0]*(1-d)) + (rgba2[0]*d)
g = (rgba1[1]*(1-d)) + (rgba2[1]*d)
b = (rgba1[2]*(1-d)) + (rgba2[2]*d)
a = (rgba1[3]*(1-d)) + (rgba2[3]*d)
gradient.append(Color((r, g, b), 'rgb', a, self.__wref))
return gradient
def ComplementaryColor(self, mode='ryb'):
'''Create a new instance which is the complementary color of this one.
Parameters:
:mode:
Select which color wheel to use for the generation (ryb/rgb).
Returns:
A grapefruit.Color instance.
>>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb')
(0.0, 0.5, 1.0, 1.0)
>>> Color.NewFromHsl(30, 1, 0.5).ComplementaryColor(mode='rgb').hsl
(210, 1, 0.5)
'''
h, s, l = self.__hsl
if mode == 'ryb': h = Color.RgbToRyb(h)
h = (h+180)%360
if mode == 'ryb': h = Color.RybToRgb(h)
return Color((h, s, l), 'hsl', self.__a, self.__wref)
def MonochromeScheme(self):
    '''Return 4 colors in the same hue with varying saturation/lightness.

    Returns:
      A tuple of 4 grapefruit.Color in the same hue as this one,
      with varying saturation/lightness.

    >>> c = Color.NewFromHsl(30, 0.5, 0.5)
    >>> ['(%g, %g, %g)' % clr.hsl for clr in c.MonochromeScheme()]
    ['(30, 0.2, 0.8)', '(30, 0.5, 0.3)', '(30, 0.2, 0.6)', '(30, 0.5, 0.8)']
    '''
    # Shift x away from `min`, wrapping upward by `plus` when x is within
    # `thres` of the minimum so values never collapse onto each other.
    def _wrap(x, min, thres, plus):
        if (x-min) < thres: return x + plus
        else: return x-min

    h, s, l = self.__hsl

    # Four saturation/lightness variants of the same hue.
    s1 = _wrap(s, 0.3, 0.1, 0.3)
    l1 = _wrap(l, 0.5, 0.2, 0.3)

    s2 = s
    l2 = _wrap(l, 0.2, 0.2, 0.6)

    s3 = s1
    l3 = max(0.2, l + (1-l)*0.2)

    s4 = s
    l4 = _wrap(l, 0.5, 0.2, 0.3)

    return (
        Color((h, s1, l1), 'hsl', self.__a, self.__wref),
        Color((h, s2, l2), 'hsl', self.__a, self.__wref),
        Color((h, s3, l3), 'hsl', self.__a, self.__wref),
        Color((h, s4, l4), 'hsl', self.__a, self.__wref))
def TriadicScheme(self, angle=120, mode='ryb'):
    '''Return two colors forming a triad or a split complementary with this one.

    Parameters:
      :angle:
        The angle between the hues of the created colors.
        The default value makes a triad.
      :mode:
        Select which color wheel to use for the generation (ryb/rgb).

    Returns:
      A tuple of two grapefruit.Color forming a color triad with
      this one or a split complementary.

    >>> c1 = Color.NewFromHsl(30, 1, 0.5)

    >>> c2, c3 = c1.TriadicScheme(mode='rgb')
    >>> c2.hsl
    (150.0, 1, 0.5)
    >>> c3.hsl
    (270.0, 1, 0.5)

    >>> c2, c3 = c1.TriadicScheme(angle=40, mode='rgb')
    >>> c2.hsl
    (190.0, 1, 0.5)
    >>> c3.hsl
    (230.0, 1, 0.5)
    '''
    h, s, l = self.__hsl
    # Cap at 120 (an equilateral triad) and halve: the two colors sit
    # angle/2 on either side of the complement.
    angle = min(angle, 120) / 2.0

    if mode == 'ryb': h = Color.RgbToRyb(h)
    # Start from the complementary hue, then spread by +/- angle.
    h += 180
    h1 = (h - angle) % 360
    h2 = (h + angle) % 360
    if mode == 'ryb':
        h1 = Color.RybToRgb(h1)
        h2 = Color.RybToRgb(h2)

    return (
        Color((h1, s, l), 'hsl', self.__a, self.__wref),
        Color((h2, s, l), 'hsl', self.__a, self.__wref))
def TetradicScheme(self, angle=30, mode='ryb'):
    '''Return three colors forming a tetrad with this one.

    Parameters:
      :angle:
        The angle to subtract from the adjacent colors hues [-90...90].
        You can use an angle of zero to generate a square tetrad.
      :mode:
        Select which color wheel to use for the generation (ryb/rgb).

    Returns:
      A tuple of three grapefruit.Color forming a color tetrad with
      this one.

    >>> col = Color.NewFromHsl(30, 1, 0.5)
    >>> [c.hsl for c in col.TetradicScheme(mode='rgb', angle=30)]
    [(90, 1, 0.5), (210, 1, 0.5), (270, 1, 0.5)]
    '''
    hue, sat, lum = self.__hsl
    use_ryb = (mode == 'ryb')

    if use_ryb:
        hue = Color.RgbToRyb(hue)

    # Adjacent hues sit 90 degrees away, shifted back by `angle`; the
    # middle color is the straight complement.
    hues = [(hue + 90 - angle) % 360,
            (hue + 180) % 360,
            (hue + 270 - angle) % 360]

    if use_ryb:
        hues = [Color.RybToRgb(x) for x in hues]

    return tuple(Color((x, sat, lum), 'hsl', self.__a, self.__wref)
                 for x in hues)
def AnalogousScheme(self, angle=30, mode='ryb'):
    '''Return two colors analogous to this one.

    Args:
      :angle:
        The angle between the hues of the created colors and this one.
      :mode:
        Select which color wheel to use for the generation (ryb/rgb).

    Returns:
      A tuple of grapefruit.Colors analogous to this one.

    >>> c1 = Color.NewFromHsl(30, 1, 0.5)

    >>> c2, c3 = c1.AnalogousScheme(angle=60, mode='rgb')
    >>> c2.hsl
    (330, 1, 0.5)
    >>> c3.hsl
    (90, 1, 0.5)

    >>> c2, c3 = c1.AnalogousScheme(angle=10, mode='rgb')
    >>> c2.hsl
    (20, 1, 0.5)
    >>> c3.hsl
    (40, 1, 0.5)
    '''
    h, s, l = self.__hsl

    if mode == 'ryb': h = Color.RgbToRyb(h)
    # Keep h positive before the subtraction so the modulo below always
    # operates on a non-negative value.
    h += 360
    h1 = (h - angle) % 360
    h2 = (h + angle) % 360
    if mode == 'ryb':
        h1 = Color.RybToRgb(h1)
        h2 = Color.RybToRgb(h2)

    return (Color((h1, s, l), 'hsl', self.__a, self.__wref),
            Color((h2, s, l), 'hsl', self.__a, self.__wref))
def AlphaBlend(self, other):
    '''Alpha-blend this color on the other one.

    Args:
      :other:
        The grapefruit.Color to alpha-blend with this one.

    Returns:
      A grapefruit.Color instance which is the result of alpha-blending
      this color on the other one.

    >>> c1 = Color.NewFromRgb(1, 0.5, 0, 0.2)
    >>> c2 = Color.NewFromRgb(1, 1, 1, 0.8)
    >>> c3 = c1.AlphaBlend(c2)
    >>> str(c3)
    '(1, 0.875, 0.75, 0.84)'
    '''
    # get final alpha channel
    fa = self.__a + other.__a - (self.__a * other.__a)

    # get percentage of source alpha compared to final alpha
    # NOTE(review): if other's alpha is 0 while self's is non-zero, this
    # divides by zero — confirm whether callers can ever hit that case.
    if fa==0: sa = 0
    else: sa = min(1.0, self.__a/other.__a)

    # destination percentage is just the additive inverse
    da = 1.0 - sa

    sr, sg, sb = [v * sa for v in self.__rgb]
    dr, dg, db = [v * da for v in other.__rgb]

    return Color((sr+dr, sg+dg, sb+db), 'rgb', fa, self.__wref)
def Blend(self, other, percent=0.5):
    '''Blend this color with the other one.

    Args:
      :other:
        the grapefruit.Color to blend with this one.
      :percent:
        the weight given to this color; the other color gets (1 - percent).

    Returns:
      A grapefruit.Color instance which is the result of blending
      this color on the other one.

    >>> c1 = Color.NewFromRgb(1, 0.5, 0, 0.2)
    >>> c2 = Color.NewFromRgb(1, 1, 1, 0.6)
    >>> c3 = c1.Blend(c2)
    >>> str(c3)
    '(1, 0.75, 0.5, 0.4)'
    '''
    other_weight = 1.0 - percent

    # Weighted per-channel average of the two colors, alpha included.
    mixed = []
    for mine, theirs in zip(self.__rgb, other.__rgb):
        mixed.append((mine * percent) + (theirs * other_weight))
    alpha = (self.__a * percent) + (other.__a * other_weight)

    return Color(tuple(mixed), 'rgb', alpha, self.__wref)
def _test():
import doctest
reload(doctest)
doctest.testmod()
if __name__=='__main__':
    # Run the doctest suite when executed as a script.
    _test()

# vim: ts=2 sts=2 sw=2 et
| [
"jtunney@gmail.com"
] | jtunney@gmail.com |
ef17d2a575909e129128b97bcad960ab6863d872 | 060c409f2b6282e1bf08a2101eb09be5f0927c4e | /pp_i2cdevices.py | 606da375872eb25410654535137ec763b8d7771a | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-mit-taylor-variant"
] | permissive | KenT2/pipresents-gapless | cf850951e1c5911524c8e984d72df1621867c556 | 31a347bb8b45898a3fe08b1daf765e31d47b7a87 | refs/heads/master | 2022-01-30T06:41:41.601213 | 2022-01-08T17:22:24 | 2022-01-08T17:22:24 | 22,849,661 | 219 | 47 | NOASSERTION | 2021-09-20T09:09:31 | 2014-08-11T18:23:30 | Python | UTF-8 | Python | false | false | 8,830 | py | #!/usr/bin/env python
# Copyright (c) 2016 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
# Register and other configuration values:
# Register and other configuration values for the ADS1x15 family:
ADS1x15_DEFAULT_ADDRESS        = 0x48
ADS1x15_POINTER_CONVERSION     = 0x00
ADS1x15_POINTER_CONFIG         = 0x01
ADS1x15_POINTER_LOW_THRESHOLD  = 0x02
ADS1x15_POINTER_HIGH_THRESHOLD = 0x03
# Config-register bit to start a single conversion / leave power-down.
ADS1x15_CONFIG_OS_SINGLE       = 0x8000
# Bit offset of the input multiplexer field in the config register.
ADS1x15_CONFIG_MUX_OFFSET      = 12
# Mapping of gain values to config register values.
# NOTE(review): the 2/3 key is the float 0.666... on Python 3 but the
# integer 0 on Python 2 (floor division) — confirm the intended runtime.
ADS1x15_CONFIG_GAIN = {
    2/3: 0x0000,
    1:   0x0200,
    2:   0x0400,
    4:   0x0600,
    8:   0x0800,
    16:  0x0A00
}
ADS1x15_CONFIG_MODE_CONTINUOUS = 0x0000
ADS1x15_CONFIG_MODE_SINGLE     = 0x0100
# Mapping of data/sample rate to config register values for ADS1015 (faster).
ADS1015_CONFIG_DR = {
    128:  0x0000,
    250:  0x0020,
    490:  0x0040,
    920:  0x0060,
    1600: 0x0080,
    2400: 0x00A0,
    3300: 0x00C0
}
# Mapping of data/sample rate to config register values for ADS1115 (slower).
ADS1115_CONFIG_DR = {
    8:   0x0000,
    16:  0x0020,
    32:  0x0040,
    64:  0x0060,
    128: 0x0080,
    250: 0x00A0,
    475: 0x00C0,
    860: 0x00E0
}
# Comparator configuration bits.
ADS1x15_CONFIG_COMP_WINDOW      = 0x0010
ADS1x15_CONFIG_COMP_ACTIVE_HIGH = 0x0008
ADS1x15_CONFIG_COMP_LATCHING    = 0x0004
ADS1x15_CONFIG_COMP_QUE = {
    1: 0x0000,
    2: 0x0001,
    4: 0x0002
}
ADS1x15_CONFIG_COMP_QUE_DISABLE = 0x0003
class ADS1x15(object):
    """Base functionality for ADS1x15 analog to digital converters."""

    def __init__(self, bus=None, address=ADS1x15_DEFAULT_ADDRESS, **kwargs):
        # `bus` is expected to be an smbus-style object providing
        # write_i2c_block_data / read_i2c_block_data.
        self.bus=bus
        pass

    def _data_rate_default(self):
        """Retrieve the default data rate for this ADC (in samples per second).
        Should be implemented by subclasses.
        """
        raise NotImplementedError('Subclasses must implement _data_rate_default!')

    def _data_rate_config(self, data_rate):
        """Subclasses should override this function and return a 16-bit value
        that can be OR'ed with the config register to set the specified
        data rate.  If a value of None is specified then a default data_rate
        setting should be returned.  If an invalid or unsupported data_rate is
        provided then an exception should be thrown.
        """
        raise NotImplementedError('Subclass must implement _data_rate_config function!')

    def _conversion_value(self, low, high):
        """Subclasses should override this function that takes the low and high
        byte of a conversion result and returns a signed integer value.
        """
        raise NotImplementedError('Subclass must implement _conversion_value function!')

    def _read(self, bus, mux, gain, data_rate, mode):
        """Perform an ADC read with the provided mux, gain, data_rate, and mode
        values.  Returns (percent, volts) for the converted sample.
        """
        # print bus,mux,gain,data_rate,mode
        config = ADS1x15_CONFIG_OS_SINGLE  # Go out of power-down mode for conversion.
        # Specify mux value (3 bits at offset 12).
        config |= (mux & 0x07) << ADS1x15_CONFIG_MUX_OFFSET
        # Validate the passed in gain and then set it in the config.
        if gain not in ADS1x15_CONFIG_GAIN:
            raise ValueError('Gain must be one of: 2/3, 1, 2, 4, 8, 16')
        config |= ADS1x15_CONFIG_GAIN[gain]
        # print hex(ADS1x15_CONFIG_GAIN[gain]), hex(config)
        # Set the mode (continuous or single shot).
        config |= mode
        # Get the default data rate if none is specified (default differs between
        # ADS1015 and ADS1115).
        if data_rate is None:
            data_rate = self._data_rate_default()
        # Set the data rate (this is controlled by the subclass as it differs
        # between ADS1015 and ADS1115).
        config |= self._data_rate_config(data_rate)
        config |= ADS1x15_CONFIG_COMP_QUE_DISABLE  # Disble comparator mode.
        # Send the config value to start the ADC conversion.
        # print hex(config)
        # Explicitly break the 16-bit value down to a big endian pair of bytes.
        bus.write_i2c_block_data(ADS1x15_DEFAULT_ADDRESS,ADS1x15_POINTER_CONFIG,[(config >> 8) & 0xFF, config & 0xFF])
        # Wait for the ADC sample to finish based on the sample rate plus a
        # small offset to be sure (0.1 millisecond).
        time.sleep(1.0/data_rate+0.0001)
        # Retrieve the result (two bytes, big endian).
        result = bus.read_i2c_block_data(ADS1x15_DEFAULT_ADDRESS, ADS1x15_POINTER_CONVERSION, 2)
        raw_adc = self._conversion_value(result[1], result[0])
        # convert from 25.83 FS range
        # 25.85*4.096/(2048.0*3.3) = 0.0156666666667
        # NOTE(review): this scaling assumes a specific input divider and a
        # 3.3 V reference — confirm against the hardware schematic.
        volts=raw_adc*0.0156666666667
        percent=int(volts*100/3.3)
        # print raw_adc, volts,percent
        return percent,volts

    def read_adc(self, bus, channel, gain=1, data_rate=None):
        """Read a single ADC channel and return the ADC value as a signed integer
        result.  Channel must be a value within 0-3.
        """
        assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'
        # Perform a single shot read and set the mux value to the channel plus
        # the highest bit (bit 3) set (single-ended mode).
        return self._read(bus,channel + 0x04, gain, data_rate, ADS1x15_CONFIG_MODE_SINGLE)
class ADS1115(ADS1x15):
    """ADS1115 16-bit analog to digital converter instance."""

    def __init__(self, *args, **kwargs):
        super(ADS1115, self).__init__(*args, **kwargs)

    def _data_rate_default(self):
        # Datasheet page 16: the config-register DR bits default to 128 SPS.
        return 128

    def _data_rate_config(self, data_rate):
        if data_rate in ADS1115_CONFIG_DR:
            return ADS1115_CONFIG_DR[data_rate]
        raise ValueError('Data rate must be one of: 8, 16, 32, 64, 128, 250, 475, 860')

    def _conversion_value(self, low, high):
        # Assemble the two result bytes into a 16-bit two's-complement value.
        raw = ((high & 0xFF) << 8) | (low & 0xFF)
        return raw - (1 << 16) if raw & 0x8000 else raw
class ADS1015(ADS1x15):
    """ADS1015 12-bit analog to digital converter instance."""

    def __init__(self, *args, **kwargs):
        super(ADS1015, self).__init__(*args, **kwargs)

    def _data_rate_default(self):
        # Datasheet page 19: the config-register DR bits default to 1600 SPS.
        return 1600

    def _data_rate_config(self, data_rate):
        if data_rate in ADS1015_CONFIG_DR:
            return ADS1015_CONFIG_DR[data_rate]
        raise ValueError('Data rate must be one of: 128, 250, 490, 920, 1600, 2400, 3300')

    def _conversion_value(self, low, high):
        # The 12-bit result is left-aligned: 8 MSBs in `high`, 4 LSBs in the
        # top nibble of `low`.
        raw = ((high & 0xFF) << 4) | ((low & 0xFF) >> 4)
        # Two's-complement sign extension for a 12-bit value.
        return raw - (1 << 12) if raw & 0x800 else raw
class MCP4725DAC(object):
    # Driver for the MCP4725 12-bit I2C DAC.
    # Based on https://github.com/samgratte/BeagleboneBlack/blob/master/BBB_MCP4725/i2c_mcp4725.py

    def __init__(self):
        pass

    def write_dac_fast(self, bus, address, value):
        # "Fast mode" write: clamp to the 12-bit range and send the value as
        # (4 MSBs, 8 LSBs) in a single two-byte transaction.
        clamped = max(0, min(4095, value))
        bus.write_byte_data(address, (clamped >> 8) & 0x0F, clamped & 0xFF)

    def write_dac(self, bus, address, value, store=False):
        # Command byte: plain DAC write (0x40) or write-and-store-to-EEPROM
        # (0x60) so the value survives a power cycle.
        command = 0x40 if store is False else 0x60

        # Clamp to the 12-bit range, then send the value left-aligned
        # across two payload bytes.
        clamped = max(0, min(4095, value))
        payload = [(clamped >> 4) & 0xFF, (clamped << 4) & 0xFF]
        # print address,payload[0],payload[1]
        bus.write_i2c_block_data(address, command, payload)
        return
| [
"ken@jksthompson.plus.com"
] | ken@jksthompson.plus.com |
8e4f927f238c2b172b89d0f8823be883e2c8d950 | 73aad38ffeb5bf9df4253daba8461f9d1a5fb6c4 | /phone/website/blog/models.py | 3ac00c4211a20b2b5e1378b67495bfe6bd347616 | [] | no_license | doubimike/doubimike.github.io | 35342574c03fd9c1d8399671527c43879b0f9b93 | 57d7ad56f879ed54df42f6e1c560b71c3ea77521 | refs/heads/master | 2016-08-11T10:24:48.166433 | 2016-05-03T08:06:28 | 2016-05-03T08:06:28 | 52,653,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from ckeditor.fields import RichTextField
# Create your models here.
class Tag(models.Model):
    """A free-form label that can be attached to blog posts."""
    tag = models.CharField(max_length=20)

    def __unicode__(self):
        return self.tag

    # Django on Python 3 ignores __unicode__ and falls back to the default
    # "Tag object" repr; alias __str__ so the admin shows the tag text.
    __str__ = __unicode__
class Author(models.Model):
    """A blog post author; email and website are optional."""
    name = models.CharField(max_length=30)
    email = models.EmailField(blank=True)
    website = models.URLField(blank=True)

    def __unicode__(self):
        return self.name

    # Django on Python 3 ignores __unicode__; alias __str__ so the admin
    # shows the author's name instead of "Author object".
    __str__ = __unicode__
class Category(models.Model):
    """A blog post category (each post belongs to exactly one)."""
    category = models.CharField(max_length=20)

    def __unicode__(self):
        return self.category

    # Django on Python 3 ignores __unicode__; alias __str__ so the admin
    # shows the category name instead of "Category object".
    __str__ = __unicode__
class Blog(models.Model):
    """A blog post with rich-text content, one author/category, many tags."""
    blog_title = models.CharField(max_length=200)
    blog_content = RichTextField()
    pub_date = models.DateTimeField()
    # NOTE(review): ForeignKey defaults are normally a primary key or
    # instance — confirm the string defaults here actually resolve; also
    # confirm the Django version, since ForeignKey requires an explicit
    # on_delete argument from Django 2.0 onwards.
    blog_author = models.ForeignKey(Author,default='DoubiMike')
    blog_category = models.ForeignKey(Category,default='未分类')
    tags = models.ManyToManyField(Tag,blank=True)

    def __unicode__(self):
        return self.blog_title

    # Django on Python 3 ignores __unicode__; alias __str__ so the admin
    # shows the post title instead of "Blog object".
    __str__ = __unicode__
class Files(models.Model):
    # Uploaded attachment; stored under MEDIA_ROOT/upload/.
    file = models.FileField(upload_to='upload/')
| [
"doubimike@qq.conm"
] | doubimike@qq.conm |
a7efff866693b09addf9a6ea392c395f2d9f16d7 | a7ef81e5f847979f0afb7ee57abb0401c2770f65 | /lab3/object_equality/point.py | a4016c3e65ce49357dd2b3d2730f51ac48bd8191 | [] | no_license | brianphan90/CPE101 | 46a91200162a7afa9ba2797ed0d875b4299a7f38 | 282bf01d20ccad2e4e3ab1d7cbf20ef2c32ce956 | refs/heads/master | 2020-04-08T21:35:08.465722 | 2018-11-30T01:36:40 | 2018-11-30T01:36:40 | 159,747,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | import utility
class Point:
    """A 2-D point whose equality comparison uses a floating-point tolerance."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        # Comparing against a non-Point (anything without .x/.y) used to
        # raise AttributeError; return NotImplemented instead so Python can
        # fall back to the other operand's comparison (or to inequality).
        if not isinstance(other, Point):
            return NotImplemented
        return utility.epsilon_equal(self.x, other.x) and utility.epsilon_equal(self.y, other.y)
| [
"brianphan90@Brians-MacBook-Pro.local"
] | brianphan90@Brians-MacBook-Pro.local |
6640af5a0efbab6fa7fa3aa91719a0e4ae516159 | 8f73f9b067533044a0e9146f9db5b1900c831b0c | /opencv/segmentit.py | 57680ed33bf83f9f980823a19379d77e78c6bc39 | [] | no_license | sinegami301194/Intoduction-to-ML | 61cd02ed862bbf500c8ec8daf3ece49393016577 | a22a7c31751fa2410895e683115d8489785ca180 | refs/heads/master | 2023-08-31T06:07:59.628184 | 2023-08-30T07:14:46 | 2023-08-30T07:14:46 | 184,025,490 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | import cv2
import numpy as np
img = cv2.imread('out1.png')
cv2.imshow('img', 64 * img)
cv2.waitKey(0)
cv2.destroyAllWindows()
np.unique(img.flatten()) | [
"noreply@github.com"
] | sinegami301194.noreply@github.com |
f4ee1250a5dd16ccc5577e2a9a5f2b910085514f | f25892635344b16a6ee723d2fcca01a9bb6d0198 | /env/Lib/site-packages/tidypython/separate.py | c61f655dc6ef40eeafffda21884dfdcf70bf4d5b | [] | no_license | AhlemOuldAli/Django_Project | 5502f279148d190870d15868fc66081d33a11c06 | fcd4563457c58a61af5b94621782db8c269849ad | refs/heads/main | 2023-02-14T15:18:38.706740 | 2021-01-15T14:15:53 | 2021-01-15T14:15:53 | 329,926,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,954 | py | import dplython
from dplython import Verb, DplyFrame, X, select, mutate, head
from readpy import *
import re
class separate(Verb):
    """dplython verb mimicking tidyr's separate(): split one column into
    several by a separator, e.g. df >> separate(X.col, ['a', 'b'], '|')."""
    __name__ = "separate"

    def __call__(self, df):
        # First positional arg is the column to split, second the list of
        # new column names.
        if len(self.args) >= 2:
            if not isinstance(self.args[0], dplython.later.Later):
                raise ValueError("First argument must be of the form \"X.column1, X.column2, ...\"")
            if not isinstance(self.args[1], list):
                raise ValueError("Second argument must be a list.")
            sp_col = self.args[0]._name
            sp_into = self.args[1]
        else:
            raise ValueError("You must provide at least two arguments, the key and the value.")

        # Optional third arg / sep= kwarg: regex separator; default splits
        # on runs of non-word characters.
        if len(self.args) >= 3:
            if not isinstance(self.args[2], str):
                raise ValueError("Third argument should be the string separator.")
            sep = self.args[2]
        elif 'sep' in self.kwargs:
            sep = self.kwargs['sep']
        else:
            sep = r'[^\w]+'

        # NOTE(review): leftover debug output — consider removing.
        print(sep)
        splitcol = list(map(lambda x: re.compile(sep).split(x), df[sp_col]))
        # Rows with fewer pieces than target columns are padded with None.
        for i, into_col in enumerate(sp_into):
            df[into_col] = [row[i] if len(row) > i else None for row in splitcol]
        # Rebuild the column order with the new columns in place of sp_col.
        columns = list(df.columns)
        # NOTE(review): `-len(into_col)-1` uses the *last loop variable* (a
        # column-name string), so the slice length depends on that name's
        # character count — `-len(sp_into)` looks like the intent; confirm.
        reorder_columns = columns[:columns.index(sp_col)] + sp_into + columns[(columns.index(sp_col)+1):-len(into_col)-1]
        return df[reorder_columns]

    def __rrshift__(self, other):
        # Support `plain_dataframe >> separate(...)` by wrapping a copy.
        return self.__call__(DplyFrame(other.copy(deep=True)))
if __name__ == '__main__':
    # Smoke test: pack name/mpg/cyl into one '|'-separated column, then
    # split it back apart with separate().
    mtcars = read_tsv('test/data/mtcars.tsv')
    mtcars = mtcars >> select(X.name, X.mpg, X.cyl)
    d = zip(map(str, mtcars['name']), map(str, mtcars['mpg']), map(str, mtcars['cyl']))
    d = ['|'.join(x) for x in d]
    mtcars['name'] = d
    mtcars = mtcars >> select(X.name)
    mtcars_clean = mtcars >> separate(X.name, ['name', 'mpg', 'cyl'], ' ')
    print(mtcars_clean >> head())
"32915388+AhlemOuldAli@users.noreply.github.com"
] | 32915388+AhlemOuldAli@users.noreply.github.com |
0b9bf7fe68d5f84882bfda40f710ce848ca1d067 | 5e435be8c265662034e7fe589dd3b7998ec492f9 | /application/__init__.py | ca79fb723c9437453aa15842beccd3facaf22ab3 | [] | no_license | Aiixalex/Rate-My-Music-and-Movie | 45d4a5dd70d19ac3a2b7ad380023f68c21201f02 | 535017da78c73874c17a70b2f50ba35cd21802cf | refs/heads/master | 2023-04-03T19:36:51.716503 | 2021-04-18T01:53:31 | 2021-04-18T01:53:31 | 359,017,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Single shared SQLAlchemy handle; bound to the app inside create_app().
db = SQLAlchemy()

def create_app():
    """Construct the core application (application-factory pattern)."""
    app = Flask(__name__, instance_relative_config=False)
    app.config.from_object('config.Config')

    db.init_app(app)

    with app.app_context():
        from . import routes  # Import routes (registers view functions on app)
        db.create_all()  # Create sql tables for our data models

        return app
"wyl2000@zju.edu.cn"
] | wyl2000@zju.edu.cn |
31bafc31d223156323b0fc56fb766615260379ae | efea83db16445d44d30126684461ce55f1383233 | /Reajustar.py | 34115e9ec8af12c4256ed6644ce36d39c497e4c5 | [] | no_license | cagiraldoa/Analisis-Numerico | c9857a306851fde37dcea75db83b68ac4ac664fd | a5f95f702b66f1996723f3d3a19cff1346cee241 | refs/heads/master | 2023-05-10T17:20:14.749461 | 2021-06-03T15:59:43 | 2021-06-03T15:59:43 | 373,371,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | '''
Para instalar la libreria cv2 debe ejecutar:
>> pip install opencv-python
>> pip install opencv-contrib-python
más información en OpenCV https://pypi.org/project/opencv-python/
'''
import os
import cv2
from os import scandir, getcwd
# Performs face segmentation on an image.
def detectarRostros(imagenAnalizar):
    # Create the Haar cascade recognizer.
    '''
    A Haar cascade is a classifier used to detect the object it
    has been trained for.
    '''
    entrenamiento ='data/haarcascade_frontalface_default.xml'
    #entrenamiento = "haarcascade_frontalface_default.xml"
    clasificadorRostros = cv2.CascadeClassifier(entrenamiento)

    # Detection runs on the grayscale version of the input image.
    imagenGris = cv2.cvtColor(imagenAnalizar, cv2.COLOR_BGR2GRAY)

    # Faces detected in imagenAnalizar, as (x, y, w, h) rectangles.
    rostros = clasificadorRostros.detectMultiScale(
        imagenGris,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )

    # Crop each detected rectangle out of the original (color) image.
    imagenesRostros = []
    for (x, y, w, h) in rostros:
        ROI = imagenAnalizar[y:y+h, x:x+w]
        imagenesRostros.append(ROI)

    return [rostros, imagenesRostros]
#==========================================================
# función que genera archivos de imagenes por cada rostro identificado
# Absolute directory of this script, used to locate the known-faces folder.
BASE_DIR=os.path.dirname(os.path.abspath(__file__))
image_dir=os.path.join(BASE_DIR,"Imagenes conocidos")
# Writes the detected face crops to disk.
def crearRostros(imagenAnalizar, rostros,f):
    for (x, y, w, h) in rostros:
        ROI = imagenAnalizar[y:y+h, x:x+w]
        # NOTE(review): every face is written to the same path `f`, so only
        # the last detected face survives — confirm whether per-face
        # filenames were intended.
        cv2.imwrite(f,ROI)
def main():
    # Collect the absolute paths of all images in the Trump folder.
    img=[]
    for i in ls("D:\Escritorio\Parcial 3-Alejandro Fernandez Restrepo\Imagenes conocidos\Trump"):
        img.append("D:\Escritorio\Parcial 3-Alejandro Fernandez Restrepo\Imagenes conocidos\Trump\\"+i)
        print(i)
    # `num` is only used by the commented-out loop below.
    num=len(img)
    #for i in (0,num-1):
    # NOTE(review): processes only the hard-coded 9th image; raises
    # IndexError if the folder holds fewer than 9 files.
    i=8
    imagenAnalizar = cv2.imread(img[i])
    [dataRostros, imagenesRostros] = detectarRostros(imagenAnalizar)
    # Overwrites the source image file with the detected face crop.
    crearRostros(imagenAnalizar, dataRostros,img[i])#
crearRostros(imagenAnalizar, dataRostros,img[i])#
def ls(ruta=None):
    """Return the names of the regular files directly inside *ruta*.

    Defaults to the current working directory.  The original default,
    ``ruta=getcwd()``, was evaluated once at import time, so any later
    ``os.chdir`` was silently ignored; resolve the default per call instead.
    """
    if ruta is None:
        ruta = getcwd()
    return [arch.name for arch in scandir(ruta) if arch.is_file()]
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
"cagiraldoa@eafit.edu.co"
] | cagiraldoa@eafit.edu.co |
94b47a2667c720678f3dcf9bef9f7c871330e82c | 055b1a127c865d655d177e93324095db9ab199aa | /Colored_text.py | ddd8f58fe8786c2559aa865278c82b0b902905d8 | [] | no_license | devkansara1310/Python1 | d5c60f54288bd8f736c7fd0b36fb4d967dd0d79c | c87a51b3ea8d4040a8add37010131f0c075fd958 | refs/heads/main | 2023-04-22T01:00:08.280725 | 2021-05-09T06:32:08 | 2021-05-09T06:32:08 | 364,584,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | #Print Colored text
import colorama
from colorama import Fore, Back, Style
colorama.init(autoreset=True)
print(Fore.BLUE+Back.YELLOW+"Hii DevOps"+Fore.YEllOW+Back.BLUE+"How are you all !")
print(Back.CYAN+"I am Dev Kansara")
| [
"noreply@github.com"
] | devkansara1310.noreply@github.com |
fab6841e0e3dcb2fcac4fa0e3b58f79d2ce557bd | 3e84e3dcb554a759ff315dd74d0f26a975e9390c | /hw.py | 35a4b57ca4ed2551ac1dcb8b4cb3b735ef9e7255 | [] | no_license | krunalamodi/Python | 14b5baeff8a5607e0178c81e3a3acf40c7f37774 | bf926d01c06c08ef36c331e2139e100b5f747270 | refs/heads/master | 2021-01-20T05:31:40.388990 | 2015-02-15T22:03:16 | 2015-02-15T22:03:16 | 30,666,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | words = ['Hello', 'World', 'by', 'Krunal']
for w in words:
print(w + " ")
print("Exiting..Bye :)") | [
"krunalamodi@gmail.com"
] | krunalamodi@gmail.com |
7d3cd4adc821d452c460e9ee1aff558815f2997a | 998009777670962fc9ee4711b8a40e1d76dea82d | /backend_web/backend/backend/urls.py | 3b65f673719f00cfd6f29c843e58814cc47bf7a4 | [] | no_license | megamoo456/Hiring | 005e4e383ed53b65476995147a00c43a7c8acec3 | 6014b06cc0c2c4df54c0252ca629041c7f56917e | refs/heads/master | 2023-08-25T06:15:15.271942 | 2021-10-24T19:47:22 | 2021-10-24T19:47:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """backend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from backendapp import views
# DRF router generates the standard list/detail routes for each viewset.
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
router.register(r'companies', views.companiesList)

urlpatterns = [
    path('', include(router.urls)),
    path('admin/', admin.site.urls),
    # Login/logout views for DRF's browsable API.
    path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| [
"48990417+megamoo456@users.noreply.github.com"
] | 48990417+megamoo456@users.noreply.github.com |
fb937dd6c22863636b64205029b4235d5f5bd43e | 493d6cb14f0d8120f82e8112148c2912ec64573d | /index/migrations/0002_newscenter_create_by.py | 5093a493d344dbe49d686e94a7b62e8e33783fe2 | [] | no_license | Lython7/SecureWorld | 4e23df10ba60f9459a7a20576c75d297a8bed131 | d00ba59020662773ebe10baf7d859805ccb722d9 | refs/heads/master | 2020-04-06T21:30:22.328868 | 2019-01-02T09:51:50 | 2019-01-02T09:51:50 | 157,805,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | # Generated by Django 2.0.2 on 2018-11-20 15:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the nullable `create_by` editor field to the NewsCenter model.

    # NOTE(review): `initial = True` looks odd for an "0002" migration that
    # depends on 0001_initial — confirm before squashing or faking.
    initial = True

    dependencies = [
        ('index', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='newscenter',
            name='create_by',
            # SET_NULL keeps the news item when its editor account is deleted.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='编辑人'),
        ),
    ]
| [
"15210370876@163.com"
] | 15210370876@163.com |
ef7751434fc945c0d0488c27744dc82ca3f34545 | b0c4602813d150e73d47eb0ebc16c2bbd790eac1 | /disk_class.py | f378464fbc3fc42133bda3953b3c3f01b178336d | [] | no_license | 4rfel/airhockey2 | 2d773248027a69d0891e102a0f5423d3c53f0c9e | 7ed25475c14615fe69ddaba2b736cd5c618afc0b | refs/heads/master | 2020-08-08T09:13:24.715704 | 2019-10-09T22:22:29 | 2019-10-09T22:22:29 | 213,802,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | from object_class import Basic_object
from random import randint
from math import radians, cos, sin, atan, pi
class Disk(Basic_object):
    """The air-hockey puck: starts with a random direction and speed and
    bounces off the table edges."""

    def __init__(self, sprite, center, size, min_speed, max_speed, table_width, table_height):
        super().__init__(sprite, center, size)
        self.table_width = table_width
        self.table_height = table_height
        # Pick a random launch direction and speed, stored as integer
        # x/y velocity components.
        deg = randint(0,360)
        rad = radians(deg)
        speed = randint(min_speed, max_speed)
        self.x_speed = int(cos(rad)*speed)
        self.y_speed = int(sin(rad)*speed)

    def move(self):
        # Advance one step per axis, reflecting the velocity when the rect
        # is already touching (or past) a table edge.
        if self.rect.left > 0 and self.rect.right < self.table_width:
            self.rect.centerx += self.x_speed
        else:
            self.x_speed = -self.x_speed
            self.rect.centerx += self.x_speed
        if self.rect.top > 0 and self.rect.bottom < self.table_height:
            self.rect.centery += self.y_speed
        else:
            self.y_speed = -self.y_speed
            self.rect.centery += self.y_speed

    def collide(self):
        # NOTE(review): computes the speed magnitude and angle but never
        # uses or returns them — this method appears unfinished.
        speed = (self.x_speed**2 + self.y_speed**2)**0.5
        if self.x_speed != 0:
            rad = atan(self.y_speed/self.x_speed)
        elif self.y_speed > 0:
            rad = 3*pi/2
        else:
            rad = pi/2

    def get_center(self):
        # Current (x, y) center of the puck's rect.
        return self.rect.center
| [
"rafael_1999_@hotmail.com.br"
] | rafael_1999_@hotmail.com.br |
fad1602c24b8f61ae85f7f3ae21ceff9518dcfca | 3f227167cc1c596644facb5aa41db125089f2a99 | /egamegearstore/egamegearstore/settings/base.py | 260d19e410fa1374d967c8710b72ae02d65d5907 | [] | no_license | LordHung/eGameGearStore | 202eac5ade2c0186c8d54ebe89029f4c849a56be | de209c97d5caf5ef3d3dce210b6e31806ab6a56f | refs/heads/master | 2021-06-16T07:51:35.929624 | 2017-05-09T17:56:53 | 2017-05-09T17:56:53 | 79,032,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | """
Django settings for eggs project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#root of project
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'yourgmail@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
If using gmail, you will need to
unlock Captcha to enable Django
to send for you:
https://accounts.google.com/displayunlockcaptcha
'''
# Application definition
INSTALLED_APPS = (
#django app
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#third party apps
'crispy_forms',
'registration',
#my apps
'newsletter',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'egamegearstore.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'egamegearstore.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME', ''),
'USER': os.environ.get('DB_USER', ''),
'PASSWORD': os.environ.get('DB_PASSWORD', ''),
'HOST': os.environ.get('DB_HOST', ''),
'PORT': os.environ.get('DB_PORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "static_root")
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static_in_pro", "our_static"),
#os.path.join(BASE_DIR, "static_in_env"),
#'/var/www/static/',
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "media_root")
#Crispy FORM TAGs SETTINGS
CRISPY_TEMPLATE_PACK = 'bootstrap3'
#DJANGO REGISTRATION REDUX SETTINGS
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
| [
"hunguit14@gmail.com"
] | hunguit14@gmail.com |
562dcd6cecf8887e84f0a1c2a7f236459a45cdd2 | 8bbeb7b5721a9dbf40caa47a96e6961ceabb0128 | /python/21.Merge Two Sorted Lists(合并两个有序链表).py | 1cb11ad7f4b2021bb47b849789f00c8e13b2de71 | [
"MIT"
] | permissive | lishulongVI/leetcode | bb5b75642f69dfaec0c2ee3e06369c715125b1ba | 6731e128be0fd3c0bdfe885c1a409ac54b929597 | refs/heads/master | 2020-03-23T22:17:40.335970 | 2018-07-23T14:46:06 | 2018-07-23T14:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | """
<p>Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.</p>
<p><b>Example:</b>
<pre>
<b>Input:</b> 1->2->4, 1->3->4
<b>Output:</b> 1->1->2->3->4->4
</pre>
</p><p>将两个有序链表合并为一个新的有序链表并返回。新链表是通过拼接给定的两个链表的所有节点组成的。 </p>
<p><strong>示例:</strong></p>
<pre><strong>输入:</strong>1->2->4, 1->3->4
<strong>输出:</strong>1->1->2->3->4->4
</pre>
<p>将两个有序链表合并为一个新的有序链表并返回。新链表是通过拼接给定的两个链表的所有节点组成的。 </p>
<p><strong>示例:</strong></p>
<pre><strong>输入:</strong>1->2->4, 1->3->4
<strong>输出:</strong>1->1->2->3->4->4
</pre>
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
| [
"lishulong@wecash.net"
] | lishulong@wecash.net |
1242c94937d33b5991ab03cd76dd93e9ba134421 | 4f0337ad341885bac6d45049d2b9f91e59f1bf76 | /statsSend/urbanCodeDeploy/urbanCodeDeployStatisticsSender.py | 571fdeb35ff1f2a829ea28076213b306e36308fb | [
"MIT"
] | permissive | luigiberrettini/build-deploy-stats | c81e66613986b49b0705840da1e6f1dda306b67c | 52a0bf5aeb8d2f8ef62e4e836eb0b9874dea500d | refs/heads/master | 2020-12-02T22:11:35.280423 | 2017-08-29T15:58:20 | 2017-08-29T15:58:20 | 96,093,457 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | #!/usr/bin/env python3
from datetime import datetime, timedelta, timezone
from dateutil import parser
from statsSend.session import Session
from statsSend.utils import print_exception
from statsSend.urlBuilder import UrlBuilder
from statsSend.urbanCodeDeploy.urbanCodeDeployTag import UrbanCodeDeployTag
class UrbanCodeDeployStatisticsSender:
epoch = datetime(1970, 1, 1, tzinfo = timezone.utc)
one_second = timedelta(seconds = 1)
def __init__(self, settings, reporter):
page_size = int(settings['page_size'])
url_builder = UrlBuilder(settings['server_url'], settings['api_url_prefix'], '', page_size)
headers = { 'Accept': 'application/json'}
user = settings['user']
password_or_auth_token = settings['password_or_auth_token']
verify_ssl_certs = settings['verify_ssl_certs']
self.session_factory = lambda: Session(url_builder, headers, user, password_or_auth_token, verify_ssl_certs)
self.tag_name = settings['tag_name']
self.since_posix_timestamp = (parser.parse(settings['since_timestamp']) - self.epoch) // self.one_second * 1000
self.reporter = reporter
async def send(self):
if ("report_categories" in dir(self.reporter)):
async with self.session_factory() as session:
try:
tag = UrbanCodeDeployTag(session, self.tag_name)
categories = [cat async for cat in self._categories(tag)]
self.reporter.report_categories(categories)
except Exception as err:
print_exception('Error sending categories')
async with self.session_factory() as session:
try:
tag = UrbanCodeDeployTag(session, self.tag_name)
async for app_process_request in tag.retrieve_application_process_requests_since_posix_timestamp(self.since_posix_timestamp):
try:
activity = app_process_request.to_activity()
self.reporter.report_activity(activity)
except Exception as err:
print_exception('Error reporting activity')
except Exception as err:
print_exception('Error reporting activities')
async def _categories(self, tag):
async for application in tag.retrieve_applications():
async for category in application.retrieve_categories():
yield category | [
"luigi.berrettini@gmail.com"
] | luigi.berrettini@gmail.com |
6c00f1537b22bed43527458a8b0ac2e1db7b9ad6 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /qwDPeZeufrHo2ejAY_16.py | 588f2296543e76694c6c64028d65f0e7cae3d94f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py |
def eval_algebra(eq):
li = eq.split(' ')
eq_pos, var_pos = li.index('='), li.index('x')
if var_pos == 0 and eq_pos == 1:
return eval(''.join(li[2:]))
n = len(eq)
if li[-1] == 'x' and li[-2] == '=':
return eval(''.join(li[:eq_pos]))
if var_pos > eq_pos:
li = li[eq_pos + 1:] + ['='] + li[:eq_pos]
eq_pos, var_pos = li.index('='), li.index('x')
oper = li[1]
if var_pos == 0:
if oper == "+":
return int(li[-1]) - int(li[2])
else:
return int(li[-1]) + int(li[2])
else:
if oper == "+":
return int(li[-1]) - int(li[0])
else:
return -1*(int(li[-1]) - int(li[0]))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
f904b353356eb74b16e5c7619819e12f667ff57c | 4ad7e9777b8f08595a524c7fe9f6e1900d722c6a | /Code/Website/backend/ml-tools/general.py | f25cb62e0daf68c75e8fe703553d100272c9f67d | [] | no_license | FIU-SCIS-Senior-Projects/Automated-Document-Summarization-1.0 | abf781d4e4285f112cd8cffb80ff6a56c5879ba2 | 935e13a8394a464e7a143e5868ee4f6099cf16a0 | refs/heads/master | 2021-01-21T11:00:16.521174 | 2017-07-28T18:45:37 | 2017-07-28T18:45:37 | 91,716,811 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,019 | py | """ This file contains the abstract classes that represent general concept used throughout machine learning algorithms. Any of the ML tools developed by the team assume that the objects passed to them inherit from these abstract classes, e.g. when passing a list of models to the cross-validation tool, the methods assume that these models implement the methods train(...), test(...), etc.
Author: Alberto Mizrahi
Last modified: 06/15/17
"""
from abc import ABC, abstractmethod
class Example(ABC):
""" Represents an example data point that the learning model will use to train on or to test its acuuracy with """
@abstractmethod
def get_features(self):
""" Returns the features (or input variables) of the example.
Returns:
A hash containing all the features names and their corresponding values for this example
"""
pass
@abstractmethod
def get_output(self):
""" Returns the target (or output) variable of the example. """
pass
def __repr__(self):
""" Return a string that represents this example"""
return str(self.get_features()) + "->" + str(self.get_output())
class LearningModel(ABC):
""" Represents a learning model that will be trained and then used to solve learning problems """
@abstractmethod
def train(self, examples):
""" Trains the model with the given training examples.
Args:
examples: A list of Example objects.
"""
pass
@abstractmethod
def test(self, examples):
""" Assuming that the model has been trained, it predicts the target variable for each example and checks whether its prediction is accurate.
Args:
examples: A list of Example objects.
Returns:
A percentage representing what fraction of the examples were predicted accurately.
"""
pass
@abstractmethod
def clear(self):
""" It clears, or "untrains", the model """
pass
| [
"amb110395@gmail.com"
] | amb110395@gmail.com |
cdb1f89f8d0dca3dc887c31676c2a38ba7718862 | 220b3d426d47b652ef86e0002e796d54cef25e75 | /scratch/PlayerFile.py | 66848386f688e78c49eed29489e87403a7db150f | [
"Apache-2.0"
] | permissive | HenryLiangzy/ITTF-Database-System | 23f390fefca65f1c5dc409f4091948f367bf5e19 | 07044000dd1d13354239b7e46f4ee903baf01132 | refs/heads/master | 2022-12-18T12:19:23.344562 | 2020-09-22T06:54:28 | 2020-09-22T06:54:28 | 297,559,698 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,868 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# last modified 2018/10/12 13:23
# By Henry Leung
import requests
import re
import time
import random
from bs4 import BeautifulSoup
import database
import analysis
def get_list():
connect, cursor = database.open_database()
cursor.execute("SELECT playerId FROM player")
id_list = cursor.fetchall()
database.close_databse(connect, cursor)
return id_list
def get_html(id, session):
url_header = {
'Connection': 'Keep-Alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
}
url = "https://results.ittf.link/index.php"
payload = {
'option': 'com_fabrik',
'view': 'details',
'formid': '99',
'rowid': id,
'Itemid': '226'
}
html = session.get(url, params=payload, headers=url_header, timeout=10)
if html.cookies.get_dict():
session.cookies.update(html.cookies)
return html
def match_id(player_id):
id = str(player_id)
match = re.search('([0-9]+)', id)
id_match = match.group(1)
return id_match
def analysis_html(html_data):
soup = BeautifulSoup(html_data, 'html.parser')
player_profile = analysis.get_personal_data(soup)
player_result = analysis.get_player_result(soup)
connect, cursor = database.open_database()
cursor.execute(
"INSERT INTO playerProfile (playerId, playerName, playerAssco, playerGender, playerAge) VALUES (?, ?, ?, ?, ?)",
(player_profile[0], player_profile[1], player_profile[2], player_profile[4], player_profile[3])
)
player_id = player_profile[0]
for element in player_result[0]:
cursor.execute(
"INSERT INTO playerResult (playerId, playerResult) VALUES (?, ?)",
(player_id, element)
)
for element in player_result[1]:
cursor.execute(
"INSERT INTO playerOtherResult (playerId, playerResult) VALUES (?, ?)",
(player_id, element)
)
for element in player_result[2]:
cursor.execute(
"INSERT INTO playerDoubleResult (playerId, playerResult) VALUES (?, ?)",
(player_id, element)
)
database.close_databse(connect, cursor)
return
if __name__ == '__main__':
player_id_list = get_list()
numbers = len(player_id_list)
html_session = requests.Session()
times = int(input("Input the times you want to start:"))
error_times = 0
stop_times = 0
while times < numbers:
player_id = player_id_list[times][0]
print(player_id)
try:
html_data = get_html(player_id, html_session)
# analysis_html(html_data.text)
times += 1
percentage = times / numbers * 100
time.sleep(random.randint(0, 2))
print("Finish", times, "th data less ", numbers - times, " records, ID:", player_id, "profile, complete", round(percentage, 4), "%")
except ConnectionRefusedError as e:
stop_point_id = player_id
stop_times = times
print("Connection refused from the server, break point id is:", stop_point_id)
break
except TimeoutError as e:
error_times += 1
if error_times == 3:
stop_times = times
print("Connection time out try 5 times and fail, break, stop time is", stop_times)
break
else:
stop_point_id = player_id
print("Connection time out, break point id is:", stop_point_id, "waiting 90s and retry")
time.sleep(90)
print("All record download successful") | [
"henryliangzy@gmail.com"
] | henryliangzy@gmail.com |
77a5295b2fc73ab18482d48e8073765f0b39e1ac | 2063973ec9813a6bf0515fde527ec9f2d783dc49 | /triggered_on_hotkey.py | b7757537ca09bb6a251046ec4d38be1f6c059115 | [] | no_license | jonfleming/chatterbot_chatbot | 80cdcb66851eb6632789391f57091fc7a9e73ef2 | a29eb10a3b8759bad6636cc2374dd61fda60ea47 | refs/heads/main | 2022-12-23T11:39:09.882091 | 2020-10-05T04:40:19 | 2020-10-05T04:40:19 | 301,290,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | # Triggered on hotkey press
import keyboard
def edit():
print('ctrl+e was pressed')
keyboard.add_hotkey('ctrl + e', edit)
print('here. waiing for input')
text = input()
print(text)
| [
"jon.fleming@mcg.com"
] | jon.fleming@mcg.com |
9ebfc9fc70f6d8128490b9ca6e7284f958a337ac | 6635b3dd3a1aca5d317ea4510c95ad67a6923f74 | /task2/ToDoApp/todolist/forms.py | e2d1009a983be6ed8e16cf95e0e7e78d994af54e | [] | no_license | tna0y/Industrial-Programming | 781a3a07346eb4026742bc38a3bd867cb0a35cbe | a55fc67e34b1dd8de453612e580bf767e18cd10d | refs/heads/master | 2021-04-28T02:30:32.811175 | 2018-02-26T04:43:44 | 2018-02-26T04:43:44 | 122,117,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | from django import forms
from django.contrib.auth.forms import User
class LoginForm(forms.Form):
username = forms.CharField(max_length=150)
password = forms.CharField(widget=forms.PasswordInput())
class RegisterForm(forms.Form):
username = forms.CharField(max_length=150)
password = forms.CharField(widget=forms.PasswordInput())
class NewTaskForm(forms.Form):
text = forms.CharField(widget=forms.Textarea())
| [
"mk@mkapps.org"
] | mk@mkapps.org |
6c63813d8a06d94889980d12e7660239b36001ef | 68f1614c00f0a80df9483a55828f70b6927b846f | /config.py | 6fa38ee0acc123078cde7875bd2585a0b7b14d98 | [] | no_license | alexbaryzhikov/pathfinding | 7fafdd443fe9f056ba4f42a11a0b868e599c8e52 | 9df6744b13889b896b81d1607a4a38585a062eb9 | refs/heads/master | 2022-05-03T18:34:17.895473 | 2022-03-11T18:15:57 | 2022-03-11T18:15:57 | 99,922,641 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | from pygame.locals import *
""" Screen """
SCREEN_W = 1920
SCREEN_H = 1080
SCREEN_MODE = 0
# SCREEN_W = 2560
# SCREEN_H = 1440
# SCREEN_MODE = 0 | FULLSCREEN #| DOUBLEBUF | HWSURFACE | RESIZABLE | NOFRAME
MAX_FPS = 120
FPS_BUF_SIZE = 100
""" Colors """
BG_COLOR = (12, 15, 22)
EDGE_COLOR = (80, 114, 27)
WALL_COLOR = (100, 0, 0)
PATH_COLOR = (150, 0, 0)
""" Agent """
AGENT_RADIUS = 35
AGENT_SPEED = 500
WP_TOLERANCE = 50 # switch to next waypoint when current is in vicinity
""" Obstacles """
COLLIDABLE_NUM = 50
COLLIDABLE_R_MIN = 10
COLLIDABLE_R_MAX = 70
COLLIDABLE_MIN_DIST = 2 # min allowed dist. betw. agent and obstacle
""" Grid """
GRID_NODE_WIDTH = 40
MOVE_COST = 10
MOVE_COST_DIAG = 14
""" Mesh """
BORDER_POINTS_THRESHOLD = 80 # max dist. betw. obstacle and border to spawn border point
BORDER_POINTS_MERGE_DIST = 60 # don't spawn border points closer that this
| [
"aleksiarts@gmail.com"
] | aleksiarts@gmail.com |
e646239f9830abc70bb1c11a6bb57d4703c6c2e9 | 264451eec9207bc708c684f27817c738da4dccd8 | /TablicaCrawler.py | 9db679daa0dac5a43f1eb913a5bfbdf6ad74463f | [] | no_license | ibezuglyi/python-house-finder | cac60a8acd77e70f1fed982d2d086376784a288a | bcc971f74ab85fdabb554a038c8cdf17ce001a26 | refs/heads/master | 2021-01-19T05:54:05.061563 | 2014-03-25T21:48:40 | 2014-03-25T21:48:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,319 | py | import urllib2
from bs4 import BeautifulSoup
from HouseConverter import HouseConverter
from Offer import Offer
from Page import Page
from TimeConverter import TimeConverter
__author__ = 'dit'
class TablicaCrawler():
def init_crawler(self, src):
if src is not None:
self.source = src
self.data = None
def __init__(self, src=None):
self.init_crawler(src)
self.time_converter = TimeConverter()
self.houseConverter = HouseConverter()
def getRawData(self):
if not self.data:
data = get_html(self.source)
self.data = data
return self.data
def get_raw_offers(self, css_class):
data = self.getRawData()
soup = BeautifulSoup(data)
self.raw_offers = soup.find_all("td", {'class': css_class})
return self.raw_offers
def get_offers(self, css_class):
raw_offers = self.get_raw_offers(css_class)
offers = [_as_offer(raw_offer, self.time_converter, self.houseConverter) for raw_offer in raw_offers]
return offers
def get_pages_count(self):
pages = self.get_pages()
return len(pages)
def get_pages(self):
page_data = self.getRawData()
soup = BeautifulSoup(page_data)
pager = soup.find_all("div", {'class': 'pager'})[0]
pages = [_as_page(p) for p in pager.find_all("span", {'class': 'item'})]
return pages
def generate_page_href(self, page, page_number):
pattern = page.href
href = pattern.replace("3", page_number)
return href
def get_page(self, page_number):
pages = self.get_pages()
page = [p for p in pages if p.number == page_number]
if page:
href = page[0].href
else:
href = self.generate_page_href(pages[2], str(page_number))
href = href is None and self.source or href
self.init_crawler(href)
print "parsing %s" % href
return self
def get_all_offers(self, css_class, upper_range=None):
pages = self.get_pages()
all_offers = []
if upper_range is None:
up_border = pages[-1].number
else:
up_border = upper_range
for p in range(1, up_border):
o = self.get_page(p).get_offers(css_class)
all_offers.extend(o)
return all_offers
def _as_page(raw_page):
number = raw_page.span.contents[0]
if raw_page.span.span:
number = raw_page.span.span.string
href = None
atags = raw_page.find_all('a')
if atags:
href = atags[0]['href']
return Page(number, href)
def get_html(src):
req = urllib2.Request(src)
res = urllib2.urlopen(req)
data = res.read()
return data
def _as_offer(raw_offer, time_converter, houseConverter):
tds = raw_offer.find_all("td")
if tds[0].p.br:
tds[0].p.br.extract()
date = " ".join([t.string.strip() for t in tds[0].p])
offer = tds[2].div.h3.a.span.contents[0]
href = tds[2].div.h3.a['href']
details = tds[2].div.p.small.contents[2]
price = tds[3].div.p.strong.string.strip()
raw_house = get_html(href)
params = houseConverter.get_house_details(raw_house)
return Offer(offer, details, price, time_converter.get_date(date), href, params)
| [
"ievgen.bezuglyi@gmail.com"
] | ievgen.bezuglyi@gmail.com |
6ee52dcd011ebc6a85b9d781ea841d799596e024 | 814d1aa3b0479fabc3b50f91873c212626b2de53 | /ttt/ttt/urls.py | ba9004a4a121fdab9e9746d1c0bbb2a66a553d64 | [] | no_license | y0ngma/facedCRM | cd187f2aa553314c2f50ba524aae0f7deec1c60f | 034c5e6a669c618232b1c2b89fc6a4dcc50b45be | refs/heads/master | 2022-11-11T03:58:50.966316 | 2020-07-06T05:26:04 | 2020-07-06T05:26:04 | 254,060,650 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | """ttt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path, include
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('face/', include('face.urls')),
# Index
re_path(r'^$', views.IndexView.as_view(), name="index_url"),
path('users/', include('users.urls', namespace="users")),
]
| [
"takeit2sy@pusan.ac.kr"
] | takeit2sy@pusan.ac.kr |
a6b89e5488d0dd85e8efd1c3e9dee11c6f265fd7 | 125a0c03fef9b9eda01cdd10df95063d0bb1b328 | /test/longueur string.py | 4662d433b55e7df985b714f7d49c5f17b2759ad2 | [] | no_license | EdouardPascal/projet | 8548bd0a96a59d7635827938ce3a45e91c8375e6 | 44641e17ac6eeb0bf57b5bd005941b685aff4ad0 | refs/heads/master | 2020-05-16T22:31:19.405807 | 2014-07-04T20:47:46 | 2014-07-04T20:47:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | import os
def count_string(mot):
longueur=0
mot = mot.split(" ")
print mot
for i in mot:
longueur = longueur + len(i)
return longueur
print (count_string('Michael le grand'))
os.system('pause') | [
"pascal1999"
] | pascal1999 |
376c79f3633d8ad0e88d967a07c9640eb17239d3 | 65306b41168a5afa6fc80904cc0fbf737939a01a | /scale/job/migrations/0014_auto_20160317_1208.py | a11aa3e4e2985f151177854a0252b685d19e38b8 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | kfconsultant/scale | 9e5df45cd36211d1bc5e946cf499a4584a2d71de | 28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b | refs/heads/master | 2020-12-07T00:04:37.737556 | 2020-01-06T12:57:03 | 2020-01-06T12:57:03 | 232,587,229 | 0 | 0 | Apache-2.0 | 2020-01-08T14:53:36 | 2020-01-08T14:53:35 | null | UTF-8 | Python | false | false | 1,806 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('job', '0013_auto_20160316_1805'),
]
operations = [
migrations.AddField(
model_name='job',
name='delete_superseded',
field=models.BooleanField(default=True),
preserve_default=True,
),
migrations.AddField(
model_name='job',
name='is_superseded',
field=models.BooleanField(default=False),
preserve_default=True,
),
migrations.AddField(
model_name='job',
name='root_superseded_job',
field=models.ForeignKey(related_name='superseded_by_jobs', on_delete=django.db.models.deletion.PROTECT, blank=True, to='job.Job', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='job',
name='superseded',
field=models.DateTimeField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='job',
name='superseded_job',
field=models.OneToOneField(related_name='superseded_by_job', null=True, on_delete=django.db.models.deletion.PROTECT, blank=True, to='job.Job'),
preserve_default=True,
),
migrations.AlterField(
model_name='job',
name='last_modified',
field=models.DateTimeField(auto_now=True),
preserve_default=True,
),
migrations.AlterIndexTogether(
name='job',
index_together=set([('last_modified', 'job_type', 'status')]),
),
]
| [
"derick.faller.ctr@us.af.mil"
] | derick.faller.ctr@us.af.mil |
7fb30f002d1d98fc1fe122d7f9e9ab5e0a089436 | cc853480e60add95fc65d2a4f2e8aca539ad85f7 | /pollster/models/inline_response_200_5.py | a92c676001369c071c8f4ca8bbd97a776309f5cf | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | ominari-insights/python-pollster | 05b6d258b3d67f9579fd5ef5db63dbc64e259316 | 276de8d66a92577b1143fd92a70cff9c35a1dfcf | refs/heads/master | 2020-07-30T00:40:28.323248 | 2017-01-23T18:40:04 | 2017-01-23T18:40:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,861 | py | # coding: utf-8
from pprint import pformat
from six import iteritems
import re
class InlineResponse2005(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, end_date=None, partisan_affiliation=None, sample_subpopulation=None, partisanship=None, question_text=None, mode=None, observations=None, margin_of_error=None, response_text=None, poll_slug=None, pollster_label=None, value=None, survey_house=None, start_date=None):
"""
InlineResponse2005 - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'end_date': 'date',
'partisan_affiliation': 'str',
'sample_subpopulation': 'str',
'partisanship': 'str',
'question_text': 'str',
'mode': 'str',
'observations': 'int',
'margin_of_error': 'int',
'response_text': 'str',
'poll_slug': 'str',
'pollster_label': 'str',
'value': 'float',
'survey_house': 'str',
'start_date': 'date'
}
self.attribute_map = {
'end_date': 'end_date',
'partisan_affiliation': 'partisan_affiliation',
'sample_subpopulation': 'sample_subpopulation',
'partisanship': 'partisanship',
'question_text': 'question_text',
'mode': 'mode',
'observations': 'observations',
'margin_of_error': 'margin_of_error',
'response_text': 'response_text',
'poll_slug': 'poll_slug',
'pollster_label': 'pollster_label',
'value': 'value',
'survey_house': 'survey_house',
'start_date': 'start_date'
}
self._end_date = end_date
self._partisan_affiliation = partisan_affiliation
self._sample_subpopulation = sample_subpopulation
self._partisanship = partisanship
self._question_text = question_text
self._mode = mode
self._observations = observations
self._margin_of_error = margin_of_error
self._response_text = response_text
self._poll_slug = poll_slug
self._pollster_label = pollster_label
self._value = value
self._survey_house = survey_house
self._start_date = start_date
@property
def end_date(self):
"""
Gets the end_date of this InlineResponse2005.
Last day of the range of days the survey house conducted the poll
:return: The end_date of this InlineResponse2005.
:rtype: date
"""
return self._end_date
@property
def partisan_affiliation(self):
"""
Gets the partisan_affiliation of this InlineResponse2005.
`None` if `partisanship` is `Nonpartisan`; otherwise one of `Dem`, `Rep` or `Other`
:return: The partisan_affiliation of this InlineResponse2005.
:rtype: str
"""
return self._partisan_affiliation
@property
def sample_subpopulation(self):
"""
Gets the sample_subpopulation of this InlineResponse2005.
One of Adults, Adults - Democrat, Adults - Republican, Adults - independent, Registered Voters, Registered Voters - Democrat, Registered Voters - Republican, Registered Voters - independent, Likely Voters, Likely Voters - Democrat, Likely Voters - Republican, Likely Voters - independent
:return: The sample_subpopulation of this InlineResponse2005.
:rtype: str
"""
return self._sample_subpopulation
@property
def partisanship(self):
"""
Gets the partisanship of this InlineResponse2005.
One of `Nonpartisan`, `Pollster` (the survey house is partisan), `Sponsor` (the survey house is nonpartisan, but the sponsor is partisan)
:return: The partisanship of this InlineResponse2005.
:rtype: str
"""
return self._partisanship
@property
def question_text(self):
"""
Gets the question_text of this InlineResponse2005.
The exact text of the poll question, if the survey house provided it and Pollster editors entered it.
:return: The question_text of this InlineResponse2005.
:rtype: str
"""
return self._question_text
@property
def mode(self):
"""
Gets the mode of this InlineResponse2005.
One of `Automated Phone`, `Internet`, `IVR/Live Phone`, etc.
:return: The mode of this InlineResponse2005.
:rtype: str
"""
return self._mode
@property
def observations(self):
"""
Gets the observations of this InlineResponse2005.
Number of respondents in this sample subpopulation, or `null` (the empty string, in a TSV) if the survey house did not report it
:return: The observations of this InlineResponse2005.
:rtype: int
"""
return self._observations
@property
def margin_of_error(self):
"""
Gets the margin_of_error of this InlineResponse2005.
Margin of error in this sample subpopulation, or `null` (the empty string, in a TSV) if the survey house did not report it
:return: The margin_of_error of this InlineResponse2005.
:rtype: int
"""
return self._margin_of_error
@property
def response_text(self):
"""
Gets the response_text of this InlineResponse2005.
Response text, as published by the survey house
:return: The response_text of this InlineResponse2005.
:rtype: str
"""
return self._response_text
@property
def poll_slug(self):
"""
Gets the poll_slug of this InlineResponse2005.
String that uniquely identifies a Poll
:return: The poll_slug of this InlineResponse2005.
:rtype: str
"""
return self._poll_slug
@property
def pollster_label(self):
"""
Gets the pollster_label of this InlineResponse2005.
Label Pollster uses on its Question (i.e., a column header in `poll-responses-clean.tsv`)
:return: The pollster_label of this InlineResponse2005.
:rtype: str
"""
return self._pollster_label
@property
def value(self):
"""
Gets the value of this InlineResponse2005.
Value for this group of respondents, from 0 to 100.
:return: The value of this InlineResponse2005.
:rtype: float
"""
return self._value
@property
def survey_house(self):
"""
Gets the survey_house of this InlineResponse2005.
Name of the survey house (e.g., `Gallup`)
:return: The survey_house of this InlineResponse2005.
:rtype: str
"""
return self._survey_house
@property
def start_date(self):
"""
Gets the start_date of this InlineResponse2005.
First day of the range of days the survey house conducted the poll
:return: The start_date of this InlineResponse2005.
:rtype: date
"""
return self._start_date
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"adam@adamhooper.com"
] | adam@adamhooper.com |
ff87308e3bf37a01ba3b6bb7b641ca1822688169 | ba6ba4024bd7ca1e746ac0a88b8a51965a868e17 | /2020/20201201_AoC/puzzle_1_soloution.py | 8b53a1efa18257e1bb04a029d0170e7ce397cd38 | [] | no_license | mmcas/AdventOfCode | 457d79ed49af2b25e8645b1a9e94ce41293e7256 | 3e9dc1d329c5a169504ead612423dc80e669946e | refs/heads/master | 2021-07-10T03:31:34.779387 | 2020-12-09T19:05:11 | 2020-12-09T19:05:11 | 225,156,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | from puzzle_1_input import input
numbers_list = input()
def find_two_nums(num_list):
for num in num_list:
num_list.remove(num)
for other_num in num_list:
if other_num + num == 2020:
return(other_num * num)
else:
next
def find_three_nums(num_list):
for num in num_list:
num_list.remove(num)
for other_num in num_list:
for third_num in num_list:
if third_num + other_num + num == 2020:
return(third_num * other_num * num)
else:
next
#soloution 1a
print(find_two_nums(numbers_list))
#soloution 1b
print(find_three_nums(numbers_list)) | [
"madelene.casselbrant@gmail.com"
] | madelene.casselbrant@gmail.com |
6df22eb54bd84e1e3cf3937cfe99a1cf7fd07650 | 26d08ac445894e2c8d8050e5fb10a3b07c307ef8 | /shop/migrations/0001_initial.py | a82c59a14b79486e5efcac229ec5118c5975809f | [] | no_license | JamesJohnson11/OnlineShop | 5b9b9627c261fd54bad93f0e851476a555028896 | 5cc7370c432d5fa365c91a7647f0ed3cf52722cb | refs/heads/master | 2023-01-30T23:29:43.275727 | 2020-12-14T20:13:50 | 2020-12-14T20:13:50 | 321,127,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,841 | py | # Generated by Django 3.1.4 on 2020-12-13 18:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'verbose_name': 'category',
'verbose_name_plural': 'categories',
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', models.SlugField(max_length=200)),
('image', models.ImageField(blank=True, upload_to='products/%Y/%m/%d')),
('description', models.TextField(blank=True)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('available', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.category')),
],
options={
'ordering': ('name',),
'index_together': {('id', 'slug')},
},
),
]
| [
"bdotmgmt@gmail.com"
] | bdotmgmt@gmail.com |
cf8d39c26607266fc7a38125e1c267b4e0ece863 | 99206e2d7bd338e7760be8161c03bf58badc600b | /sumKint.py | 8f180efe1f4465a360dd13ad30e134e25a6aa33b | [] | no_license | Arun0033/Guvi | 524987307f1b58229293c81f82ab8e9dc9949743 | f49d4b2892bcf6b17ab4bc7acd761300ec9d9043 | refs/heads/master | 2020-05-25T23:00:35.506103 | 2019-07-21T04:27:32 | 2019-07-21T04:27:32 | 188,026,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | p=input()
p=p.split()
N=int(p[0])
K=int(p[1])
m=input()
m=m.split()
sum=0
for i in range(K):
sum=sum+int(m[i])
print(sum)
| [
"noreply@github.com"
] | Arun0033.noreply@github.com |
6bca958de029f1928a1d3778fea7af7a16fa6413 | d31cd04c825b259969f4c3e34a58f3d54daa88e9 | /o2o/GBDT/minium.py | b5ca232e5f5d18aef715b513f4d137b7f980b1cf | [] | no_license | rogeroyer/dataCastle | db1870f98e4fbf868aecdb40c256736d66ba649d | 0aa7bb97c5555fb0635d218c3ece2cef79753cd5 | refs/heads/master | 2021-06-05T04:52:10.304610 | 2019-12-01T06:18:30 | 2019-12-01T06:18:30 | 105,973,617 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 39,931 | py | #coding=utf-8
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingClassifier
# read_data = pd.read_csv(r'D:\aliyun\ccf_offline_stage1_train.csv', header=None)
# read_data.columns = ['user_id', 'merchant_id', 'coupon_id', 'discount_rate', 'distance', 'date_received', 'date']
#
# # 划分数据集 #
# read_data = read_data[read_data['date_received'] != 'null']
# train_data = read_data[((read_data['date_received'] >= '20160401') & (read_data['date_received'] < '20160615')) | (read_data['date_received'] == 'null')]
# # train_data = read_data[(read_data['date_received'] >= '20160315') & (read_data['date_received'] < '20160615')]
# # print train_data
# label_data = read_data[(read_data['date_received'] >= '20160515') & (read_data['date_received'] < '20160615')]
# # 给未消费数据打标为0 #
# label_data_part1 = label_data[label_data['date'] == 'null'] #318750
# label_data_part1['label'] = 0
# # 给领券消费的数据打标 #
# label_data_part2 = label_data[label_data['date'] != 'null']
# label_data_part2['date_received'] = pd.to_datetime(label_data_part2['date_received'])
# label_data_part2['date'] = pd.to_datetime(label_data_part2['date'])
# # 领了优惠券并在15天之内消费的打标为1否则为0 #
# label_data_part2['label'] = [0 if int(i.days) > 15 else 1 for i in (label_data_part2['date'] - label_data_part2['date_received'])]
# #去连接线- 将日期格式转换为文本格式#
# label_data_part2['date_received'] = [str(i)[:10].replace('-', '') for i in label_data_part2['date_received']]
# label_data_part2['date'] = [str(i)[:10].replace('-', '') for i in label_data_part2['date']]
# # 合并已经贴好标签的两个数据集 #
# label_data = label_data_part1.append(label_data_part2)
# train_data.to_csv('train_data.csv', index=None, header=None)
# label_data.to_csv('label_data.csv', index=None, header=None)
# # print train_data
# # print label_data
####################################################################
####################################################################
####################################################################
# Load the feature-window slice written by the (commented-out) data-split step.
# NOTE: this script is Python 2 (bare `print` statements appear below).
train_data = pd.read_csv(r'train_data.csv', header=None)
train_data.columns = ['user_id', 'merchant_id', 'coupon_id', 'discount_rate', 'distance', 'date_received', 'date']
train_data['date_received'] = [str(index) for index in train_data['date_received']]
############################ data preprocessing ###################################
# Replace 'null' distances with the median of the known distances.
distance_means = train_data[train_data['distance'] != 'null']
distance_means['distance'] = [int(i) for i in distance_means['distance']]
distance = distance_means['distance'].median()  # median #
train_data['distance'] = train_data['distance'].replace('null', distance)
train_data['distance'] = [int(i) for i in train_data['distance']]
#********************************* consumption-related features ******************************************##
# Number of coupons the user received AND redeemed (date != 'null'). #
train_data['afford'] = [1 if index != 'null' else 0 for index in train_data['date']]
feature25 = pd.pivot_table(train_data, index='user_id', values='afford', aggfunc='sum')
feature25['user_id'] = feature25.index
feature25.columns = ['afford', 'user_id']
train_data = train_data.drop('afford', axis=1)
# print feature25
# Number of coupons the user received but did not redeem. #
feature26 = pd.pivot_table(train_data, index='user_id', values='date_received', aggfunc='count')
feature26['user_id'] = feature26.index
feature26.columns = ['unafford', 'user_id']
feature26['unafford'] = feature26['unafford'] - feature25['afford']
# print feature26
# Average discount rate of coupons the user redeemed. #
# 'x:y' means "spend x, save y" -> rate = 1 - y/x; '0.xx' strings are kept as-is.
afford_data = train_data.iloc[:, [0, 3, 4, 6]]
afford_data = afford_data[afford_data['date'] != 'null']
afford_data['discount_rate'] = [index if index[0] == '0' else (1 - (int(str(index).split(':')[1])*1.0)/int(str(index).split(':')[0]))
                                for index in afford_data['discount_rate']]
afford_data['discount_rate'] = [float(index) for index in afford_data['discount_rate']]
feature27 = pd.pivot_table(afford_data, index='user_id', values='discount_rate', aggfunc='mean')
feature27['user_id'] = feature27.index
feature27.columns = ['feature27', 'user_id']
# print feature27
# Average merchant distance over the coupons the user redeemed. #
feature28 = pd.pivot_table(afford_data, index='user_id', values='distance', aggfunc='mean')
feature28['user_id'] = feature28.index
feature28.columns = ['feature28', 'user_id']
# print feature28
###############################################################################
# Just for test #
# def call_mode(group):
# return group #.mode()
# 求众数的函数 #
def call_mode(group):
    """Return the most frequent value (mode) of *group*.

    Used as a pandas ``pivot_table`` aggfunc. Ties are broken by dict
    iteration order, matching the original implementation; an empty
    group yields ``None`` (the original fell off the loop and returned
    ``None`` implicitly).
    """
    counts = {}
    for item in group:
        counts[item] = counts.get(item, 0) + 1
    if not counts:
        return None
    # Single pass over the counts; the original recomputed
    # max(d.values()) for every key, which was accidentally O(n^2).
    return max(counts, key=counts.get)
# feature1 = pd.pivot_table(train_data, index=['user_id', 'merchant_id'], values='coupon_id', aggfunc='count')
# # feature2 = pd.pivot_table(train_data, index='user_id', values='coupon_id', aggfunc='count')
# print feature1
# feature1.to_csv('test.csv', header=None)
# feature1['user_id', 'merchant_id'] = feature1.index[0]
# print feature1
# print feature2
# print train_data.groupby('user_id')['coupon_id'].count()
# 求set数目的aggfunc函数 #
def call_set(group):
    """Count the distinct values in *group* (pivot_table aggfunc)."""
    distinct_values = {item for item in group}
    return len(distinct_values)
# 求set的aggfunc函数 #
def call_setlen(group):
    """Return the distinct values of *group* as a set (pivot_table aggfunc)."""
    return {item for item in group}
# Weekday on which each user received the most coupons (mode of weekday). #
train_data['date_received'] = pd.to_datetime(train_data['date_received'])  # (this line is redundant) #
train_data['week'] = [index.weekday()+1 for index in train_data['date_received']]  # (this line is redundant) #
feature24 = train_data.iloc[:, [0, 7]]
feature24 = pd.pivot_table(feature24, index='user_id', values='week', aggfunc=call_mode)
feature24['user_id'] = feature24.index
feature24.columns = ['week_set', 'user_id']
# print feature24
# How many distinct users received each merchant's coupons, plus a min-max normalized copy. #
feature22 = pd.pivot_table(train_data, index='merchant_id', values='user_id', aggfunc=call_set)
feature22['merchant_id'] = feature22.index
feature22.columns = ['feature22', 'merchant_id']
m = feature22['feature22'].max()
n = feature22['feature22'].min()
feature22['feature22_one'] = [1.0*(index-n)/(m-n) for index in feature22['feature22']]
# print feature22
# How many distinct users received each coupon, plus a min-max normalized copy. #
feature23 = pd.pivot_table(train_data, index='coupon_id', values='user_id', aggfunc=call_set)
feature23['merchant_id'] = feature23.index  # NOTE(review): created as 'merchant_id' but renamed to 'coupon_id' just below
feature23.columns = ['feature23', 'coupon_id']
m = feature23['feature23'].max()
n = feature23['feature23'].min()
feature23['feature23_one'] = [1.0*(index-n)/(m-n) for index in feature23['feature23']]
# print feature23
# Number of coupons each user received from each specific merchant. #
# The round-trip through test.csv flattens the (user_id, merchant_id) MultiIndex.
feature20 = pd.pivot_table(train_data, index=['user_id', 'merchant_id'], values='coupon_id', aggfunc='count')
feature20.to_csv('test.csv', header=None)
feature20 = pd.read_csv(r'test.csv', header=None)
feature20.columns = ['user_id', 'merchant_id', 'feature20']
# How many coupons each merchant issued. #
feature1 = pd.pivot_table(train_data, index='merchant_id', values='coupon_id', aggfunc='count')
feature1['merchant_id'] = feature1.index
feature1.columns = ['feature1', 'merchant_id']
# How many coupons each user received. #
feature2 = pd.pivot_table(train_data, index='user_id', values='coupon_id', aggfunc='count')
feature2['user_id'] = feature2.index
feature2.columns = ['feature2', 'user_id']
# print feature2
# How many distinct coupon types each user received, plus crosses with the raw count. #
feature15 = pd.pivot_table(train_data, index='user_id', values='coupon_id', aggfunc=call_set)
feature15['user_id'] = feature15.index
feature15.columns = ['feature15', 'user_id']
feature15['Repeat'] = feature2['feature2'] - feature15['feature15'] # repeat-receive count #
feature15['Multip'] = feature2['feature2'] + feature15['feature15'] # sum #
feature15['Cheng'] = feature2['feature2'] * feature15['feature15'] # product #
feature15['Chu'] = feature15['feature15'] / feature2['feature2'] # ratio #
# print feature15
# How many distinct merchants the user received coupons from. #
feature14 = pd.pivot_table(train_data, index='user_id', values='merchant_id', aggfunc=call_set)
feature14['user_id'] = feature14.index
feature14.columns = ['feature14', 'user_id']
# print feature14
# Total merchant-coupon receipts per user, plus crosses with the distinct count. #
feature141 = pd.pivot_table(train_data, index='user_id', values='merchant_id', aggfunc='count')
feature141['user_id'] = feature141.index
feature141.columns = ['feature141', 'user_id']
feature14['count'] = feature141['feature141']
feature14['repeat'] = feature14['count'] - feature14['feature14'] # repeat-receive count #
feature14['multip'] = feature14['count'] + feature14['feature14'] # sum #
feature14['cheng'] = feature14['count'] * feature14['feature14'] # product #
feature14['chu'] = feature14['feature14'] / feature14['count'] # ratio #
# print feature14
# How many distinct coupon types each merchant issued. #
feature21 = pd.pivot_table(train_data, index='merchant_id', values='coupon_id', aggfunc=call_set)
feature21['merchant_id'] = feature21.index
feature21.columns = ['feature21', 'merchant_id']
# print feature21
# Per-user distance stats: mean / max / min / median / mode. #
feature3 = pd.pivot_table(train_data, index='user_id', values='distance', aggfunc='mean')
feature3['user_id'] = feature3.index
feature3.columns = ['feature3', 'user_id']
feature = pd.pivot_table(train_data, index='user_id', values='distance', aggfunc='max')
feature3['one'] = feature['distance']
feature = pd.pivot_table(train_data, index='user_id', values='distance', aggfunc='min')
feature3['two'] = feature['distance']
feature = pd.pivot_table(train_data, index='user_id', values='distance', aggfunc='median')
feature3['three'] = feature['distance']
feature = pd.pivot_table(train_data, index='user_id', values='distance', aggfunc=call_mode)
feature3['four0'] = feature['distance']
# print feature3
#
# Per-merchant distance stats (mean/max/min/median/mode), then pairwise #
# add/sub/mul crosses of those stats, and self add/mul (2x and square). #
feature19 = pd.pivot_table(train_data, index='merchant_id', values='distance', aggfunc='mean')
feature19['merchant_id'] = feature19.index
feature19.columns = ['feature19', 'merchant_id']
feature = pd.pivot_table(train_data, index='merchant_id', values='distance', aggfunc='max')
feature19['one'] = feature['distance']
feature = pd.pivot_table(train_data, index='merchant_id', values='distance', aggfunc='min')
feature19['two'] = feature['distance']
feature = pd.pivot_table(train_data, index='merchant_id', values='distance', aggfunc='median')
feature19['three'] = feature['distance']
feature = pd.pivot_table(train_data, index='merchant_id', values='distance', aggfunc=call_mode)
feature19['four0'] = feature['distance']
# mean crossed with max / min / median / mode
feature19['four'] = feature19['feature19'] + feature19['one']
feature19['four1'] = feature19['feature19'] - feature19['one']
feature19['four2'] = feature19['feature19'] * feature19['one']
feature19['five'] = feature19['feature19'] + feature19['two']
feature19['five1'] = feature19['feature19'] - feature19['two']
feature19['five2'] = feature19['feature19'] * feature19['two']
feature19['six'] = feature19['feature19'] + feature19['three']
feature19['six1'] = feature19['feature19'] - feature19['three']
feature19['six2'] = feature19['feature19'] * feature19['three']
feature19['four3'] = feature19['feature19'] + feature19['four0']
feature19['four4'] = feature19['feature19'] - feature19['four0']
feature19['four5'] = feature19['feature19'] * feature19['four0']
# max crossed with min / median / mode
feature19['seven'] = feature19['one'] + feature19['two']
feature19['seven1'] = feature19['one'] - feature19['two']
feature19['seven2'] = feature19['one'] * feature19['two']
feature19['eight'] = feature19['one'] + feature19['three']
feature19['eight1'] = feature19['one'] - feature19['three']
feature19['eight2'] = feature19['one'] * feature19['three']
feature19['eight3'] = feature19['one'] + feature19['four0']
feature19['eight4'] = feature19['one'] - feature19['four0']
feature19['eight5'] = feature19['one'] * feature19['four0']
# min crossed with median / mode, median crossed with mode
feature19['nine'] = feature19['two'] + feature19['three']
feature19['nine1'] = feature19['two'] - feature19['three']
feature19['nine2'] = feature19['two'] * feature19['three']
feature19['nine3'] = feature19['two'] + feature19['four0']
feature19['nine4'] = feature19['two'] - feature19['four0']
feature19['nine5'] = feature19['two'] * feature19['four0']
feature19['nine6'] = feature19['three'] + feature19['four0']
feature19['nine7'] = feature19['three'] - feature19['four0']
feature19['nine8'] = feature19['three'] * feature19['four0']
# self add / mul
feature19['ten'] = feature19['one'] + feature19['one']
feature19['ten2'] = feature19['one'] * feature19['one']
feature19['eleven'] = feature19['feature19'] + feature19['feature19']
feature19['eleven2'] = feature19['feature19'] * feature19['feature19']
feature19['twelve'] = feature19['two'] + feature19['two']
feature19['twelve2'] = feature19['two'] * feature19['two']
feature19['thirteen'] = feature19['three'] + feature19['three']
feature19['thirteen2'] = feature19['three'] * feature19['three']
feature19['thirteen3'] = feature19['four0'] + feature19['four0']
feature19['thirteen4'] = feature19['four0'] * feature19['four0']
# print feature3
# print feature19
# 组合特征,从已经提取的特征里面交叉进行加减乘除 #
# 最以后一次领券,第一次领券 #
#########################################################################
# Single-column popularity counts: occurrences of each user_id / merchant_id / coupon_id.
# A temporary duplicate column is added so pivot_table can count it, then dropped.
train_data['user_id1'] = train_data['user_id']
feature8 = pd.pivot_table(train_data, index='user_id', values='user_id1', aggfunc='count')
feature8['user_id'] = feature8.index
train_data = train_data.drop('user_id1', axis=1)
train_data['merchant_id1'] = train_data['merchant_id']
feature9 = pd.pivot_table(train_data, index='merchant_id', values='merchant_id1', aggfunc='count')
feature9['merchant_id'] = feature9.index
train_data = train_data.drop('merchant_id1', axis=1)
train_data['coupon_id1'] = train_data['coupon_id']
feature10 = pd.pivot_table(train_data, index='coupon_id', values='coupon_id1', aggfunc='count')
feature10['coupon_id'] = feature10.index
train_data = train_data.drop('coupon_id1', axis=1)
#####################################################################
#####################################################################
# Features extracted from the labeled (prediction-window) dataset. #
# Load the dataset labeled by the (commented-out) split step above. #
label_data = pd.read_csv('label_data.csv', header=None)
label_data.columns = ['user_id', 'merchant_id', 'coupon_id', 'discount_rate', 'distance', 'date_received', 'date', 'label']
# print label_data
# Discount rate: '0.xx' kept as-is; 'x:y' means "spend x, save y" -> 1 - y/x. #
feature4 = label_data.iloc[:, [2, 3]]
feature4['discount_rate'] = [index if index[0] == '0' else (1 - (int(str(index).split(':')[1])*1.0)/int(str(index).split(':')[0]))
                             for index in feature4['discount_rate']]
feature4.columns = ['coupon_id', 'feature4']
# print feature4
# Spend threshold ("spend X"); rate-style coupons get the median threshold. #
feature5 = label_data.iloc[:, [2, 3]]
feature5.columns = ['coupon_id', 'up']
feature5['up'] = [index if index[0] == '0' else int(str(index).split(':')[0]) for index in feature5['up']]
up_median = feature5['up'].median()
feature5['up'] = [up_median if str(index)[0] == '0' else index for index in feature5['up']]
# print feature5
# Threshold, min-max normalized. NOTE(review): feature17 aliases feature5 (same
# object), so this normalization also rewrites feature5['up'] used later. #
feature17 = feature5
m = feature17['up'].max()
n = feature17['up'].min()
feature17['up'] = [1.0*(index-n)/(m-n) for index in feature17['up']]
# print feature17
# Discount amount ("save Y"); rate-style coupons get the median amount. #
feature6 = label_data.iloc[:, [2, 3]]
feature6.columns = ['coupon_id', 'down']
feature6['down'] = [index if index[0] == '0' else int(str(index).split(':')[1]) for index in feature6['down']]
up_median = feature6['down'].median()
feature6['down'] = [up_median if str(index)[0] == '0' else index for index in feature6['down']]
# print feature6
# Discount amount, min-max normalized. NOTE(review): feature18 aliases feature6. #
feature18 = feature6
m = feature18['down'].max()
n = feature18['down'].min()
feature18['down'] = [1.0*(index-n)/(m-n) for index in feature18['down']]
# print feature18
# Discount type: 0 = rate-style ('0.xx'), 1 = threshold-style ('x:y'). #
feature7 = label_data.iloc[:, [0, 2, 3]] #
feature7.columns = ['user_id', 'coupon_id', 'feature7']
feature7['feature7'] = [0 if index[0] == '0' else 1 for index in feature7['feature7']]
# print feature7
# Coupon type each user receives most often (mode of the type flag). #
feature16 = pd.pivot_table(feature7, index='user_id', values='feature7', aggfunc=call_mode)
feature16['user_id'] = feature16.index
feature16.columns = ['feature16', 'user_id']
# print feature16
# Coupon-receipt date features (9 of them). #
# BUG FIX: the original did pd.to_datetime(train_data['date_received']) and
# assigned it into label_data — train_data is a differently-filtered frame with
# a different index, so the weekday features were misaligned/NaN. Convert
# label_data's own column instead, mirroring the parallel test-set code below
# (str() first, because read_csv parses the yyyymmdd values as integers).
label_data['date_received'] = [str(index) for index in label_data['date_received']]
label_data['date_received'] = pd.to_datetime(label_data['date_received'])
label_data['week'] = [index.weekday()+1 for index in label_data['date_received']]
# Weekday (1-7), seven one-hot weekday dummies, weekend flag, day-of-month. #
feature11 = label_data.iloc[:, [0, 8]] ## changed ##
feature11['one'] = [1 if index == 1 else 0 for index in feature11['week']]
feature11['two'] = [1 if index == 2 else 0 for index in feature11['week']]
feature11['three'] = [1 if index == 3 else 0 for index in feature11['week']]
feature11['four'] = [1 if index == 4 else 0 for index in feature11['week']]
feature11['five'] = [1 if index == 5 else 0 for index in feature11['week']]
feature11['six'] = [1 if index == 6 else 0 for index in feature11['week']]
feature11['seven'] = [1 if index == 7 else 0 for index in feature11['week']]
set_one = set([7, 6])  # Saturday (6) / Sunday (7) -> weekend flag
feature11['eight'] = [1 if index in set_one else 0 for index in feature11['week']]
feature11['month'] = [index.day for index in label_data['date_received']]
# print feature11
# Flag: did the user receive exactly one coupon in the label window? #
feature = label_data.iloc[:, [0, 5]]
feature13 = pd.pivot_table(feature, index='user_id', values='date_received', aggfunc='count')
feature13['user_id'] = feature13.index
feature13['feature13'] = [1 if index == 1 else 0 for index in feature13['date_received']]
feature13 = feature13.drop('date_received', axis=1)
# print feature13
###################################################################
# Assemble the final training table: keep (user_id, merchant_id, coupon_id,
# label) and attach every extracted feature by left-join or index alignment.
label_data = label_data.iloc[:, [0, 1, 2, 7]]
label_data = label_data.merge(feature1, on='merchant_id', how='left')
label_data = label_data.merge(feature2, on='user_id', how='left')
label_data = label_data.merge(feature3, on='user_id', how='left')
# feature4..7 were sliced from label_data, so plain assignment aligns by index.
# NOTE(review): feature5/feature6 were mutated through the feature17/feature18
# aliases above, so these columns hold the normalized values.
label_data['feature4'] = feature4['feature4']
label_data['feature5'] = feature5['up']
label_data['feature6'] = feature6['down']
label_data['feature7'] = feature7['feature7']
label_data = label_data.merge(feature8, on='user_id', how='left')
label_data = label_data.merge(feature9, on='merchant_id', how='left')
label_data = label_data.merge(feature10, on='coupon_id', how='left')
label_data['feature11_week'] = feature11['week']
label_data['feature11_one'] = feature11['one']
label_data['feature11_two'] = feature11['two']
label_data['feature11_three'] = feature11['three']
label_data['feature11_four'] = feature11['four']
label_data['feature11_five'] = feature11['five']
label_data['feature11_six'] = feature11['six']
label_data['feature11_seven'] = feature11['seven']
label_data['feature11_eight'] = feature11['eight']
label_data['feature11_nine'] = feature11['month']
# label_data = label_data.merge(feature11, on='user_id', how='left')
label_data = label_data.merge(feature13, on='user_id', how='left')
label_data = label_data.merge(feature14, on='user_id', how='left')
label_data = label_data.merge(feature15, on='user_id', how='left')
label_data = label_data.merge(feature16, on='user_id', how='left')
label_data['feature17'] = feature17['up']
label_data['feature18'] = feature18['down']
label_data = label_data.merge(feature19, on='merchant_id', how='left')
label_data = label_data.merge(feature20, on=['user_id', 'merchant_id'], how='left')
label_data = label_data.merge(feature21, on='merchant_id', how='left')
label_data = label_data.merge(feature22, on='merchant_id', how='left')
label_data = label_data.merge(feature23, on='coupon_id', how='left')
label_data = label_data.merge(feature24, on='user_id', how='left')
label_data = label_data.merge(feature25, on='user_id', how='left')
label_data = label_data.merge(feature26, on='user_id', how='left')
label_data = label_data.merge(feature27, on='user_id', how='left')
label_data = label_data.merge(feature28, on='user_id', how='left')
# Missing joins become 0 rather than NaN.
label_data = label_data.fillna(0)
# Python 2 print statement (kept byte-identical).
print label_data
# # 判断日期是否是周末 #
# train_data = train_data[train_data['date'] != 'null']
# train_data['date_received'] = pd.to_datetime(train_data['date_received'])
# train_data['month'] = [index.day for index in train_data['date_received']]
# print train_data
# train_data['week'] = [index.weekday()+1 for index in train_data['date_received']]
# train_data['date'] = pd.to_datetime(train_data['date'])
# train_data['week'] = [index.weekday()+1 for index in train_data['date']]
# week = list(train_data[train_data['date'] != 'null']['week'])
# plt.hist(week, color='yellow')
# plt.show()
# print week
###############################################################
###############################################################
###############################################################
# 特征区间test_dataset #
test_dataset = pd.read_csv(r'..\test_set\train_data_feature', header=None)
test_dataset.columns = ['user_id', 'merchant_id', 'coupon_id', 'discount_rate', 'distance', 'date_received']
# 标签区间test_set #
test_set = pd.read_csv(r'D:\aliyun\ccf_offline_stage1_test_revised.csv', header=None)
test_set.columns = ['user_id', 'merchant_id', 'coupon_id', 'discount_rate', 'distance', 'date_received']
test_dataset_two = test_set.copy()
# 消费特征提取-数据集 #
test_data = pd.read_csv(r'..\test_set\train_data_test', header=None)
test_data.columns = ['user_id', 'merchant_id', 'coupon_id', 'discount_rate', 'distance', 'date_received', 'date']
##*********************************消费相关特征******************************************##
# 用户领券并消费的张数 #
test_data['afford'] = [1 if index != 'null' else 0 for index in test_data['date']]
feature25 = pd.pivot_table(test_data, index='user_id', values='afford', aggfunc='sum')
feature25['user_id'] = feature25.index
feature25.columns = ['afford', 'user_id']
# print feature25
# 用户领券未消费的张数 #
feature26 = pd.pivot_table(test_data, index='user_id', values='date_received', aggfunc='count')
feature26['user_id'] = feature26.index
feature26.columns = ['unafford', 'user_id']
feature26['unafford'] =feature26['unafford'] - feature25['afford']
# print feature26
# 用户领券消费过的平均折扣率 #
distance_means = test_data[test_data['distance'] != 'null'] #距离处理#
distance_means['distance'] = [int(i) for i in distance_means['distance']]
distance = distance_means['distance'].median() #中位数#
test_data['distance'] = test_data['distance'].replace('null', distance)
test_data['distance'] = [int(i) for i in test_data['distance']]
afford_data = test_data.iloc[:, [0, 3, 4, 6]]
afford_data = afford_data[afford_data['date'] != 'null']
afford_data['discount_rate'] = [index if index[0] == '0' else (1 - (int(str(index).split(':')[1])*1.0)/int(str(index).split(':')[0]))
for index in afford_data['discount_rate']]
afford_data['discount_rate'] = [float(index) for index in afford_data['discount_rate']]
feature27 = pd.pivot_table(afford_data, index='user_id', values='discount_rate', aggfunc='mean')
feature27['user_id'] = feature27.index
feature27.columns = ['feature27', 'user_id']
# print feature27
# 用户领券消费的平均距离 #
feature28 = pd.pivot_table(afford_data, index='user_id', values='distance', aggfunc='mean')
feature28['user_id'] = feature28.index
feature28.columns = ['feature28', 'user_id']
# print feature28
##############################################################################
# 用户在周几领取优惠券数目最多 #
test_dataset['date_received'] = [str(index) for index in test_dataset['date_received']]
test_dataset['date_received'] = pd.to_datetime(test_dataset['date_received']) #这一行不用加#
test_dataset['week'] = [index.weekday()+1 for index in test_dataset['date_received']] #这一行不用加#
feature24 = test_dataset.iloc[:, [0, 6]]
feature24 = pd.pivot_table(feature24, index='user_id', values='week', aggfunc=call_mode)
feature24['user_id'] = feature24.index
feature24.columns = ['week_set', 'user_id']
# print 'feature24:', feature24
# 每个商户发的优惠券被多少个不同用户领取并归一化 #
feature22 = pd.pivot_table(test_dataset, index='merchant_id', values='user_id', aggfunc=call_set)
feature22['merchant_id'] = feature22.index
feature22.columns = ['feature22', 'merchant_id']
m = feature22['feature22'].max()
n = feature22['feature22'].min()
feature22['feature22_one'] = [1.0*(index-n)/(m-n) for index in feature22['feature22']]
# print feature22
# 每种优惠券被多少个不同用户领取并归一化 #
feature23 = pd.pivot_table(test_dataset, index='coupon_id', values='user_id', aggfunc=call_set)
feature23['merchant_id'] = feature23.index
feature23.columns = ['feature23', 'coupon_id']
m = feature23['feature23'].max()
n = feature23['feature23'].min()
feature23['feature23_one'] = [1.0*(index-n)/(m-n) for index in feature23['feature23']]
# print feature23
# 用户领取特定商家的优惠券张数 #
feature20 = pd.pivot_table(test_dataset, index=['user_id', 'merchant_id'], values='coupon_id', aggfunc='count')
feature20.to_csv('test.csv', header=None)
feature20 = pd.read_csv(r'test.csv', header=None)
feature20.columns = ['user_id', 'merchant_id', 'feature20']
# 每个商户发了多少优惠券 #
feature1 = pd.pivot_table(test_dataset, index='merchant_id', values='coupon_id', aggfunc='count')
feature1['merchant_id'] = feature1.index
feature1.columns = ['feature1', 'merchant_id']
# 每位用户领了多少张优惠券 #
feature2 = pd.pivot_table(test_dataset[test_dataset['coupon_id'].notnull()], index='user_id', values='coupon_id', aggfunc='count')
feature2['user_id'] = feature2.index
feature2.columns = ['feature2', 'user_id']
#
# 每个用户领了多少不同类型的优惠券 #
feature15 = pd.pivot_table(test_dataset, index='user_id', values='coupon_id', aggfunc=call_set)
feature15['user_id'] = feature15.index
feature15.columns = ['feature15', 'user_id']
feature15['Repeat'] = feature2['feature2'] - feature15['feature15'] # 重复领取次数 #
feature15['Multip'] = feature2['feature2'] + feature15['feature15'] # 相加 #
feature15['Cheng'] = feature2['feature2'] * feature15['feature15'] # 相乘 #
feature15['Chu'] = feature15['feature15'] / feature2['feature2'] # 相除 #
# print feature15
# 该用户领了多少不同商家的优惠券 #
feature14 = pd.pivot_table(test_dataset, index='user_id', values='merchant_id', aggfunc=call_set)
feature14['user_id'] = feature14.index
feature14.columns = ['feature14', 'user_id']
# print feature14
# 该用户领了多少商家的优惠券 #
feature141 = pd.pivot_table(test_dataset, index='user_id', values='merchant_id', aggfunc='count')
feature141['user_id'] = feature141.index
feature141.columns = ['feature141', 'user_id']
feature14['count'] = feature141['feature141']
feature14['repeat'] = feature14['count'] - feature14['feature14'] # 重复领券次数 #
feature14['multip'] = feature14['count'] + feature14['feature14'] # 相加 #
feature14['cheng'] = feature14['count'] * feature14['feature14'] # 相乘 #
feature14['chu'] = feature14['feature14'] / feature14['count'] # 相除 #
# print feature14
# 每个商户发行了多少不同类型的优惠券 #
feature21 = pd.pivot_table(test_dataset, index='merchant_id', values='coupon_id', aggfunc=call_set)
feature21['merchant_id'] = feature21.index
feature21.columns = ['feature21', 'merchant_id']
# print feature21
# 商户距离 4个 #
distance_means = test_dataset[test_dataset['distance'] != 'null']
distance_means['distance'] = [int(i) for i in distance_means['distance']]
distance = distance_means['distance'].median() #中位数#
test_dataset['distance'] = test_dataset['distance'].replace('null', distance)
test_dataset['distance'] = [int(i) for i in test_dataset['distance']]
feature3 = pd.pivot_table(test_dataset, index='user_id', values='distance', aggfunc='mean')
feature3['user_id'] = feature3.index
feature3.columns = ['feature3', 'user_id']
feature = pd.pivot_table(test_dataset, index='user_id', values='distance', aggfunc='max')
feature3['one'] = feature['distance']
feature = pd.pivot_table(test_dataset, index='user_id', values='distance', aggfunc='min')
feature3['two'] = feature['distance']
feature = pd.pivot_table(test_dataset, index='user_id', values='distance', aggfunc='median')
feature3['three'] = feature['distance']
feature = pd.pivot_table(test_dataset, index='user_id', values='distance', aggfunc=call_mode)
feature3['four0'] = feature['distance']
# 用户距离 3个 # # 均值,中位数,最大,最小 再两两交叉加减乘,本身与本身加减乘#
feature19 = pd.pivot_table(test_dataset, index='merchant_id', values='distance', aggfunc='mean')
feature19['merchant_id'] = feature19.index
feature19.columns = ['feature19', 'merchant_id']
feature = pd.pivot_table(test_dataset, index='merchant_id', values='distance', aggfunc='max')
feature19['one'] = feature['distance']
feature = pd.pivot_table(test_dataset, index='merchant_id', values='distance', aggfunc='min')
feature19['two'] = feature['distance']
feature = pd.pivot_table(test_dataset, index='merchant_id', values='distance', aggfunc='median')
feature19['three'] = feature['distance']
feature = pd.pivot_table(test_dataset, index='merchant_id', values='distance', aggfunc=call_mode)
feature19['four0'] = feature['distance']
feature19['four'] = feature19['feature19'] + feature19['one']
feature19['four1'] = feature19['feature19'] - feature19['one']
feature19['four2'] = feature19['feature19'] * feature19['one']
feature19['five'] = feature19['feature19'] + feature19['two']
feature19['five1'] = feature19['feature19'] - feature19['two']
feature19['five2'] = feature19['feature19'] * feature19['two']
feature19['six'] = feature19['feature19'] + feature19['three']
feature19['six1'] = feature19['feature19'] - feature19['three']
feature19['six2'] = feature19['feature19'] * feature19['three']
feature19['four3'] = feature19['feature19'] + feature19['four0']
feature19['four4'] = feature19['feature19'] - feature19['four0']
feature19['four5'] = feature19['feature19'] * feature19['four0']
feature19['seven'] = feature19['one'] + feature19['two']
feature19['seven1'] = feature19['one'] - feature19['two']
feature19['seven2'] = feature19['one'] * feature19['two']
feature19['eight'] = feature19['one'] + feature19['three']
feature19['eight1'] = feature19['one'] - feature19['three']
feature19['eight2'] = feature19['one'] * feature19['three']
feature19['eight3'] = feature19['one'] + feature19['four0']
feature19['eight4'] = feature19['one'] - feature19['four0']
feature19['eight5'] = feature19['one'] * feature19['four0']
feature19['nine'] = feature19['two'] + feature19['three']
feature19['nine1'] = feature19['two'] - feature19['three']
feature19['nine2'] = feature19['two'] * feature19['three']
feature19['nine3'] = feature19['two'] + feature19['four0']
feature19['nine4'] = feature19['two'] - feature19['four0']
feature19['nine5'] = feature19['two'] * feature19['four0']
feature19['nine6'] = feature19['three'] + feature19['four0']
feature19['nine7'] = feature19['three'] - feature19['four0']
feature19['nine8'] = feature19['three'] * feature19['four0']
feature19['ten'] = feature19['one'] + feature19['one']
feature19['ten2'] = feature19['one'] * feature19['one']
feature19['eleven'] = feature19['feature19'] + feature19['feature19']
feature19['eleven2'] = feature19['feature19'] * feature19['feature19']
feature19['twelve'] = feature19['two'] + feature19['two']
feature19['twelve2'] = feature19['two'] * feature19['two']
feature19['thirteen'] = feature19['three'] + feature19['three']
feature19['thirteen2'] = feature19['three'] * feature19['three']
feature19['thirteen3'] = feature19['four0'] + feature19['four0']
feature19['thirteen4'] = feature19['four0'] * feature19['four0']
# print feature3
# print feature19
# ####################################################
#user_id单个属性提取特征 #
test_dataset['user_id1'] = test_dataset['user_id']
feature8 = pd.pivot_table(test_dataset, index='user_id', values='user_id1', aggfunc='count')
feature8['user_id'] = feature8.index
test_dataset = test_dataset.drop('user_id1', axis=1)
# print feature8
test_dataset['merchant_id1'] = test_dataset['merchant_id']
feature9 = pd.pivot_table(test_dataset, index='merchant_id', values='merchant_id1', aggfunc='count')
feature9['merchant_id'] = feature9.index
test_dataset = test_dataset.drop('merchant_id1', axis=1)
# print feature9
test_dataset['coupon_id1'] = test_dataset['coupon_id']
feature10 = pd.pivot_table(test_dataset, index='coupon_id', values='coupon_id1', aggfunc='count')
feature10['coupon_id'] = feature10.index
test_dataset = test_dataset.drop('coupon_id1', axis=1)
# print feature10
######################################################
######################################################
# 从打标的数据集中提取特征 #
# 读取已经打标的数据集 #
# 优惠券折扣率 #
feature4 = test_set.iloc[:, [2, 3]]
feature4['discount_rate'] = [index if index[0] == '0' else (1 - (int(str(index).split(':')[1])*1.0)/int(str(index).split(':')[0]))
for index in feature4['discount_rate']]
feature4.columns = ['coupon_id', 'feature4']
# print feature4
# 满多少 #
feature5 = test_set.iloc[:, [2, 3]]
feature5.columns = ['coupon_id', 'up']
feature5['up'] = [index if index[0] == '0' else int(str(index).split(':')[0]) for index in feature5['up']]
up_median = feature5['up'].median()
feature5['up'] = [up_median if str(index)[0] == '0' else index for index in feature5['up']]
# print feature5
# 满多少归一化 #
feature17 = feature5
m = feature17['up'].max()
n = feature17['up'].min()
feature17['up'] = [1.0*(index-n)/(m-n) for index in feature17['up']]
# print feature17
# 减多少 #
feature6 = test_set.iloc[:, [2, 3]]
feature6.columns = ['coupon_id', 'down']
feature6['down'] = [index if index[0] == '0' else int(str(index).split(':')[1]) for index in feature6['down']]
up_median = feature6['down'].median()
feature6['down'] = [up_median if str(index)[0] == '0' else index for index in feature6['down']]
# print feature6
# 减多少归一化 #
feature18 = feature6
m = feature18['down'].max()
n = feature18['down'].min()
feature18['down'] = [1.0*(index-n)/(m-n) for index in feature18['down']]
# print feature18
# 折扣类型 #
feature7 = test_set.iloc[:, [0, 2, 3]]
feature7.columns = ['user_id', 'coupon_id', 'feature7']
feature7['feature7'] = [0 if index[0] == '0' else 1 for index in feature7['feature7']]
# print feature7
# 每个用户喜欢领取的优惠券类型 #
feature16 = pd.pivot_table(feature7, index='user_id', values='feature7', aggfunc=call_mode)
feature16['user_id'] = feature16.index
feature16.columns = ['feature16', 'user_id']
# print feature16
# 领券日期相关特征 9个 #
test_set['date_received'] = [str(index) for index in test_set['date_received']]
test_set['date_received'] = pd.to_datetime(test_set['date_received'])
test_set['week'] = [index.weekday()+1 for index in test_set['date_received']]
feature11 = test_set.iloc[:, [0, 6]]
feature11['one'] = [1 if index == 1 else 0 for index in feature11['week']]
feature11['two'] = [1 if index == 2 else 0 for index in feature11['week']]
feature11['three'] = [1 if index == 3 else 0 for index in feature11['week']]
feature11['four'] = [1 if index == 4 else 0 for index in feature11['week']]
feature11['five'] = [1 if index == 5 else 0 for index in feature11['week']]
feature11['six'] = [1 if index == 6 else 0 for index in feature11['week']]
feature11['seven'] = [1 if index == 7 else 0 for index in feature11['week']]
set_one = set([7, 6])
feature11['eight'] = [1 if index in set_one else 0 for index in feature11['week']]
feature11['month'] = [index.day for index in test_set['date_received']]
# print feature11
# 判断是否只领了一次券 #
feature = test_set.iloc[:, [0, 5]]
feature13 = pd.pivot_table(feature, index='user_id', values='date_received', aggfunc='count')
feature13['user_id'] = feature13.index
feature13['feature13'] = [1 if index == 1 else 0 for index in feature13['date_received']]
feature13 = feature13.drop('date_received', axis=1)
# print feature13
################################################
test_set = test_set.iloc[:, [0, 1, 2]]
test_set = test_set.merge(feature1, on='merchant_id', how='left')
test_set = test_set.merge(feature2, on='user_id', how='left')
test_set = test_set.merge(feature3, on='user_id', how='left')
test_set['feature4'] = feature4['feature4']
test_set['feature5'] = feature5['up']
test_set['feature6'] = feature6['down']
test_set['feature7'] = feature7['feature7']
test_set = test_set.merge(feature8, on='user_id', how='left')
test_set = test_set.merge(feature9, on='merchant_id', how='left')
test_set = test_set.merge(feature10, on='coupon_id', how='left')
test_set['feature11_week'] = feature11['week']
test_set['feature11_one'] = feature11['one']
test_set['feature11_two'] = feature11['two']
test_set['feature11_three'] = feature11['three']
test_set['feature11_four'] = feature11['four']
test_set['feature11_five'] = feature11['five']
test_set['feature11_six'] = feature11['six']
test_set['feature11_seven'] = feature11['seven']
test_set['feature11_eight'] = feature11['eight']
test_set['feature11_nine'] = feature11['month']
test_set = test_set.merge(feature13, on='user_id', how='left')
test_set = test_set.merge(feature14, on='user_id', how='left')
test_set = test_set.merge(feature15, on='user_id', how='left')
test_set = test_set.merge(feature16, on='user_id', how='left')
test_set['feature17'] = feature17['up']
test_set['feature18'] = feature18['down']
test_set = test_set.merge(feature19, on='merchant_id', how='left')
test_set = test_set.merge(feature20, on=['user_id', 'merchant_id'], how='left')
test_set = test_set.merge(feature21, on='merchant_id', how='left')
test_set = test_set.merge(feature22, on='merchant_id', how='left')
test_set = test_set.merge(feature23, on='coupon_id', how='left')
test_set = test_set.merge(feature24, on='user_id', how='left')
mean1 = feature25['afford'].median()
mean2 = feature26['unafford'].median()
test_set = test_set.merge(feature25, on='user_id', how='left')
test_set = test_set.merge(feature26, on='user_id', how='left')
test_set['afford'] = test_set['afford'].fillna(mean1)
test_set['unafford'] = test_set['unafford'].fillna(mean2)
test_set = test_set.merge(feature27, on='user_id', how='left')
test_set = test_set.merge(feature28, on='user_id', how='left')
test_set = test_set.fillna(0)
print test_set
#########################################################
train_data = label_data.iloc[:, 4:]
label_data = label_data.iloc[:, 3]
test_data = test_set.iloc[:, 3:]
# train_data.to_csv('predict_data.csv', index=None, header=None)
# label_data.to_csv('predict_label.csv', index=None, header=None)
# test_data.to_csv('predict_test.csv', index=None, header=None)
# train_data = pd.read_csv(r'predict_data.csv', header=None)
# label_data = pd.read_csv(r'predict_label.csv', header=None)
# test_data = pd.read_csv(r'predict_test.csv', header=None)
mode = GradientBoostingClassifier(n_estimators=150, learning_rate=0.2, subsample=0.7)
mode.fit(train_data, label_data)
test_pre = mode.predict_proba(test_data)[:, 1]
test_pre = pd.DataFrame(test_pre)
test_pre.columns = ['probability']
test_dataset = test_dataset_two.iloc[:, [0, 2, 5]]
test_dataset['probability'] = test_pre['probability']
# test_dataset.to_csv('sample_submission.csv', index=None, header=None)
print test_dataset
auc=0.47
| [
"noreply@github.com"
] | rogeroyer.noreply@github.com |
4dba7e90a41685b62780a43bab3af49234534501 | d9e8a2464e52cf74910dfa8bf856f9bbf9cac0be | /squares_and_cardinal_points.py | 41dcdf9559ea407b371bda0d3bb47f6314f74a59 | [] | no_license | renatocrobledo/probable-octo-spork | c791a16a73d0ad012829ef4f2800557a059edacd | 958357c75a6d0c300a3f606b02b2f80dc7d47d26 | refs/heads/master | 2022-04-30T17:21:03.430985 | 2022-03-07T15:58:56 | 2022-03-07T15:58:56 | 212,404,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,940 | py |
'''
Search all possible squares/rectangles that can be formed given a list of cardinal points (x, y)
So for example suppose we receive a list of points x, y : [(1,1),(1,3),(3,1),(3,3)]
which in a plot will look something like this:
Y
3 | * *
2 |
1 | * *
--------- X
1 2 3 4
Then there is one square that is formed from the four points:
1 -> [(1,1), (1,3), (3,1), (3,3)]
and if we receive:
Y
3 | * * *
2 |
1 | * * *
--------- X
1 2 3 4
there will be 3 squares built as follows:
1 -> [(1,1), (1,3), (3,1), (3,3)]
2 -> [(1,1), (1,3), (4,1), (4,3)]
3 -> [(3,1), (3,3), (4,1), (4,3)]
This quiz was extracted from: https://www.youtube.com/watch?v=EuPSibuIKIg (Google Coding Interview With A Competitive Programmer)
'''
def search_for_squares(l):
    """Count axis-aligned rectangles formed by the given (x, y) points.

    Points are grouped into columns by their x coordinate.  For every pair
    of columns, each pair of y values shared by both columns closes one
    rectangle, so m shared y values contribute C(m, 2) = m*(m-1)//2
    rectangles.

    :param l: iterable of (x, y) coordinate tuples
    :return: total number of rectangles (int)
    """
    columns = {}  # x coordinate -> set of y coordinates seen in that column
    for x, y in l:
        columns.setdefault(x, set()).add(y)
    total = 0
    # Compare every remaining pair of columns exactly once.
    while columns:
        _, ys = columns.popitem()
        for other_ys in columns.values():
            shared = len(ys & other_ys)
            # m shared y values close m*(m-1)/2 rectangles between the two columns.
            total += shared * (shared - 1) // 2
    return total
# Regression checks: expected rectangle counts for a few point layouts.
# Each assertion message carries the actual result for easier debugging.
result = search_for_squares([(0,0),(0,2),(1,0),(1,2)])
assert result == 1, result
result = search_for_squares([(0,0),(0,2),(1,0),(1,2),(2,0),(2,2)])
assert result == 3, result
result = search_for_squares([(0,1),(0,3),(1,1),(1,0),(2,5),(2,2)])
assert result == 0, result
result = search_for_squares([(0,0),(0,2),(1,0)])
assert result == 0, result
result = search_for_squares([(0,0),(0,2)])
assert result == 0, result
result = search_for_squares([(0,0)])
assert result == 0, result
result = search_for_squares([(0,0),(0,1),(0,2),(0,3),(0,4),(1,0),(1,1),(1,2),(1,3),(1,4)])
assert result == 10, result
result = search_for_squares([(1,1),(1,4),(1,2),(0,0),(0,4),(0,1),(0,2),(0,3),(1,0),(1,3)])
assert result == 10, result
| [
"rcacho@legacy.com"
] | rcacho@legacy.com |
dc41b02abc8a0d0f3c36e7d4af3f0e908b4a4961 | d105b1f9122c4b9e9d935bc29cdfb202d6f30d4c | /prac_01/shop_caculator.py | 7eafc6695cf260869597ba31df7624bc52f6e422 | [] | no_license | lechang96/CP-1404 | b725dc023210e1da0e96163e2770534517afecc1 | 6788d827dc47ed894b4058ea972ad16aa98c71bd | refs/heads/master | 2021-05-21T14:53:25.404783 | 2020-05-12T10:43:22 | 2020-05-12T10:43:22 | 252,687,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | total = 0
number = int(input("Enter number of items:"))
while number <= 0:
print("Invalid number of items!")
number = int(input("Enter number of items:"))
if number > 0:
for i in range(number):
price = int(input("Enter price of item:"))
total += price
if total > 100:
total *= 0.9 # apply 10% discount
print("Total price for {} items is ${:.2f}".format(number, total)) # formatting for currency output
| [
"le.chang@my.jcu.edu.au"
] | le.chang@my.jcu.edu.au |
551b075914b2467f849db66f60f281c35f6c3152 | 42351d45ee8acab06f9a988bc00aad1910fca9b6 | /examples/addons/build_sierpinski_pyramid.py | fffea68c5712767320d48dffd3be2a86b737e3e7 | [
"MIT"
] | permissive | snegovick/ezdxf | 0004f3143d75d7709721b83f637efd0db959fa95 | 1070c67779f75c707c8817b2cc2eca87154fdab5 | refs/heads/master | 2020-12-27T11:47:47.173327 | 2020-02-02T10:46:38 | 2020-02-02T10:46:38 | 237,892,288 | 0 | 0 | NOASSERTION | 2020-02-03T05:31:47 | 2020-02-03T05:31:46 | null | UTF-8 | Python | false | false | 892 | py | # Copyright (c) 2018-2019 Manfred Moitzi
# License: MIT License
import ezdxf
from ezdxf.addons import SierpinskyPyramid
def write(filename, pyramids, merge=False):
    """Render the pyramid collection into a fresh R2000 DXF drawing and save it."""
    drawing = ezdxf.new('R2000')
    modelspace = drawing.modelspace()
    pyramids.render(modelspace, merge=merge)
    drawing.saveas(filename)
def main(filename, level, sides=3, merge=False):
    """Build a Sierpinski pyramid of the given recursion depth and save it as DXF."""
    print('building sierpinski pyramid {}: start'.format(sides))
    fractal = SierpinskyPyramid(level=level, sides=sides)
    print('building sierpinski pyramid {}: done'.format(sides))
    try:
        write(filename, fractal, merge=merge)
    except IOError as e:
        # Report the failed path instead of letting the script crash.
        print('ERROR: can not write "{0}": {1}'.format(e.filename, e.strerror))
    else:
        print('saving "{}": done'.format(filename))
if __name__ == '__main__':
    # Level-4 pyramids: triangular base, then square base with merged meshes.
    main("dxf_sierpinski_pyramid_3.dxf", level=4, sides=3)
    main("dxf_sierpinski_pyramid_4.dxf", level=4, sides=4, merge=True)
| [
"mozman@gmx.at"
] | mozman@gmx.at |
83739e01415138ecdaf4a4640ffa97bda66c39fa | c628d06d413bd0c711f70c5379af131da59c7384 | /Novice/02-04/Latihan/1.py | bd6c79ab90144cfacf7b6a8e041212db326cf36d | [] | no_license | nitarosiana/praxis-academy | 2eb969efb36ef3930d1a741f0d437131f5cb2fb0 | c31f05ef6cd0bdce96922d2ae0bd2f31c0732aeb | refs/heads/master | 2021-01-05T11:38:12.416333 | 2020-12-17T02:31:12 | 2020-12-17T02:31:12 | 241,009,796 | 0 | 0 | null | 2020-03-23T01:42:51 | 2020-02-17T03:16:06 | Python | UTF-8 | Python | false | false | 1,172 | py | from injector import Module, singleton, Injector, provider
class Api:
    """Pretend remote API used to demonstrate dependency injection."""

    def fetch_remote_data(self):
        """Simulate a remote call and return a canned result."""
        print('Api called')
        return 42
class BusinessLogic:
    """Consumer of an Api; the API instance is injected at construction."""

    def __init__(self, api: Api):
        self.api = api

    def do_stuff(self):
        """Fetch data through the injected API and report it."""
        result = self.api.fetch_remote_data()
        print(f'the api returned a result: {result}')
        # do something with the data and return a result
class AppModule(Module):
    """Injector module that wires the production dependency graph."""
    @singleton
    @provider
    def provide_business_logic(self, api: Api) -> BusinessLogic:
        # `api` is itself resolved by the injector (see provide_api below).
        return BusinessLogic(api=api)
    @singleton
    @provider
    def provide_api(self) -> Api:
        # Single shared Api instance for the whole container.
        return Api()
class TestApi(Api):
    """Stub Api used by the test container; returns a distinguishable value."""
    def fetch_remote_data(self):
        print('Demo Api called')
        return 24
class TestAppModule(Module):
    """Override module: rebinds Api to the TestApi stub for tests."""
    @singleton
    @provider
    def provide_api(self) -> Api:
        return TestApi()
if __name__ == '__main__':
    # Production container, and a test container where TestAppModule's
    # provider overrides AppModule's Api binding.
    real_injector = Injector(AppModule())
    test_injector = Injector([AppModule(), TestAppModule()])
    real_logic = real_injector.get(BusinessLogic)
    real_logic.do_stuff()
    test_logic = test_injector.get(BusinessLogic)
    test_logic.do_stuff()
"nita1700016085@webmail.uad.ac.id"
] | nita1700016085@webmail.uad.ac.id |
69f606adda92d2023b27da70bcaf2c2748758335 | 8e4ce714e93d3c561f52abff7499d22188bed9ac | /src/data.py | deeb7519d44a69d2dc508c75472fb6504f6156b2 | [] | no_license | drothler/ethan | fe2cc8ef85ea27a7df3b9154e77af5c983772330 | 764f3a3d700c5cecec382c86faf75b7728615843 | refs/heads/main | 2023-07-11T02:41:56.815369 | 2021-07-25T03:35:59 | 2021-07-25T03:35:59 | 389,252,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,814 | py | import xmltodict
import sys
import numpy as np
# @DavidDrothler
# Loading long-term social graph data from xml file
# Storing xml data in numpy array with restriction to count: lower number for testing purposes, else no specification
# path only used for xml extraction
# Potentially getting dict/array as direct input, bypassing the need to convert data
# Maps a connection-type attribute string from the XML to a small integer id.
id_dict = {
    "family": 2,
    "friend": 3,
    "colleague": 4,
    "partner": 5,
    "household": 6
} # dictionary that we use to assign integer ids to attribute strings.
# any id that is not listed in this dict will lead to errors
# might make this dynamic in the future, if a new connection type is detected, it gets a new id automatically
# Maps facility-role strings to column indices (reserved for future use).
facility_dict = {
    "home facility": 0,
    "work facility": 1
}
# implementing this in case I need the connection to facilities in the future, not used right now tho
class DataLoader:
    """Loads a long-term social-graph XML file and converts it to numpy arrays.

    The XML is expected to contain graph/nodes/node entries, each carrying an
    ``@id``, an ``attributes`` list and a ``connections`` list (see
    ``prepare_data`` for how they are consumed).
    """
    def __init__(self, path, count, mode, export_path):
        # path: XML input file; count: max nodes to keep when mode == 1
        # (testing); export_path is stored but not used in this class.
        self.path = path
        self.count = count
        self.export_path = export_path
        self.mode = mode
        self.data = None
        self.info = None
        print("Getting ", count, " nodes from ", path)
        self.import_from_xml()
    def import_from_xml(self):
        """Parse the XML at self.path into a numpy array of node dicts.

        Progress output is redirected into 'xml_data_log.txt'.
        NOTE(review): sys.stdout is restored only on the happy path — an
        exception inside the `with` block leaves stdout redirected.
        """
        print(self.mode, self.count)
        with open(self.path) as graph:
            self.data = xmltodict.parse(graph.read(), process_namespaces=True)
        original_stdout = sys.stdout
        with open('xml_data_log.txt', 'w') as f:
            sys.stdout = f
            if int(self.mode) == 1:
                # Test mode: only keep the first `count` nodes.
                print("Testing Purposes")
                data_list = list(self.data["graph"]["nodes"]['node'][0:int(self.count)])
                print(len(data_list))
            else:
                print(self.mode)
                data_list = list(self.data["graph"]["nodes"]['node'])
            self.data = np.array(data_list)
            print("NP array: ", self.data)
            sys.stdout = original_stdout
    def parse_connection_type(self, connection_type):
        """Split a whitespace-separated connection-type string into tokens,
        dropping stand-alone comma tokens.

        NOTE(review): a comma glued to a word (e.g. "family,") is kept as-is
        and would later miss the id_dict lookup — presumably the input always
        has spaces around commas; verify against the XML.
        """
        types = connection_type.split()
        type_ids = list()
        for string in types:
            if string != ',': # inconsistent xml input, only some connection properties are separated by commas
                type_ids.append(string)
        return type_ids
    def connection_type_to_int(self, types, dictionary):
        """Map each type string to its integer id via `dictionary` (KeyError on unknown types)."""
        indices = list()
        for type in types:
            indices.append(dictionary[type])
        return indices
    # prepare_data() takes the imported xml file dict and leaves out currently unimportant information
    # each node has various connections, which store the id of the node its connected to, as well as the type of
    # connection. we have N nodes, each node has m_n connections, each connection has d_m_n types
    # our output list is of shape N x M_n x D_m_n and can be represented in 3d space.
    # i might consider creating a uniform 3d matrix with M_n = max(M_n) and D_m_n = max(D_m_n), which would create
    # a lot of zero entries, but could be computationally faster when using numpy
    def prepare_data(self, dictionary):
        """Convert the raw node dicts into nested lists.

        Returns (converted_nodes, node_info) where converted_nodes is
        [[id, [[conn_id, [type ids]], ...]], ...] and node_info holds each
        node's attribute value strings.
        """
        converted_nodes = list()
        node_info = list()
        for node in self.data:
            id = int(node['@id'])  # NOTE: shadows the builtin `id` inside this loop
            info = list()
            connections = list()
            for attribute in node['attributes']['attribute']:
                info.append(attribute['value'])
            for connection in node['connections']['connection']:
                #print(connection['node_id'], connection['connection_type'])
                connection_id, connection_type = int(connection['node_id']), self.connection_type_to_int(self.parse_connection_type(connection['connection_type']), dictionary)
                connections.append([connection_id, connection_type])
            converted_nodes.append([id, connections])
            node_info.append(info)
        return converted_nodes, node_info
    # pls dont add more than 9 types of connections 1-9 in the Long term social network
    def ctype_to_single_int(self, types):
        """Pack a list of single-digit type ids into one int, one decimal digit per id
        (first id becomes the least significant digit)."""
        type_len = len(types)
        val = 0
        for i in range(type_len):
            val += types[i] * pow(10, i)
        return val
    def prepare_numpy_data(self, nodes, node_info, facility_data):
        """Build fixed-size numpy arrays from prepare_data()'s output.

        node_np has shape (N, max_connections + 1, 2) holding
        (connection id, packed type) pairs; info_np maps each node to the
        facility ids named in facility_data['institutions'].
        """
        node_len = len(nodes)
        max_connections = 0
        # getting maximum connection dimension
        for node in range(node_len):
            connections = nodes[node][1]
            connection_length = len(connections)
            if connection_length > max_connections:
                max_connections = connection_length
        node_np = np.full([node_len, max_connections + 1, 2], fill_value=0, dtype=np.uint32)
        # initializing node numpy data
        for node in range(node_len):
            for connection in range(1, len(nodes[node][1]) + 1):
                # print(nodes[node][1][connection - 1])
                node_np[node][connection - 1][0] = nodes[node][1][connection - 1][0]
                node_np[node][connection - 1][1] = self.ctype_to_single_int(nodes[node][1][connection - 1][1])
                # storing/increasing connection count
                # NOTE(review): [0][0] also holds the first connection's id
                # (written just above when connection == 1), so counter and
                # data appear to collide — confirm the intended layout.
                node_np[node][0][0] += 1
        # assuming we only have 3 types of attributes, work, edu and home :)
        facilities = (facility_data['institutions'].replace(' ', '')).split(',')
        info_np = np.full([node_len, len(facilities)], fill_value=-1, dtype=np.uint64)
        #print(node_info)
        for node in range(node_len):
            for index, info in enumerate(node_info[node]):
                #print(info)
                # Attribute values look like '"<facility>_<id>"'.
                fac_str, id_str = (info.replace('"', '')).split('_')
                id_int = np.uint32(id_str)
                fac_int = facilities.index(fac_str)
                info_np[node][fac_int] = id_int
        return node_np, info_np
"68952766+drothler@users.noreply.github.com"
] | 68952766+drothler@users.noreply.github.com |
e77c93eba5a0b76cd84ed400b15546ca5fc6c4f8 | b66bd1f0f85d7fdc64432a577979646f675d0f56 | /utils/smiles_data_utils.py | f8faf96c4de2163433e07590bba3db6c83b464dc | [] | no_license | wenhao-gao/MolVAE | 65dfa016707df80f7e8237a3d4c9609d1b4e17a0 | 98412968764820a83776e1ef82ba121010536a6f | refs/heads/main | 2023-03-19T22:14:47.957470 | 2021-03-16T00:17:56 | 2021-03-16T00:17:56 | 346,457,099 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,788 | py | import random
from multiprocessing import Pool
from collections import UserList, defaultdict
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import torch
from rdkit import rdBase
from rdkit import Chem
# https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader
def set_torch_seed_to_all_gens(_):
    """DataLoader worker_init_fn: derive a seed from torch's initial seed and
    apply it to Python's and numpy's global generators so all RNGs agree."""
    derived = torch.initial_seed() % (2 ** 32 - 1)
    random.seed(derived)
    np.random.seed(derived)
class SpecialTokens:
    """Reserved vocabulary symbols: begin/end of sequence, padding, unknown."""
    bos = '<bos>'
    eos = '<eos>'
    pad = '<pad>'
    unk = '<unk>'
class CharVocab:
    """Character-level vocabulary mapping symbols to integer ids and back.

    Special tokens (bos/eos/pad/unk) are appended after the sorted regular
    characters, so their ids are always the four largest.
    """

    @classmethod
    def from_data(cls, data, *args, **kwargs):
        """Build a vocabulary from the set of characters appearing in `data`."""
        alphabet = set()
        for string in data:
            alphabet.update(string)
        return cls(alphabet, *args, **kwargs)

    def __init__(self, chars, ss=SpecialTokens):
        if (ss.bos in chars) or (ss.eos in chars) or \
                (ss.pad in chars) or (ss.unk in chars):
            raise ValueError('SpecialTokens in chars')
        symbols = sorted(chars) + [ss.bos, ss.eos, ss.pad, ss.unk]
        self.ss = ss
        self.c2i = {c: i for i, c in enumerate(symbols)}
        self.i2c = dict(enumerate(symbols))

    def __len__(self):
        return len(self.c2i)

    @property
    def bos(self):
        return self.c2i[self.ss.bos]

    @property
    def eos(self):
        return self.c2i[self.ss.eos]

    @property
    def pad(self):
        return self.c2i[self.ss.pad]

    @property
    def unk(self):
        return self.c2i[self.ss.unk]

    def char2id(self, char):
        """Id of `char`, or the <unk> id for characters not in the vocabulary."""
        return self.c2i.get(char, self.unk)

    def id2char(self, id):
        """Character for `id`, or the <unk> token string for unknown ids."""
        return self.i2c.get(id, self.ss.unk)

    def string2ids(self, string, add_bos=False, add_eos=False):
        """Tokenize `string` into a list of ids, optionally framed by <bos>/<eos>."""
        ids = list(map(self.char2id, string))
        if add_bos:
            ids.insert(0, self.bos)
        if add_eos:
            ids.append(self.eos)
        return ids

    def ids2string(self, ids, rem_bos=True, rem_eos=True):
        """Decode ids back into a string, optionally stripping framing tokens."""
        if len(ids) == 0:
            return ''
        if rem_bos and ids[0] == self.bos:
            ids = ids[1:]
        if rem_eos and ids[-1] == self.eos:
            ids = ids[:-1]
        return ''.join(self.id2char(id) for id in ids)
class OneHotVocab(CharVocab):
    """CharVocab whose `vectors` attribute holds one-hot embeddings
    (identity-matrix rows), one row per symbol id."""
    def __init__(self, *args, **kwargs):
        super(OneHotVocab, self).__init__(*args, **kwargs)
        self.vectors = torch.eye(len(self.c2i))
def mapper(n_jobs):
    '''
    Returns function for map call.
    If n_jobs == 1, will use standard map
    If n_jobs > 1, will use multiprocessing pool
    If n_jobs is a pool object, will return its map function
    '''
    if n_jobs == 1:
        def _mapper(*args, **kwargs):
            return list(map(*args, **kwargs))

        return _mapper
    if isinstance(n_jobs, int):
        # Bug fix: the pool used to be created once here and terminated in
        # the returned function's `finally` block, so every call after the
        # first operated on a dead pool.  Creating a fresh pool per call
        # keeps the returned mapper reusable (the context manager terminates
        # the pool on exit, matching the old cleanup semantics).
        def _mapper(*args, **kwargs):
            with Pool(n_jobs) as pool:
                return pool.map(*args, **kwargs)

        return _mapper
    # n_jobs is assumed to be a pool-like object exposing .map
    return n_jobs.map
class Logger(UserList):
    """List of per-step dicts that also maintains column-wise views.

    Indexing with an int or slice behaves like a plain list; indexing with
    a string returns that key's values across all appended steps.
    """

    def __init__(self, data=None):
        super().__init__()
        self.sdata = defaultdict(list)
        for step in (data or []):
            self.append(step)

    def append(self, step_dict):
        """Append one step dict and fold its items into the column store."""
        super().append(step_dict)
        for key, value in step_dict.items():
            self.sdata[key].append(value)

    def __getitem__(self, key):
        if isinstance(key, int):
            return self.data[key]
        if isinstance(key, slice):
            return Logger(self.data[key])
        column = self.sdata[key]
        # A column of dicts is itself step-like, so wrap it in a Logger.
        return Logger(column) if isinstance(column[0], dict) else column

    def save(self, path):
        """Dump all steps to CSV via pandas."""
        pd.DataFrame(list(self)).to_csv(path, index=None)
class LogPlotter:
    """Renders metric curves stored in a Logger onto matplotlib axes."""
    def __init__(self, log):
        self.log = log

    def line(self, ax, name):
        """Plot metric `name` on `ax`; dict-valued metrics get one line per key."""
        if isinstance(self.log[0][name], dict):
            for k in self.log[0][name]:
                ax.plot(self.log[name][k], label=k)
            ax.legend()
        else:
            ax.plot(self.log[name])
        ax.set_ylabel('value')
        ax.set_xlabel('epoch')
        ax.set_title(name)

    def grid(self, names, size=7):
        """Draw all `names` in a two-column grid of subplots of side `size`."""
        _, axs = plt.subplots(nrows=len(names) // 2, ncols=2,
                              figsize=(size * 2, size * (len(names) // 2)))
        for ax, name in zip(axs.flatten(), names):
            self.line(ax, name)
class CircularBuffer:
    """Fixed-capacity ring buffer over a numpy array with a running mean."""

    def __init__(self, size):
        self.max_size = size
        self.data = np.zeros(self.max_size)
        self.size = 0        # number of valid elements (<= capacity)
        self.pointer = -1    # index of the most recently written slot

    def add(self, element):
        """Write `element` into the next slot, overwriting the oldest value."""
        if self.size < self.max_size:
            self.size += 1
        self.pointer = (self.pointer + 1) % self.max_size
        self.data[self.pointer] = element
        return element

    def last(self):
        """Most recently added element; the buffer must be non-empty."""
        assert self.pointer != -1, "Can't get an element from an empty buffer!"
        return self.data[self.pointer]

    def mean(self):
        """Average over the valid elements, or 0.0 while empty."""
        return self.data[:self.size].mean() if self.size > 0 else 0.0
def disable_rdkit_log():
    # Silence all RDKit log channels (useful during noisy SMILES parsing).
    rdBase.DisableLog('rdApp.*')


def enable_rdkit_log():
    # Re-enable RDKit logging after a disable_rdkit_log() call.
    rdBase.EnableLog('rdApp.*')
def get_mol(smiles_or_mol):
    """Return an RDKit Mol for a SMILES string; pass non-strings through.

    Returns None for the empty string, unparsable SMILES, or molecules
    that fail sanitization.
    """
    if not isinstance(smiles_or_mol, str):
        # Already a Mol (or anything else) -- hand it back untouched.
        return smiles_or_mol
    if len(smiles_or_mol) == 0:
        return None
    mol = Chem.MolFromSmiles(smiles_or_mol)
    if mol is None:
        return None
    try:
        Chem.SanitizeMol(mol)
    except ValueError:
        return None
    return mol
class StringDataset:
    def __init__(self, vocab, data):
        """
        Creates a convenient Dataset with SMILES tokenization

        Arguments:
            vocab: CharVocab instance for tokenization
            data (list): SMILES strings for the dataset
        """
        self.vocab = vocab
        # Tokenize once up front; __getitem__ only wraps in tensors.
        self.tokens = [vocab.string2ids(s) for s in data]
        self.data = data
        self.bos = vocab.bos
        self.eos = vocab.eos

    def __len__(self):
        """
        Computes a number of objects in the dataset
        """
        return len(self.tokens)

    def __getitem__(self, index):
        """
        Prepares torch tensors with a given SMILES.

        Arguments:
            index (int): index of SMILES in the original dataset

        Returns:
            A tuple (with_bos, with_eos, smiles), where
            * with_bos is a torch.long tensor of SMILES tokens with
                BOS (beginning of a sentence) token
            * with_eos is a torch.long tensor of SMILES tokens with
                EOS (end of a sentence) token
            * smiles is an original SMILES from the dataset
        """
        tokens = self.tokens[index]
        with_bos = torch.tensor([self.bos] + tokens, dtype=torch.long)
        with_eos = torch.tensor(tokens + [self.eos], dtype=torch.long)
        return with_bos, with_eos, self.data[index]

    def default_collate(self, batch, return_data=False):
        """
        Simple collate function for SMILES dataset. Joins a
        batch of objects from StringDataset into a batch

        Arguments:
            batch: list of objects from StringDataset
            return_data: if True, will return SMILES used in a batch

        Returns:
            with_bos, with_eos, lengths [, data] where
            * with_bos: padded sequence with BOS in the beginning
              (padded with self.vocab.pad)
            * with_eos: padded sequence with EOS in the end
            * lengths: array with SMILES lengths in the batch
            * data: SMILES in the batch

        Note: output batch is sorted with respect to SMILES lengths in
        decreasing order, since this is a default format for torch
        RNN implementations
        """
        with_bos, with_eos, data = list(zip(*batch))
        # Sort the whole batch by descending length (required by packed RNNs).
        lengths = [len(x) for x in with_bos]
        order = np.argsort(lengths)[::-1]
        with_bos = [with_bos[i] for i in order]
        with_eos = [with_eos[i] for i in order]
        lengths = [lengths[i] for i in order]
        with_bos = torch.nn.utils.rnn.pad_sequence(
            with_bos, padding_value=self.vocab.pad
        )
        with_eos = torch.nn.utils.rnn.pad_sequence(
            with_eos, padding_value=self.vocab.pad
        )
        if return_data:
            data = np.array(data)[order]
            return with_bos, with_eos, lengths, data
        return with_bos, with_eos, lengths
def batch_to_device(batch, device):
    """Move every tensor in `batch` to `device`; non-tensor items pass through."""
    moved = []
    for item in batch:
        if isinstance(item, torch.Tensor):
            item = item.to(device)
        moved.append(item)
    return moved
| [
"gaowh19@gmail.com"
] | gaowh19@gmail.com |
8e866dcd52b4441cc9da099cdb036fcb062fdb20 | 7b270cf5f9d0a3e26b5afd758563c6cff73a5248 | /comportamentais/interpreter/operacoes/operacoes/simples/multiplicacao.py | faf7c226799c22ff6b1739fce8ce4ceefa8db7a2 | [] | no_license | reginaldosantarosa/DesignPatterns | 10810672d3831e562ec636a5f66bd709c797ca34 | bec4247f52b8d2e1fe41c570408816a5d4b22608 | refs/heads/master | 2020-04-04T06:54:19.757054 | 2018-01-04T03:06:05 | 2018-01-04T03:06:05 | 155,761,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | class Multiplicacao(object):
"""
Operação simples de multiplicação.
"""
def __init__(self, esquerda, direita):
"""
Constroi a operação simples de multiplicação passando a
expressão do lado esquerdo e a expressão do lado direito.
"""
self.__esquerda = esquerda
self.__direita = direita
def executa(self):
"""
Retorna a multiplicação das duas expressões: esquerda e direita.
"""
return self.__esquerda.executa() * self.__direita.executa()
| [
"victorhad@gmail.com"
] | victorhad@gmail.com |
fc836719468155bce0624ac6734a2fe0c035dbb6 | 32c1f9230149ef251d8c3a40171b10cd78aeca99 | /app/meme_sources/__init__.py | 66386b850edbcc1219ebe28678ffa75b4d9581df | [
"Apache-2.0"
] | permissive | AInursery/george | ce0adb2ea862c44454bbd8f4d84c755506bfe60a | 6285f52750aca14b8ea85d82349ee0907eb04867 | refs/heads/master | 2021-11-01T19:49:35.193815 | 2014-11-17T12:03:23 | 2014-11-17T12:03:23 | 26,678,493 | 1 | 0 | null | 2021-09-29T17:25:17 | 2014-11-15T11:54:57 | Python | UTF-8 | Python | false | false | 234 | py | from meme_sources.main import MemeSource, MemeItem
from meme_sources.meme_generator import MemeGenerator
from meme_sources.know_your_meme import KnowYourMeme
# Avoid pyflakes warning
MemeSource, MemeGenerator, MemeItem, KnowYourMeme
| [
"Nabellaleen@users.noreply.github.com"
] | Nabellaleen@users.noreply.github.com |
f0d2723b19b430f278cbff8ab9758cfcb3d72478 | bd3a8a57f92e36b1743354ed2917c57e944563be | /warmup-1/pos_neg.py | 6424e248a461ce982ceb2e2bd369adb2c981f699 | [] | no_license | dchu07/codingbat | dd28890590976b40e40b3013c9e65cd1905cf7ca | 530c889d57c22121e6f6a12872dd54d650e218b0 | refs/heads/master | 2020-07-29T02:18:39.740017 | 2019-11-19T20:03:19 | 2019-11-19T20:03:19 | 209,630,429 | 0 | 0 | null | 2019-09-19T19:12:08 | 2019-09-19T19:12:07 | null | UTF-8 | Python | false | false | 147 | py | def pos_neg(a, b, negative):
if negative:
return ((a < 0 and b < 0))
else:
return ((a < 0 and b > 0 ) or (a > 0 and b < 0)) | [
"37084052+dchu07@users.noreply.github.com"
] | 37084052+dchu07@users.noreply.github.com |
85a59fe5b47c7db2ab81e6df835fff2b33fa1bb3 | 4a4c310ce0983e374f99e8e96b9d885ca1849c0a | /warmup/socks.py | 4927164cbd44e97560b858bc8320a2ddb1e02b0d | [
"MIT"
] | permissive | piequi/my-hackerrank-challenges | dc357462429678e3d2423960846d369bc9299129 | 455d110dd8675228d44f21e7f253a67b58a0441d | refs/heads/master | 2020-09-10T08:12:43.106423 | 2019-11-14T12:45:05 | 2019-11-14T13:10:05 | 221,697,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | #!/bin/python
import math
import os
import random
import re
import sys
from collections import Counter
# Complete the sockMerchant function below.
def sockMerchant(n, ar):
    """Return the number of matching color pairs in a pile of socks.

    :param n: declared number of socks (kept for the original signature;
              the actual count is taken from `ar` itself)
    :param ar: iterable of sock colors
    :return: total number of pairs (int)
    """
    # Leftover debug prints removed; Counter replaces the hand-rolled
    # increment/reset bookkeeping.  Each color with c occurrences
    # contributes c // 2 pairs.
    counts = Counter(ar)
    return sum(c // 2 for c in counts.values())
return pairs
if __name__ == '__main__':
    # HackerRank harness: the result is written to the file named by the
    # OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(9)
    # map() yields the sock colors lazily from the hard-coded sample input.
    ar = map(int, "10 20 20 10 10 30 50 10 20".rstrip().split())
    result = sockMerchant(n, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
"mbourgeot.ext@orange.com"
] | mbourgeot.ext@orange.com |
5a8abef7706bcce6e17be3d5109ec5ecfb789983 | 9f71ec1ace34607902f4886c4d0121347dba6da2 | /ex40.py | ebc030dd59ed8421d5545c326a784b367955a204 | [
"MIT"
] | permissive | JueJue1/python-exercises | b59e7c564c0af6558d377e4323528bc422c8df38 | 0aa08a8e5fc7a350ae7a8f955ee7a41720d1ea41 | refs/heads/master | 2020-03-19T13:06:37.087235 | 2018-06-15T04:24:38 | 2018-06-15T04:24:38 | 136,561,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | class Song(object):
def _init_(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print(line)
# Two sample songs; each prints its lyrics when sung.
happy_bday = Song(["Happy birthday to you",
            "I don't want to get sued",
            "So I'll stop right there"])
bulls_on_parade = Song(["They rally around tha family",
                "With pockets full of shells"])
happy_bday.sing_me_a_song()
bulls_on_parade.sing_me_a_song()
| [
"sandarwinn31184@gmail.com"
] | sandarwinn31184@gmail.com |
516c031978c2f82c58c800af93a4aea8418f4ca1 | 92cd4218a251f17eb1b734712ed3d43c1e5d7d0d | /Chapter_06/ch06_r02.py | 59d78f85efd00a65ee1172cfe642e68704113a8c | [] | no_license | laranea/Modern-Python-Cookbook-Second-Edition | c521dfb614652a2e76967e026a98e74133f85bd2 | 0c91076965531c13745fd0171f30b2c130a0e96a | refs/heads/master | 2021-01-14T16:22:49.806946 | 2020-02-23T18:53:29 | 2020-02-23T18:53:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,900 | py | """Python Cookbook 2nd ed.
Chapter 6, recipe 2, Essential type hints for class definitions
"""
import random
from typing import Set, List
class Dice:
    """A handful of dice sharing one class-level random generator."""

    RNG = random.Random()

    def __init__(self, n: int, sides: int = 6) -> None:
        self.n_dice = n
        self.sides = sides
        self.faces: List[int]
        self.roll_number = 0

    def __str__(self) -> str:
        return ", ".join(
            f"{position}: {value}"
            for position, value in enumerate(self.faces)
        )

    def total(self) -> int:
        """Sum of all face values."""
        return sum(self.faces)

    def average(self) -> float:
        """Mean face value across the dice."""
        return sum(self.faces) / self.n_dice

    def first_roll(self) -> List[int]:
        """Roll every die, resetting the reroll counter."""
        self.roll_number = 0
        faces = []
        for _ in range(self.n_dice):
            faces.append(self.RNG.randint(1, self.sides))
        self.faces = faces
        return self.faces

    def reroll(self, positions: Set[int]) -> List[int]:
        """Re-roll only the dice at the given positions."""
        self.roll_number += 1
        for position in positions:
            self.faces[position] = self.RNG.randint(1, self.sides)
        return self.faces
# The following example has type checking disabled.
# To see the effect of using a wrong type, remove the type: ignore comments,
# and run mypy on this module.
def example_mypy_failure() -> None:
    # Intentionally wrong argument/annotation types; the `type: ignore`
    # comments let the demo run while mypy (without them) would reject it.
    d = Dice(2.5)  # type: ignore
    r1: List[str] = d.first_roll()  # type: ignore
    print(d)
# Doctest exercising the normal roll/reroll cycle (seeded for determinism).
test_dice = """
>>> d1 = Dice(5)
>>> d1.RNG.seed(42)
>>> d1.first_roll()
[6, 1, 1, 6, 3]
>>> d1.reroll({0, 3, 4})
[2, 1, 1, 2, 2]
>>> str(d1)
'0: 2, 1: 1, 2: 1, 3: 2, 4: 2'
"""

# Doctest showing the runtime failures behind the mypy examples above.
test_dice_failure = """
>>> example_mypy_failure()
Traceback (most recent call last):
...
TypeError: 'float' object cannot be interpreted as an integer
>>> bad = Dice(2.5)
>>> bad.first_roll()
Traceback (most recent call last):
...
TypeError: 'float' object cannot be interpreted as an integer
"""

# Collect all module-level test_* strings so doctest discovery finds them.
__test__ = {n: v for n, v in locals().items() if n.startswith("test_")}
| [
"slott56@gmail.com"
] | slott56@gmail.com |
3e689a490f06aeee4d810234642d9e4eb84d886a | c0c5b1c902f23904ecfc8bb3184f38fbdfca7637 | /Python_Succinctly/chapter_1/23.py | 660c7f5aae46cb77ac723c02c8310c0a9d0b5efc | [] | no_license | SyncfusionWebsite/Python-Succinctly | 4bb1909e07e1b98b2d838c7a29fe445544f4dd28 | f1a3af162c9181c2798e024a79ece908129bd00a | refs/heads/master | 2021-05-12T06:51:41.337146 | 2017-12-11T20:34:26 | 2017-12-11T20:34:26 | 117,228,576 | 1 | 0 | null | 2018-01-12T10:36:33 | 2018-01-12T10:36:33 | null | UTF-8 | Python | false | false | 75 | py | #!/usr/bin/env python3
version = 3
print('Python ' + version + ' is fun.')
| [
"content@syncfusion.com"
] | content@syncfusion.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.