blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f021bb6980fc0c5185a1dd2bd49e8253b7bab1cf | ee8bfdaede4de43c40bd3a614bb01e01efcebacc | /test/test5.py | 9d39a89a5bfff2b1136c140cae025531b7d19dd4 | [] | no_license | xufangda/flask_playground | c929b6bf329e1df15b6078e4473fbb506ee09db6 | d01ceb2f058194bb51175016f871f7eee5c800e4 | refs/heads/master | 2022-12-12T16:28:14.393183 | 2018-05-10T23:28:15 | 2018-05-10T23:28:15 | 132,193,222 | 0 | 0 | null | 2021-03-19T23:39:41 | 2018-05-04T22:03:49 | Python | UTF-8 | Python | false | false | 78 | py |
class NoneLocal:
def __init__(self, v):
self.v = v
n = NoneLocal(None)  # __init__ requires a value argument | [
"xfangda@gmail.com"
] | xfangda@gmail.com |
c676b048e188e0db0fbba0e539d748370987f5be | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02595/s506680961.py | 0b69961fbe962e610ab93e5fd3943def42a15081 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | n, d = map(int, input().split())
xy = [0]*n
for i in range(n):
xy[i] = list(map(int, input().split()))
def judge(x, y):
    return (x ** 2 + y ** 2) ** 0.5
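# e.g. judge(3, 4) -> 5.0, the Euclidean distance of the point from the origin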
cnt = 0
for i in range(n):
dis = judge(xy[i][0], xy[i][1])
if dis <= d:
cnt += 1
print(cnt) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9b1f380f8f3d8faba47b70f88a989e06dd533b0b | 16e12ca977e31f8f672ead2679ff283389192eb2 | /byoot/wsgi.py | 41a56b165863c619680351d51a9ff60751f7b7ee | [] | no_license | hussainalhilli/django-project | 7e69e5c6f5a66830742e29f24ebd7a1f28bca639 | b0dc616164c7c4420caf40795e458d42680a80c7 | refs/heads/master | 2020-12-25T06:22:42.762820 | 2016-04-26T18:27:12 | 2016-04-26T18:27:12 | 56,699,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for byoot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "byoot.settings")
application = get_wsgi_application()
| [
"Hilli@Hillis-MacBook-Air.local"
] | Hilli@Hillis-MacBook-Air.local |
a41e01bb4fc3e243c6163b6ec37fa51a5a3a8beb | 85ce93b4f02111b68941807dd99527e97c77ebef | /websocket/rpc_agent.py | 5baa937c56b0dc5270c7b26ba099d57cbaa17177 | [] | no_license | jdutreve/request-reply-python | dd706be4c5246fb4fd241905aa030e1975492226 | 43eec3943d9cff08616e95097fa109c20ddb3112 | refs/heads/main | 2023-01-20T19:45:26.033669 | 2020-11-14T11:39:21 | 2020-11-14T11:39:21 | 312,557,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,854 | py | #
# Freelance Pattern
#
# worker LRU load balancing (ROUTER + ready queue)
# heartbeat to workers so they restart all (i.e. send READY) if queue is unresponsive
#
import time
import threading
import random
import asyncio
import zmq
import zmq.asyncio
from zhelpers import zpipe
# If no server replies after N retries, abandon request by returning FAILED!
REQUEST_RETRIES = 5
HEARTBEAT_INTERVAL = 500 # Milliseconds
HEARTBEAT_LIVENESS = 3 # 3..5 is reasonable
HEARTBEAT = b''
DEALER_QUEUE_SIZE = 100000 # Queue to access each WEBSOCKET
CLIENT_QUEUE_SIZE = 100000 # Queue to access the internal dispatcher
BACKEND_QUEUE_SIZE = 1000 # Queue to access external servers
too_late_nb = 0
reply_nb = 0
def p(msg):
pass
# print('%s %s' % (datetime.now().strftime('%M:%S:%f')[:-3], msg))
class FreelanceClient(object):
context = None # Our Context
commands = None # command socket in the client thread
agent = None # the background thread
def __init__(self):
context = zmq.Context()
self.context = zmq.asyncio.Context()
self.commands, agent_command_socket = zpipe(context, context, zmq.PAIR, zmq.PAIR)
self.agent = threading.Thread(target=agent_task, args=(self.context, agent_command_socket))
self.agent.daemon = False
self.agent.start()
def connect(self, endpoint):
"""Connect to new server endpoint
Sends [CONNECT][endpoint] to the agent
"""
self.commands.send_multipart([b"CONNECT", endpoint])
def create_requester(self):
requester = self.context.socket(zmq.DEALER)
requester.sndhwm = DEALER_QUEUE_SIZE
requester.connect("inproc://toto")
return requester
async def request(self, requester, msg):
await requester.send_multipart([b"REQUEST"] + msg)
async def receive(self, requester):
return await requester.recv_multipart()
# =====================================================================
# Asynchronous part, works in the background thread
class Request(object):
address = None # zmq reply to address
left_retries = 0
msg = None # Current request
expires = 0 # Timeout for request/reply
sequence = 0
def __init__(self, sequence, msg, address):
super().__init__()
self.address = address
self.msg = [str(sequence).encode()] + msg
self.left_retries = REQUEST_RETRIES
self.sequence = sequence
result = self.compute_expires()
self.expires = time.time() + result
def retry(self, now):
self.left_retries -= 1
if self.left_retries < 1:
return False
result = self.compute_expires()
self.expires = now + result
return True
def compute_expires(self):
n = REQUEST_RETRIES - self.left_retries
result = (3 ** n) * (random.random() + 1)
#p("request timeout = %s" % result)
return result
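
# Backoff illustration for Request.compute_expires() (the random factor is
# uniform in [1, 2)): the first attempt expires after ~1-2 s, retries then
# wait ~3-6 s, ~9-18 s and ~27-54 s, spreading retries out exponentially.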
class FreelanceAgent(object):
context = None # Own context
command_socket = None # command Socket to talk back to client
request_socket = None # request Socket to talk back to client
backend_socket = None # Socket to talk to servers
servers = None # Servers we've connected to, used for sending PING
actives = None # Servers we know are alive (reply or PONG), used for fair load balancing
sequence = 0 # Number of requests ever sent
request = None # Current request if any
requests = None # all pending requests
def __init__(self, context, command_frontend):
self.context = context
self.command_socket = command_frontend
self.request_socket = context.socket(zmq.ROUTER)
self.request_socket.rcvhwm = CLIENT_QUEUE_SIZE
self.request_socket.router_mandatory = 1
self.request_socket.bind("inproc://toto")
self.backend_socket = zmq.Context().socket(zmq.ROUTER)
# make sure router doesn't drop unroutable message (host unreachable or again exception)
self.backend_socket.router_mandatory = 1
self.backend_socket.hwm = BACKEND_QUEUE_SIZE
self.servers = {}
self.actives = []
self.request = None
self.requests = {}
self.start = time.time()
asyncio.get_event_loop().create_task(self.on_command_message())
async def on_command_message(self):
msg = self.command_socket.recv_multipart()
command = msg.pop(0)
if command == b"CONNECT":
endpoint = msg.pop(0)
p("I: CONNECTING %s" % [endpoint])
self.backend_socket.connect(endpoint)
server = Server(endpoint)
self.servers[endpoint] = server
else:
p("E: Unknown command %s" % command)
async def on_request_message(self):
msg = await self.request_socket.recv_multipart()
#p("DEALER RECEIVE %s" % msg)
request = msg[1]
if request == b"REQUEST":
self.sequence += 1
address = msg[0]
self.request = Request(self.sequence, msg, address)
self.requests[self.sequence] = self.request
else:
p("E: Unknown request %s" % request)
async def on_reply_message(self):
reply = self.backend_socket.recv_multipart()
endpoint = reply[0] # the server that replied
server = self.servers[endpoint]
server.reset_server_expiration()
msg = reply[1:]
if len(msg) == 1:
if msg[0] is HEARTBEAT:
p("I: RECEIVE PONG %s" % [endpoint])
server.connected = True
else:
p("E: Invalid message from Worker: %s" % reply)
else:
sequence = int(msg[0].decode())
if sequence in self.requests:
global reply_nb
reply_nb += 1
p("I: RECEIVE REPLY %s : counter=%d" % (reply, reply_nb))
request = self.requests.pop(sequence)
msg = [request.address] + msg
await self.request_socket.send_multipart(msg)
else:
global too_late_nb
too_late_nb += 1
#p("W: TOO LATE REPLY %s" % reply)
if not server.alive:
server.alive = True
p("I: SERVER ACTIVED %s-----------------------" % [server.address])
# We want to move this responding server at the 'right place' in the actives queue, first remove it
if server in self.actives:
self.actives.remove(server)
# Then, find the server having returned a reply the most recently (i.e. being truly alive)
most_recently_received_index = 0
for active in reversed(self.actives): # reversed() because the most recent is at the end of the queue
if active.is_last_operation_receive:
most_recently_received_index = self.actives.index(active) + 1
break
# Finally, put the current server just behind the found server (Least Recently Used is the first in the queue)
self.actives.insert(most_recently_received_index, server)
server.is_last_operation_receive = True
def send_request(self, server, request):
request = [server.address] + request.msg
self.backend_socket.send_multipart(request)
p("I: SEND REQUEST %s, ACTIVE: %s" % (request, self.actives))
def agent_task(ctx, command_socket):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)  # needed so asyncio.get_event_loop() works in this thread
    loop.create_task(do_agent_task(ctx, command_socket))
    loop.run_forever()
async def do_agent_task(ctx, command_socket):
agent = FreelanceAgent(ctx, command_socket)
poll_backend = zmq.asyncio.Poller()
poll_backend.register(agent.backend_socket, zmq.POLLIN)
poll_all = zmq.asyncio.Poller()
poll_all.register(agent.backend_socket, zmq.POLLIN)
poll_all.register(agent.request_socket, zmq.POLLIN)
while True:
if len(agent.actives) > 0:
events = dict(await poll_all.poll(HEARTBEAT_INTERVAL))
else:
events = dict(await poll_backend.poll(HEARTBEAT_INTERVAL))
if events.get(agent.backend_socket) == zmq.POLLIN:
await agent.on_reply_message()
is_request_sent = False
if events.get(agent.request_socket) == zmq.POLLIN:
await agent.on_request_message()
if agent.request and len(agent.actives) > 0:
# Least recently used active server, i.e. queue head
active_server = agent.actives[0]
agent.send_request(active_server, agent.request)
is_request_sent = True
agent.request = None
now = time.time()
# Retry any expired requests
if len(agent.requests) > 0 and len(agent.actives) > 0:
active_server = agent.actives[0]
for request in list(agent.requests.values()):
if now >= request.expires:
if request.retry(now):
p("I: RETRYING REQUEST %s, remaining %d" % (request.sequence, request.left_retries))
agent.send_request(active_server, request)
is_request_sent = True
else:
agent.requests.pop(request.sequence)
global reply_nb
reply_nb += 1
p("I: REQUEST FAILED %d : counter=%d" % (request.sequence, reply_nb))
msg = [request.address, b"FAILED-"+str(request.sequence).encode()]
await agent.request_socket.send_multipart(msg)
        # Move the current active server from the head to the end of the queue (Round-Robin)
if is_request_sent:
server = agent.actives.pop(0)
agent.actives.append(server)
server.is_last_operation_receive = False # last operation is now SEND, not RECEIVE
server.ping_at = now + 1e-3 * HEARTBEAT_INTERVAL
# Remove any expired servers
for server in agent.actives[:]:
if now >= server.expires:
p("I: SERVER EXPIRED %s-----------------------" % [server.address])
server.alive = False
agent.actives.remove(server)
# Send PING to idle servers if time has come
for server in agent.servers.values():
server.ping(agent.backend_socket, now)
class Server(object):
address = None # Server identity/address
    alive = False # True if known to be alive
connected = False
ping_at = 0 # Next ping at this time
expires = 0 # Expires at this time
is_last_operation_receive = False # Whether the last action for this server was a receive or send operation
def __init__(self, address):
self.address = address
self.alive = False
self.connected = False
self.reset_server_expiration()
self.is_last_operation_receive = False
def reset_server_expiration(self):
time_time = time.time()
self.ping_at = time_time + 1e-3 * HEARTBEAT_INTERVAL
self.expires = time_time + 1e-3 * HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS
def ping(self, backend_socket, now):
if self.connected and self.alive and now > self.ping_at:
p("I: SEND PING %s" % [self.address])
backend_socket.send_multipart([self.address, HEARTBEAT])
self.ping_at = now + 1e-3 * HEARTBEAT_INTERVAL
self.is_last_operation_receive = False # last operation is now SEND, not RECEIVE
def tickless(self, tickless):
if tickless > self.ping_at:
tickless = self.ping_at
return tickless
def __repr__(self):
return "%s-%s" % (self.address.decode().split(':')[2], self.is_last_operation_receive)
| [
"noreply@github.com"
] | jdutreve.noreply@github.com |
393658705cd559f61f856a8a2dff56181e9268ed | 348a4089b4af1b4c05f4837cccd01d87f756e7ed | /else/test.py | 77b2ece6b52b6354efd75c5024fefe3c3078c4df | [] | no_license | ecode-ethiopia/plotbot | 5bc694e3e73ae82e10dc3b0b25d98442478d157a | eb1e40c42c43943a00c38299ea966852fc601707 | refs/heads/master | 2020-07-07T20:48:10.092435 | 2018-09-13T10:01:57 | 2018-09-13T10:01:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,501 | py | # this is the webhook for a TestBot
#import os
#import json
#import requests
from flask import Flask, request, make_response, jsonify
#from keys import nutrionix_app_id, nutrionix_app_key
#import pygal
#from pygal.style import DefaultStyle
#import cairosvg
#import pymongo
#from pymongo import MongoClient
import ast
myinput1 = {
"id": "d75f7d46-155f-4a2f-add5-75b5fc4f2596",
"timestamp": "2018-02-22T09:01:10.433Z",
"lang": "en",
"result": {
"source": "agent",
"resolvedQuery": "Dan 50 eur",
"action": "testbot",
"actionIncomplete": False,
"parameters": {
"user1": "Dan",
"sum": {
"amount": 500,
"currency": "UAH"
},
"sum_basic_currency": "",
"user2": ""
},
"contexts": [],
"metadata": {
"intentId": "83b7244a-7595-4f67-8b72-85199ded352a",
"webhookUsed": "false",
"webhookForSlotFillingUsed": "false",
"intentName": "add_payment"
},
"fulfillment": {
"speech": "Dan paid 50 EUR",
"messages": [
{
"type": 0,
"speech": "Dan paid 50 EUR"
}
]
},
"score": 0.9300000071525574
},
"status": {
"code": 200,
"errorType": "success",
"webhookTimedOut": False
},
"sessionId": "ad0d56ff-2dc1-4720-8516-067ce9c1cd55"
}
myinput2 = {
"id": "d75f7d46-155f-4a2f-add5-75b5fc4f2596",
"timestamp": "2018-02-22T09:01:10.433Z",
"lang": "en",
"result": {
"source": "agent",
"resolvedQuery": "Dan 1200",
"action": "testbot",
"actionIncomplete": False,
"parameters": {
"user1": "Tim",
"sum": "",
"sum_basic_currency": 1200,
"user2": ""
},
"contexts": [],
"metadata": {
"intentId": "83b7244a-7595-4f67-8b72-85199ded352a",
"webhookUsed": "false",
"webhookForSlotFillingUsed": "false",
"intentName": "add_payment"
},
"fulfillment": {
"speech": "Dan paid 1200 UAH",
"messages": [
{
"type": 0,
"speech": "Dan paid 1200 UAH"
}
]
},
"score": 0.9300000071525574
},
"status": {
"code": 200,
"errorType": "success",
"webhookTimedOut": False
},
"sessionId": "ad0d56ff-2dc1-4720-8516-067ce9c1cd55"
}
# Get our log (txt file will be substituted with Mongo DB)
with open("log.txt", "r+") as logfromtxt:
log = ast.literal_eval(logfromtxt.read())
BASIC_CURRENCY = 'UAH'
# Exchange rates to be substituted with calls to some API
usd_uah = 26.9
eur_uah = 33.1
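# e.g. a 50 EUR payment converts to 50 * 33.1 = 1655.0 UAH at the rate above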
# Get request parameters
req = myinput1
if 'originalRequest' in req:
first_name = req.get('originalRequest').get('data').get('message').get('from').get('first_name')
last_name = req.get('originalRequest').get('data').get('message').get('from').get('last_name')
uid = req.get('originalRequest').get('data').get('message').get('from').get('id')
user1 = req.get('result').get('parameters').get('user1')
user2 = req.get('result').get('parameters').get('user2')
sum = req.get('result').get('parameters').get('sum') # {"amount": 100, "currency": "USD"}
sum_basic_currency = req.get('result').get('parameters').get('sum_basic_currency')
timestamp = req.get('timestamp')
print('user1: ' + user1)
print('user2: ' + user2)
print('sum: ' + str(sum))
print('sum_basic_currency: ' + str(sum_basic_currency))
# If the currency is not the basic one (e.g. UAH), convert to the basic currency
if sum == "":
amount = float(sum_basic_currency)
else:
if sum["currency"] == BASIC_CURRENCY:
amount = sum["amount"]
elif sum["currency"] == "USD":
amount = sum["amount"] * usd_uah
elif sum["currency"] == "EUR":
amount = sum["amount"] * eur_uah
print('sum_converted: ' + str(amount))
# In our 1st model we'll have 2 already registered users, Tim and Dan
if user2 == "": # means that user1 paid for all = he gets his sum - sum/users_quantity, for eg. if 2 users and user1 paid $50, his balance will be +25$
who_received = "all"
every_user_gets = amount / len(log["users"])
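    # Worked example: with the 2 registered users in the log and Dan paying
    # 500 UAH, every_user_gets is 250.0 -- Dan's balance moves by +250
    # (500 - 250) and the other user's by -250.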
print("log: " + str(log))
nexttransaction = {
"timestamp": timestamp,
"transaction_number": len(log["transactions"]) + 1,
"who_paid": user1,
"who_received": who_received,
"amount": amount,
"transaction_balance": {},
"total_balance": {}
}
for user in log["users"]:
if user != user1:
nexttransaction["transaction_balance"].update({user: every_user_gets * -1})
user_balance_was = log["transactions"][len(log["transactions"])-1]["total_balance"][user]
user_balance_now = user_balance_was + every_user_gets * -1
else:
nexttransaction["transaction_balance"].update({user: amount - every_user_gets})
user_balance_was = log["transactions"][len(log["transactions"])-1]["total_balance"][user]
user_balance_now = user_balance_was + amount - every_user_gets
print("Balance of user {} was {}, became {}".format(user, user_balance_was, user_balance_now))
nexttransaction["total_balance"][user] = user_balance_now
print("nexttransaction: " + str(nexttransaction))
log["transactions"].append(nexttransaction)
print("New log: " + str(log))
with open("log.txt", "w") as logdump:
logdump.write(str(log))
print(str(log["transactions"][len(log["transactions"])-1]["total_balance"]))
| [
"iurii.dziuban@gmail.com"
] | iurii.dziuban@gmail.com |
e3bd5d3495a210f25ecc424a18742c1b119aa157 | 9b1f36c8863916cead28b58b08ba37f346fe85c9 | /2 ЧАСТЬ/задача (15).py | f52108f8fef3e8ec0a726e2a82b61341e0bfb959 | [] | no_license | kbpersik/cource-work | 875d089c8fa0d16ec5141a0fd8ca07294a4bed22 | ea7f382e95b26e4f9f8ee18f9345a6a0cc95a832 | refs/heads/master | 2020-05-31T19:26:03.868087 | 2019-06-02T19:45:25 | 2019-06-02T19:45:25 | 190,456,062 | 1 | 0 | null | 2019-06-05T19:28:58 | 2019-06-05T19:28:58 | null | UTF-8 | Python | false | false | 512 | py | import numpy as np
import random
N = random.randint(1, 10)
M = random.randint(1, 10)
H = random.randint(1, 10)
A = np.random.randint(0, 10, (N, M))
print("Матрица:\r\n{}".format(A))
A_bool = A == H
row_sum = np.sum(A_bool, axis=0)
print("Столбцы в которых встречается значение {}:".format(H))
print(np.argwhere(row_sum).flatten())
print("Столбцы в которых нет значения {}:".format(H))
print(np.argwhere(row_sum == 0).flatten()) | [
"noreply@github.com"
] | kbpersik.noreply@github.com |
4b4a0ca0bdc90aa474a316c7d9937947eaee2af0 | 79ea915d7b66510fd783a84b8cba564bbcc95eef | /app/migrations/0001_initial.py | 90108fd195a20e0aa89f0fde3546f01c42e63738 | [] | no_license | pseudo-sm/covidasha | 04eed28dbb20fba27adb709842aa6d055d8ebdf3 | 2da73b11de652553b996f92ee215bc80fba79d57 | refs/heads/master | 2023-04-22T22:44:09.715625 | 2021-04-28T19:03:30 | 2021-04-28T19:03:30 | 361,137,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # Generated by Django 3.2 on 2021-04-23 09:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Alert',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('want', models.BooleanField()),
('what', models.CharField(max_length=300)),
('location', models.CharField(max_length=300)),
('phone', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
],
),
]
| [
"saswathcommand@gmail.com"
] | saswathcommand@gmail.com |
4a93251e0a310cb4044c6db320a1f6a0d884524a | a284dd520c0e7619ed928645e4ae8855d4a04d3e | /etl.py | 612263275018324d697d86ef1a4f53e041014228 | [] | no_license | mmosc/sparkify-data-lake | c928b0e107dc4cfed41fe3a7f7c0f2bdbb12dc21 | 82d61555c24c245c498e55c8e2a6149771c0099e | refs/heads/master | 2022-11-12T23:20:20.642277 | 2020-07-01T13:24:27 | 2020-07-01T13:24:27 | 275,584,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,874 | py | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.types import StructType as R, StructField,DecimalType as Dec, DoubleType as Dbl, StringType as Str, IntegerType as Int, DateType as Date, TimestampType
from pyspark.sql.functions import monotonically_increasing_id
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config.get('AWS','KEY')
os.environ['AWS_SECRET_ACCESS_KEY']=config.get('AWS','SECRET')
def create_spark_session():
'''
Creates and returns the spark session
'''
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def process_song_data(spark, input_data, output_data, aws=True):
"""
Process song data by iterating over the .json files in
the song_data folder in the input_data folder.
Creates songs and artists tables.
If aws is True, writes the tables to the S3 bucket
    given in the output_data argument
key arg:
- spark: the spark session
- input_data: the path to the folder containing the song_data
- output_data: the path to the S3 bucket where to write the tables
    - aws: set to True when the script is executed on the cluster;
      set to False when executing locally for debugging
"""
# Define the song schema before importing data
songsSchema = R([
StructField("num_songs",Int()),
StructField("artist_id",Str()),
StructField("artist_latitude",Dec()),
StructField("artist_longitude",Dec()),
StructField("artist_location",Str()),
StructField("artist_name",Str()),
StructField("song_id",Str()),
StructField("title",Str()),
StructField("duration",Dbl()),
StructField("year",Int()),
])
# get filepath to song data file
song_data = input_data + "song_data/*/*/*/*.json"
# read song data file
print("Reading song_data from {}\n".format(song_data))
df = spark.read.json(song_data,schema=songsSchema)
# extract columns to create songs table
print("Extracting columns to create the songs table...\n")
df.createOrReplaceTempView("songs_data_table")
songs_table = spark.sql('''
SELECT DISTINCT song_id, title, artist_id, year, duration
FROM songs_data_table
''')
print("done.")
# print song table schema
print("Songs table schema:\n")
songs_table.printSchema()
# write songs table to parquet files partitioned by year and artist
if aws:
print("Writing the songs table to parquet files partitioned by year and artist...\n")
songs_table.write.parquet(output_data + "songs_table.parquet",
partitionBy = ["year", "artist_id"],
mode = "overwrite")
print("done.")
# extract columns to create artists table
print("Extracting columns to create the artists table...\n")
artists_table = spark.sql('''
SELECT DISTINCT artist_id, artist_name AS name, artist_location AS location, artist_latitude AS latitude, artist_longitude AS longitude
FROM songs_data_table
''')
print("done.")
# print artists table schema
print("Artists table schema:\n")
artists_table.printSchema()
# write artists table to parquet files
if aws:
print("Writing the artists table to parquet files ...\n")
artists_table.write.parquet(output_data + "artists_table.parquet",
mode = "overwrite")
print("done.")
def process_log_data(spark, input_data, output_data, aws=True):
"""
Process log and song data by iterating over the
- the .json files in the log_data folder
- the .json files in the song_data folder.
Creates the users, times and songplays tables.
If aws is True, writes the tables to the S3 bucket
    given in the output_data argument
key arg:
- spark: the spark session
- input_data: the path to the folder containing the song_data
- output_data: the path to the S3 bucket where to write the tables
    - aws: set to True when the script is executed on the cluster;
      set to False when executing locally for debugging
"""
songsSchema = R([
StructField("num_songs",Int()),
StructField("artist_id",Str()),
StructField("artist_latitude",Dec()),
StructField("artist_longitude",Dec()),
StructField("artist_location",Str()),
StructField("artist_name",Str()),
StructField("song_id",Str()),
StructField("title",Str()),
StructField("duration",Dbl()),
StructField("year",Int()),
])
# get filepath to log data file
log_data = input_data + "log_data/*/*/*.json"
# read log data file
print("Reading log_data from {}\n".format(log_data))
df = spark.read.json(log_data)
print("done.")
# filter by actions for song plays
print("Filter by actions for song plays...")
df = df.filter(df.page=='NextSong')
df.createOrReplaceTempView("logs_data_table")
print("done.")
# extract columns for users table
print("Extract columns for users table...")
users_table = spark.sql('''
SELECT DISTINCT userId as user_id, firstName as first_name, lastName as last_name, gender, level
FROM logs_data_table
''')
users_table = users_table.dropDuplicates(["user_id"])
print("done.")
# write users table to parquet files
if aws:
print("Write users table to parquet files...")
users_table.write.parquet(output_data + "users_table.parquet",
mode = "overwrite")
print("done.")
# create datetime column from original timestamp column
print("Create datetime column from original timestamp column...")
get_datetime = udf(lambda time: datetime.fromtimestamp((time/1000.0)), Date())
df = df.withColumn("date",get_datetime("ts"))
print("done.")
# create timestamp column from original timestamp column
print("Create timestamp column from original timestamp column...")
convert_ts = udf(lambda time: datetime.fromtimestamp((time/1000.0)), TimestampType())
df = df.withColumn("ts",convert_ts("ts"))
print("done.")
# extract columns to create time table
print("Extract columns to create time table...")
df.createOrReplaceTempView("clean")
time_table = spark.sql('''
SELECT ts AS start_time,
date_format(date,'YYYY') AS year,
date_format(date,'MM') AS month,
date_format(date,'dd') AS day,
date_format(date,'w') AS week,
date_format(ts,'E') AS weekday,
HOUR(ts) AS hour
FROM clean
''').dropDuplicates(["start_time"])
print("done.")
# write time table to parquet files partitioned by year and month
if aws:
print("Write time table to parquet files partitioned by year and month...")
        time_table.write.parquet(output_data + "time_table.parquet",
partitionBy = ["year", "month"],
mode = "overwrite")
print("done.")
# read in song data to use for songplays table
print("Read in song data to use for songplays table...")
song_data = input_data + "song_data/*/*/*/*.json"
song_df = spark.read.json(song_data, schema=songsSchema)
song_df.createOrReplaceTempView("songs_data_table")
print("done.")
# extract columns from joined song and log datasets to create songplays table
print("Extract columns from joined song and log datasets to create songplays table...")
artists_table = spark.sql('''
SELECT DISTINCT artist_id, artist_name AS name, artist_location AS location, artist_latitude AS latitude, artist_longitude AS longitude
FROM songs_data_table
''')
artists_table.createOrReplaceTempView("artists")
print("done.")
print("Extract columns to create songplays table...")
songplays_table = spark.sql('''
SELECT
year(l.ts) AS year,
month(l.ts) AS month,
l.ts AS start_time,
l.userId AS user_id,
l.level,
s.song_id,
a.artist_id,
l.sessionId AS session_id,
l.location,
l.userAgent AS user_agent
FROM clean AS l
JOIN songs_data_table AS s
ON (l.song = s.title AND l.artist = s.artist_name)
JOIN artists AS a ON a.artist_id=s.artist_id
''')
print("done.")
print("Create songplays_id...")
songplays_table = songplays_table.withColumn("songplay_id", monotonically_increasing_id())
print("done.")
# write songplays table to parquet files partitioned by year and month
if aws:
print("Write songplays table to parquet files partitioned by year and month...")
        songplays_table.write.parquet(output_data + "songplays_table.parquet",
partitionBy = ["year", "month"],
mode = "overwrite")
print("done.")
def main():
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
#input_data = "./data/"
output_data = "s3a://sparkify-udacity/"
process_song_data(spark, input_data, output_data#, aws=False
)
process_log_data(spark, input_data, output_data#, aws=False
)
if __name__ == "__main__":
main()
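
# Typical invocation (illustrative; exact flags depend on the cluster setup):
#   spark-submit etl.py
# with AWS credentials supplied via dl.cfg, as read at the top of this script.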
| [
"ma.moscati@gmail.com"
] | ma.moscati@gmail.com |
b96d13b288c2afe32f6274094917a0b4626ca0bc | 069f911ec587a14b74526aa00e5646499e36da1e | /pyjobsearch/bin/pasteurize | b54487ffce3859a01a5c913e05ec9c7e1555f893 | [] | no_license | cmurphy580/PyJobSearch | 448c2dd57aab12573e3d5289bd97a455cef68368 | 436e4ebf511d605e69bb498c12c3cdfd0f961091 | refs/heads/master | 2022-12-13T07:16:32.153291 | 2021-06-24T21:26:16 | 2021-06-24T21:26:16 | 204,336,863 | 1 | 0 | null | 2022-12-08T05:21:08 | 2019-08-25T18:55:20 | Python | UTF-8 | Python | false | false | 268 | #!/Users/conormurphy/Desktop/pyjobsearch/pyjobsearch/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from libpasteurize.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"cmurphy580@gmail.com"
] | cmurphy580@gmail.com | |
46fb6238d675c352a76334374b518342da606394 | 4aece8140046cefd8500030f305852dbd1cf8d10 | /nlp/nlp.py | 87fea4b3356ebde745838fcc0fc97da33d9b3140 | [] | no_license | donkkis/tori-scraper | 3f1bbf5226aef656ae870fc2920cabbdd7ffa2f5 | 10202da06deeaca139ff7472a5a5b9cd5ccc9858 | refs/heads/master | 2023-03-16T07:41:51.108537 | 2021-03-04T10:37:21 | 2021-03-04T10:37:21 | 293,253,056 | 0 | 0 | null | 2021-03-04T10:37:22 | 2020-09-06T10:30:21 | Python | UTF-8 | Python | false | false | 1,308 | py | import os
import string
import pandas as pd
import nltk
from nltk import bigrams, trigrams
from nltk.tokenize import word_tokenize
from nltk.probability import FreqDist
from dotenv import load_dotenv
load_dotenv()
from typing import List
STOPWORDS = nltk.corpus.stopwords.words('finnish')
def tokenize(sentence: str) -> List[str]:
tokens = word_tokenize(sentence.lower())
tokens = [t for t in tokens if t not in STOPWORDS]
tokens = [t for t in tokens if t not in string.punctuation]
return tokens
def get_tags(data: pd.DataFrame = None, col='Title', most_common=200) -> pd.DataFrame:
"""Exctract tags from listing titles, up to frequent 3-grams."""
if not data:
data = pd.read_csv(os.getenv('DESCRIPTIONS_PATH'))
corpus = tokenize(' '.join(data.loc[:, col]))
tags = pd.DataFrame({'Word': [], 'Count': []})
fdists = [FreqDist(corpus), FreqDist(list(bigrams(corpus))), FreqDist(list(trigrams(corpus)))]
for fd in fdists:
top_n = fd.most_common(most_common)
common = pd.DataFrame({
'Word': [' '.join(w[0]) if isinstance(w[0], tuple) else w[0] for w in top_n],
'Count': [w[1] for w in fd.most_common(most_common)],
})
tags = pd.concat([tags, common])
return tags
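

# Minimal usage sketch (assumes DESCRIPTIONS_PATH in .env points at a CSV with
# a 'Title' column, and that the nltk 'punkt' and 'stopwords' data are installed):
if __name__ == '__main__':
    top = get_tags(most_common=50)
    print(top.sort_values('Count', ascending=False).head(10))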
| [
"panu.aho@wapice.com"
] | panu.aho@wapice.com |
45a81c3d49206cca47cd45f790c01bf1608403af | 9ad27ac41069190d316065689a25ecab0df60cc9 | /bin/ex13_combine_argv_with_input.py | 46bdb3f35125337556ffe7552c4eddba2835f8b5 | [] | no_license | hmdshfq/Learn-Python-The-Hard-Way | 115efbd9254460a74d1add8f9c81ad2c00ff6b70 | 624519050b5e1797ee29f2ead28efff8eb579853 | refs/heads/master | 2022-02-27T11:16:57.302210 | 2019-10-13T13:37:53 | 2019-10-13T13:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # Exercise 13 - Practice exercise that combines input with argv
from sys import argv
script, username = argv
print("The script is ", script)
print("The username is ", username)
password = input("Enter your password: ")
print("Your password is ", password)
| [
"hmdbupt@yahoo.com"
] | hmdbupt@yahoo.com |
aa4f12a9fb94b5aba945728094e8204a9ecd33aa | 65ece82f9d1529c9babb7e902d6f875348f09fc4 | /5주차/CRUD/CRUD/myvenv/Lib/site-packages/django/db/models/sql/compiler.py | e98dbdfdf2e8ed433482512f5e135bb407f876f9 | [] | no_license | hoohoohoohoo/LikeLion | f1e8a0d2b6f88ef9ff1c4cffde5af8d7c76c6ae4 | 566ceb4c6bb11579163732ae5320e7e7068f0ca3 | refs/heads/master | 2020-05-05T03:14:45.573294 | 2019-05-10T08:27:17 | 2019-05-10T08:27:17 | 179,665,157 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71,086 | py | import collections
import functools
import re
import warnings
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref, Subquery
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError, NotSupportedError
from django.utils.deprecation import (
RemovedInDjango30Warning, RemovedInDjango31Warning,
)
from django.utils.hashable import make_hashable
from django.utils.inspect import func_supports_parameter
FORCE = object()
class SQLCompiler:
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
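        # e.g. self.ordering_parts.search('"table"."col" DESC') captures
        # ('"table"."col"', 'DESC', '') -- group(1) is the bare expression.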
self._meta_ordering = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
# Skip References to the select clause, as all expressions in the
# select clause are already part of the group by.
if not expr.contains_aggregate and not is_ref:
expressions.extend(expr.get_source_expressions())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
if isinstance(expr, Subquery) and not sql.startswith('('):
# Subquery expression from HAVING clause may not contain
# wrapping () because they could be removed when a subquery is
# the "rhs" in an expression (see Subquery._prepare()).
sql = '(%s)' % sql
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or (
getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key and expr.target.model._meta.managed
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col, select_format=True)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif self.query.get_meta().ordering:
ordering = self.query.get_meta().ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator:
src = resolved.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias:
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
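    # e.g. quote_name_unless_alias('sometable') returns the backend-quoted name
    # (such as "sometable" on PostgreSQL), while a known alias like 'T3' comes
    # back unquoted.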
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format is FORCE or (select_format and not self.query.subquery):
return node.output_field.select_format(self, sql, params)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query.set_values((
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
))
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = 'SELECT * FROM ({})'.format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif not features.supports_slicing_ordering_in_compound:
part_sql = '({})'.format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [', '.join(out_cols), 'FROM', *from_]
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
order_by = order_by or self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if self._meta_ordering:
# When the deprecation ends, replace with:
# order_by = None
warnings.warn(
"%s QuerySet won't use Meta.ordering in Django 3.1. "
"Add .order_by('%s') to retain the current query." % (
self.query.model.__name__,
"', '".join(self._meta_ordering)
),
RemovedInDjango31Warning,
stacklevel=4,
)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if self.query.explain_query:
result.insert(0, self.connection.ops.explain_query_prefix(
self.query.explain_format,
**self.query.explain_options
))
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limit_offset:
result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = 'col%d' % index
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), tuple(sub_params + params)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Return a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, return a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.is_relation and opts.ordering and getattr(field, 'attname', None) != name:
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
get_ordering() and get_distinct() must produce same target columns on
same input, as the prefixes of get_ordering() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
def local_setter(obj, from_obj):
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(obj, from_obj):
setattr(from_obj, name, obj)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': remote_setter,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
parts = [] if name == 'self' else name.split(LOOKUP_SEP)
klass_info = self.klass_info
for part in parts:
for related_klass_info in klass_info.get('related_klass_infos', []):
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
select_index = klass_info['select_fields'][0]
col = self.select[select_index][0]
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
Convert the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
convs = []
for conv in (backend_converters + field_converters):
if func_supports_parameter(conv, 'context'):
warnings.warn(
'Remove the context parameter from %s.%s(). Support for it '
'will be removed in Django 3.0.' % (
conv.__self__.__class__.__name__,
conv.__name__,
),
RemovedInDjango30Warning,
)
conv = functools.partial(conv, context={})
convs.append(conv)
converters[i] = (convs, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery). It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
for row in result[0]:
if not isinstance(row, str):
yield ' '.join(str(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
return_id = False
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if value.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts)
result = ['%s %s' % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.return_id and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql(
ignore_conflicts=self.query.ignore_conflicts
)
if self.return_id and self.connection.features.can_return_id_from_insert:
if self.connection.features.can_return_ids_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += [r_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, return_id=False):
assert not (
return_id and len(self.query.objs) != 1 and
not self.connection.features.can_return_ids_from_bulk_insert
)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not return_id:
return
if self.connection.features.can_return_ids_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_ids(cursor)
if self.connection.features.can_return_id_from_insert:
assert len(self.query.objs) == 1
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.alias_map if self.query.alias_refcount[t] > 0]) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.base_table)]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
if val.contains_over_clause:
raise FieldError('Window expressions are not allowed in this query.')
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=FORCE)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
| [
"hojun0520@nate.com"
] | hojun0520@nate.com |
0f3b7c53f539c1c74fee44343f1d0ce4d6d2f0f7 | 98beba61ee90c29d67aff50e9bc226e56ed49a2e | /tests/test_graph_states.py | d6fabac09d3765e067fc34442d5222772271e413 | [
"MIT"
] | permissive | deyh2020/Graph-States | 5a6c53a44ba6ee2c1048c56ccaf334a8e4588eea | e3e29abedf0a82e12a36da10bad1393b8d7ada6e | refs/heads/master | 2022-11-09T09:39:46.331707 | 2020-07-01T22:03:25 | 2020-07-01T22:03:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | import unittest
import networkx as nx
from qiskit.circuit.classicalregister import Clbit, ClassicalRegister
from QiskitGraphStates import QiskitGraphState
class GraphStateTest(unittest.TestCase):
def setUp(self):
self.G = nx.Graph()
self.G.add_edges_from([(0, 1), (1, 2)])
def test_init_edges(self):
a = QiskitGraphState(self.G)
self.assertEqual(sorted(a.graph.edges), sorted([(0, 1), (1, 2)]))
def test_init_build_node_dict(self):
a = QiskitGraphState(self.G)
assert a.node_dict == {0: 0, 1: 1, 2: 2}
def test_x_measurement(self):
"""this will give you something different every time,
so don't try to test the exact answer"""
a = QiskitGraphState(self.G)
a.x_measurement(a.qreg[0], a.creg[0])
assert isinstance(a.creg[0], type(Clbit(ClassicalRegister(3, 'c2'), 0)))
def test_apply_stabilizer(self):
a = QiskitGraphState(self.G)
assert a.circuit.depth() == 3
a.apply_stabilizer(0)
assert a.circuit.depth() == 4
a.apply_stabilizer(1)
assert a.circuit.depth() == 5
if __name__ == '__main__':
unittest.main()
| [
"thesingularity.research@gmail.com"
] | thesingularity.research@gmail.com |
e2e231f46c10e111ff203e31af44196df8a5b100 | de95e9ace929f6279f5364260630e4bf7a658c1c | /firstDebug.py | 559b4cae2546eae9669c115c5caa54fcd5c045de | [] | no_license | ludwigwittgenstein2/Algorithms-Python | ceaf0739b8582f7bd749a9b3f52f283765044744 | c5bed8b2e398c218d1f36e72b05a3f5545cf783a | refs/heads/master | 2021-06-19T11:40:31.012268 | 2017-07-02T04:59:20 | 2017-07-02T04:59:20 | 75,953,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | #!/bin/Python
import pdb
a = "aaa"
b = "bbb"
pdb.set_trace()
c = "ccc"
final = a+b+c
print(final)
| [
"penpals.oranges14@gmail.com"
] | penpals.oranges14@gmail.com |
66217f5e3e24f7387bd7ba67d7c458fb2e13bcef | 888ac6eabd07082bfc0c1a0faecad05627628b79 | /Python Codes/Labor3_Aufgabe3.py | 63b975a8bee662b8aff0ffcffb466db64ce6eb75 | [] | no_license | MertUzeken/codes | 6e12cd45e8abc358b114f76241de73ee2ceb119c | 42592710e8817256d50d449553b16eca35e275fa | refs/heads/master | 2023-05-05T15:33:59.434111 | 2021-05-28T12:19:40 | 2021-05-28T12:19:40 | 291,279,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
s1=input("Please enter the first string: ")
s2=input("Please enter the second string: ")
s3=input("Please enter the third string: ")
snew=s1.upper() + s2.upper() + s3.upper()
print("The whole string is: ", snew)
| [
"mertuzeken@gmail.com"
] | mertuzeken@gmail.com |
7ab36fd8318dee6634ece773310edd784e2a1024 | 624a37e0aec90409ab3ea0b33216bc708c7ce42b | /bubbleSort.py | e2db2b5b6b397af0de25dc889d08fffc2fd65c9d | [] | no_license | qiyue0421/pythontest | a68229dcf8d733a0815194112cda3ef9ae526c5d | 5dc20737bc492386bf5d2f35b240e83307f81e13 | refs/heads/master | 2020-05-03T12:39:51.524319 | 2019-04-18T08:46:55 | 2019-04-18T08:46:55 | 173,700,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | def bubbleSort(alist):
    # With n items, n-1 passes of comparisons are needed
    for passnum in range(len(alist)-1, 0, -1):
        # Walk the list
        for i in range(passnum):
            # Compare one adjacent pair per step
            if alist[i] > alist[i+1]:
                # Swap operation
                alist[i], alist[i+1] = alist[i+1], alist[i]

# Short bubble sort: stop early once a full pass makes no exchanges
def shortBubbleSort(alist):
    exchange = True
    passsum = len(alist) - 1
    while passsum > 0 and exchange:
        # Reset the exchange flag to False; if no swap happens during this
        # pass, the list is already sorted and the loop stops
        exchange = False
        # Walk the list and check whether a swap is needed
        for i in range(passsum):
            if alist[i] > alist[i+1]:
                # A swap is needed, so set the flag to True
                exchange = True
                # Swap operation
                alist[i], alist[i + 1] = alist[i + 1], alist[i]
        passsum = passsum - 1
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
bubbleSort(alist)
print(alist)
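# A minimal usage sketch for the early-exit variant above; the nearly sorted
# input is chosen arbitrarily so the exchange flag can stop the loop early.
blist = [17, 20, 26, 31, 44, 54, 55, 93, 77]
shortBubbleSort(blist)
print(blist)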
| [
"qiyue0421@gmail.com"
] | qiyue0421@gmail.com |
4b18e3099af4a6f90ef46f5244c5a2cb5c1d8ad4 | 62e9fb33329fbefa89287e5bc343cb9c120306a1 | /tensorflow_probability/python/bijectors/rayleigh_cdf_test.py | 6e4562bdd491190cd57b4ab6b6507d6daab3b402 | [
"Apache-2.0"
] | permissive | npfp/probability | 3c103d4b9d7a72d3d16eb79b1e4f648afbaca057 | 3911f4463cdcca6cc118633742430885fb0c88cb | refs/heads/master | 2022-05-01T14:23:40.504258 | 2022-04-07T20:08:45 | 2022-04-07T20:10:58 | 246,853,846 | 0 | 0 | Apache-2.0 | 2020-03-12T14:23:04 | 2020-03-12T14:23:03 | null | UTF-8 | Python | false | false | 3,455 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
# Dependency imports
import numpy as np
from scipy import stats as scipy_stats
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class RayleighCDFBijectorTest(test_util.TestCase):
"""Tests correctness of the rayleigh bijector."""
def testBijector(self):
scale = 50.
bijector = tfb.RayleighCDF(scale=scale, validate_args=True)
self.assertStartsWith(bijector.name, 'rayleigh')
test_cdf_func = scipy_stats.rayleigh.cdf
x = np.array([[[.1], [1.], [14.], [20.], [100.]]], dtype=np.float32)
y = test_cdf_func(x, scale=scale).astype(np.float32)
self.assertAllClose(y, self.evaluate(bijector.forward(x)))
self.assertAllClose(x, self.evaluate(bijector.inverse(y)))
self.assertAllClose(
scipy_stats.rayleigh.logpdf(x, scale=scale),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)))
self.assertAllClose(
self.evaluate(-bijector.inverse_log_det_jacobian(y, event_ndims=0)),
self.evaluate(bijector.forward_log_det_jacobian(x, event_ndims=0)),
rtol=1e-4,
atol=0.)
def testBijectorLogDetJacobianZeroAtZero(self):
scale = np.logspace(0.1, 10., num=20).astype(np.float32)
bijector = tfb.RayleighCDF(scale)
fldj = self.evaluate(bijector.forward_log_det_jacobian(0., event_ndims=0))
self.assertAllNegativeInf(fldj)
def testScalarCongruency(self):
bijector_test_util.assert_scalar_congruency(
tfb.RayleighCDF(scale=50.),
lower_x=1.,
upper_x=100.,
eval_func=self.evaluate,
rtol=0.05)
def testBijectiveAndFinite(self):
bijector = tfb.RayleighCDF(scale=20., validate_args=True)
x = np.linspace(1., 8., num=10).astype(np.float32)
y = np.linspace(
-np.expm1(-1 / 400.),
-np.expm1(-16), num=10).astype(np.float32)
bijector_test_util.assert_bijective_and_finite(
bijector, x, y, eval_func=self.evaluate, event_ndims=0, rtol=1e-3)
def testAsserts(self):
with self.assertRaisesOpError('Argument `scale` must be positive.'):
b = tfb.RayleighCDF(scale=-1., validate_args=True)
self.evaluate(b.forward(3.))
@test_util.jax_disable_variable_test
def testVariableAssertsScale(self):
scale = tf.Variable(1.)
b = tfb.RayleighCDF(scale=scale, validate_args=True)
self.evaluate([scale.initializer])
with self.assertRaisesOpError('Argument `scale` must be positive.'):
with tf.control_dependencies([scale.assign(-1.)]):
self.evaluate(b.forward(3.))
if __name__ == '__main__':
test_util.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
b784259f8e99dc073c3ca1c79fb2c66824833654 | 467c4d2ded2eba79db4fe4ad426e506ad367159a | /Code_two.py | bf80590d51f5723747e634a45ea183b4e5c89dfd | [] | no_license | PoojaBansal12/Machine_test_code | 5f615946bde28c4d8b76124b8877d96de155dd0d | 4ec4eb87ca72757bef80d7e422d85cbddab5e04a | refs/heads/master | 2022-12-06T19:29:43.064953 | 2020-08-21T05:49:57 | 2020-08-21T05:49:57 | 289,073,914 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 196 | py |
name = input("Enter the email address: ")
try:
companyName = (name.split("@")[1]).split(".")[0]
print("Your company name is: " + companyName)
except IndexError:
print("Invalid Input!")
| [
"noreply@github.com"
] | PoojaBansal12.noreply@github.com |
f977275edc686bc1849ef7dab91f7e0c966481e6 | 3546ee7f176ceb89694d0ba7b6d285f02eb45c6f | /garlic/apps.py | 770a2e3eec677194f46bfe17a3c7a94ff51b9a03 | [] | no_license | WoodlandGeoEng/fundraiserapp | 144fa40ffb9572bde8276356d559d0a485d26d16 | 2f960de2fdc6a38dfd755992a8591ce28586142a | refs/heads/master | 2020-08-15T15:20:18.298851 | 2019-10-15T18:34:22 | 2019-10-15T18:34:22 | 215,362,788 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class GarlicConfig(AppConfig):
name = 'garlic'
| [
"49031803+WoodlandGeoEng@users.noreply.github.com"
] | 49031803+WoodlandGeoEng@users.noreply.github.com |
c88e116b193fce222e5239539de8a5312d0abac1 | 38d1e0b40d9cc54e4aa272ae5c2872fca378002a | /python_stack/django/django_orm/dojo_ninjas_proj/dojo_ninjas_proj/settings.py | 1af921065ff4a5858802afb4d5aedb6da2f5d487 | [] | no_license | taichikoga/Dojo_Assignments | 0a8974a6fcb3ce83973fd481803f1bb7126ca3ba | 4c7e82bd652286d281ce86fe9c14491182c3ecde | refs/heads/master | 2022-11-22T23:19:02.431639 | 2020-07-20T17:53:37 | 2020-07-20T17:53:37 | 274,190,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | """
Django settings for dojo_ninjas_proj project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&-flk@)gszq8n8i9#-ykkagizfb(i#t%mqqo7r&ugnem9!@$c9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'dojo_ninjas_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dojo_ninjas_proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dojo_ninjas_proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"63776416+taichikoga@users.noreply.github.com"
] | 63776416+taichikoga@users.noreply.github.com |
210654eb0dfbcce74f526a3f7ac5416d20744c38 | 88726f0d487a0d9f1c568722f458d5cc8ad40566 | /ABC167/B.py | ea6cee584a1715de002cd48b55330ccc14f395f3 | [] | no_license | thortoyo/study | 60e3cccbf7b6587044ca3ee5c4cdb07f038a5800 | 7c20f7208703acf81125aca49de580982391ecfe | refs/heads/master | 2023-06-21T21:56:47.141439 | 2023-06-20T02:38:47 | 2023-06-20T02:38:47 | 196,919,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | A,B,C,K=map(int,input().split())
ans = min(A,K)  # take as many +1 cards as possible
K -= A  # picks remaining after the +1 cards (may go negative)
if K > 0:
    K -= B  # fill with 0 cards next; they cost nothing
if K > 0:
    ans -= min(C,K)  # any picks still left must be -1 cards
print(ans)
| [
"thor.toyo@gmail.com"
] | thor.toyo@gmail.com |
4255fe74e150947607bb20d26bd775baf0b357cd | d90655925e189a3e936b3223794809d1bf44acc7 | /ArticleBlog/app01/migrations/0022_auto_20200218_2029.py | cbf520624ff68e0638ac622a957a5c87bd8e9c72 | [] | no_license | xiaokou123/- | c6e91d73e151ee4e303a84e852361757867c79db | 9e798ee4d3700c70df0ab3b9b1c668b1f6208ca5 | refs/heads/master | 2021-01-02T15:36:58.890776 | 2020-03-09T01:36:02 | 2020-03-09T01:36:02 | 239,683,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | # Generated by Django 2.2.1 on 2020-02-18 12:29
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app01', '0021_user'),
]
operations = [
migrations.RenameModel(
old_name='User',
new_name='Yonghu',
),
]
| [
"760391377@qq.com"
] | 760391377@qq.com |
d2ef75c71589468ef3f6d4adc3034aa3bd5a9c87 | 7c2e1b71dfabaa2cef72da577b78677d4dd6237e | /speed_training.py | 1455060a6d7fad6686af0ea7e8d420ad181e1ee4 | [] | no_license | mirkozaff/DeepGTAPrediction | e89707b614e03f36f4a35764da8b5594a7a89a3d | a5d80c3402d053567ef6c679c3b67a5b82f0720d | refs/heads/master | 2020-03-22T07:31:15.046299 | 2019-07-01T10:25:48 | 2019-07-01T10:25:48 | 139,705,682 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,179 | py | import h5py
from keras.optimizers import Adam
from load_batch import speed_steps_counter, speed_batch_generator, load_speed_dataset, SpeedDataGenerator
from utils import EarlyStopping
from keras.callbacks import TensorBoard, ModelCheckpoint
from speed_model import nvidia_model, ODFPA
import os
from keras.utils import multi_gpu_model
from ModelMGPU import ModelMGPU
import tensorflow as tf
from keras import backend as K
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
MODELS_PATH = 'speed_models/'
#Allocate memory dinamically
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
print("Loading Model...")
with tf.device('/cpu:0'): #load model on CPU
model = ODFPA(summary=True)
gpus = 4
parallel_model = ModelMGPU(model, gpus) #load model on GPUs
print("Model Loaded. \nCompiling...")
#Setting optimizer
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
#Compile model
parallel_model.compile(optimizer=adam, loss='mse', metrics=['accuracy'])
print("Compiled.")
#Setting TensorBoard
tbCallback = TensorBoard(log_dir='speed_graph/', histogram_freq=0, write_graph=False, write_images=False)
#Settig CheckpointCallback
mcpCallback = ModelCheckpoint(MODELS_PATH + 'model_checkpoint.h5', monitor='val_loss', save_weights_only=True, save_best_only=True, period=1)
#Settig EarlyStoppingtCallback
esCallback = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, start_epoch = 15)
#Setting variables
batch_size = 16
#Loading dataset
train_dataset = load_speed_dataset(data_dir = 'training')
val_dataset = load_speed_dataset(data_dir = 'validation')
#Setting generators
train_generator = SpeedDataGenerator(train_dataset, batch_size)
val_generator = SpeedDataGenerator(val_dataset, batch_size)
#Fit model to data
print("Starting training...")
parallel_model.fit_generator(train_generator, validation_data=val_generator,
max_queue_size=10, workers=10, use_multiprocessing=True,
epochs=100, verbose=1, callbacks=[tbCallback, mcpCallback, esCallback])
#Saving trained model
print('Saving trained model...')
model.save_weights(MODELS_PATH + 'trained_weights.h5')
print('Model saved. \nTraining ended.')
| [
"mirko.zaff@gmail.com"
] | mirko.zaff@gmail.com |
b8c949054e759c0ff8197cab3e98c993622ca4fd | e36225e61d95adfabfd4ac3111ec7631d9efadb7 | /problems/SD/auto/problem56_SD.py | 217b7658e448cb1ce9116acecf7c8bf45d4d51dd | [
"BSD-3-Clause"
] | permissive | sunandita/ICAPS_Summer_School_RAE_2020 | d2ab6be94ac508e227624040283e8cc6a37651f1 | a496b62185bcfdd2c76eb7986ae99cfa85708d28 | refs/heads/main | 2023-01-01T02:06:40.848068 | 2020-10-15T17:25:01 | 2020-10-15T17:25:01 | 301,263,711 | 5 | 2 | BSD-3-Clause | 2020-10-15T17:25:03 | 2020-10-05T01:24:08 | Python | UTF-8 | Python | false | false | 1,214 | py | __author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
DURATION.TIME = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
DURATION.COUNTER = {
'unlatch1': 5,
'unlatch2': 5,
'holdDoor': 2,
'passDoor': 3,
'releaseDoor': 2,
'closeDoors': 3,
'move': 7,
'take': 2,
'put': 2,
}
rv.LOCATIONS = [1, 2, 3, 4, 5, 6]
rv.EDGES = {1: [4], 2: [5], 3: [6], 4: [1, 5], 5: [2, 4, 6], 6: [3, 5]}
rv.DOORS = ['d1', 'd2', 'd3']
rv.DOORLOCATIONS = {(1, 4): 'd3', (2, 5): 'd2', (3, 6): 'd1'}
rv.DOORTYPES = {'d1': 'ordinary', 'd2': 'spring', 'd3': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3', 'r4']
def ResetState():
state.load = {'r1': NIL, 'r2': NIL, 'r3': NIL, 'r4': NIL}
state.status = {'r1': 'free', 'r2': 'free', 'r3': 'free', 'r4': 'free'}
state.loc = {'r1': 4, 'r2': 5, 'r3': 4, 'r4': 4}
state.pos = {'o1': 3}
state.doorStatus = {'d1': 'closed', 'd2': 'closed', 'd3': 'closed', }
state.doorType = {'d1': UNK, 'd2': UNK, 'd3': UNK, }
tasks = {
10: [['fetch', 'r1', 'o1', 3]],
}
eventsEnv = {
} | [
"sunandita.patra@gmail.com"
] | sunandita.patra@gmail.com |
d15700d5e54ef4902dac5a4fdeab8419b868866a | fd9d661ebc70f97993a8a381b62177fb191b5005 | /对称的二叉树/hello.py | 11c08c2b9ce72d6ac8e71886a00b6a2e121aa89f | [] | no_license | blankxz/LCOF | 441db27ba42c0d10cdcfcb45ea36d72308e05a47 | aea440d51dee61b03ca2cd1edafda661971331e6 | refs/heads/master | 2021-01-26T05:02:55.455887 | 2020-07-06T01:47:29 | 2020-07-06T01:47:29 | 243,318,530 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 637 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
if not root:
return True
return self.dfs(root.left,root.right)
def dfs(self,l,r):
if not l and not r:
return True
else:
if l and r:
if l.val!=r.val:
return False
return self.dfs(l.left,r.right) and self.dfs(l.right,r.left)
else:
return False | [
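# Minimal local check (hypothetical nodes; assumes the TreeNode class from the
# commented-out definition above is available):
#   root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(2)
#   Solution().isSymmetric(root)  # -> True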
"993010776@qq.com"
] | 993010776@qq.com |
885e4c2b7023b8adb10a4fba405a6c95df0d9b57 | 6463afadc13456a81760f8269ba63bb4892eb928 | /源码/Pythonpa/ch19/Thread.py | 8e09787c4529f2bc7e97715c4d6054a5bc8d304c | [] | no_license | brucehzhai/Python-JiangHong | ebc43f34df0f6f3e63b948c68bb8d1ce01b77a5d | 4543e18c489d6a655a0742cd3ba622a2aca0c882 | refs/heads/master | 2023-04-10T20:26:37.656432 | 2019-03-12T07:46:50 | 2019-03-12T07:46:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | import threading, time, random
def timer(interval):
for i in range(5):
        time.sleep(random.choice(range(interval)))  # sleep a random 0..interval-1 seconds
        thread_id = threading.get_ident()  # get the identifier of the current thread
        print('Thread:{0} Time:{1}\n'.format(thread_id, time.ctime()))

def test():  # use threading.Thread() to create 2 new threads
    t1 = threading.Thread(target=timer, args=(5,))  # create a thread
    t2 = threading.Thread(target=timer, args=(5,))  # create a thread
    t1.start(); t2.start()  # start both threads
if __name__=='__main__':
test()
| [
"a18749290902@163.com"
] | a18749290902@163.com |
efa9c7a2d9f7fdaea44368b2929cfd7519e20b2f | 63ace0431171016aa5af1517fc332c6faca3e219 | /PythonCalculator/PythonCalculator.py | 266fe78c119be99fd2ac3cad5e5b759de583c3b5 | [] | no_license | pacolamuerte/PythonCalculator | 861a33788b8ba01d9ec9c10085ab18f5878c28a2 | a8656e7579a79ead6f9791a6b22904dc5f181b3c | refs/heads/master | 2022-11-12T19:17:55.155011 | 2020-07-02T12:52:57 | 2020-07-02T12:52:57 | 276,645,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | #define the start and print what the program is
def start():
print("Area Calculator in Python")
print("="*25)
#start of the while loop. Makes it so we don't get kicked out if wrong input
while True:
_input_ = input("Enter 'C' for circle or 'T' for triangle or 'R' for Rectangle: ")
#formulas for the different area calculations
if _input_ == 'C':
radius = float(input("what is the radius of the circle in meters (m)?: "))
            area_c = 3.14159 * radius ** 2  # area = pi * r^2
            print("The area of the circle is " + str(area_c) + " m2")
elif _input_ == 'T':
base = float(input("what is the base measurement (m)?: "))
            height = float(input("what is the height measurement (m)?: "))
area_t = 0.5 * base * height
print("The area of the triangle is " + str(area_t) + " m2")
elif _input_ == 'R':
length = float(input("what is the length measurement (m)?: "))
            width = float(input("what is the width measurement (m)?: "))
area_r = length * width
print("The area of the rectangle is " + str(area_r) + " m2")
else:
print("Sorry, that was an invalid command!") | [
"46374397+pacolamuerte@users.noreply.github.com"
] | 46374397+pacolamuerte@users.noreply.github.com |
565ffcd5174d163fc98b4e93aece9ed4cecbd461 | c2c3f5a399970003e715b592f6087eb21693bca7 | /sdi_pipeline/extract.py | 333107e0dcfc20ba5d66fc15b7d9e367c7645b8f | [
"MIT"
] | permissive | andrewhstewart/SDI | a9cbe74b2614dc8c5789bb1b125ce0dba79b145e | 19d52bd5e13c2128c083776712672becf8b6ab45 | refs/heads/master | 2018-11-12T12:43:16.928870 | 2018-10-10T21:53:07 | 2018-10-10T21:53:07 | 108,577,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import sex
import psf
import glob
def EXTRACT():
path = input("-> Enter path to target's exposure time directory: ")
images = glob.glob(path + '/data/*.fits')
psf_data = glob.glob(path + '/psf/*')
if len(psf_data) == 3*len(images):
sex.sextractor(path)
sex.src_filter(path)
else:
sex.sextractor_psf(path)
psf.psfex(path)
sex.sextractor(path)
sex.src_filter(path)
if __name__ == '__main__':
    EXTRACT()
"andrew.henry.stewart@emory.edu"
] | andrew.henry.stewart@emory.edu |
f2003fc1ef757a8559afeb935f094fafed2db13c | 78316fa921873e5b04c0269f3a4167441efd5313 | /about_page_test.py | babdada7778147612a73f6c4c60126e2aed7380b | [] | no_license | Oceanblue713/kitchen-calc-test | 2105d5fa38467eacf5eeea2df84d4384e9a991d5 | 7dddcf117b5b674656e632b6ba7d800ee78d3091 | refs/heads/master | 2022-11-25T17:02:41.324815 | 2020-07-27T22:54:37 | 2020-07-27T22:54:37 | 276,963,053 | 0 | 0 | null | 2020-07-27T22:54:39 | 2020-07-03T18:22:52 | Python | UTF-8 | Python | false | false | 1,121 | py | from selenium import webdriver
from home_locators import *
import time
import unittest
class AboutPageTestCase(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Chrome('./chromedriver')
self.browser.set_window_size(414, 736)
self.addCleanup(self.browser.quit)
def test_aboutPage(self):
self.browser.get("https://www.yourkitchenapp.com")
time.sleep(1)
about_link = self.browser.find_element(*HomePage.about_link)
about_link.click()
time.sleep(2)
about_page_text = self.browser.find_element(*AboutPage.about_this_page)
self.assertIn('About This Page', about_page_text.text)
print (about_page_text.text)
about_the_creator = self.browser.find_element(*AboutPage.about_the_creator)
self.assertIn("About The Creator", about_the_creator.text)
print (about_the_creator.text)
home_button = self.browser.find_element(*Buttons.home_button)
home_button.click()
time.sleep(1)
print("About page test is done")
if __name__ == '__main__':
unittest.main(verbosity=1) | [
"amedamachip713@gmail.com"
] | amedamachip713@gmail.com |
607b060e4efaed1ce4b9888f5b223a931f628f48 | ed4f294b5d7df3237fb45d8eb4f3972e5e577316 | /venv/Scripts/HC_KBR-gspread.py | 74fec7ad1425f121ead26ca91076c1d7cb6c230a | [] | no_license | superfonz/BQ_Projects | fdc027b6d83d12553447cd5690bb6d666ad81440 | 7a20c011e3fe4610e94693723fb394836fd7b898 | refs/heads/master | 2022-12-25T00:04:11.390667 | 2020-09-28T13:31:44 | 2020-09-28T13:31:44 | 246,917,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,101 | py | import pgpy
import os
import gcsfs
from google.cloud import storage
import paramiko
import json
import io
import pandas as pd
import pandas_gbq
from datetime import datetime
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import openpyxl as opxl
import re
import numpy as np
os.environ[
"GOOGLE_APPLICATION_CREDENTIALS"] = '/Users/alfonzosanfilippo/PycharmProjects/BQ_Projects/venv/Resource/hireclix.json'
def KBRImport():
# ---init---------
storage_client = storage.Client()
scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"]
credraw = storage_client.get_bucket('hc_tokens_scripts').blob(
'Tokens/hireclix-googlesheets.json').download_as_string()
credjson = json.loads(credraw)
cred = ServiceAccountCredentials.from_json_keyfile_dict(credjson, scope)
gclient = gspread.authorize(cred)
sheet = gclient.open_by_key('1nQDxuJVTjfFRSGDIr_eqeT6rJ2yUkZB9kGQWh0GP7d8').worksheet('Automated ATS by BU')
# -----dev tool-----
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# -----load STFP server-----
data = storage_client.get_bucket('hc_tokens_scripts').blob('Tokens/KBRFTP.json').download_as_string()
json_data = json.loads(data)
transport = paramiko.Transport(json_data["host"], 22)
transport.connect(username=json_data["username"], password=json_data["password"])
sftp = paramiko.SFTPClient.from_transport(transport)
x = sftp.open("HireClix_Data.csv.pgp", 'rb').read()
    toread = io.BytesIO()  # Removes empty null char from string / prep for decrypt
toread.write(x)
toread.seek(0)
# ------pgp decrypt------
if storage_client.get_bucket('hc_tokens_scripts').blob('Tokens/KBR (FD2E83EC) – Secret.asc').exists():
with gcsfs.GCSFileSystem(project="hireclix").open('hc_tokens_scripts/Tokens/KBR (FD2E83EC) – Secret.asc',
'rb') as token:
creds = pgpy.PGPKey().from_blob(token.read())
with creds[0].unlock("hireclix10") as ukey:
message = pgpy.PGPMessage().from_blob(toread.read())
decryptedmessage = ukey.decrypt(message).message
decryptedmessagestr = decryptedmessage.decode()
DMIo = io.StringIO(decryptedmessagestr)
dataframe = pd.read_csv(DMIo)
else:
print("PGP Token not Found, please fix")
raise FileNotFoundError
# ----transform data----
dataframe.rename(columns={'Application Date': 'Application_Date',
'Job Requisition ID': 'Job_Requisition_ID',
'Job Posting Title': 'Job_Posting_Title',
'Job Requisition Primary Location': 'Job_Requisition_Primary_Location',
'Job Requisition Status': 'Job_Requisition_Status',
'Is Evergreen': 'Is_Evergreen',
'First Name': 'First_Name',
'Last Name': 'Last_Name',
'Candidate ID': 'Candidate_ID',
'Candidate Location': 'Candidate_Location',
'Candidate Stage': 'Candidate_Stage',
'Candidate Step': 'Candidate_Step',
'Source': 'Source',
'Referred by': 'Referred_by',
'Job Code': 'Job_Code',
'Security Sub Region': 'Security_Sub_Region'}, inplace=True)
dataframe['Application_Date'] = pd.to_datetime(dataframe['Application_Date'], errors='coerce').dt.date
# ----pivot table -----
pivot = dataframe.pivot_table(index=['Application_Date', 'Source', 'Security_Sub_Region', 'Job_Posting_Title',
'Job_Requisition_Primary_Location', 'Job_Requisition_ID'],
columns=['Candidate_Stage'], values=['Candidate_ID'], aggfunc='count')
pivot.reset_index(inplace=True)
axes = pivot.axes[1]
columnheaders = []
for axis in axes:
if axis[1] == "":
columnheaders.append(axis[0])
elif axis[0] == "Candidate_ID":
columnheaders.append(axis[1])
else:
columnheaders.append("unknown")
pivotval = pivot.values
atslst = [columnheaders + ["Month-Year", "Month", "Year", "Applies", "Quality Applies"]]
ind = 1
# -----transform pivot for Google sheets-----
for x in pivotval:
ind += 1
month = str(x[0].strftime("%B"))
year = str(x[0].strftime("%Y"))
dates = month[0:3] + "-" + year
ap = '=SUM(INDIRECT("G' + str(ind) + ':P' + str(ind) + '"))' # insert Applications formula
qap = '=SUM(INDIRECT("I' + str(ind) + ':M' + str(ind) + '"))' # insert Quality Applications formula
templst = []
for ex in range(0, len(x)):
templst.append(str(x[ex])) # format a temp list in the correct manner
templst.extend([dates, month, year, ap, qap])
atslst.append(templst)
for i in range(1, len(atslst)):
for val in range(5, len(atslst[i])):
if atslst[i][val] == 'nan':
atslst[i][val] = ''
            elif re.match(r'[0-9]+\.[0-9]+', atslst[i][val]):
                atslst[i][val] = int(re.sub(r"\.[0-9]+", "", atslst[i][val]))
sheet.clear()
sheet.append_rows(atslst, "USER_ENTERED")
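    # "USER_ENTERED" makes Sheets parse each cell as if a user typed it,
    # so the =SUM(...) strings built above become live formulas, not text.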
today = datetime.today().date()
dataframe.to_csv('gs://hc_kbr_ats/File_Completed/' + "kbr_data_ " + str(today.month) + "." +
str(today.day) + "." + str(today.year) + ".csv", index=False, header=True)
header = ["Application_Date", "Source", "Security_Sub_Region", "Job_Posting_Title",
"Job_Requisition_Primary_Location", "Job_Requisition_ID", "Background Check", "Declined by Candidate",
"Hire", "Interview", "Offer", "Offer/Employment Agreement", "Pre-Employment Checklist", "Rejected",
"Review", "Screen", "Month-Year", "Month", "Year", "Applies", "Quality Applies"]
mod = 0
sheetaxis = sheet.get("A1:W1")
for x in range(0, len(sheetaxis[0])): # modify header locations post insert
for i in range(0, len(header)):
if (i - mod) == x:
if sheetaxis[0][x] != header[i]:
sheet.insert_col([""], i + 1)
mod += 1
sheet.insert_row(header)
sheet.delete_rows(2, 2)
if __name__ == '__main__':
KBRImport()
# key = pgpy.PGPKey().from_file("/Users/alfonzosanfilippo/Desktop/KBR Keys/KBR (FD2E83EC) – Secret.asc")
#
# with key[0].unlock("hireclix10") as ukey:
# message = pgpy.PGPMessage().from_file("/Users/alfonzosanfilippo/Desktop/Banfield backup.csv.gpg")
# f = ukey.decrypt(message).message
# print(f)
| [
"noreply@github.com"
] | superfonz.noreply@github.com |
1b7e1365e13bbcbf9319626cc91f59b53063bfcc | 9dc16419cc28d7f83495cf26bdd0c1c81e6802b2 | /CLI/To Speech/Text to Speech/text_to_speech.py | 8533b95cc016ec26c0a3d64eff6f7b4776ae79cc | [
"Apache-2.0"
] | permissive | ypratham/python-aio | 937f92800d7267816365107388d590e29244d0ea | 39805da35481dac58a8bfd0e51398936b3fba249 | refs/heads/main | 2023-08-01T06:16:07.839554 | 2021-09-08T06:48:00 | 2021-09-08T06:48:00 | 404,224,241 | 0 | 0 | Apache-2.0 | 2021-09-08T05:38:54 | 2021-09-08T05:38:54 | null | UTF-8 | Python | false | false | 174 | py | from gtts import gTTS
import os
text = 'Demo Speech'
language = 'en'
speech = gTTS(text=text, lang=language, slow=False)
speech.save('text.mp3')
os.system('start text.mp3')
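# Note: 'start' is a Windows shell command; on macOS/Linux the rough
# equivalents would be 'open' / 'xdg-open' (untested assumption here).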
| [
"codewithchin@gmail.com"
] | codewithchin@gmail.com |
d996340d1540e127150ad2f2d15bab2211e0bb6b | 85b6e009c45f2dd530d8ae186feb7e6e67d076a8 | /cohesity_management_sdk/models/application_server_object_to_restore.py | b3355a72c5c37251635935b3bf33acfb546d1799 | [
"MIT"
] | permissive | priyambiswas0/management-sdk-python | 4a60153b038d0a04de02f2308362a2531b0ff9cb | 5807c85e003f271ce069b52529b31abfd08ec153 | refs/heads/master | 2021-10-20T05:43:34.626369 | 2018-05-22T06:04:20 | 2019-02-25T23:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,346 | py | # Copyright 2019 Cohesity Inc.
# -*- coding: utf-8 -*-
import cohesity_management_sdk.models.sql_application_server_restore_parameters
class ApplicationServerObjectToRestore(object):
"""Implementation of the 'Application Server object to restore.' model.
Specifies the Application Server to restore and parameters specific to
that application.
Attributes:
application_server_id (long|int): Specifies the Application Server to
restore (for example, kSQL).
sql_restore_parameters (SQLApplicationServerRestoreParameters):
Specifies the parameters specific the Application Server
instance.
target_host_id (long|int): Specifies the target host if the
application is to be restored to a different host. If this is
empty, then the application is restored to the original host,
which is the hosting Protection Source.
target_root_node_id (long|int): Specifies the registered root node,
like vCenter, of targetHost. If this is empty, then it is assumed
the root node of the target host is the same as the host
Protection Source of the application.
"""
# Create a mapping from Model property names to API property names
_names = {
"application_server_id":'applicationServerId',
"sql_restore_parameters":'sqlRestoreParameters',
"target_host_id":'targetHostId',
"target_root_node_id":'targetRootNodeId'
}
def __init__(self,
application_server_id=None,
sql_restore_parameters=None,
target_host_id=None,
target_root_node_id=None):
"""Constructor for the ApplicationServerObjectToRestore class"""
# Initialize members of the class
self.application_server_id = application_server_id
self.sql_restore_parameters = sql_restore_parameters
self.target_host_id = target_host_id
self.target_root_node_id = target_root_node_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
application_server_id = dictionary.get('applicationServerId')
sql_restore_parameters = cohesity_management_sdk.models.sql_application_server_restore_parameters.SQLApplicationServerRestoreParameters.from_dictionary(dictionary.get('sqlRestoreParameters')) if dictionary.get('sqlRestoreParameters') else None
target_host_id = dictionary.get('targetHostId')
target_root_node_id = dictionary.get('targetRootNodeId')
# Return an object of this model
return cls(application_server_id,
sql_restore_parameters,
target_host_id,
target_root_node_id)
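# Minimal usage sketch (hypothetical values, keys per the _names mapping above):
#   obj = ApplicationServerObjectToRestore.from_dictionary(
#       {'applicationServerId': 1, 'targetHostId': 42})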
| [
"ashish@cohesity.com"
] | ashish@cohesity.com |
c2a9f3ada9b299af99e9f67c572b1b8c3cb2dd4c | c1e58d39329b54b0bfa270474720519894930328 | /project/main.py | ba240a750c9b4382d0d2fbc2c714d514caa59311 | [] | no_license | simms21/argo-test-app | c4bdd85aaac730d3c27d0510861cf8f03fe05fbe | 4baa7248850ed25a15c937e035c6b3d5eff74762 | refs/heads/main | 2023-01-31T16:22:25.806468 | 2020-12-19T00:31:12 | 2020-12-19T00:31:12 | 322,727,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,432 | py | import socket
import sys
from thread import *
host = '0.0.0.0'
port = 80
methods = ['GET', 'HEAD', 'POST', 'PUT','DELETE','CONNECT','OPTIONS','TRACE','PATCH']
versions = ['HTTP/1.1','HTTP/1.0','HTTP/2.0']
allow = ['html','json','jpg','png','js','css']
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind((host, port))
except socket.error as e:
print(str(e))
s.listen(5)
print('Waiting for a connection.')
def threaded_client(conn):
while True:
data = conn.recv(10000)
#reply = 'Server output: ' + data.decode('utf-8')
if not data:
#conn.sendall(str.encode(reply))
#conn.close()
break
data2 = data.decode('utf-8')
head = data2.splitlines()
if head[0].split(' ')[0] in methods and head[0].split(' ')[2] in versions:
if head[0].split(' ')[1] == "/":
filename = 'index.html'
else:
filename= head[0].split(' ')[1][1:]
try:
header=""
if filename.endswith("html"):
header = "text/html"
elif filename.endswith("css"):
header = "text/css"
elif filename.endswith("png"):
header = "image/png"
else:
                reply = 'HTTP/1.1 404 Not Found\n\n ERROR3'
conn.sendall(str.encode(reply))
conn.close()
break
with open(filename, 'rb') as f:
conn.send(str.encode('HTTP/1.1 200 OK\n' + 'Content-Type: ' + header + "\n\n"))
data = f.read()
print(data)
conn.send(data)
conn.close()
break
except:
                reply = 'HTTP/1.1 404 Not Found\n\n ERROR1'
conn.sendall(str.encode(reply))
conn.close()
exit_thread()
break
else:
reply = 'HTTP/1.1 400 Bad Request\n'
conn.sendall(str.encode(reply))
conn.close()
exit_thread()
break
conn.close()
exit_thread()
while True:
conn, addr = s.accept()
print('connected to: ' + addr[0] + ':' + str(addr[1]))
start_new_thread(threaded_client, (conn,))
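# Note: this uses the Python 2 `thread` module; under Python 3 the rough
# equivalent is `_thread.start_new_thread`, or more idiomatically
# `threading.Thread(target=threaded_client, args=(conn,)).start()`.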
| [
"simms21@mgial.com"
] | simms21@mgial.com |
7da03415017c14f0b26291a07ccc29a101517dc9 | 37069009dd428ce59819ffea2fcffc07dda6e712 | /django_analyze/migrations/0056_auto__add_field_genotype_production_evaluating__add_field_genotype_pro.py | 9b787065f00ad6ec661fc5dd81eefd6d9211d6c0 | [] | no_license | chrisspen/django-analyze | 829f560d7c5f2fb1c19fc07bc77cb1a83238e696 | 421ee35235f76ff8657f7befe5212acd7ccf3989 | refs/heads/master | 2020-04-28T15:42:51.773823 | 2015-04-18T14:50:02 | 2015-04-18T14:50:02 | 14,995,029 | 2 | 2 | null | 2014-07-07T12:39:22 | 2013-12-06T22:26:29 | Python | UTF-8 | Python | false | false | 20,278 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Genotype.production_evaluating'
db.add_column(u'django_analyze_genotype', 'production_evaluating',
self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True),
keep_default=False)
# Adding field 'Genotype.production_evaluating_pid'
db.add_column(u'django_analyze_genotype', 'production_evaluating_pid',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_fresh'
db.add_column(u'django_analyze_genotype', 'production_fresh',
self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True),
keep_default=False)
# Adding field 'Genotype.production_valid'
db.add_column(u'django_analyze_genotype', 'production_valid',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True),
keep_default=False)
# Adding field 'Genotype.production_total_parts'
db.add_column(u'django_analyze_genotype', 'production_total_parts',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_complete_parts'
db.add_column(u'django_analyze_genotype', 'production_complete_parts',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_success_parts'
db.add_column(u'django_analyze_genotype', 'production_success_parts',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_ontime_parts'
db.add_column(u'django_analyze_genotype', 'production_ontime_parts',
self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_success_ratio'
db.add_column(u'django_analyze_genotype', 'production_success_ratio',
self.gf('django.db.models.fields.FloatField')(db_index=True, null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_ontime_ratio'
db.add_column(u'django_analyze_genotype', 'production_ontime_ratio',
self.gf('django.db.models.fields.FloatField')(db_index=True, null=True, blank=True),
keep_default=False)
# Adding field 'Genotype.production_error'
db.add_column(u'django_analyze_genotype', 'production_error',
self.gf('django.db.models.fields.TextField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Genotype.production_evaluating'
db.delete_column(u'django_analyze_genotype', 'production_evaluating')
# Deleting field 'Genotype.production_evaluating_pid'
db.delete_column(u'django_analyze_genotype', 'production_evaluating_pid')
# Deleting field 'Genotype.production_fresh'
db.delete_column(u'django_analyze_genotype', 'production_fresh')
# Deleting field 'Genotype.production_valid'
db.delete_column(u'django_analyze_genotype', 'production_valid')
# Deleting field 'Genotype.production_total_parts'
db.delete_column(u'django_analyze_genotype', 'production_total_parts')
# Deleting field 'Genotype.production_complete_parts'
db.delete_column(u'django_analyze_genotype', 'production_complete_parts')
# Deleting field 'Genotype.production_success_parts'
db.delete_column(u'django_analyze_genotype', 'production_success_parts')
# Deleting field 'Genotype.production_ontime_parts'
db.delete_column(u'django_analyze_genotype', 'production_ontime_parts')
# Deleting field 'Genotype.production_success_ratio'
db.delete_column(u'django_analyze_genotype', 'production_success_ratio')
# Deleting field 'Genotype.production_ontime_ratio'
db.delete_column(u'django_analyze_genotype', 'production_ontime_ratio')
# Deleting field 'Genotype.production_error'
db.delete_column(u'django_analyze_genotype', 'production_error')
models = {
'django_analyze.epoche': {
'Meta': {'ordering': "('genome', '-index')", 'unique_together': "(('genome', 'index'),)", 'object_name': 'Epoche'},
'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'epoches'", 'to': "orm['django_analyze.Genome']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'max_fitness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'mean_fitness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'min_fitness': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'django_analyze.gene': {
'Meta': {'ordering': "('-dependee_gene__id', 'name')", 'unique_together': "(('genome', 'name'),)", 'object_name': 'Gene'},
'coverage_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'dependee_gene': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dependent_genes'", 'null': 'True', 'to': "orm['django_analyze.Gene']"}),
'dependee_value': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'exploration_priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'genes'", 'to': "orm['django_analyze.Genome']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_increment': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'max_value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'max_value_observed': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'min_value': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'min_value_observed': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mutation_weight': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'django_analyze.genedependency': {
'Meta': {'unique_together': "(('gene', 'dependee_gene', 'dependee_value'),)", 'object_name': 'GeneDependency'},
'dependee_gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependents'", 'to': "orm['django_analyze.Gene']"}),
'dependee_value': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dependencies'", 'to': "orm['django_analyze.Gene']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'positive': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'django_analyze.genome': {
'Meta': {'object_name': 'Genome'},
'_epoche': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'current_genome'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Epoche']"}),
'delete_inferiors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'elite_ratio': ('django.db.models.fields.FloatField', [], {'default': '0.1'}),
'epoche': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'epoche_stall': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'epoches_since_improvement': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'evaluating_part': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'evaluation_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}),
'evaluator': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'evolution_start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'evolving': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'max_species': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'maximum_evaluated_population': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1000'}),
'maximum_population': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10'}),
'min_fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mutation_rate': ('django.db.models.fields.FloatField', [], {'default': '0.1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'production_at_best': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'production_genotype': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'production_genomes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Genotype']"}),
'production_genotype_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ratio_evaluated': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'django_analyze.genotype': {
'Meta': {'ordering': "('-fitness',)", 'unique_together': "(('genome', 'fingerprint'),)", 'object_name': 'Genotype', 'index_together': "(('valid', 'fresh', 'fitness'), ('genome', 'fresh'))"},
'complete_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epoche': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'genotypes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Epoche']"}),
'epoche_of_evaluation': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'evaluating': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'evaluating_pid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '700', 'null': 'True', 'db_column': "'fingerprint'", 'blank': 'True'}),
'fingerprint_fresh': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fitness': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'fitness_evaluation_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fitness_evaluation_datetime_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fresh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'gene_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'generation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'genotypes'", 'to': "orm['django_analyze.Genome']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immortal': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'mean_absolute_error': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'mean_evaluation_seconds': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ontime_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'ontime_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'production_complete_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'production_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'production_evaluating': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'production_evaluating_pid': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'production_fresh': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'production_ontime_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'production_ontime_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'production_success_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'production_success_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'production_total_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'production_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'genotypes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['django_analyze.Species']"}),
'success_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'success_ratio': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'total_evaluation_seconds': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_parts': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
},
'django_analyze.genotypegene': {
'Meta': {'ordering': "('gene__name',)", 'unique_together': "(('genotype', 'gene'),)", 'object_name': 'GenotypeGene'},
'_value': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'db_column': "'value'"}),
'_value_genome': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.Genome']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'gene_values'", 'to': "orm['django_analyze.Gene']"}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'genes'", 'to': "orm['django_analyze.Genotype']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'django_analyze.genotypegeneillegal': {
'Meta': {'object_name': 'GenotypeGeneIllegal', 'managed': 'False'},
'dependee_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'dependee_value': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'gene_value': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.GenotypeGene']", 'on_delete': 'models.DO_NOTHING', 'primary_key': 'True', 'db_column': "'illegal_genotypegene_id'"}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'illegal_gene_values'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'illegal_genotype_id'", 'to': "orm['django_analyze.Genotype']"}),
'illegal_gene_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'illegal_value': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
u'django_analyze.genotypegenemissing': {
'Meta': {'object_name': 'GenotypeGeneMissing', 'managed': 'False'},
'default': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'dependee_gene': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missing_dependents'", 'on_delete': 'models.DO_NOTHING', 'primary_key': 'True', 'db_column': "'dependee_gene_id'", 'to': "orm['django_analyze.Gene']"}),
'gene': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_analyze.Gene']", 'on_delete': 'models.DO_NOTHING', 'primary_key': 'True', 'db_column': "'gene_id'"}),
'gene_name': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'genotype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missing_gene_values'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'genotype_id'", 'to': "orm['django_analyze.Genotype']"})
},
'django_analyze.species': {
'Meta': {'ordering': "('genome', 'index')", 'unique_together': "(('genome', 'index'),)", 'object_name': 'Species', 'index_together': "(('genome', 'index'),)"},
'centroid': ('picklefield.fields.PickledObjectField', [], {'null': 'True', 'blank': 'True'}),
'genome': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'species'", 'to': "orm['django_analyze.Genome']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'population': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
}
}
complete_apps = ['django_analyze'] | [
"chris@coronis"
] | chris@coronis |
952b2d58816e31fc9339095aeddb0c46d20e0471 | f027240cfabeafc9b0bd6c76b911d2ab6212a64e | /scripts/fund_and_withdraw.py | 1f581f9339f38cd2ba1574ca92048b79950b0352 | [] | no_license | EFJB/brownie_fund_me | ff7a04f91c13d27ac598f9fc24c319e19f08bd5e | d413a8549c14149827a8b21b8724660ac2b7506e | refs/heads/master | 2023-08-15T19:00:31.688244 | 2021-10-02T03:33:37 | 2021-10-02T03:33:37 | 412,679,258 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from brownie import FundMe
from scripts.helpful_scripts import get_account
def fund():
fund_me = FundMe[-1]
account = get_account()
entrance_fee = fund_me.getEntranceFee()
    print(f"The current entrance fee is {entrance_fee}")
print("Funding")
fund_me.fund({"from": account, "value": entrance_fee})
def withdraw():
fund_me = FundMe[-1]
account = get_account()
fund_me.withdraw({"from": account})
def main():
fund()
withdraw()
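# Typically run through Brownie's script runner, e.g.
#   brownie run scripts/fund_and_withdraw.py
# (assumes a FundMe contract is already deployed in this project).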
| [
"engel24jb@yahoo.com"
] | engel24jb@yahoo.com |
403bf8bc999633b433fcc935fb23ff8f06f2e1a2 | 2529e1d1c923b664be93f241fc73c11f10d99639 | /PyQuante/OEP.py | 1e6b701beb419cb57914927bf4178d84b7396958 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | punitjha/pyquante | bc20dd1de1da0c041df19c74275ac0d36792677d | f5cae27f519b1c1b70afbebfe8b5c83cb4b3c2a6 | refs/heads/master | 2022-01-23T04:15:32.516250 | 2009-11-16T00:06:50 | 2009-11-16T00:06:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,673 | py | "Yang/Wu's OEP implementation, in PyQuante."
from math import sqrt
from PyQuante.NumWrap import zeros,matrixmultiply,transpose,dot,identity,\
array,solve
from PyQuante.Ints import getbasis, getints, getJ,get2JmK,getK
from PyQuante.LA2 import geigh,mkdens,trace2,simx
from PyQuante.hartree_fock import get_fock
from PyQuante.CGBF import three_center
from PyQuante.optimize import fminBFGS
from PyQuante.fermi_dirac import get_efermi, get_fermi_occs,mkdens_occs,\
get_entropy,mkdens_fermi
import logging
gradcall=0
class EXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a HF or a DFT calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nclosed, self.nopen = self.molecule.get_closedopen()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbs = self.solver.orbs
self.orbe = self.solver.orbe
self.Gij = []
for g in range(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in range(self.nbf):
ibf = self.bfs[i]
for j in range(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbs,0,self.nclosed)
J0 = getJ(self.Ints,D0)
Vfa = (2.0*(self.nel-1.0)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(self.nbf,'d')
return
def iterate(self,**opts):
self.iter = 0
self.etemp = opts.get("etemp",False)
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
self.Hoep = get_Hoep(b,self.H0,self.Gij)
self.orbe,self.orbs = geigh(self.Hoep,self.S)
if self.etemp:
self.D,self.entropy = mkdens_fermi(self.nel,self.orbe,self.orbs,
self.etemp)
else:
self.D = mkdens(self.orbs,0,self.nclosed)
self.entropy=0
self.F = get_fock(self.D,self.Ints,self.h)
self.energy = trace2(self.h+self.F,self.D)+self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
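        # Analytic dE/db: for each potential-basis function g, sum the
        # occupied-virtual Fock couplings weighted by orbital-energy
        # denominators (a first-order perturbation expression).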
energy = self.get_energy(b)
Fmo = simx(self.F,self.orbs)
bp = zeros(self.nbf,'d')
for g in range(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbs)
# Now sum the appropriate terms to get the b gradient
for i in range(self.nclosed):
for a in range(self.nclosed,self.norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(self.orbe[i]-self.orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
class UEXXSolver:
"EXXSolver(solver)"
def __init__(self,solver):
# Solver is a pointer to a UHF calculation that has
# already converged
self.solver = solver
self.bfs = self.solver.bfs
self.nbf = len(self.bfs)
self.S = self.solver.S
self.h = self.solver.h
self.Ints = self.solver.Ints
self.molecule = self.solver.molecule
self.nel = self.molecule.get_nel()
self.nalpha, self.nbeta = self.molecule.get_alphabeta()
self.Enuke = self.molecule.get_enuke()
self.norb = self.nbf
self.orbsa = self.solver.orbsa
self.orbsb = self.solver.orbsb
self.orbea = self.solver.orbea
self.orbeb = self.solver.orbeb
self.Gij = []
for g in range(self.nbf):
gmat = zeros((self.nbf,self.nbf),'d')
self.Gij.append(gmat)
gbf = self.bfs[g]
for i in range(self.nbf):
ibf = self.bfs[i]
for j in range(i+1):
jbf = self.bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
D0 = mkdens(self.orbsa,0,self.nalpha)+mkdens(self.orbsb,0,self.nbeta)
J0 = getJ(self.Ints,D0)
Vfa = ((self.nel-1.)/self.nel)*J0
self.H0 = self.h + Vfa
self.b = zeros(2*self.nbf,'d')
return
def iterate(self,**opts):
self.etemp = opts.get("etemp",False)
self.iter = 0
logging.debug("iter Energy <b|b>")
logging.debug("---- ------ -----")
self.b = fminBFGS(self.get_energy,self.b,self.get_gradient,logger=logging)
return
def get_energy(self,b):
self.iter += 1
ba = b[:self.nbf]
bb = b[self.nbf:]
self.Hoepa = get_Hoep(ba,self.H0,self.Gij)
self.Hoepb = get_Hoep(bb,self.H0,self.Gij)
self.orbea,self.orbsa = geigh(self.Hoepa,self.S)
self.orbeb,self.orbsb = geigh(self.Hoepb,self.S)
if self.etemp:
self.Da,entropya = mkdens_fermi(2*self.nalpha,self.orbea,self.orbsa,
self.etemp)
self.Db,entropyb = mkdens_fermi(2*self.nbeta,self.orbeb,self.orbsb,
self.etemp)
self.entropy = 0.5*(entropya+entropyb)
else:
self.Da = mkdens(self.orbsa,0,self.nalpha)
self.Db = mkdens(self.orbsb,0,self.nbeta)
self.entropy=0
J = getJ(self.Ints,self.Da+self.Db)
Ka = getK(self.Ints,self.Da)
Kb = getK(self.Ints,self.Db)
self.Fa = self.h + J - Ka
self.Fb = self.h + J - Kb
self.energy = 0.5*(trace2(self.h+self.Fa,self.Da) +
trace2(self.h+self.Fb,self.Db))\
+ self.Enuke + self.entropy
if self.iter == 1 or self.iter % 10 == 0:
logging.debug("%4d %10.5f %10.5f" % (self.iter,self.energy,dot(b,b)))
return self.energy
def get_gradient(self,b):
energy = self.get_energy(b)
Fmoa = simx(self.Fa,self.orbsa)
Fmob = simx(self.Fb,self.orbsb)
bp = zeros(2*self.nbf,'d')
for g in range(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsa)
# Now sum the appropriate terms to get the b gradient
for i in range(self.nalpha):
for a in range(self.nalpha,self.norb):
bp[g] += Fmoa[i,a]*Gmo[i,a]/(self.orbea[i]-self.orbea[a])
for g in range(self.nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = simx(self.Gij[g],self.orbsb)
# Now sum the appropriate terms to get the b gradient
for i in range(self.nbeta):
for a in range(self.nbeta,self.norb):
bp[self.nbf+g] += Fmob[i,a]*Gmo[i,a]/(self.orbeb[i]-self.orbeb[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return bp
def exx(atoms,orbs,**opts):
return oep_hf(atoms,orbs,**opts)
def oep_hf(atoms,orbs,**opts):
"""oep_hf - Form the optimized effective potential for HF exchange.
See notes on options and other args in oep routine.
"""
return oep(atoms,orbs,get_exx_energy,get_exx_gradient,**opts)
def oep(atoms,orbs,energy_func,grad_func=None,**opts):
"""oep - Form the optimized effective potential for a given energy expression
oep(atoms,orbs,energy_func,grad_func=None,**opts)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
energy_func The function that returns the energy for the given method
grad_func The function that returns the force for the given method
Options
-------
verbose False Output terse information to stdout (default)
True Print out additional information
ETemp False Use ETemp value for finite temperature DFT (default)
float Use (float) for the electron temperature
bfs None The basis functions to use. List of CGBF's
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
verbose = opts.get('verbose',False)
ETemp = opts.get('ETemp',False)
opt_method = opts.get('opt_method','BFGS')
bfs = opts.get('bfs',None)
if not bfs:
basis = opts.get('basis',None)
bfs = getbasis(atoms,basis)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = opts.get('pbfs',None)
if not pbfs: pbfs = bfs
npbf = len(pbfs)
integrals = opts.get('integrals',None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = opts.get('bvec',None)
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in range(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in range(nbf):
ibf = bfs[i]
for j in range(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
b = fminBFGS(energy_func,b,grad_func,
(nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij),
logger=logging)
energy,orbe,orbs = energy_func(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=1)
return energy,orbe,orbs
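# Sketch of a typical call (argument values illustrative only):
#   energy, orbe, orbs = oep(atoms, orbs, get_exx_energy, get_exx_gradient,
#                            basis='6-31G**')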
def get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**opts):
"""Computes the energy for the OEP/HF functional
Options:
return_flag 0 Just return the energy
1 Return energy, orbe, orbs
2 Return energy, orbe, orbs, F
"""
return_flag = opts.get('return_flag',0)
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
if ETemp:
efermi = get_efermi(nel,orbe,ETemp)
occs = get_fermi_occs(efermi,orbe,ETemp)
D = mkdens_occs(orbs,occs)
entropy = get_entropy(occs,ETemp)
else:
D = mkdens(orbs,0,nocc)
F = get_fock(D,Ints,h)
energy = trace2(h+F,D)+Enuke
if ETemp: energy += entropy
iref = nel/2
gap = 627.51*(orbe[iref]-orbe[iref-1])
logging.debug("EXX Energy, B, Gap: %10.5f %10.5f %10.5f"
% (energy,sqrt(dot(b,b)),gap))
#logging.debug("%s" % orbe)
if return_flag == 1:
return energy,orbe,orbs
elif return_flag == 2:
return energy,orbe,orbs,F
return energy
def get_exx_gradient(b,nbf,nel,nocc,ETemp,Enuke,S,h,Ints,H0,Gij,**opts):
"""Computes the gradient for the OEP/HF functional.
return_flag 0 Just return gradient
1 Return energy,gradient
2 Return energy,gradient,orbe,orbs
"""
# Dump the gradient every 10 steps so we can restart...
global gradcall
gradcall += 1
#if gradcall % 5 == 0: logging.debug("B vector:\n%s" % b)
# Form the new potential and the new orbitals
energy,orbe,orbs,F = get_exx_energy(b,nbf,nel,nocc,ETemp,Enuke,
S,h,Ints,H0,Gij,return_flag=2)
Fmo = matrixmultiply(transpose(orbs),matrixmultiply(F,orbs))
norb = nbf
bp = zeros(nbf,'d') # dE/db
for g in range(nbf):
# Transform Gij[g] to MOs. This is done over the whole
# space rather than just the parts we need. I can speed
# this up later by only forming the i,a elements required
Gmo = matrixmultiply(transpose(orbs),matrixmultiply(Gij[g],orbs))
# Now sum the appropriate terms to get the b gradient
for i in range(nocc):
for a in range(nocc,norb):
bp[g] = bp[g] + Fmo[i,a]*Gmo[i,a]/(orbe[i]-orbe[a])
#logging.debug("EXX Grad: %10.5f" % (sqrt(dot(bp,bp))))
return_flag = opts.get('return_flag',0)
if return_flag == 1:
return energy,bp
elif return_flag == 2:
return energy,bp,orbe,orbs
return bp
def get_Hoep(b,H0,Gij):
Hoep = H0
# Add the contributions from the gaussian potential functions
# H[ij] += b[g]*<ibf|g|jbf>
for g in range(len(b)):
Hoep = Hoep + b[g]*Gij[g]
return Hoep
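# i.e. H_oep = H0 + sum_g b_g*G_g: the fixed reference Hamiltonian plus the
# effective potential expanded in the auxiliary (potential) basis.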
# Here's a much faster way to do this. Haven't figured out how to
# do it for more generic functions like OEP-GVB
def oep_hf_an(atoms,orbs,**opts):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_hf(atoms,orbs,**opts)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = opts.get('maxiter',100)
tol = opts.get('tol',1e-5)
bfs = opts.get('bfs',None)
if not bfs:
basis = opts.get('basis',None)
bfs = getbasis(atoms,basis)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = opts.get('pbfs',None)
if not pbfs: pbfs = bfs
npbf = len(pbfs)
integrals = opts.get('integrals',None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nocc,nopen = atoms.get_closedopen()
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
bp = zeros(nbf,'d')
bvec = opts.get('bvec',None)
if bvec:
assert len(bvec) == npbf
b = array(bvec)
else:
b = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in range(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in range(nbf):
ibf = bfs[i]
for j in range(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbs,0,nocc)
J0 = getJ(Ints,D0)
Vfa = (2*(nel-1.)/nel)*J0
H0 = h + Vfa
eold = 0
for iter in range(maxiter):
Hoep = get_Hoep(b,H0,Gij)
orbe,orbs = geigh(Hoep,S)
D = mkdens(orbs,0,nocc)
Vhf = get2JmK(Ints,D)
energy = trace2(2*h+Vhf,D)+Enuke
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
dV_ao = Vhf-Vfa
dV = matrixmultiply(transpose(orbs),matrixmultiply(dV_ao,orbs))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
Gkt = zeros((nbf,nbf),'d')
for k in range(nbf):
# This didn't work; in fact, it made things worse:
Gk = matrixmultiply(transpose(orbs),matrixmultiply(Gij[k],orbs))
for i in range(nocc):
for a in range(nocc,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbe[i]-orbe[a])
for l in range(nbf):
Gl = matrixmultiply(transpose(orbs),matrixmultiply(Gij[l],orbs))
for i in range(nocc):
for a in range(nocc,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbe[i]-orbe[a])
# This should actually be a pseudoinverse...
b = solve(X,c)
logging.info("Final OEP energy = %f" % energy)
return energy,orbe,orbs
def oep_uhf_an(atoms,orbsa,orbsb,**opts):
"""oep_hf - Form the optimized effective potential for HF exchange.
Implementation of Wu and Yang's Approximate Newton Scheme
from J. Theor. Comp. Chem. 2, 627 (2003).
oep_uhf(atoms,orbs,**opts)
atoms A Molecule object containing a list of the atoms
orbs A matrix of guess orbitals
Options
-------
bfs None The basis functions to use for the wfn
pbfs None The basis functions to use for the pot
basis_data None The basis data to use to construct bfs
integrals None The one- and two-electron integrals to use
If not None, S,h,Ints
"""
maxiter = opts.get('maxiter',100)
tol = opts.get('tol',1e-5)
ETemp = opts.get('ETemp',False)
bfs = opts.get('bfs',None)
if not bfs:
basis = opts.get('basis',None)
bfs = getbasis(atoms,basis)
# The basis set for the potential can be set different from
# that used for the wave function
pbfs = opts.get('pbfs',None)
if not pbfs: pbfs = bfs
npbf = len(pbfs)
integrals = opts.get('integrals',None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nclosed,nopen = atoms.get_closedopen()
nalpha,nbeta = nclosed+nopen,nclosed
Enuke = atoms.get_enuke()
# Form the OEP using Yang/Wu, PRL 89 143002 (2002)
nbf = len(bfs)
norb = nbf
ba = zeros(npbf,'d')
bb = zeros(npbf,'d')
# Form and store all of the three-center integrals
# we're going to need.
# These are <ibf|gbf|jbf> (where 'bf' indicates basis func,
# as opposed to MO)
# N^3 storage -- obviously you don't want to do this for
# very large systems
Gij = []
for g in range(npbf):
gmat = zeros((nbf,nbf),'d')
Gij.append(gmat)
gbf = pbfs[g]
for i in range(nbf):
ibf = bfs[i]
for j in range(i+1):
jbf = bfs[j]
gij = three_center(ibf,gbf,jbf)
gmat[i,j] = gij
gmat[j,i] = gij
# Compute the Fermi-Amaldi potential based on the LDA density.
# We're going to form this matrix from the Coulombic matrix that
# arises from the input orbitals. D0 and J0 refer to the density
# matrix and corresponding Coulomb matrix
D0 = mkdens(orbsa,0,nalpha)+mkdens(orbsb,0,nbeta)
J0 = getJ(Ints,D0)
Vfa = ((nel-1.)/nel)*J0
H0 = h + Vfa
eold = 0
for iter in range(maxiter):
Hoepa = get_Hoep(ba,H0,Gij)
        Hoepb = get_Hoep(bb,H0,Gij)
orbea,orbsa = geigh(Hoepa,S)
orbeb,orbsb = geigh(Hoepb,S)
if ETemp:
efermia = get_efermi(2*nalpha,orbea,ETemp)
occsa = get_fermi_occs(efermia,orbea,ETemp)
Da = mkdens_occs(orbsa,occsa)
efermib = get_efermi(2*nbeta,orbeb,ETemp)
occsb = get_fermi_occs(efermib,orbeb,ETemp)
Db = mkdens_occs(orbsb,occsb)
entropy = 0.5*(get_entropy(occsa,ETemp)+get_entropy(occsb,ETemp))
else:
Da = mkdens(orbsa,0,nalpha)
Db = mkdens(orbsb,0,nbeta)
J = getJ(Ints,Da) + getJ(Ints,Db)
Ka = getK(Ints,Da)
Kb = getK(Ints,Db)
energy = (trace2(2*h+J-Ka,Da)+trace2(2*h+J-Kb,Db))/2\
+Enuke
if ETemp: energy += entropy
if abs(energy-eold) < tol:
break
else:
eold = energy
logging.debug("OEP AN Opt: %d %f" % (iter,energy))
# Do alpha and beta separately
# Alphas
dV_ao = J-Ka-Vfa
dV = matrixmultiply(orbsa,matrixmultiply(dV_ao,transpose(orbsa)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in range(nbf):
Gk = matrixmultiply(orbsa,matrixmultiply(Gij[k],
transpose(orbsa)))
for i in range(nalpha):
for a in range(nalpha,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbea[i]-orbea[a])
for l in range(nbf):
Gl = matrixmultiply(orbsa,matrixmultiply(Gij[l],
transpose(orbsa)))
for i in range(nalpha):
for a in range(nalpha,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbea[i]-orbea[a])
# This should actually be a pseudoinverse...
ba = solve(X,c)
# Betas
dV_ao = J-Kb-Vfa
dV = matrixmultiply(orbsb,matrixmultiply(dV_ao,transpose(orbsb)))
X = zeros((nbf,nbf),'d')
c = zeros(nbf,'d')
for k in range(nbf):
Gk = matrixmultiply(orbsb,matrixmultiply(Gij[k],
transpose(orbsb)))
for i in range(nbeta):
for a in range(nbeta,norb):
c[k] += dV[i,a]*Gk[i,a]/(orbeb[i]-orbeb[a])
for l in range(nbf):
Gl = matrixmultiply(orbsb,matrixmultiply(Gij[l],
transpose(orbsb)))
for i in range(nbeta):
for a in range(nbeta,norb):
X[k,l] += Gk[i,a]*Gl[i,a]/(orbeb[i]-orbeb[a])
# This should actually be a pseudoinverse...
bb = solve(X,c)
logging.info("Final OEP energy = %f" % energy)
return energy,(orbea,orbeb),(orbsa,orbsb)
def test_old():
from PyQuante.Molecule import Molecule
from PyQuante.Ints import getbasis,getints
from PyQuante.hartree_fock import rhf
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
#mol = Molecule('HF',[('H',(0.,0.,0.)),('F',(0.,0.,0.898369))],
# units='Angstrom')
mol = Molecule('LiH',[(1,(0,0,1.5)),(3,(0,0,-1.5))],units = 'Bohr')
bfs = getbasis(mol)
S,h,Ints = getints(bfs,mol)
print "after integrals"
E_hf,orbe_hf,orbs_hf = rhf(mol,bfs=bfs,integrals=(S,h,Ints),DoAveraging=True)
print "RHF energy = ",E_hf
E_exx,orbe_exx,orbs_exx = exx(mol,orbs_hf,bfs=bfs,integrals=(S,h,Ints))
return
def test():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
solver = HFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = EXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=40000)
return
def utest():
from PyQuante import Molecule, HFSolver, DFTSolver, UHFSolver
logging.basicConfig(level=logging.DEBUG,format="%(message)s")
mol = Molecule("He",[(2,(0,0,0))])
mol = Molecule("Li",[(3,(0,0,0))],multiplicity=2)
solver = UHFSolver(mol)
solver.iterate()
print "HF energy = ",solver.energy
dft_solver = DFTSolver(mol)
dft_solver.iterate()
print "DFT energy = ",dft_solver.energy
oep = UEXXSolver(solver)
# Testing 0 temp
oep.iterate()
# Testing finite temp
oep.iterate(etemp=10000)
return
if __name__ == '__main__':
test()
utest()
| [
"rpmuller@64417113-1622-0410-aef8-ef15d1a3721e"
] | rpmuller@64417113-1622-0410-aef8-ef15d1a3721e |
560e64ea487be5a6a70bc4ef2e1a1d01a553ad9e | e9b7f94749026c0fbbdced8dc7b2a1b844062d90 | /p.7.1.py | 85466f348d87e686b502f6d1a70fb2f37d734bfb | [] | no_license | thijsishiernietgoedinaarts/prog | 54aa9cf61349946e03eb3f490e8004a3a4624d3d | e42f81eb9aaa39face5513e1bb6c6035c711ecce | refs/heads/master | 2020-04-01T20:47:19.847590 | 2018-10-23T11:16:41 | 2018-10-23T11:16:41 | 153,620,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | def convert(x):
y = x * 1.8 + 32
print(y)
convert(5)
def table(step):
    print('{:^6}{:^9}'.format('F', 'C'))
    for c in range(-30, 41, step):
        f = c * 1.8 + 32
        print('{:^6}{:^9}'.format(f, float(c)))
table(10)
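# First row printed: -22.0 / -30.0 (c = -30); last row: 104.0 / 40.0 (c = 40).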
"thijs.aarts.student@outlook.com"
] | thijs.aarts.student@outlook.com |
8b210db939d8b646a52e33fe434716acbc44ad4f | b7a78daeaf3ec5658b4d6e84033c80b1765a7a71 | /Chapter2/algorithm/sparta_algorithm/week2/04_delete_node_linked_list.py | c636473e8fb25d002857a3a6b497d6d3d471ec1e | [] | no_license | conagreen/TIL-hanghae99 | bc3350712ed99846d85fec25d5f3b36dce8b7eae | 9d6e71fd2f5c6e6bbb3ad13f77613b7d1860c2de | refs/heads/main | 2023-04-10T21:09:24.731110 | 2021-04-11T05:09:26 | 2021-04-11T05:09:26 | 343,802,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | # 링크드 리스트 원소 삭제
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self, value):
self.head = Node(value)
def append(self, value):
cur = self.head
while cur.next is not None:
cur = cur.next
cur.next = Node(value)
def print_all(self):
cur = self.head
while cur is not None:
print(cur.data)
cur = cur.next
def get_node(self, index):
node = self.head
count = 0
while count < index:
node = node.next
count += 1
return node
def add_node(self, index, value):
new_node = Node(value)
if index == 0:
new_node.next = self.head
self.head = new_node
return
else:
node = self.get_node(index-1)
next_node = node.next
node.next = new_node
new_node.next = next_node
def delete_node(self, index):
if index == 0:
self.head = self.head.next
else:
node = self.get_node(index-1)
node.next = node.next.next
linked_list = LinkedList(5)
linked_list.append(12)
linked_list.append(8)
linked_list.append(9)
linked_list.delete_node(1)
linked_list.print_all() | [
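# Expected output: 5, 8, 9 on separate lines; delete_node(1) unlinked the
# value 12 by pointing node 0 (5) directly at node 2 (8).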
"icoqwee@gmail.com"
] | icoqwee@gmail.com |
19defa82572fde3df12eaee0fd0887b789f1ae4d | b0551e7ad759e0d4c20a69035c81ee26147f1f37 | /tests/adl_func_backend/xAODlib/test_xaod_aggragate.py | f375bc8ad6e86d9dbd13e27731c5a3f120111d49 | [] | no_license | gordonwatts/functional_adl | 59192391492b1bee4d6d7eb4b071ddecad037119 | a691229c102658c98e71c8374cd80174a86834a1 | refs/heads/master | 2020-05-20T19:47:50.213706 | 2019-09-22T20:49:53 | 2019-09-22T20:49:53 | 185,730,019 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,548 | py | # Test out various things connected to the Aggregate call.
# That code is more complex than I'd like it!
from tests.adl_func_backend.xAODlib.utils_for_testing import *
from adl_func_client.event_dataset import EventDataset
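# The utils_for_testing module supplies exe_for_test plus line-inspection
# helpers (get_lines_of_code, find_line_with, ...) used below to probe the
# C++ source the backend generates for each query.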
def test_Aggregate_not_initial_const_SUM():
r = EventDataset("file://root.root") \
.Select("lambda e: e.Jets('AntiKt4EMTopoJets').Select(lambda j: j.pt()/1000).Sum()") \
.AsROOTTTree('dude.root', 'analysis', 'jetPT') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
l_sets = find_line_numbers_with("/1000", lines)
assert 2 == len(l_sets)
def test_count_after_single_sequence():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: j.pt()).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
assert 1 == ["for" in l for l in lines].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("for", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_count_after_single_sequence_with_filter():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: j.pt()).Where(lambda jpt: jpt>10.0).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
assert 1 == ["for" in l for l in lines].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("if", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_count_after_double_sequence():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").SelectMany(lambda j: e.Tracks("InnerTracks")).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
assert 2 == ["for" in l for l in lines].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("for", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_count_after_single_sequence_of_sequence():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: e.Tracks("InnerTracks")).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
assert 1 == ["for" in l for l in lines].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("for", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_count_after_double_sequence_with_filter():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").SelectMany(lambda j: e.Tracks("InnerTracks").Where(lambda t: t.pt()>10.0)).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
assert 2 == ["for" in l for l in lines].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("if", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_count_after_single_sequence_of_sequence_unwound():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: e.Tracks("InnerTracks")).SelectMany(lambda ts: ts).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
assert 2 == ["for" in l for l in lines].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("for", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_count_after_single_sequence_of_sequence_with_useless_where():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: e.Tracks("InnerTracks").Where(lambda pt: pt > 10.0)).Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
# Make sure there is just one for loop in here.
l_increment = find_line_with('+1', lines)
block_headers = find_open_blocks(lines[:l_increment])
assert 1 == ["for" in l for l in block_headers].count(True)
# Make sure the +1 happens after the for, and before another } bracket.
num_for = find_line_with("for", lines)
num_inc = find_line_with("+1", lines[num_for:])
num_close = find_next_closing_bracket(lines[num_for:])
assert num_close > num_inc
def test_first_can_be_iterable_after_where():
# This was found while trying to generate a tuple for some training, below, simplified.
# The problem was that First() always returned something you weren't allowed to iterate over. Which is not what we want here.
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: e.Tracks("InnerTracks").Where(lambda t: t.pt() > 1000.0)).First().Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
def test_first_can_be_iterable():
# Make sure a First() here gets called back correctly and generated.
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AllMyJets").Select(lambda j: e.Tracks("InnerTracks")).First().Count()') \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
def test_Aggregate_per_jet():
r = EventDataset("file://root.root") \
.Select("lambda e: e.Jets('AntiKt4EMTopoJets').Select(lambda j: j.pt()).Count()") \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
def test_generate_Max():
r = EventDataset("file://root.root") \
.Select("lambda e: e.Jets('AntiKt4EMTopoJets').Select(lambda j: j.pt()).Max()") \
.AsROOTTTree('dude.root', 'analysis', 'JetPt') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
def test_First_selects_collection_count():
# Make sure that we have the "First" predicate after if Where's if statement.
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AntiKt4EMTopoJets").Select(lambda j: e.Tracks("InDetTrackParticles")).First().Count()') \
.AsPandasDF('TrackCount') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
l = find_line_numbers_with("for", lines)
assert 2==len(l)
def test_sequence_with_where_first():
r = EventDataset("file://root.root") \
.Select('lambda e: e.Jets("AntiKt4EMTopoJets").Select(lambda j: e.Tracks("InDetTrackParticles").Where(lambda t: t.pt() > 1000.0)).First().Count()') \
.AsPandasDF('dude') \
.value(executor=exe_for_test)
lines = get_lines_of_code(r)
print_lines(lines)
l_first = find_line_numbers_with("if (is_first", lines)
assert 1 == len(l_first)
active_blocks = find_open_blocks(lines[:l_first[0]])
assert 1==["for" in a for a in active_blocks].count(True)
l_agg = find_line_with("+1", lines)
active_blocks = find_open_blocks(lines[:l_agg])
assert 1==[">1000" in a for a in active_blocks].count(True)
| [
"gwatts@uw.edu"
] | gwatts@uw.edu |
584f901a80a2f22282440a28dce2d9699015344e | 007cd60ca84623c5ace8b50e846be659ae2e8c85 | /collectors/pfizer/spider.py | 971186105d5e69303b527e53cbb81e1dfb35b8f6 | [
"MIT"
] | permissive | kenferrara/collectors | 412a97f0475747a206cbe68890774c0c37e9fc1a | e6c1f45df3a1ffd5d60dada1816484812eb51417 | refs/heads/master | 2023-04-16T22:01:19.899692 | 2020-04-23T02:31:24 | 2020-04-23T02:31:24 | 258,073,992 | 0 | 0 | MIT | 2023-04-04T00:19:23 | 2020-04-23T02:30:59 | Python | UTF-8 | Python | false | false | 1,025 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from scrapy.spiders import Rule
from scrapy.spiders import CrawlSpider
from scrapy.linkextractors import LinkExtractor
from .parser import parse_record
# Module API
class Spider(CrawlSpider):
# Public
name = 'pfizer'
allowed_domains = ['pfizer.com']
def __init__(self, conf=None, conn=None):
# Save conf/conn
self.conf = conf
self.conn = conn
# Make urls
self.start_urls = [
'http://www.pfizer.com/research/clinical_trials/find_a_trial?recr=0',
]
# Make rules
self.rules = [
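            # Trial detail pages (NCT ids): parse each one as a record.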
Rule(LinkExtractor(
allow=r'find_a_trial/NCT\d+',
), callback=parse_record),
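            # Pagination links: follow without a callback.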
Rule(LinkExtractor(
allow=r'page=\d+',
)),
]
# Inherit parent
super(Spider, self).__init__()
| [
"roll@post.agency"
] | roll@post.agency |
1a8110513faf6e110a32cea2a34ee3110be3b94c | 542b16adf10198571fd4881fcb7d7ff196434c10 | /pyscene.py | 46bd21ce93f651d910212df009282084681317eb | [] | no_license | lingyue328/SceneClassification | 655fd8c2165fee8a2621c8c7e24578d7091dd448 | 269d9d7134bc26412794cf1713666fe741d60ffa | refs/heads/main | 2023-03-21T08:35:35.584816 | 2021-03-16T04:47:19 | 2021-03-16T04:47:19 | 347,949,704 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | import scenedetect
import subprocess
videopath='test.mp4'
scenepath='./scene'
subprocess.call('scenedetect -i %s -o %s detect-content list-scenes save-images -n 1' % (videopath, scenepath), shell=True)  # shell=True because the command is passed as a single string
"noreply@github.com"
] | lingyue328.noreply@github.com |
5e949e06e6c5c4f3f95d57ba4cacb3b4a27db36c | c3752620acf43b822061f0c84ab2e1728fdf1c06 | /pureWords/idSimi2NodeSimi.py | 5878b095fa319621bacfbd88b62a4ac2eae56ab0 | [] | no_license | zhipeng93/wordEmbeddingBenchmark | 2704aedd3478e712a44996539b30da03531aa11b | 6a2cb68262ee4c4cb59457f2a15667e8d76350e5 | refs/heads/master | 2021-10-27T15:58:09.741495 | 2019-04-18T08:30:34 | 2019-04-18T08:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import sys
def convert(embedding_f, node2id_f, out_embedding_f):
id2word = dict()
for line in open(node2id_f):
if line.startswith('#') or line.strip() == "":
continue
line = line.strip().split()
_word = line[0]
_id = line[1]
id2word[_id] = _word
out_embedding_file = open(out_embedding_f, 'w')
for line in open(embedding_f):
line = line.strip().split()
line[0] = id2word[line[0]]
line[1] = id2word[line[1]]
# print line[0]
out_embedding_file.write(" ".join(line) + "\n")
out_embedding_file.close()
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Usage: python _.py embedding.file node2id.file out_embedding.file'
sys.exit(1)
convert(sys.argv[1], sys.argv[2], sys.argv[3])
| [
"zhangzhipeng@pku.edu.cn"
] | zhangzhipeng@pku.edu.cn |
3756722bfc33e2a508d337325f73dd8d823b0c0a | dbb935a67217902fe823fff5885d8a08b0dce133 | /python/pqauth/pqauth_django_server/tests/__init__.py | 7e91e08ff73a4c3cba9db3359803f4c046424b00 | [
"MIT"
] | permissive | ddtrung-fs/pqauth | 292c72fc8aeac995ce0a0765f9abef9e2c02c422 | b6322c704306342b0cdeed07d6a6dfe7faf7ca4c | refs/heads/master | 2021-05-26T22:45:44.167235 | 2013-05-03T23:10:27 | 2013-05-03T23:10:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | from .protocol import ProtocolTest
| [
"tjdziuba@gmail.com"
] | tjdziuba@gmail.com |
39e1db7518d3d854a2368377dfb59e1886ade294 | 815339fed0c82e96769e0a49f916df2b0a4c0c9d | /setup.py | 079050c8c83d60e434dbde482381149e794d6cf2 | [
"MIT"
] | permissive | ps/Flask-HTTPAuth | 9ac541ca7d40ac926917d0d5d3e0a3fa20c945d8 | fc34cc5020168ca3824cc4a740b2010bb3132abf | refs/heads/master | 2021-01-22T12:37:19.531289 | 2015-01-16T20:18:21 | 2015-01-16T20:18:21 | 29,791,846 | 0 | 0 | null | 2015-01-24T21:12:24 | 2015-01-24T21:12:24 | null | UTF-8 | Python | false | false | 1,006 | py | """
Flask-HTTPAuth
--------------
Basic and Digest HTTP authentication for Flask routes.
"""
from setuptools import setup
setup(
name='Flask-HTTPAuth',
version='2.3.0',
url='http://github.com/miguelgrinberg/flask-httpauth/',
license='MIT',
author='Miguel Grinberg',
author_email='miguelgrinberg50@gmail.com',
description='Basic and Digest HTTP authentication for Flask routes',
long_description=__doc__,
py_modules=['flask_httpauth'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
test_suite = "test_httpauth",
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| [
"miguelgrinberg50@gmail.com"
] | miguelgrinberg50@gmail.com |
d50bca0fe3b87aae44167fe82ad1b04c2743031d | e8199f1d424592affe19b50fd96a02815067d1b1 | /Dynamic Programming/1641. Count Sorted Vowel Strings.py | cee2d0c34ad32c209ff626cc43f9db293a8c090d | [] | no_license | srajsonu/LeetCode-Solutions-Python | 39a809e4c6d555a3a3055ce03d59cfa40b93a287 | 8ec31c8df2885f3da533424ba13060b7d3e3af78 | refs/heads/master | 2023-03-19T10:05:42.578615 | 2021-03-13T17:21:36 | 2021-03-13T17:21:36 | 280,716,200 | 0 | 1 | null | 2020-10-06T09:54:02 | 2020-07-18T18:32:04 | Python | UTF-8 | Python | false | false | 504 | py | class Solution:
def countVowelStrings(self, n):
m = 5
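        # dp[i][j] = number of sorted strings of length i+1 using only the
        # vowels with index >= j: either start with vowel j (dp[i-1][j]) or
        # skip vowel j entirely (dp[i][j+1]); the answer is dp[n-1][0].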
dp = [[0]*m for _ in range(n)]
for i in range(n):
dp[i][-1] = 1
tmp = 1
for i in reversed(range(m)):
dp[0][i] = tmp
tmp += 1
for i in range(1, n):
for j in reversed(range(m-1)):
dp[i][j] = dp[i-1][j] + dp[i][j+1]
return dp[-1][0]
if __name__ == '__main__':
A = 33
B = Solution()
print(B.countVowelStrings(A))
| [
"srajsonu02@gmail.com"
] | srajsonu02@gmail.com |
d6b0ce2557419bb5ee7d83893bd14dca5b797630 | bf5c6eed07213bf7e544a0856ed46031b0078011 | /scripts/gensimLoadModelAndSimilarWords.py | ed4a8cd483a03e2b4f60bfea15229a034fd92c5d | [] | no_license | flovera1/NlpHackaton | 4009df66dadbe2ad027c3dbbc4f5c95dd17c9494 | 2be089dadca7914f3faddbb63ed6a8aac34d7341 | refs/heads/master | 2021-03-04T04:02:12.673586 | 2020-03-12T13:20:46 | 2020-03-12T13:20:46 | 246,007,392 | 1 | 1 | null | 2020-03-09T14:01:11 | 2020-03-09T10:35:32 | Python | UTF-8 | Python | false | false | 5,935 | py | import pandas as pd
import re
import string
import numpy as np
import os
import nltk
#nltk.download('stopwords')
#from nltk.corpus import gutenberg
from string import punctuation
import gensim
from matplotlib import pyplot as plt
#from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import KMeans
from sklearn import cluster
from sklearn.decomposition import PCA
#directory = "../data/tauExptData/threeSent/"
directory = "../data/tauExptData/hundSent/"
filesno=0
textSents = []
filelist = os.listdir(directory)
for docname in filelist:
fullpath = directory+docname
if fullpath.endswith("txt"):
print(fullpath)
f = open(fullpath, 'r')
filesno+=1
textSents += f.readlines()
#print(textSents)
print("Total number of files",filesno)
'''
The functions below support the 'text_to_phrases' function.
'''
# identify all possible phrases
def key_words_phrases(raw):
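    # Slide an n-gram window over the tokens, collecting every n-gram from
    # minlen up to maxlen words long.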
ngramlist=[]
x=minlen
ngramlimit = maxlen
tokens=nltk.word_tokenize(raw)
while x <= ngramlimit:
ngramlist.extend(nltk.ngrams(tokens, x))
x+=1
return ngramlist
# join words into a new list
def concat_words(wordlist):
new_list = []
for words in wordlist:
new_list.append(' '.join(words))
return new_list
# define maximum and minimum number of words in one phrase
maxlen = 10
minlen = 4
def text_to_phrases(text):
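    # Keep sentences of at most maxlen words as-is; longer sentences are
    # split into overlapping n-gram phrases of minlen..maxlen words.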
phrases = []
for sentence in text:
if len(str(sentence).split(' ')) <= maxlen:
phrases.append(sentence)
else:
wordlist = key_words_phrases(sentence)
phrases += concat_words(wordlist)
print(len(phrases))
print("Phrase length obtained")
return phrases
phrases = text_to_phrases(textSents)
#phrases = ['gemeente TESSENDERLO , vertegenwoordigd', 'TESSENDERLO , vertegenwoordigd door', ', vertegenwoordigd door het', 'vertegenwoordigd door het college', 'door het college van', 'de gemeente TESSENDERLO , vertegenwoordigd', 'gemeente TESSENDERLO , vertegenwoordigd door', 'TESSENDERLO , vertegenwoordigd door het', ', vertegenwoordigd door het college', 'vertegenwoordigd door het college van', 'de gemeente TESSENDERLO , vertegenwoordigd door', 'gemeente TESSENDERLO , vertegenwoordigd door het', 'TESSENDERLO , vertegenwoordigd door het college', ', vertegenwoordigd door het college van', 'de gemeente TESSENDERLO , vertegenwoordigd door het', 'gemeente TESSENDERLO , vertegenwoordigd door het college', 'TESSENDERLO , vertegenwoordigd door het college van', 'de gemeente TESSENDERLO , vertegenwoordigd door het college', 'gemeente TESSENDERLO , vertegenwoordigd door het college van', 'de gemeente TESSENDERLO , vertegenwoordigd door het college van', 'burgemeester en schepenen \n', ' \n', 'verwerende partij \n', ' \n', '\n', ' \n', '\n', ' \n', '\n', ' \n', ' \n', 'In zake: \n']
REPLACE_BY_SPACE_RE = re.compile('[/(){}\[\]\|@,;]')
BAD_SYMBOLS_RE = re.compile('[^0-9a-z #+_]')
def clean_text(text):
"""
text: a string
return: modified initial string
"""
#text = BeautifulSoup(text, "lxml").text # HTML decoding
text = text.lower() # lowercase text
text = REPLACE_BY_SPACE_RE.sub(' ', text) # replace REPLACE_BY_SPACE_RE symbols by space in text
text = BAD_SYMBOLS_RE.sub('', text) # delete symbols which are in BAD_SYMBOLS_RE from text
#text = ' '.join(word for word in text.split() if word not in STOPWORDS) # delete stopwors from text
# remove numbers
text_nonum = re.sub(r'\d+', '', text)
    # remove punctuation and convert characters to lower case
text_nopunct = "".join([char.lower() for char in text_nonum if char not in string.punctuation])
# substitute multiple whitespace with single whitespace
# Also, removes leading and trailing whitespaces
text_no_doublespace = re.sub('\s+', ' ', text_nopunct).strip()
return text_no_doublespace
newphrases=[]
for items in phrases:
newphrases.append(clean_text(items))
#print("newphrases", newphrases)
wpt = nltk.WordPunctTokenizer()
stop_words = nltk.corpus.stopwords.words('dutch')
def normalize_document(doc):
# lower case and remove special characters\whitespaces
#doc = re.sub(r'[^a-zA-Z\s]', '', doc, re.I|re.A)
doc = doc.lower()
doc = doc.strip()
# tokenize document
tokens = wpt.tokenize(doc)
# filter stopwords out of document
filtered_tokens = [token for token in tokens if token not in stop_words]
# re-create document from filtered tokens
doc = ' '.join(filtered_tokens)
return doc
normalize_corpus = np.vectorize(normalize_document)
norm_corpus = normalize_corpus(newphrases)
#https://kite.com/python/answers/how-to-remove-empty-strings-from-a-list-of-strings-in-python
norm_corpus_sans_empty_strings = [string for string in norm_corpus if string != ""]
norm_corpus = norm_corpus_sans_empty_strings
print("Normalised corpus")
#print(norm_corpus)
print("lenght of norm corpus", len(norm_corpus))
sentences_split = [s.lower().split(' ') for s in norm_corpus]
#print(sentences_split)
#https://machinelearningmastery.com/develop-word-embeddings-python-gensim/?fbclid=IwAR2zGhHkU98rxOZuDbuk_iYI2pz4gxHd0yz_q9UYX2OwMZsiSy1xLSyllW4
#model = gensim.models.Word2Vec(sentences_split, min_count=2)
print("Train Word2Vec on sentences_split")
model = gensim.models.Word2Vec(sentences_split, size=50, min_count=3, sg=1)
#https://ai.intelligentonlinetools.com/ml/text-clustering-doc2vec-word-embedding-machine-learning/
from gensim.test.utils import get_tmpfile
fname = get_tmpfile("my_word2vec_model")
#load model from saved file
print("Save Word2Vec trained model")
model.save(fname)
model = gensim.models.Word2Vec.load(fname)
#model.delete_temporary_training_data(keep_wordtags_vectors=True, keep_inference=True)
w1 = "bouwdiepte"
print("top 6 similar words to bouwdiepte", model.wv.most_similar(positive=w1,topn=6))
| [
"18098441+curiousTauseef@users.noreply.github.com"
] | 18098441+curiousTauseef@users.noreply.github.com |
2dad1f74e7d7f7ecc839ac8ab348b5d490a763a6 | cd41e9a384dd3c0b2be22b9243ac9735f8de86bc | /beaglebone.py | 598aaa7c201e10342a2413e0e4c48947a9adc4de | [] | no_license | rsps1008/Voice-Controlled-Christmas-Tree | d551bf3268dc90d3f5a46239ce54570e4143aed4 | 23e57105a9bed403c5c4c3cda4de4da36fc4c83c | refs/heads/master | 2022-04-15T21:29:53.445647 | 2020-03-15T15:53:14 | 2020-03-15T15:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,403 | py | # -*- coding: utf-8 -*-
import argparse
import base64
import httplib2
import json
import math
import os
import select
import serial
import subprocess
import time
import Adafruit_BBIO.UART as UART
import Adafruit_BBIO.ADC as ADC
import Adafruit_BBIO.GPIO as GPIO
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from socket import socket, AF_INET, SOCK_DGRAM
GPIO.setup("P9_14", GPIO.OUT)
GPIO.setup("P8_10", GPIO.OUT)
GPIO.setup("P8_36", GPIO.OUT)
GPIO.setup("P8_46", GPIO.OUT)
speech_file = '/root/test2.wav'
encoding = 'LINEAR16'
sampleRate = 16000
languageCode = 'zh-tw'
UART.setup("UART1")
ADC.setup()
DISCOVERY_URL = ('https://{api}.googleapis.com/$discovery/rest?'
'version={apiVersion}')
def record_voice():
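    # Record 3 seconds of 16 kHz, 16-bit mono audio from ALSA device plughw:1,0.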
subprocess.call('arecord -D plughw:1,0 -V mono -r 16000 -f S16_LE -d 3 /root/test2.wav', shell=True)
def get_speech_service():
credentials = GoogleCredentials.get_application_default().create_scoped(
['https://www.googleapis.com/auth/cloud-platform'])
http = httplib2.Http()
credentials.authorize(http)
return discovery.build(
'speech', 'v1beta1', http=http, discoveryServiceUrl=DISCOVERY_URL)
def main(speech_file):
global Word
with open(speech_file, 'rb') as speech:
speech_content = base64.b64encode(speech.read())
service = get_speech_service()
service_request = service.speech().syncrecognize(
body={
'config': {
'encoding': encoding,
'sampleRate': sampleRate,
'languageCode': languageCode,
},
'audio': {
'content': speech_content.decode('UTF-8')
}
})
response = service_request.execute()
try:
        # extract the transcript string from the response dict
ss = response['results']
s1 = ss[0]
s2 = s1['alternatives']
s3 = s2[0]
Word = s3['transcript']
except:
        # no speech input detected
print("No Word Input!")
if __name__ == '__main__':
print("wait for command ... ...")
s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', 11111))
blink1 = 0; blink2 = 0; blink3 = 0; blink4 = 0
redLED = "P8_36"; greenLED = "P9_14"; yellowLED = "P8_10"
UART.setup("UART1")
ser = serial.Serial(port = "/dev/ttyO1", baudrate=9600)
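    # Chinese voice-command keywords matched against the recognized transcript:
    # open, close, red, green, yellow, all, blink, together, in turn,
    # temperature, recognize, steady on, time.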
Wopen = u"開"; Wclose = u"關"; Wred = u"紅"; Wgreen = u"綠"; Wyellow = u"黃"
Wall = u"全部"; Wblink = u"閃爍"; Wsame = u"同時"; Wturn = u"輪流";
Wtemp = u"溫度"; Wvoice = u"辨識"; Walways = u"恆"; Wtime = u"時間"
Word = ''
while True:
        data = 0; Word=''; rec = False; web = False  # reset all state for this iteration
ADC1value = ADC.read_raw("P9_40")
ser.close()
ser.open()
        if ADC1value == 4095:  # button pressed: record audio and run recognition
rec = True
ser.write("2")#顯示"REC"
record_voice()
ser.write("3")
print
            main(speech_file)  # call the speech recognition API
s.setblocking(0)
        ready = select.select([s], [], [], 0.2)  # poll the UDP socket with a 0.2 s timeout for a command
        if ready[0]:  # a network command arrived
web = True
data, addr = s.recvfrom(1024)
print "Received %r from %s " % (data, addr)
        if rec == True or web == True:  # a socket or voice command was received
if rec == True and Wopen not in Word and Wclose not in Word and Wblink not in Word and Wtemp not in Word and Wvoice not in Word and Walways not in Word and Wtime not in Word:
print u"沒有這個指令 %s" % Word
ser.write("1 No such command")
if data == "1" or (Wred in Word and Wopen in Word) or (Wred in Word and Walways in Word):
GPIO.output("P8_36", GPIO.HIGH) #紅燈開
blink1 = 0; redLED = "P8_46"
elif data == "2" or (Wgreen in Word and Wopen in Word) or (Wgreen in Word and Walways in Word):
GPIO.output("P9_14", GPIO.HIGH) #綠燈開
blink2 = 0; greenLED = "P8_46"
elif data == "3" or (Wyellow in Word and Wopen in Word) or (Wyellow in Word and Walways in Word):
GPIO.output("P8_10", GPIO.HIGH) #黃燈開
blink3 = 0; yellowLED = "P8_46"
elif data == "4" or (Wred in Word and Wblink in Word):#紅燈閃爍
blink1 = 1
elif data == "5" or (Wgreen in Word and Wblink in Word):#綠燈閃爍
blink2 = 1
elif data == "6" or (Wyellow in Word and Wblink in Word):#黃燈閃爍
blink3 = 1
elif data == "7" or (Wred in Word and Wclose in Word):#紅燈關
GPIO.output("P8_36", GPIO.LOW); blink1 = 0; redLED = "P8_46"
elif data == "8" or (Wgreen in Word and Wclose in Word):#綠燈關
GPIO.output("P9_14", GPIO.LOW); blink2 = 0; greenLED = "P8_46"
elif data == "9" or (Wyellow in Word and Wclose in Word):#黃燈關
GPIO.output("P8_10", GPIO.LOW); blink3 = 0; yellowLED = "P8_46"
elif data == "10" or (Wall in Word and Wopen in Word) or (Wsame in Word and Wopen in Word):#全部開啟
GPIO.output("P8_10", GPIO.HIGH); GPIO.output("P9_14", GPIO.HIGH); GPIO.output("P8_36", GPIO.HIGH)
blink1 = 0; blink2 = 0; blink3 = 0; blink4 = 0
elif data == "11" or (Wall in Word and Wclose in Word):#全部關閉
GPIO.output("P8_10", GPIO.LOW); GPIO.output("P9_14", GPIO.LOW); GPIO.output("P8_36", GPIO.LOW)
blink1 = 0; blink2 = 0; blink3 = 0; blink4 = 0
elif data == "12" or (Wturn in Word and Wblink in Word):#輪流閃爍
blink1 = 1; blink2 = 1; blink3 = 1
elif data == "13" or (Wsame in Word and Wblink in Word):#同時閃爍
blink4 = 1; blink1 = 0; blink2 = 0; blink3 = 0
redLED = "P8_36"; greenLED = "P9_14"; yellowLED = "P8_10"
elif data == "14" or (Wtemp in Word):#顯示溫度
ser.write("0")
ser.close()
elif data == "15" or Wvoice in Word:#語音辨識
ser.write("2")#顯示"REC"
record_voice()
ser.write("3")
main(speech_file)
ww = "1 " + Word
try:
ser.write(ww.encode())
except:
ser.write("1 Cannot Display Chinese")
ser.close()
elif data == "16" or Wtime in Word:#時間
a = str(int(time.strftime("%H")) + 8)
b = time.strftime("%Y/%m/%d ")
c = time.strftime(":%M:%S")
d = b + a + c
ser.write("1 "+ d )
time.sleep(.5)
if blink1 == 1:
GPIO.output("P8_36", GPIO.HIGH)
time.sleep(.2)
GPIO.output("P8_36", GPIO.LOW)
if blink2 == 1:
GPIO.output("P9_14", GPIO.HIGH)
time.sleep(.2)
GPIO.output("P9_14", GPIO.LOW)
if blink3 == 1:
GPIO.output("P8_10", GPIO.HIGH)
time.sleep(.2)
GPIO.output("P8_10", GPIO.LOW)
if blink4 == 1:
GPIO.output(redLED, GPIO.HIGH)
GPIO.output(greenLED, GPIO.HIGH)
GPIO.output(yellowLED, GPIO.HIGH)
time.sleep(.2)
GPIO.output(redLED, GPIO.LOW)
GPIO.output(greenLED, GPIO.LOW)
GPIO.output(yellowLED, GPIO.LOW)
| [
"rsps1008@gmail.com"
] | rsps1008@gmail.com |
64027fc85a180c295e6a6166caae085887c3a574 | 7f8d0da9996fae10ac1c435aa4f07662dc3dfcf3 | /day-9-silent_auctioni/day-9-3-silent auction/main.py | 208bbd5c900ccea681e6af00b8c695589fd09b99 | [] | no_license | tylersojka/100-Days_of_code | 1fc34f48c5bc16ae63bd33227e9481ab04a87c5f | 9ad0fbff8a55237bce019a44b7e649240388fab6 | refs/heads/main | 2023-03-19T04:06:55.329612 | 2021-03-15T19:17:30 | 2021-03-15T19:17:30 | 312,051,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import art
from replit import clear
clear()
print(art.logo)
# empty dict for holding all input
auction_dict = {}
# flag to start the while loop
add_more = True
# build the name:bid dictionary to hold all the entries
while add_more == True:
dict_name = input("What is your name?\n")
dict_bid = int(input("What is your bid?\n"))
auction_dict[dict_name] = dict_bid
# flag to end the loop if no more people to enter
next_person = input("Is there another person to bid? (Y/N)\n").lower()
if next_person == "n":
add_more = False
clear()
# list to hold all the bids to then iterate over and get the max
bid_list = []
for name in auction_dict:
bid_list.append(auction_dict[name])
winning_bid = max(bid_list)
# function to parse auction_dict and get the key of the passed value
def get_key(val):
for key, value in auction_dict.items():
if val == value:
return key
# call function to grab the key of the winning bid value and assign to winning name
winning_name = get_key(winning_bid)
# print results
print(f'{winning_name} is the winner with a bid of {winning_bid}!')
| [
"sojkasojkasojka@gmail.com"
] | sojkasojkasojka@gmail.com |
f3870b74ea6e640f17dc518240e3fed2c77136bf | 32d032b6726fd450cba0b3c534709196f3934eee | /groups/migrations/0006_auto_20200401_1200.py | 6cc8a2c0c35409d08e2bbeb9bffe9482bf43acc2 | [] | no_license | Machele-codez/WeSocial-Blog | d63361db5d58418d7113d5d8f02d435e274cad4b | 744f419516c807cb07f4b182e64b8bb57114c891 | refs/heads/master | 2021-05-23T12:02:07.192446 | 2020-04-05T16:33:06 | 2020-04-05T16:33:06 | 253,277,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # Generated by Django 2.2.7 on 2020-04-01 12:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('groups', '0005_auto_20200401_1109'),
]
operations = [
migrations.AlterField(
model_name='membership',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memberships', to='groups.Group'),
),
migrations.AlterField(
model_name='membership',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_groups', to=settings.AUTH_USER_MODEL),
),
]
| [
"smithbeblack@gmail.com"
] | smithbeblack@gmail.com |
d6a261836e76a042cb2a7152a6424eceb1337321 | a73fd25dd9a8e6df0b1bf3eee0bccf5297722bc7 | /]tasks/2018.06.01.domain_selection/exam_patch_method.py | bdc750084e88d217e7830d6c239a9cb3e7c11bc7 | [] | no_license | bohaohuang/sis | 23d0260d85903b62518fb8fb588661597248ad0d | 28a59f3182f0ba58ba582449377c6588af1d4cde | refs/heads/master | 2021-05-05T17:00:33.808099 | 2019-09-06T17:46:02 | 2019-09-06T17:46:02 | 117,362,230 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,843 | py | import os
import imageio
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import uabCrossValMaker
import uab_collectionFunctions
import uab_DataHandlerFunctions
from bohaoCustom import uabDataReader
def get_patch_by_name(patch_dir, p_name, patch_size):
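    # Stack the three single-channel jpgs listed in p_name into one RGB patch.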
img = np.zeros((patch_size, patch_size, 3), dtype=np.uint8)
for i in range(3):
img_name = p_name[i] # '{}_RGB{}.jpg'.format(p_name, i)
img[:, :, i] = imageio.imread(os.path.join(patch_dir, img_name))
return img
patch_prob = np.load('/media/ei-edl01/user/bh163/tasks/2018.06.01.domain_selection/patch_prob_austin_2048.npy')
city_list = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
# create collection
# the original file is in /ei-edl01/data/uab_datasets/inria
blCol = uab_collectionFunctions.uabCollection('inria')
img_mean = blCol.getChannelMeans([0, 1, 2])
# extract patches
extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4], # extract all 4 channels
                                                cSize=(321, 321), # patch size 321*321
                                                numPixOverlap=0, # no overlap between patches
extSave=['jpg', 'jpg', 'jpg', 'png'],
# save rgb files as jpg and gt as png
isTrain=True,
gtInd=3,
pad=0) # pad around the tiles
patchDir = extrObj.run(blCol)
# make data reader
chipFiles = os.path.join(patchDir, 'fileList.txt')
# use uabCrossValMaker to get fileLists for training and validation
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
# use first 5 tiles for validation
file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(6, 37)])
file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, [i for i in range(0, 6)])
patch_id_dict = {}
for cnt, item in enumerate(file_list_train):
p_name = '_'.join(item[0].split('_')[:2])
patch_id_dict[p_name] = cnt
dataReader_train = uabDataReader.ImageLabelReaderPatchSampleControl(
[3], [0, 1, 2], patchDir, file_list_train, (321, 321), 100, patch_prob, patch_name=True,
block_mean=np.append([0], img_mean))
patch_cnt = np.zeros(len(file_list_train), dtype=np.uint64)
c_cnt = np.zeros(5)
city_dict = {'aus': 0, 'chi': 1, 'kit': 2, 'tyr': 3, 'vie': 4}
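# Draw 100k batches of 100 patch indices according to patch_prob and tally how
# often each individual patch and each city gets sampled.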
for reader_cnt in tqdm(range(100000)):
idx_batch = np.random.choice(len(file_list_train), 100, p=patch_prob)
for i in idx_batch:
row = file_list_train[i]
p_name = '_'.join(row[0].split('_')[:2])
c_cnt[city_dict[p_name[:3]]] += 1
patch_cnt[patch_id_dict[p_name]] += 1
plt.bar(np.arange(5), c_cnt)
plt.xticks(np.arange(5), city_list)
plt.show()
top_num = 28
top_idx = np.argsort(patch_cnt)[::-1][:top_num]
c_cnt = np.zeros(5)
plt.figure(figsize=(18, 10))
for i in range(top_num):
plt.subplot(4, 7, 1+i)
plt.imshow(get_patch_by_name(patchDir, file_list_train[top_idx[i]], 321))
plt.axis('off')
p_name = '_'.join(file_list_train[top_idx[i]][0].split('_')[:2])
p_num = patch_cnt[top_idx[i]]
c_cnt[city_dict[p_name[:3]]] += 1
plt.title('{}:{}'.format(p_name, p_num))
plt.tight_layout()
print(c_cnt)
plt.figure(figsize=(8, 5))
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
city_key = [city_dict[i[0][:3]] for i in file_list_train]
sort_city_key = np.array([city_key[i] for i in np.argsort(patch_cnt)[::-1]])
for i in range(5):
plt.bar(np.arange(len(file_list_train))[sort_city_key == i],
np.sort(patch_cnt)[::-1][sort_city_key == i], label=city_list[i])
plt.ylabel('cnt')
plt.legend()
plt.tight_layout()
plt.show()
| [
"bohao.huang@duke.edu"
] | bohao.huang@duke.edu |
62fcf7524c6e232539f497e3c91f76801a2ce522 | f23b7ba02960fde346609e8cf35fee317825d825 | /vcnn_train.py | 8eb2ae08f7abaaf243e7231342fa4b8f63842b2d | [] | no_license | danielsonjh/ESC499 | 010dbf0c3b9fd1ffa7ac2d39091a4c3b512207f9 | afd0fc17ae7239b551bf8ed9fcb6a4ff84935d4c | refs/heads/master | 2021-01-13T12:27:51.405486 | 2017-04-13T22:55:23 | 2017-04-13T22:55:23 | 78,540,386 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,680 | py | import sys
import tensorflow as tf
import numpy as np
import datetime as dt
from data_loader import dl
learning_rate = 0.001
training_epochs = 50
batch_size = 64
display_step = 50
input_dim = 30
def basic_vcnn(n_labels):
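    # Two 3-D conv layers (stride-2, then pooled) followed by two fully
    # connected layers, operating on a 30^3 voxel grid.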
x = tf.placeholder(tf.float32, [None, input_dim, input_dim, input_dim])
y = tf.placeholder(tf.float32, [None, n_labels])
weights = {
'c1': tf.Variable(tf.random_normal([5, 5, 5, 1, 32]), name='wc1'),
'c2': tf.Variable(tf.random_normal([3, 3, 3, 32, 64]), name='wc2'),
'fc1': tf.Variable(tf.random_normal([8 * 8 * 8 * 64, 512]), name='wfc1'),
'fc2': tf.Variable(tf.random_normal([512, n_labels]), name='wfc2'),
}
biases = {
'c1': tf.Variable(tf.random_normal([weights['c1'].get_shape().as_list()[-1]]), name='bc1'),
'c2': tf.Variable(tf.random_normal([weights['c2'].get_shape().as_list()[-1]]), name='bc2'),
'fc1': tf.Variable(tf.random_normal([weights['fc1'].get_shape().as_list()[-1]]), name='bfc1'),
'fc2': tf.Variable(tf.random_normal([weights['fc2'].get_shape().as_list()[-1]]), name='bfc2'),
}
# Reshape input
x_in = tf.reshape(x, shape=[-1, input_dim, input_dim, input_dim, 1])
# x_in = tf.nn.avg_pool3d(x_in, ksize=[1, 4, 4, 4, 1], strides=[1, 4, 4, 4, 1], padding='SAME')
print(x_in.get_shape())
# Convolution Layers
c1 = tf.nn.conv3d(x_in, weights['c1'], strides=[1, 2, 2, 2, 1], padding='SAME')
c1 = tf.nn.bias_add(c1, biases['c1'])
c1 = tf.nn.relu(c1)
# c1 = tf.nn.avg_pool3d(c1, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
# c1 = tf.nn.dropout(c1, 0.8)
c2 = tf.nn.conv3d(c1, weights['c2'], strides=[1, 1, 1, 1, 1], padding='SAME')
c2 = tf.nn.bias_add(c2, biases['c2'])
c2 = tf.nn.relu(c2)
c2 = tf.nn.avg_pool3d(c2, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
# c2 = tf.nn.dropout(c2, 0.7)
# Fully connected layers
fc1 = tf.reshape(c2, [-1, weights['fc1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['fc1']), biases['fc1'])
fc1 = tf.nn.relu(fc1)
# fc1 = tf.nn.dropout(fc1, 0.6)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['fc2']), biases['fc2'])
return x, y, weights, biases, out
def vgg(n_labels):
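    # VGG-style 3-D stack: seven 3x3x3 conv layers in three max-pooled
    # groups, followed by two fully connected layers.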
x = tf.placeholder(tf.float32, [None, input_dim, input_dim, input_dim])
y = tf.placeholder(tf.float32, [None, n_labels])
weights = {
'c1': tf.Variable(tf.random_normal([3, 3, 3, 1, 32]), name='wc1'),
'c2': tf.Variable(tf.random_normal([3, 3, 3, 32, 32]), name='wc2'),
'c3': tf.Variable(tf.random_normal([3, 3, 3, 32, 48]), name='wc3'),
'c4': tf.Variable(tf.random_normal([3, 3, 3, 48, 48]), name='wc4'),
'c5': tf.Variable(tf.random_normal([3, 3, 3, 48, 64]), name='wc5'),
'c6': tf.Variable(tf.random_normal([3, 3, 3, 64, 64]), name='wc6'),
'c7': tf.Variable(tf.random_normal([3, 3, 3, 64, 64]), name='wc7'),
'fc1': tf.Variable(tf.random_normal([4 * 4 * 4 * 64, 512]), name='wfc1'),
'fc2': tf.Variable(tf.random_normal([512, n_labels]), name='wfc2'),
}
biases = {
'c1': tf.Variable(tf.random_normal([weights['c1'].get_shape().as_list()[-1]]), name='bc1'),
'c2': tf.Variable(tf.random_normal([weights['c2'].get_shape().as_list()[-1]]), name='bc2'),
'c3': tf.Variable(tf.random_normal([weights['c3'].get_shape().as_list()[-1]]), name='bc3'),
'c4': tf.Variable(tf.random_normal([weights['c4'].get_shape().as_list()[-1]]), name='bc4'),
'c5': tf.Variable(tf.random_normal([weights['c5'].get_shape().as_list()[-1]]), name='bc5'),
'c6': tf.Variable(tf.random_normal([weights['c6'].get_shape().as_list()[-1]]), name='bc6'),
'c7': tf.Variable(tf.random_normal([weights['c7'].get_shape().as_list()[-1]]), name='bc7'),
'fc1': tf.Variable(tf.random_normal([weights['fc1'].get_shape().as_list()[-1]]), name='bfc1'),
'fc2': tf.Variable(tf.random_normal([weights['fc2'].get_shape().as_list()[-1]]), name='bfc2'),
}
# Reshape input
x_in = tf.reshape(x, shape=[-1, input_dim, input_dim, input_dim, 1])
# Convolution Layers
c1 = conv_relu(x_in, weights['c1'], biases['c1'], [1, 1, 1, 1, 1])
c2 = conv_relu(c1, weights['c2'], biases['c2'], [1, 1, 1, 1, 1])
c2 = tf.nn.max_pool3d(c2, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
c3 = conv_relu(c2, weights['c3'], biases['c3'], [1, 1, 1, 1, 1])
c4 = conv_relu(c3, weights['c4'], biases['c4'], [1, 1, 1, 1, 1])
c4 = tf.nn.max_pool3d(c4, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
c5 = conv_relu(c4, weights['c5'], biases['c5'], [1, 1, 1, 1, 1])
c6 = conv_relu(c5, weights['c6'], biases['c6'], [1, 1, 1, 1, 1])
c7 = conv_relu(c6, weights['c7'], biases['c7'], [1, 1, 1, 1, 1])
c7 = tf.nn.max_pool3d(c7, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
# Fully connected layers
fc1 = tf.reshape(c7, [-1, weights['fc1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['fc1']), biases['fc1'])
fc1 = tf.nn.relu(fc1)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['fc2']), biases['fc2'])
return x, y, weights, biases, out
def aniprobing():
    x = tf.placeholder(tf.float32, [None, 60, 60, 60])  # this model expects 60^3 voxel grids (see the reshape below)
y = tf.placeholder(tf.float32, [None, 10])
weights = {
'c1': tf.Variable(tf.random_normal([5, 5, 5, 1, 32]), name='wc1'),
'c2': tf.Variable(tf.random_normal([3, 3, 3, 32, 32]), name='wc2'),
        'fc1': tf.Variable(tf.random_normal([8 * 8 * 8 * 32, 128]), name='wfc1'),
'fc2': tf.Variable(tf.random_normal([128, 10]), name='wfc2'),
}
biases = {
'c1': tf.Variable(tf.random_normal([weights['c1'].get_shape().as_list()[4]]), name='bc1'),
'c2': tf.Variable(tf.random_normal([weights['c2'].get_shape().as_list()[4]]), name='bc2'),
'fc1': tf.Variable(tf.random_normal([weights['fc1'].get_shape().as_list()[1]]), name='bfc1'),
'fc2': tf.Variable(tf.random_normal([weights['fc2'].get_shape().as_list()[1]]), name='bfc2'),
}
# Reshape input
x_in = tf.reshape(x, shape=[-1, 60, 60, 60, 1])
x_in = tf.nn.avg_pool3d(x_in, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
# Convolution Layers
c1 = tf.nn.conv3d(x_in, weights['c1'], strides=[1, 2, 2, 2, 1], padding='SAME')
c1 = tf.nn.bias_add(c1, biases['c1'])
c1 = tf.nn.relu(c1)
# c1 = tf.nn.dropout(c1, 0.8)
c2 = tf.nn.conv3d(c1, weights['c2'], strides=[1, 1, 1, 1, 1], padding='SAME')
c2 = tf.nn.bias_add(c2, biases['c2'])
c2 = tf.nn.relu(c2)
c2 = tf.nn.avg_pool3d(c2, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
# c2 = tf.nn.dropout(c2, 0.7)
# Fully connected layers
fc1 = tf.reshape(c2, [-1, weights['fc1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['fc1']), biases['fc1'])
fc1 = tf.nn.relu(fc1)
# fc1 = tf.nn.dropout(fc1, 0.6)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['fc2']), biases['fc2'])
return x, y, weights, biases, out
def conv_relu(x, weight, bias, strides):
x = tf.nn.conv3d(x, weight, strides=strides, padding='SAME')
x = tf.nn.bias_add(x, bias)
x = tf.nn.relu(x)
return x
def mlpconv3d(x, weight, bias, strides):
x = tf.nn.conv3d(x, weight, strides=strides, padding='SAME')
x = tf.nn.bias_add(x, bias)
    # tf.nn.batch_normalization requires explicit statistics; use the batch moments.
    mean, variance = tf.nn.moments(x, axes=[0, 1, 2, 3])
    x = tf.nn.batch_normalization(x, mean, variance, None, None, 1e-5)
x = tf.nn.relu(x)
return x
def mlpconv2d(x, weight, bias, strides):
x = tf.nn.conv2d(x, weight, strides=strides, padding='SAME')
x = tf.nn.bias_add(x, bias)
    # tf.nn.batch_normalization requires explicit statistics; use the batch moments.
    mean, variance = tf.nn.moments(x, axes=[0, 1, 2])
    x = tf.nn.batch_normalization(x, mean, variance, None, None, 1e-5)
x = tf.nn.relu(x)
return x
def main(model, n_labels):
dl.prepare_train_val_data(train_filename, train_ratio=0.9)
# Construct model
x, y, weights, biases, pred = model(n_labels)
# Define loss and optimizer
with tf.name_scope('Loss'):
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
with tf.name_scope('Optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
with tf.name_scope('Accuracy'):
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
tf.summary.scalar("loss", cost)
tf.summary.scalar("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
        train_summary_writer = tf.summary.FileWriter(train_logs_path, graph=tf.get_default_graph())
        valid_summary_writer = tf.summary.FileWriter(valid_logs_path, graph=tf.get_default_graph())
step = 0
# Keep training until reach max iterations
batches_per_epoch = dl.n_train / batch_size
n_batches = batches_per_epoch * training_epochs
print('Number of batches {0}'.format(n_batches))
while step < n_batches:
batch_x, batch_y = dl.next_train_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc, summary = sess.run([cost, accuracy, merged_summary_op],
feed_dict={x: batch_x, y: batch_y})
train_summary_writer.add_summary(summary, step)
print("Batch " + str(step) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc))
valid_batch = 0
valid_cost = 0
valid_acc = 0
n_valid_batches = np.ceil(float(dl.n_valid) / batch_size)
while valid_batch < n_valid_batches:
valid_batch += 1
batch_x, batch_y = dl.next_valid_batch(batch_size)
valid_batch_cost, valid_batch_acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
valid_cost += valid_batch_cost * batch_x.shape[0] / dl.n_valid
valid_acc += valid_batch_acc * batch_x.shape[0] / dl.n_valid
valid_summary = tf.Summary()
valid_summary.value.add(tag="accuracy", simple_value=valid_acc)
valid_summary.value.add(tag="loss", simple_value=valid_cost)
valid_summary_writer.add_summary(valid_summary, step)
print("Validation Accuracy= " + "{:.5f}".format(valid_acc) + ', Loss=' + "{:.5f}".format(valid_cost))
step += 1
saver = tf.train.Saver()
save_path = saver.save(sess, model_path)
print("---Final model saved in file: " + save_path)
model_selector = {
'basic_vcnn': basic_vcnn,
'vgg': vgg
}
if __name__ == '__main__':
model_name = sys.argv[1]
train_filename = sys.argv[2]
n_labels = int(sys.argv[3])
model_name_with_metadata = model_name + '_' + train_filename.split('.')[0] + '_' \
+ str(dt.datetime.utcnow()).replace(' ', '_').split('.')[0]
train_logs_path = '/tmp/tensorflow_logs/' + model_name_with_metadata + '_train'
valid_logs_path = '/tmp/tensorflow_logs/' + model_name_with_metadata + '_valid'
model_path = model_name_with_metadata + '.ckpt'
main(model_selector[model_name], n_labels) | [
"danielsonjh@hotmail.com"
] | danielsonjh@hotmail.com |
653cfd2b1be8fc902956eb5f6815f3536a3a0514 | f2abb49ad725c9719ea48a543e23d625d536c4c2 | /plant_vs_zoomie_game_normal06.py | 6fb6add9c4b75e1fb9300ee61413f8e4e350c024 | [
"MIT"
] | permissive | ChengzhuLi/plantwarzombie | 0d5720ddca730694a187391fe8509ad4ea8503c8 | d9cb018d04df241846c49dbeabd52df688de5e5f | refs/heads/master | 2022-11-28T13:39:04.113319 | 2020-08-02T15:42:05 | 2020-08-02T15:44:04 | 284,490,770 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,905 | py | import time
import pygame
import os
from Bullet import Bullet
from FlagZombie import FlagZombie
from Peashooter import Peashooter
from SunFlower import SunFlower
from WallNut import WallNut
from Sun import Sun
from Zombie import Zombie
pygame.init()
backgd_size = (1200, 600)
screen = pygame.display.set_mode(backgd_size)
pygame.display.set_caption('plant_vs_zoomie')
bg_img_path = 'material/images/background1.jpg'
bg_img_obj = pygame.image.load(bg_img_path).convert_alpha()
sunFlowerImg = pygame.image.load('material/images/SunFlower_00.png').convert_alpha()
wallnutImg = pygame.image.load('material/images/WallNut_00.png').convert_alpha()
peashooterImg = pygame.image.load('material/images/Peashooter_00.png').convert_alpha()
# sunbank_img_path = 'material/images/SunBack.png'
# sunbank_img_obj = pygame.image.load(sunbank_img_path).convert_alpha()
sunbackImg = pygame.image.load('material/images/SeedBank.png').convert_alpha()
flower_seed = pygame.image.load("material/images/TwinSunflower.gif")
wallNut_seed = pygame.image.load("material/images/WallNut.gif")
peashooter_seed = pygame.image.load("material/images/Peashooter.gif")
text = '1000'
sun_font = pygame.font.SysFont('arial',20)
sun_num_surface = sun_font.render(text,True,(0,0,0))
# peashooter = Peashooter()
# sunflower = SunFlower()
# wallnut = WallNut()
# zombie = Zombie()
spriteGroup = pygame.sprite.Group()
# spriteGroup.add(peashooter)
# spriteGroup.add(sunflower)
# spriteGroup.add(wallnut)
# spriteGroup.add(zombie)
bulletGroup = pygame.sprite.Group()
zombieGroup = pygame.sprite.Group()
sunList = pygame.sprite.Group()
# sunList = []
clock = pygame.time.Clock()
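# Custom pygame timer events: suns and bullets are attempted every 1 s,
# regular zombies spawn every 5 s, flag zombies every 8 s.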
GEN_SUN_EVENT = pygame.USEREVENT + 1
pygame.time.set_timer(GEN_SUN_EVENT,1000)
GEN_BULLET_EVENT = pygame.USEREVENT + 2
pygame.time.set_timer(GEN_BULLET_EVENT,1000)
GEN_ZOMBIE_EVENT = pygame.USEREVENT + 3
pygame.time.set_timer(GEN_ZOMBIE_EVENT,5000)
GEN_FLAGZOMBIE_EVENT = pygame.USEREVENT + 4
pygame.time.set_timer(GEN_FLAGZOMBIE_EVENT,8000)
choose = 0
def main():
global text,choose
global sun_num_surface
running = True
index = 0
while running:
# if index >= 130:
# index = 0
clock.tick(20)
        # every 2 s spawn a sun
# if index % 40 == 0:
# sun = Sun(sunflower.rect)
# sunList.add(sun)
        # every 3 s spawn a bullet
# if index % 30 == 0:
# for sprite in spriteGroup:
# if isinstance(sprite, Peashooter):
# bullet = Bullet(sprite.rect, backgd_size)
# spriteGroup.add(bullet)
for bullet in bulletGroup:
for zombie in zombieGroup:
if pygame.sprite.collide_mask(bullet,zombie):
zombie.energy -= 1
bulletGroup.remove(bullet)
screen.blit(bg_img_obj,(0,0))
screen.blit(sunbackImg,(250,0))
screen.blit(sun_num_surface,(270,60))
screen.blit(flower_seed, (330, 10))
screen.blit(wallNut_seed, (380, 10))
screen.blit(peashooter_seed, (430, 10))
spriteGroup.update(index)
spriteGroup.draw(screen)
bulletGroup.update(index)
bulletGroup.draw(screen)
zombieGroup.update(index)
zombieGroup.draw(screen)
sunList.update(index)
sunList.draw(screen)
(x,y) = pygame.mouse.get_pos()
if choose == 1:
screen.blit(sunFlowerImg,(x,y))
elif choose == 2:
screen.blit(wallnutImg, (x, y))
elif choose == 3:
screen.blit(peashooterImg, (x, y))
index+=1
for event in pygame.event.get():
if event.type == GEN_FLAGZOMBIE_EVENT:
zombie = FlagZombie()
zombieGroup.add(zombie)
if event.type == GEN_ZOMBIE_EVENT:
zombie = Zombie()
zombieGroup.add(zombie)
if event.type == GEN_SUN_EVENT:
for sprite in spriteGroup:
if isinstance(sprite,SunFlower):
now = time.time()
if now - sprite.lasttime >= 5:
sun = Sun(sprite.rect)
sunList.add(sun)
sprite.lasttime = now
if event.type == GEN_BULLET_EVENT:
for sprite in spriteGroup:
if isinstance(sprite, Peashooter):
bullet = Bullet(sprite.rect, backgd_size)
bulletGroup.add(bullet)
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONDOWN:
pressed_key = pygame.mouse.get_pressed()
print(pressed_key)
if pressed_key[0] == 1:
pos = pygame.mouse.get_pos()
print(pos)
x,y = pos
if 330<=x<=380 and 10<=y<=80 and int(text) >= 50:
                    print('sunflower card selected')
choose = 1
elif 380<x<=430 and 10<=y<=80 and int(text) >= 50:
                    print('wall-nut card selected')
choose = 2
elif 430 < x <= 480 and 10 <= y <= 80 and int(text) >= 100:
                    print('peashooter card selected')
choose = 3
elif 250 < x < 1200 and 70<y<600:
                    # plant the selected plant at the click position
if choose == 1:
current_time = time.time()
sunFlower = SunFlower(current_time)
sunFlower.rect.top = y
sunFlower.rect.left = x
spriteGroup.add(sunFlower)
choose = 0
#扣除分数
text = int(text)
text -= 50
myfont = pygame.font.SysFont('arial',20)
sun_num_surface = myfont.render(str(text),True,(0,0,0))
elif choose == 2:
wallNut = WallNut()
wallNut.rect.top = y
wallNut.rect.left = x
spriteGroup.add(wallNut)
choose = 0
                        # deduct sun points
text = int(text)
text -= 50
myfont = pygame.font.SysFont('arial', 20)
sun_num_surface = myfont.render(str(text), True, (0, 0, 0))
elif choose == 3:
peashooter = Peashooter()
peashooter.rect.top = y
peashooter.rect.left = x
spriteGroup.add(peashooter)
choose = 0
                        # deduct sun points (the peashooter card costs 100, matching the check above)
                        text = int(text)
                        text -= 100
myfont = pygame.font.SysFont('arial', 20)
sun_num_surface = myfont.render(str(text), True, (0, 0, 0))
print('#########')
print(x,y)
pass
else:
pass
for sun in sunList:
if sun.rect.collidepoint(pos):
sunList.remove(sun)
text = str(int(text)+50)
sun_font = pygame.font.SysFont('arial', 20)
sun_num_surface = sun_font.render(text, True, (0, 0, 0))
pygame.display.update()
if __name__ == '__main__':
main()
| [
"3024836722@qq.com"
] | 3024836722@qq.com |
f098af0f8e7d94d9ea457ab1f56803ee2dca1723 | 29ac141e824d6f695ee52e8c4f4a006e581747f9 | /UI_Files/ComparisonUI.py | 92ebe8181f033de170e0cc5eeb97eaaff34bfb27 | [] | no_license | mhmohebbi/NMR-Pulse-Simulator | 344fa4b70c188610a1884858e25004d7a5bfe59e | 76a7a7c406fedf12602e83c7a425891b71fe467f | refs/heads/master | 2022-12-11T05:08:10.758049 | 2019-09-25T14:07:56 | 2019-09-25T14:07:56 | 210,232,116 | 8 | 0 | null | 2022-12-07T23:36:08 | 2019-09-23T00:17:21 | Python | UTF-8 | Python | false | false | 3,923 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ComparisonUI.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(525, 282)
Dialog.setAcceptDrops(False)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.tableWidget = QtGui.QTableWidget(Dialog)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.gridLayout.addWidget(self.tableWidget, 4, 0, 1, 3)
self.title_label = QtGui.QLabel(Dialog)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Calibri"))
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.title_label.setFont(font)
self.title_label.setAlignment(QtCore.Qt.AlignCenter)
self.title_label.setObjectName(_fromUtf8("title_label"))
self.gridLayout.addWidget(self.title_label, 0, 0, 1, 3)
self.pulse_comboBox = QtGui.QComboBox(Dialog)
self.pulse_comboBox.setObjectName(_fromUtf8("pulse_comboBox"))
self.gridLayout.addWidget(self.pulse_comboBox, 2, 1, 1, 1)
self.label_14 = QtGui.QLabel(Dialog)
self.label_14.setMaximumSize(QtCore.QSize(100, 16777215))
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout.addWidget(self.label_14, 2, 0, 1, 1)
self.pushButton_remove = QtGui.QPushButton(Dialog)
self.pushButton_remove.setEnabled(False)
self.pushButton_remove.setObjectName(_fromUtf8("pushButton_remove"))
self.gridLayout.addWidget(self.pushButton_remove, 2, 2, 1, 1)
self.toolButton_add = QtGui.QToolButton(Dialog)
self.toolButton_add.setObjectName(_fromUtf8("toolButton_add"))
self.gridLayout.addWidget(self.toolButton_add, 1, 0, 1, 1)
self.pushButton_cancel = QtGui.QPushButton(Dialog)
self.pushButton_cancel.setObjectName(_fromUtf8("pushButton_cancel"))
self.gridLayout.addWidget(self.pushButton_cancel, 5, 2, 1, 1)
self.pushButton_apply = QtGui.QPushButton(Dialog)
self.pushButton_apply.setObjectName(_fromUtf8("pushButton_apply"))
self.gridLayout.addWidget(self.pushButton_apply, 5, 1, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Comparing Pulses", None))
self.title_label.setText(_translate("Dialog", "Pulse Comparison", None))
self.label_14.setText(_translate("Dialog", "<html><head/><body><p align=\"right\"><span style=\" font-size:9pt; font-weight:600;\">Selected Pulse:</span></p></body></html>", None))
self.pushButton_remove.setText(_translate("Dialog", "Remove ", None))
self.toolButton_add.setText(_translate("Dialog", "Add CSV File Pulse +", None))
self.pushButton_cancel.setText(_translate("Dialog", "Cancel", None))
self.pushButton_apply.setText(_translate("Dialog", "Apply", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| [
"mhmohebbi@gmail.com"
] | mhmohebbi@gmail.com |
ee3b7e0043e5612c5d440d96e52d24761706606e | f83ac0c7abea19da35332d6d3cb31ecbc8a84a44 | /chap4/digraph.py | 67ae442ce18cab7e72f54c8bc2c9916f89187e53 | [] | no_license | fndjjx/algorithm | 63e532fd8eead8c2de382cf8fafee4b2a862ec8e | be9603a426294bd746948a8d115199e22482e099 | refs/heads/master | 2021-09-01T05:42:36.206868 | 2017-12-25T05:56:11 | 2017-12-25T05:56:11 | 103,631,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,229 | py | import better_exceptions
from collections import deque
class Digraph():
def __init__(self):
self.adj = {}
self.e = 0
def add_edge(self, v1, v2):
if v2 not in self.adj[v1]:
self.adj[v1].append(v2)
self.e += 1
def build_digraph(self, l):
for i in l:
if not i[0] in self.adj:
self.adj[i[0]] = []
self.add_edge(i[0],i[1])
def __str__(self):
return str(self.adj)
class DFS():
def __init__(self, graph):
self.g = graph
self.marked = []
self._path = {}
def dfs(self, start, end):
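        # Depth-first search from start, recording each visited vertex's
        # predecessor in self._path, then reconstruct the path to end.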
for i in self.g.adj[start]:
self.marked.append(i)
self._path[i] = start
self._dfs(i)
return self.path(start, end)
def _dfs(self, v):
if v in self.g.adj:
for i in self.g.adj[v]:
if not i in self.marked:
self.marked.append(i)
self._path[i] = v
self._dfs(i)
def path(self, start, end):
if not end in self._path:
return None
result = [end]
v = end
count = 0
while v != start or count==0:
result.append(self._path[v])
v = self._path[v]
count += 1
result = result[::-1]
return result
class BFS():
def __init__(self, graph):
self.g = graph
self._path = {}
def bfs(self, start, end):
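        # Queue-based BFS; self._path records each vertex's parent in the BFS tree.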
q = deque()
marked = []
q.append(start)
while len(q)!=0:
v = q.popleft()
marked.append(v)
if v in self.g.adj:
for i in self.g.adj[v]:
if not i in marked:
marked.append(i)
q.append(i)
self._path[i] = v
print(self._path)
return self.path(start, end)
def path(self, start, end):
if not end in self._path:
return None
result = [end]
v = end
count = 0
while v != start or count==0:
result.append(self._path[v])
v = self._path[v]
count += 1
result = result[::-1]
return result
class Topo():
def __init__(self, graph):
self.g = graph
self.marked = []
self.pre = []
self.post = []
def dfs(self, v):
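        # Record vertices pre-order on entry and post-order on exit; the
        # reverse post-order is a topological sort for a DAG.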
self.pre.append(v)
if v in self.g.adj:
for i in self.g.adj[v]:
if not i in self.marked:
self.marked.append(i)
self.dfs(i)
self.post.append(v)
def pre_order(self):
return self.pre
def post_order(self):
return self.post
def reverse_post_order(self):
return self.post[::-1]
if __name__=="__main__":
l=[[0,1],[0,2],[1,3],[2,1],[2,4]]
dg = Digraph()
dg.build_digraph(l)
print(dg)
mydfs = DFS(dg)
print(mydfs.dfs(0,1))
mybfs = BFS(dg)
print(mybfs.bfs(0,1))
mytopo = Topo(dg)
mytopo.dfs(0)
print(mytopo.pre_order())
print(mytopo.post_order())
print(mytopo.reverse_post_order())
| [
"ly"
] | ly |
e458a885b05dd69438155b6c77e5ae707286f017 | 8e81c34c58e1ae10d905fc67308e1ccef1bf83de | /src/SJ_tags_generator_for_micro_exons.py | 3cdbe77a11aec305920c1f94366d8ed0d49fe6d3 | [] | no_license | geparada/Micro-exonator | 6ae2643c877f9c3031bcdf878372258a4be2dd86 | 61ba9db05df547d363f591b61139a99a64393ff6 | refs/heads/master | 2023-03-21T09:19:53.839777 | 2021-03-19T17:01:03 | 2021-03-19T17:01:03 | 115,053,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,587 | py | import sys
import csv
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from random import randint, sample
from operator import itemgetter
from collections import defaultdict
from operator import itemgetter
Transcriptome = {}
Genome = {}
def Genomictabulator(fasta):
print >> sys.stderr, "Cargando genoma en la memoria RAM ...",
f = open(fasta)
for chrfa in SeqIO.parse(f, "fasta"):
Genome[chrfa.id] = chrfa.seq
print >> sys.stderr, "OK"
f.close()
def Transcriptometabulator(genecode_fasta):
print >> sys.stderr, "Cargando a fasta en la ram ...",
for record in SeqIO.parse(genecode_fasta, "fasta"):
id = str(record.id).split("|")[0]
Transcriptome[id] = record.seq
print >> sys.stderr, "OK"
def main(bed12, ME_len):
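    # Walk each BED12 alignment; for every internal splice junction emit up to
    # 2n bp of transcript sequence centered on the junction, then keep the tag
    # with the widest flanks per intron and print it as FASTA.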
n = 100
transcript_intron_info = defaultdict(list)
min_intron_lenght = 80
for row in csv.reader(open(bed12), delimiter = '\t'):
try:
qName = row[3]
seq = Transcriptome[qName]
qstarts = map (int, row[11].strip(",").split(","))
blocksizes = map(int, row[10].strip(",").split(","))
start = int(row[1])
strand = row[5]
bn = int(row[9])
chr = row[0]
qstart = 0
for q1, q2, b, b2 in zip(qstarts, qstarts[1:], blocksizes, blocksizes[1:]):
qstart = qstart + b
tag_start = qstart - n
tag_end = qstart + n
#if tag_start <= 0:
# print tag_start, qstart, tag_end, strand
istart = start + q1 + b
iend = start + q2
ilen = iend - istart
                intron = chr + ":" + str(istart) + strand + str(iend)
ilength = iend - istart
block_up = n
block_down = n
dn = str(Genome[chr][istart:(istart+2)] + Genome[chr][(iend-2):iend]).upper()
                if strand == '+' :  # reads aligned to the + strand
                    if tag_start<0:  # keep the first and last tags inside the transcript
tag_start = 0
block_up = qstart
if tag_end>len(seq):
tag_end=len(seq)
block_down = tag_end - qstart
tag = seq[tag_start:tag_end]
if strand == '-' :
dn = str((Genome[chr][istart:(istart+2)] + Genome[chr][(iend-2):iend]).reverse_complement()).upper()
                    if tag_end>len(seq):  # on the - strand everything is mirrored
tag_end=len(seq)
block_up = tag_end - qstart
tag = seq[-tag_end:-tag_start]
if tag_start<=0:
tag = seq[-tag_end:]
block_down = qstart
                if b > ME_len and b2 > ME_len and ilength >= min_intron_lenght and (dn=="GTAG" or dn=="GCAG" or dn=="ATAC"): # micro-exon filter: both flanking blocks must exceed ME_len and the intron must be canonical
info = qName, tag, chr, istart, iend, strand, block_up, block_down, block_up + block_down
transcript_intron_info[intron].append(info)
except KeyError:
pass
for i in transcript_intron_info.items():
infos = i[1]
intron = i[0]
qName, tag, chr, istart, iend, strand, block_up, block_down, sum_blocks = max(infos, key=itemgetter(8))
ID = ">" + intron + "|" + qName + "|" + str(block_up) + "_" + str(block_down)
print ID
print tag
#>chr12:3701518+3702264|ENST00000562877.1|100_19
#AGCTTTCTGTTTAGTTGTGTCAATCGCAGGCCACTCTGCTGAGCATCTTCTCCCAGGAGTACCAGAAACACATTAAAAGAACACATGCCAAACATCATACTTCGGAAGCAATTGAAAGT
if __name__ == '__main__':
Genomictabulator(sys.argv[1])
Transcriptometabulator(sys.argv[2])
main (sys.argv[3], sys.argv[4])
# The canonical-intron (splice-site) filter was added afterwards
| [
"noreply@github.com"
] | geparada.noreply@github.com |
2dbad905fc1dfaab15d2977401a6a3158e1b4fb2 | eff5e35c81567d11431a8757c4cef9f8aafa7883 | /mcuuid.py | 669cc595a3254a75411906000fb8b8b1ca564255 | [
"MIT"
] | permissive | Mathtin/mc-discord-bot-old | 775e0a1098df657685a7f5ce4f67b048af5440f1 | 51713c257722bbe5a825a8351793d7b49644b1f4 | refs/heads/master | 2023-01-14T07:46:52.618500 | 2020-11-24T16:46:00 | 2020-11-24T16:46:00 | 295,772,006 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,563 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
###################################################
#........../\./\...___......|\.|..../...\.........#
#........./..|..\/\.|.|_|._.|.\|....|.c.|.........#
#......../....../--\|.|.|.|i|..|....\.../.........#
# Mathtin (c) #
###################################################
# Author: Daniel [Mathtin] Shiko #
# Copyright (c) 2020 <wdaniil@mail.ru> #
# This file is released under the MIT license. #
###################################################
""" Username to UUID
Converts a Minecraft username to it's UUID equivalent.
Uses the official Mojang API to fetch player data.
"""
### Import necessary modules
import http.client
import json
from uuid import UUID
def is_valid_minecraft_username(username):
"""https://help.mojang.com/customer/portal/articles/928638-minecraft-usernames"""
allowed_chars = 'abcdefghijklmnopqrstuvwxyz1234567890_'
allowed_len = [3, 16]
username = username.lower()
if len(username) < allowed_len[0] or len(username) > allowed_len[1]:
return False
for char in username:
if char not in allowed_chars:
return False
return True
def is_valid_mojang_uuid(uuid):
"""https://minecraft-de.gamepedia.com/UUID"""
allowed_chars = '0123456789abcdef'
allowed_len = 32
uuid = uuid.lower()
    if len(uuid) != allowed_len:
return False
for char in uuid:
if char not in allowed_chars:
return False
return True
### Main class
class GetPlayerData:
def __init__(self, identifier, timestamp=None):
self.valid = True
"""
Get the UUID of the player.
Parameters
----------
username: string
The known minecraft username
timestamp : long integer (optional)
The time at which the player used this name, expressed as a Unix timestamp.
"""
# Handle the timestamp
get_args = ""
if timestamp is not None:
get_args = "?at=" + str(timestamp)
# Build the request path based on the identifier
req = ""
if is_valid_minecraft_username(identifier):
req = "/users/profiles/minecraft/" + identifier + get_args
elif is_valid_mojang_uuid(identifier):
req = "/user/profiles/" + identifier + "/names" + get_args
else:
self.valid = False
# Proceed only, when the identifier was valid
if self.valid:
# Request the player data
            http_conn = http.client.HTTPSConnection("api.mojang.com")
            http_conn.request("GET", req,
                              headers={'User-Agent': 'https://github.com/clerie/mcuuid',
                                       'Content-Type': 'application/json'})
response = http_conn.getresponse().read().decode("utf-8")
            # In case the answer is empty, the user doesn't exist
if not response:
self.valid = False
# If there is an answer, fill out the variables
else:
# Parse the JSON
json_data = json.loads(response)
### Handle the response of the different requests on different ways
# Request using username
if is_valid_minecraft_username(identifier):
# The UUID
self.uuid = json_data['id']
# The username written correctly
self.username = json_data['name']
#Request using UUID
elif is_valid_mojang_uuid(identifier):
# The UUID
self.uuid = identifier
current_name = ""
current_time = 0
# Getting the username based on timestamp
for name in json_data:
# Prepare the JSON
# The first name has no change time
if 'changedToAt' not in name:
name['changedToAt'] = 0
# Get the right name on timestamp
if current_time <= name['changedToAt'] and (timestamp is None or name['changedToAt'] <= timestamp):
current_time = name['changedToAt']
current_name = name['name']
# The username written correctly
self.username = current_name
self.uuid = UUID(self.uuid)
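
# Example usage (sketch, assuming the player exists on Mojang's side):
#
#     player = GetPlayerData("Notch")
#     if player.valid:
#         print(player.username, player.uuid)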
| [
"wdaniil@mail.ru"
] | wdaniil@mail.ru |
f84104be2dd9a64cbb29620a7afa13eed126eb9b | f35d3f13510b399034a8ead7126bd7e42b1dd125 | /x_fizzbuzz_test.py | 43aac2d9a80a7f46ea48de990312b5a54f12f2db | [] | no_license | pwon7862/assign | 9bfafd52a5277e6b634eedd89acc3dc4160f3df1 | ec97aa430f2cfd0cd3e85dd16a89339114b7368d | refs/heads/master | 2021-04-26T22:18:05.773529 | 2018-03-06T11:03:23 | 2018-03-06T11:03:23 | 124,065,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
import x_fizzbuzz
import unittest
class ImsiTestCase(unittest.TestCase):
    def test_fizzbuzz(self):
self.assertEqual(x_fizzbuzz.fizzbuzz(10), [1, 2, 'fizz', 4, 'buzz', 'fizz', 7, 8, 'fizz', 'buzz'])
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"pwon7862@gmail.com"
] | pwon7862@gmail.com |
cd723aa81613a45f7719f5fced953ff4d86444e3 | 8a59923be6a3647028a3c63cb4a0f47b264d7968 | /fidal/iex/stock/dividends.py | f96a5b2abebf36bdc43fbec76522c1ab29c44808 | [] | no_license | jbussdieker/fidal | 31cf9fbd59176bdee3f31a895ec91f147de4a446 | 252e5d83e1a18d410d37e056beef748a29d54ade | refs/heads/master | 2023-02-19T07:43:21.707286 | 2021-01-22T11:38:03 | 2021-01-22T11:38:03 | 327,143,904 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | from fidal.iex.client import client
def fetch(symbols):
return client.request("stock/market/dividends/5y", {"symbols": symbols})
| [
"jbussdieker@gmail.com"
] | jbussdieker@gmail.com |
729d563fe3a5ca235cc2965875ff1be41b1a267d | 546df14bcaed7fec38b577743a27a56dcc6befe3 | /HighlyDivisibleTriangleNumber.py | a77a591ae8d4e4b878a0fc9f76d6a93e01cc6c23 | [] | no_license | KevinMOGrady/project-euler | 03ba13a820ac6ec034ad73ad651bd3be8a4d0ff9 | 499a16abcccdc41f47fb91c74b9e50a2572f4683 | refs/heads/master | 2021-07-20T11:10:32.824692 | 2017-10-27T00:06:01 | 2017-10-27T00:06:01 | 106,835,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | def least_factor(Number, MaxFactor, Start=2):
# MaxFactor = int(n**0.5)
# if Start == 2:
# if Number % 2 > 0:
# Start = 3
for i in range(Start, MaxFactor + 1):
if Number % i == 0:
return i
return Number
def factors_greater_than(Number, AmountGreaterThan):
    # Count divisors in pairs (d, Number // d), stopping early once the
    # count exceeds AmountGreaterThan.
    NextFactor = 1
    MaxFactor = int(Number**0.5)
    TotalFactors = 0
    while NextFactor <= MaxFactor:
        NextFactor = least_factor(Number, MaxFactor, NextFactor)
        if NextFactor <= MaxFactor:
            if NextFactor * NextFactor == Number:
                TotalFactors += 1  # perfect square: the divisor pairs with itself
            else:
                TotalFactors += 2  # counts both d and Number // d
        if TotalFactors > AmountGreaterThan:
            return True
        NextFactor += 1
    return False
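
# Sanity check (sketch): 28 = 1+2+3+4+5+6+7 is the first triangle number
# with more than five divisors (1, 2, 4, 7, 14, 28).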
Triangle = 0
NextGrowth = 1
AmountGreaterThan = 500
Found = False
while not Found:
Triangle += NextGrowth
NextGrowth += 1
Found = factors_greater_than(Triangle, AmountGreaterThan)
print(Triangle) | [
"kevin.ogradyk@gmail.com"
] | kevin.ogradyk@gmail.com |
f23efdd9461f23170937db5508b773037f226837 | 61d4cd614048d38243b8c8537b3a2d2ba4dbb3ba | /visualHelper.py | ee5af449deff105fe01e2bfb164e5073d298e74d | [
"MIT"
] | permissive | YupingLin171/PostAvgDefense | 08188d91145f15ec403811b1c7ad1da3db7391d6 | 23832afb7bb6127a9571738f20f0e3dfd3935697 | refs/heads/master | 2020-07-26T10:53:33.724202 | 2019-09-27T13:54:25 | 2019-09-27T13:54:25 | 208,621,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,471 | py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import numpy as np
import matplotlib; matplotlib.use('agg')
import matplotlib.pyplot as plt
def plotPredStats(feats, lb, K=10, image=None, noiseImage=None, savePath=None):
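    """Plot, for one sample, the vote distribution over per-pass predictions,
    the top-K averaged class scores, and (optionally) the input/noise images.
    """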
# score by averaging
scores = torch.mean(feats, dim=0)
# sort and select the top K scores
hScores, hCates = torch.sort(scores, dim=0, descending=True)
hScores = hScores[:K].numpy()
hCates = hCates[:K].numpy()
# get individual preditions
_, preds = torch.max(feats, dim=1)
# count votes
preds_count = {lb: 0}
for i in range(feats.size(0)):
if preds[i].item() in preds_count:
preds_count[preds[i].item()] = preds_count[preds[i].item()] + 1
else:
preds_count[preds[i].item()] = 1
candidates = sorted(preds_count.keys())
votes = [preds_count[x] for x in candidates]
# generate figure
fig = plt.figure()
if image is None and noiseImage is None:
ax1, ax2, ax3 = fig.subplots(3, 1)
else:
axes = fig.subplots(2, 2)
ax1 = axes[0, 0]
ax2 = axes[1, 0]
ax3 = axes[0, 1]
ax4 = axes[1, 1]
# chart 1, votes distribution
inx1 = list(range(len(candidates)))
clr1 = []
for i in inx1:
if candidates[i] == lb:
clr1.append('Red')
else:
clr1.append('SkyBlue')
rects1 = ax1.bar(inx1, votes, color=clr1)
for rect in rects1:
h = rect.get_height()
ax1.text(rect.get_x() + 0.5 * rect.get_width(), 1.01 * h, '{}'.format(h), ha='center', va='bottom')
ax1.set_ylim(top=1.1 * ax1.get_ylim()[1])
ax1.set_xticks(inx1)
ax1.set_xticklabels([str(x) for x in candidates], rotation=30)
ax1.set_ylabel('votes')
ax1.set_title('Votes Distribution')
# chart 2, top prediction scores
inx2 = list(range(len(hCates)))
clr2 = []
for i in inx2:
if hCates[i] == lb:
clr2.append('Red')
else:
clr2.append('SkyBlue')
rects2 = ax2.bar(inx2, hScores, color=clr2)
for rect in rects2:
h = rect.get_height()
ax2.text(rect.get_x() + 0.5 * rect.get_width(), 1.01 * h, '{:.2f}'.format(h), ha='center', va='bottom')
ax2.set_ylim(top=1.1 * ax2.get_ylim()[1])
ax2.set_xticks(inx2)
ax2.set_xticklabels([str(x) for x in hCates], rotation=30)
ax2.set_ylabel('score')
ax2.set_xlabel('Top Predictions')
# axis 3, the noise image
if noiseImage is not None:
ax3.imshow(noiseImage)
ax3.set_xlabel('Noise Image')
ax3.set_axis_off()
else:
# if noise image is not given, show prediction event plot
clr3 = []
for i in range(preds.size(0)):
if preds[i] == lb:
clr3.append('Red')
else:
clr3.append('Green')
ax3.eventplot(preds.unsqueeze(1).numpy(), orientation='vertical', colors=clr3)
ax3.set_yticks(candidates)
ax3.set_yticklabels([str(x) for x in candidates])
ax3.set_xlabel('sample index')
ax3.set_ylabel('class')
# axis 4, the input image
if image is not None:
ax4.imshow(image)
ax4.set_title('Input Image')
ax4.set_axis_off()
# save figure and close
if savePath is not None:
fig.savefig(savePath)
plt.close(fig)
def plotPerturbationDistribution(perturbations, savePath=None):
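    """Plot (distance, defensed-flag) pairs as a scatter chart plus separate
    histograms for defensed and missed adversarial samples."""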
# generate figure
fig = plt.figure()
ax1, ax2, ax3 = fig.subplots(3, 1)
# plot scatter chart
perts = np.asarray(perturbations)
ax1.scatter(perts[:, 0], perts[:, 1], c='SkyBlue')
ax1.autoscale(axis='x')
ax1.set_ylim((-1, 2))
ax1.set_yticks([0, 1])
ax1.set_yticklabels(['missed', 'defensed'])
ax1.set_xlabel('Perturbation distance')
ax1.set_title('Perturbations Distribution')
# plot bin chart for defensed adversarial samples
x = [e[0] for e in perturbations if e[1] == 1]
ax2.hist(x, bins=20, color='SkyBlue')
ax2.set_xlabel('Perturbation distance')
    ax2.set_ylabel('Defensed')
# plot bin chart for missed adversarial samples
x = [e[0] for e in perturbations if e[1] == 0]
ax3.hist(x, bins=20, color='Red')
ax3.set_xlabel('Perturbation distance')
ax3.set_ylabel('Missed')
# save figure and close
if savePath is not None:
fig.savefig(savePath)
plt.close(fig)
| [
"55327259+YupingLin171@users.noreply.github.com"
] | 55327259+YupingLin171@users.noreply.github.com |
98d5754c4c88776844fa76b73fa795285ed48126 | 2c7f99ff86d1786d133df13a630d62e7dcc63fab | /google/cloud/dialogflow_v2/services/contexts/transports/__init__.py | 7437034e4e22ca443771790d90c3e018528c41d0 | [
"Apache-2.0"
] | permissive | rlindao/python-dialogflow | 2141b7181506210c6cfffb27bb9599ad21261c28 | 8958e562bb159b00bb1fc0fa97e5ffd35dea058d | refs/heads/master | 2023-04-06T15:09:14.888871 | 2021-04-16T21:34:24 | 2021-04-16T21:34:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,142 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ContextsTransport
from .grpc import ContextsGrpcTransport
from .grpc_asyncio import ContextsGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ContextsTransport]]
_transport_registry["grpc"] = ContextsGrpcTransport
_transport_registry["grpc_asyncio"] = ContextsGrpcAsyncIOTransport
__all__ = (
"ContextsTransport",
"ContextsGrpcTransport",
"ContextsGrpcAsyncIOTransport",
)
| [
"noreply@github.com"
] | rlindao.noreply@github.com |
88bac48340ea40b37dfeee5289425f10b7f14705 | 288a00d2ab34cba6c389b8c2444455aee55a8a95 | /tests/data23/recipe-302746.py | 89c4cb749221fd1a5df78063c067ae6d238e7418 | [
"BSD-2-Clause"
] | permissive | JohannesBuchner/pystrict3 | ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb | 18b0dd369082422f9bf0f89c72e7acb53a49849c | refs/heads/master | 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | import threading,queue,time,sys,traceback
#Globals (start with a captial letter)
Qin = queue.Queue()
Qout = queue.Queue()
Qerr = queue.Queue()
Pool = []
def err_msg():
trace= sys.exc_info()[2]
try:
exc_value=str(sys.exc_info()[1])
except:
exc_value=''
return str(traceback.format_tb(trace)),str(sys.exc_info()[0]),exc_value
def get_errors():
try:
while 1:
yield Qerr.get_nowait()
except queue.Empty:
pass
def process_queue():
flag='ok'
while flag !='stop':
try:
flag,item=Qin.get() #will wait here!
if flag=='ok':
newdata='new'+item
Qout.put(newdata)
except:
Qerr.put(err_msg())
def start_threads(amount=5):
for i in range(amount):
thread = threading.Thread(target=process_queue)
thread.start()
Pool.append(thread)
def put(data,flag='ok'):
Qin.put([flag,data])
def get(): return Qout.get() #will wait here!
def get_all():
try:
while 1:
yield Qout.get_nowait()
except queue.Empty:
pass
def stop_threads():
for i in range(len(Pool)):
Qin.put(('stop',None))
while Pool:
time.sleep(1)
for index,the_thread in enumerate(Pool):
            if the_thread.is_alive():  # isAlive() was removed in Python 3.9
continue
else:
del Pool[index]
break
#STANDARD use:
for i in ('b','c'): put(i)
start_threads()
stop_threads()
for i in get_all(): print(i)
for i in get_errors(): print(i)
#POOL use
#put element into input queue
put('a')
#setup threads -- will run forever as a pool until you shutdown
start_threads()
for i in ('b','c'): put(i)
#get an element from output queue
print(get())
#put even more data in, 7 causes an error
for i in ('d','e',7): put(i)
#get whatever is available
for i in get_all(): print(i)
#stop_threads only returns when all threads have stopped
stop_threads()
print('__threads finished last data available__')
for i in get_all(): print(i)
for i in get_errors(): print(i)
#starting up threads again
start_threads()
put('f')
stop_threads()
print('__threads finished(again) last data available__')
for i in get_all(): print(i)
for i in get_errors(): print(i)
| [
"johannes.buchner.acad@gmx.com"
] | johannes.buchner.acad@gmx.com |
90d991c2151643d0cfc01edd4add8fce17ee035f | 42bcfefe2344f99758bede6df580f954eac02f87 | /src/scribimus/wsgi.py | 4a0b22c2f38f8c8e6cbeadb45c0ba682f18cb6e1 | [] | no_license | andarms/scribimus | 847e88caeaea6870a677af47dc58c191ddaed0e7 | 3fce3180cca008faa67c541305d79dfb695bf04a | refs/heads/master | 2021-01-10T13:48:52.805850 | 2015-10-16T21:03:46 | 2015-10-16T21:03:46 | 44,364,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for scribimus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scribimus.settings")
application = get_wsgi_application()
| [
"andarms@outlook.com"
] | andarms@outlook.com |
0b43f74a50035848a159103ea7ad7c78398ae86a | 4c07bf87f2a5b7d1574e16b0c521d37bc2875d65 | /backend/api/api_base.py | 483855fe06334ae49e4b383c6a5822c3a018a426 | [] | no_license | analysiscenter/ecg_demo | 82b15cb2714394a74873912fc2cf7c340273ab29 | dd07c21581004bea1fc239ea83f9be62d7cc5ff6 | refs/heads/master | 2021-09-14T15:09:20.912498 | 2018-05-15T11:51:44 | 2018-05-15T11:51:44 | 132,882,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | import logging
from flask import request
from flask_socketio import Namespace
class BaseNamespace(Namespace):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger("server." + __name__)
def on_connect(self):
self.logger.info("User connected {}".format(request.sid))
def on_disconnect(self):
self.logger.info("User disconnected {}".format(request.sid))
def _safe_call(self, method, data, meta, event_in, event_out=None):
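        """Run an event handler, emit the optional response event back to the
        calling socket, and report any exception as an ERROR event."""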
self.logger.info("Handling event {}. Data: {}. Meta: {}.".format(event_in, data, meta))
try:
payload = method(data, meta)
if event_out is not None:
self.emit(event_out, payload, room=request.sid)
self.logger.info("Sending response {}. Meta: {}".format(event_out, meta))
except Exception as error:
self.emit("ERROR", str(error), room=request.sid)
self.logger.exception(error)
| [
"a.kuvaev@analysiscenter.ru"
] | a.kuvaev@analysiscenter.ru |
fc94b528bc0821b1e139a849d280ad3baa3fdc1f | 27f28d43f7ae6a2d6ef91309229b73a2b7fae8f4 | /src/pytkdocs/serializer.py | 6502a340c31d9e9e867cc65dc9d22cf21c28c918 | [
"ISC"
] | permissive | art049/pytkdocs | 5d7ca6e239344424a323223c9a0abed54aae0022 | 229d7d20fa7c8908f26035313049534339ae61af | refs/heads/master | 2022-12-12T11:41:38.783618 | 2020-09-08T15:43:03 | 2020-09-08T15:43:03 | 295,542,549 | 0 | 0 | ISC | 2020-09-14T21:26:22 | 2020-09-14T21:26:21 | null | UTF-8 | Python | false | false | 7,753 | py | """
This module defines function to serialize objects.
These functions simply take objects as parameters and return dictionaries that can be dumped by `json.dumps`.
"""
import inspect
import re
from typing import Any, Optional, Pattern
from pytkdocs.objects import Object, Source
from pytkdocs.parsers.docstrings.base import AnnotatedObject, Attribute, Parameter, Section
try:
from typing import GenericMeta # python 3.6
except ImportError:
# in 3.7, GenericMeta doesn't exist but we don't need it
class GenericMeta(type): # type: ignore
"""GenericMeta type."""
RE_OPTIONAL: Pattern = re.compile(r"Union\[(.+), NoneType\]")
"""Regular expression to match optional annotations of the form `Union[T, NoneType]`."""
RE_FORWARD_REF: Pattern = re.compile(r"_?ForwardRef\('([^']+)'\)")
"""Regular expression to match forward-reference annotations of the form `_ForwardRef('T')`."""
def rebuild_optional(matched_group: str) -> str:
"""
Rebuild `Union[T, None]` as `Optional[T]`.
Arguments:
matched_group: The matched group when matching against a regular expression (by the parent caller).
Returns:
The rebuilt type string.
"""
brackets_level = 0
for char in matched_group:
if char == "," and brackets_level == 0:
return f"Union[{matched_group}]"
elif char == "[":
brackets_level += 1
elif char == "]":
brackets_level -= 1
return matched_group
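
## Example (sketch): for "Union[int, NoneType]" the matched group is "int",
## returned as-is and wrapped into "Optional[int]" by the caller; for
## "Union[int, str, NoneType]" the group "int, str" has a top-level comma,
## so it is rebuilt as "Union[int, str]" -> "Optional[Union[int, str]]".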
def annotation_to_string(annotation: Any) -> str:
"""
Return an annotation as a string.
Arguments:
annotation: The annotation to return as a string.
Returns:
The annotation as a string.
"""
if annotation is inspect.Signature.empty:
return ""
if inspect.isclass(annotation) and not isinstance(annotation, GenericMeta):
string = annotation.__name__
else:
string = str(annotation).replace("typing.", "")
string = RE_FORWARD_REF.sub(lambda match: match.group(1), string)
string = RE_OPTIONAL.sub(lambda match: f"Optional[{rebuild_optional(match.group(1))}]", string)
return string
def serialize_annotated_object(obj: AnnotatedObject) -> dict:
"""
Serialize an instance of [`AnnotatedObject`][pytkdocs.parsers.docstrings.base.AnnotatedObject].
Arguments:
obj: The object to serialize.
Returns:
A JSON-serializable dictionary.
"""
return {"description": obj.description, "annotation": annotation_to_string(obj.annotation)}
def serialize_attribute(attribute: Attribute) -> dict:
"""
Serialize an instance of [`Attribute`][pytkdocs.parsers.docstrings.base.Attribute].
Arguments:
attribute: The attribute to serialize.
Returns:
A JSON-serializable dictionary.
"""
return {
"name": attribute.name,
"description": attribute.description,
"annotation": annotation_to_string(attribute.annotation),
}
def serialize_parameter(parameter: Parameter) -> dict:
"""
Serialize an instance of [`Parameter`][pytkdocs.parsers.docstrings.base.Parameter].
Arguments:
parameter: The parameter to serialize.
Returns:
A JSON-serializable dictionary.
"""
serialized = serialize_annotated_object(parameter)
serialized.update(
{
"name": parameter.name,
"kind": str(parameter.kind),
"default": parameter.default_string,
"is_optional": parameter.is_optional,
"is_required": parameter.is_required,
"is_args": parameter.is_args,
"is_kwargs": parameter.is_kwargs,
}
)
return serialized
def serialize_signature_parameter(parameter: inspect.Parameter) -> dict:
"""
Serialize an instance of `inspect.Parameter`.
Arguments:
parameter: The parameter to serialize.
Returns:
A JSON-serializable dictionary.
"""
serialized = {"kind": str(parameter.kind), "name": parameter.name}
if parameter.annotation is not parameter.empty:
serialized["annotation"] = annotation_to_string(parameter.annotation)
if parameter.default is not parameter.empty:
serialized["default"] = repr(parameter.default)
return serialized
def serialize_signature(signature: inspect.Signature) -> dict:
"""
Serialize an instance of `inspect.Signature`.
Arguments:
signature: The signature to serialize.
Returns:
A JSON-serializable dictionary.
"""
if signature is None:
return {}
serialized: dict = {
"parameters": [serialize_signature_parameter(value) for name, value in signature.parameters.items()]
}
if signature.return_annotation is not inspect.Signature.empty:
serialized["return_annotation"] = annotation_to_string(signature.return_annotation)
return serialized
def serialize_docstring_section(section: Section) -> dict:
"""
Serialize an instance of `inspect.Signature`.
Arguments:
section: The section to serialize.
Returns:
A JSON-serializable dictionary.
"""
serialized = {"type": section.type}
if section.type == section.Type.MARKDOWN:
serialized.update({"value": section.value}) # type: ignore
elif section.type == section.Type.RETURN:
serialized.update({"value": serialize_annotated_object(section.value)}) # type: ignore
elif section.type == section.Type.EXCEPTIONS:
serialized.update({"value": [serialize_annotated_object(e) for e in section.value]}) # type: ignore
elif section.type == section.Type.PARAMETERS:
serialized.update({"value": [serialize_parameter(p) for p in section.value]}) # type: ignore
elif section.type == section.Type.ATTRIBUTES:
serialized.update({"value": [serialize_attribute(p) for p in section.value]}) # type: ignore
elif section.type == section.Type.EXAMPLES:
serialized.update({"value": section.value}) # type: ignore
return serialized
def serialize_source(source: Optional[Source]) -> dict:
"""
Serialize an instance of [`Source`][pytkdocs.objects.Source].
Arguments:
source: The source to serialize.
Returns:
A JSON-serializable dictionary.
"""
if source:
return {"code": source.code, "line_start": source.line_start}
return {}
def serialize_object(obj: Object) -> dict:
"""
Serialize an instance of a subclass of [`Object`][pytkdocs.objects.Object].
Arguments:
obj: The object to serialize.
Returns:
A JSON-serializable dictionary.
"""
serialized = {
"name": obj.name,
"path": obj.path,
"category": obj.category,
"file_path": obj.file_path,
"relative_file_path": obj.relative_file_path,
"properties": sorted(set(obj.properties + obj.name_properties)),
"parent_path": obj.parent_path,
"has_contents": obj.has_contents(),
"docstring": obj.docstring,
"docstring_sections": [serialize_docstring_section(s) for s in obj.docstring_sections],
"source": serialize_source(obj.source),
"children": {child.path: serialize_object(child) for child in obj.children},
"attributes": [o.path for o in obj.attributes],
"methods": [o.path for o in obj.methods],
"functions": [o.path for o in obj.functions],
"modules": [o.path for o in obj.modules],
"classes": [o.path for o in obj.classes],
}
if hasattr(obj, "type"):
serialized["type"] = annotation_to_string(obj.type) # type: ignore
if hasattr(obj, "signature"):
serialized["signature"] = serialize_signature(obj.signature) # type: ignore
return serialized
| [
"pawamoy@pm.me"
] | pawamoy@pm.me |
2dfade098a68cbf3949fa94771706c9d6b8b7245 | b6abccfa53efaf86bb06e1d4c8d2321bbda60145 | /dqn_agent.py | dab00455d2f3bda4523a0e87fc8436b435796d74 | [] | no_license | gthm1/Project-1_Navigation-DQN | 7d367c5f6d5134aeba4dd039a3bf13a069c4ca1c | a87a3e098f98b92bcfebfbe6af6da2c0997bf3dc | refs/heads/master | 2022-12-17T11:33:36.164803 | 2020-09-10T15:05:44 | 2020-09-10T15:05:44 | 292,557,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,358 | py | import numpy as np
import random
from collections import namedtuple, deque
from model import QNetwork
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5)
BATCH_SIZE = 64
GAMMA = 0.99
TAU = 1e-3
LR = 5e-4
UPDATE_EVERY = 4
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
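    """Interacts with and learns from the environment."""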
def __init__(self, state_size, action_size, seed):
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(self, state, action, reward, next_state, done):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, eps=0.):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
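        # TD target: r + gamma * max_a' Q_target(s', a'); the (1 - dones)
        # factor zeroes the bootstrap term for terminal transitions.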
Q_targets_next = self.qnetwork_target(next_states).detach().max(1)[0].unsqueeze(1)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
Q_expected = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(Q_expected, Q_targets)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model, target_model, tau):
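        """Soft-update target model parameters:
        theta_target = tau * theta_local + (1 - tau) * theta_target
        """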
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
action_size (int): dimension of each action
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
seed (int): random seed
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory) | [
"noreply@github.com"
] | gthm1.noreply@github.com |
1e8a4634e4de9c5b1b83e5c2eb817f7103413800 | 694e560b8436edf4168a9fcfcc57c92e45f00a29 | /FBExtracter.py | 61e3535c253680af327ade55a2f5a6d8839470dd | [] | no_license | Gobiviswa/FB-Extracter | bdbf57a78f4663d183acbe34d184870f3663b088 | 5962540e2230ce631424e720637bf496d4da9eaa | refs/heads/master | 2022-02-21T16:41:12.866528 | 2019-08-07T10:46:15 | 2019-08-07T10:46:15 | 103,291,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,895 | py | #!/usr/bin/env python
import sys #to getversion of Python
import json #for handling JSON files
import requests #for requesting URL content
import getpass #for getting password
import time
import datetime
import csv
#getting the version of the python
def getVersion():
if sys.version_info[0] > 2:
return input
else:
return raw_input
#inputting the page id of the particular page
def pageId():
page_id = inputType("Please enter the page id --> ")
return str(page_id)
#inputting the credentials of our fb developer app
def appCred():
app_id = inputType("Please enter the App id --> ")
app_secret = getpass.getpass("Please enter the App Secret Id --> ")
return app_id , app_secret
#creating the token automatically from the app credentials given above
def getFbToken(app_id, app_secret):
payload = {'grant_type': 'client_credentials', 'client_id': app_id, 'client_secret': app_secret}
file = requests.post('https://graph.facebook.com/oauth/access_token?', params = payload)
#print file.text #to test what the FB api responded with
token = file.text.split("=")
access = json.loads(token[0])
access_token = access["access_token"]
return str(access_token)
def validateDate(date):
try:
datetime.datetime.strptime(date, "%d/%m/%Y")
    except ValueError:
        raise ValueError("Please give the date in dd/mm/yyyy format")
#dates for the duration
def getDates():
since = inputType("Please enter the start date in DD/MM/YYYY format --> ")
validateDate(since)
until = inputType("Please enter the end date in DD/MM/YYYY format --> ")
validateDate(until)
since_unix = time.mktime(datetime.datetime.strptime(since , "%d/%m/%Y").timetuple())
until_unix = time.mktime(datetime.datetime.strptime(until , "%d/%m/%Y").timetuple())
return str(since.replace('/','-')), str(until.replace('/','-')), str(since_unix) , str(until_unix)
def getTime():
time = datetime.datetime.now().time()
return str(time.hour) + "-" + str(time.minute) + "-" + str(time.second)
#processing the url we have created
def processURL(url):
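    """Walk the Graph API result pages (following dic["paging"]["next"]) and
    append one CSV row per post: id, date, message, and comment/like/share
    counts."""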
while True:
dic = requests.get(url).json()
file_name = "data_" + start_date + "_" + end_date + "_" + time + ".csv"
csvObj = open(file_name , 'a')
csv_file = csv.writer(csvObj)
if dic["data"] != {}:
for element in dic["data"]:
post_id , post_time = element["id"] , element["created_time"][0:10]
if "message" not in element:
element["message"] = ""
post_message = element["message"].encode('utf-8')
if "comments" not in element:
element["comments"] = {}
element["comments"]["summary"] = {}
element["comments"]["summary"]["total_count"] = 0
post_comments_count = element["comments"]["summary"]["total_count"]
if "likes" not in element:
element["likes"] = {}
element["likes"]["summary"] = {}
element["likes"]["summary"]["total_count"] = 0
post_likes_count = element["likes"]["summary"]["total_count"]
if "shares" not in element:
element["shares"] = {}
element["shares"]["count"] = 0
post_shares_count = element["shares"]["count"]
                csv_file.writerow([post_id, post_time, post_message, post_comments_count, post_likes_count, post_shares_count])
            csvObj.close()
        else:
            csvObj.close()
            break
if "paging" in dic:
url = dic["paging"]["next"]
else:
break
inputType = getVersion()
page_id = pageId()
app_id , app_secret = appCred()
token = getFbToken(app_id, app_secret)
start_date, end_date, s_date , u_date = getDates()
run_time = getTime()
#forming the url
url = "https://graph.facebook.com/"+page_id+"/posts?fields=id,message,created_time,likes.limit(0).summary(true),comments.limit(0).summary(true),shares&since="+s_date+"&until="+u_date+"&access_token="+token
processURL(url)
| [
"noreply@github.com"
] | Gobiviswa.noreply@github.com |
dc09f5313a9b5329c70f8c40cf11a8e182d79d09 | 9efe76d9147f46a113e2ba7c5ae61e8d0221a14c | /code/1184. Distance Between Bus Stops_20190911.py | e977912ad930f82cc9915836881087480cd0277b | [] | no_license | cathyxingchang/leetcode | d570860686589f992d1eefad9d807f1bca76bcf2 | 163b376acab84e28c74cb784d10fe39f11510921 | refs/heads/master | 2021-01-19T09:03:14.309153 | 2020-02-26T02:53:16 | 2020-02-26T02:53:16 | 87,716,087 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | class Solution(object):
def distanceBetweenBusStops(self, distance, start, destination):
"""
:type distance: List[int]
:type start: int
:type destination: int
:rtype: int
"""
        # compute the forward distance from start to destination
        front = 0
        back = 0
        if destination < start:
            start, destination = destination, start
for index in range(start, destination):
front += distance[index]
        # compute the wrap-around (backward) distance
for index in range(destination, len(distance)):
back += distance[index]
for index in range(0, start):
back += distance[index]
return min(front, back)
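
# Note (design): the second pass is avoidable, since the backward distance
# equals sum(distance) - front.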
s = Solution()
distance = [7,10,1,12,11,14,5,0]
start = 7
destination = 2
result = s.distanceBetweenBusStops(distance, start, destination)
print(result)
| [
"xingchang@xiaomi.com"
] | xingchang@xiaomi.com |
ad6083e20bbce189bfe802914d0868484b69add9 | 2526d61a2b3ff8ecba71ffd51b6e6b8a473fbaa4 | /cli/maticetl/cli/export_receipts_and_logs.py | e3265ef85b0ee2da3c8efae81a5caab69484bc01 | [
"MIT"
] | permissive | mathewdgardner/matic-etl | 9bda99c91c8dd515b6f0d7f97f8aa9ec8560179b | 36630507808be23f3570a7a2f333dc21ca09b2f9 | refs/heads/main | 2023-03-31T11:31:53.244262 | 2021-04-02T17:43:57 | 2021-04-02T17:43:57 | 354,088,802 | 0 | 0 | MIT | 2021-04-02T17:32:51 | 2021-04-02T17:32:51 | null | UTF-8 | Python | false | false | 3,641 | py | # MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
from blockchainetl_common.file_utils import smart_open
from maticetl.jobs.export_receipts_job import ExportReceiptsJob
from maticetl.jobs.exporters.receipts_and_logs_item_exporter import receipts_and_logs_item_exporter
from blockchainetl_common.logging_utils import logging_basic_config
from maticetl.thread_local_proxy import ThreadLocalProxy
from maticetl.providers.auto import get_provider_from_uri
logging_basic_config()
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('-b', '--batch-size', default=100, show_default=True, type=int, help='The number of receipts to export at a time.')
@click.option('-t', '--transaction-hashes', required=True, type=str,
help='The file containing transaction hashes, one per line.')
@click.option('-p', '--provider-uri', default='https://mainnet.infura.io', show_default=True, type=str,
help='The URI of the web3 provider e.g. '
'file://$HOME/Library/Bor/geth.ipc or https://mainnet.infura.io')
@click.option('-w', '--max-workers', default=5, show_default=True, type=int, help='The maximum number of workers.')
@click.option('--receipts-output', default=None, show_default=True, type=str,
help='The output file for receipts. If not provided receipts will not be exported. Use "-" for stdout')
@click.option('--logs-output', default=None, show_default=True, type=str,
              help='The output file for receipt logs. '
                   'If not provided receipt logs will not be exported. Use "-" for stdout')
@click.option('-c', '--chain', default='matic', show_default=True, type=str, help='The chain network to connect to.')
def export_receipts_and_logs(batch_size, transaction_hashes, provider_uri, max_workers, receipts_output, logs_output,
chain='matic'):
"""Exports receipts and logs."""
with smart_open(transaction_hashes, 'r') as transaction_hashes_file:
job = ExportReceiptsJob(
transaction_hashes_iterable=(transaction_hash.strip() for transaction_hash in transaction_hashes_file),
batch_size=batch_size,
batch_web3_provider=ThreadLocalProxy(lambda: get_provider_from_uri(provider_uri, batch=True)),
max_workers=max_workers,
item_exporter=receipts_and_logs_item_exporter(receipts_output, logs_output),
export_receipts=receipts_output is not None,
export_logs=logs_output is not None)
job.run()
| [
"araa@connect.ust.hk"
] | araa@connect.ust.hk |
7902cfab8e06b52068fb3de8efa2c2ca595cc290 | 80bac94ba693adc5fb7e488e93aaadf85280be4c | /IBM_projs_coursse/Week2/lab2/ormtemplate/standalone/models.py | a358014f1e40f8d49e51fbce10d9127b3c27e828 | [] | no_license | yapalfredo/IBM_projs_coursse | cef482d027bfa0e5b99b2793667d88194085f08f | 2ac3a15c966768fc0da02e52f070b7a3985514c4 | refs/heads/main | 2023-06-08T13:01:45.594903 | 2021-07-02T01:21:12 | 2021-07-02T01:21:12 | 380,130,440 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | from django.db import models
# Test model
class Test(models.Model):
name = models.CharField(max_length=30) | [
"alfredo.yap@stu.bmcc.cuny.edu"
] | alfredo.yap@stu.bmcc.cuny.edu |
e93111e1175141ba7cb9cd4cf06309c045f70c40 | b0a45268376be5632cfe113c2e540e61fa4a3c51 | /ckan_api_client.py | 37ff4a455188975296e52f0ab5414c0dd02fb543 | [] | no_license | opendatatrentino/ckan-api-v2-tests | c4bdadbed0fe87cd9cc2caa9baa2bee106c777ca | 6cef44184fea19c122214af7f6e1be15c457676a | refs/heads/master | 2020-04-14T11:42:50.850813 | 2014-02-27T17:04:12 | 2014-02-27T17:04:12 | 16,990,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,693 | py | """
Ckan API client
"""
from collections import namedtuple
import copy
import functools
import json
import urlparse
import warnings
import requests
DATASET_FIELDS = {
'core': [
'author', 'author_email', 'license_id', 'maintainer',
'maintainer_email', 'name', 'notes', 'owner_org', 'private', 'state',
'type', 'url'
],
'cruft': [
'ckan_url', 'creator_user_id', 'isopen', 'license', 'license_title',
'license_url', 'metadata_created', 'metadata_modified',
'num_resources', 'num_tags', 'organization', 'ratings_average',
'ratings_count', 'revision_id', 'version'
],
'keys': ['id'],
'special': ['extras', 'groups', 'relationships', 'resources'],
}
RESOURCE_FIELDS = {
'core': [
'description', 'format', 'mimetype', 'mimetype_inner', 'name',
'position', 'resource_type', 'size', 'url', 'url_type',
],
'cruft': [
'cache_last_updated', 'cache_url', 'created', 'hash', 'last_modified',
'package_id', 'resource_group_id', 'webstore_last_updated',
'webstore_url',
],
'keys': ['id'],
'special': [],
}
GROUP_FIELDS = {
'core': [
'approval_status', 'description', 'image_display_url', 'image_url',
'is_organization', 'name', 'state', 'title', 'type',
],
'cruft': [
'created', 'display_name', 'package_count', 'packages', 'revision_id',
],
'keys': ['id'],
'special': ['extras', 'groups', 'tags', 'users'], # packages?
}
class SuppressExceptionIf(object):
def __init__(self, cond):
self.cond = cond
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
return
if callable(self.cond):
# If the callable returns True, exception
# will be suppressed
return self.cond(exc_value)
return self.cond
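
## Example (sketch): suppress only HTTP 404 errors inside the block:
##
##     with SuppressExceptionIf(lambda e: e.status_code == 404):
##         client.request('DELETE', '/api/2/rest/dataset/some-id')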
class HTTPError(Exception):
def __init__(self, status_code, message):
self.status_code = status_code
self.message = message
def __str__(self):
return "HTTPError [{0}]: {1}".format(self.status_code, self.message)
class BadApiError(Exception):
"""Exception used to mark bad behavior from the API"""
pass
class BadApiWarning(UserWarning):
"""Warning to mark bad behavior from the API"""
pass
class SomethingWentWrong(Exception):
"""
Exception to indicate that something went wrong during
a data import.. :(
"""
pass
##----------------------------------------------------------------------
## Typechecker validators are used here as the only way to try to
## make some order in this mess of an API returning unexpected things.
## They might come in handy when refactoring Ckan code too, btw..
##----------------------------------------------------------------------
## WARNING! This is not good Python practice for normal usage; do not
## copy this pattern into regular code..
##----------------------------------------------------------------------
def validate(validator, value):
if validator is None:
return True
if isinstance(validator, type):
return isinstance(value, validator)
if callable(validator):
return validator(value)
raise TypeError("Invalid validator type: {0}".format(type(validator)))
def check_arg_types(*a_types, **kw_types):
def decorator(func):
@functools.wraps(func)
def wrapped(*a, **kw):
# Validate arguments
for validator, value in zip(a_types, a):
if not validate(validator, value):
raise TypeError("Invalid argument type")
# Validate keyword arguments
for key, validator in kw_types.iteritems():
if key not in kw:
continue
value = kw[key]
if not validate(validator, value):
raise TypeError("Invalid argument type")
# Actually call the function
return func(*a, **kw)
return wrapped
return decorator
def check_retval(checker):
def decorator(func):
@functools.wraps(func)
def wrapped(*a, **kw):
retval = func(*a, **kw)
if not validate(checker, retval):
raise TypeError("Invalid return value")
return retval
return wrapped
return decorator
def is_list_of(type_):
def inner(obj):
if not isinstance(obj, list):
raise TypeError("Object is not a list")
if not all(isinstance(x, type_) for x in obj):
raise TypeError("A value in the list is not a {0!r}".format(type_))
return True
return inner
def is_dict_of(key_type, value_type):
def inner(obj):
if not isinstance(obj, dict):
raise TypeError("Object is not a dict")
for key, value in obj.iteritems():
validate(key_type, key)
validate(value_type, value)
return True
return inner
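
## Example (sketch): the validators compose with the decorators above, e.g.
##
##     @check_arg_types(None, basestring)
##     @check_retval(is_dict_of(basestring, basestring))
##     def get_mapping(self, name):  # hypothetical method
##         ...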
def validate_dataset(dataset):
"""Do some checking on a dataset object"""
# todo: what about extra fields? should be warn in case we have some?
if not isinstance(dataset, dict):
raise ValueError("Dataset must be a dict")
if 'extras' in dataset:
if not isinstance(dataset['extras'], dict):
raise ValueError("Dataset extras must be a dict")
for key, value in dataset['extras'].iteritems():
if not isinstance(key, basestring):
raise ValueError("Extras keys must be strings")
if (value is not None) and (not isinstance(value, basestring)):
raise ValueError("Extras values must be strings (or None)")
if 'groups' in dataset:
if not isinstance(dataset['groups'], list):
raise ValueError("Dataset groups must be a list")
if not all(isinstance(x, basestring) for x in dataset['groups']):
raise ValueError("Dataset groups must be a list of strings")
if 'resources' in dataset:
if not isinstance(dataset['resources'], list):
raise ValueError("Resources must be a list")
if not all(isinstance(x, dict) for x in dataset['resources']):
raise ValueError("Dataset resources must be a list of dicts")
# todo: validate each single resource object too..?
return True # Validation passed
##----------------------------------------------------------------------
## Actual client classes
##----------------------------------------------------------------------
class CkanClient(object):
def __init__(self, base_url, api_key=None):
self.base_url = base_url
self.api_key = api_key
@property
def anonymous(self):
return CkanClient(self.base_url)
def request(self, method, path, **kwargs):
headers = kwargs.get('headers') or {}
kwargs['headers'] = headers
## Update headers for authorization
if self.api_key is not None:
headers['Authorization'] = self.api_key
## Serialize data to json, if not already
if 'data' in kwargs:
if not isinstance(kwargs['data'], basestring):
kwargs['data'] = json.dumps(kwargs['data'])
headers['content-type'] = 'application/json'
if isinstance(path, (list, tuple)):
path = '/'.join(path)
url = urlparse.urljoin(self.base_url, path)
response = requests.request(method, url, **kwargs)
if not response.ok:
## todo: attach message, if any available..
## todo: we should find a way to figure out how to attach
## original text message to the exception
## as it might be: json string, part of json object,
## part of html document
raise HTTPError(response.status_code,
"Error while performing request")
return response
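
    ## Example (sketch): the higher-level methods all go through request(),
    ## e.g.
    ##
    ##     resp = client.request('GET', '/api/2/rest/dataset')
    ##     dataset_ids = resp.json()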
##============================================================
## Datasets
##============================================================
@check_retval(is_list_of(basestring))
def list_datasets(self):
path = '/api/2/rest/dataset'
response = self.request('GET', path)
return response.json()
def iter_datasets(self):
for ds_id in self.list_datasets():
yield self.get_dataset(ds_id)
@check_arg_types(None, basestring)
@check_retval(dict)
def get_dataset(self, dataset_id):
path = '/api/2/rest/dataset/{0}'.format(dataset_id)
response = self.request('GET', path)
return response.json()
@check_arg_types(None, dict)
@check_retval(dict)
def post_dataset(self, dataset):
path = '/api/2/rest/dataset'
response = self.request('POST', path, data=dataset)
return response.json()
@check_arg_types(None, validate_dataset)
@check_retval(dict)
def create_dataset(self, dataset):
"""
High-level function to create datasets.
Just a wrapper around post_dataset() right now, but
might come in handy in the future to add workarounds..
"""
return self.post_dataset(dataset)
@check_arg_types(None, basestring, validate_dataset)
@check_retval(dict)
def put_dataset(self, dataset_id, dataset):
"""
PUT a dataset (for update).
.. warning::
``update_dataset()`` should be used instead, in normal cases,
as it automatically takes care of a lot of needed workarounds
to prevent data loss.
            Calling this method directly is almost never advised or required.
"""
path = '/api/2/rest/dataset/{0}'.format(dataset_id)
response = self.request('PUT', path, data=dataset)
return response.json()
@check_arg_types(None, basestring, validate_dataset)
@check_retval(dict)
def update_dataset(self, dataset_id, updates):
"""
Trickery to perform a safe partial update of a dataset.
WARNING: This method contains tons of hacks to try and fix
major issues with the API.
In particular, remember that:
- Extras are updated incrementally. To delete a key, just set
it to None.
- Groups might accept objects too, but behavior is quite undefined
in that case.. so don't do that.
Fixes that are in place:
- If the extras field is not specified on update, all extras will
be deleted. To prevent this, we default it to {}.
- If the groups field is not specified on update, all groups will
be removed. To prevent this, we default it to [].
"""
##=====[!!]=========== IMPORTANT NOTE ===============[!!]=====
## - "core" fields seems to be kept
## - ..but "extras" need to be passed back again
## - ..same behavior for groups: no way to delete them,
        ##   apparently.. apart from flushing 'em all by omitting
## the field...
## - resources?
## - relationships?
##============================================================
original_dataset = self.get_dataset(dataset_id)
## Dictionary holding the actual data to be sent
## for performing the update
updates_dict = {'id': dataset_id}
##############################################################
## Core fields
##------------------------------------------------------------
for field in DATASET_FIELDS['core']:
if field in updates:
updates_dict[field] = updates[field]
else:
updates_dict[field] = original_dataset[field]
##############################################################
## Extras fields
##------------------------------------------------------------
##=====[!!]=========== IMPORTANT NOTE ===============[!!]=====
## WARNING! Behavior here is quite "funky":
##
## db: {'a': 'aa', 'b': 'bb', 'c': 'cc'}
## update: (no extras key)
## result: {}
##
## db: {'a': 'aa', 'b': 'bb', 'c': 'cc'}
## update: {'a': 'foo'}
## result: {'a': 'foo', 'b': 'bb', 'c': 'cc'}
##
## db: {'a': 'aa', 'b': 'bb', 'c': 'cc'}
## update: {}
## db: {'a': 'aa', 'b': 'bb', 'c': 'cc'}
##============================================================
EXTRAS_FIELD = 'extras' # to avoid confusion
updates_dict[EXTRAS_FIELD] = {}
if EXTRAS_FIELD in updates:
# Notes: setting a field to 'None' will delete it.
updates_dict[EXTRAS_FIELD].update(updates[EXTRAS_FIELD])
##############################################################
## These fields need to be passed again or it will just
## be flushed..
##------------------------------------------------------------
FIELDS_THAT_NEED_TO_BE_PASSED = [
'resources', 'relationships'
]
for field in FIELDS_THAT_NEED_TO_BE_PASSED:
if field in updates:
updates_dict[field] = updates[field]
else:
updates_dict[field] = original_dataset[field]
##############################################################
## Update groups
##------------------------------------------------------------
##=====[!!]=========== IMPORTANT NOTE ===============[!!]=====
## - If the groups key is omitted, all groups are deleted
## - It seems to be possible to specify groups as objects too,
## but exact behavior is uncertain, so we only accept
## strings here (ids), otherwise object will not pass
## validation.
##============================================================
        updates_dict['groups'] = (
            updates['groups']
            if 'groups' in updates
            else original_dataset['groups'])
##############################################################
## todo: update relationships
##------------------------------------------------------------
# todo: WTF are relationships?
##############################################################
## todo: update tags
##------------------------------------------------------------
##############################################################
## todo: update resources
##------------------------------------------------------------
##############################################################
## Actually perform the update
##------------------------------------------------------------
return self.put_dataset(dataset_id, updates_dict)
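
    ## Example (sketch, hypothetical ids): a partial update that changes
    ## "notes" and deletes one extras key by setting it to None:
    ##
    ##     client.update_dataset('some-dataset-id', {
    ##         'notes': 'Updated description',
    ##         'extras': {'obsolete_key': None},
    ##     })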
@check_arg_types(None, basestring, ignore_404=bool)
def delete_dataset(self, dataset_id, ignore_404=True):
ign404 = SuppressExceptionIf(
lambda e: ignore_404 and (e.status_code == 404))
path = '/api/2/rest/dataset/{0}'.format(dataset_id)
with ign404:
self.request('DELETE', path, data={'id': dataset_id})
##============================================================
## Groups
##============================================================
##=====[!!]=========== IMPORTANT NOTE ===============[!!]=====
## BEWARE! API v2 only considers actual groups, organizations
## are not handled / returned by this one!
##============================================================
@check_retval(is_list_of(basestring))
def list_groups(self):
path = '/api/2/rest/group'
response = self.request('GET', path)
return response.json()
def iter_groups(self):
all_groups = self.list_groups()
for group_id in all_groups:
yield self.get_group(group_id)
@check_arg_types(None, basestring)
@check_retval(dict)
def get_group(self, group_id):
path = '/api/2/rest/group/{0}'.format(group_id)
response = self.request('GET', path)
return response.json()
@check_arg_types(None, dict)
@check_retval(dict)
def post_group(self, group):
path = '/api/2/rest/group'
response = self.request('POST', path, data=group)
return response.json()
@check_arg_types(None, basestring, dict)
@check_retval(dict)
def put_group(self, group_id, group):
path = '/api/2/rest/group/{0}'.format(group_id)
response = self.request('PUT', path, data=group)
data = response.json()
return data
@check_arg_types(None, basestring, ignore_404=bool)
def delete_group(self, group_id, ignore_404=True):
ign404 = SuppressExceptionIf(
lambda e: ignore_404 and (e.status_code == 404))
path = '/api/2/rest/group/{0}'.format(group_id)
with ign404:
self.request('DELETE', path)
path = '/api/3/action/group_purge'
with ign404:
self.request('POST', path, data={'id': group_id})
@check_arg_types(None, basestring, dict)
@check_retval(dict)
def update_group(self, group_id, updates):
"""
Trickery to perform a safe partial update of a group.
"""
original_group = self.get_group(group_id)
## Dictionary holding the actual data to be sent
## for performing the update
updates_dict = {'id': group_id}
##------------------------------------------------------------
## Core fields
##------------------------------------------------------------
for field in GROUP_FIELDS['core']:
if field in updates:
updates_dict[field] = updates[field]
else:
updates_dict[field] = original_group[field]
##------------------------------------------------------------
## Extras fields
##------------------------------------------------------------
## We assume the same behavior here as for datasets..
## See update_dataset() for more details.
EXTRAS_FIELD = 'extras' # to avoid confusion
updates_dict[EXTRAS_FIELD] = {}
if EXTRAS_FIELD in updates:
# Notes: setting a field to 'None' will delete it.
updates_dict[EXTRAS_FIELD].update(updates[EXTRAS_FIELD])
## These fields need to be passed again or they will just
## be flushed..
FIELDS_THAT_NEED_TO_BE_PASSED = [
'groups', # 'tags'?
]
for field in FIELDS_THAT_NEED_TO_BE_PASSED:
if field in updates:
updates_dict[field] = updates[field]
else:
updates_dict[field] = original_group[field]
## Actually perform the update
##----------------------------------------
return self.put_group(group_id, updates_dict)
@check_arg_types(None, dict)
@check_retval(dict)
def upsert_group(self, group):
"""
Try to "upsert" a group, by name.
This will:
- retrieve the group
- if the group['state'] == 'deleted', try to restore it
- if something changed, update it
:return: the group object
"""
# Try getting group..
if 'id' in group:
raise ValueError("You shouldn't specify a group id already!")
## Get the group
## Groups should be returned by name too (hopefully..)
try:
_retr_group = self.get_group(group['name'])
except HTTPError:
_retr_group = None
if _retr_group is None:
## Just insert the group and return its id
return self.post_group(group)
updates = {}
if _retr_group['state'] == 'deleted':
## We need to make it active again!
updates['state'] = 'active'
## todo: Check if we have differences, before updating!
updated_dict = copy.deepcopy(group)
updated_dict.update(updates)
return self.update_group(_retr_group['id'], updated_dict)
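
    ## Example (sketch): upsert is keyed on the group name, so this either
    ## creates the group, revives it if deleted, or updates it in place:
    ##
    ##     client.upsert_group({'name': 'my-group', 'title': 'My Group'})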
##============================================================
## Organizations
##============================================================
## --- [!!] NOTE ---------------------------------------------
## We need to fallback to api v3 here, as v2 doesn't support
## doing things with organizations..
##------------------------------------------------------------
@check_retval(is_list_of(basestring))
def list_organizations(self):
path = '/api/3/action/organization_list'
response = self.request('GET', path)
return response.json()['result']
def iter_organizations(self):
for org_id in self.list_organizations():
yield self.get_organization(org_id)
@check_arg_types(None, basestring)
@check_retval(dict)
def get_organization(self, organization_id):
path = '/api/3/action/organization_show?id={0}'.format(organization_id)
response = self.request('GET', path)
return response.json()['result']
@check_retval(dict)
def post_organization(self, organization):
path = '/api/3/action/organization_create'
response = self.request('POST', path, data=organization)
return response.json()['result']
@check_retval(dict)
def put_organization(self, organization_id, organization):
"""Warning! with api v3 we need to use POST!"""
organization['id'] = organization_id
path = '/api/3/action/organization_update'
response = self.request('POST', path, data=organization)
return response.json()['result']
@check_arg_types(None, basestring, dict)
@check_retval(dict)
def update_organization(self, organization_id, updates):
"""
Trickery to perform a safe partial update of a organization.
"""
original_organization = self.get_organization(organization_id)
## Dictionary holding the actual data to be sent
## for performing the update
updates_dict = {'id': organization_id}
##------------------------------------------------------------
## Core fields
##------------------------------------------------------------
for field in GROUP_FIELDS['core']:
if field in updates:
updates_dict[field] = updates[field]
else:
updates_dict[field] = original_organization[field]
##------------------------------------------------------------
## Extras fields
##------------------------------------------------------------
## We assume the same behavior here as for datasets..
## See update_dataset() for more details.
# EXTRAS_FIELD = 'extras' # to avoid confusion
# # updates_dict[EXTRAS_FIELD] = {}
# updates_dict[EXTRAS_FIELD] = [{}] # BUG!!
# if EXTRAS_FIELD in updates:
# # Notes: setting a field to 'None' will delete it.
# # updates_dict[EXTRAS_FIELD].update(updates[EXTRAS_FIELD])
# updates_dict[EXTRAS_FIELD] = [updates[EXTRAS_FIELD]]
        ## Workaround: these fields need to be passed again in the update
        ## payload, or Ckan will simply flush them..
FIELDS_THAT_NEED_TO_BE_PASSED = [
'groups', # 'tags'?
]
for field in FIELDS_THAT_NEED_TO_BE_PASSED:
if field in updates:
updates_dict[field] = updates[field]
else:
updates_dict[field] = original_organization[field]
## Actually perform the update
##----------------------------------------
return self.put_organization(organization_id, updates_dict)
@check_arg_types(None, dict)
@check_retval(dict)
def upsert_organization(self, organization):
"""
Try to "upsert" a organization, by name.
This will:
- retrieve the organization
- if the organization['state'] == 'deleted', try to restore it
- if something changed, update it
:return: the organization object
"""
# Try getting organization..
if 'id' in organization:
raise ValueError(
"You shouldn't specify a organization id! "
"Name is going to be used as upsert key.")
## Get the organization
## Groups should be returned by name too (hopefully..)
try:
_retr_organization = self.get_organization(organization['name'])
except HTTPError:
_retr_organization = None
if _retr_organization is None:
## Just insert the organization and return its id
return self.post_organization(organization)
updates = {}
if _retr_organization['state'] == 'deleted':
## We need to make it active again!
updates['state'] = 'active'
## todo: Check if we have differences, before updating!
updated_dict = copy.deepcopy(organization)
updated_dict.update(updates)
return self.update_organization(_retr_organization['id'], updated_dict)
def delete_organization(self, organization_id, ignore_404=True):
ign404 = SuppressExceptionIf(
lambda e: ignore_404 and (e.status_code == 404))
path = '/api/3/action/organization_delete'
with ign404:
self.request('PUT', path, data={'id': organization_id})
path = '/api/3/action/organization_purge'
with ign404:
self.request('POST', path, data={'id': organization_id})
##============================================================
## Licenses
##============================================================
@check_retval(is_list_of(dict))
def list_licenses(self):
path = '/api/2/rest/licenses'
response = self.request('GET', path)
return response.json()
##============================================================
## Tags
##============================================================
@check_retval(is_list_of(basestring))
def list_tags(self):
path = '/api/2/rest/tag'
response = self.request('GET', path)
return response.json()
@check_retval(is_list_of(dict))
def list_datasets_with_tag(self, tag_id):
path = '/api/2/rest/tag/{0}'.format(tag_id)
response = self.request('GET', path)
return response.json()
def iter_datasets_with_tag(self, tag_id):
        for dataset_id in self.list_datasets_with_tag(tag_id):
yield self.get_dataset(dataset_id)
IDPair = namedtuple('IDPair', ['source_id', 'ckan_id'])
class CkanDataImportClient(object):
"""
Client to handle importing data in ckan
Needs:
- Synchronize a collection of datasets with a filtered
subset of Ckan datasets
- Also upsert "dependency" objects, such as groups and
organizations, in order to be able to link them with newly-created
datasets.
Notes:
- dataset['groups'] will be generated by mapping names in
dataset['group_names'] to ckan ids of the same groups
    - dataset['owner_org'] will be replaced with the ckan id
      corresponding to the organization name it initially contains
"""
source_field_name = '_harvest_source'
source_id_field_name = '_harvest_source_id'
def __init__(self, base_url, api_key, source_name):
"""
:param base_url: passed to CkanClient constructor
:param api_key: passed to CkanClient constructor
:param source_name: identifier of the data source
"""
self.client = CkanClient(base_url, api_key)
self.source_name = source_name
def sync_data(self, data, double_check=True):
"""
Import data into Ckan
:param data:
Dict (or dict-like) mapping object types to
dicts (key/object) (key is the original key)
"""
## Used to keep track of the executed operations,
## mainly used to generate reports..
result = {
'created': [],
'updated': [],
'deleted': [],
}
##------------------------------------------------------------
## Retrieve current database state
##------------------------------------------------------------
used_dataset_names = set()
our_datasets_from_ckan = {} # key: source id
for dataset in self.client.iter_datasets():
if self._is_our_dataset(dataset):
key = dataset['extras'][self.source_id_field_name]
our_datasets_from_ckan[key] = dataset
used_dataset_names.add(dataset['name'])
##------------------------------------------------------------
## Utility functions
##------------------------------------------------------------
def _prepare_group(group):
# The original id is moved into name.
# Better not messing with these fields..
group.pop('id', None)
group.pop('name', None)
return group
def _prepare_organization(obj):
return _prepare_group(obj)
##------------------------------------------------------------
## Maps 'source_id' -> 'ckan_id' for
## organizations and groups.
##------------------------------------------------------------
groups_map = self._ensure_groups(
dict(
(k, _prepare_group(g))
for k, g in data['group'].iteritems()
)
)
organizations_map = self._ensure_organizations(
dict(
(k, _prepare_organization(g))
for k, g in data['organization'].iteritems()
)
)
##------------------------------------------------------------
## Obtain differences between datasets
##------------------------------------------------------------
dataset_diffs = self._verify_datasets(data['dataset'])
def _prepare_dataset(dataset):
"""
Prepare a dataset from an external source for insertion in ckan
"""
## Let's left the original untouched
dataset = copy.deepcopy(dataset)
## Pop the id, as it is not to be used as key
## - for creates, id will be generated
## - for updates, id is passed separately
source_id = dataset.pop('id')
## Note: we cannot handle name change here, as we don't
## know whether the dataset is new or going to be updated
## Map group names to ids
dataset['groups'] = [
groups_map[x]
for x in (dataset.get('group_names') or [])
if x in groups_map]
## Map organization name to id
dataset['owner_org'] = organizations_map.get(
dataset.get('owner_org'))
## We need to mark this dataset as ours
if 'extras' not in dataset:
dataset['extras'] = {}
dataset['extras'][self.source_field_name] = self.source_name
dataset['extras'][self.source_id_field_name] = source_id
return dataset
##----------------------------------------
## Apply creates
##----------------------------------------
for idpair in dataset_diffs['missing']:
## Create dataset with idpair.source_id
dataset = _prepare_dataset(data['dataset'][idpair.source_id])
# todo: we need to make sure we use a unique name
# for the newly created dataset!
# -> keep a set of used names and hope for the best..
# todo: how to generate default name, if not specified?
created = self.client.create_dataset(dataset)
## Add id in the list of created datasets
result['created'].append(
IDPair(source_id=idpair.source_id,
ckan_id=created['id']))
##----------------------------------------
## Apply updates
##----------------------------------------
for idpair in dataset_diffs['updated']:
assert idpair.source_id is not None
assert idpair.ckan_id is not None
## Update dataset
dataset = _prepare_dataset(data['dataset'][idpair.source_id])
dataset.pop('name', None)
# todo: we should ignore name changes, as they might cause
# Unique key problems.. plus, users might have
# customized them
# todo: should we change groups / organizations?
# Best thing would be to make this configurable
updated = self.client.update_dataset(idpair.ckan_id, dataset)
assert updated['id'] == idpair.ckan_id
# todo: check that the update was successful?
# (check might be done by update_dataset() too..)
## Add id in the list of updated datasets
result['updated'].append(idpair)
##----------------------------------------
## Apply removals
##----------------------------------------
for idpair in dataset_diffs['deleted']:
## Delete dataset
assert idpair.source_id is None
assert idpair.ckan_id is not None
self.client.delete_dataset(idpair.ckan_id)
result['deleted'].append(idpair)
##----------------------------------------
## Double-check
##----------------------------------------
if double_check:
errors = 0
differences = self._verify_datasets(data['dataset'])
if len(differences['missing']) > 0:
errors += 1
warnings.warn("We still have ({0}) datasets marked as missing"
.format(len(differences['missing'])))
#### TODO: RE-ENABLE THIS CHECK!!! ####
if len(differences['updated']) > 0:
# errors += 1
warnings.warn("We still have ({0}) datasets marked as updated"
.format(len(differences['updated'])))
if len(differences['deleted']) > 0:
errors += 1
warnings.warn("We still have ({0}) datasets marked as deleted"
.format(len(differences['deleted'])))
# todo: check groups/orgs too!
if errors > 0:
raise SomethingWentWrong(
"Something went wrong while performing updates.")
def _is_our_dataset(self, dataset):
"""
Check whether a dataset is associated with this harvest source
"""
try:
dataset_source = dataset['extras'][self.source_field_name]
except KeyError:
return False
return dataset_source == self.source_name
def _find_our_datasets(self):
"""
Iterate dataset, yield only the ones that match this source
"""
for dataset in self.client.iter_datasets():
if self._is_our_dataset(dataset):
yield dataset
def _check_dataset(self, dataset, expected):
"""
Check whether dataset is up to date with expected..
"""
# todo: should ignore names as they might change..
# todo: we need to make sure we are getting group/org **ids**,
# not names
for field in DATASET_FIELDS['core']:
if field in expected:
if dataset[field] != expected[field]:
return False
if 'extras' in expected:
if dataset['extras'] != expected['extras']:
return False
if 'groups' in expected:
if sorted(dataset['groups']) != sorted(expected['groups']):
return False
## Check resources
if 'resources' in expected:
_dataset_resources = dict((x['url'], x)
for x in dataset['resources'])
_expected_resources = dict((x['url'], x)
for x in expected['resources'])
if len(_dataset_resources) != len(dataset['resources']):
return False
if len(_expected_resources) != len(expected['resources']):
return False
if len(_dataset_resources) != len(_expected_resources):
return False
if sorted(_dataset_resources.iterkeys()) \
!= sorted(_expected_resources.iterkeys()):
return False
for key in _dataset_resources:
_resource = _dataset_resources[key]
_expected = _expected_resources[key]
for field in RESOURCE_FIELDS['core']:
if _resource[field] != _expected[field]:
return False
        ## Need to check relationships too (their exact semantics are
        ## still unclear)
return True
def _check_group(self, group, expected):
"""
Make sure all the data in ``expected`` is also in ``group``
"""
return True
def _check_organization(self, organization, expected):
"""
Make sure all the data in ``expected`` is also in ``organization``
"""
return True
def _verify_datasets(self, datasets):
"""
Compare differences between current state and desired state
of the datasets collection.
:param datasets:
A dictionary (or dict-like) mapping {<source-id>: <dataset>}
:return: a dict with following keys:
- missing:
List of IDPair of datasets that are in ``datasets`` but
not in Ckan
- up_to_date:
List of IDPair of datasets that are both in ``datasets``
and Ckan, and that are up to date.
- updated:
List of IDPair of datasets that are both in ``datasets``
and Ckan, but are somehow different.
- deleted:
List of IDPair of datasets that are in Ckan but not
in ``datasets``, and thus should be deleted.
Each 'IDPair' is a named tuple with (source_id, ckan_id) keys.
"""
## Dictionary mapping {<source_id>: <dataset>} for datasets in Ckan,
## filtered on source name.
our_datasets = dict(
(x['extras'][self.source_id_field_name], x)
for x in self._find_our_datasets())
# ## Create map of {'source_id': 'ckan_id'}
# dataset_ids = ((k, v['id']) for k, v in our_datasets.iteritems())
new_datasets = []
up_to_date_datasets = []
updated_datasets = []
for source_id, dataset in datasets.iteritems():
## Pop dataset from list, to leave only deleted ones
existing_dataset = our_datasets.pop(source_id, None)
if existing_dataset is None:
## This dataset is missing in the database,
## meaning we need to update it
new_datasets.append(IDPair(source_id=source_id, ckan_id=None))
else:
## Dataset is in Ckan, but is it up to date?
_id_pair = IDPair(source_id=source_id,
ckan_id=existing_dataset['id'])
if not self._check_dataset(existing_dataset, dataset):
## This dataset differs from the one in the database
updated_datasets.append(_id_pair)
else:
## This dataset is ok
up_to_date_datasets.append(_id_pair)
## Remaining datasets are in the db but have been deleted in
## the new collection.
deleted_datasets = [IDPair(source_id=None, ckan_id=d['id'])
for k, d in our_datasets.iteritems()]
return {
'missing': new_datasets,
'up_to_date': up_to_date_datasets,
'updated': updated_datasets,
'deleted': deleted_datasets,
}
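    # Illustrative outcome (hypothetical ids): if Ckan currently holds our
    # source ids {'a', 'b'} and ``datasets`` holds {'b', 'c'}, the result
    # is roughly:
    #
    #     {'missing': [IDPair('c', None)],
    #      'up_to_date' (or 'updated'): [IDPair('b', '<ckan-id-of-b>')],
    #      'deleted': [IDPair(None, '<ckan-id-of-a>')]}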
@check_arg_types(None, is_dict_of(basestring, dict))
@check_retval(is_dict_of(basestring, basestring))
def _ensure_groups(self, groups):
"""
Make sure the specified groups exist in Ckan.
:param groups:
a {'name': <group>} dict
:return:
a {'name': 'ckan-id'} dict
"""
results = {}
for group_name, group in groups.iteritems():
group['name'] = group_name
c_group = self.client.upsert_group(group)
results[group_name] = c_group['id']
return results
@check_arg_types(None, is_dict_of(basestring, dict))
@check_retval(is_dict_of(basestring, basestring))
def _ensure_organizations(self, organizations):
"""
Make sure the specified organizations exist in Ckan.
        :param organizations:
            a {'name': <organization>} dict
        :return:
            a {'name': 'ckan-id'} dict
"""
results = {}
for organization_name, organization in organizations.iteritems():
organization['name'] = organization_name
c_organization = self.client.upsert_organization(organization)
results[organization_name] = c_organization['id']
return results
| [
"redshadow@hackzine.org"
] | redshadow@hackzine.org |
1b95e6b2d0973c6330be837f6719e80f660b6827 | 5064d9c3c7f2ce03f0bf1098cebd1a71498260df | /clients/admin_image_widget.py | 4c48aa003c69f0d2a58c7b4b0a3206828f0180bc | [] | no_license | RetinaInc/mediguest | db85e1bb72392ea2834a94c444856366e4cd0426 | 52d8a383ef3d1ef04841285e0e7eb6dc3654fdfd | refs/heads/master | 2021-01-17T12:16:33.908649 | 2014-03-16T15:58:32 | 2014-03-16T15:58:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,993 | py | from django.contrib.admin.widgets import AdminFileWidget
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import os
from PIL import Image
class AdminImageWidget(AdminFileWidget):
def render(self, name, value, attrs=None):
output = []
if value and getattr(value, "url", None):
image_url = value.url
file_name=str(value)
# defining the size
x, y = (100, 100)
size="%dx%d" % (x,y)
# defining the filename and the miniature filename
filehead, filetail = os.path.split(value.path)
basename, format = os.path.splitext(filetail)
miniature = basename + '_' + size + format
filename = value.path
miniature_filename = os.path.join(filehead, miniature)
filehead, filetail = os.path.split(value.url)
miniature_url = filehead + '/' + miniature
# make sure that the thumbnail is a version of the current original sized image
if os.path.exists(miniature_filename) and os.path.getmtime(filename) > os.path.getmtime(miniature_filename):
os.unlink(miniature_filename)
# if the image wasn't already resized, resize it
if not os.path.exists(miniature_filename):
image = Image.open(filename)
image.thumbnail((x, y), Image.ANTIALIAS)
                try:
                    image.save(miniature_filename, image.format, quality=90, optimize=1)
                except Exception:
                    # some formats cannot be saved with optimize=1; retry without it
                    image.save(miniature_filename, image.format, quality=90)
output.append(u' <div><a href="%s" target="_blank"><img src="%s" alt="%s" /></a></div> %s ' % \
(miniature_url, miniature_url, miniature_filename, _('Change:')))
output.append(super(AdminFileWidget, self).render(name, value, attrs))
return mark_safe(u''.join(output))
| [
"leebraid@gmail.com"
] | leebraid@gmail.com |
aa3a29d2c6739fa6f0c3c1fd181e9e62d542fadd | c4b21ca242ce5813437b563e21ce5bf084c2fe15 | /app/migrations/0030_auto_20210329_1808.py | 2ccea7fa665daba98a0e32b6105cc25dc24ed455 | [] | no_license | Igor-987/Sneg_Recs | 276cd4eea1655ab1589d4d41bf7c72c284e902b4 | 3a1006151fd9994957e5a7e311c96591597bfe00 | refs/heads/master | 2023-03-30T15:46:40.697007 | 2021-04-07T11:24:09 | 2021-04-07T11:24:09 | 349,412,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,558 | py | # Generated by Django 3.1.7 on 2021-03-29 10:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app', '0029_store_retail'),
]
operations = [
migrations.AlterModelOptions(
name='rec',
options={'ordering': ['-rec_time'], 'permissions': (('can_create_update', 'Может rec_create и rec_update'), ('can_list_detail', 'Может rec_list и rec_detail')), 'verbose_name': 'Заявка', 'verbose_name_plural': 'Заявки'},
),
migrations.AlterModelOptions(
name='retail',
options={'verbose_name': 'Торговая сеть', 'verbose_name_plural': 'Торговые сети'},
),
migrations.AlterModelOptions(
name='status',
options={'verbose_name': 'Статус', 'verbose_name_plural': 'Статусы'},
),
migrations.AlterModelOptions(
name='store',
options={'verbose_name': 'Магазин', 'verbose_name_plural': 'Магазины'},
),
migrations.AlterModelOptions(
name='tech',
options={'verbose_name': 'Инженер', 'verbose_name_plural': 'Инженеры'},
),
migrations.AlterModelOptions(
name='trouble',
options={'verbose_name': 'Неисправность', 'verbose_name_plural': 'Неисправности'},
),
migrations.RemoveField(
model_name='store',
name='retail',
),
]
| [
"igor.kov@list.ru"
] | igor.kov@list.ru |
a8d6c8d28a4bab862a94b9a96f1fce68c8fff635 | 1690a17b015d906d8ccc758d8e6edea8980b7d8b | /Simulator/Simulator.py | 915c7374c8da086bce4747ad55f09e7980d95d40 | [
"MIT"
] | permissive | aayushkapadia/chemical_reaction_simulator | 0624e488f2a655a6734688702ad4b7e3f4bb0f9c | 00a2497b8d6619486c69167ca62538221bce1373 | refs/heads/master | 2021-01-18T20:53:09.367013 | 2017-05-05T16:49:43 | 2017-05-05T16:49:43 | 86,996,682 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | from Reaction import *
import matplotlib.pyplot as plt
class Simulator:
def __init__(self,crn):
self.crn = crn
self.simulationData = dict()
self.crn.prepare()
for chemical_name in self.crn.concentrations:
self.simulationData[chemical_name] = []
def addInSimulationData(self,concentrations):
for chemical_name in concentrations:
self.simulationData[chemical_name].append(concentrations[chemical_name])
def simulate(self,timeSteps,filePath):
historyFile = open(filePath,'w')
historyFile.write(str(self.crn.concentrations))
historyFile.write('\n')
self.addInSimulationData(self.crn.concentrations)
for i in range(timeSteps):
reaction = self.crn.getFastestReaction()
            if reaction is not None:
self.crn.doReaction(reaction)
self.addInSimulationData(self.crn.concentrations)
historyFile.write(str(self.crn.concentrations))
historyFile.write('\n')
        historyFile.close()
        print 'History file ' + filePath + ' created'
def plot(self,listOfChemicals):
for chemical in listOfChemicals:
initString = 'init = '+str(self.simulationData[chemical][0])
endString = 'end = '+ str(self.simulationData[chemical][-1])
plt.plot(self.simulationData[chemical],label=chemical + '(' + initString +',' + endString + ')')
plt.ylabel('Concentration')
plt.xlabel('Time (unit time)')
plt.legend()
plt.show()
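    # Usage sketch -- assumes a ``crn`` object from the accompanying
    # Reaction module exposing prepare(), concentrations,
    # getFastestReaction() and doReaction():
    #
    #     sim = Simulator(crn)
    #     sim.simulate(1000, 'history.txt')
    #     sim.plot(['A', 'B'])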
| [
"kapadiaaayush@gmail.com"
] | kapadiaaayush@gmail.com |
0659ba2b964c99d71870485b9c9e0d8687be2bc7 | 130b708183683934e14da0540defd2714a5683e9 | /src/config.py | 6db6ac49ec8c286c947813d5483339fd44dffbb0 | [] | no_license | burrussmp/Dermatoscopic-Image-Classifier | 062d6ad00037833ba4ddcf946fe088e012d6fbf6 | 341f8a5ca07f99b1480b6268105fce04e3cc89fe | refs/heads/master | 2021-01-01T23:01:40.931062 | 2020-02-09T22:15:26 | 2020-02-09T22:15:26 | 239,382,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | BASEDIR = './'
DATA = './data' | [
"burrussmatthew@gmail.com"
] | burrussmatthew@gmail.com |
0e5d489a49a94330d5600743529ef7495b17d98f | c0df4b81f88b0e16a70fe8bd16e512c7d5add9de | /klearn/model_selection/metrics_depr.py | 25011f8965c028ab518c068411ced150bbce5a3f | [
"MIT"
] | permissive | KevinLiao159/klearn | b53139e8659c8f2b3ada53b87cf29952204c9aca | ffc0cb6b69cd21f2aac8934af55ac6e32c4db689 | refs/heads/master | 2021-05-03T05:20:10.003850 | 2018-09-19T18:25:43 | 2018-09-19T18:25:43 | 120,633,954 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,409 | py | """
Metrics to assess performance on classification task given class prediction
Functions named as *_score return a scalar value to maximize: the higher
the better
Functions named as *_error or *_loss return a scalar value to minimize:
the lower the better
"""
# Authors: Kevin Liao
import numpy as np
import math
from sklearn.metrics.classification import _weighted_sum
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
recall_score, roc_auc_score, log_loss)
from sklearn.metrics import mean_squared_error
from gravity_learn.utils import force_array
import warnings
warnings.warn("This module was deprecated. All scores and metrics "
"are moved to model_selection.metrics",
DeprecationWarning)
__all__ = ('classification_error',
'long_error',
'short_error',
'short_precision_score',
'short_recall_score',
'top_bottom_accuracy_score',
'top_bottom_error',
'top_bottom_long_error',
'top_bottom_short_error',
'top_bottom_precision_score',
'top_bottom_recall_score',
'top_bottom_short_precision_score',
'top_bottom_short_recall_score',
'top_bottom_f1_score',
'top_bottom_roc_auc_score',
'top_bottom_log_loss',
'root_mean_squared_error',
'mean_absolute_percentage_error')
# --------------------------------------------------
# Classification metrics
# --------------------------------------------------
def _select_top_and_bottom(y_true, y_score,
percentile=10, interpolation='midpoint'):
"""
Select truth values, predictions, scores of the top and bottom observations
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, ]
True binary labels in binary label indicators.
    y_score : array, shape = [n_samples, 2]
        Target scores as per-class probability estimates; column 1 must
        hold the positive-class probabilities.
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
Returns
-------
y_true_ext : array, shape = [n_samples] or [n_samples, ]
True binary labels in binary label indicators of top and bottom
y_score_ext : array, shape = [n_samples] or [n_samples, 2]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions of top and bottom.
y_pred_ext : array, shape = [n_samples] or [n_samples, ]
Target prediction, can either be 1 or 0, top is always 1 and bottom\
is always 0.
"""
y_true = force_array(y_true)
y_score = force_array(y_score)
upperQ = np.percentile(y_score[:, 1], q=(100-percentile),
interpolation=interpolation)
lowerQ = np.percentile(y_score[:, 1], q=percentile,
interpolation=interpolation)
top_bottom_filter = (y_score[:, 1] >= upperQ) | (y_score[:, 1] <= lowerQ)
y_true_ext = y_true[top_bottom_filter]
y_score_ext = y_score[top_bottom_filter]
y_pred_ext = y_score_ext[:, 1] >= 0.5
return y_true_ext, y_score_ext, y_pred_ext
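# Worked sketch (hypothetical scores): with percentile=25 and positive-class
# scores [0.1, 0.3, 0.6, 0.9], the midpoint quantiles are 0.2 and 0.75, so
# only the rows scoring 0.9 and 0.1 survive; predictions for the survivors
# are simply score >= 0.5 (top -> 1, bottom -> 0).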
def classification_error(y_true, y_pred,
normalize=True, sample_weight=None):
"""
Compute classification error
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If False, return the number of misclassified samples.
Otherwise, return the fraction of misclassified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
error : float
If normalize == True, return the misclassified samples
(float), else it returns the number of misclassified samples
(int).
"""
return 1 - accuracy_score(y_true, y_pred, normalize, sample_weight)
def long_error(y_true, y_pred, normalize=True, sample_weight=None):
"""
Error of long classification. False negative rate (FNR)
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If False, return the number of misclassified samples.
Otherwise, return the fraction of misclassified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
error : float
If normalize == True, return the misclassified samples
(float), else it returns the number of misclassified samples
(int).
The best performance is 0
"""
long_true = y_true[y_true == 1]
long_pred = y_pred[y_true == 1]
score = long_pred != long_true
return _weighted_sum(score, sample_weight, normalize)
def short_error(y_true, y_pred, normalize=True, sample_weight=None):
"""
Error of short classification. False positive rate (FPR)
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If False, return the number of misclassified samples.
Otherwise, return the fraction of misclassified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
error : float
If normalize == True, return the misclassified samples
(float), else it returns the number of misclassified samples
(int).
The best performance is 0
"""
short_true = y_true[y_true == 0]
short_pred = y_pred[y_true == 0]
score = short_pred != short_true
return _weighted_sum(score, sample_weight, normalize)
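# Worked sketch (hypothetical labels): with y_true = [1, 1, 0, 0] and
# y_pred = [1, 0, 0, 1], long_error looks only at the two positives and
# returns 0.5 (one missed long), while short_error looks only at the two
# negatives and returns 0.5 (one false long signal).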
def short_precision_score(y_true, y_pred,
average='binary', sample_weight=None):
"""
    Precision of short prediction: negative predictive value (NPV)
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the negative class in binary classification or weighted
average of the precision of each class for the multiclass task.
"""
p = precision_score(y_true, y_pred,
labels=None, pos_label=0,
average=average, sample_weight=sample_weight)
return p
def short_recall_score(y_true, y_pred, average='binary', sample_weight=None):
"""
Recall of short prediction. True negative rate (TNR), Specificity (SPC)
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the negative class in binary classification or weighted
average of the recall of each class for the multiclass task.
"""
r = recall_score(y_true, y_pred,
labels=None, pos_label=0, average=average,
sample_weight=sample_weight)
return r
def top_bottom_accuracy_score(y_true, y_score,
percentile=10, interpolation='midpoint',
normalize=True, sample_weight=None):
"""
Accuracy score of top and bottom percentile observations.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
normalize : bool, optional (default=True)
        If False, return the number of correctly classified samples.
        Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
        If normalize == True, return the fraction of correctly classified
        samples (float), else return the number of correctly classified
        samples (int).
The best performance is 1 with normalize == True and the number
of samples with normalize == False.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return accuracy_score(y_true_ext, y_pred_ext, normalize, sample_weight)
def top_bottom_error(y_true, y_score,
percentile=10, interpolation='midpoint',
normalize=True, sample_weight=None):
"""
Classification error for top and bottom percentile
observations.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
normalize : bool, optional (default=True)
If False, return the number of misclassified samples.
Otherwise, return the fraction of misclassified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
error : float
If normalize == True, return the misclassified samples
(float), else it returns the number of misclassified samples
(int).
The best performance is 0
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return classification_error(y_true_ext, y_pred_ext,
normalize, sample_weight)
def top_bottom_long_error(y_true, y_score,
percentile=10, interpolation='midpoint',
normalize=True, sample_weight=None):
"""
Classification error for long class of top and bottom percentile
observations.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
normalize : bool, optional (default=True)
If False, return the number of misclassified samples.
Otherwise, return the fraction of misclassified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
error : float
If normalize == True, return the misclassified samples
(float), else it returns the number of misclassified samples
(int).
The best performance is 0
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return long_error(y_true_ext, y_pred_ext, normalize, sample_weight)
def top_bottom_short_error(y_true, y_score,
percentile=10, interpolation='midpoint',
normalize=True, sample_weight=None):
"""
Classification error for short class of top and bottom percentile
observations.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
normalize : bool, optional (default=True)
If False, return the number of misclassified samples.
Otherwise, return the fraction of misclassified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
error : float
If normalize == True, return the misclassified samples
(float), else it returns the number of misclassified samples
(int).
The best performance is 0
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return short_error(y_true_ext, y_pred_ext, normalize, sample_weight)
def top_bottom_precision_score(y_true, y_score,
percentile=10, interpolation='midpoint',
average='binary', sample_weight=None):
"""
Compute the precision of top and bottom observations
The precision is the ratio tp / (tp + fp) where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Top and bottom precision of the positive class in binary \
classification or weighted average of the precision of each class \
for the multiclass task.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return precision_score(y_true=y_true_ext, y_pred=y_pred_ext,
pos_label=1, average=average,
sample_weight=sample_weight)
def top_bottom_recall_score(y_true, y_score,
percentile=10, interpolation='midpoint',
average='binary', sample_weight=None):
"""
Compute the recall of top and bottom observations
    The recall is the ratio tp / (tp + fn) where tp is the number of
    true positives and fn the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive
    samples.
    The best value is 1 and the worst value is 0.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
        Top and bottom recall of the positive class in binary classification \
        or weighted average of the recall of each class for \
the multiclass task.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return recall_score(y_true=y_true_ext, y_pred=y_pred_ext,
pos_label=1, average=average,
sample_weight=sample_weight)
def top_bottom_short_precision_score(y_true, y_score,
percentile=10, interpolation='midpoint',
average='binary', sample_weight=None):
"""
Compute the short precision of top and bottom observations
The precision is the ratio tp / (tp + fp) where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Top and bottom precision of the negative class in binary \
classification or weighted average of the precision of each class \
for the multiclass task.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return precision_score(y_true=y_true_ext, y_pred=y_pred_ext,
pos_label=0, average=average,
sample_weight=sample_weight)
def top_bottom_short_recall_score(y_true, y_score,
percentile=10, interpolation='midpoint',
average='binary', sample_weight=None):
"""
    Compute the short (negative-class) recall of top and bottom observations
    With pos_label=0 the recall is the ratio tn / (tn + fp), i.e. the
    fraction of actual shorts that are predicted short (specificity).
    The best value is 1 and the worst value is 0.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
        Top and bottom recall of the negative class in binary classification \
        or weighted average of the recall of each class for \
the multiclass task.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return recall_score(y_true=y_true_ext, y_pred=y_pred_ext,
pos_label=0, average=average,
sample_weight=sample_weight)
def top_bottom_f1_score(y_true, y_score,
percentile=10, interpolation='midpoint',
average='binary', sample_weight=None):
"""
Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return f1_score(y_true=y_true_ext, y_pred=y_pred_ext,
pos_label=1, average=average,
sample_weight=sample_weight)
def top_bottom_roc_auc_score(y_true, y_score,
percentile=10, interpolation='midpoint',
average='macro', sample_weight=None):
"""
Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
    average : string, [None, 'micro', 'macro' (default), 'samples', \
        'weighted']
This parameter is required for multiclass/multilabel targets.
If None, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
'binary':
Only report results for the class specified by pos_label.
This is applicable only if targets (y_{true,pred}) are binary.
'micro':
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
'macro':
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
'weighted':
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
'samples':
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
    auc : float
        Area Under the ROC Curve (AUC) computed on the top and bottom
        percentile observations.
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
    # roc_auc_score expects 1d scores for the positive class, hence [:, 1]
    # TODO: need to handle pos_label in _binary_check
return roc_auc_score(y_true=y_true_ext, y_score=y_score_ext[:, 1],
average=average, sample_weight=sample_weight)
def top_bottom_log_loss(y_true, y_score,
percentile=10, interpolation='midpoint',
eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
percentile: float, default 10 (10% quantile) 0 <= percentile <= 100,
the top and bottom quantile(s) to select from all true values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
New in version 0.18.0.
This optional parameter specifies the interpolation method to use,\
when the desired quantile lies between two data points i and j:
linear: i + (j - i) * fraction, where fraction is the fractional part\
of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j whichever is nearest.
midpoint: (i + j) / 2.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_true_ext, y_score_ext, y_pred_ext =\
_select_top_and_bottom(y_true, y_score, percentile, interpolation)
return log_loss(y_true=y_true_ext, y_pred=y_pred_ext,
eps=eps, normalize=normalize, sample_weight=sample_weight)
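# A hedged usage sketch for top_bottom_log_loss(names and values are
# illustrative; the exact loss depends on which rows the internal
# _select_top_and_bottom helper keeps for the requested percentile, so no
# fixed output is shown):
#     y_true = np.array([0, 1, 0, 1, 0, 1, 0, 1])
#     y_score = np.array([0.1, 0.9, 0.2, 0.8, 0.3, 0.7, 0.4, 0.6])
#     loss = top_bottom_log_loss(y_true, y_score, percentile=25)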
# --------------------------------------------------
# Regression metrics
# --------------------------------------------------
def root_mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Root mean squared error regression loss
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
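    Examples
    --------
    A minimal sketch, assuming ``mean_squared_error`` is scikit-learn's
    implementation imported earlier in this module:
    >>> root_mean_squared_error([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])
    0.6123724356957945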
"""
mse = mean_squared_error(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput=multioutput
)
return math.sqrt(mse)
def mean_absolute_percentage_error(y_true, y_pred, robust=False):
"""mean_absolute_percentage_error
Use case:
y is expressed in percent and we want to take pct into account
Formula:
        mean_absolute_percentage_error = \
            mean(abs((y_true - y_pred) / y_true)) * 100
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
robust : bool, if True, use median, otherwise, mean
Default is False
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0)
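    Examples
    --------
    A minimal sketch(values chosen for illustration; assumes force_array
    behaves like numpy.asarray, and float() keeps the doctest output stable
    across numpy versions):
    >>> float(mean_absolute_percentage_error([100, 200], [110, 180]))
    10.0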
"""
y_true = force_array(y_true)
y_pred = force_array(y_pred)
if robust:
loss = np.median(np.abs((y_true - y_pred)/y_true)) * 100
else: # use mean
loss = np.mean(np.abs((y_true - y_pred)/y_true)) * 100
return loss
| [
"lwk723@berkeley.edu"
] | lwk723@berkeley.edu |
19b974e1d0cd8c7142d40aa60413da30905446d6 | a5cce6d6cda00a90676a8cc56b1ffbf17c8066b0 | /python/ccxt/mexc3.py | a05285a07b4ce29ae2381bf220a7471813a0f65b | [
"MIT"
] | permissive | woolf-wen/ccxt | 2ce7d699344935bd5b291ce049ec026fd7eaef76 | ecf77d57d1055cf90d38837cf5a02d1ad82b950b | refs/heads/master | 2023-02-13T05:43:46.376882 | 2023-01-30T16:02:48 | 2023-01-30T16:02:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198,812 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import NotSupported
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class mexc3(Exchange):
def describe(self):
return self.deep_extend(super(mexc3, self).describe(), {
'id': 'mexc3',
'name': 'MEXC Global',
'countries': ['SC'], # Seychelles
'rateLimit': 50, # default rate limit is 20 times per second
'version': 'v3',
'has': {
'CORS': None,
'spot': True,
'margin': True,
'swap': True,
'future': True,
'option': None,
'addMargin': True,
'borrowMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': None,
'createDepositAddress': None,
'createLimitOrder': None,
'createMarketOrder': None,
'createOrder': True,
'createReduceOnlyOrder': True,
'deposit': None,
'editOrder': None,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBidsAsks': True,
'fetchBorrowRate': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRates': None,
'fetchBorrowRatesPerSymbol': None,
'fetchCanceledOrders': True,
'fetchClosedOrder': None,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': None,
'fetchDepositAddress': True,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': True,
'fetchDeposits': True,
'fetchDepositWithdrawFee': 'emulated',
'fetchDepositWithdrawFees': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': None,
'fetchIndexOHLCV': True,
'fetchL2OrderBook': True,
'fetchLedger': None,
'fetchLedgerEntry': None,
'fetchLeverageTiers': True,
'fetchMarginMode': False,
'fetchMarketLeverageTiers': None,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': None,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositionMode': True,
'fetchPositions': True,
'fetchPositionsRisk': None,
'fetchPremiumIndexOHLCV': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': None,
'fetchTradingFees': True,
'fetchTradingLimits': None,
'fetchTransactionFee': 'emulated',
'fetchTransactionFees': True,
'fetchTransactions': None,
'fetchTransfer': True,
'fetchTransfers': True,
'fetchWithdrawal': None,
'fetchWithdrawals': True,
'privateAPI': True,
'publicAPI': True,
'reduceMargin': True,
'repayMargin': True,
'setLeverage': True,
'setMarginMode': None,
'setPositionMode': True,
'signIn': None,
'transfer': None,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/137283979-8b2a818d-8633-461b-bfca-de89e8c446b2.jpg',
'api': {
'spot': {
'public': 'https://api.mexc.com',
'private': 'https://api.mexc.com',
},
'spot2': {
'public': 'https://www.mexc.com/open/api/v2',
'private': 'https://www.mexc.com/open/api/v2',
},
'contract': {
'public': 'https://contract.mexc.com/api/v1/contract',
'private': 'https://contract.mexc.com/api/v1/private',
},
},
'www': 'https://www.mexc.com/',
'doc': [
'https://mxcdevelop.github.io/apidocs/spot_v3_en/',
'https://mxcdevelop.github.io/APIDoc/', # v1 & v2 : soon to be deprecated
],
'fees': [
'https://www.mexc.com/fee',
],
'referral': 'https://m.mexc.com/auth/signup?inviteCode=1FQ1G',
},
'api': {
'spot': {
'public': {
'get': {
'ping': 1,
'time': 1,
'exchangeInfo': 1,
'depth': 1,
'trades': 1,
'historicalTrades': 1,
'aggTrades': 1,
'klines': 1,
'avgPrice': 1,
'ticker/24hr': 1,
'ticker/price': 1,
'ticker/bookTicker': 1,
'etf/info': 1,
},
},
'private': {
'get': {
'order': 1,
'openOrders': 1,
'allOrders': 1,
'account': 1,
'myTrades': 1,
'sub-account/list': 1,
'sub-account/apiKey': 1,
'capital/config/getall': 1,
'capital/deposit/hisrec': 1,
'capital/withdraw/history': 1,
'capital/deposit/address': 1,
'capital/transfer': 1,
'capital/sub-account/universalTransfer': 1,
'margin/loan': 1,
'margin/allOrders': 1,
'margin/myTrades': 1,
'margin/openOrders': 1,
'margin/maxTransferable': 1,
'margin/priceIndex': 1,
'margin/order': 1,
'margin/isolated/account': 1,
'margin/maxBorrowable': 1,
'margin/repay': 1,
'margin/isolated/pair': 1,
'margin/forceLiquidationRec': 1,
'margin/isolatedMarginData': 1,
'margin/isolatedMarginTier': 1,
'rebate/taxQuery': 1,
'rebate/detail': 1,
'rebate/detail/kickback': 1,
'rebate/referCode': 1,
'mxDeduct/enable': 1,
},
'post': {
'order': 1,
'order/test': 1,
'sub-account/virtualSubAccount': 1,
'sub-account/apiKey': 1,
'sub-account/futures': 1,
'sub-account/margin': 1,
'batchOrders': 1,
'capital/withdraw/apply': 1,
'capital/transfer': 1,
'capital/deposit/address': 1,
'capital/sub-account/universalTransfer': 1,
'margin/tradeMode': 1,
'margin/order': 1,
'margin/loan': 1,
'margin/repay': 1,
'mxDeduct/enable': 1,
},
'delete': {
'order': 1,
'openOrders': 1,
'sub-account/apiKey': 1,
'margin/order': 1,
'margin/openOrders': 1,
},
},
},
'contract': {
'public': {
'get': {
'ping': 2,
'detail': 2,
                            'support_currencies': 2, # TODO: should we implement 'fetchCurrencies' solely for swap? because spot doesn't have it atm
'depth/{symbol}': 2,
'depth_commits/{symbol}/{limit}': 2,
'index_price/{symbol}': 2,
'fair_price/{symbol}': 2,
'funding_rate/{symbol}': 2,
'kline/{symbol}': 2,
'kline/index_price/{symbol}': 2,
'kline/fair_price/{symbol}': 2,
'deals/{symbol}': 2,
'ticker': 2,
'risk_reverse': 2,
'risk_reverse/history': 2,
'funding_rate/history': 2,
},
},
'private': {
'get': {
'account/assets': 2,
'account/asset/{currency}': 2,
'account/transfer_record': 2,
'position/list/history_positions': 2,
'position/open_positions': 2,
'position/funding_records': 2,
'position/position_mode': 2,
'order/list/open_orders/{symbol}': 2,
'order/list/history_orders': 2,
'order/external/{symbol}/{external_oid}': 2,
'order/get/{order_id}': 2,
'order/batch_query': 8,
'order/deal_details/{order_id}': 2,
'order/list/order_deals': 2,
'planorder/list/orders': 2,
'stoporder/list/orders': 2,
'stoporder/order_details/{stop_order_id}': 2,
'account/risk_limit': 2, # TO_DO: gets max/min position size, allowed sides, leverage, maintenance margin, initial margin, etc...
'account/tiered_fee_rate': 2, # TO_DO: taker/maker fees for account
'position/leverage': 2,
},
'post': {
'position/change_margin': 2,
'position/change_leverage': 2,
'position/change_position_mode': 2,
'order/submit': 2,
'order/submit_batch': 40,
'order/cancel': 2,
'order/cancel_with_external': 2,
'order/cancel_all': 2,
'account/change_risk_level': 2,
'planorder/place': 2,
'planorder/cancel': 2,
'planorder/cancel_all': 2,
'stoporder/cancel': 2,
'stoporder/cancel_all': 2,
'stoporder/change_price': 2,
'stoporder/change_plan_price': 2,
},
},
},
'spot2': {
'public': {
'get': {
'market/symbols': 1,
'market/coin/list': 2,
'common/timestamp': 1,
'common/ping': 1,
'market/ticker': 1,
'market/depth': 1,
'market/deals': 1,
'market/kline': 1,
'market/api_default_symbols': 2,
},
},
'private': {
'get': {
'account/info': 1,
'order/open_orders': 1,
'order/list': 1,
'order/query': 1,
'order/deals': 1,
'order/deal_detail': 1,
'asset/deposit/address/list': 2,
'asset/deposit/list': 2,
'asset/address/list': 2,
'asset/withdraw/list': 2,
'asset/internal/transfer/record': 10,
'account/balance': 10,
'asset/internal/transfer/info': 10,
'market/api_symbols': 2,
},
'post': {
'order/place': 1,
'order/place_batch': 1,
'order/advanced/place_batch': 1,
'asset/withdraw': 2,
'asset/internal/transfer': 10,
},
'delete': {
'order/cancel': 1,
'order/cancel_by_symbol': 1,
'asset/withdraw': 2,
},
},
},
},
'precisionMode': TICK_SIZE,
'timeframes': {
'1m': '1m', # spot, swap
'3m': '3m', # spot
'5m': '5m', # spot, swap
'15m': '15m', # spot, swap
'30m': '30m', # spot, swap
'1h': '1h', # spot, swap
'2h': '2h', # spot
'4h': '4h', # spot, swap
'6h': '6h', # spot
'8h': '8h', # spot, swap
'12h': '12h', # spot
'1d': '1d', # spot, swap
'3d': '3d', # spot
'1w': '1w', # spot, swap
'1M': '1M', # spot, swap
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.002'), # maker / taker
'taker': self.parse_number('0.002'),
},
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'unavailableContracts': {
'BTC/USDT:USDT': True,
'LTC/USDT:USDT': True,
'ETH/USDT:USDT': True,
},
'fetchMarkets': {
'types': {
'spot': True,
'future': {
'linear': False,
'inverse': False,
},
'swap': {
'linear': True,
'inverse': False,
},
},
},
'timeframes': {
'spot': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'swap': {
'1m': 'Min1',
'5m': 'Min5',
'15m': 'Min15',
'30m': 'Min30',
'1h': 'Min60',
'4h': 'Hour4',
'8h': 'Hour8',
'1d': 'Day1',
'1w': 'Week1',
'1M': 'Month1',
},
},
'defaultType': 'spot', # spot, swap
'networks': {
'TRX': 'TRC20',
'ETH': 'ERC20',
'BEP20': 'BEP20(BSC)',
'BSC': 'BEP20(BSC)',
},
'networksById': {
'BEP20(BSC)': 'BSC',
},
'networkAliases': {
'BSC(BEP20)': 'BSC',
},
'recvWindow': 5 * 1000, # 5 sec, default
'maxTimeTillEnd': 90 * 86400 * 1000 - 1, # 90 days
'broker': 'CCXT',
},
'commonCurrencies': {
'BEYONDPROTOCOL': 'BEYOND',
'BIFI': 'BIFIF',
'BYN': 'BeyondFi',
'COFI': 'COFIX', # conflict with CoinFi
'DFI': 'DfiStarter',
'DFT': 'dFuture',
'DRK': 'DRK',
'EGC': 'Egoras Credit',
'FLUX1': 'FLUX', # switched places
'FLUX': 'FLUX1', # switched places
'FREE': 'FreeRossDAO', # conflict with FREE Coin
'GMT': 'GMT Token', # Conflict with GMT(STEPN)
'STEPN': 'GMT', # Conflict with GMT Token
'HERO': 'Step Hero', # conflict with Metahero
'MIMO': 'Mimosa',
'PROS': 'Pros.Finance', # conflict with Prosper
'SIN': 'Sin City Token',
'SOUL': 'Soul Swap',
},
'exceptions': {
'exact': {
                    # until mexc migrates fully to v3, it may be worth noting the version & market next to each error, so obsolete versions' exceptions can be removed easily in the future
'-1128': BadRequest,
'-2011': BadRequest,
'-1121': BadSymbol,
                '10101': InsufficientFunds,  # {"msg":"资金不足","code":10101}(insufficient funds)
'2009': InvalidOrder, # {"success":false,"code":2009,"message":"Position is not exists or closed."}
'2011': BadRequest,
'30004': InsufficientFunds,
'33333': BadRequest, # {"msg":"Not support transfer","code":33333}
'44444': BadRequest,
'1002': InvalidOrder,
'30019': BadRequest,
'30005': InvalidOrder,
'2003': InvalidOrder,
'2005': InsufficientFunds,
'600': BadRequest,
'70011': PermissionDenied, # {"code":70011,"msg":"Pair user ban trade apikey."}
                '88004': InsufficientFunds,  # {"msg":"超出最大可借,最大可借币为:18.09833211","code":88004}(exceeds the maximum borrowable amount)
                '88009': ExchangeError,  # v3 {"msg":"Loan record does not exist","code":88009}
                '88013': InvalidOrder,  # {"msg":"最小交易额不能小于:5USDT","code":88013}(minimum trade amount is 5 USDT)
                '88015': InsufficientFunds,  # {"msg":"持仓不足","code":88015}(insufficient position)
'700003': InvalidNonce, # {"code":700003,"msg":"Timestamp for self request is outside of the recvWindow."}
},
'broad': {
'Order quantity error, please try to modify.': BadRequest, # code:2011
'Combination of optional parameters invalid': BadRequest, # code:-2011
'api market order is disabled': BadRequest, #
'Contract not allow place order!': InvalidOrder, # code:1002
'Oversold': InvalidOrder, # code:30005
'Insufficient position': InsufficientFunds, # code:30004
'Insufficient balance!': InsufficientFunds, # code:2005
'Bid price is great than max allow price': InvalidOrder, # code:2003
'Invalid symbol.': BadSymbol, # code:-1121
'Param error!': BadRequest, # code:600
},
},
})
def fetch_status(self, params={}):
"""
the latest known information on the availability of the exchange API
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `status structure <https://docs.ccxt.com/en/latest/manual.html#exchange-status-structure>`
"""
marketType, query = self.handle_market_type_and_params('fetchStatus', None, params)
response = None
status = None
updated = None
if marketType == 'spot':
response = self.spotPublicGetPing(query)
#
# {}
#
status = self.json(response) if response else 'ok'
elif marketType == 'swap':
response = self.contractPublicGetPing(query)
#
# {"success":true,"code":"0","data":"1648124374985"}
#
status = 'ok' if self.safe_value(response, 'success') else self.json(response)
updated = self.safe_integer(response, 'data')
return {
'status': status,
'updated': updated,
'url': None,
'eta': None,
'info': response,
}
def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
marketType, query = self.handle_market_type_and_params('fetchTime', None, params)
response = None
if marketType == 'spot':
response = self.spotPublicGetTime(query)
#
# {"serverTime": "1647519277579"}
#
return self.safe_integer(response, 'serverTime')
elif marketType == 'swap':
response = self.contractPublicGetPing(query)
#
# {"success":true,"code":"0","data":"1648124374985"}
#
return self.safe_integer(response, 'data')
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#query-the-currency-information
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an associative dictionary of currencies
"""
        # this endpoint requires authentication
# while fetchCurrencies is a public API method by design
# therefore we check the keys here
# and fallback to generating the currencies from the markets
if not self.check_required_credentials(False):
return None
response = self.spotPrivateGetCapitalConfigGetall(params)
#
# {
# coin: 'QANX',
# name: 'QANplatform',
# networkList: [
# {
# coin: 'QANX',
# depositDesc: null,
# depositEnable: True,
# minConfirm: '0',
# name: 'QANplatform',
# network: 'BEP20(BSC)',
# withdrawEnable: False,
# withdrawFee: '42.000000000000000000',
# withdrawIntegerMultiple: null,
# withdrawMax: '24000000.000000000000000000',
# withdrawMin: '20.000000000000000000',
# sameAddress: False,
# contract: '0xAAA7A10a8ee237ea61E8AC46C50A8Db8bCC1baaa'
# },
# {
# coin: 'QANX',
# depositDesc: null,
# depositEnable: True,
# minConfirm: '0',
# name: 'QANplatform',
# network: 'ERC20',
# withdrawEnable: True,
# withdrawFee: '2732.000000000000000000',
# withdrawIntegerMultiple: null,
# withdrawMax: '24000000.000000000000000000',
# withdrawMin: '240.000000000000000000',
# sameAddress: False,
# contract: '0xAAA7A10a8ee237ea61E8AC46C50A8Db8bCC1baaa'
# }
# ]
# }
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'coin')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'name')
currencyActive = False
currencyFee = None
currencyWithdrawMin = None
currencyWithdrawMax = None
depositEnabled = False
withdrawEnabled = False
networks = {}
chains = self.safe_value(currency, 'networkList', [])
for j in range(0, len(chains)):
chain = chains[j]
networkId = self.safe_string(chain, 'network')
network = self.safe_network(networkId)
isDepositEnabled = self.safe_value(chain, 'depositEnable', False)
isWithdrawEnabled = self.safe_value(chain, 'withdrawEnable', False)
active = (isDepositEnabled and isWithdrawEnabled)
currencyActive = active or currencyActive
withdrawMin = self.safe_string(chain, 'withdrawMin')
withdrawMax = self.safe_string(chain, 'withdrawMax')
currencyWithdrawMin = withdrawMin if (currencyWithdrawMin is None) else currencyWithdrawMin
currencyWithdrawMax = withdrawMax if (currencyWithdrawMax is None) else currencyWithdrawMax
fee = self.safe_number(chain, 'withdrawFee')
currencyFee = fee if (currencyFee is None) else currencyFee
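            # keep the loosest aggregate limits across networks:
            # the smallest withdrawMin and the largest withdrawMax seen so far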
if Precise.string_gt(currencyWithdrawMin, withdrawMin):
currencyWithdrawMin = withdrawMin
if Precise.string_lt(currencyWithdrawMax, withdrawMax):
currencyWithdrawMax = withdrawMax
if isDepositEnabled:
depositEnabled = True
if isWithdrawEnabled:
withdrawEnabled = True
networks[network] = {
'info': chain,
'id': networkId,
'network': network,
'active': active,
'deposit': isDepositEnabled,
'withdraw': isWithdrawEnabled,
'fee': fee,
'precision': None,
'limits': {
'withdraw': {
'min': withdrawMin,
'max': withdrawMax,
},
},
}
networkKeys = list(networks.keys())
networkKeysLength = len(networkKeys)
if (networkKeysLength == 1) or ('NONE' in networks):
                defaultNetwork = self.safe_value_2(networks, 'NONE', networkKeys[networkKeysLength - 1])
if defaultNetwork is not None:
currencyFee = defaultNetwork['fee']
result[code] = {
'info': currency,
'id': id,
'code': code,
'name': name,
'active': currencyActive,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': currencyFee,
'precision': None,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': currencyWithdrawMin,
'max': currencyWithdrawMax,
},
},
'networks': networks,
}
return result
def safe_network(self, networkId):
if networkId.find('BSC') >= 0:
return 'BEP20'
parts = networkId.split(' ')
networkId = ''.join(parts)
networkId = networkId.replace('-20', '20')
networksById = {
'ETH': 'ETH',
'ERC20': 'ERC20',
'BEP20(BSC)': 'BEP20',
'TRX': 'TRC20',
}
return self.safe_string(networksById, networkId, networkId)
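    # Illustrative traces of the normalization above(derived from the code,
    # not from exchange docs):
    #   'BEP20(BSC)' -> 'BEP20'(matched via the 'BSC' substring check)
    #   'TRC-20'     -> 'TRC20'(the '-20' suffix is normalized to '20')
    #   'TRX'        -> 'TRC20'(mapped through the networksById table)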
def fetch_markets(self, params={}):
"""
retrieves data on all markets for mexc3
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
spotMarket = self.fetch_spot_markets(params)
swapMarket = self.fetch_swap_markets(params)
return self.array_concat(spotMarket, swapMarket)
def fetch_spot_markets(self, params={}):
response = self.spotPublicGetExchangeInfo(params)
#
# {
# "timezone": "CST",
# "serverTime": 1647521860402,
# "rateLimits": [],
# "exchangeFilters": [],
# "symbols": [
# {
# "symbol": "OGNUSDT",
# "status": "ENABLED",
# "baseAsset": "OGN",
# "baseAssetPrecision": "2",
# "quoteAsset": "USDT",
# "quoteAssetPrecision": "4",
# "orderTypes": [
# "LIMIT",
# "LIMIT_MAKER"
# ],
# "baseCommissionPrecision": "2",
# "quoteCommissionPrecision": "4",
# "quoteOrderQtyMarketAllowed": False,
# "isSpotTradingAllowed": True,
# "isMarginTradingAllowed": True,
# "permissions": [
# "SPOT",
# "MARGIN"
# ],
# "filters": [],
# "baseSizePrecision": "0.01", # self turned out to be a minimum base amount for order
# "maxQuoteAmount": "5000000",
# "makerCommission": "0.002",
# "takerCommission": "0.002"
# "quoteAmountPrecision": "5", # self turned out to be a minimum cost amount for order
# "quotePrecision": "4", # deprecated in favor of 'quoteAssetPrecision'( https://dev.binance.vision/t/what-is-the-difference-between-quoteprecision-and-quoteassetprecision/4333 )
# # note, "icebergAllowed" & "ocoAllowed" fields were recently removed
# },
# ]
# }
#
# Notes:
        # - 'quoteAssetPrecision' & 'baseAssetPrecision' are not the currency's real blockchain precision(to view a currency's actual precision, refer to the fetchCurrencies() method).
#
data = self.safe_value(response, 'symbols', [])
result = []
for i in range(0, len(data)):
market = data[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
isSpotTradingAllowed = self.safe_value(market, 'isSpotTradingAllowed')
active = False
if (status == 'ENABLED') and (isSpotTradingAllowed):
active = True
isMarginTradingAllowed = self.safe_value(market, 'isMarginTradingAllowed')
makerCommission = self.safe_number(market, 'makerCommission')
takerCommission = self.safe_number(market, 'takerCommission')
maxQuoteAmount = self.safe_number(market, 'maxQuoteAmount')
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': isMarginTradingAllowed,
'swap': False,
'future': False,
'option': False,
'active': active,
'contract': False,
'linear': None,
'inverse': None,
'taker': takerCommission,
'maker': makerCommission,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'baseAssetPrecision'))),
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'quoteAssetPrecision'))),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'baseSizePrecision'),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': self.safe_number(market, 'quoteAmountPrecision'),
'max': maxQuoteAmount,
},
},
'info': market,
})
return result
def fetch_swap_markets(self, params={}):
response = self.contractPublicGetDetail(params)
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "symbol":"BTC_USDT",
# "displayName":"BTC_USDT永续",
# "displayNameEn":"BTC_USDT SWAP",
# "positionOpenType":3,
# "baseCoin":"BTC",
# "quoteCoin":"USDT",
# "settleCoin":"USDT",
# "contractSize":0.0001,
# "minLeverage":1,
# "maxLeverage":125,
# "priceScale":2, # seems useless atm, as it's just how UI shows the price, i.e. 29583.50 for BTC/USDT:USDT, while price ticksize is 0.5
# "volScale":0, # probably: contract amount precision
# "amountScale":4, # probably: quote currency precision
# "priceUnit":0.5, # price tick size
# "volUnit":1, # probably: contract tick size
# "minVol":1,
# "maxVol":1000000,
# "bidLimitPriceRate":0.1,
# "askLimitPriceRate":0.1,
# "takerFeeRate":0.0006,
# "makerFeeRate":0.0002,
# "maintenanceMarginRate":0.004,
# "initialMarginRate":0.008,
# "riskBaseVol":10000,
# "riskIncrVol":200000,
# "riskIncrMmr":0.004,
# "riskIncrImr":0.004,
# "riskLevelLimit":5,
# "priceCoefficientVariation":0.1,
# "indexOrigin":["BINANCE","GATEIO","HUOBI","MXC"],
# "state":0, # 0 enabled, 1 delivery, 2 completed, 3 offline, 4 pause
# "isNew":false,
# "isHot":true,
# "isHidden":false
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
market = data[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'baseCoin')
quoteId = self.safe_string(market, 'quoteCoin')
settleId = self.safe_string(market, 'settleCoin')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
state = self.safe_string(market, 'state')
result.append({
'id': id,
'symbol': base + '/' + quote + ':' + settle,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': 'swap',
'spot': False,
'margin': False,
'swap': True,
'future': False,
'option': False,
'active': (state == '0'),
'contract': True,
'linear': True,
'inverse': False,
'taker': self.safe_number(market, 'takerFeeRate'),
'maker': self.safe_number(market, 'makerFeeRate'),
'contractSize': self.safe_number(market, 'contractSize'),
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'volUnit'),
'price': self.safe_number(market, 'priceUnit'),
},
'limits': {
'leverage': {
'min': self.safe_number(market, 'minLeverage'),
'max': self.safe_number(market, 'maxLeverage'),
},
'amount': {
'min': self.safe_number(market, 'minVol'),
'max': self.safe_number(market, 'maxVol'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
orderbook = None
if market['spot']:
response = self.spotPublicGetDepth(self.extend(request, params))
#
# {
# "lastUpdateId": "744267132",
# "bids": [
# ["40838.50","0.387864"],
# ["40837.95","0.008400"],
# ],
# "asks": [
# ["40838.61","6.544908"],
# ["40838.88","0.498000"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'lastUpdateId')
elif market['swap']:
response = self.contractPublicGetDepthSymbol(self.extend(request, params))
#
# {
# "success":true,
# "code":0,
# "data":{
# "asks":[
# [3445.72,48379,1],
# [3445.75,34994,1],
# ],
# "bids":[
# [3445.55,44081,1],
# [3445.51,24857,1],
# ],
# "version":2827730444,
# "timestamp":1634117846232
# }
# }
#
data = self.safe_value(response, 'data')
timestamp = self.safe_integer(data, 'timestamp')
orderbook = self.parse_order_book(data, symbol, timestamp)
orderbook['nonce'] = self.safe_integer(data, 'version')
return orderbook
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
        # if since is not None:
        #     request['startTime'] = since  # bug in api, waiting for fix
trades = None
if market['spot']:
method = self.safe_string(self.options, 'fetchTradesMethod', 'spotPublicGetAggTrades')
method = self.safe_string(params, 'method', method) # AggTrades, HistoricalTrades, Trades
trades = getattr(self, method)(self.extend(request, params))
#
# /trades, /historicalTrades
#
# [
# {
# "id": null,
# "price": "40798.94",
# "qty": "0.000508",
# "quoteQty": "20.72586152",
# "time": "1647546934374",
# "isBuyerMaker": True,
# "isBestMatch": True
# },
# ]
#
# /aggrTrades
#
# [
# {
# "a": null,
# "f": null,
# "l": null,
# "p": "40679",
# "q": "0.001309",
# "T": 1647551328000,
# "m": True,
# "M": True
# },
# ]
#
elif market['swap']:
response = self.contractPublicGetDealsSymbol(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": [
# {
# "p": 31199,
# "v": 18,
# "T": 1,
# "O": 3,
# "M": 2,
# "t": 1609831235985
# },
# ]
# }
#
trades = self.safe_value(response, 'data')
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
id = None
timestamp = None
orderId = None
symbol = None
fee = None
type = None
side = None
takerOrMaker = None
priceString = None
amountString = None
costString = None
# if swap
if 'v' in trade:
#
# swap: fetchTrades
#
# {
# "p": 31199,
# "v": 18,
# "T": 1,
# "O": 3,
# "M": 2,
# "t": 1609831235985
# }
#
timestamp = self.safe_integer(trade, 't')
market = self.safe_market(None, market)
symbol = market['symbol']
priceString = self.safe_string(trade, 'p')
amountString = self.safe_string(trade, 'v')
side = self.parse_order_side(self.safe_string(trade, 'T'))
takerOrMaker = 'taker'
else:
#
# spot: fetchTrades(for aggTrades)
#
# {
# "a": null,
# "f": null,
# "l": null,
# "p": "40679",
# "q": "0.001309",
# "T": 1647551328000,
# "m": True,
# "M": True
# }
#
# spot: fetchMyTrades, fetchOrderTrades
#
# {
# "symbol": "BTCUSDT",
# "id": "133948532984922113",
# "orderId": "133948532531949568",
# "orderListId": "-1",
# "price": "41995.51",
# "qty": "0.0002",
# "quoteQty": "8.399102",
# "commission": "0.016798204",
# "commissionAsset": "USDT",
# "time": "1647718055000",
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
#
# swap: fetchMyTrades, fetchOrderTrades
#
# {
# "id": "299444585",
# "symbol": "STEPN_USDT",
# "side": "1",
# "vol": "1",
# "price": "2.45455",
# "feeCurrency": "USDT",
# "fee": "0.00147273",
# "timestamp": "1648924557000",
# "profit": "0",
# "category": "1",
# "orderId": "265307163526610432",
# "positionMode": "1",
# "taker": True
# }
#
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
id = self.safe_string_2(trade, 'id', 'a')
priceString = self.safe_string_2(trade, 'price', 'p')
orderId = self.safe_string(trade, 'orderId')
# if swap
if 'positionMode' in trade:
timestamp = self.safe_integer(trade, 'timestamp')
amountString = self.safe_string(trade, 'vol')
side = self.parse_order_side(self.safe_string(trade, 'side'))
fee = {
'cost': self.safe_string(trade, 'fee'),
'currency': self.safe_currency_code(self.safe_string(trade, 'feeCurrency')),
}
takerOrMaker = 'taker' if self.safe_value(trade, 'taker') else 'maker'
else:
timestamp = self.safe_integer_2(trade, 'time', 'T')
amountString = self.safe_string_2(trade, 'qty', 'q')
costString = self.safe_string(trade, 'quoteQty')
isBuyer = self.safe_value(trade, 'isBuyer')
isMaker = self.safe_value(trade, 'isMaker')
buyerMaker = self.safe_string_2(trade, 'isBuyerMaker', 'm')
if isMaker is not None:
takerOrMaker = 'maker' if isMaker else 'taker'
if isBuyer is not None:
side = 'buy' if isBuyer else 'sell'
if buyerMaker is not None:
side = 'sell' if buyerMaker else 'buy'
takerOrMaker = 'taker'
feeAsset = self.safe_string(trade, 'commissionAsset')
if feeAsset is not None:
fee = {
'cost': self.safe_string(trade, 'commission'),
'currency': self.safe_currency_code(feeAsset),
}
if id is None:
id = self.synthetic_trade_id(market, timestamp, side, amountString, priceString, type, takerOrMaker)
return self.safe_trade({
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
'info': trade,
}, market)
def synthetic_trade_id(self, market=None, timestamp=None, side=None, amount=None, price=None, orderType=None, takerOrMaker=None):
        # TODO: could this be a unified method? this approach is used by multiple exchanges(mexc, woo-coinsbit, dydx, ...)
id = ''
if timestamp is not None:
id = self.number_to_string(timestamp) + '-' + self.safe_string(market, 'id', '_')
if side is not None:
id += '-' + side
if amount is not None:
id += '-' + self.number_to_string(amount)
if price is not None:
id += '-' + self.number_to_string(price)
if takerOrMaker is not None:
id += '-' + takerOrMaker
if orderType is not None:
id += '-' + orderType
return id
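    # A hedged example of the id format composed above(values are
    # illustrative): a taker buy of 18 contracts at 31199 on BTC_USDT at
    # t=1609831235985, with no order type, yields
    # '1609831235985-BTC_USDT-buy-18-31199-taker'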
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
options = self.safe_value(self.options, 'timeframes', {})
timeframes = self.safe_value(options, market['type'], {})
timeframeValue = self.safe_string(timeframes, timeframe)
request = {
'symbol': market['id'],
'interval': timeframeValue,
}
candles = None
if market['spot']:
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.spotPublicGetKlines(self.extend(request, params))
#
# [
# [
# 1640804880000,
# "47482.36",
# "47482.36",
# "47416.57",
# "47436.1",
# "3.550717",
# 1640804940000,
# "168387.3"
# ],
# ]
#
candles = response
elif market['swap']:
if since is not None:
request['start'] = int(since / 1000)
priceType = self.safe_string(params, 'price', 'default')
params = self.omit(params, 'price')
method = self.get_supported_mapping(priceType, {
'default': 'contractPublicGetKlineSymbol',
'index': 'contractPublicGetKlineIndexPriceSymbol',
'mark': 'contractPublicGetKlineFairPriceSymbol',
})
response = getattr(self, method)(self.extend(request, params))
#
# {
# "success":true,
# "code":0,
# "data":{
# "time":[1634052300,1634052360,1634052420],
# "open":[3492.2,3491.3,3495.65],
# "close":[3491.3,3495.65,3495.2],
# "high":[3495.85,3496.55,3499.4],
# "low":[3491.15,3490.9,3494.2],
# "vol":[1740.0,351.0,314.0],
# "amount":[60793.623,12260.4885,10983.1375],
# }
# }
#
data = self.safe_value(response, 'data')
candles = self.convert_trading_view_to_ohlcv(data, 'time', 'open', 'high', 'low', 'close', 'vol')
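            # the swap endpoint returns columnar TradingView-style arrays
            # (time[], open[], ...); convert_trading_view_to_ohlcv zips them
            # into row-wise candles, e.g. [1634052300, 3492.2, 3495.85, 3491.15, 3491.3, 1740.0]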
return self.parse_ohlcvs(candles, market, timeframe, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
request = {}
market = None
isSingularMarket = False
if symbols is not None:
length = len(symbols)
isSingularMarket = length == 1
firstSymbol = self.safe_string(symbols, 0)
market = self.market(firstSymbol)
marketType, query = self.handle_market_type_and_params('fetchTickers', market, params)
tickers = None
if isSingularMarket:
request['symbol'] = market['id']
if marketType == 'spot':
tickers = self.spotPublicGetTicker24hr(self.extend(request, query))
#
# [
# {
# "symbol": "BTCUSDT",
# "priceChange": "184.34",
# "priceChangePercent": "0.00400048",
# "prevClosePrice": "46079.37",
# "lastPrice": "46263.71",
# "lastQty": "",
# "bidPrice": "46260.38",
# "bidQty": "",
# "askPrice": "46260.41",
# "askQty": "",
# "openPrice": "46079.37",
# "highPrice": "47550.01",
# "lowPrice": "45555.5",
# "volume": "1732.461487",
# "quoteVolume": null,
# "openTime": 1641349500000,
# "closeTime": 1641349582808,
# "count": null
# }
# ]
#
elif marketType == 'swap':
response = self.contractPublicGetTicker(self.extend(request, query))
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "symbol":"ETH_USDT",
# "lastPrice":3581.3,
# "bid1":3581.25,
# "ask1":3581.5,
# "volume24":4045530,
# "amount24":141331823.5755,
# "holdVol":5832946,
# "lower24Price":3413.4,
# "high24Price":3588.7,
# "riseFallRate":0.0275,
# "riseFallValue":95.95,
# "indexPrice":3580.7852,
# "fairPrice":3581.08,
# "fundingRate":0.000063,
# "maxBidPrice":3938.85,
# "minAskPrice":3222.7,
# "timestamp":1634162885016
# },
# ]
# }
#
tickers = self.safe_value(response, 'data', [])
        # when it's a single-symbol request, the returned structure is different(a singular object) for both spot & swap, so we need to wrap it inside an array
if isSingularMarket:
tickers = [tickers]
return self.parse_tickers(tickers, symbols)
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchTicker', market, params)
ticker = None
request = {
'symbol': market['id'],
}
if marketType == 'spot':
ticker = self.spotPublicGetTicker24hr(self.extend(request, query))
#
# {
# "symbol": "BTCUSDT",
# "priceChange": "184.34",
# "priceChangePercent": "0.00400048",
# "prevClosePrice": "46079.37",
# "lastPrice": "46263.71",
# "lastQty": "",
# "bidPrice": "46260.38",
# "bidQty": "",
# "askPrice": "46260.41",
# "askQty": "",
# "openPrice": "46079.37",
# "highPrice": "47550.01",
# "lowPrice": "45555.5",
# "volume": "1732.461487",
# "quoteVolume": null,
# "openTime": 1641349500000,
# "closeTime": 1641349582808,
# "count": null
# }
#
elif marketType == 'swap':
response = self.contractPublicGetTicker(self.extend(request, query))
#
# {
# "success":true,
# "code":0,
# "data":{
# "symbol":"ETH_USDT",
# "lastPrice":3581.3,
# "bid1":3581.25,
# "ask1":3581.5,
# "volume24":4045530,
# "amount24":141331823.5755,
# "holdVol":5832946,
# "lower24Price":3413.4,
# "high24Price":3588.7,
# "riseFallRate":0.0275,
# "riseFallValue":95.95,
# "indexPrice":3580.7852,
# "fairPrice":3581.08,
# "fundingRate":0.000063,
# "maxBidPrice":3938.85,
# "minAskPrice":3222.7,
# "timestamp":1634162885016
# }
# }
#
ticker = self.safe_value(response, 'data', {})
        # a single-symbol request returns a singular object for both spot & swap, which parse_ticker consumes directly
return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
timestamp = None
bid = None
ask = None
bidVolume = None
askVolume = None
baseVolume = None
quoteVolume = None
open = None
high = None
low = None
changePcnt = None
changeValue = None
prevClose = None
isSwap = self.safe_value(market, 'swap')
# if swap
if isSwap or ('timestamp' in ticker):
#
# {
# "symbol":"ETH_USDT",
# "lastPrice":3581.3,
# "bid1":3581.25,
# "ask1":3581.5,
# "volume24":4045530,
# "amount24":141331823.5755,
# "holdVol":5832946,
# "lower24Price":3413.4,
# "high24Price":3588.7,
# "riseFallRate":0.0275,
# "riseFallValue":95.95,
# "indexPrice":3580.7852,
# "fairPrice":3581.08,
# "fundingRate":0.000063,
# "maxBidPrice":3938.85,
# "minAskPrice":3222.7,
# "timestamp":1634162885016
# }
#
timestamp = self.safe_integer(ticker, 'timestamp')
bid = self.safe_number(ticker, 'bid1')
ask = self.safe_number(ticker, 'ask1')
baseVolume = self.safe_string(ticker, 'volume24')
quoteVolume = self.safe_string(ticker, 'amount24')
high = self.safe_number(ticker, 'high24Price')
low = self.safe_number(ticker, 'lower24Price')
changeValue = self.safe_string(ticker, 'riseFallValue')
changePcnt = self.safe_string(ticker, 'riseFallRate')
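            # riseFallRate is a fraction(e.g. 0.0275); scale it to a percentage(2.75) for the unified 'percentage' field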
changePcnt = self.parse_number(Precise.string_mul(changePcnt, '100'))
else:
#
# {
# "symbol": "BTCUSDT",
# "priceChange": "184.34",
# "priceChangePercent": "0.00400048",
# "prevClosePrice": "46079.37",
# "lastPrice": "46263.71",
# "lastQty": "",
# "bidPrice": "46260.38",
# "bidQty": "",
# "askPrice": "46260.41",
# "askQty": "",
# "openPrice": "46079.37",
# "highPrice": "47550.01",
# "lowPrice": "45555.5",
# "volume": "1732.461487",
# "quoteVolume": null,
# "openTime": 1641349500000,
# "closeTime": 1641349582808,
# "count": null
# }
#
timestamp = self.safe_integer(ticker, 'closeTime')
bid = self.safe_number(ticker, 'bidPrice')
ask = self.safe_number(ticker, 'askPrice')
bidVolume = self.safe_number(ticker, 'bidQty')
askVolume = self.safe_number(ticker, 'askQty')
if bidVolume == 0:
bidVolume = None
if askVolume == 0:
askVolume = None
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'quoteVolume')
open = self.safe_string(ticker, 'openPrice')
high = self.safe_number(ticker, 'highPrice')
low = self.safe_number(ticker, 'lowPrice')
prevClose = self.safe_string(ticker, 'prevClosePrice')
changeValue = self.safe_string(ticker, 'priceChange')
changePcnt = self.safe_string(ticker, 'priceChangePercent')
changePcnt = self.parse_number(Precise.string_mul(changePcnt, '100'))
return self.safe_ticker({
'symbol': market['symbol'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'open': open,
'high': high,
'low': low,
'close': self.safe_string(ticker, 'lastPrice'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': None,
'previousClose': prevClose,
'change': changeValue,
'percentage': changePcnt,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_bids_asks(self, symbols=None, params={}):
"""
fetches the bid and ask price and volume for multiple markets
:param [str]|None symbols: unified symbols of the markets to fetch the bids and asks for, all markets are returned if not assigned
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = None
isSingularMarket = False
if symbols is not None:
length = len(symbols)
isSingularMarket = length == 1
market = self.market(symbols[0])
marketType, query = self.handle_market_type_and_params('fetchBidsAsks', market, params)
tickers = None
if marketType == 'spot':
tickers = self.spotPublicGetTickerBookTicker(query)
#
# [
# {
# "symbol": "AEUSDT",
# "bidPrice": "0.11001",
# "bidQty": "115.59",
# "askPrice": "0.11127",
# "askQty": "215.48"
# },
# ]
#
elif marketType == 'swap':
raise NotSupported(self.id + ' fetchBidsAsks() is not available for ' + marketType + ' markets')
        # when it's a single-symbol request, the returned structure is different(a singular object) for both spot & swap, so we need to wrap it inside an array
if isSingularMarket:
tickers = [tickers]
return self.parse_tickers(tickers, symbols)
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float|None price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['marginMode']: only 'isolated' is supported for spot-margin trading
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
marginMode, query = self.handle_margin_mode_and_params('createOrder', params)
if market['spot']:
return self.create_spot_order(market, type, side, amount, price, marginMode, query)
elif market['swap']:
return self.create_swap_order(market, type, side, amount, price, marginMode, query)
def create_spot_order(self, market, type, side, amount, price=None, marginMode=None, params={}):
symbol = market['symbol']
orderSide = 'BUY' if (side == 'buy') else 'SELL'
request = {
'symbol': market['id'],
'side': orderSide,
'type': type.upper(),
}
if orderSide == 'BUY' and type == 'market':
quoteOrderQty = self.safe_number(params, 'quoteOrderQty')
if quoteOrderQty is not None:
amount = quoteOrderQty
elif self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amountString = self.number_to_string(amount)
priceString = self.number_to_string(price)
quoteAmount = Precise.string_mul(amountString, priceString)
amount = self.parse_number(quoteAmount)
request['quoteOrderQty'] = amount
else:
request['quantity'] = self.amount_to_precision(symbol, amount)
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
clientOrderId = self.safe_string(params, 'clientOrderId')
if clientOrderId is not None:
request['newClientOrderId'] = clientOrderId
params = self.omit(params, ['type', 'clientOrderId'])
method = 'spotPrivatePostOrder'
if marginMode is not None:
if marginMode != 'isolated':
raise BadRequest(self.id + ' createOrder() does not support marginMode ' + marginMode + ' for spot-margin trading')
method = 'spotPrivatePostMarginOrder'
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "symbol": "BTCUSDT",
# "orderId": "123738410679123456",
# "orderListId": -1
# }
#
# margin
#
# {
# "symbol": "BTCUSDT",
# "orderId": "762634301354414080",
# "clientOrderId": null,
# "isIsolated": True,
# "transactTime": 1661992652132
# }
#
return self.extend(self.parse_order(response, market), {
'side': side,
'type': type,
'price': price,
'amount': amount,
})
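    # Worked example(illustrative) of the market-buy conversion above: with
    # createMarketBuyOrderRequiresPrice enabled, create_order('BTC/USDT',
    # 'market', 'buy', 0.001, 20000) sends quoteOrderQty = 0.001 * 20000 = 20,
    # i.e. the order cost expressed in the quote currency(USDT)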
def create_swap_order(self, market, type, side, amount, price=None, marginMode=None, params={}):
self.load_markets()
symbol = market['symbol']
unavailableContracts = self.safe_value(self.options, 'unavailableContracts', {})
        isContractUnavailable = self.safe_value(unavailableContracts, symbol, False)
        if isContractUnavailable:
            raise NotSupported(self.id + ' createSwapOrder() does not yet support this symbol: ' + symbol)
openType = None
if marginMode is not None:
if marginMode == 'cross':
openType = 2
elif marginMode == 'isolated':
openType = 1
else:
raise ArgumentsRequired(self.id + ' createSwapOrder() marginMode parameter should be either "cross" or "isolated"')
else:
openType = self.safe_integer(params, 'openType', 2) # defaulting to cross margin
if (type != 'limit') and (type != 'market') and (type != 1) and (type != 2) and (type != 3) and (type != 4) and (type != 5) and (type != 6):
            raise InvalidOrder(self.id + ' createSwapOrder() order type must be either limit, market, or 1 for limit orders, 2 for post-only orders, 3 for IOC orders, 4 for FOK orders, 5 for market orders or 6 to convert market price to current price')
postOnly = self.safe_value(params, 'postOnly', False)
if postOnly:
type = 2
elif type == 'limit':
type = 1
elif type == 'market':
type = 6
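        # e.g. a plain limit order becomes type 1, a post-only limit becomes
        # type 2, and a market order becomes type 6(market price converted to
        # current price), per the numeric codes listed below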
request = {
'symbol': market['id'],
# 'price': float(self.price_to_precision(symbol, price)),
'vol': float(self.amount_to_precision(symbol, amount)),
# 'leverage': int, # required for isolated margin
# 'side': side, # 1 open long, 2 close short, 3 open short, 4 close long
#
# supported order types
#
# 1 limit
# 2 post only maker(PO)
# 3 transact or cancel instantly(IOC)
# 4 transact completely or cancel completely(FOK)
# 5 market orders
# 6 convert market price to current price
#
'type': type,
'openType': openType, # 1 isolated, 2 cross
            # 'positionId': 1394650, # long, filling in this parameter when closing a position is recommended
# 'externalOid': clientOrderId,
# 'triggerPrice': 10.0, # Required for trigger order
# 'triggerType': 1, # Required for trigger order 1: more than or equal, 2: less than or equal
# 'executeCycle': 1, # Required for trigger order 1: 24 hours,2: 7 days
# 'trend': 1, # Required for trigger order 1: latest price, 2: fair price, 3: index price
# 'orderType': 1, # Required for trigger order 1: limit order,2:Post Only Maker,3: close or cancel instantly ,4: close or cancel completely,5: Market order
}
method = 'contractPrivatePostOrderSubmit'
stopPrice = self.safe_number_2(params, 'triggerPrice', 'stopPrice')
params = self.omit(params, ['stopPrice', 'triggerPrice'])
if stopPrice:
method = 'contractPrivatePostPlanorderPlace'
request['triggerPrice'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = self.safe_integer(params, 'triggerType', 1)
request['executeCycle'] = self.safe_integer(params, 'executeCycle', 1)
request['trend'] = self.safe_integer(params, 'trend', 1)
request['orderType'] = self.safe_integer(params, 'orderType', 1)
if (type != 5) and (type != 6) and (type != 'market'):
request['price'] = float(self.price_to_precision(symbol, price))
if openType == 1:
leverage = self.safe_integer(params, 'leverage')
if leverage is None:
raise ArgumentsRequired(self.id + ' createSwapOrder() requires a leverage parameter for isolated margin orders')
reduceOnly = self.safe_value(params, 'reduceOnly', False)
if reduceOnly:
request['side'] = 2 if (side == 'buy') else 4
else:
request['side'] = 1 if (side == 'buy') else 3
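        # per the side codes in the request comment above(1 open long, 2 close
        # short, 3 open short, 4 close long): a reduce-only 'buy' closes a
        # short(code 2), while a plain 'sell' opens a short(code 3)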
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'externalOid')
if clientOrderId is not None:
request['externalOid'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'externalOid', 'postOnly'])
response = getattr(self, method)(self.extend(request, params))
#
# Swap
# {"code":200,"data":"2ff3163e8617443cb9c6fc19d42b1ca4"}
#
# Trigger
# {"success":true,"code":0,"data":259208506303929856}
#
data = self.safe_string(response, 'data')
return self.parse_order(data, market)
def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['marginMode']: only 'isolated' is supported, for spot-margin trading
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
data = None
if market['spot']:
clientOrderId = self.safe_string(params, 'clientOrderId')
if clientOrderId is not None:
params = self.omit(params, 'clientOrderId')
request['origClientOrderId'] = clientOrderId
else:
request['orderId'] = id
marginMode, query = self.handle_margin_mode_and_params('fetchOrder', params)
method = 'spotPrivateGetOrder'
if marginMode is not None:
if marginMode != 'isolated':
raise BadRequest(self.id + ' fetchOrder() does not support marginMode ' + marginMode + ' for spot-margin trading')
method = 'spotPrivateGetMarginOrder'
data = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133734823834147272",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "30000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "CANCELED",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647667102000",
# "updateTime": "1647708567000",
# "isWorking": True,
# "origQuoteOrderQty": "6"
# }
#
# margin
#
# {
# "symbol": "BTCUSDT",
# "orderId": "763307297891028992",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.0014",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1662153107000,
# "updateTime": 1662153107000
# }
#
elif market['swap']:
request['order_id'] = id
response = self.contractPrivateGetOrderGetOrderId(self.extend(request, params))
#
# {
# "success": True,
# "code": "0",
# "data": {
# "orderId": "264995729269765120",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.2",
# "vol": "15",
# "leverage": "20",
# "side": "1",
# "category": "1",
# "orderType": "1",
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "2.2528",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2",
# "externalOid": "_m_0e9520c256744d64b942985189026d20",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648850305236",
# "updateTime": "1648850305245",
# "positionMode": "1"
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_order(data, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['marginMode']: only 'isolated' is supported, for spot-margin trading
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType, query = self.handle_market_type_and_params('fetchOrders', market, params)
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument for spot market')
marginMode, query = self.handle_margin_mode_and_params('fetchOrders', params)
method = 'spotPrivateGetAllOrders'
if marginMode is not None:
if marginMode != 'isolated':
raise BadRequest(self.id + ' fetchOrders() does not support marginMode ' + marginMode + ' for spot-margin trading')
method = 'spotPrivateGetMarginAllOrders'
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "133949373632483328",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "45000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "SELL",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647718255000",
# "updateTime": "1647718255000",
# "isWorking": True,
# "origQuoteOrderQty": "9"
# },
# ]
#
# margin
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "763307297891028992",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.0014",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1662153107000,
# "updateTime": 1662153107000
# }
# ]
#
return self.parse_orders(response, market, since, limit)
else:
if since is not None:
request['start_time'] = since
end = self.safe_integer(params, 'end_time')
if end is None:
request['end_time'] = self.sum(since, self.options['maxTimeTillEnd'])
if limit is not None:
request['page_size'] = limit
method = self.safe_string(self.options, 'fetchOrders', 'contractPrivateGetOrderListHistoryOrders')
method = self.safe_string(query, 'method', method)
ordersOfRegular = []
ordersOfTrigger = []
if method == 'contractPrivateGetOrderListHistoryOrders':
response = self.contractPrivateGetOrderListHistoryOrders(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "265230764677709315",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.1",
# "vol": "102",
# "leverage": "20",
# "side": "1",
# "category": "1",
# "orderType": "1",
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "10.96704",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2",
# "externalOid": "_m_7e42f8df6b324c869e4e200397e2b00f",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648906342000",
# "updateTime": "1648906342000",
# "positionMode": "1"
# },
# ]
# }
#
ordersOfRegular = self.safe_value(response, 'data')
else:
                # the Planorder endpoints work not only for stop-market orders, but also for stop-limit orders, which were supposed to have a separate endpoint
response = self.contractPrivateGetPlanorderListOrders(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "symbol": "STEPN_USDT",
# "leverage": "20",
# "side": "1",
# "vol": "13",
# "openType": "1",
# "state": "1",
# "orderType": "1",
# "errorCode": "0",
# "createTime": "1648984276000",
# "updateTime": "1648984276000",
# "id": "265557643326564352",
# "triggerType": "1",
# "triggerPrice": "3",
# "price": "2.9", # not present in stop-market, but in stop-limit order
# "executeCycle": "87600",
# "trend": "1",
# },
# ]
# }
#
ordersOfTrigger = self.safe_value(response, 'data')
merged = self.array_concat(ordersOfTrigger, ordersOfRegular)
return self.parse_orders(merged, market, since, limit, params)
def fetch_orders_by_ids(self, ids, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType, query = self.handle_market_type_and_params('fetchOrdersByIds', market, params)
if marketType == 'spot':
raise BadRequest(self.id + ' fetchOrdersByIds() is not supported for ' + marketType)
else:
request['order_ids'] = ','.join(ids)
response = self.contractPrivateGetOrderBatchQuery(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "265230764677709315",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.1",
# "vol": "102",
# "leverage": "20",
# "side": "1",
# "category": "1",
# "orderType": "1",
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "10.96704",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2",
# "externalOid": "_m_7e42f8df6b324c869e4e200397e2b00f",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648906342000",
# "updateTime": "1648906342000",
# "positionMode": "1"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_orders(data, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['marginMode']: only 'isolated' is supported, for spot-margin trading
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType = None
marketType, params = self.handle_market_type_and_params('fetchOpenOrders', market, params)
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument for spot market')
method = 'spotPrivateGetOpenOrders'
marginMode, query = self.handle_margin_mode_and_params('fetchOpenOrders', params)
if marginMode is not None:
if marginMode != 'isolated':
raise BadRequest(self.id + ' fetchOpenOrders() does not support marginMode ' + marginMode + ' for spot-margin trading')
method = 'spotPrivateGetMarginOpenOrders'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "133949373632483328",
# "orderListId": "-1",
# "clientOrderId": "",
# "price": "45000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "SELL",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647718255199",
# "updateTime": null,
# "isWorking": True,
# "origQuoteOrderQty": "9"
# }
# ]
#
# margin
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "764547676405633024",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.0013",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1662448836000,
# "updateTime": 1662448836000
# }
# ]
#
return self.parse_orders(response, market, since, limit)
else:
            # TO_DO: another possible way is through open_orders/{symbol}, but since both endpoints share the same ratelimits and that one offers less granularity, the historical-orders endpoint is more convenient, as it supports more params (theoretically, though, the open-orders endpoint might be slightly faster)
return self.fetch_orders_by_state(2, symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
return self.fetch_orders_by_state(3, symbol, since, limit, params)
def fetch_canceled_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple canceled orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: timestamp in ms of the earliest order, default is None
:param int|None limit: max number of orders to return, default is None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
return self.fetch_orders_by_state(4, symbol, since, limit, params)
def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
        marketType, params = self.handle_market_type_and_params('fetchOrdersByState', market, params)  # the helper returns a tuple; unpack it so the comparison below works
if marketType == 'spot':
raise BadRequest(self.id + ' fetchOrdersByState() is not supported for ' + marketType)
else:
params['states'] = state
return self.fetch_orders(symbol, since, limit, params)
def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str|None symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['marginMode']: only 'isolated' is supported for spot-margin trading
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType = None
marketType, params = self.handle_market_type_and_params('cancelOrder', market, params)
marginMode, query = self.handle_margin_mode_and_params('cancelOrder', params)
data = None
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_string(params, 'clientOrderId')
if clientOrderId is not None:
                query = self.omit(query, 'clientOrderId')  # drop it from query, which is what actually gets sent below
request['origClientOrderId'] = clientOrderId
else:
request['orderId'] = id
method = 'spotPrivateDeleteOrder'
if marginMode is not None:
if marginMode != 'isolated':
raise BadRequest(self.id + ' cancelOrder() does not support marginMode ' + marginMode + ' for spot-margin trading')
method = 'spotPrivateDeleteMarginOrder'
data = getattr(self, method)(self.extend(request, query))
#
# spot
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133734823834447872",
# "price": "30000",
# "origQty": "0.0002",
# "type": "LIMIT",
# "side": "BUY"
# }
#
# margin
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "762640232574226432",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.00147",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1661994066000,
# "updateTime": 1661994066000
# }
# ]
#
else:
            # TODO: the PlanorderCancel endpoint has a bug at the moment; waiting for a fix
method = self.safe_string(self.options, 'cancelOrder', 'contractPrivatePostOrderCancel') # contractPrivatePostOrderCancel, contractPrivatePostPlanorderCancel
method = self.safe_string(query, 'method', method)
            response = getattr(self, method)([id])  # the request cannot be changed or extended; this is the only way to send it
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "264995729269765120",
# "errorCode": "0", # if already canceled: "2041"; if doesn't exist: "2040"
# "errorMsg": "success", # if already canceled: "order state cannot be cancelled"; if doesn't exist: "order not exist"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
            order = self.safe_value(data, 0)
            errorMsg = self.safe_value(order, 'errorMsg', '')
            if errorMsg != 'success':
                raise InvalidOrder(self.id + ' cancelOrder() the order with id ' + id + ' cannot be cancelled: ' + errorMsg)
            data = order  # parse the single cancelled order, not the wrapping list
return self.parse_order(data, market)
def cancel_orders(self, ids, symbol=None, params={}):
"""
cancel multiple orders
:param [str] ids: order ids
:param str|None symbol: unified market symbol, default is None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol) if (symbol is not None) else None
        marketType, params = self.handle_market_type_and_params('cancelOrders', market, params)  # the helper returns a tuple; unpack it so the comparison below works
if marketType == 'spot':
raise BadRequest(self.id + ' cancelOrders() is not supported for ' + marketType)
else:
            response = self.contractPrivatePostOrderCancel(ids)  # the request cannot be changed or extended; this is the only way to send it
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "264995729269765120",
# "errorCode": "0", # if already canceled: "2041"
# "errorMsg": "success", # if already canceled: "order state cannot be cancelled"
# },
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_orders(data, market)
def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['marginMode']: only 'isolated' is supported for spot-margin trading
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol) if (symbol is not None) else None
request = {}
marketType = None
marketType, params = self.handle_market_type_and_params('cancelAllOrders', market, params)
marginMode, query = self.handle_margin_mode_and_params('cancelAllOrders', params)
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument on spot')
request['symbol'] = market['id']
method = 'spotPrivateDeleteOpenOrders'
if marginMode is not None:
if marginMode != 'isolated':
raise BadRequest(self.id + ' cancelAllOrders() does not support marginMode ' + marginMode + ' for spot-margin trading')
method = 'spotPrivateDeleteMarginOpenOrders'
response = getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "133926492139692032",
# "price": "30000",
# "origQty": "0.0002",
# "type": "LIMIT",
# "side": "BUY"
# },
# ]
#
# margin
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "762640232574226432",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.00147",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1661994066000,
# "updateTime": 1661994066000
# }
# ]
#
return self.parse_orders(response, market)
else:
if symbol is not None:
request['symbol'] = market['id']
# method can be either: contractPrivatePostOrderCancelAll or contractPrivatePostPlanorderCancelAll
            # the Planorder endpoints work not only for stop-market orders, but also for stop-limit orders, which were supposed to have a separate endpoint
method = self.safe_string(self.options, 'cancelAllOrders', 'contractPrivatePostOrderCancelAll')
method = self.safe_string(query, 'method', method)
response = getattr(self, method)(self.extend(request, query))
#
# {
# "success": True,
# "code": "0"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market)
def parse_order(self, order, market=None):
#
# spot: createOrder
#
# {
# "symbol": "BTCUSDT",
# "orderId": "123738410679123456",
# "orderListId": -1
# }
#
# margin: createOrder
#
# {
# "symbol": "BTCUSDT",
# "orderId": "762634301354414080",
# "clientOrderId": null,
# "isIsolated": True,
# "transactTime": 1661992652132
# }
#
# spot: cancelOrder, cancelAllOrders
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133926441921286144",
# "price": "30000",
# "origQty": "0.0002",
# "type": "LIMIT",
# "side": "BUY"
# }
#
# margin: cancelOrder, cancelAllOrders
#
# {
# "symbol": "BTCUSDT",
# "orderId": "762640232574226432",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.00147",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1661994066000,
# "updateTime": 1661994066000
# }
#
# spot: fetchOrder, fetchOpenOrders, fetchOrders
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133734823834147272",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "30000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "CANCELED",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647667102000",
# "updateTime": "1647708567000",
# "isWorking": True,
# "origQuoteOrderQty": "6"
# }
#
# margin: fetchOrder, fetchOrders
#
# {
# "symbol": "BTCUSDT",
# "orderId": "763307297891028992",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "18000",
# "origQty": "0.0014",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "type": "LIMIT",
# "side": "BUY",
# "isIsolated": True,
# "isWorking": True,
# "time": 1662153107000,
# "updateTime": 1662153107000
# }
#
# swap: createOrder
#
# 2ff3163e8617443cb9c6fc19d42b1ca4
#
# swap: fetchOrder, fetchOrders
#
# regular
# {
# "orderId": "264995729269765120",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.2",
# "vol": "15",
# "leverage": "20",
# "side": "1", # TODO: not unified
# "category": "1",
# "orderType": "1", # TODO: not unified
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "2.2528",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2", # TODO
# "externalOid": "_m_0e9520c256744d64b942985189026d20",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648850305236",
# "updateTime": "1648850305245",
# "positionMode": "1"
# }
#
# stop
# {
# "id": "265557643326564352",
# "triggerType": "1",
# "triggerPrice": "3",
# "price": "2.9", # not present in stop-market, but in stop-limit order
# "executeCycle": "87600",
# "trend": "1",
# # below keys are same as in regular order structure
# "symbol": "STEPN_USDT",
# "leverage": "20",
# "side": "1",
# "vol": "13",
# "openType": "1",
# "state": "1",
# "orderType": "1",
# "errorCode": "0",
# "createTime": "1648984276000",
# "updateTime": "1648984276000",
# }
#
id = None
if isinstance(order, str):
id = order
else:
id = self.safe_string_2(order, 'orderId', 'id')
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
timestamp = self.safe_integer_n(order, ['time', 'createTime', 'transactTime'])
fee = None
feeCurrency = self.safe_string(order, 'feeCurrency')
if feeCurrency is not None:
takerFee = self.safe_string(order, 'takerFee')
makerFee = self.safe_string(order, 'makerFee')
feeSum = Precise.string_add(takerFee, makerFee)
fee = {
'currency': feeCurrency,
'cost': self.parse_number(feeSum),
}
return self.safe_order({
'id': id,
'clientOrderId': self.safe_string(order, 'clientOrderId'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,  # TODO: this might be 'updateTime' if the order status is filled, otherwise the cancellation time; needs to be checked
'status': self.parse_order_status(self.safe_string_2(order, 'status', 'state')),
'symbol': market['symbol'],
'type': self.parse_order_type(self.safe_string(order, 'type')),
'timeInForce': self.parse_order_time_in_force(self.safe_string(order, 'timeInForce')),
'side': self.parse_order_side(self.safe_string(order, 'side')),
'price': self.safe_number(order, 'price'),
'stopPrice': self.safe_number_2(order, 'stopPrice', 'triggerPrice'),
'triggerPrice': self.safe_number_2(order, 'stopPrice', 'triggerPrice'),
'average': self.safe_number(order, 'dealAvgPrice'),
'amount': self.safe_number_2(order, 'origQty', 'vol'),
'cost': self.safe_number(order, 'cummulativeQuoteQty'), # 'cummulativeQuoteQty' vs 'origQuoteOrderQty'
'filled': self.safe_number_2(order, 'executedQty', 'dealVol'),
'remaining': None,
'fee': fee,
'trades': None,
'info': order,
}, market)
def parse_order_side(self, status):
statuses = {
'BUY': 'buy',
'SELL': 'sell',
# contracts v1 : TODO
}
return self.safe_string(statuses, status, status)
def parse_order_type(self, status):
statuses = {
'MARKET': 'market',
'LIMIT': 'limit',
'LIMIT_MAKER': 'limit',
}
return self.safe_string(statuses, status, status)
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
'PARTIALLY_FILLED': 'open',
'PARTIALLY_CANCELED': 'canceled',
# contracts v1
            # '1': 'uninformed',  # TODO: meaning unclear
            '2': 'open',
            '3': 'closed',
            '4': 'canceled',
            # '5': 'invalid',  # TODO: meaning unclear
}
return self.safe_string(statuses, status, status)
def parse_order_time_in_force(self, status):
statuses = {
'GTC': 'GTC',
'FOK': 'FOK',
'IOC': 'IOC',
}
return self.safe_string(statuses, status, status)
def fetch_account_helper(self, type, params):
if type == 'spot':
return self.spotPrivateGetAccount(params)
#
# {
# "makerCommission": "20",
# "takerCommission": "20",
# "buyerCommission": "0",
# "sellerCommission": "0",
# "canTrade": True,
# "canWithdraw": True,
# "canDeposit": True,
# "updateTime": null,
# "accountType": "SPOT",
# "balances": [
# {
# "asset": "BTC",
# "free": "0.002",
# "locked": "0"
# },
# {
# "asset": "USDT",
# "free": "88.120131350620957006",
# "locked": "0"
# },
# ],
# "permissions": [
# "SPOT"
# ]
# }
#
elif type == 'swap':
response = self.contractPrivateGetAccountAssets(params)
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "currency":"BSV",
# "positionMargin":0,
# "availableBalance":0,
# "cashBalance":0,
# "frozenBalance":0,
# "equity":0,
# "unrealized":0,
# "bonus":0
# },
# ]
# }
#
return self.safe_value(response, 'data')
def fetch_accounts(self, params={}):
"""
fetch all the accounts associated with a profile
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `account structures <https://docs.ccxt.com/en/latest/manual.html#account-structure>` indexed by the account type
"""
        # TODO: are the endpoints below suitable for fetchAccounts?
marketType, query = self.handle_market_type_and_params('fetchAccounts', None, params)
self.load_markets()
response = self.fetch_account_helper(marketType, query)
data = self.safe_value(response, 'balances', [])
result = []
for i in range(0, len(data)):
account = data[i]
currencyId = self.safe_string_2(account, 'asset', 'currency')
code = self.safe_currency_code(currencyId)
result.append({
'id': self.safe_string(account, 'id'),
'type': self.safe_string(account, 'type'),
'code': code,
'info': account,
})
return result
def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
self.load_markets()
response = self.fetch_account_helper('spot', params)
makerFee = self.safe_string(response, 'makerCommission')
takerFee = self.safe_string(response, 'takerCommission')
makerFee = Precise.string_div(makerFee, '1000')
takerFee = Precise.string_div(takerFee, '1000')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'symbol': symbol,
'maker': self.parse_number(makerFee),
'taker': self.parse_number(takerFee),
'percentage': True,
'tierBased': False,
'info': response,
}
return result
def parse_balance(self, response, marketType):
#
# spot
#
# {
# "asset": "USDT",
# "free": "0.000000000674",
# "locked": "0"
# }
#
# swap
#
# {
# "currency": "BSV",
# "positionMargin": 0,
# "availableBalance": 0,
# "cashBalance": 0,
# "frozenBalance": 0,
# "equity": 0,
# "unrealized": 0,
# "bonus": 0
# }
#
# margin
#
# {
# "baseAsset": {
# "asset": "BTC",
# "borrowEnabled": True,
# "borrowed": "0",
# "free": "0",
# "interest": "0",
# "locked": "0",
# "netAsset": "0",
# "netAssetOfBtc": "0",
# "repayEnabled": True,
# "totalAsset": "0"
# }
# "quoteAsset": {
# "asset": "USDT",
# "borrowEnabled": True,
# "borrowed": "0",
# "free": "10",
# "interest": "0",
# "locked": "0",
# "netAsset": "10",
# "netAssetOfBtc": "0",
# "repayEnabled": True,
# "totalAsset": "10"
# }
# "symbol": "BTCUSDT",
# "isolatedCreated": True,
# "enabled": True,
# "marginLevel": "999",
# "marginRatio": "9",
# "indexPrice": "16741.137068965517241379",
# "liquidatePrice": "--",
# "liquidateRate": "--",
# "tradeEnabled": True
# }
#
wallet = None
if marketType == 'margin':
wallet = self.safe_value(response, 'assets', [])
elif marketType == 'swap':
wallet = self.safe_value(response, 'data', [])
else:
wallet = self.safe_value(response, 'balances', [])
result = {'info': response}
if marketType == 'margin':
for i in range(0, len(wallet)):
entry = wallet[i]
marketId = self.safe_string(entry, 'symbol')
symbol = self.safe_symbol(marketId, None)
base = self.safe_value(entry, 'baseAsset', {})
quote = self.safe_value(entry, 'quoteAsset', {})
baseCode = self.safe_currency_code(self.safe_string(base, 'asset'))
quoteCode = self.safe_currency_code(self.safe_string(quote, 'asset'))
subResult = {}
subResult[baseCode] = self.parse_balance_helper(base)
subResult[quoteCode] = self.parse_balance_helper(quote)
result[symbol] = self.safe_balance(subResult)
return result
elif marketType == 'swap':
for i in range(0, len(wallet)):
entry = wallet[i]
currencyId = self.safe_string(entry, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(entry, 'availableBalance')
account['used'] = self.safe_string(entry, 'frozenBalance')
result[code] = account
return self.safe_balance(result)
else:
for i in range(0, len(wallet)):
entry = wallet[i]
currencyId = self.safe_string(entry, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(entry, 'free')
account['used'] = self.safe_string(entry, 'locked')
result[code] = account
return self.safe_balance(result)
def parse_balance_helper(self, entry):
account = self.account()
account['used'] = self.safe_string(entry, 'locked')
account['free'] = self.safe_string(entry, 'free')
account['total'] = self.safe_string(entry, 'totalAsset')
debt = self.safe_string(entry, 'borrowed')
interest = self.safe_string(entry, 'interest')
account['debt'] = Precise.string_add(debt, interest)
return account
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#account-information
see https://mxcdevelop.github.io/apidocs/contract_v1_en/#get-all-informations-of-user-39-s-asset
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#isolated-account
:param dict params: extra parameters specific to the mexc3 api endpoint
        :param str|None params['symbols']: required for margin, market ids separated by commas
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
self.load_markets()
marketType = None
request = {}
marketType, params = self.handle_market_type_and_params('fetchBalance', None, params)
method = self.get_supported_mapping(marketType, {
'spot': 'spotPrivateGetAccount',
'swap': 'contractPrivateGetAccountAssets',
'margin': 'spotPrivateGetMarginIsolatedAccount',
})
marginMode = self.safe_string(params, 'marginMode')
isMargin = self.safe_value(params, 'margin', False)
if (marginMode is not None) or (isMargin) or (marketType == 'margin'):
parsedSymbols = None
symbol = self.safe_string(params, 'symbol')
if symbol is None:
symbols = self.safe_value(params, 'symbols')
if symbols is not None:
parsedSymbols = ','.join(self.market_ids(symbols))
else:
market = self.market(symbol)
parsedSymbols = market['id']
self.check_required_argument('fetchBalance', parsedSymbols, 'symbol or symbols')
method = 'spotPrivateGetMarginIsolatedAccount'
marketType = 'margin'
request['symbols'] = parsedSymbols
params = self.omit(params, ['margin', 'marginMode', 'symbol', 'symbols'])
response = getattr(self, method)(self.extend(request, params))
#
# spot
#
# {
# "makerCommission": 0,
# "takerCommission": 20,
# "buyerCommission": 0,
# "sellerCommission": 0,
# "canTrade": True,
# "canWithdraw": True,
# "canDeposit": True,
# "updateTime": null,
# "accountType": "SPOT",
# "balances": [
# {
# "asset": "USDT",
# "free": "0.000000000674",
# "locked": "0"
# },
# ],
# "permissions": ["SPOT"]
# }
#
# swap
#
# {
# "success": True,
# "code": 0,
# "data": [
# {
# "currency": "BSV",
# "positionMargin": 0,
# "availableBalance": 0,
# "cashBalance": 0,
# "frozenBalance": 0,
# "equity": 0,
# "unrealized": 0,
# "bonus": 0
# },
# ]
# }
#
# margin
#
# {
# "assets": [
# {
# "baseAsset": {
# "asset": "BTC",
# "borrowEnabled": True,
# "borrowed": "0",
# "free": "0",
# "interest": "0",
# "locked": "0",
# "netAsset": "0",
# "netAssetOfBtc": "0",
# "repayEnabled": True,
# "totalAsset": "0"
# },
# "quoteAsset": {
# "asset": "USDT",
# "borrowEnabled": True,
# "borrowed": "0",
# "free": "10",
# "interest": "0",
# "locked": "0",
# "netAsset": "10",
# "netAssetOfBtc": "0",
# "repayEnabled": True,
# "totalAsset": "10"
# },
# "symbol": "BTCUSDT",
# "isolatedCreated": True,
# "enabled": True,
# "marginLevel": "999",
# "marginRatio": "9",
# "indexPrice": "16741.137068965517241379",
# "liquidatePrice": "--",
# "liquidateRate": "--",
# "tradeEnabled": True
# }
# ]
# }
#
return self.parse_balance(response, marketType)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchMyTrades', market, params)
request = {
'symbol': market['id'],
}
trades = None
if marketType == 'spot':
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit
trades = self.spotPrivateGetMyTrades(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "id": "133948532984922113",
# "orderId": "133948532531949568",
# "orderListId": "-1",
# "price": "41995.51",
# "qty": "0.0002",
# "quoteQty": "8.399102",
# "commission": "0.016798204",
# "commissionAsset": "USDT",
# "time": "1647718055000",
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
# ]
#
else:
if since is not None:
request['start_time'] = since
end = self.safe_integer(params, 'end_time')
if end is None:
request['end_time'] = self.sum(since, self.options['maxTimeTillEnd'])
if limit is not None:
request['page_size'] = limit
response = self.contractPrivateGetOrderListOrderDeals(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "id": "299444585",
# "symbol": "STEPN_USDT",
# "side": "1",
# "vol": "1",
# "price": "2.45455",
# "feeCurrency": "USDT",
# "fee": "0.00147273",
# "timestamp": "1648924557000",
# "profit": "0",
# "category": "1",
# "orderId": "265307163526610432",
# "positionMode": "1",
# "taker": True
# }
# ]
# }
#
trades = self.safe_value(response, 'data')
return self.parse_trades(trades, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
"""
fetch all the trades made from a single order
:param str id: order id
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchOrderTrades', market, params)
trades = None
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
request['symbol'] = market['id']
request['orderId'] = id
trades = self.spotPrivateGetMyTrades(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "id": "133948532984922113",
# "orderId": "133948532531949568",
# "orderListId": "-1",
# "price": "41995.51",
# "qty": "0.0002",
# "quoteQty": "8.399102",
# "commission": "0.016798204",
# "commissionAsset": "USDT",
# "time": "1647718055000",
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
# ]
#
else:
request['order_id'] = id
response = self.contractPrivateGetOrderDealDetailsOrderId(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "id": "299444585",
# "symbol": "STEPN_USDT",
# "side": "1",
# "vol": "1",
# "price": "2.45455",
# "feeCurrency": "USDT",
# "fee": "0.00147273",
# "timestamp": "1648924557000",
# "profit": "0",
# "category": "1",
# "orderId": "265307163526610432",
# "positionMode": "1",
# "taker": True
# }
# ]
# }
#
trades = self.safe_value(response, 'data')
return self.parse_trades(trades, market, since, limit, query)
def modify_margin_helper(self, symbol, amount, addOrReduce, params={}):
positionId = self.safe_integer(params, 'positionId')
if positionId is None:
raise ArgumentsRequired(self.id + ' modifyMarginHelper() requires a positionId parameter')
self.load_markets()
request = {
'positionId': positionId,
'amount': amount,
'type': addOrReduce,
}
response = self.contractPrivatePostPositionChangeMargin(self.extend(request, params))
#
# {
# "success": True,
# "code": 0
        # }
        #
return response
def reduce_margin(self, symbol, amount, params={}):
"""
remove margin from a position
:param str symbol: unified market symbol
:param float amount: the amount of margin to remove
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#reduce-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'SUB', params)
def add_margin(self, symbol, amount, params={}):
"""
add margin
:param str symbol: unified market symbol
:param float amount: amount of margin to add
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#add-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'ADD', params)
def set_leverage(self, leverage, symbol=None, params={}):
"""
set the level of leverage for a market
:param float leverage: the rate of leverage
:param str|None symbol: unified market symbol
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: response from the exchange
"""
self.load_markets()
request = {
'leverage': leverage,
}
positionId = self.safe_integer(params, 'positionId')
if positionId is None:
openType = self.safe_number(params, 'openType') # 1 or 2
positionType = self.safe_number(params, 'positionType') # 1 or 2
market = self.market(symbol) if (symbol is not None) else None
if (openType is None) or (positionType is None) or (market is None):
raise ArgumentsRequired(self.id + ' setLeverage() requires a positionId parameter or a symbol argument with openType and positionType parameters, use openType 1 or 2 for isolated or cross margin respectively, use positionType 1 or 2 for long or short positions')
else:
request['openType'] = openType
                request['symbol'] = market['id']  # the contract api expects the exchange's own symbol format
request['positionType'] = positionType
else:
request['positionId'] = positionId
return self.contractPrivatePostPositionChangeLeverage(self.extend(request, params))
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
"""
        fetch the history of funding payments paid and received on this account
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch funding history for
:param int|None limit: the maximum number of funding history structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `funding history structure <https://docs.ccxt.com/en/latest/manual.html#funding-history-structure>`
"""
self.load_markets()
market = None
request = {
# 'symbol': market['id'],
# 'position_id': positionId,
# 'page_num': 1,
# 'page_size': limit, # default 20, max 100
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['page_size'] = limit
response = self.contractPrivateGetPositionFundingRecords(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": {
# "pageSize": 20,
# "totalCount": 2,
# "totalPage": 1,
# "currentPage": 1,
# "resultList": [
# {
# "id": 7423910,
# "symbol": "BTC_USDT",
# "positionType": 1,
# "positionValue": 29.30024,
# "funding": 0.00076180624,
# "rate": -0.000026,
# "settleTime": 1643299200000
# },
# {
# "id": 7416473,
# "symbol": "BTC_USDT",
# "positionType": 1,
# "positionValue": 28.9188,
# "funding": 0.0014748588,
# "rate": -0.000051,
# "settleTime": 1643270400000
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
resultList = self.safe_value(data, 'resultList', [])
result = []
for i in range(0, len(resultList)):
entry = resultList[i]
timestamp = self.safe_integer(entry, 'settleTime')
result.append({
'info': entry,
'symbol': symbol,
'code': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': self.safe_number(entry, 'id'),
'amount': self.safe_number(entry, 'funding'),
})
return result
def parse_funding_rate(self, contract, market=None):
#
# {
# "symbol": "BTC_USDT",
# "fundingRate": 0.000014,
# "maxFundingRate": 0.003,
# "minFundingRate": -0.003,
# "collectCycle": 8,
# "nextSettleTime": 1643241600000,
# "timestamp": 1643240373359
# }
#
nextFundingRate = self.safe_number(contract, 'fundingRate')
nextFundingTimestamp = self.safe_integer(contract, 'nextSettleTime')
marketId = self.safe_string(contract, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(contract, 'timestamp')
datetime = self.iso8601(timestamp)
return {
'info': contract,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': timestamp,
'datetime': datetime,
'fundingRate': nextFundingRate,
'fundingTimestamp': nextFundingTimestamp,
'fundingDatetime': self.iso8601(nextFundingTimestamp),
'nextFundingRate': None,
'nextFundingTimestamp': None,
'nextFundingDatetime': None,
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def fetch_funding_rate(self, symbol, params={}):
"""
fetch the current funding rate
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `funding rate structure <https://docs.ccxt.com/en/latest/manual.html#funding-rate-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.contractPublicGetFundingRateSymbol(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": {
# "symbol": "BTC_USDT",
# "fundingRate": 0.000014,
# "maxFundingRate": 0.003,
# "minFundingRate": -0.003,
# "collectCycle": 8,
# "nextSettleTime": 1643241600000,
# "timestamp": 1643240373359
# }
# }
#
result = self.safe_value(response, 'data', {})
return self.parse_funding_rate(result, market)
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
"""
fetches historical funding rate prices
:param str|None symbol: unified symbol of the market to fetch the funding rate history for
:param int|None since: not used by mexc, but filtered internally by ccxt
        :param int|None limit: mexc's page_size limit, default 20, maximum 100
:param dict params: extra parameters specific to the mexc api endpoint
:returns [dict]: a list of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html?#funding-rate-history-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'page_size': limit, # optional
# 'page_num': 1, # optional, current page number, default is 1
}
if limit is not None:
request['page_size'] = limit
response = self.contractPublicGetFundingRateHistory(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": {
# "pageSize": 2,
# "totalCount": 21,
# "totalPage": 11,
# "currentPage": 1,
# "resultList": [
# {
# "symbol": "BTC_USDT",
# "fundingRate": 0.000266,
# "settleTime": 1609804800000
# },
# {
# "symbol": "BTC_USDT",
# "fundingRate": 0.00029,
# "settleTime": 1609776000000
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data')
result = self.safe_value(data, 'resultList', [])
rates = []
for i in range(0, len(result)):
entry = result[i]
marketId = self.safe_string(entry, 'symbol')
symbol = self.safe_symbol(marketId)
timestamp = self.safe_integer(entry, 'settleTime')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'fundingRate'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
def fetch_leverage_tiers(self, symbols=None, params={}):
"""
retrieve information on the maximum leverage, and maintenance margin for trades of varying trade sizes
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `leverage tiers structures <https://docs.ccxt.com/en/latest/manual.html#leverage-tiers-structure>`, indexed by market symbols
"""
self.load_markets()
response = self.contractPublicGetDetail(params)
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "symbol": "BTC_USDT",
# "displayName": "BTC_USDT永续",
# "displayNameEn": "BTC_USDT SWAP",
# "positionOpenType": 3,
# "baseCoin": "BTC",
# "quoteCoin": "USDT",
# "settleCoin": "USDT",
# "contractSize": 0.0001,
# "minLeverage": 1,
# "maxLeverage": 125,
# "priceScale": 2,
# "volScale": 0,
# "amountScale": 4,
# "priceUnit": 0.5,
# "volUnit": 1,
# "minVol": 1,
# "maxVol": 1000000,
# "bidLimitPriceRate": 0.1,
# "askLimitPriceRate": 0.1,
# "takerFeeRate": 0.0006,
# "makerFeeRate": 0.0002,
# "maintenanceMarginRate": 0.004,
# "initialMarginRate": 0.008,
# "riskBaseVol": 10000,
# "riskIncrVol": 200000,
# "riskIncrMmr": 0.004,
# "riskIncrImr": 0.004,
# "riskLevelLimit": 5,
# "priceCoefficientVariation": 0.1,
# "indexOrigin": ["BINANCE","GATEIO","HUOBI","MXC"],
# "state": 0, # 0 enabled, 1 delivery, 2 completed, 3 offline, 4 pause
# "isNew": False,
# "isHot": True,
# "isHidden": False
# },
# ...
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_leverage_tiers(data, symbols, 'symbol')
def parse_market_leverage_tiers(self, info, market):
"""
@param info: Exchange response for 1 market
{
"symbol": "BTC_USDT",
"displayName": "BTC_USDT永续",
"displayNameEn": "BTC_USDT SWAP",
"positionOpenType": 3,
"baseCoin": "BTC",
"quoteCoin": "USDT",
"settleCoin": "USDT",
"contractSize": 0.0001,
"minLeverage": 1,
"maxLeverage": 125,
"priceScale": 2,
"volScale": 0,
"amountScale": 4,
"priceUnit": 0.5,
"volUnit": 1,
"minVol": 1,
"maxVol": 1000000,
"bidLimitPriceRate": 0.1,
"askLimitPriceRate": 0.1,
"takerFeeRate": 0.0006,
"makerFeeRate": 0.0002,
"maintenanceMarginRate": 0.004,
"initialMarginRate": 0.008,
"riskBaseVol": 10000,
"riskIncrVol": 200000,
"riskIncrMmr": 0.004,
"riskIncrImr": 0.004,
"riskLevelLimit": 5,
"priceCoefficientVariation": 0.1,
"indexOrigin": ["BINANCE","GATEIO","HUOBI","MXC"],
"state": 0, # 0 enabled, 1 delivery, 2 completed, 3 offline, 4 pause
"isNew": False,
"isHot": True,
"isHidden": False
@param market: CCXT market
"""
maintenanceMarginRate = self.safe_string(info, 'maintenanceMarginRate')
initialMarginRate = self.safe_string(info, 'initialMarginRate')
maxVol = self.safe_string(info, 'maxVol')
riskIncrVol = self.safe_string(info, 'riskIncrVol')
riskIncrMmr = self.safe_string(info, 'riskIncrMmr')
riskIncrImr = self.safe_string(info, 'riskIncrImr')
floor = '0'
tiers = []
quoteId = self.safe_string(info, 'quoteCoin')
        while Precise.string_lt(floor, maxVol):
cap = Precise.string_add(floor, riskIncrVol)
tiers.append({
'tier': self.parse_number(Precise.string_div(cap, riskIncrVol)),
'currency': self.safe_currency_code(quoteId),
'notionalFloor': self.parse_number(floor),
'notionalCap': self.parse_number(cap),
'maintenanceMarginRate': self.parse_number(maintenanceMarginRate),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRate)),
'info': info,
})
initialMarginRate = Precise.string_add(initialMarginRate, riskIncrImr)
maintenanceMarginRate = Precise.string_add(maintenanceMarginRate, riskIncrMmr)
floor = cap
return tiers
def parse_deposit_address(self, depositAddress, currency=None):
#
# {"chain":"ERC-20","address":"0x55cbd73db24eafcca97369e3f2db74b2490586e6"},
# {"chain":"MATIC","address":"0x05aa3236f1970eae0f8feb17ec19435b39574d74"},
# {"chain":"TRC20","address":"TGaPfhW41EXD3sAfs1grLF6DKfugfqANNw"},
# {"chain":"SOL","address":"5FSpUKuh2gjw4mF89T2e7sEjzUA1SkRKjBChFqP43KhV"},
# {"chain":"ALGO","address":"B3XTZND2JJTSYR7R2TQVCUDT4QSSYVAIZYDPWVBX34DGAYATBU3AUV43VU"}
#
address = self.safe_string(depositAddress, 'address')
code = self.safe_currency_code(None, currency)
networkId = self.safe_string(depositAddress, 'chain')
network = self.safe_network(networkId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'network': network,
'info': depositAddress,
}
def fetch_deposit_addresses_by_network(self, code, params={}):
"""
fetch a dictionary of addresses for a currency, indexed by network
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#deposit-address-supporting-network
:param str code: unified currency code of the currency for the deposit address
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `address structures <https://docs.ccxt.com/en/latest/manual.html#address-structure>` indexed by the network
"""
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
}
response = self.spotPrivateGetCapitalDepositAddress(self.extend(request, params))
result = []
for i in range(0, len(response)):
depositAddress = response[i]
coin = self.safe_string(depositAddress, 'coin')
currency = self.currency(coin)
networkId = self.safe_string(depositAddress, 'network')
network = self.safe_network(networkId)
address = self.safe_string(depositAddress, 'address', None)
tag = self.safe_string_2(depositAddress, 'tag', 'memo', None)
result.append({
'currency': currency['id'],
'network': network,
'address': address,
'tag': tag,
})
return result
def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#deposit-address-supporting-network
:param str code: unified currency code
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
rawNetwork = self.safe_string_upper(params, 'network')
params = self.omit(params, 'network')
response = self.fetch_deposit_addresses_by_network(code, params)
if rawNetwork is not None:
for i in range(0, len(response)):
depositAddress = response[i]
network = self.safe_string_upper(depositAddress, 'network')
if rawNetwork == network:
return depositAddress
result = self.safe_value(response, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find a deposit address for ' + code + ', consider creating one using the MEXC platform')
return result
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#deposit-history-supporting-network
:param str code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires a currency code argument')
self.load_markets()
request = {
# 'coin': currency['id'] + network example: USDT-TRX,
# 'status': 'status',
# 'startTime': since, # default 90 days
# 'endTime': self.milliseconds(),
# 'limit': limit, # default 1000, maximum 1000
}
currency = None
rawNetwork = self.safe_string(params, 'network')
params = self.omit(params, 'network')
if rawNetwork is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires a network parameter when the currency is specified')
        # currently mexc does not have unified network names, so for certain things we might need TRX or TRC-20
        # because of that, the network parameter is applied directly, so the user can control it on their side
currency = self.currency(code)
request['coin'] = currency['id'] + '-' + rawNetwork
if since is not None:
request['startTime'] = since
if limit is not None:
if limit > 1000:
                raise ExchangeError(self.id + ' fetchDeposits() supports a maximum limit of 1000')
request['limit'] = limit
response = self.spotPrivateGetCapitalDepositHisrec(self.extend(request, params))
#
# [
# {
# amount: '10',
# coin: 'USDC-TRX',
# network: 'TRX',
# status: '5',
# address: 'TSMcEDDvkqY9dz8RkFnrS86U59GwEZjfvh',
# addressTag: null,
# txId: '51a8f49e6f03f2c056e71fe3291aa65e1032880be855b65cecd0595a1b8af95b',
# insertTime: '1664805021000',
# unlockConfirm: '200',
# confirmTimes: '203'
# }
# ]
#
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#withdraw-history-supporting-network
:param str code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals() requires a currency code argument')
self.load_markets()
request = {
# 'coin': currency['id'],
# 'status': 'status',
# 'startTime': since, # default 90 days
# 'endTime': self.milliseconds(),
# 'limit': limit, # default 1000, maximum 1000
}
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['startTime'] = since
if limit is not None:
if limit > 1000:
                raise ExchangeError(self.id + ' fetchWithdrawals() supports a maximum limit of 1000')
request['limit'] = limit
response = self.spotPrivateGetCapitalWithdrawHistory(self.extend(request, params))
#
# [
# {
# id: 'adcd1c8322154de691b815eedcd10c42',
# txId: '0xc8c918cd69b2246db493ef6225a72ffdc664f15b08da3e25c6879b271d05e9d0',
# coin: 'USDC-MATIC',
# network: 'MATIC',
# address: '0xeE6C7a415995312ED52c53a0f8f03e165e0A5D62',
# amount: '2',
# transferType: '0',
# status: '7',
# transactionFee: '1',
# confirmNo: null,
# applyTime: '1664882739000',
# remark: ''
# }
# ]
#
return self.parse_transactions(response, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# amount: '10',
# coin: 'USDC-TRX',
# network: 'TRX',
# status: '5',
# address: 'TSMcEDDvkqY9dz8RkFnrS86U59GwEZjfvh',
# addressTag: null,
# txId: '51a8f49e6f03f2c056e71fe3291aa65e1032880be855b65cecd0595a1b8af95b',
# insertTime: '1664805021000',
# unlockConfirm: '200',
# confirmTimes: '203'
# }
#
# fetchWithdrawals
#
# {
# id: 'adcd1c8322154de691b815eedcd10c42',
# txId: '0xc8c918cd69b2246db493ef6225a72ffdc664f15b08da3e25c6879b271d05e9d0',
# coin: 'USDC-MATIC',
# network: 'MATIC',
# address: '0xeE6C7a415995312ED52c53a0f8f03e165e0A5D62',
# amount: '2',
# transferType: '0',
# status: '7',
# transactionFee: '1',
# confirmNo: null,
# applyTime: '1664882739000',
# remark: ''
# }
#
# withdraw
#
# {
# "withdrawId":"25fb2831fb6d4fc7aa4094612a26c81d"
# }
#
id = self.safe_string(transaction, 'id')
type = 'deposit' if (id is None) else 'withdrawal'
timestamp = self.safe_integer_2(transaction, 'insertTime', 'applyTime')
currencyId = self.safe_string(transaction, 'currency')
network = self.safe_string(transaction, 'network')
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amountString = self.safe_string(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'txId')
fee = None
feeCostString = self.safe_string(transaction, 'transactionFee')
if feeCostString is not None:
fee = {
'cost': self.parse_number(feeCostString),
'currency': code,
}
if type == 'withdrawal':
# mexc withdrawal amount includes the fee
amountString = Precise.string_sub(amountString, feeCostString)
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': network,
'address': address,
'addressTo': address,
'addressFrom': None,
            'tag': self.safe_string_2(transaction, 'memo', 'addressTag'),
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.parse_number(amountString),
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def parse_transaction_status(self, status):
statuses = {
'WAIT': 'pending',
'WAIT_PACKAGING': 'pending',
'SUCCESS': 'ok',
}
return self.safe_string(statuses, status, status)
def fetch_position(self, symbol, params={}):
"""
fetch data on a single open contract trade position
:param str symbol: unified market symbol of the market the position is held in, default is None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.fetch_positions(None, self.extend(request, params))
return self.safe_value(response, 0)
def fetch_positions(self, symbols=None, params={}):
"""
fetch all open positions
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
self.load_markets()
response = self.contractPrivateGetPositionOpenPositions(params)
#
# {
# "success": True,
# "code": 0,
# "data": [
# {
# "positionId": 1394650,
# "symbol": "ETH_USDT",
# "positionType": 1,
# "openType": 1,
# "state": 1,
# "holdVol": 1,
# "frozenVol": 0,
# "closeVol": 0,
# "holdAvgPrice": 1217.3,
# "openAvgPrice": 1217.3,
# "closeAvgPrice": 0,
# "liquidatePrice": 1211.2,
# "oim": 0.1290338,
# "im": 0.1290338,
# "holdFee": 0,
# "realised": -0.0073,
# "leverage": 100,
# "createTime": 1609991676000,
# "updateTime": 1609991676000,
# "autoAddIm": False
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_positions(data, symbols)
def parse_position(self, position, market=None):
#
# {
# "positionId": 1394650,
# "symbol": "ETH_USDT",
# "positionType": 1,
# "openType": 1,
# "state": 1,
# "holdVol": 1,
# "frozenVol": 0,
# "closeVol": 0,
# "holdAvgPrice": 1217.3,
# "openAvgPrice": 1217.3,
# "closeAvgPrice": 0,
# "liquidatePrice": 1211.2,
# "oim": 0.1290338,
# "im": 0.1290338,
# "holdFee": 0,
# "realised": -0.0073,
# "leverage": 100,
# "createTime": 1609991676000,
# "updateTime": 1609991676000,
# "autoAddIm": False
# }
#
market = self.safe_market(self.safe_string(position, 'symbol'), market)
symbol = market['symbol']
contracts = self.safe_string(position, 'holdVol')
entryPrice = self.safe_number(position, 'openAvgPrice')
initialMargin = self.safe_string(position, 'im')
rawSide = self.safe_string(position, 'positionType')
side = 'long' if (rawSide == '1') else 'short'
        openType = self.safe_string(position, 'openType')  # the documented response uses 'openType', not 'margin_mode'
marginType = 'isolated' if (openType == '1') else 'cross'
leverage = self.safe_number(position, 'leverage')
liquidationPrice = self.safe_number(position, 'liquidatePrice')
        timestamp = self.safe_integer(position, 'updateTime')
return {
'info': position,
'id': None,
'symbol': symbol,
'contracts': self.parse_number(contracts),
'contractSize': None,
'entryPrice': entryPrice,
'collateral': None,
'side': side,
'unrealizedProfit': None,
'leverage': self.parse_number(leverage),
'percentage': None,
'marginType': marginType,
'notional': None,
'markPrice': None,
'liquidationPrice': liquidationPrice,
'initialMargin': self.parse_number(initialMargin),
'initialMarginPercentage': None,
'maintenanceMargin': None,
'maintenanceMarginPercentage': None,
'marginRatio': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
def fetch_transfer(self, id, since=None, limit=None, params={}):
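        """
        fetch data on an internal transfer by id (spot accounts only)
        :param str id: the unique id of the transfer
        :param int|None since: not used by mexc3 fetchTransfer
        :param int|None limit: not used by mexc3 fetchTransfer
        :param dict params: extra parameters specific to the mexc3 api endpoint
        :returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
        """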
marketType, query = self.handle_market_type_and_params('fetchTransfer', None, params)
self.load_markets()
if marketType == 'spot':
request = {
'transact_id': id,
}
response = self.spot2PrivateGetAssetInternalTransferInfo(self.extend(request, query))
#
# {
# code: '200',
# data: {
# currency: 'USDT',
# amount: '1',
# transact_id: '954877a2ef54499db9b28a7cf9ebcf41',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'SUCCESS'
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transfer(data)
elif marketType == 'swap':
raise BadRequest(self.id + ' fetchTransfer() is not supported for ' + marketType)
def fetch_transfers(self, code=None, since=None, limit=None, params={}):
"""
fetch a history of internal transfers made on an account
:param str|None code: unified currency code of the currency transferred
:param int|None since: the earliest time in ms to fetch transfers for
:param int|None limit: the maximum number of transfers structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `transfer structures <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
marketType, query = self.handle_market_type_and_params('fetchTransfers', None, params)
self.load_markets()
request = {}
currency = None
resultList = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if marketType == 'spot':
if since is not None:
request['start_time'] = since
if limit is not None:
if limit > 50:
                    raise ExchangeError(self.id + ' fetchTransfers() supports a maximum limit of 50')
request['page-size'] = limit
response = self.spot2PrivateGetAssetInternalTransferRecord(self.extend(request, query))
#
# {
# code: '200',
# data: {
# total_page: '1',
# total_size: '5',
# result_list: [{
# currency: 'USDT',
# amount: '1',
# transact_id: '954877a2ef54499db9b28a7cf9ebcf41',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'SUCCESS'
# },
# ...
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
resultList = self.safe_value(data, 'result_list', [])
elif marketType == 'swap':
if limit is not None:
request['page_size'] = limit
response = self.contractPrivateGetAccountTransferRecord(self.extend(request, query))
data = self.safe_value(response, 'data')
resultList = self.safe_value(data, 'resultList')
#
# {
# "success": True,
# "code": "0",
# "data": {
# "pageSize": "20",
# "totalCount": "10",
# "totalPage": "1",
# "currentPage": "1",
# "resultList": [
# {
# "id": "2980812",
# "txid": "fa8a1e7bf05940a3b7025856dc48d025",
# "currency": "USDT",
# "amount": "22.90213135",
# "type": "IN",
# "state": "SUCCESS",
# "createTime": "1648849076000",
# "updateTime": "1648849076000"
# },
# ]
# }
# }
#
return self.parse_transfers(resultList, currency, since, limit)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#user-universal-transfer
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str|None params['symbol']: market symbol required for margin account transfers eg:BTCUSDT
:returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
self.load_markets()
currency = self.currency(code)
accounts = {
'spot': 'SPOT',
'swap': 'FUTURES',
'margin': 'ISOLATED_MARGIN',
}
fromId = self.safe_string(accounts, fromAccount)
toId = self.safe_string(accounts, toAccount)
if fromId is None:
keys = list(accounts.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accounts.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
request = {
'asset': currency['id'],
'amount': amount,
'fromAccountType': fromId,
'toAccountType': toId,
}
if (fromId == 'ISOLATED_MARGIN') or (toId == 'ISOLATED_MARGIN'):
symbol = self.safe_string(params, 'symbol')
params = self.omit(params, 'symbol')
if symbol is None:
raise ArgumentsRequired(self.id + ' transfer() requires a symbol argument for isolated margin')
market = self.market(symbol)
request['symbol'] = market['id']
response = self.spotPrivatePostCapitalTransfer(self.extend(request, params))
#
# {
# "tranId": "ebb06123e6a64f4ab234b396c548d57e"
# }
#
transaction = self.parse_transfer(response, currency)
return self.extend(transaction, {
'amount': amount,
'fromAccount': fromAccount,
'toAccount': toAccount,
})
def parse_transfer(self, transfer, currency=None):
#
# spot: fetchTransfer
#
# {
# currency: 'USDT',
# amount: '1',
# transact_id: 'b60c1df8e7b24b268858003f374ecb75',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'WAIT'
# }
#
# swap: fetchTransfer
#
# {
# "currency": "USDT",
# "amount": "22.90213135",
# "txid": "fa8a1e7bf05940a3b7025856dc48d025",
# "id": "2980812",
# "type": "IN",
# "state": "SUCCESS",
# "createTime": "1648849076000",
# "updateTime": "1648849076000"
# }
#
# transfer
#
# {
# "tranId": "ebb06123e6a64f4ab234b396c548d57e"
# }
#
currencyId = self.safe_string(transfer, 'currency')
id = self.safe_string_n(transfer, ['transact_id', 'txid', 'tranId'])
timestamp = self.safe_integer(transfer, 'createTime')
datetime = self.iso8601(timestamp) if (timestamp is not None) else None
direction = self.safe_string(transfer, 'type')
accountFrom = None
accountTo = None
if direction is not None:
accountFrom = 'MAIN' if (direction == 'IN') else 'CONTRACT'
accountTo = 'CONTRACT' if (direction == 'IN') else 'MAIN'
else:
accountFrom = self.safe_string(transfer, 'from')
accountTo = self.safe_string(transfer, 'to')
return {
'info': transfer,
'id': id,
'timestamp': timestamp,
'datetime': datetime,
'currency': self.safe_currency_code(currencyId, currency),
'amount': self.safe_number(transfer, 'amount'),
'fromAccount': self.parse_account_id(accountFrom),
'toAccount': self.parse_account_id(accountTo),
'status': self.parse_transfer_status(self.safe_string_2(transfer, 'transact_state', 'state')),
}
def parse_account_id(self, status):
statuses = {
'MAIN': 'spot',
'CONTRACT': 'swap',
}
return self.safe_string(statuses, status, status)
def parse_transfer_status(self, status):
statuses = {
'SUCCESS': 'ok',
'FAILED': 'failed',
'WAIT': 'pending',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#withdraw
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper_2(params, 'network', 'chain')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string(networks, network, network) # handle ETH > ERC-20 alias
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'coin': currency['id'],
'address': address,
'amount': amount,
}
if tag is not None:
request['memo'] = tag
if network is not None:
request['network'] = network
params = self.omit(params, 'network')
response = self.spotPrivatePostCapitalWithdrawApply(self.extend(request, params))
#
# {
# "id":"7213fea8e94b4a5593d507237e5a555b"
# }
#
return self.parse_transaction(response, currency)
def set_position_mode(self, hedged, symbol=None, params={}):
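        """
        set hedged to True or False for the account
        :param bool hedged: set to True for hedge mode, False for one-way mode
        :param str|None symbol: not used by mexc3 setPositionMode
        :param dict params: extra parameters specific to the mexc3 api endpoint
        :returns dict: response from the exchange
        """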
request = {
'positionMode': 1 if hedged else 2, # 1 Hedge, 2 One-way, before changing position mode make sure that there are no active orders, planned orders, or open positions, the risk limit level will be reset to 1
}
response = self.contractPrivatePostPositionChangePositionMode(self.extend(request, params))
#
# {
# "success":true,
# "code":0
# }
#
return response
def fetch_position_mode(self, symbol=None, params={}):
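        """
        fetch the current position mode, hedged or one-way
        :param str|None symbol: not used by mexc3 fetchPositionMode
        :param dict params: extra parameters specific to the mexc3 api endpoint
        :returns dict: an object detailing whether the account is in hedged or one-way mode
        """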
response = self.contractPrivateGetPositionPositionMode(params)
#
# {
# "success":true,
# "code":0,
# "data":2
# }
#
positionMode = self.safe_integer(response, 'data')
return {
'info': response,
'hedged': (positionMode == 1),
}
def borrow_margin(self, code, amount, symbol=None, params={}):
"""
create a loan to borrow margin
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#loan
:param str code: unified currency code of the currency to borrow
:param float amount: the amount to borrow
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `margin loan structure <https://docs.ccxt.com/en/latest/manual.html#margin-loan-structure>`
"""
self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' borrowMargin() requires a symbol argument for isolated margin')
market = self.market(symbol)
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': self.currency_to_precision(code, amount),
'symbol': market['id'],
}
response = self.spotPrivatePostMarginLoan(self.extend(request, params))
#
# {
# "tranId": "762407666453712896"
# }
#
transaction = self.parse_margin_loan(response, currency)
return self.extend(transaction, {
'amount': amount,
'symbol': symbol,
})
def repay_margin(self, code, amount, symbol=None, params={}):
"""
repay borrowed margin and interest
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#repayment
:param str code: unified currency code of the currency to repay
:param float amount: the amount to repay
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the mexc3 api endpoint
:param str params['borrowId']: transaction id '762407666453712896'
:returns dict: a `margin loan structure <https://docs.ccxt.com/en/latest/manual.html#margin-loan-structure>`
"""
self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' repayMargin() requires a symbol argument for isolated margin')
id = self.safe_string_2(params, 'id', 'borrowId')
if id is None:
raise ArgumentsRequired(self.id + ' repayMargin() requires a borrowId argument in the params')
market = self.market(symbol)
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': self.currency_to_precision(code, amount),
'borrowId': id,
'symbol': market['id'],
}
response = self.spotPrivatePostMarginRepay(self.extend(request, params))
#
# {
# "tranId": "762407666453712896"
# }
#
transaction = self.parse_margin_loan(response, currency)
return self.extend(transaction, {
'amount': amount,
'symbol': symbol,
})
def fetch_transaction_fees(self, codes=None, params={}):
"""
fetch deposit and withdrawal fees
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#query-the-currency-information
:param [str]|None codes: returns fees for all currencies if None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>`
"""
self.load_markets()
response = self.spotPrivateGetCapitalConfigGetall(params)
#
# [
# {
# coin: 'AGLD',
# name: 'Adventure Gold',
# networkList: [
# {
# coin: 'AGLD',
# depositDesc: null,
# depositEnable: True,
# minConfirm: '0',
# name: 'Adventure Gold',
# network: 'ERC20',
# withdrawEnable: True,
# withdrawFee: '10.000000000000000000',
# withdrawIntegerMultiple: null,
# withdrawMax: '1200000.000000000000000000',
# withdrawMin: '20.000000000000000000',
# sameAddress: False,
# contract: '0x32353a6c91143bfd6c7d363b546e62a9a2489a20',
# withdrawTips: null,
# depositTips: null
# }
# ...
# ]
# },
# ...
# ]
#
return self.parse_transaction_fees(response, codes)
def parse_transaction_fees(self, response, codes=None):
withdrawFees = {}
for i in range(0, len(response)):
entry = response[i]
currencyId = self.safe_string(entry, 'coin')
currency = self.safe_currency(currencyId)
code = self.safe_string(currency, 'code')
if (codes is None) or (self.in_array(code, codes)):
withdrawFees[code] = self.parse_transaction_fee(entry, currency)
return {
'withdraw': withdrawFees,
'deposit': {},
'info': response,
}
def parse_transaction_fee(self, transaction, currency=None):
#
# {
# coin: 'AGLD',
# name: 'Adventure Gold',
# networkList: [
# {
# coin: 'AGLD',
# depositDesc: null,
# depositEnable: True,
# minConfirm: '0',
# name: 'Adventure Gold',
# network: 'ERC20',
# withdrawEnable: True,
# withdrawFee: '10.000000000000000000',
# withdrawIntegerMultiple: null,
# withdrawMax: '1200000.000000000000000000',
# withdrawMin: '20.000000000000000000',
# sameAddress: False,
# contract: '0x32353a6c91143bfd6c7d363b546e62a9a2489a20',
# withdrawTips: null,
# depositTips: null
# }
# ...
# ]
# }
#
networkList = self.safe_value(transaction, 'networkList', [])
result = {}
for j in range(0, len(networkList)):
networkEntry = networkList[j]
networkId = self.safe_string(networkEntry, 'network')
networkCode = self.safe_string(self.options['networks'], networkId, networkId)
fee = self.safe_number(networkEntry, 'withdrawFee')
result[networkCode] = fee
return result
def fetch_deposit_withdraw_fees(self, codes=None, params={}):
"""
fetch deposit and withdrawal fees
see https://mxcdevelop.github.io/apidocs/spot_v3_en/#query-the-currency-information
:param [str]|None codes: returns fees for all currencies if None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>`
"""
self.load_markets()
response = self.spotPrivateGetCapitalConfigGetall(params)
#
# [
# {
# coin: 'AGLD',
# name: 'Adventure Gold',
# networkList: [
# {
# coin: 'AGLD',
# depositDesc: null,
# depositEnable: True,
# minConfirm: '0',
# name: 'Adventure Gold',
# network: 'ERC20',
# withdrawEnable: True,
# withdrawFee: '10.000000000000000000',
# withdrawIntegerMultiple: null,
# withdrawMax: '1200000.000000000000000000',
# withdrawMin: '20.000000000000000000',
# sameAddress: False,
# contract: '0x32353a6c91143bfd6c7d363b546e62a9a2489a20',
# withdrawTips: null,
# depositTips: null
# }
# ...
# ]
# },
# ...
# ]
#
return self.parse_deposit_withdraw_fees(response, codes, 'coin')
def parse_deposit_withdraw_fee(self, fee, currency=None):
#
# {
# coin: 'AGLD',
# name: 'Adventure Gold',
# networkList: [
# {
# coin: 'AGLD',
# depositDesc: null,
# depositEnable: True,
# minConfirm: '0',
# name: 'Adventure Gold',
# network: 'ERC20',
# withdrawEnable: True,
# withdrawFee: '10.000000000000000000',
# withdrawIntegerMultiple: null,
# withdrawMax: '1200000.000000000000000000',
# withdrawMin: '20.000000000000000000',
# sameAddress: False,
# contract: '0x32353a6c91143bfd6c7d363b546e62a9a2489a20',
# withdrawTips: null,
# depositTips: null
# }
# ...
# ]
# }
#
networkList = self.safe_value(fee, 'networkList', [])
result = self.deposit_withdraw_fee(fee)
for j in range(0, len(networkList)):
networkEntry = networkList[j]
networkId = self.safe_string(networkEntry, 'network')
networkCode = self.network_id_to_code(networkId, self.safe_string(currency, 'code'))
result['networks'][networkCode] = {
'withdraw': {
'fee': self.safe_number(networkEntry, 'withdrawFee'),
'percentage': None,
},
'deposit': {
'fee': None,
'percentage': None,
},
}
return self.assign_default_deposit_withdraw_fees(result)
def parse_margin_loan(self, info, currency=None):
#
# {
# "tranId": "762407666453712896"
# }
#
return {
'id': self.safe_string(info, 'tranId'),
'currency': self.safe_currency_code(None, currency),
'amount': None,
'symbol': None,
'timestamp': None,
'datetime': None,
'info': info,
}
def handle_margin_mode_and_params(self, methodName, params={}, defaultValue=None):
"""
* @ignore
        marginMode is read from params["marginMode"] or self.options["marginMode"] / self.options["defaultMarginMode"]; it defaults to "isolated" when params["margin"] is True or self.options["defaultType"] == 'margin'
:param dict params: extra parameters specific to the exchange api endpoint
:param bool|None params['margin']: True for trading spot-margin
:returns [str|None, dict]: the marginMode in lowercase
"""
defaultType = self.safe_string(self.options, 'defaultType')
isMargin = self.safe_value(params, 'margin', False)
marginMode = None
marginMode, params = super(mexc3, self).handle_margin_mode_and_params(methodName, params, defaultValue)
if (defaultType == 'margin') or (isMargin is True):
marginMode = 'isolated'
return [marginMode, params]
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
section, access = api
path, params = self.resolve_path(path, params)
url = None
if section == 'spot':
url = self.urls['api'][section][access] + '/api/' + self.version + '/' + path
paramsEncoded = ''
if access == 'private':
params['timestamp'] = self.milliseconds()
params['recvWindow'] = self.safe_integer(self.options, 'recvWindow', 5000)
if params:
paramsEncoded = self.urlencode(params)
url += '?' + paramsEncoded
if access == 'private':
self.check_required_credentials()
signature = self.hmac(self.encode(paramsEncoded), self.encode(self.secret), hashlib.sha256)
url += '&' + 'signature=' + signature
headers = {
'X-MEXC-APIKEY': self.apiKey,
'source': self.safe_string(self.options, 'broker', 'CCXT'),
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
elif section == 'contract' or section == 'spot2':
url = self.urls['api'][section][access] + '/' + self.implode_params(path, params)
params = self.omit(params, self.extract_params(path))
if access == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
timestamp = str(self.milliseconds())
auth = ''
headers = {
'ApiKey': self.apiKey,
'Request-Time': timestamp,
'Content-Type': 'application/json',
'source': self.safe_string(self.options, 'broker', 'CCXT'),
}
if method == 'POST':
auth = self.json(params)
body = auth
else:
params = self.keysort(params)
if params:
auth += self.urlencode(params)
url += '?' + auth
auth = self.apiKey + timestamp + auth
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
headers['Signature'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
# spot
# {"code":-1128,"msg":"Combination of optional parameters invalid.","_extend":null}
# {"success":false,"code":123456,"message":"Order quantity error...."}
#
# contract
#
# {"code":10232,"msg":"The currency not exist"}
# {"code":10216,"msg":"No available deposit address"}
# {"success":true, "code":0, "data":1634095541710}
#
success = self.safe_value(response, 'success', False) # v1
if success is True:
return
responseCode = self.safe_string(response, 'code', None)
if (responseCode is not None) and (responseCode != '200') and (responseCode != '0'):
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], responseCode, feedback)
raise ExchangeError(feedback)
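
# Usage sketch (hypothetical credentials, amounts and addresses):
#
#   exchange = mexc3({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#   positions = exchange.fetch_positions()
#   transfer = exchange.transfer('USDT', 10, 'spot', 'swap')
#   tx = exchange.withdraw('USDT', 25, 'TXXXX...', params={'network': 'TRC20'})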
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
b30b1b3d06da25e2496aaace8ae11976309d769b | 077d6a98c19951ccec87ca03ddd0bb468aa98c14 | /premium/amazon/trees_and_graphs/symmetric_tree.py | c17c98610dd42b5b67797436a273a33d7bdcdb77 | [] | no_license | rayt579/leetcode | c099a5147f161d44179a5a2bf9bd33ab3a661f79 | 9d0ff0f8705451947a6605ab5ef92bb3e27a7147 | refs/heads/master | 2020-03-19T04:45:07.829074 | 2019-04-07T22:26:02 | 2019-04-07T22:26:02 | 135,862,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py | '''
https://leetcode.com/explore/interview/card/amazon/78/trees-and-graphs/507/
'''
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
return self.is_symmetric_iterative(root)
def is_symmetric_recursive(self, root):
if root is None:
return True
return self.symmetric_tree_helper(root.left, root.right)
def symmetric_tree_helper(self, left, right):
if left is None or right is None:
return left == right
if left.val != right.val:
return False
return self.symmetric_tree_helper(left.left, right.right) and self.symmetric_tree_helper(left.right, right.left)
def is_symmetric_iterative(self, root):
if root is None:
return True
explore = [(root.left, root.right)]
while len(explore) > 0:
left, right = explore.pop()
if (not left and right) or (not right and left):
return False
if left and right:
if left.val != right.val:
return False
explore.append((left.right, right.left))
explore.append((left.left, right.right))
return True
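
# Both approaches compare mirrored node pairs (left.left vs right.right and
# left.right vs right.left); each runs in O(n) time, with O(h) recursion depth
# in the recursive version or an explicit stack of at most O(n) pairs in the
# iterative version.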
sol = Solution()
a = TreeNode(1)
a.left = TreeNode(2)
a.left.left = TreeNode(3)
a.left.right = TreeNode(4)
a.right = TreeNode(2)
a.right.left = TreeNode(4)
a.right.right = TreeNode(3)
b = TreeNode(1)
b.left = TreeNode(2)
b.left.right = TreeNode(3)
b.right = TreeNode(2)
b.right.right = TreeNode(3)
print('Expecting True: {}'.format(sol.isSymmetric(a)))
print('Expecting False: {}'.format(sol.isSymmetric(b)))
| [
"rayt579@yahoo.com"
] | rayt579@yahoo.com |
9706fb2addd72cc76cdd034a6caee3fa0f1c7921 | c8a296a51f0f3fd3be1a16ff5a5f501463957594 | /services/ingestion/poller/ubwFagtimePoller/ubw_fag.py | 722503fafff154775d91f45564c3a8fbfd3476b3 | [] | no_license | rtc11/Dataplattform-version2 | 87ab936f2464ef0a6717b164a160efa620ec7770 | 5569ec0e300d0932dae225cc9939165fb56b5a91 | refs/heads/master | 2021-05-18T14:58:27.863250 | 2020-03-30T10:49:37 | 2020-03-30T10:49:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,537 | py | import boto3
import json
from datetime import datetime
from os import environ
from zeep import Client
from xmltodict import parse
from os import path
def handler(event, context):
poll()
return {'statusCode': 200, 'body': 'Success'}
def ssm_parameters():
    # use a distinct local name so it does not shadow `from os import path`
    parameter_path = f'{environ.get("STAGE")}/{environ.get("SERVICE")}'
    client = boto3.client('ssm')
    return (
        client.get_parameter(
            Name=f'/{parameter_path}/UBW_USERNAME', WithDecryption=True)['Parameter']['Value'],
        client.get_parameter(
            Name=f'/{parameter_path}/UBW_PASSWORD', WithDecryption=True)['Parameter']['Value'],
        client.get_parameter(
            Name=f'/{parameter_path}/UBW_CLIENT', WithDecryption=True)['Parameter']['Value'],
        client.get_parameter(
            Name=f'/{parameter_path}/UBW_URL', WithDecryption=False)['Parameter']['Value'],
        client.get_parameter(
            Name=f'/{parameter_path}/UBW_TEMPLATE_ID', WithDecryption=True)['Parameter']['Value'])
def ubw_record_filter(record):
if "tab" not in record or "reg_period" not in record:
return False
# Only the "B" documents are completed, the rest should be ignored.
if record["tab"] != "B":
return False
    # Only upload docs that are older than 4 weeks.
    # Note: year * 52 + week is an approximation (ISO years can have 53 weeks),
    # so entries right at a year boundary may be off by one week.
    year, week = record["reg_period"][0:4], record["reg_period"][4:]
    cur_year, cur_week = datetime.now().isocalendar()[0:2]
    number_of_weeks = int(year) * 52 + int(week)
    current_number_of_weeks = cur_year * 52 + cur_week
if number_of_weeks > current_number_of_weeks - 4:
return False
return True
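
# Sketch of an exact alternative for the 4-week age check (assumes Python 3.8+
# for date.fromisocalendar; not used above):
#
#   from datetime import date
#   period_monday = date.fromisocalendar(int(year), int(week), 1)
#   old_enough = (date.today() - period_monday).days >= 28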
def poll():
username, password, client, url, template_id = ssm_parameters()
soap_client = Client(wsdl=f'{url}?QueryEngineService/QueryEngineV200606DotNet')
res = soap_client.service.GetTemplateResultAsXML(
input={
'TemplateId': template_id,
'TemplateResultOptions': {
'ShowDescriptions': True,
'Aggregated': True,
'OverrideAggregation': False,
'CalculateFormulas': True,
'FormatAlternativeBreakColumns': True,
'RemoveHiddenColumns': False,
'FirstRecord': -1,
'LastRecord': -1
},
'SearchCriteriaPropertiesList': {
'SearchCriteriaProperties': [
{
'ColumnName': 'timecode',
'Description': 'Tidskode',
'RestrictionType': '!()',
'FromValue': "'X9'",
'DataType': 10,
'DataLength': 25,
'DataCase': 2,
'IsParameter': True,
'IsVisible': False,
'IsPrompt': False
}
]
},
},
credentials={
'Username': username,
'Client': client,
'Password': password,
})
ubw_data = parse(res['TemplateResult'])['Agresso']['AgressoQE']
timestamp = datetime.now().timestamp()
ubw_data = {
'metadata': {'timestamp': timestamp},
'data': [rec for rec in ubw_data if ubw_record_filter(rec)]
}
access_path = environ.get("ACCESS_PATH")
s3 = boto3.resource('s3')
s3_object = s3.Object(environ.get('DATALAKE'), path.join(access_path, f'{int(timestamp)}.json'))
s3_object.put(Body=(bytes(json.dumps(ubw_data).encode('UTF-8'))))
if __name__ == "__main__":
poll()
| [
"jens.markussen@knowit.no"
] | jens.markussen@knowit.no |
c4ab840d46ed70498c05ff4866a7ced75ae8b528 | ec99d10fa821651db7616d68c8ca54f69007c1f4 | /sciencebeam_gym/convert/conversion_pipeline.py | fc5ea0a75386dd57660d7d5ccf65ec5e3afd200e | [
"MIT"
] | permissive | pvk444/sciencebeam-gym | 65d3047554839abb345794100ab21900e574ec7a | 523738b0d242c8dc99fb96ce9eb1d65500e29c1f | refs/heads/master | 2020-04-20T08:37:17.915949 | 2019-02-01T09:19:13 | 2019-02-01T09:19:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,033 | py | from __future__ import absolute_import
import argparse
import os
import logging
import pickle
from io import BytesIO
import apache_beam as beam
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions
from lxml import etree
from sciencebeam_utils.beam_utils.utils import (
TransformAndCount,
TransformAndLog,
MapOrLog,
PreventFusion
)
from sciencebeam_utils.beam_utils.files import (
ReadFileList,
FindFiles
)
from sciencebeam_utils.beam_utils.io import (
read_all_from_path,
save_file_content
)
from sciencebeam_utils.beam_utils.main import (
add_cloud_args,
process_cloud_args,
process_sciencebeam_gym_dep_args
)
from sciencebeam_utils.utils.collection import (
extend_dict,
remove_keys_from_dict
)
from sciencebeam_utils.utils.file_path import (
join_if_relative_path,
get_output_file
)
from sciencebeam_gym.structured_document.structured_document_loader import (
load_structured_document
)
from sciencebeam_gym.structured_document.lxml import (
LxmlStructuredDocument
)
from sciencebeam_gym.preprocess.preprocessing_utils import (
convert_pdf_bytes_to_lxml,
parse_page_range,
save_pages,
pdf_bytes_to_png_pages
)
from sciencebeam_gym.inference_model.extract_to_xml import (
extract_structured_document_to_xml
)
from sciencebeam_gym.models.text.crf.annotate_using_predictions import (
predict_and_annotate_structured_document,
CRF_TAG_SCOPE
)
from sciencebeam_gym.inference_model.annotate_using_predictions import (
annotate_structured_document_using_predicted_images,
AnnotatedImage,
CV_TAG_SCOPE
)
from .grobid.grobid_xml_enhancer import (
GrobidXmlEnhancer
)
from .cv_conversion_utils import (
InferenceModelWrapper,
image_data_to_png
)
from .grobid.grobid_service import (
grobid_service,
GrobidApiPaths
)
def get_logger():
return logging.getLogger(__name__)
class MetricCounters(object):
FILES = 'files'
READ_LXML_ERROR = 'read_lxml_error_count'
CONVERT_PDF_TO_LXML_ERROR = 'ConvertPdfToLxml_error_count'
CONVERT_PDF_TO_PNG_ERROR = 'ConvertPdfToPng_error_count'
CONVERT_LXML_TO_SVG_ANNOT_ERROR = 'ConvertPdfToSvgAnnot_error_count'
CV_PREDICTION_ERROR = 'ComputerVisionPrediction_error_count'
ANNOTATE_USING_PREDICTION_ERROR = 'AnnotateLxmlUsingPrediction_error_count'
EXTRACT_TO_XML_ERROR = 'ExtractToXml_error_count'
GROBID_ERROR = 'Grobid_error_count'
class OutputExt(object):
CRF_ANNOT_LXML = '.crf.lxml.gz'
CRF_CV_ANNOT_LXML = '.crf-cv.lxml.gz'
CV_ANNOT_LXML = '.cv.lxml.gz'
CV_PNG = '.cv-png.zip'
class DataProps(object):
SOURCE_FILENAME = 'source_filename'
PDF_CONTENT = 'pdf_content'
STRUCTURED_DOCUMENT = 'structured_document'
PDF_PNG_PAGES = 'pdf_png_pages'
CV_PREDICTION_PNG_PAGES = 'cv_prediction_png_pages'
COLOR_MAP = 'color_map'
EXTRACTED_XML = 'extracted_xml'
def convert_pdf_bytes_to_structured_document(pdf_content, path=None, page_range=None):
return LxmlStructuredDocument(etree.parse(BytesIO(
convert_pdf_bytes_to_lxml(pdf_content, path=path, page_range=page_range)
)))
def annotate_structured_document_using_predicted_image_data(
structured_document, prediction_images, color_map, tag_scope=None):
return annotate_structured_document_using_predicted_images(
structured_document, (
AnnotatedImage(prediction_image, color_map)
for prediction_image in prediction_images
), tag_scope=tag_scope
)
def extract_annotated_structured_document_to_xml(structured_document, tag_scope=None):
xml_root = extract_structured_document_to_xml(structured_document, tag_scope=tag_scope)
return etree.tostring(xml_root, pretty_print=True)
def load_crf_model(path):
with FileSystems.open(path) as crf_model_f:
return pickle.load(crf_model_f)
def save_structured_document(filename, structured_document):
# only support saving lxml for now
assert isinstance(structured_document, LxmlStructuredDocument)
save_file_content(filename, etree.tostring(structured_document.root, pretty_print=True))
return filename
def get_annot_lxml_ext(crf_enabled, cv_enabled):
if crf_enabled and cv_enabled:
return OutputExt.CRF_CV_ANNOT_LXML
if crf_enabled:
return OutputExt.CRF_ANNOT_LXML
if cv_enabled:
return OutputExt.CV_ANNOT_LXML
    raise AssertionError('at least one of crf or cv needs to be enabled')
def PdfUrlSource(opt):
if opt.pdf_file_list:
return ReadFileList(opt.pdf_file_list, column=opt.pdf_file_column, limit=opt.limit)
else:
return FindFiles(join_if_relative_path(opt.base_data_path, opt.pdf_path))
def ReadPdfContent():
return "ReadPdfContent" >> TransformAndCount(
beam.Map(lambda pdf_url: {
DataProps.SOURCE_FILENAME: pdf_url,
DataProps.PDF_CONTENT: read_all_from_path(pdf_url)
}),
MetricCounters.FILES
)
def add_read_pdfs_to_annotated_lxml_pipeline_steps(p, opt, get_pipeline_output_file):
page_range = opt.pages
cv_enabled = opt.cv_model_export_dir
extract_tag_scope = None
pdf_urls = p | PdfUrlSource(opt)
lxml_content = (
pdf_urls |
PreventFusion() |
ReadPdfContent() |
"ConvertPdfToLxml" >> MapOrLog(lambda v: extend_dict(v, {
DataProps.STRUCTURED_DOCUMENT: convert_pdf_bytes_to_structured_document(
v[DataProps.PDF_CONTENT], path=v[DataProps.SOURCE_FILENAME],
page_range=page_range
)
}), log_fn=lambda e, v: (
get_logger().warning(
'caught exception (ignoring item): %s, pdf: %s',
e, v[DataProps.SOURCE_FILENAME], exc_info=e
)
), error_count=MetricCounters.CONVERT_PDF_TO_LXML_ERROR)
)
if cv_enabled:
image_size = (
(opt.image_width, opt.image_height)
if opt.image_width and opt.image_height
else None
)
inference_model_wrapper = InferenceModelWrapper(opt.cv_model_export_dir)
cv_predictions = (
lxml_content |
"ConvertPdfToPng" >> MapOrLog(lambda v: remove_keys_from_dict(
extend_dict(v, {
DataProps.PDF_PNG_PAGES: list(pdf_bytes_to_png_pages(
v[DataProps.PDF_CONTENT],
dpi=90, # not used if the image is scaled
image_size=image_size,
page_range=page_range
))
}),
keys_to_remove={DataProps.PDF_CONTENT}
), error_count=MetricCounters.CONVERT_PDF_TO_PNG_ERROR) |
"ComputerVisionPrediction" >> MapOrLog(lambda v: remove_keys_from_dict(
extend_dict(v, {
DataProps.CV_PREDICTION_PNG_PAGES: inference_model_wrapper(
v[DataProps.PDF_PNG_PAGES]
),
DataProps.COLOR_MAP: inference_model_wrapper.get_color_map()
}),
keys_to_remove={DataProps.PDF_PNG_PAGES}
), error_count=MetricCounters.CV_PREDICTION_ERROR)
)
if opt.save_cv_output:
_ = (
cv_predictions |
"SaveComputerVisionOutput" >> TransformAndLog(
beam.Map(lambda v: save_pages(
get_pipeline_output_file(
v[DataProps.SOURCE_FILENAME],
OutputExt.CV_PNG
),
'.png',
[image_data_to_png(image_data)
for image_data in v[DataProps.CV_PREDICTION_PNG_PAGES]]
)),
log_fn=lambda x: get_logger().info('saved cv output: %s', x)
)
)
cv_annotated_lxml = (
cv_predictions |
"AnnotateLxmlUsingCvPrediction" >> MapOrLog(lambda v: remove_keys_from_dict(
extend_dict(v, {
DataProps.STRUCTURED_DOCUMENT: (
annotate_structured_document_using_predicted_image_data(
v[DataProps.STRUCTURED_DOCUMENT],
v[DataProps.CV_PREDICTION_PNG_PAGES],
v[DataProps.COLOR_MAP],
tag_scope=CV_TAG_SCOPE
)
)
}),
keys_to_remove={DataProps.PDF_PNG_PAGES}
), error_count=MetricCounters.ANNOTATE_USING_PREDICTION_ERROR)
)
lxml_content = cv_annotated_lxml
extract_tag_scope = CV_TAG_SCOPE
if opt.crf_model:
model = load_crf_model(opt.crf_model)
crf_annotated_lxml = (
lxml_content |
"AnnotateLxmlUsingCrfPrediction" >> MapOrLog(lambda v: extend_dict(v, {
DataProps.STRUCTURED_DOCUMENT: predict_and_annotate_structured_document(
v[DataProps.STRUCTURED_DOCUMENT], model
)
}), error_count=MetricCounters.ANNOTATE_USING_PREDICTION_ERROR)
)
lxml_content = crf_annotated_lxml
extract_tag_scope = CRF_TAG_SCOPE
if opt.save_annot_lxml:
_ = ( # flake8: noqa
lxml_content |
"SaveAnnotLxml" >> TransformAndLog(
beam.Map(lambda v: save_structured_document(
get_pipeline_output_file(
v[DataProps.SOURCE_FILENAME],
get_annot_lxml_ext(
crf_enabled=opt.crf_model,
cv_enabled=cv_enabled
)
),
v[DataProps.STRUCTURED_DOCUMENT]
)),
log_fn=lambda x: get_logger().info('saved annoted lxml to: %s', x)
)
)
return lxml_content, extract_tag_scope
def add_read_pdfs_to_grobid_xml_pipeline_steps(p, opt):
grobid_transformer = grobid_service(
opt.grobid_url, opt.grobid_action, start_service=opt.start_grobid_service
)
return (
p |
PdfUrlSource(opt) |
PreventFusion() |
ReadPdfContent() |
"Grobid" >> MapOrLog(lambda v: extend_dict(v, {
DataProps.EXTRACTED_XML: grobid_transformer(
(v[DataProps.SOURCE_FILENAME], v[DataProps.PDF_CONTENT])
)[1]
}), error_count=MetricCounters.GROBID_ERROR)
)
def add_read_source_to_extracted_xml_pipeline_steps(p, opt, get_pipeline_output_file):
if opt.lxml_file_list:
lxml_urls = p | ReadFileList(
opt.lxml_file_list, column=opt.lxml_file_column, limit=opt.limit)
annotated_lxml = (
lxml_urls |
PreventFusion() |
"ReadLxmlContent" >> TransformAndCount(
MapOrLog(lambda url: {
DataProps.SOURCE_FILENAME: url,
DataProps.STRUCTURED_DOCUMENT: load_structured_document(url)
}, error_count=MetricCounters.READ_LXML_ERROR),
MetricCounters.FILES
)
)
extract_tag_scope = None
else:
annotated_lxml, extract_tag_scope = add_read_pdfs_to_annotated_lxml_pipeline_steps(
p, opt, get_pipeline_output_file
)
extracted_xml = (
annotated_lxml |
"ExtractToXml" >> MapOrLog(lambda v: remove_keys_from_dict(
extend_dict(v, {
DataProps.EXTRACTED_XML: extract_annotated_structured_document_to_xml(
v[DataProps.STRUCTURED_DOCUMENT],
tag_scope=extract_tag_scope
)
}),
keys_to_remove={DataProps.STRUCTURED_DOCUMENT}
), error_count=MetricCounters.EXTRACT_TO_XML_ERROR)
)
if opt.use_grobid:
enhancer = GrobidXmlEnhancer(
opt.grobid_url, start_service=opt.start_grobid_service
)
extracted_xml = (
extracted_xml |
"GrobidEnhanceXml" >> MapOrLog(lambda v: extend_dict(v, {
DataProps.EXTRACTED_XML: enhancer(
v[DataProps.EXTRACTED_XML]
)
}), error_count=MetricCounters.GROBID_ERROR)
)
return extracted_xml
def configure_pipeline(p, opt):
def get_pipeline_output_file(source_url, ext):
return get_output_file(
source_url,
opt.base_data_path,
opt.output_path,
ext
)
if (
opt.use_grobid and not opt.crf_model and
not opt.cv_model_export_dir and not opt.lxml_file_list
):
extracted_xml = add_read_pdfs_to_grobid_xml_pipeline_steps(p, opt)
else:
extracted_xml = add_read_source_to_extracted_xml_pipeline_steps(
p, opt, get_pipeline_output_file
)
_ = ( # flake8: noqa
extracted_xml |
"WriteXml" >> TransformAndLog(
beam.Map(lambda v: save_file_content(
get_pipeline_output_file(
v[DataProps.SOURCE_FILENAME],
opt.output_suffix
),
v[DataProps.EXTRACTED_XML]
)),
log_fn=lambda x: get_logger().info('saved xml to: %s', x)
)
)
def add_main_args(parser):
parser.add_argument(
'--data-path', type=str, required=True,
help='base data path'
)
source_group = parser.add_argument_group('source')
source_one_of_group = source_group.add_mutually_exclusive_group(required=True)
source_one_of_group.add_argument(
'--pdf-path', type=str, required=False,
help='path to pdf file(s), relative to data-path'
)
source_one_of_group.add_argument(
'--pdf-file-list', type=str, required=False,
help='path to pdf csv/tsv file list'
)
source_group.add_argument(
'--pdf-file-column', type=str, required=False, default='pdf_url',
help='the column of the pdf file list to use'
)
source_one_of_group.add_argument(
'--lxml-file-list', type=str, required=False,
help='path to annotated lxml or svg pages zip file list'
'; (CRF and CV models are not supported in this mode)'
)
source_group.add_argument(
'--lxml-file-column', type=str, required=False, default='url',
help='the column of the lxml file list to use'
)
parser.add_argument(
'--limit', type=int, required=False,
help='limit the number of file pairs to process'
)
output_group = parser.add_argument_group('output')
output_group.add_argument(
'--output-path', required=False,
help='Output directory to write results to.'
)
output_group.add_argument(
'--output-suffix', required=False, default='.crf.xml',
help='Output file suffix to add to the filename (excluding the file extension).'
)
parser.add_argument(
'--save-annot-lxml', action='store_true', default=False,
help='enable saving of annotated lxml'
)
grobid_group = parser.add_argument_group('Grobid')
grobid_group.add_argument(
'--use-grobid', action='store_true', default=False,
help='enable the use of grobid'
)
grobid_group.add_argument(
'--grobid-url', required=False, default=None,
help='Base URL to the Grobid service'
)
parser.add_argument(
'--grobid-action', required=False,
default=GrobidApiPaths.PROCESS_HEADER_DOCUMENT,
help='Name of the Grobid action (if Grobid is used without CRF or CV model)'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='enable debug output'
)
parser.add_argument(
'--pages', type=parse_page_range, default=None,
help='only processes the selected pages'
)
crf_group = parser.add_argument_group('CRF')
crf_group.add_argument(
'--crf-model', type=str, required=False,
help='path to saved crf model'
)
cv_group = parser.add_argument_group('CV')
cv_group.add_argument(
'--cv-model-export-dir', type=str, required=False,
help='path to cv model export dir'
)
cv_group.add_argument(
'--image-width', type=int, required=False,
default=256,
help='image width of resulting PNGs'
)
cv_group.add_argument(
'--image-height', type=int, required=False,
default=256,
help='image height of resulting PNGs'
)
cv_group.add_argument(
'--save-cv-output', action='store_true', default=False,
help='enable saving of computer vision output (png pages)'
)
def process_main_args(args, parser):
args.base_data_path = args.data_path.replace('/*/', '/')
if not args.output_path:
args.output_path = os.path.join(
os.path.dirname(args.base_data_path),
os.path.basename(args.base_data_path + '-results')
)
if args.lxml_file_list:
if args.crf_model:
parser.error('--crf-model cannot be used in conjunction with --lxml-file-list')
if args.cv_model_export_dir:
parser.error(
'--crf-model-export-dir cannot be used in conjunction with --lxml-file-list'
)
else:
if not args.crf_model and not args.cv_model_export_dir and not args.use_grobid:
parser.error(
'--crf-model, --cv-model-export-dir or --use-grobid required in conjunction'
' with --pdf-file-list or --pdf-path'
)
if args.use_grobid and not args.grobid_url:
args.grobid_url = 'http://localhost:8080/api'
args.start_grobid_service = True
else:
args.start_grobid_service = False
def parse_args(argv=None):
parser = argparse.ArgumentParser()
add_main_args(parser)
add_cloud_args(parser)
args = parser.parse_args(argv)
if args.debug:
logging.getLogger().setLevel('DEBUG')
process_main_args(args, parser)
process_cloud_args(
args, args.output_path,
name='sciencebeam-convert'
)
process_sciencebeam_gym_dep_args(args)
get_logger().info('args: %s', args)
return args
def run(argv=None):
args = parse_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions.from_dictionary(vars(args))
pipeline_options.view_as(SetupOptions).save_main_session = True
with beam.Pipeline(args.runner, options=pipeline_options) as p:
configure_pipeline(p, args)
# Execute the pipeline and wait until it is completed.
if __name__ == '__main__':
logging.basicConfig(level='INFO')
run()
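
# Example invocation (hypothetical paths; one of --crf-model,
# --cv-model-export-dir or --use-grobid is required with a pdf source):
#
#   python -m sciencebeam_gym.convert.conversion_pipeline \
#     --data-path=./data \
#     --pdf-file-list=./data/file-list.tsv \
#     --crf-model=./crf-model.pkl \
#     --output-path=./data-results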
| [
"noreply@github.com"
] | pvk444.noreply@github.com |
ad1eeda9ffaa3cadb44e6a3ead7b2b13ddcc9ce1 | 6534d5a2337685df05c07c5a70cbca2ddef313d2 | /MicroDB.py | 96dfa7636e0a9c35299c0274406c879b89bd717d | [] | no_license | abirlal/NaboDB | dccbfd5842391b1964ac87c1b64c1d1df079c754 | aef7936669211998998194684b1396d6d048aeea | refs/heads/master | 2020-03-18T17:27:58.890940 | 2018-05-27T13:08:21 | 2018-05-27T13:08:21 | 135,030,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,221 | py | import os
import re
class MicroDB:
"""A customer of ABC Bank with a checking account. Customers have the
following properties:
Attributes:
DB_NAME: A string representing the customer's name.
FILE_PATH: .
"""
def __init__(self, DB_NAME, FILE_PATH = ''):
#DB_NAME = 'ticketsData_20171129'
#FILE_PATH = "C:\\Users\\abirlal.biswas\\Documents\\work_sgas\\py_test\\data\\"
self.DB_NAME = DB_NAME
self.DB_FILE_NAME = self.DB_NAME + '.ndb'
self.FILE_PATH = FILE_PATH
self.DB_FILE = FILE_PATH + self.DB_FILE_NAME
self.DBASE = {}
self.NOERROR = True
if os.path.isdir(self.FILE_PATH):
if os.path.isfile(self.DB_FILE):
self.readDB()
else:
self.NOERROR = False
print ("NDB::ALERT: No Path [%s] Found") % self.FILE_PATH
#def createDB():
def addTable(self, tableName):
if self.isTableInDB(tableName):
print ("NDB::ALERT: Table %s is already present..!!" % tableName )
return False
else:
tbl = {tableName : {'fields':[], 'data':[]}}
self.DBASE.update(tbl)
return True
def addFieldsByList(self, tableName, fieldsList):
if self.isTableInDB(tableName):
if len(self.DBASE[tableName]['fields']) > 0:
print ("NDB::ALERT: Fields are already present in Table %s. Please use addField()..!!" % tableName )
return False
else:
self.DBASE[tableName].update({ 'fields' : fieldsList })
if not 'data' in self.DBASE[tableName]:
self.DBASE[tableName].update({ 'data' : [] })
return True
else:
return False
    def addField(self, tableName, field):
        if not self.isTableInDB(tableName):
            return False
        if self.isFieldPrsentInTable(tableName, field):
            print("NDB::ALERT: Field %s is already present in Table %s...!!" % (field, tableName))
            return False
        fieldsList = self.DBASE[tableName]['fields']
        fieldsList.append(field)
        self.DBASE[tableName].update({'fields': fieldsList})
        if 'data' in self.DBASE[tableName]:
            # backfill existing rows with an empty value for the new field
            for i in range(len(self.DBASE[tableName]['data'])):
                self.DBASE[tableName]['data'][i].update({field: ''})
        else:
            self.DBASE[tableName].update({'data': []})
        return True
def addFieldValueByList(self, tableName, fieldValueList):
if self.isTableInDB(tableName):
dataRow = {}
fields = self.DBASE[tableName]['fields']
fldValPairList = self.DBASE[tableName]['data']
if len(fields) == len(fieldValueList):
for i in range(len(fields)):
dataRow.update({ fields[i] : fieldValueList[i] })
fldValPairList.append(dataRow)
self.DBASE[tableName].update({ 'data' : fldValPairList })
return True
else :
print ("NDB::ALERT: Number of Data is not maching with the number of fields" )
return False
else:
return False
def updateDataByMap(self, tableName, dataMap, searchMap = {}):
if self.isTableInDB(tableName):
fields = self.DBASE[tableName]['fields']
fieldPresentFlag = 1
for key, value in dataMap.items():
if not key in fields :
fieldPresentFlag = 0
break
if fieldPresentFlag == 1:
if searchMap == {}:
for i, e in enumerate(self.DBASE[tableName]['data']):
self.DBASE[tableName]['data'][i].update(dataMap)
else:
for i, e in enumerate(self.searchByKeyValMap(tableName, searchMap)):
self.DBASE[tableName]['data'][e].update(dataMap)
return True
else:
print "NDB::ERROR: Data Map is not propper. Field:Val pair missing...!!", dataMap
return False
else:
return False
def updateDataByDataId(self, tableName, dataMap, dataId = -1):
if self.isTableInDB(tableName):
fields = self.DBASE[tableName]['fields']
fieldPresentFlag = 1
for key, value in dataMap.items():
if not key in fields :
fieldPresentFlag = 0
break
if fieldPresentFlag == 1:
if dataId == -1:
for i, e in enumerate(self.DBASE[tableName]['data']):
self.DBASE[tableName]['data'][i].update(dataMap)
else:
self.DBASE[tableName]['data'][dataId].update(dataMap)
return True
else:
print "NDB::ERROR: Data Map is not propper. Field:Val pair missing...!!? ", dataMap
return False
else:
return False
def deleteDataByDataId(self, tableName, dataId):
if self.isTableInDB(tableName):
for i in range(len(self.DBASE[tableName]['data'])):
if i == dataId:
self.DBASE[tableName]['data'].pop(dataId)
return True
print ("NDB::ERROR: Data Id is not Present in Table %s..!!! ") % tableName
return False
else:
return False
def searchByKeyValMap(self, tableName, searchMap):
        for key, value in searchMap.items():
            if key not in self.DBASE[tableName]['fields']:
                print("NDB::ERROR: Search field %s not found in %s" % (key, tableName))
                return []
        dataIdList = []
        # collect ids of rows whose values match the search map exactly
        for i in range(len(self.DBASE[tableName]['data'])):
            if all((k in self.DBASE[tableName]['data'][i] and self.DBASE[tableName]['data'][i][k] == v) for k, v in searchMap.items()):
                dataIdList.append(i)
return dataIdList
def searchLikeByKeyValMap(self, tableName, searchMap):
        for key, value in searchMap.items():
            if key not in self.DBASE[tableName]['fields']:
                print("NDB::ERROR: Search field %s not found in %s" % (key, tableName))
                return []
        dataIdList = []
        # substring ("LIKE") match: keep rows where each search value is contained in the stored value
        for i in range(len(self.DBASE[tableName]['data'])):
            if all((k in self.DBASE[tableName]['data'][i] and v in self.DBASE[tableName]['data'][i][k]) for k, v in searchMap.items()):
                dataIdList.append(i)
return dataIdList
def orderDataByField(self, tableName, orderOnKey, isInt='n', sortOrder='asc'):
if self.isTableInDB(tableName):
if isInt == 'y':
decorated = [(int(dict_[orderOnKey]), dict_) for dict_ in self.DBASE[tableName]['data']]
else:
decorated = [(dict_[orderOnKey], dict_) for dict_ in self.DBASE[tableName]['data']]
if sortOrder == 'desc':
decorated.sort(reverse=True)
else:
decorated.sort()
self.DBASE[tableName]['data'] = [dict_ for (key, dict_) in decorated]
return True
else:
return False
def maxDataInField(self, tableName, fieldName, isInt=''):
returnVal = 'null'
if self.isTableInDB(tableName):
if isInt == 'INT':
returnVal = 0
decorated = [(int(dict_[fieldName]), dict_) for dict_ in self.DBASE[tableName]['data']]
else:
decorated = [(dict_[fieldName], dict_) for dict_ in self.DBASE[tableName]['data']]
decorated.sort(reverse=True)
if len(decorated) > 0:
returnVal = decorated[0][0]
return returnVal
else:
return returnVal
def minDataInField(self, tableName, fieldName, isInt=''):
returnVal = 'null'
if self.isTableInDB(tableName):
if isInt == 'INT':
returnVal = 0
decorated = [(int(dict_[fieldName]), dict_) for dict_ in self.DBASE[tableName]['data']]
else:
decorated = [(dict_[fieldName], dict_) for dict_ in self.DBASE[tableName]['data']]
decorated.sort()
if len(decorated) > 0:
returnVal = decorated[0][0]
return returnVal
else:
return returnVal
def showData(self, tableName, dataIds = [], fieldNames = []):
if self.isTableInDB(tableName):
isFieldNamesOkFlag = 1
if fieldNames == []:
fieldNames = self.DBASE[tableName]['fields']
else:
for j in range(len(fieldNames)):
if not fieldNames[j] in self.DBASE[tableName]['fields'] :
isFieldNamesOkFlag = 0
break
if isFieldNamesOkFlag == 1:
                if dataIds == []:
                    for i in range(len(self.DBASE[tableName]['data'])):
                        dataMap = self.DBASE[tableName]['data'][i]
                        print("NDB::DATA_ID=> %s" % i)
                        for j in range(len(fieldNames)):
                            print("NDB:: %s:%s" % (fieldNames[j], dataMap[fieldNames[j]]))
                else:
                    # dataIds holds row indexes, so look each row up by its id
                    for e in dataIds:
                        dataMap = self.DBASE[tableName]['data'][e]
                        print("NDB::DATA_ID=> %s" % e)
                        for j in range(len(fieldNames)):
                            print("NDB:: %s:%s" % (fieldNames[j], dataMap[fieldNames[j]]))
            else:
                print("NDB::ERROR: Field names are not matching in table %s" % tableName)
def getData(self, tableName, dataIds = [], fieldNames = []):
dataMapList = []
try:
if self.isTableInDB(tableName):
isFieldNamesOkFlag = True
isSelectStar = True
if fieldNames == []:
fieldNames = self.DBASE[tableName]['fields']
else:
isSelectStar = False
for j in range(len(fieldNames)):
if not fieldNames[j] in self.DBASE[tableName]['fields'] :
isFieldNamesOkFlag = 0
break
if isFieldNamesOkFlag:
if dataIds == []:
for i in range(len(self.DBASE[tableName]['data'])):
dataMap = self.DBASE[tableName]['data'][i]
if isSelectStar:
dataMapList.append(dataMap)
else:
dataSet = {}
for j in range(len(fieldNames)):
dataSet.update({fieldNames[j]: dataMap[fieldNames[j]]})
dataMapList.append(dataSet)
else:
for i, e in enumerate(dataIds):
dataMap = self.DBASE[tableName]['data'][e]
if isSelectStar:
dataMapList.append(dataMap)
else:
dataSet = {}
for j in range(len(fieldNames)):
dataSet.update({fieldNames[j]: dataMap[fieldNames[j]]})
dataMapList.append(dataSet)
            else:
                print("NDB::ALERT: Field names are not matching in table %s" % tableName)
        except Exception as exc:
            print("NDB::ERROR: Fatal error in getData(): %s" % exc)
return dataMapList
def showFields(self, tableName):
        print(self.DBASE[tableName]['fields'])
def getFields(self, tableName):
return self.DBASE[tableName]['fields']
def writeDB(self):
try:
dbFile = open( self.DB_FILE, 'w+' )
for tblName, tblData in self.DBASE.items():
dbFile.write("t~%s\n" % (tblName))
#print ("Table name: " + tblName)
fields = 'f~'
for i in range(len(tblData['fields'])):
fields = fields + tblData['fields'][i] + '`'
fields = fields[:-1]
dbFile.write("%s\n" % (fields))
#print (fields)
for i, e in enumerate(tblData['data']):
dataRow = 'd~'
for k in range(len(tblData['fields'])):
dVal = str(tblData['data'][i][tblData['fields'][k]])
dVal = dVal.replace('\n','\\n')
dVal = dVal.replace('\r','\\r')
dataRow = dataRow + dVal + '`'
dataRow = dataRow[:-1]
dbFile.write("%s\n" % (dataRow))
print "NDB::INFO: DB is saved in disk... "
dbFile.close()
except:
print "NDB::ERROR: Saving in disk failed... "
def readDB(self):
fields = []
dbFile = open( self.DB_FILE, 'r+' )
dbFileLines=dbFile.readlines()
tbl = {}
tblName = ''
fields = []
fieldData = []
fldValPairList = []
for line in dbFileLines:
if 't~' in line:
tblName = re.match(r"t~(.*)", line).group(1)
self.addTable(tblName)
if 'f~' in line:
fldValPairList = []
fieldsStr = re.match(r"f~(.*)", line).group(1)
fields = fieldsStr.split('`')
self.addFieldsByList(tblName, fields)
#print fields
if 'd~' in line:
dataRowStr = re.match(r"d~(.*)", line).group(1)
fieldData = dataRowStr.split('`')
fieldData = [word.replace('\\n','\n') for word in fieldData]
fieldData = [word.replace('\\r','\r') for word in fieldData]
self.addFieldValueByList(tblName, fieldData)
dbFile.close()
def isTableInDB(self, tableName):
if tableName in self.DBASE:
return True
else:
return False
    def isFieldPrsentInTable(self, tableName, field):
        if self.isTableInDB(tableName):
            return field in self.DBASE[tableName]['fields']
        else:
            print("NDB::ALERT: No table %s present" % tableName)
            return False
#============================================================================
def printDbTree(self):
self.walk_dict(self.DBASE)
#print(self.DBASE)
def walk_dict(self, d):
for k,v in d.items():
if k not in ('fields', 'data'):
print ("NDB::Info Table name: " + k)
if isinstance(v, dict):
self.walk_dict(v)
else:
print "%s: %s" % (k, v)
| [
"noreply@github.com"
] | abirlal.noreply@github.com |
ef7d446f6ff097c39a063014124dc20ebc83e3fa | 82df9118cb22b335e6456ba6df7f22a1179a1de4 | /Laba2/Vector.py | 490aedc3e23d5fe5de19980a2f99ee0dcac6bf35 | [] | no_license | Alessandroo/Myts | 1dd3032db5bca6087c7e447bc06dc7cb9e03321f | 96b9b7d675d38095231086544898bddcde00be80 | refs/heads/master | 2021-01-17T07:15:12.464628 | 2016-06-17T20:15:27 | 2016-06-17T20:15:27 | 52,388,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,347 | py | from itertools import starmap, zip_longest
from operator import add, mul, sub
class Vector:
def __init__(self, arg, dimension_error=True):
self.dimension_error = dimension_error
if isinstance(arg, int):
if arg < 1:
                raise ValueError('Count of Vector arguments should be more than 0')
self.vector = [0] * arg
elif isinstance(arg, list):
self.vector = arg
else:
            raise TypeError('Vector() arg should be int or list')
@property
def length(self):
return sum([n * n for n in self]) ** 0.5
@property
def dimension(self):
return len(self.vector)
def __add__(self, other):
if not isinstance(other, Vector):
raise TypeError('In operation with Vector use only other Vector')
if self.dimension_error:
if self.dimension == other.dimension:
return Vector(list(map(lambda x, y: x + y, self, other)))
else:
raise ValueError('Dimensions of Vectors are differ')
return Vector(list(starmap(add, zip_longest(self, other, fillvalue=0))))
def __sub__(self, other):
if not isinstance(other, Vector):
raise TypeError('In operation with Vector use only other Vector')
if self.dimension_error:
if self.dimension == other.dimension:
return Vector(list(map(lambda x, y: x - y, self, other)))
else:
raise ValueError('Dimensions of Vectors are differ')
return Vector(list(starmap(sub, zip_longest(self.vector, other, fillvalue=0))))
def __mul__(self, other):
if isinstance(other, (int, float)):
return Vector([num * other for num in self.vector])
elif isinstance(other, Vector):
if self.dimension_error:
if self.dimension == other.dimension:
return sum(map(lambda x, y: x * y, self, other))
else:
raise ValueError('Dimensions of Vectors are differ')
return sum(starmap(mul, zip_longest(self.vector, other, fillvalue=0)))
else:
            raise TypeError('In operation with Vector use int, float or other Vector')
def __eq__(self, other):
if not isinstance(other, Vector):
return False
return self.vector == other.vector
def __getitem__(self, key):
if isinstance(key, int):
if key < 0 or key > len(self.vector) - 1:
raise IndexError('Index out of range')
return self.vector[key]
if isinstance(key, slice):
return self.vector.__getitem__(key)
else:
raise TypeError('Index should be int or slice')
def __setitem__(self, key, value):
if isinstance(key, int):
if key < 0 or key > len(self.vector) - 1:
raise IndexError('Index out of range')
self.vector[key] = value
else:
raise TypeError('Index should be int')
def __str__(self):
return str(self.vector)
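
# Note: with dimension_error=False, vectors of different dimensions are
# combined by zero-padding the shorter one (zip_longest with fillvalue=0);
# with the default dimension_error=True a ValueError is raised instead.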
def main():
a = Vector([1, 7, 8, 9])
b = Vector([1, 7, 8, 9])
print(a)
print(a.length)
print(b)
print(b.length)
print(a == b)
print(a[1])
print(a + b)
print(a * b)
print(a - b)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | Alessandroo.noreply@github.com |
c7659badb78a370e23b3aad6289e858062144c36 | 0226cd6ebb802b35b8884b05cc5f039d3f336614 | /PJ/改数据库.py | a5feeb802c49775d6ff6348af71a942a8680131f | [] | no_license | dxcv/GDP_Alpha | 95dff41644d454076e225e813ab7b9794b960fa6 | 84c4a96d713d33af87113ea6b4c9017688852e2b | refs/heads/master | 2020-06-30T23:58:01.205292 | 2017-09-01T11:32:13 | 2017-09-01T11:32:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | import sqlite3
import pandas as pd
from datetime import datetime as dt
conn = sqlite3.connect(r'F:\project_gdp\GD.db')
indexweight = pd.read_sql_query('''select * from indexweight''', conn)
cur = conn.cursor()
cur.execute('''delete from indexweight''')  # SQLite has no TRUNCATE; DELETE FROM empties the table
conn.commit()
indexweight.index = [dt.strptime(i, "%Y-%m-%d") for i in indexweight.time]
def recreate(df):
df = df.resample('D').ffill()
df.loc[:, 'time'] = [dt.strftime(i, "%Y-%m-%d") for i in df.index]
return df
data = indexweight.groupby(['stkcd', 'indexname']).apply(recreate)
data.index = range(len(data))
import pickle
all_tradedate = pickle.load(open(r'F:\百度云同步盘\旦复旦的学习\Quant\GDP\PJ\pickle\all_tradedate', 'rb'))
data = data[data.time.isin(all_tradedate.time)]
data.to_sql('indexweight', conn, if_exists='append', index=False, index_label=None, chunksize=1000) | [
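
# Note (assumption): pandas is expected to commit the to_sql() write on this raw
# sqlite3 connection itself; if rows do not persist, add an explicit
# conn.commit() and conn.close() here.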
"gaoflaine@126.com"
] | gaoflaine@126.com |
b2ffb0440e962e6c3a74dfd70512b6bb1f5c6c01 | cd80d0937fc5dd427d9707ad7a5f4540f0f7e106 | /DataCleaning_Enrichment_UFO-master/02_filter_urls.py | e57f36b618ef5b19522a96933123b21e82f8abcf | [] | no_license | akarshgoyal/CSCI-599-Content_Detection_and_Analysis_for_Big_Data | 0230b6462a5247dec1b13fbc4017ad6b660f2088 | 2959c8a009a5352e3965c3439e94c90fb28e339c | refs/heads/master | 2020-03-21T06:30:21.940633 | 2018-06-22T17:21:51 | 2018-06-22T17:21:51 | 138,224,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # discard unsupported extensions
urls = []
exts = ('.jpg', '.jpeg', '.png', '.mp4', '.gif')
with open('https_urls.txt') as f:
    for url in f:
        url = url.rstrip('\n')
        # matching dotted suffixes avoids false positives such as "foojpg"
        if url.lower().endswith(exts):
            urls.append(url + '\n')
with open('filtered.txt', 'w') as f:
    f.writelines(urls)
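# Illustrative variant (not in the original script): URLs carrying query strings,
# e.g. "...photo.jpg?width=640", defeat a plain suffix check; parsing out the
# path component first keeps them.
from urllib.parse import urlparse
def has_supported_extension(url, exts=('.jpg', '.jpeg', '.png', '.mp4', '.gif')):
    return urlparse(url).path.lower().endswith(exts)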
| [
"akarsh.goyal15@gmail.com"
] | akarsh.goyal15@gmail.com |
169c98b0651f851a52a867cef33257ff4c8c3043 | 60e1017f6dabdd6b8ca1de4168488031b384cc8f | /pyxel/editor/piano_roll.py | cd26fe833b099ed1a7087d6f797c8bb1dadffa4b | [
"MIT"
] | permissive | 20-1-SKKU-OSS/2020-1-OSS-1 | 096dc50ab0118286d51d137382ac5252aea8da73 | 576b90c1e99dfde7b430546e3fd37336bc9f5d6c | refs/heads/master | 2022-10-10T01:00:55.735944 | 2020-06-10T01:52:34 | 2020-06-10T01:52:34 | 264,388,446 | 1 | 1 | MIT | 2020-06-06T14:51:22 | 2020-05-16T08:02:43 | C++ | UTF-8 | Python | false | false | 4,861 | py | import pyxel
from pyxel.ui import Widget
from pyxel.ui.constants import WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME
from .constants import (
EDITOR_IMAGE_X,
EDITOR_IMAGE_Y,
MAX_SOUND_LENGTH,
PIANO_ROLL_BACKGROUND_COLOR,
PIANO_ROLL_CURSOR_EDIT_COLOR,
PIANO_ROLL_CURSOR_PLAY_COLOR,
PIANO_ROLL_NOTE_COLOR,
PIANO_ROLL_REST_COLOR,
)
class PianoRoll(Widget):
def __init__(self, parent):
super().__init__(parent, 30, 25, 193, 123)
self._press_x = 0
self._press_y = 0
self.add_event_handler("mouse_down", self.__on_mouse_down)
self.add_event_handler("mouse_up", self.__on_mouse_up)
self.add_event_handler("mouse_drag", self.__on_mouse_drag)
self.add_event_handler("mouse_click", self.__on_mouse_click)
self.add_event_handler("mouse_hover", self.__on_mouse_hover)
self.add_event_handler("update", self.__on_update)
self.add_event_handler("draw", self.__on_draw)
def _screen_to_view(self, x, y):
x = min(max((x - self.x - 1) // 4, 0), MAX_SOUND_LENGTH - 1)
y = min(max(59 - (y - self.y - 1) // 2, -1), 59)
return x, y
def __on_mouse_down(self, key, x, y):
if key != pyxel.MOUSE_LEFT_BUTTON or self.parent.is_playing:
return
x, y = self._screen_to_view(x, y)
self._press_x = x
self._press_y = y
self.parent.field_cursor.move(x, 0)
def __on_mouse_up(self, key, x, y):
pass
def __on_mouse_drag(self, key, x, y, dx, dy):
if key != pyxel.MOUSE_LEFT_BUTTON or self.parent.is_playing:
return
x, y = self._screen_to_view(x, y)
if x > self._press_x:
x1 = self._press_x
y1 = self._press_y
x2 = x
y2 = y
elif x < self._press_x:
x1 = x
y1 = y
x2 = self._press_x
y2 = self._press_y
else:
return
self.parent.add_pre_history(x, 0)
data = self.parent.field_cursor.data
padding_length = self._press_x + 1 - len(data)
if padding_length > 0:
data.extend([-1] * padding_length)
self._press_x = x
self._press_y = y
self.parent.field_cursor.move(x, 0)
dx = x2 - x1
dy = y2 - y1
for i in range(dx + 1):
value = round(y1 + (dy / dx) * i)
if x1 + i >= len(data):
data.append(value)
else:
data[x1 + i] = value
self.parent.add_post_history(x, 0)
def __on_mouse_click(self, key, x, y):
if key != pyxel.MOUSE_LEFT_BUTTON or self.parent.is_playing:
return
x, y = self._screen_to_view(x, y)
self.parent.field_cursor.move(x, 0)
data = self.parent.field_cursor.data
self.parent.add_pre_history(x, 0)
padding_length = x + 1 - len(data)
if padding_length > 0:
data.extend([-1] * padding_length)
data[x] = y
self.parent.add_post_history(x, 0)
def __on_mouse_hover(self, x, y):
self.parent.help_message = "NOTE:CLICK/PIANO_KEY+ENTER/BS/DEL"
def __on_update(self):
cursor_y = self.parent.field_cursor.y
if cursor_y > 0 or self.parent.is_playing:
return
if (
pyxel.btnp(pyxel.KEY_ENTER, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME)
or pyxel.btnp(pyxel.KEY_KP_ENTER, WIDGET_HOLD_TIME, WIDGET_REPEAT_TIME)
) and self.parent.keyboard_note is not None:
self.parent.field_cursor.insert(self.parent.keyboard_note)
def __on_draw(self):
pyxel.rect(self.x, self.y, self.width, self.height, 7)
if self.parent.is_playing:
x = (self.parent.play_pos % 100) * 4 + 31
pyxel.rect(x, 25, 3, 123, PIANO_ROLL_CURSOR_PLAY_COLOR)
else:
if self.parent.field_cursor.y == 0:
x = self.parent.field_cursor.x * 4 + 31
pyxel.rect(x, 25, 3, 123, PIANO_ROLL_CURSOR_EDIT_COLOR)
pyxel.blt(
self.x,
self.y,
pyxel.IMAGE_BANK_FOR_SYSTEM,
EDITOR_IMAGE_X,
EDITOR_IMAGE_Y + 7,
193,
72,
PIANO_ROLL_BACKGROUND_COLOR,
)
pyxel.blt(
self.x,
self.y + 72,
pyxel.IMAGE_BANK_FOR_SYSTEM,
EDITOR_IMAGE_X,
EDITOR_IMAGE_Y + 7,
193,
51,
PIANO_ROLL_BACKGROUND_COLOR,
)
for i, note in enumerate(self.parent.get_data(0)):
x = i * 4 + 31
y = 143 - note * 2
pyxel.rect(
x,
y,
3,
3,
PIANO_ROLL_NOTE_COLOR if note >= 0 else PIANO_ROLL_REST_COLOR,
)
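# Illustrative sketch (not part of the upstream editor code): the grid mapping in
# PianoRoll._screen_to_view above uses 4px-wide note columns and 2px-tall pitch
# rows inside a 1px border; the widget origin defaults mirror the __init__ call.
def _demo_grid_mapping(px, py, widget_x=30, widget_y=25):
    note_index = min(max((px - widget_x - 1) // 4, 0), MAX_SOUND_LENGTH - 1)
    pitch = min(max(59 - (py - widget_y - 1) // 2, -1), 59)
    return note_index, pitch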
| [
"noreply@github.com"
] | 20-1-SKKU-OSS.noreply@github.com |
ce4853a2598aa548e3d6467111bf4e7918031c68 | d960bbd9b47ea06f36e7b147855e635d6783d499 | /contacts/models.py | 339ef02e59ad8b938c75288678035da35cd177d0 | [] | no_license | SaiAshish9/djangoJWT | 24bbd0e1e46a3ed5936ec9add28a2eacf744b117 | 89a81ba18aa8ed86ee85a73bc5eed66f4bf29108 | refs/heads/master | 2022-12-25T05:31:59.567848 | 2020-10-06T12:56:00 | 2020-10-06T12:56:00 | 301,374,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Contact(models.Model):
    owner = models.ForeignKey(to=User, on_delete=models.CASCADE)
    country_code = models.CharField(max_length=30)
    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    phone_number = models.CharField(max_length=30)
    contact_picture = models.URLField(null=True)
    is_favorite = models.BooleanField(default=True) | [
"saiashish3760@gmail.com"
] | saiashish3760@gmail.com |
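A readable string representation is a common addition to such a model. Minimal sketch (not part of the original file; the lines below assume they are indented inside the Contact class above):

    def __str__(self):
        # shown in Django admin and shell listings
        return f"{self.first_name} {self.last_name} ({self.phone_number})"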
b47afc10de0259c6eb188170b7e7ba72ae0b409f | bf813d47fc6d9433cfdd181b49f2a526f48bdd1e | /wiseowl/countries/List countries.py | 939598b76d3b887f7a0cc611c7205eed480f9a81 | [
"MIT"
] | permissive | Chameleover/Study-repo | 7493cded566b089f748330ded7624c4f11526848 | ef2d0e7f5a75ff593819a0c90b3fa02720cb472b | refs/heads/master | 2023-02-17T10:48:17.028460 | 2023-02-15T17:53:21 | 2023-02-15T17:53:21 | 89,254,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | import csv
def get_contents_of_file(continent_name):
    """Read <continent_name>.csv and return its (country, capital) pairs."""
    country_list = []
    try:
        with open(continent_name + '.csv', 'r') as file:
            for line in csv.reader(file):
                country, capital = line[0], line[1]
                country_list.append((country, capital))
    except FileNotFoundError:
        print('No such continent!')
    return country_list
def get_country_dict(country_list):
    # map each country to its capital
    return dict(country_list)
def print_results(name, country_list):
    if not country_list:
        return
    print(name)
    print('-' * len(name))
    for country, capital in country_list:
        print(f'{country} ({capital})')
# choose continent to show data for
continent_name = "Oceania"
print_results(continent_name, get_contents_of_file(continent_name))
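# Assumed input format (illustrative; the CSV files are not part of this file):
# Oceania.csv holds one "country,capital" pair per line, e.g.
#   Australia,Canberra
#   New Zealand,Wellington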
| [
"wargan506@gmail.com"
] | wargan506@gmail.com |
e0950e4b9bb231e66cce44df5d1f4fce0e4e826c | 2ff11b4a789fbdfc716e44a6186ed96a82163440 | /app.py | 50d08d5ca6b59682ba983942ac8a7aff94603639 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | cdubiel08/Earnings-Calls-NLP | 7493e4cae7cbf822aa0540968cd3581ddc4eedf7 | 8630b4246c60a574c22d440bcb1a8ee1fb985027 | refs/heads/main | 2023-05-31T13:06:18.450868 | 2023-05-09T15:48:37 | 2023-05-09T15:48:37 | 330,034,914 | 19 | 11 | null | null | null | null | UTF-8 | Python | false | false | 11,502 | py | # import necessary libraries
import os
import numpy as np
import datetime
import psycopg2
from sqlalchemy import cast, Date
import csv
from flask import (
Flask,
render_template,
jsonify,
request,
redirect)
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from sqlalchemy.orm import aliased
# Postgres database user and password import
from db_key import user, password
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# create route that renders index.html template
@app.route("/")
def home():
return render_template("index.html");
# end home() route
#################################################
# add the routes for the dashboard
@app.route("/dashboard")
def dashboard():
return render_template("dashboard.html");
## end dashboard() route
#################################################
# add the routes for the NLP
@app.route("/nlp")
def NLP():
return render_template("nlp.html");
## end NLP() route
#################################################
# add the routes for the Forecast
@app.route("/forecast")
def Forecast():
return render_template("forecast.html");
## end Forecast() route
#################################################
# add the routes for the Design
@app.route("/design")
def Design():
return render_template("design.html");
## end Design() route
# add the route for the test page
@app.route("/test")
def test():
    return render_template("index2.html")
## end test() route
#################################################
# the api retrieves the companies list
@app.route("/api/GetCompanyList")
def GetCompanyList():
with open('Resources/input/companylist.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        first_line = True
        company_data = []
        for row in data:
            if not first_line:
                entry = {  # 'entry' avoids shadowing the built-in dict
                    "Ticker": row[0],
                    "Company": row[1]
                }
                company_data.append(entry)
            else:
                first_line = False
    return jsonify(company_data)
# end company_data() route
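# Illustrative alternative (not in the original app): csv.DictReader removes the
# manual first_line bookkeeping; "Symbol" and "Name" are assumed header names and
# must be adjusted to the real columns of companylist.csv.
def _company_list_rows(path='Resources/input/companylist.csv'):
    with open(path) as csv_file:
        return [{"Ticker": r["Symbol"], "Company": r["Name"]}
                for r in csv.DictReader(csv_file)]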
#################################################
# the api retrieves the LSTM data from the csv and sends the json values
@app.route("/api/GetLSTMData/<ticker>")
def GetLSTMData(ticker):
with open('Resources/output/LSTMFinal.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        # skip the header row up front; the old first_line flag ran after the
        # ticker filter and silently dropped the first matching record
        next(data, None)
        lstm_data = []
        for row in filter(lambda row: row[0] == ticker, data):
            lstm_data.append({
                "Ticker": row[0],
                "Date": row[1],
                "Actual": row[2],
                "Pred": row[3],
                "PredLag": row[4]
            })
    return jsonify(lstm_data)
# end lstm_data() route
#################################################
# the api retrieves the GRU data from the csv and sends the json values
@app.route("/api/GetGRUData/<ticker>")
def GetGRUData(ticker):
with open('Resources/output/GRUFinal.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        next(data, None)  # skip the header row (same fix as GetLSTMData)
        gru_data = []
        for row in filter(lambda row: row[0] == ticker, data):
            gru_data.append({
                "Ticker": row[0],
                "Date": row[1],
                "Actual": row[2],
                "Pred": row[3],
                "PredLag": row[4]
            })
    return jsonify(gru_data)
# end GRU_data() route
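# The two readers above differ only in the CSV they open; a shared helper
# (illustrative sketch, not in the original file) removes the duplication:
def _read_forecast_rows(path, ticker):
    with open(path) as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        next(data, None)  # drop the header row before filtering
        return [{"Ticker": row[0], "Date": row[1], "Actual": row[2],
                 "Pred": row[3], "PredLag": row[4]}
                for row in data if row[0] == ticker]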
#################################################
# the api retrieves the data and calculates rolling averages for a stock
@app.route("/api/GetRollingAverages/<ticker>")
def GetRollingAverages(ticker):
    import pandas as pd
    with open('Resources/combined_top_25.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        lstm_data = []
        results = filter(lambda row: row[1] == ticker, data)
        df = pd.DataFrame(results)
        # csv.reader yields strings; cast the price columns before rolling math
        df[2] = df[2].astype(float)
        df[3] = df[3].astype(float)
        exp1 = df[3].ewm(span=20, adjust=False).mean()
        rolling_mean = df[3].rolling(window=20).mean()
        changepercent = df[2].pct_change()
        df['rolling_mean'] = rolling_mean
        df['exp1'] = exp1
        df['EReturn'] = changepercent
        df['mean'] = changepercent.mean()
        df['std'] = changepercent.std()
        df = df.dropna()
        for i, row in df.iterrows():
            entry = {
                "Ticker": row[1],
                "Date": row[0],
                "Close": row[3],
                "MA": row['rolling_mean'],
                "EMA": row['exp1'],
                "ER": row['EReturn'],
                "mean": row['mean'],
                "std": row['std']
            }
            lstm_data.append(entry)
    return jsonify(lstm_data)
# end GetRollingAverages() route
#################################################
# the api retrieves the data and calculates MSE, RMSE, NRMSE, MAE and R2 for a stock
@app.route("/api/GetMetrics/<ticker>")
def GetMetrics(ticker):
    import pandas as pd
    import math
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
import sklearn.metrics as metrics
# Importing the statistics module
import statistics
    # coefficient-of-variation helper (currently unused by this route)
    cv = lambda x: np.std(x, ddof=1) / np.mean(x) * 100
    # calculate performance statistics for GRU
    with open('Resources/output/GRUFinal.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        results = filter(lambda row: row[0] == ticker, data)
        df = pd.DataFrame(results)
        df[2] = df[2].astype(float)
        # blank prediction cells become 0 before the float cast
        df[4] = df[4].replace(r'^\s*$', 0, regex=True)
        df[4] = df[4].astype(float)
        df[3] = df[3].replace(r'^\s*$', 0, regex=True)
        df[3] = df[3].astype(float)
        gruMSE = mean_squared_error(df[2], df[3])
        gruRMSE = math.sqrt(gruMSE)
        gruNRMSE = gruRMSE / statistics.mean(df[2])
        gruMAE = mean_absolute_error(df[2], df[3])
        gruR2 = metrics.r2_score(df[2], df[3])
        gruMSELag = mean_squared_error(df[2], df[4])
        gruRMSELag = math.sqrt(gruMSELag)
        gruNRMSELag = gruRMSELag / statistics.mean(df[2])
        gruMAELag = mean_absolute_error(df[2], df[4])
        gruR2Lag = metrics.r2_score(df[2], df[4])
    # calculate performance statistics for LSTM
    with open('Resources/output/LSTMFinal.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        results = filter(lambda row: row[0] == ticker, data)
        df = pd.DataFrame(results)
        df[2] = df[2].astype(float)
        df[4] = df[4].replace(r'^\s*$', 0, regex=True)
        df[4] = df[4].astype(float)
        df[3] = df[3].replace(r'^\s*$', 0, regex=True)
        df[3] = df[3].astype(float)
        lstmMSE = mean_squared_error(df[2], df[3])
        lstmRMSE = math.sqrt(lstmMSE)
        lstmNRMSE = lstmRMSE / statistics.mean(df[2])
        lstmMAE = mean_absolute_error(df[2], df[3])
        lstmR2 = metrics.r2_score(df[2], df[3])
        lstmMSELag = mean_squared_error(df[2], df[4])
        lstmRMSELag = math.sqrt(lstmMSELag)
        lstmNRMSELag = lstmRMSELag / statistics.mean(df[2])
        lstmMAELag = mean_absolute_error(df[2], df[4])
        lstmR2Lag = metrics.r2_score(df[2], df[4])
    # build the response payload (4-decimal strings for display)
    statDict = {
        'lstmMSE': "{0:.4f}".format(lstmMSE),
        'lstmRMSE': "{0:.4f}".format(lstmRMSE),
        'lstmNRMSE': "{0:.4f}".format(lstmNRMSE),
        'lstmMAE': "{0:.4f}".format(lstmMAE),
        'lstmR2': "{0:.4f}".format(lstmR2),
        'lstmMSELag': "{0:.4f}".format(lstmMSELag),
        'lstmRMSELag': "{0:.4f}".format(lstmRMSELag),
        'lstmNRMSELag': "{0:.4f}".format(lstmNRMSELag),
        'lstmMAELag': "{0:.4f}".format(lstmMAELag),
        'lstmR2Lag': "{0:.4f}".format(lstmR2Lag),
        'gruMSE': "{0:.4f}".format(gruMSE),
        'gruRMSE': "{0:.4f}".format(gruRMSE),
        'gruNRMSE': "{0:.4f}".format(gruNRMSE),
        'gruMAE': "{0:.4f}".format(gruMAE),
        'gruR2': "{0:.4f}".format(gruR2),
        'gruMSELag': "{0:.4f}".format(gruMSELag),
        'gruRMSELag': "{0:.4f}".format(gruRMSELag),
        'gruNRMSELag': "{0:.4f}".format(gruNRMSELag),
        'gruMAELag': "{0:.4f}".format(gruMAELag),
        'gruR2Lag': "{0:.4f}".format(gruR2Lag)
    }
    return jsonify(statDict)  # the old second return was unreachable dead code
# end GetMetrics() route
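# Illustrative sketch (not part of the original app): the repeated metric
# computations above collapse into one helper; it assumes the sklearn, math and
# statistics imports are hoisted to module level.
def _score(actual, predicted):
    mse = mean_squared_error(actual, predicted)
    rmse = math.sqrt(mse)
    return {"MSE": mse, "RMSE": rmse,
            "NRMSE": rmse / statistics.mean(actual),
            "MAE": mean_absolute_error(actual, predicted),
            "R2": metrics.r2_score(actual, predicted)}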
#################################################
# the api reads the latest stock overview data from Resources/today.csv
@app.route("/api/GetStockData")
def GetStockData():
    import pandas as pd
    with open('Resources/today.csv') as csv_file:
        data = csv.reader(csv_file, delimiter=',')
        first_line = True
        stock_overview_data = []
for row in data:
if not first_line:
                df = pd.DataFrame({
                    "ticker": row[1],
                    "52 Week Range": row[6],
                    "Market Cap": row[7],
                    "PE Ratio": row[8],
                    "EPS": row[9],
                    "Earnings Date": row[10],
                    "Dividend Rate": row[11]
                }, index=[0])
                # render the per-ticker metrics as a small HTML fact table
                metrics_fact_table = df.to_html(classes="table table-striped")
                entry = {
                    "Ticker": row[1],
                    "Name": row[2],
                    "Open": row[3],
                    "Close": row[4],
                    "Volume": row[5],
                    "Metrics": metrics_fact_table,
                    "ShortTerm": row[12],
                    "MiddleTerm": row[13],
                    "LongTerm": row[14]
                }
                stock_overview_data.append(entry)
else:
first_line = False
return jsonify(stock_overview_data)
# end stock_overview_data() route
#################################################
if __name__ == "__main__":
app.run(debug=True)
################################################# | [
"bradhika80@gmail.com"
] | bradhika80@gmail.com |
95873ca08c4d30bbbc670f9e9f7e1b71d5522737 | 6ef25eb3aea75a54a4b0766c87e5041dd722fa1a | /Ex11.py | 4354db635e4060be57cd29b0ee6d149269d7a526 | [] | no_license | Chiperi2000/Laborator-4 | 9f59781c66b22d754433b27e5dea303e8b55a59b | ed77c26ad5f9ed08302196fecf2889756744d180 | refs/heads/master | 2023-08-26T03:20:13.008014 | 2021-11-03T14:23:26 | 2021-11-03T14:23:26 | 422,481,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | # Write a program that displays only the consonants from a text read from the console.
text = input("Scrieti textul: ")
for i in text:
if i in 'aeiouAEIOU':
print(' ', end='')
else:
print(i,end='') | [
"iulianchiperi2000@gmail.com"
] | iulianchiperi2000@gmail.com |
f79ca246e8562faf491be9c5b88ba50464753ac3 | c440fffb3910ddc20c65af12d1991a589e8be2f7 | /2b-heredity/heredity.py | dc8df347467bbd99f728945d00ab7df6f12503d7 | [] | no_license | danielzhou92/cs50ai | fb2f427e2886c57b443b98f427cc671b3a2b0d6e | eac75fccc99df22c072cfe48be4faf1b46c6023f | refs/heads/main | 2023-02-24T22:20:29.912228 | 2021-01-28T01:13:57 | 2021-01-28T01:13:57 | 331,793,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,024 | py | import csv
import itertools
import sys
PROBS = {
# Unconditional probabilities for having gene
"gene": {
2: 0.01,
1: 0.03,
0: 0.96
},
"trait": {
# Probability of trait given two copies of gene
2: {
True: 0.65,
False: 0.35
},
# Probability of trait given one copy of gene
1: {
True: 0.56,
False: 0.44
},
# Probability of trait given no gene
0: {
True: 0.01,
False: 0.99
}
},
# Mutation probability
"mutation": 0.01
}
def main():
# Check for proper usage
if len(sys.argv) != 2:
sys.exit("Usage: python heredity.py data.csv")
people = load_data(sys.argv[1])
# Keep track of gene and trait probabilities for each person
probabilities = {
person: {
"gene": {
2: 0,
1: 0,
0: 0
},
"trait": {
True: 0,
False: 0
}
}
for person in people
}
# Loop over all sets of people who might have the trait
names = set(people)
for have_trait in powerset(names):
# Check if current set of people violates known information
fails_evidence = any(
(people[person]["trait"] is not None and
people[person]["trait"] != (person in have_trait))
for person in names
)
if fails_evidence:
continue
# Loop over all sets of people who might have the gene
for one_gene in powerset(names):
for two_genes in powerset(names - one_gene):
# Update probabilities with new joint probability
p = joint_probability(people, one_gene, two_genes, have_trait)
update(probabilities, one_gene, two_genes, have_trait, p)
# Ensure probabilities sum to 1
normalize(probabilities)
# Print results
for person in people:
print(f"{person}:")
for field in probabilities[person]:
print(f" {field.capitalize()}:")
for value in probabilities[person][field]:
p = probabilities[person][field][value]
print(f" {value}: {p:.4f}")
def load_data(filename):
"""
Load gene and trait data from a file into a dictionary.
File assumed to be a CSV containing fields name, mother, father, trait.
mother, father must both be blank, or both be valid names in the CSV.
trait should be 0 or 1 if trait is known, blank otherwise.
"""
data = dict()
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
name = row["name"]
data[name] = {
"name": name,
"mother": row["mother"] or None,
"father": row["father"] or None,
"trait": (True if row["trait"] == "1" else
False if row["trait"] == "0" else None)
}
return data
def powerset(s):
"""
Return a list of all possible subsets of set s.
"""
s = list(s)
return [
set(s) for s in itertools.chain.from_iterable(
itertools.combinations(s, r) for r in range(len(s) + 1)
)
]
def joint_probability(people, one_gene, two_genes, have_trait):
"""
Compute and return a joint probability.
The probability returned should be the probability that
* everyone in set `one_gene` has one copy of the gene, and
* everyone in set `two_genes` has two copies of the gene, and
* everyone not in `one_gene` or `two_gene` does not have the gene, and
* everyone in set `have_trait` has the trait, and
* everyone not in set` have_trait` does not have the trait.
"""
joint_prob = 1
for person in people.values():
        # convert the number of copies of the gene the person has into a number
gene_num = convert_gene_to_num(one_gene, two_genes, person['name'])
# convert the have_trait variable to boolean
trait = convert_trait_to_bool(have_trait, person['name'])
# if person has known father and mother, calculate it like this
if person['mother'] and person['father']:
# calculate probability of getting a gene from mother, as well as from father (seperately)
get_from_father = chance_to_pass_gene(
one_gene, two_genes, person['father'])
get_from_mother = chance_to_pass_gene(
one_gene, two_genes, person['mother'])
not_from_father = 1 - get_from_father
not_from_mother = 1 - get_from_mother
if gene_num == 0:
# probability of not getting it from either
prob_gene = not_from_father * not_from_mother
elif gene_num == 1:
# probability of getting one from either and none from the other
prob_gene = get_from_father * not_from_mother + not_from_father * get_from_mother
else:
# probability of getting one from each parent
prob_gene = get_from_father * get_from_mother
# if person has no known father and mother, calculater it like this
else:
prob_gene = PROBS['gene'][gene_num]
prob_trait = prob_gene * PROBS['trait'][gene_num][trait]
joint_prob *= prob_trait
return joint_prob
def update(probabilities, one_gene, two_genes, have_trait, p):
"""
Add to `probabilities` a new joint probability `p`.
Each person should have their "gene" and "trait" distributions updated.
Which value for each distribution is updated depends on whether
the person is in `have_gene` and `have_trait`, respectively.
"""
for person in probabilities:
        # convert the number of copies of the gene the person has into a number
gene_num = convert_gene_to_num(one_gene, two_genes, person)
# convert the have_trait variable to boolean
trait = convert_trait_to_bool(have_trait, person)
probabilities[person]['gene'][gene_num] += p
probabilities[person]['trait'][trait] += p
def normalize(probabilities):
"""
Update `probabilities` such that each probability distribution
is normalized (i.e., sums to 1, with relative proportions the same).
"""
for person in probabilities:
gene_prob_sum = sum(probabilities[person]['gene'].values())
trait_prob_sum = sum(probabilities[person]['trait'].values())
for gene in probabilities[person]['gene']:
probabilities[person]['gene'][gene] /= gene_prob_sum
for trait in probabilities[person]['trait']:
            probabilities[person]['trait'][trait] /= trait_prob_sum
def convert_gene_to_num(one_gene, two_genes, person):
    # map the person's gene-set membership to a copy count (0, 1 or 2)
if person in one_gene:
return 1
elif person in two_genes:
return 2
else:
return 0
def convert_trait_to_bool(have_trait, person):
# convert the have_trait variable to boolean
    return person in have_trait
def chance_to_pass_gene(one_gene, two_genes, person):
    # return the probability that this person passes one copy of the gene on
    if person in one_gene:
        # sum of: the single gene being passed on and not mutating, plus the
        # non-gene copy mutating and being passed on; the mutation terms cancel
        # out, so the value is exactly 0.5 (the cancellation is left to the
        # computer for clarity rather than efficiency)
return 0.5 - PROBS['mutation'] + PROBS['mutation']
elif person in two_genes:
return 1 - PROBS['mutation']
else:
return PROBS['mutation']
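def _passing_probability_demo():
    # Illustrative sanity check (not part of the assignment): with mutation
    # probability 0.01, two copies pass the gene with 0.99, one copy with 0.5,
    # and zero copies with 0.01.
    assert abs(chance_to_pass_gene(set(), {"p"}, "p") - 0.99) < 1e-9
    assert abs(chance_to_pass_gene({"p"}, set(), "p") - 0.5) < 1e-9
    assert abs(chance_to_pass_gene(set(), set(), "p") - 0.01) < 1e-9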
if __name__ == "__main__":
main()
| [
"64497626+danielzhou92@users.noreply.github.com"
] | 64497626+danielzhou92@users.noreply.github.com |
ce82682378cab4d7bab478157bc79bb70d8c3296 | 0495373a700a1a7de0476e12932ec695bafba921 | /import_modules/__init__.py | 9bff3e063aaab282ce926a4d221f06ec8320b7e2 | [] | no_license | nuxeo-cps/legacy--CPSIO | 6e279d68794c8e5e2b8901e10b0ff19ec6406826 | b0f7bdd93ff9d9a4eb3227d4654e64fea13ccdc7 | refs/heads/main | 2023-01-22T22:28:20.651992 | 2010-12-19T19:01:41 | 2010-12-19T19:01:41 | 317,992,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | import CPS3Importer
import CPSSkinsImporter
| [
"devnull@localhost"
] | devnull@localhost |
02eb1bda52ca35144612a82ee46b3b8d53d7735a | 3061eb25fc2a019b7d28a689402e8a9691b812e6 | /05_if.py | bce76a533f21d6c66d91e3e25ae8a37ddfdb5a4c | [
"MIT"
] | permissive | Datenschule/EinsteigerWorkshop | 10ea296f0953ac8298c741a4120165deb31ac61b | 2a470adab39da68ab418af475213e4a21b90f7f8 | refs/heads/master | 2021-01-22T09:27:24.315014 | 2016-07-07T23:27:19 | 2016-07-07T23:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# A riddle
wort = input("What is the secret word? ")  # raw_input is Python 2 only
# With == we can compare two values / variables
if wort == "Alpaka":
    print("Correct! The secret word is {}!".format(wort))
else:
    print("{} is unfortunately wrong!".format(wort))
| [
"hanno@almostintelligent.de"
] | hanno@almostintelligent.de |
11b3652318123366a532aa8d94ef5e7ed5a5a75a | 3309c419189ae3c98cd967e502866226731fd390 | /bce/parser/molecule/ast_printer_mathml.py | f0c4b0b59579aee426b616a311f87260544139e0 | [] | permissive | xiguashuiguo/bce | 71ac48e552ab062593b76b5bada54e80afad5cfc | b165a11b19e5b03a6de4392ff86032847842013b | refs/heads/master | 2020-03-14T06:29:52.444580 | 2018-02-02T23:25:40 | 2018-02-02T23:25:40 | 131,485,410 | 1 | 0 | BSD-3-Clause | 2018-04-29T10:32:26 | 2018-04-29T10:32:25 | null | UTF-8 | Python | false | false | 14,048 | py | #!/usr/bin/env python
#
# Copyright 2014 - 2018 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.math.constant as _math_constant
import bce.parser.interface.printer as _interface_printer
import bce.parser.ast.molecule as _ml_ast_base
import bce.parser.molecule.ast_bfs as _ml_ast_bfs
import bce.dom.mathml.all as _mathml
def _print_operand(
value,
need_wrapping,
mexp_parser,
mexp_protected_header_enabled=False,
mexp_protected_header_prefix="X"
):
"""Print an operand.
:type need_wrapping: bool
:type mexp_parser: bce.parser.interface.mexp_parser.MathExpressionParserInterface
:type mexp_protected_header_enabled: bool
:type mexp_protected_header_prefix: str
:param value: The operand value.
:param need_wrapping: Set to True if you need to wrap the expression when it is neither an integer nor a symbol.
:param mexp_parser: The math expression parser.
:param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
:param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
:rtype : bce.dom.mathml.all.Base
:return: The printed MathML node.
"""
# Simplify.
value = value.simplify()
if value.is_Integer:
return _mathml.NumberComponent(str(value))
else:
if need_wrapping and not (value.is_Integer or value.is_Symbol):
# Use a pair of parentheses to wrap the printed expression.
r = _mathml.RowComponent()
r.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_LEFT_PARENTHESIS))
r.append_object(mexp_parser.print_out(
value,
printer_type=_interface_printer.PRINTER_TYPE_MATHML,
protected_header_enabled=mexp_protected_header_enabled,
protected_header_prefix=mexp_protected_header_prefix
))
r.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_RIGHT_PARENTHESIS))
return r
else:
return mexp_parser.print_out(
value,
printer_type=_interface_printer.PRINTER_TYPE_MATHML,
protected_header_enabled=mexp_protected_header_enabled,
protected_header_prefix=mexp_protected_header_prefix
)
def _print_super_electronic(
charge,
mexp_parser,
mexp_protected_header_enabled=False,
mexp_protected_header_prefix="X"
):
"""Print electronic charge value.
:type mexp_parser: bce.parser.interface.mexp_parser.MathExpressionParserInterface
:type mexp_protected_header_enabled: bool
:type mexp_protected_header_prefix: str
:param charge: The charge number.
:param mexp_parser: The math expression parser.
:param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
:param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
:rtype : bce.dom.mathml.all.Base
:return: The printed MathML node.
"""
# Print the positivity part.
if charge.is_negative:
charge = -charge
positivity = _mathml.OperatorComponent(_mathml.OPERATOR_MINUS)
else:
positivity = _mathml.OperatorComponent(_mathml.OPERATOR_PLUS)
# Simplify.
charge = charge.simplify()
if charge == _math_constant.ONE:
return positivity
else:
# Initialize a row component to contain the printing result.
r = _mathml.RowComponent()
# Print the charge part.
r.append_object(_print_operand(
charge,
True,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
))
# Add the positivity flag.
r.append_object(positivity)
return r
def _print_suffix(
main_dom,
node,
mexp_parser,
mexp_protected_header_enabled=False,
mexp_protected_header_prefix="X"
):
"""Print suffix part of specified node.
:type node: bce.parser.ast.molecule._ASTNodeWithSuffix
:type mexp_parser: bce.parser.interface.mexp_parser.MathExpressionParserInterface
:type mexp_protected_header_enabled: bool
:type mexp_protected_header_prefix: str
:param main_dom: The main DOM node.
:param node: The AST node.
:param mexp_parser: The math expression parser.
:param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
:param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
:rtype : bce.dom.mathml.all.Base
:return: The printed MathML node.
"""
# Print the suffix number part.
sfx = node.get_suffix_number().simplify()
if sfx != _math_constant.ONE:
sfx_dom = _print_operand(
sfx,
False,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
else:
sfx_dom = None
# Do combination and return.
if sfx_dom is None:
return main_dom
else:
return _mathml.SubComponent(main_dom, sfx_dom)
def print_ast(
root_node,
mexp_parser,
mexp_protected_header_enabled=False,
mexp_protected_header_prefix="X"
):
"""Print an AST to BCE expression.
:type root_node: bce.parser.ast.molecule.ASTNodeHydrateGroup | bce.parser.ast.molecule.ASTNodeMolecule
:type mexp_parser: bce.parser.interface.mexp_parser.MathExpressionParserInterface
:type mexp_protected_header_enabled: bool
:type mexp_protected_header_prefix: str
:param root_node: The root node of the AST.
:param mexp_parser: The math expression parser.
:param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
:param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
:rtype : bce.dom.mathml.all.Base
:return: The printed expression.
"""
# Get the printing order.
work_order = _ml_ast_bfs.do_bfs(root_node, True)
# Initialize the printed result container.
printed = {}
for work_node in work_order:
if work_node.is_hydrate_group():
assert isinstance(work_node, _ml_ast_base.ASTNodeHydrateGroup)
# Initialize a row component to contain the printing result.
build = _mathml.RowComponent()
# Print the prefix number part.
pfx = work_node.get_prefix_number().simplify()
if pfx != _math_constant.ONE:
build.append_object(_print_operand(
pfx,
True,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
))
build.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_LEFT_PARENTHESIS))
surround = True
else:
surround = False
# Print children nodes.
build.append_object(printed[id(work_node[0])])
for child_id in range(1, len(work_node)):
build.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_DOT))
build.append_object(printed[id(work_node[child_id])])
# Complete the surrounding parentheses if the flag was marked.
if surround:
build.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_RIGHT_PARENTHESIS))
# Save printing result.
printed[id(work_node)] = build
elif work_node.is_molecule():
assert isinstance(work_node, _ml_ast_base.ASTNodeMolecule)
# Initialize a row component to contain the printing result.
build = _mathml.RowComponent()
# Print the prefix number part.
pfx = work_node.get_prefix_number().simplify()
if pfx != _math_constant.ONE:
build.append_object(_print_operand(
pfx,
True,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
))
# Print children nodes.
for child_id in range(0, len(work_node)):
build.append_object(printed[id(work_node[child_id])])
el_charge = work_node.get_electronic_count().simplify()
if not el_charge.is_zero:
if len(work_node) == 0:
build.append_object(_mathml.SuperComponent(
_mathml.TextComponent("e"),
_print_super_electronic(
el_charge,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
))
else:
# Find the innermost row component.
innermost = build
while innermost[-1].is_row():
innermost = innermost[-1]
# Fetch the last item.
last_item = innermost[-1]
# Add the electronic.
if last_item.is_sub():
assert isinstance(last_item, _mathml.SubComponent)
last_item = _mathml.SubAndSuperComponent(
last_item.get_main_object(),
last_item.get_sub_object(),
_print_super_electronic(
el_charge,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
)
else:
last_item = _mathml.SuperComponent(
last_item,
_print_super_electronic(
el_charge,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
)
# Save the modified item.
innermost[-1] = last_item
# Save printing result.
printed[id(work_node)] = build
elif work_node.is_atom():
assert isinstance(work_node, _ml_ast_base.ASTNodeAtom)
# Print and save the result.
printed[id(work_node)] = _print_suffix(
_mathml.TextComponent(work_node.get_atom_symbol()),
work_node,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
elif work_node.is_parenthesis():
assert isinstance(work_node, _ml_ast_base.ASTNodeParenthesisWrapper)
# Initialize a row component to contain the printing result.
build = _mathml.RowComponent()
# Print.
build.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_LEFT_PARENTHESIS))
build.append_object(printed[id(work_node.get_inner_node())])
build.append_object(_print_suffix(
_mathml.OperatorComponent(_mathml.OPERATOR_RIGHT_PARENTHESIS),
work_node,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
))
# Save printing result.
printed[id(work_node)] = build
elif work_node.is_abbreviation():
assert isinstance(work_node, _ml_ast_base.ASTNodeAbbreviation)
# Print and save the result.
printed[id(work_node)] = _print_suffix(
_mathml.TextComponent("[%s]" % work_node.get_abbreviation_symbol()),
work_node,
mexp_parser,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
else:
raise RuntimeError("BUG: Unhandled AST node type.")
# Post process - add status.
post_process = printed[id(root_node)]
if root_node.get_status() is not None:
if not post_process.is_row():
tmp = _mathml.RowComponent()
tmp.append_object(post_process)
post_process = tmp
post_process.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_LEFT_PARENTHESIS))
if root_node.get_status() == _ml_ast_base.STATUS_GAS:
post_process.append_object(_mathml.TextComponent("g"))
elif root_node.get_status() == _ml_ast_base.STATUS_LIQUID:
post_process.append_object(_mathml.TextComponent("l"))
elif root_node.get_status() == _ml_ast_base.STATUS_SOLID:
post_process.append_object(_mathml.TextComponent("s"))
elif root_node.get_status() == _ml_ast_base.STATUS_AQUEOUS:
post_process.append_object(_mathml.TextComponent("aq"))
else:
raise RuntimeError("BUG: No such status.")
post_process.append_object(_mathml.OperatorComponent(_mathml.OPERATOR_RIGHT_PARENTHESIS))
return printed[id(root_node)]
| [
"xiaojsoft@gmail.com"
] | xiaojsoft@gmail.com |
563527056ef7f875e07be1964c095221e9bbf545 | a7004a735b6f61cec6993147af1766bc9bbb54ef | /resources.py | d9b248088eabb7578efdcfcefdfdc5c00452fea6 | [] | no_license | ValPinnaSardinia/OA_Webmap_updater | b54ac39c706faf240c351050c47dcb82992cae27 | 831e4bf7342a0119df140599698869490eb5f66b | refs/heads/main | 2023-03-19T23:17:00.474765 | 2021-03-20T14:46:48 | 2021-03-20T14:46:48 | 349,748,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,699 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.11.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x07\x53\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x19\x00\x00\x00\x18\x08\x06\x00\x00\x00\x0f\xb5\x56\xc6\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0f\xbe\x00\x00\x0f\xbe\
\x01\x4d\xf7\xd0\x43\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x77\x77\x77\x2e\x69\x6e\x6b\x73\x63\x61\
\x70\x65\x2e\x6f\x72\x67\x9b\xee\x3c\x1a\x00\x00\x06\xe0\x49\x44\
\x41\x54\x48\x89\x85\x95\x5b\x8c\x5c\x05\x1d\x87\xbf\x73\x3f\x73\
\x66\xce\xdc\x76\x67\xef\x3b\xbb\xdd\xdd\x76\x7b\xbf\xd8\x42\x6f\
\xd8\x12\x8c\xc1\x34\x18\x24\xa6\x48\x88\xa0\x26\xc5\x6a\x54\x28\
\x35\x4a\xe3\x83\x36\x7d\x30\x21\x05\x8b\xc1\x98\x28\x24\x3e\x19\
\x43\x25\x26\x35\x18\xf0\x42\xad\x56\xb6\x11\x17\x0b\xb4\x62\x5c\
\x76\xbb\xdd\xdd\xce\x76\x76\x76\xee\x73\xce\x5c\xce\xd5\x07\x83\
\x11\xa5\xf8\x7b\xfa\xe7\xff\xf0\xfd\x5e\x7e\xc9\x27\xf0\x21\x39\
\xf9\x93\xdf\xeb\x5b\xc7\xd6\x1c\x8a\xea\xca\xa1\x52\xb5\xb9\xd9\
\xf7\x99\x0c\xc3\x30\x06\x58\x09\x53\xcb\x0d\xf4\x18\x7f\xaa\x37\
\xbd\x73\x6f\xbf\x95\xbf\xf4\xf8\x17\x76\x54\x6f\xc5\x11\x3e\xe8\
\x79\xf8\x6c\x28\x1d\x10\xde\x79\x7c\x74\xc0\x3c\xa1\xca\x62\xd7\
\xd2\x8a\x85\x80\xc0\xf4\x3b\x2b\x3c\xf0\x89\x75\xcc\x5c\xaf\xa0\
\x69\x32\x57\x67\x8a\x8c\x67\x93\xe8\x9a\x64\xf5\xf7\x18\xcf\x2e\
\x5e\xab\x3d\x75\xe4\xfe\x4d\xe5\xff\x5b\x72\xe2\xa9\x8b\x3b\x7b\
\x06\x12\xcf\x6d\x9a\xe8\xda\x01\x90\x2b\x58\x28\x8a\x44\xb5\xde\
\x21\x57\xb0\x18\x1f\x4e\xb2\x90\xab\x11\x33\x54\x96\x0b\x16\xfd\
\x99\x28\x6b\x86\x12\x34\x6c\x07\x43\x93\x8b\xaa\x2a\x1c\x3d\x7c\
\xd7\xf8\x2f\x6e\x59\xf2\x8d\xa7\xa7\xee\xf1\xc2\xf0\xec\xfe\x5d\
\x83\x11\xdf\x0f\x08\xc3\x10\x45\x96\xb0\x5b\x2e\xba\x2a\x33\x7d\
\x25\xcf\xc6\xb5\x5d\x08\x80\xae\x4a\xd4\x2c\x87\x46\xd3\x45\x12\
\x05\x64\x45\x24\x69\x6a\xcc\x2d\xd6\xc2\x58\x54\x3e\xfe\xf5\x07\
\xb7\x3d\xf3\x1e\x57\x7c\xef\x78\xe4\xd4\x85\xdb\x3a\x41\xf0\xc2\
\xd8\x48\x32\x22\x0a\x50\xae\xb6\xb1\x6c\x97\x72\xad\x4d\x26\x15\
\xa1\xda\xe8\xb0\x52\xb4\x79\xe9\xc2\x35\x7e\xf0\xf3\xb7\xf1\x03\
\x58\x29\x36\x51\x64\x91\xb6\xe3\xe3\x38\x3e\xab\xe5\x16\x83\xbd\
\x51\x61\x66\xb1\xf6\xbd\x53\xcf\x4f\x1f\x79\x5f\xc9\x43\xa7\x7f\
\x1d\xdd\x32\xd9\xfd\xe2\xc8\x40\xdc\xe8\x4d\x47\x98\x5b\xac\xd1\
\x9d\x8a\xb0\x61\x2c\xcd\xa6\xf1\x34\x37\x56\x2c\x7c\x2f\x20\x04\
\x76\x6d\xea\x43\x95\x45\x14\x45\x24\x6e\xaa\x28\x8a\xc4\x86\xf1\
\x34\xfb\x77\x0c\x30\x3e\x14\xa7\xd9\xf6\xd8\xb7\xad\x5f\xc8\xf6\
\xc5\xbf\x7f\xe6\x67\x97\x47\x01\x64\x80\x11\x23\x79\xac\x37\x1d\
\xc9\xda\x1d\x1f\xbb\xe5\x51\x28\x37\x99\x59\xa8\x30\x9b\xab\xf1\
\xe9\x8f\x4d\x30\x3e\x18\x67\x76\xa9\x86\xdd\x72\xa9\xd4\xdb\x8c\
\x0d\x25\xb8\xb6\x54\xa3\x2b\xa1\x13\x84\x70\xf1\xf2\x32\x93\x23\
\x49\x26\x86\x12\x48\x82\x80\xdd\xf2\x08\xfc\xc0\x58\x33\xd8\x75\
\x06\xb8\x4f\x38\xf9\xa3\x69\x63\x74\x28\x3e\xbf\x52\x6a\xf6\x8c\
\x0c\xc4\x69\xd8\x0e\x97\xde\xba\x89\xe3\xfa\xc8\x8a\xc4\xdd\x7b\
\xb3\xac\x96\x9b\xb4\xda\x3e\xa1\x00\x7d\x69\x83\x95\xa2\xcd\x4a\
\xa9\x49\x6f\xb7\xc1\xc2\x72\x03\xdb\xf5\xd8\xbe\xbe\x87\x35\xfd\
\x71\x96\x6e\x36\x70\x3d\x1f\x33\xa6\xa1\x2b\x62\x58\xb3\x9d\x2d\
\x62\xa9\xd6\xba\x57\x91\xc4\x9e\x58\x4c\x25\x5f\xb4\xf1\xfd\x90\
\xa8\xa1\x60\xb7\x3d\x4c\x5d\xa1\x61\xbb\xcc\xdf\xa8\x33\xbb\x54\
\x65\xf1\x66\x83\xf3\x7f\x59\xe2\x46\xa9\x49\xa6\xdb\x20\x61\x6a\
\x44\xa3\x0a\xbe\x13\x60\x37\x1c\xe6\x16\xab\x48\xb2\x40\xe0\x87\
\x5c\xcf\xd5\xe8\x38\xbe\x30\x31\x9c\x7c\x58\xde\xb3\xa5\xff\xce\
\x7c\xc9\xa6\xd1\xf6\x58\x97\x4d\x92\x2f\xd8\xdc\x75\xfb\x30\xae\
\x1b\xb0\xbc\x6a\xe1\x38\x3e\xc5\x6a\x8b\xe1\xbe\x38\x5b\xd6\xa6\
\x19\xee\x33\x69\x58\x1d\x5e\x99\x5a\xc4\x8c\x28\xac\x1b\x4e\xb2\
\x7d\x32\x43\x3a\xae\x53\x6d\x74\x28\x56\x5a\x48\xb2\x88\x10\xc2\
\xc2\x72\x03\x51\x12\x0e\x8a\x99\x74\x64\xb7\xdd\xf4\xe8\x34\x5d\
\xe6\x17\x6b\xa8\xaa\x88\x08\x88\x84\x64\x92\x11\x2e\x4c\x2f\x61\
\x44\x14\x32\x49\x9d\x7d\x5b\x7b\xb1\xec\x0e\x1b\xc7\xd2\x7c\xf9\
\xfe\x2d\xfc\xf9\x4a\x9e\x68\x44\x41\x12\xfe\x35\xe1\x7a\xc7\x63\
\x78\xd0\xa4\x3b\xa9\x53\xb7\x1c\x26\x46\x92\x74\x1c\x7f\x8b\x9c\
\x2f\xda\x23\xff\x58\xa8\xd0\xdd\x65\x10\x13\xe0\xc5\x57\xe7\x78\
\x37\x5f\x47\x53\x25\xee\xd9\x3b\x4a\xcc\x50\x59\xad\x34\xc9\xf6\
\xc4\x28\xd6\x3a\x1c\x7b\xe6\x35\x0e\x7e\x64\x90\xef\x1c\xd9\x45\
\x76\x20\x8e\xae\xcb\xbc\xf0\xca\x0c\x57\x17\xcb\x28\x92\xc8\xc7\
\xf7\x66\xd9\xb9\xb6\x07\x45\x11\x89\xe8\x32\xab\xe5\x96\x21\x4b\
\xb2\x28\x55\x1b\x6d\x24\x5d\x22\x1d\x53\xf1\xfd\x00\x53\x97\x49\
\xc5\x75\xd2\x49\x1d\xcf\xf1\xb1\x9a\x0e\xd7\x97\xeb\x68\xaa\xcc\
\xb1\xcf\x6c\x65\x72\x34\xc5\x4a\xb9\x8d\x28\x0a\x04\x41\x48\xcc\
\x50\xc8\x66\x4c\x9a\xae\x47\x22\xa6\xe1\xb8\x3e\x55\xab\xc3\xcd\
\x82\x8d\x24\x12\x4a\x9f\x7a\xf0\xab\x9f\x73\xbc\xa0\x7b\xe3\x68\
\x1a\x59\x12\x11\x10\xb8\xf7\xe0\x18\x77\xef\x19\x21\x70\x03\xf2\
\xab\x36\x15\xab\x83\xef\x87\xcc\xcc\x57\x88\x45\x54\x00\xde\x5d\
\xac\x11\x06\x21\x7d\x99\x28\xf5\x86\x43\xb6\xd7\x64\x38\x13\x23\
\xf4\xc1\xf7\x03\x64\x59\x44\xd7\x24\x64\x49\x28\xca\xaa\x2c\x5d\
\xde\xb9\x3e\x33\xd9\x74\x02\x1c\x3c\x36\x8c\xa5\xf1\xbc\x80\x76\
\xc7\xa3\x50\x6c\x52\x6d\xb4\xc9\xa4\x0c\x6a\xf5\x36\xc5\x7a\x9b\
\x5c\xd1\x26\x15\xd7\x10\x43\x81\x03\xb7\x0d\x31\x7b\xbd\x4a\xe0\
\x07\xc4\x4c\x0d\x04\x70\xdc\x00\x49\x14\x18\x1d\x8c\x23\x0a\x02\
\x03\x3d\xd1\x65\xb1\xb0\xda\x7a\xb9\x3b\x15\xa1\x54\x6e\xe1\xf8\
\x01\x84\x21\xb3\x4b\x55\xfe\xf0\xfa\x12\xa3\xd9\x04\x0f\x1c\x5a\
\x8f\x2c\x40\x3c\xae\x13\xf8\x01\xae\xe3\x51\xa9\xb4\x88\x44\x24\
\xae\xce\xac\x92\x2b\x58\xdc\xbe\xad\x9f\xbd\xdb\xfb\x18\xe9\x37\
\x49\x99\x1a\x09\x53\x47\x95\x45\x5c\x2f\xc0\x8c\xc8\x6f\x48\xbb\
\xef\x7c\x78\x3e\x66\xea\x8f\x56\xac\xb6\x9a\x30\x35\x82\x10\x96\
\xf2\x0d\x14\x59\xe2\x57\x53\xd7\xf1\x42\xd8\xb9\xbe\x07\xcf\xf5\
\x19\x19\x88\x93\x1d\x4e\xb2\x77\xdb\x00\xb1\xa8\x86\xd5\x74\xd0\
\x75\x85\xdf\xbd\x71\x83\x20\x84\xdd\x9b\x7b\x59\xc8\x5b\xa4\x12\
\x1a\xed\x8e\x8f\x69\x28\x5c\xbd\x56\xfa\x9a\x78\xf2\xd1\x3d\xf5\
\xa9\x37\x73\x3f\x9c\xc8\x26\xa9\xd6\x3b\x28\xb2\xf8\xef\xbd\xaf\
\x1b\x4a\x92\x30\x54\xac\xa6\x4b\x26\x19\xe1\xca\x5c\x89\xe7\xce\
\xfd\x8d\x97\x2e\x5d\x27\x11\x53\xf1\xc2\x90\xe5\x42\x03\xdf\xf5\
\x69\x36\x5d\x6a\xb6\x87\x22\x09\x74\x5c\x9f\x8e\xe3\xe3\xfb\xc1\
\x1f\xbf\xf9\xd9\x1d\xaf\x89\x00\x9b\x37\x1a\x27\xe7\x16\xaa\xb3\
\xab\x95\x16\x61\x08\x7b\xb7\x0f\xd0\x72\x3c\xca\xd5\x16\xb9\x9b\
\x0d\xf2\x25\x1b\xc7\xf5\x29\xd6\xda\xf8\x41\xc0\xe5\x99\x22\xa9\
\x84\x86\x6d\xb9\xac\x56\x5a\x24\xa3\x1a\xbe\x17\x32\xf5\xd7\x65\
\xe2\xa6\x86\xeb\x06\x20\x08\xc1\xab\xaf\x2f\x9c\x78\x9f\x4f\xbe\
\x78\xea\xfc\x6e\x51\x95\x2f\x84\x41\xa8\x6f\x5d\x97\xa1\x66\x75\
\x68\x77\x3c\x62\x51\x95\xdc\x8a\x85\xae\x88\x5c\x99\x2d\xe1\x07\
\x21\xaa\x2a\xf1\xf9\x4f\x6e\xe4\x97\x17\xe6\xd8\xb7\x7d\x80\x86\
\xed\x10\x8b\xaa\xe8\xaa\x4c\xa1\xdc\x24\x66\x28\xcc\x2f\x95\x8f\
\x9f\x3e\x76\xc7\x99\xff\x91\xd6\x43\xdf\xfe\xcd\x61\x01\xe9\xa7\
\xa9\x84\xae\xc4\x0c\x85\xc1\x5e\x13\xc7\xf1\x48\xc6\x75\xe6\x16\
\xab\xb4\x5a\x2e\x11\x5d\x46\x14\x04\x74\x4d\xa6\xa7\xcb\x00\x42\
\xcc\x98\x46\xa9\xd2\x22\x93\x8a\x50\xa8\xb4\xc2\xbf\xcf\x16\x9f\
\x78\xf6\x89\x03\xa7\x6f\xa9\xdf\x2f\x7d\xf7\xfc\xfe\xe1\x9e\xc4\
\x73\xd9\xc1\xf8\x86\xf9\x5c\x1d\xd7\x0f\x88\x1a\x0a\x96\xe5\xa0\
\xc8\x22\x86\xa1\x10\x02\xa6\x2e\x53\xaa\x76\x48\x98\x2a\xb1\x88\
\xc2\xc6\xf1\x34\xf9\x55\x7b\x65\xea\xcd\xfc\xd1\x27\x1f\xdb\x73\
\xee\x43\x1d\x0f\x70\xf6\x6c\x28\x4d\xe7\xa6\xee\xeb\xee\x32\xbe\
\x92\x30\x23\x07\x4b\xb5\x96\xb0\xb0\x5c\xa7\x58\x69\xe1\x85\x21\
\x61\x18\xf2\xd1\x1d\x83\x28\x92\xc8\xe6\x89\x34\xcd\xa6\xe7\x5d\
\x5b\xaa\xfe\xb8\x6d\xbb\xdf\x3a\x71\x74\x57\xed\xbf\x79\x1f\x58\
\xf2\x9f\x39\xfe\xf4\xc5\x6d\xae\xcf\x23\xc3\xbd\xe6\x9e\x68\x54\
\xe9\x76\xdd\x20\xa5\x69\xb2\x26\x0a\x61\xfe\x66\xc1\x9e\xae\x5b\
\x9d\xdf\x2a\x32\x2f\x3f\xf9\xd8\x1d\x8b\xb7\x62\xfc\x13\x3c\x43\
\x33\x22\xe3\xce\xad\x4a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x11\
\x0b\xc0\x55\x22\
\x00\x4f\
\x00\x41\x00\x5f\x00\x57\x00\x65\x00\x62\x00\x4d\x00\x61\x00\x70\x00\x5f\x00\x55\x00\x70\x00\x64\x00\x61\x00\x74\x00\x65\x00\x72\
\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x78\x4f\x99\x9e\x8f\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| [
"noreply@github.com"
] | ValPinnaSardinia.noreply@github.com |
765896b42b4ef205e8d89d9ee674c03d3cccb302 | 89a558631c1a5045d6bf10f2e286a7d6d4656a18 | /rps_guess_games_loop.py | 6008443ccaad201428082daacb824f9920937cce | [] | no_license | hackrmann/learn_python | 743764efe0fdcd71d04ad4b4a60077ecd0aece90 | d37c811b2359430a6448aa3bbeded756d265ef36 | refs/heads/master | 2020-03-26T00:36:39.569470 | 2019-01-19T20:49:27 | 2019-01-19T20:49:27 | 144,327,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import random
#guessing game
# a = random.randint(0,100)
# print("Let's play a guessing game! Enter a number from 1-100")
# b = int(input())
# while(a!=b):
# if (a>b):
# print("Too low!")
# else:
# print("Too high!")
# b = int(input())
# print("Congratulations! You guessed right!")
#rock_paper_scissors advanced play against AI
a = input("Let's play rock paper scissors! Type n to quit!\nEnter your choice: ").lower()
c = ['rock','paper','scissors']
b = random.randint(0,2)
d = 0  # player score
e = 0  # computer score
while True:
    if a == "n":  # check for quit before announcing a computer move
        break
    print(f"Computer entered {c[b]}")
    if a == c[b]:
        print("Draw!")
    elif (a == c[0] and b == 1) or (a == c[1] and b == 2) or (a == c[2] and b == 0):
        # rock loses to paper, paper to scissors, scissors to rock
        print("Computer wins!")
        e = e + 1
    elif a in c:
        print("Player wins")
        d = d + 1
    else:
        print("Invalid expression! Please give proper input")
    b = random.randint(0, 2)
    a = input("Enter another: ").lower()  # lowercase here too, like the first prompt
print("Final Score\nYou:{}\nComputer:{}".format(d,e)) | [
"thejus44@gmail.com"
] | thejus44@gmail.com |
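A table-driven variant of the winner logic above (illustrative sketch, not in the original script) replaces the chained elif branches with a 'beats' mapping:

beats = {'rock': 'scissors', 'paper': 'rock', 'scissors': 'paper'}
def winner(player, computer):
    if player == computer:
        return 'draw'
    return 'player' if beats[player] == computer else 'computer'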
2a41304e453dedd199f13c1494a4b7b084da4cee | 5a4f82c4e10e09f1b6698371d1a23de36f351260 | /math/0x01-plotting/6-bars.py | 15faaea9518e2516ab67b677afee498112879a6e | [] | no_license | icculp/holbertonschool-machine_learning | 20a0bb730134e24504d406cf7a88b1f688e1905d | 5114f884241b3406940b00450d8c71f55d5d6a70 | refs/heads/main | 2023-07-16T11:58:15.976074 | 2021-08-12T16:22:13 | 2021-08-12T16:22:13 | 317,306,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(5)
fruit = np.random.randint(0, 20, (4, 3))
t = np.arange(3)
p1 = plt.bar(t, fruit[0], 0.5, color='red')
p2 = plt.bar(t, fruit[1], 0.5, color='yellow', bottom=fruit[0])
p3 = plt.bar(t, fruit[2], 0.5, color='#ff8000', bottom=fruit[0]+fruit[1])
p4 = plt.bar(t, fruit[3], 0.5, color='#ffe5b4',
bottom=fruit[0]+fruit[1]+fruit[2])
plt.ylabel('Quantity of Fruit')
plt.title('Number of Fruit per Person')
plt.xticks(t, ('Farrah', 'Fred', 'Felicia'))
plt.yticks(range(0, 90, 10))
plt.legend((p1, p2, p3, p4), ('apples', 'bananas', 'oranges', 'peaches'))
plt.tight_layout()
plt.show()
| [
"icculp@gmail.com"
] | icculp@gmail.com |
db7491cfb2ce5b08e971b2bfe3d863c1553d758a | fb33b689b8ebd54695828dab3f9d1db074203d34 | /practice/test/projectA/run_all.py | 5a578320d4873ec8c3d7ab52c7bf608d168982ce | [] | no_license | takumikaka/workspace | 471ab6e60d47f61ae36e4b95e8d58b0840188f65 | f72946ff5f46b549dfab51a0038f05478b301490 | refs/heads/master | 2021-05-06T11:58:24.334256 | 2019-03-22T08:32:18 | 2019-03-22T08:32:18 | 113,008,406 | 1 | 0 | null | 2018-02-23T12:28:54 | 2017-12-04T07:14:00 | Python | UTF-8 | Python | false | false | 550 | py | # coding:UTF-8 -*-
import unittest
from config.config import *
from lib.send_email import *
from lib.HTMLTestReportCN import HTMLTestRunner
logging.info("================================== 测试开始 ==================================")
suite = unittest.defaultTestLoader.discover(test_path)
with open(report_file, "wb") as f:
HTMLTestRunner(stream=f, title="API Report", description="测试详情", tester="C罗").run(suite)
send_email()
logging.info("================================== 测试开始 ==================================")
| [
"qk2006qk@163.com"
] | qk2006qk@163.com |
5444ea18db1e45f5cccce52d5d1d6ab3fc4ed9c0 | 00d50173c5ec9ea2db108b372cae2bd6b265520a | /3_Longest_Substring_without_repeating_Chars.py | 18becfb0a6ff7814831821c7d9315bee38bc1762 | [] | no_license | dipmukherg/My-LeetCode-Solutions | b33bb8470de2994a22f2e925a318ecdcaeab7e21 | 584515c5596286f4a793082b8c57068ffc2f8be0 | refs/heads/master | 2021-03-31T18:20:13.116409 | 2020-03-18T03:03:20 | 2020-03-18T03:03:20 | 248,124,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
        # Brute-force restart scan: grow a window of unique characters and, on a
        # repeat, restart scanning from one position past the window start.
        counter = 0
        start = 0
        max_len = 0
        lookup = set()
        idx = 0
        while idx != len(s):
            if idx == 0:
                counter = 1
                max_len = 1
                lookup.add(s[idx])
                idx += 1
            else:
                if s[idx] not in lookup:
                    lookup.add(s[idx])
                    counter += 1
                    if counter > max_len:
                        max_len = counter
                    idx += 1
                else:
                    # duplicate found: reset and rescan from start + 1
                    counter = 0
                    lookup = set()
                    start += 1
                    idx = start
        return max_len
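# Illustrative O(n) alternative (not part of the original solution): slide a
# window and jump `start` past the previous occurrence instead of rescanning.
def length_of_longest_substring_linear(s: str) -> int:
    last_seen = {}
    start = 0
    best = 0
    for idx, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= start:
            start = last_seen[ch] + 1  # shrink the window past the duplicate
        last_seen[ch] = idx
        best = max(best, idx - start + 1)
    return best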
| [
"noreply@github.com"
] | dipmukherg.noreply@github.com |
90d700d4d210dd0d70bcbbb9d34d43205593f234 | a628672f941279bf19ac0be838e9fe5d75e38f5c | /src/macuitest/lib/elements/ui/monitor.py | bbdccb44924b3d84058d8d8ddeafbf25423b1dae | [
"MIT"
] | permissive | andriykislitsyn/macuitest | 2cfeb8ee5bb2ddad67caa36c71d58f4a48e28395 | 547041fcd778355dc2e9aff3bb0a80fe7ea5e26f | refs/heads/master | 2023-08-26T18:55:18.497702 | 2021-10-15T08:25:23 | 2021-10-15T08:25:23 | 277,842,900 | 4 | 1 | Apache-2.0 | 2020-08-25T12:41:42 | 2020-07-07T14:43:16 | Python | UTF-8 | Python | false | false | 3,237 | py | from pathlib import Path
from typing import Optional
from typing import Tuple
from typing import Union
import AppKit
import numpy
import Quartz
from Foundation import NSURL
from Quartz import CGDisplayBounds
from Quartz import CGMainDisplayID
from Quartz import CoreGraphics
from macuitest.config.constants import Region
from macuitest.config.constants import ScreenSize
class Monitor:
def __init__(self):
self.__is_retina: Optional[bool] = None
self.__screen_size: Optional[ScreenSize] = None
def make_snapshot(self, region: Optional[Region] = None) -> numpy.ndarray:
h, w = (
(region.y2 - region.y1, region.x2 - region.x1)
if region
else (self.size.height, self.size.width)
)
pixel_data = self.get_pixel_data(region=region)
_image = numpy.frombuffer(pixel_data[0], dtype=numpy.uint8)
return _image.reshape((h, pixel_data[1], 4))[:, :w, :]
@property
def bytes(self):
return bytes(numpy.frombuffer(self.get_pixel_data()[0], numpy.uint8))
@property
def is_retina(self) -> bool:
if self.__is_retina is None:
self.__is_retina = AppKit.NSScreen.mainScreen().backingScaleFactor() > 1.0
return self.__is_retina
@property
def size(self) -> ScreenSize:
if self.__screen_size is None:
size = CGDisplayBounds(CGMainDisplayID()).size
self.__screen_size = ScreenSize(int(size.width), int(size.height))
return self.__screen_size
@staticmethod
def get_pixel_data(region: Optional[Region] = None):
region = (
CoreGraphics.CGRectInfinite
if region is None
else CoreGraphics.CGRectMake(
region.x1, region.y1, region.x2 - region.x1, region.y2 - region.y1
)
)
image = CoreGraphics.CGWindowListCreateImage(
region,
CoreGraphics.kCGWindowListOptionOnScreenOnly,
CoreGraphics.kCGNullWindowID,
CoreGraphics.kCGWindowImageDefault,
)
pixel_data = CoreGraphics.CGDataProviderCopyData(CoreGraphics.CGImageGetDataProvider(image))
bytes_per_row = CoreGraphics.CGImageGetBytesPerRow(image) // 4
return pixel_data, bytes_per_row
@staticmethod
def save_screenshot(
where: Union[str, Path], region: Optional[Tuple[int, int, int, int]] = None
) -> Union[str, Path]:
"""Take a screenshot and save it to `where.
Note: Region is defined by (x, y) pair of top left point, and width, length params.
"""
region = CoreGraphics.CGRectInfinite if region is None else CoreGraphics.CGRectMake(*region)
image = CoreGraphics.CGWindowListCreateImage(
region,
CoreGraphics.kCGWindowListOptionOnScreenOnly,
CoreGraphics.kCGNullWindowID,
CoreGraphics.kCGWindowImageDefault,
)
destination = Quartz.CGImageDestinationCreateWithURL(
NSURL.fileURLWithPath_(str(where)), "public.png", 1, None
)
Quartz.CGImageDestinationAddImage(destination, image, dict())
Quartz.CGImageDestinationFinalize(destination)
return where
monitor = Monitor()
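if __name__ == "__main__":
    # Illustrative usage (not part of the original module; macOS only): grab the
    # screen into an H x W x 4 uint8 array and write a PNG; the path is an example.
    frame = monitor.make_snapshot()
    print(frame.shape, monitor.size, monitor.is_retina)
    monitor.save_screenshot("/tmp/macuitest_screen.png")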
| [
"andriykislitsyn@gmail.com"
] | andriykislitsyn@gmail.com |
010eef8bba00a52ad44b28b1f4186a1236169a07 | c049636b9fa7435b8023752fe1dfd88dc676ab8f | /src/assets/locales/jsonGenerator.py | f968c99022a20e02aa0f9810fc26c6f8c329618d | [] | no_license | YashIsDeep/react-i18support | bedd61a787ad7b4f7640dd444a2d6937d2de9125 | 6053a97b8c946bd563d6af58eeb13dc77c786eae | refs/heads/master | 2022-10-23T21:41:22.155056 | 2020-06-16T11:53:57 | 2020-06-16T11:53:57 | 267,530,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | import sys
import random
def gen(length):
    alphabets = "abcdefghijklmnopqrstuvwxyz"
    out = ""  # renamed from `str` to avoid shadowing the built-in
    for i in range(length):
        out = out + alphabets[random.randrange(0, 26)]
    return out
N = 10000
KEY_LEN = 5  # renamed from `len` to avoid shadowing the built-in
print("{")
for i in range(N - 1):
    randString = gen(KEY_LEN)
    print("\t\"" + randString + "\":\"lg." + randString + "\",")
randString = gen(KEY_LEN)
print("\t\"" + randString + "\":\"lg." + randString + "\"")
print("}") | [
"yashparth.gupta@gmail.com"
] | yashparth.gupta@gmail.com |
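Random keys can collide, which would produce duplicate JSON keys; an illustrative alternative (not in the original script) builds a dict and serializes it with the json module instead:

import json
def gen_locale_json(n=10000, key_len=5):
    keys = {gen(key_len) for _ in range(n)}  # a set silently drops duplicates
    return json.dumps({k: "lg." + k for k in sorted(keys)}, indent=2)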