Dataset schema (one row per source file; nullable columns may contain null):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
hexsha 584b6938b21baa80544be5899accf3e8f5524589 | size 218 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | Modulo 3/HelloWorld.py | antonio343/clase | fda04a606246695aa5d93c8b2b5e2890a16d5973 | MIT | null | null | null |
| issues | Modulo 3/HelloWorld.py | antonio343/clase | fda04a606246695aa5d93c8b2b5e2890a16d5973 | MIT | null | null | null |
| forks | Modulo 3/HelloWorld.py | antonio343/clase | fda04a606246695aa5d93c8b2b5e2890a16d5973 | MIT | null | null | null |

content:
import sys

print("Hello world, I am", sys.executable, sys.version)
x = input("Give me a number greater than zero: ")
x = int(x)
if x < 0:
    print('Negative changed to zero')
    x = 0
print(f"The final value of x is: {x}")
avg_line_length 19.818182 | max_line_length 54 | alphanum_fraction 0.646789 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 108 | score_documentation 0.495413
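The file above feeds `input()` straight into `int()`, which raises `ValueError` on non-numeric text. A minimal hedged sketch of a retry loop (the helper name and prompt are illustrative, not part of the original file):

```python
# Hypothetical helper, not part of HelloWorld.py: retry until int() parses.
def read_int(prompt="Give me a number greater than zero: "):
    while True:
        raw = input(prompt)
        try:
            return int(raw)
        except ValueError:
            print(f"{raw!r} is not an integer, try again")
```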
hexsha 584b955b3560453b5439bd686f05b35e554caf34 | size 436 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | rasiberryPiGPIOBaseController/driver/test.py | onwebbe/rasiberryPiBaseController | bdb81cb5a0e62414fa091635a83db799017249e7 | MIT | null | null | null |
| issues | rasiberryPiGPIOBaseController/driver/test.py | onwebbe/rasiberryPiBaseController | bdb81cb5a0e62414fa091635a83db799017249e7 | MIT | null | null | null |
| forks | rasiberryPiGPIOBaseController/driver/test.py | onwebbe/rasiberryPiBaseController | bdb81cb5a0e62414fa091635a83db799017249e7 | MIT | null | null | null |

content:
def convertToHEXForChar(charList):
    # Map each character to its Unicode code point via ord().
    convertedCharList = []
    for message in charList:
        convertedCharList.append(ord(message))
    return convertedCharList


def displayChar(line, *args):
    # Flatten the argument lists into one list, then print its length
    # and each value in turn.
    concatedList = []
    for argItem in args:
        concatedList.extend(argItem)
    print(len(concatedList))
    for message in concatedList:
        print(message)


def main():
    displayChar(0, [0x00], convertToHEXForChar("! Raspberry Pi"))


main()
avg_line_length 22.947368 | max_line_length 63 | alphanum_fraction 0.713303 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 16 | score_documentation 0.036697
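`convertToHEXForChar` is just a character-to-code-point mapping, so it is easy to sanity-check in isolation. A hedged stand-alone equivalent (the `convert` name is illustrative):

```python
# Stand-alone equivalent of convertToHEXForChar, for a quick check.
convert = lambda s: [ord(c) for c in s]
print(convert("Pi"))                    # [80, 105]
print([hex(v) for v in convert("Pi")])  # ['0x50', '0x69']
```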
hexsha 584bc0b37c22b1a874521a0f4acbe34fb62b2cde | size 17,670 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | Acquire/Client/_user.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | Apache-2.0 | 2 | 2019-02-15T16:04:19.000Z | 2019-02-19T15:42:27.000Z |
| issues | Acquire/Client/_user.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | Apache-2.0 | null | null | null |
| forks | Acquire/Client/_user.py | michellab/BioSimSpaceCloud | 456b146a2131565e354352872d3e75a08c3652d1 | Apache-2.0 | null | null | null |

content:
import os as _os
from enum import Enum as _Enum
from datetime import datetime as _datetime
import time as _time

from Acquire.Service import call_function as _call_function
from Acquire.Service import Service as _Service

from Acquire.ObjectStore import bytes_to_string as _bytes_to_string
from Acquire.ObjectStore import string_to_bytes as _string_to_bytes

from Acquire.Crypto import PrivateKey as _PrivateKey
from Acquire.Crypto import PublicKey as _PublicKey

from ._qrcode import create_qrcode as _create_qrcode
from ._qrcode import has_qrcode as _has_qrcode

from ._errors import UserError, LoginError

# If we can, import socket to get the hostname and IP address
try:
    import socket as _socket
    _has_socket = True
except:
    _has_socket = False

__all__ = ["User", "username_to_uid", "uid_to_username", "get_session_keys"]


class _LoginStatus(_Enum):
    EMPTY = 0
    LOGGING_IN = 1
    LOGGED_IN = 2
    LOGGED_OUT = 3
    ERROR = 4


def _get_identity_url():
    """Function to discover and return the default identity url"""
    return "http://130.61.60.88:8080/t/identity"


def _get_identity_service(identity_url=None):
    """Function to return the identity service for the system"""
    if identity_url is None:
        identity_url = _get_identity_url()

    privkey = _PrivateKey()
    response = _call_function(identity_url, response_key=privkey)

    try:
        service = _Service.from_data(response["service_info"])
    except:
        raise LoginError("Have not received the identity service info from "
                         "the identity service at '%s' - got '%s'" %
                         (identity_url, response))

    if not service.is_identity_service():
        raise LoginError(
            "You can only use a valid identity service to log in! "
            "The service at '%s' is a '%s'" %
            (identity_url, service.service_type()))

    if identity_url != service.service_url():
        service.update_service_url(identity_url)

    return service


def uid_to_username(user_uid, identity_url=None):
    """Function to return the username for the passed uid"""
    if identity_url is None:
        identity_url = _get_identity_url()

    response = _call_function(identity_url, "whois",
                              user_uid=str(user_uid))
    return response["username"]


def username_to_uid(username, identity_url=None):
    """Function to return the uid for the passed username"""
    if identity_url is None:
        identity_url = _get_identity_url()

    response = _call_function(identity_url, "whois",
                              username=str(username))
    return response["user_uid"]


def get_session_keys(username=None, user_uid=None, session_uid=None,
                     identity_url=None):
    """Function to return the session keys for the specified user"""
    if username is None and user_uid is None:
        raise ValueError("You must supply either the username or user_uid!")

    if session_uid is None:
        raise ValueError("You must supply a valid UID for a login session")

    if identity_url is None:
        identity_url = _get_identity_url()

    response = _call_function(identity_url, "whois",
                              username=username,
                              user_uid=user_uid,
                              session_uid=session_uid)

    try:
        response["public_key"] = _PublicKey.from_data(response["public_key"])
    except:
        pass

    try:
        response["public_cert"] = _PublicKey.from_data(response["public_cert"])
    except:
        pass

    return response


class User:
    """This class holds all functionality that would be used
       by a user to authenticate with and access the service.
       This represents a single client login, and is the
       user-facing part of Acquire
    """
    def __init__(self, username, identity_url=None):
        """Construct a null user"""
        self._username = username
        self._status = _LoginStatus.EMPTY
        self._identity_service = None

        if identity_url:
            self._identity_url = identity_url

        self._user_uid = None

    def __str__(self):
        return "User(name='%s', status=%s)" % (self.username(), self.status())

    def __enter__(self):
        """Enter function used by 'with' statements"""
        # return self so that "with User(...) as user:" binds the object
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        """Ensure that we logout"""
        self.logout()

    def __del__(self):
        """Make sure that we log out before deleting this object"""
        self.logout()

    def _set_status(self, status):
        """Internal function used to set the status from the
           string obtained from the LoginSession"""
        if status == "approved":
            self._status = _LoginStatus.LOGGED_IN
        elif status == "denied":
            self._set_error_state("Permission to log in was denied!")
        elif status == "logged_out":
            self._status = _LoginStatus.LOGGED_OUT

    def username(self):
        """Return the username of the user"""
        return self._username

    def uid(self):
        """Return the UID of this user. This uniquely identifies the
           user across all systems
        """
        if self._user_uid is None:
            self._user_uid = username_to_uid(self.username(),
                                             self.identity_service_url())
        return self._user_uid

    def status(self):
        """Return the current status of this user"""
        return self._status

    def _check_for_error(self):
        """Call to ensure that this object is not in an error
           state. If it is in an error state then raise an
           exception"""
        if self._status == _LoginStatus.ERROR:
            raise LoginError(self._error_string)

    def _set_error_state(self, message):
        """Put this object into an error state, displaying the
           passed message if anyone tries to use this object"""
        self._status = _LoginStatus.ERROR
        self._error_string = message

    def session_key(self):
        """Return the session key for the current login session"""
        self._check_for_error()
        try:
            return self._session_key
        except:
            return None

    def signing_key(self):
        """Return the signing key used for the current login session"""
        self._check_for_error()
        try:
            return self._signing_key
        except:
            return None

    def identity_service(self):
        """Return the identity service info object for the identity
           service used to validate the identity of this user
        """
        if self._identity_service:
            return self._identity_service

        self._identity_service = _get_identity_service(
                                        self.identity_service_url())
        return self._identity_service

    def identity_service_url(self):
        """Return the URL to the identity service. This is the full URL
           to the service, minus the actual function to be called, e.g.
           https://function_service.com/t/identity
        """
        self._check_for_error()
        try:
            return self._identity_url
        except:
            # return the default URL - this should be discovered...
            return _get_identity_url()

    def login_url(self):
        """Return the URL that the user must connect to to authenticate
           this login session"""
        self._check_for_error()
        try:
            return self._login_url
        except:
            return None

    def login_qr_code(self):
        """Return a QR code of the login URL that the user must connect to
           to authenticate this login session"""
        self._check_for_error()
        try:
            return self._login_qrcode
        except:
            return None

    def session_uid(self):
        """Return the UID of the current login session. Returns None
           if there is no valid login session"""
        self._check_for_error()
        try:
            return self._session_uid
        except:
            return None

    def is_empty(self):
        """Return whether or not this is an empty login (so has not
           been used for anything yet...)"""
        return self._status == _LoginStatus.EMPTY

    def is_logged_in(self):
        """Return whether or not the user has successfully logged in"""
        return self._status == _LoginStatus.LOGGED_IN

    def is_logging_in(self):
        """Return whether or not the user is in the process of logging in"""
        return self._status == _LoginStatus.LOGGING_IN

    def logout(self):
        """Log out from the current session"""
        if self.is_logged_in() or self.is_logging_in():
            identity_url = self.identity_service_url()

            if identity_url is None:
                return

            # create a permission message that can be signed
            # and then validated by the user
            permission = "Log out request for %s" % self._session_uid
            signature = self.signing_key().sign(permission)

            print("Logging out %s from session %s" % (self._username,
                                                      self._session_uid))

            result = _call_function(
                identity_url, "logout",
                args_key=self.identity_service().public_key(),
                username=self._username,
                session_uid=self._session_uid,
                permission=permission,
                signature=_bytes_to_string(signature))
            print(result)

            self._status = _LoginStatus.LOGGED_OUT

            return result

    def register(self, password, identity_url=None):
        """Request to register this user with the identity service running
           at 'identity_url', using the supplied 'password'. This will
           return a QR code that you must use immediately to add this
           user on the identity service to a QR code generator"""
        if self._username is None:
            return None

        if identity_url is None:
            identity_url = _get_identity_url()

        privkey = _PrivateKey()

        result = _call_function(
            identity_url, "register",
            args_key=self.identity_service().public_key(),
            response_key=privkey,
            public_cert=self.identity_service().public_certificate(),
            username=self._username, password=password)

        try:
            provisioning_uri = result["provisioning_uri"]
        except:
            raise UserError(
                "Cannot register the user '%s' on "
                "the identity service at '%s'!" %
                (self._username, identity_url))

        # return a QR code for the provisioning URI
        return (provisioning_uri, _create_qrcode(provisioning_uri))

    def request_login(self, login_message=None):
        """Request to authenticate as this user. This returns a login URL that
           you must connect to to supply your login credentials

           If 'login_message' is supplied, then this is passed to
           the identity service so that it can be displayed
           when the user accesses the login page. This helps
           the user validate that they have accessed the correct
           login page. Note that if the message is None,
           then a random message will be generated.
        """
        self._check_for_error()

        if not self.is_empty():
            raise LoginError("You cannot try to log in twice using the same "
                             "User object. Create another object if you want "
                             "to try to log in again.")

        # first, create a private key that will be used
        # to sign all requests and identify this login
        session_key = _PrivateKey()
        signing_key = _PrivateKey()

        args = {"username": self._username,
                "public_key": session_key.public_key().to_data(),
                "public_certificate": signing_key.public_key().to_data(),
                "ipaddr": None}

        # get information from the local machine to help
        # the user validate that the login details are correct
        if _has_socket:
            hostname = _socket.gethostname()
            ipaddr = _socket.gethostbyname(hostname)
            args["ipaddr"] = ipaddr
            args["hostname"] = hostname

        if login_message is None:
            login_message = "User '%s' in process '%s' wants to log in..." % \
                            (_os.getlogin(), _os.getpid())

        args["message"] = login_message

        result = _call_function(
            self.identity_service_url(), "request_login",
            args_key=self.identity_service().public_key(),
            response_key=session_key,
            public_cert=self.identity_service().public_certificate(),
            username=self._username,
            public_key=session_key.public_key().to_data(),
            public_certificate=signing_key.public_key().to_data(),
            ipaddr=None,
            message=login_message)

        # look for status = 0
        try:
            status = int(result["status"])
        except:
            status = -1

        try:
            message = result["message"]
        except:
            message = str(result)

        try:
            prune_message = result["prune_message"]
            print("Pruning old sessions...\n%s" % "\n".join(prune_message))
        except:
            pass

        if status != 0:
            error = "Failed to login. Error = %d. Message = %s" % \
                    (status, message)
            self._set_error_state(error)
            raise LoginError(error)

        try:
            login_url = result["login_url"]
        except:
            login_url = None

        if login_url is None:
            error = "Failed to login. Could not extract the login URL! " \
                    "Result is %s" % (str(result))
            self._set_error_state(error)
            raise LoginError(error)

        try:
            session_uid = result["session_uid"]
        except:
            session_uid = None

        if session_uid is None:
            error = "Failed to login. Could not extract the login " \
                    "session UID! Result is %s" % (str(result))
            self._set_error_state(error)
            raise LoginError(error)

        # now save all of the needed data
        self._login_url = result["login_url"]
        self._session_key = session_key
        self._signing_key = signing_key
        self._session_uid = session_uid
        self._status = _LoginStatus.LOGGING_IN
        self._user_uid = result["user_uid"]

        qrcode = None

        if _has_qrcode():
            try:
                self._login_qrcode = _create_qrcode(self._login_url)
                qrcode = self._login_qrcode
            except:
                pass

        return (self._login_url, qrcode)

    def _poll_session_status(self):
        """Function used to query the identity service for this session
           to poll for the session status"""
        identity_url = self.identity_service_url()

        if identity_url is None:
            return

        result = _call_function(identity_url, "get_status",
                                username=self._username,
                                session_uid=self._session_uid)

        # look for status = 0
        try:
            status = int(result["status"])
        except:
            status = -1

        try:
            message = result["message"]
        except:
            message = str(result)

        if status != 0:
            error = "Failed to query identity service. Error = %d. " \
                    "Message = %s" % (status, message)
            self._set_error_state(error)
            raise LoginError(error)

        # now update the status...
        status = result["session_status"]
        self._set_status(status)

    def wait_for_login(self, timeout=None, polling_delta=5):
        """Block until the user has logged in. If 'timeout' is set
           then we will wait for a maximum of that number of seconds

           This will check whether we have logged in by polling
           the identity service every 'polling_delta' seconds.
        """
        self._check_for_error()

        if not self.is_logging_in():
            return self.is_logged_in()

        polling_delta = int(polling_delta)
        if polling_delta > 60:
            polling_delta = 60
        elif polling_delta < 1:
            polling_delta = 1

        if timeout is None:
            # block forever....
            while True:
                self._poll_session_status()

                if self.is_logged_in():
                    return True
                elif not self.is_logging_in():
                    return False

                _time.sleep(polling_delta)
        else:
            # only block until the timeout has been reached
            timeout = int(timeout)
            if timeout < 1:
                timeout = 1

            start_time = _datetime.now()

            while (_datetime.now() - start_time).seconds < timeout:
                self._poll_session_status()

                if self.is_logged_in():
                    return True
                elif not self.is_logging_in():
                    return False

                _time.sleep(polling_delta)

            return False
avg_line_length 32.244526 | max_line_length 79 | alphanum_fraction 0.590323 | count_classes 14,177 | score_classes 0.80232 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 5,409 | score_documentation 0.306112
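The `User` class above implements a request/poll login handshake: `request_login()` registers a session and returns a login URL (plus a QR code when available), `wait_for_login()` polls the identity service until the session is approved, and `logout()` signs and sends a log-out request. A hedged sketch of the intended call sequence (the import path assumes the package re-exports `User`; the username and identity URL are placeholders, not values from the repo):

```python
# Hedged usage sketch for Acquire's User class; "alice" and the
# identity URL are illustrative placeholders.
from Acquire.Client import User

user = User("alice", identity_url="https://example.com/t/identity")
login_url, qrcode = user.request_login()   # session is now LOGGING_IN
print("Authenticate at:", login_url)

if user.wait_for_login(timeout=300, polling_delta=5):
    print("Logged in as", user.username(), "uid", user.uid())
    user.logout()                          # signs and sends a logout request
```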
hexsha 584c241bf384f1ee86da8eb49a7b42c532f3a92a | size 8,007 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | botasky/utils/MyMAIL.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | MIT | null | null | null |
| issues | botasky/utils/MyMAIL.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | MIT | null | null | null |
| forks | botasky/utils/MyMAIL.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | MIT | null | null | null |

content:
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
Created on 2017-4-06

@module: MyMAIL
@used: send mail
"""
import smtplib
import mimetypes
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage

from MyLOG import MyLog
from botasky.utils.MyFILE import project_abdir, recursiveSearchFile

logConfig = recursiveSearchFile(project_abdir, '*logConfig.ini')[0]
mylog = MyLog(logConfig, 'MyMAIL.py')
logger = mylog.outputLog()

__all__ = ['MyMail']
__author__ = 'zhihao'

mail_info = {'mail_host': 'smtp.163.com',
             'mail_user': '15895890858',
             'mail_pass': 'zhi@hao@111',
             'mail_postfix': '163.com'}


class MyMail():
    '''
    used : send mail
    '''
    def __init__(self, mail_info):
        '''
        used : init mail
        :param mail_info: smtp server config
        '''
        self.mail_info = mail_info

    def send_mail(self, to_list, mail_type, subject, content, attachment_list, img_list):
        '''
        used : send mail
        :param to_list: target mail addresses
        :param mail_type: plain or html
        :param subject: title
        :param content: main body
        :param attachment_list: attachments
        :param img_list: pictures
        :return:
        '''
        my_adress = "0905zhihao" + "<" + self.mail_info['mail_user'] + "@" + self.mail_info['mail_postfix'] + ">"
        msg = MIMEMultipart()
        msg['Subject'] = subject
        msg['From'] = my_adress
        msg['To'] = ";".join(to_list)

        # main text
        if mail_type == 'plain' or mail_type == 'html':
            try:
                body_msg = MIMEText(content, _subtype=mail_type, _charset='gb2312')
                msg.attach(body_msg)
                exec_info = "[action]:init msg" \
                            "[status]:OK" \
                            "[Subject]:{Subject}" \
                            "[From]:{From}" \
                            "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'])
                logger.info(exec_info)
            except Exception, e:
                print Exception, ":", e
                error_msg = "[action]:init msg" \
                            "[status]:FAIL" \
                            "[Errorcode]:{e}" \
                            "[Subject]:{Subject}" \
                            "[From]:{From}" \
                            "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'], e=e)
                logger.error(error_msg)
        else:
            error_msg = "[action]:send mail_type" \
                        "[status]:FAIL" \
                        "[Errorcode]:mail_type is not a supported format" \
                        "[Subject]:{Subject}" \
                        "[From]:{From}" \
                        "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'])
            print error_msg
            logger.info(error_msg)

        # attachments
        if attachment_list == '' or len(attachment_list) == 0:
            pass
        else:
            for attachment in attachment_list:
                try:
                    att = MIMEText(open(attachment, 'rb').read(), 'base64', 'gb2312')
                    att["Content-Type"] = 'application/octet-stream'
                    # display name (a stray trailing quote in the original header value removed)
                    att["Content-Disposition"] = 'attachment; filename="%s"' % attachment
                    msg.attach(att)
                    exec_info = "[action]:add attachment" \
                                "[status]:OK" \
                                "[attachment]:{attachment}" \
                                "[Subject]:{Subject}" \
                                "[From]:{From}" \
                                "[To]:{To}".format(attachment=attachment, Subject=msg['Subject'],
                                                   From=msg['From'], To=msg['To'])
                    logger.info(exec_info)
                except Exception, e:
                    print Exception, ":", e
                    error_msg = "[action]:add attachment" \
                                "[status]:FAIL" \
                                "[Errorcode]:{e}" \
                                "[attachment]={attachment}" \
                                "[Subject]:{Subject}" \
                                "[From]:{From}" \
                                "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'],
                                                   attachment=attachment, To=msg['To'], e=e)
                    logger.error(error_msg)

        # images
        if img_list == '' or len(img_list) == 0:
            pass
        else:
            for image_adress in img_list:
                try:
                    image = MIMEImage(open(image_adress, 'rb').read())
                    image.add_header('Content-ID', '<image1>')
                    msg.attach(image)
                    exec_info = "[action]:add image" \
                                "[status]:OK" \
                                "[image]:{image}" \
                                "[Subject]:{Subject}" \
                                "[From]:{From}" \
                                "[To]:{To}".format(image=image_adress, Subject=msg['Subject'],
                                                   From=msg['From'], To=msg['To'])
                    logger.info(exec_info)
                except Exception, e:
                    print Exception, ":", e
                    error_msg = "[action]:add image" \
                                "[status]:FAIL" \
                                "[Errorcode]:{e}" \
                                "[image]:{image}" \
                                "[Subject]:{Subject}" \
                                "[From]:{From}" \
                                "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'],
                                                   image=image_adress, To=msg['To'], e=e)
                    logger.error(error_msg)

        # send mail
        try:
            server = smtplib.SMTP()
            server.connect(self.mail_info['mail_host'])
            server.login(self.mail_info['mail_user'], self.mail_info['mail_pass'])
            # pass the recipient list itself; a ";"-joined string would be
            # treated as a single address by sendmail()
            server.sendmail(msg['From'], to_list, msg.as_string())
            server.quit()
            exec_info = "[action]:send mail" \
                        "[status]:OK" \
                        "[Subject]:{Subject}" \
                        "[From]:{From}" \
                        "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'])
            logger.info(exec_info)
        except Exception, e:
            print Exception, ":", e
            error_msg = "[action]:send mail" \
                        "[status]:FAIL" \
                        "[Errorcode]:{e}" \
                        "[Subject]:{Subject}" \
                        "[From]:{From}" \
                        "[To]:{To}".format(Subject=msg['Subject'], From=msg['From'], To=msg['To'], e=e)
            logger.error(error_msg)


if __name__ == '__main__':
    '''
    mail_info = {'mail_host': 'smtp.163.com',
                 'mail_user': '15002283621',
                 'mail_pass': 'zhihao1206',
                 'mail_postfix': '163.com'}
    #to_list = ['15002283621@163.com']
    to_list = ['1204207658@qq.com']
    subject = 'xxxxxxxxxxxxx'
    content = 'xxxxxxxxxxxxx'
    #attachment_list = ['F:\img\img.rar', 'F:\img\img2.rar']
    attachment_list = []
    #img_list = ['F:\img\\1025.jpg', 'F:\img\\1041.jpg']
    img_list = []

    mail = MyMail(mail_info)
    mail.send_mail(to_list, 'plain', subject, content, attachment_list, img_list)
    '''
    import MyMAIL
    help(MyMAIL)
avg_line_length 36.729358 | max_line_length 114 | alphanum_fraction 0.429374 | count_classes 6,562 | score_classes 0.819533 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 2,737 | score_documentation 0.341826
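The module above targets Python 2.7 (note the `except Exception, e` and `print` statement syntax). A hedged sketch of the same send path on Python 3's standard library, with placeholder host, sender, and credentials:

```python
# Hedged Python 3 equivalent of the send path above; the SMTP host,
# addresses, and credentials are illustrative placeholders.
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg['Subject'] = 'test'
msg['From'] = 'sender@example.com'
msg['To'] = 'receiver@example.com'
msg.attach(MIMEText('hello', 'plain', 'utf-8'))

with smtplib.SMTP('smtp.example.com') as server:
    server.login('user', 'password')
    # sendmail takes the envelope sender and a *list* of recipients
    server.sendmail(msg['From'], [msg['To']], msg.as_string())
```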
hexsha 584dcc24968eeec28c6969e280feb5d4d108b6e6 | size 7,694 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | db_adapter/curw_fcst/source/source_utils.py | CUrW-SL/curw_db_adapter | 9d9ef24f42080910e0bd251bc7f001b0a4b0ab31 | MIT | 2 | 2019-04-26T07:50:33.000Z | 2019-09-28T20:15:33.000Z |
| issues | db_adapter/curw_fcst/source/source_utils.py | CUrW-SL/curw_db_adapter | 9d9ef24f42080910e0bd251bc7f001b0a4b0ab31 | MIT | 1 | 2019-04-03T09:30:38.000Z | 2019-04-20T18:11:59.000Z |
| forks | db_adapter/curw_fcst/source/source_utils.py | shadhini/curw_db_adapter | 4db8e1ea8794ffbd0dce29ac954a13315e83d843 | MIT | null | null | null |

content:
import json
import traceback

from db_adapter.exceptions import DatabaseAdapterError
from db_adapter.logger import logger

"""
A Source JSON object would look like this
e.g.:
{
    'model'     : 'wrfSE',
    'version'   : 'v3',
    'parameters': { }
}
{
    'model'     : 'OBS_WATER_LEVEL',
    'version'   : '',
    'parameters': {
        "CHANNEL_CELL_MAP": {
            "594" : "Wellawatta", "1547": "Ingurukade", "3255": "Yakbedda", "3730": "Wellampitiya",
            "7033": "Janakala Kendraya"
        }, "FLOOD_PLAIN_CELL_MAP": { }
    }
}
"""


def get_source_by_id(pool, id_):
    """
    Retrieve source by id
    :param pool: database connection pool
    :param id_: source id
    :return: Source if source exists in the database, else None
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            sql_statement = "SELECT * FROM `source` WHERE `id`=%s"
            row_count = cursor.execute(sql_statement, id_)
            if row_count > 0:
                return cursor.fetchone()
            else:
                return None
    except Exception as exception:
        error_message = "Retrieving source with source_id {} failed".format(id_)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if connection is not None:
            connection.close()


def get_source_id(pool, model, version) -> str:
    """
    Retrieve Source id
    :param pool: database connection pool
    :param model:
    :param version:
    :return: str: source id if source exists in the database, else None
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            sql_statement = "SELECT `id` FROM `source` WHERE `model`=%s and `version`=%s"
            row_count = cursor.execute(sql_statement, (model, version))
            if row_count > 0:
                return cursor.fetchone()['id']
            else:
                return None
    except Exception as exception:
        error_message = "Retrieving source id: model={} and version={} failed.".format(model, version)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if connection is not None:
            connection.close()


def add_source(pool, model, version, parameters=None):
    """
    Insert sources into the database
    :param pool: database connection pool
    :param model: string
    :param version: string
    :param parameters: JSON
    :return: True if the source has been added to the "Source" table of the database, else False
    """
    connection = pool.connection()
    try:
        if get_source_id(pool=pool, model=model, version=version) is None:
            with connection.cursor() as cursor:
                sql_statement = "INSERT INTO `source` (`model`, `version`, `parameters`) VALUES ( %s, %s, %s)"
                row_count = cursor.execute(sql_statement, (model, version, json.dumps(parameters)))
                connection.commit()
                return True if row_count > 0 else False
        else:
            logger.info("Source with model={} and version={} already exists in the database".format(model, version))
            return False
    except Exception as exception:
        connection.rollback()
        error_message = "Insertion of source: model={}, version={} and parameters={} failed".format(model, version, parameters)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if connection is not None:
            connection.close()


def add_sources(sources, pool):
    """
    Add sources into the Source table
    :param sources: list of json objects that define source attributes
    e.g.:
    {
        'model'     : 'wrfSE',
        'version'   : 'v3',
        'parameters': { }
    }
    {
        'model'     : 'OBS_WATER_LEVEL',
        'version'   : '',
        'parameters': {
            "CHANNEL_CELL_MAP": {
                "594" : "Wellawatta", "1547": "Ingurukade", "3255": "Yakbedda", "3730": "Wellampitiya",
                "7033": "Janakala Kendraya"
            }, "FLOOD_PLAIN_CELL_MAP": { }
        }
    }
    :return:
    """
    for source in sources:
        print(add_source(pool=pool, model=source.get('model'), version=source.get('version'),
                         parameters=source.get('parameters')))
        print(source.get('model'))


def delete_source(pool, model, version):
    """
    Delete source from Source table, given model and version
    :param pool: database connection pool
    :param model: str
    :param version: str
    :return: True if the deletion was successful
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            sql_statement = "DELETE FROM `source` WHERE `model`=%s and `version`=%s"
            row_count = cursor.execute(sql_statement, (model, version))
            connection.commit()
            if row_count > 0:
                return True
            else:
                logger.info("There's no record of source in the database with model={} and version={}".format(model, version))
                return False
    except Exception as exception:
        connection.rollback()
        error_message = "Deleting source with model={} and version={} failed.".format(model, version)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if connection is not None:
            connection.close()


def delete_source_by_id(pool, id_):
    """
    Delete source from Source table by id
    :param pool: database connection pool
    :param id_:
    :return: True if the deletion was successful, else False
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            sql_statement = "DELETE FROM `source` WHERE `id`=%s"
            row_count = cursor.execute(sql_statement, id_)
            connection.commit()
            if row_count > 0:
                return True
            else:
                logger.info("There's no record of source in the database with the source id {}".format(id_))
                return False
    except Exception as exception:
        connection.rollback()
        error_message = "Deleting source with id {} failed.".format(id_)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if connection is not None:
            connection.close()


def get_source_parameters(pool, model, version):
    """
    Retrieve Source parameters
    :param pool: database connection pool
    :param model:
    :param version:
    :return: str: json object parameters if source exists in the database, else None
    """
    connection = pool.connection()
    try:
        with connection.cursor() as cursor:
            sql_statement = "SELECT `parameters` FROM `source` WHERE `model`=%s and `version`=%s"
            row_count = cursor.execute(sql_statement, (model, version))
            if row_count > 0:
                return cursor.fetchone()['parameters']
            else:
                return None
    except Exception as exception:
        error_message = "Retrieving source parameters: model={} and version={} failed.".format(model, version)
        logger.error(error_message)
        traceback.print_exc()
        raise exception
    finally:
        if connection is not None:
            connection.close()
avg_line_length 32.601695 | max_line_length 127 | alphanum_fraction 0.583442 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 3,324 | score_documentation 0.432025
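Every helper above takes a `pool` whose `.connection()` yields DB-API connections with dict-style cursors (`fetchone()['id']`). The pool construction is not part of this file; a hedged sketch of one plausible setup using DBUtils' PooledDB with pymysql (module path per DBUtils 2.x; all connection parameters are placeholders):

```python
# Hedged sketch: the db_adapter project's pool creation is not shown in
# this file. PooledDB + pymysql with a DictCursor is one plausible setup.
import pymysql
from dbutils.pooled_db import PooledDB

pool = PooledDB(creator=pymysql, host="localhost", user="curw",
                password="secret", database="curw_fcst",
                cursorclass=pymysql.cursors.DictCursor)

add_source(pool=pool, model="wrfSE", version="v3", parameters={})
print(get_source_id(pool=pool, model="wrfSE", version="v3"))
```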
hexsha 584f6d166970adb6f3793037f401b85f026ce2ab | size 511 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | tests/kyu_7_tests/test_binary_addition.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | MIT | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z |
| issues | tests/kyu_7_tests/test_binary_addition.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | MIT | null | null | null |
| forks | tests/kyu_7_tests/test_binary_addition.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | MIT | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z |

content:
import unittest

from katas.kyu_7.binary_addition import add_binary


class AddBinaryTestCase(unittest.TestCase):
    def test_equals(self):
        self.assertEqual(add_binary(1, 1), '10')

    def test_equals_2(self):
        self.assertEqual(add_binary(0, 1), '1')

    def test_equals_3(self):
        self.assertEqual(add_binary(1, 0), '1')

    def test_equals_4(self):
        self.assertEqual(add_binary(2, 2), '100')

    def test_equals_5(self):
        self.assertEqual(add_binary(51, 12), '111111')
avg_line_length 24.333333 | max_line_length 54 | alphanum_fraction 0.675147 | count_classes 440 | score_classes 0.861057 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 23 | score_documentation 0.04501
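The tests pin down the kata's contract: two non-negative decimal integers in, the binary representation of their sum out as a string. The kata's actual solution is not part of this row; a hedged sketch of an implementation that satisfies the tests:

```python
# Hedged sketch of an add_binary satisfying the tests above;
# not the kata author's solution.
def add_binary(a, b):
    # bin() prefixes its result with '0b', so strip the first two characters
    return bin(a + b)[2:]

assert add_binary(51, 12) == '111111'
```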
hexsha 584ff888d14bb4a1085d283e99cd26c1976fee31 | size 739 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | var/spack/repos/builtin/packages/netdata/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ECL-2.0, Apache-2.0, MIT-0, MIT | null | null | null |
| issues | var/spack/repos/builtin/packages/netdata/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ECL-2.0, Apache-2.0, MIT-0, MIT | null | null | null |
| forks | var/spack/repos/builtin/packages/netdata/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | ECL-2.0, Apache-2.0, MIT-0, MIT | null | null | null |

content:
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Netdata(AutotoolsPackage):
    """Real-time performance monitoring, done right!"""

    homepage = "https://www.netdata.cloud/"
    url = "https://github.com/netdata/netdata/archive/v1.22.1.tar.gz"

    version('1.22.1', sha256='6efd785eab82f98892b4b4017cadfa4ce1688985915499bc75f2f888765a3446')

    depends_on('m4', type='build')
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('libuv')
    depends_on('uuid')
avg_line_length 32.130435 | max_line_length 96 | alphanum_fraction 0.70636 | count_classes 518 | score_classes 0.700947 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 479 | score_documentation 0.648173
hexsha 5850feed17b8dae7b2795290112a605c61fbeef1 | size 1,727 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | examples/my_quickstart.py | 87boy/sisu | 823d12c9a8126ab41bb14b6d91cad9acbb95bc47 | Apache-2.0 | null | null | null |
| issues | examples/my_quickstart.py | 87boy/sisu | 823d12c9a8126ab41bb14b6d91cad9acbb95bc47 | Apache-2.0 | null | null | null |
| forks | examples/my_quickstart.py | 87boy/sisu | 823d12c9a8126ab41bb14b6d91cad9acbb95bc47 | Apache-2.0 | null | null | null |

content:
import flask
import flask.ext.sqlalchemy
import flask.ext.restless

# Create the Flask application and the Flask-SQLAlchemy object.
app = flask.Flask(__name__)
app.config['DEBUG'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = flask.ext.sqlalchemy.SQLAlchemy(app)


# Create your Flask-SQLAlchemy models as usual but with the following two
# (reasonable) restrictions:
# 1. They must have a primary key column of type sqlalchemy.Integer or
#    type sqlalchemy.Unicode.
# 2. They must have an __init__ method which accepts keyword arguments for
#    all columns (the constructor in flask.ext.sqlalchemy.SQLAlchemy.Model
#    supplies such a method, so you don't need to declare a new one).
class Person(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Unicode, unique=True)
    birth_date = db.Column(db.Date)


class Computer(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Unicode, unique=True)
    vendor = db.Column(db.Unicode)
    purchase_time = db.Column(db.DateTime)
    owner_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    owner = db.relationship('Person', backref=db.backref('computers',
                                                         lazy='dynamic'))


# Create the database tables.
db.create_all()

# Create the Flask-Restless API manager.
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)

# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
manager.create_api(Person, methods=['GET', 'POST', 'DELETE'])
manager.create_api(Computer, methods=['GET'])

# start the flask loop
app.run()
avg_line_length 35.244898 | max_line_length 76 | alphanum_fraction 0.711639 | count_classes 561 | score_classes 0.324841 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 827 | score_documentation 0.478865
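`APIManager.create_api` exposes the models under `/api/<tablename>`, here `/api/person` and `/api/computer`. A hedged sketch of exercising those endpoints with the `requests` library once the app is running (the host and port are Flask's defaults; the payload fields mirror the Person model):

```python
# Hedged sketch: assumes the quickstart app above is running on Flask's
# default http://127.0.0.1:5000 with Flask-Restless's default /api prefix.
import requests

r = requests.post("http://127.0.0.1:5000/api/person",
                  json={"name": "Ada", "birth_date": "1815-12-10"})
print(r.status_code, r.json())   # created Person, echoed back with its id

print(requests.get("http://127.0.0.1:5000/api/person").json())
```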
hexsha 5853ac3ad2b07e0bcfbda162b15356c29c25cefe | size 4,687 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | src/crypto_wallet/crypto_wallet.py | Sedosa/Blockchain-Analytics | a09de9cfd308c70e38a05d4127fb372af5b919b7 | MIT | null | null | null |
| issues | src/crypto_wallet/crypto_wallet.py | Sedosa/Blockchain-Analytics | a09de9cfd308c70e38a05d4127fb372af5b919b7 | MIT | null | null | null |
| forks | src/crypto_wallet/crypto_wallet.py | Sedosa/Blockchain-Analytics | a09de9cfd308c70e38a05d4127fb372af5b919b7 | MIT | null | null | null |

content:
"""
This is a script that takes a calculates the value of a cryptocurrency portfolio
It uses JSON in the with quantities of different cryptocurrencies in the form
{
"ticker" : volume,
"ticker" : volume
}
gets the live price from an API endpoint and returns the price of each item in the portfolio and the total
It also writes these into a sqlite3 database for future reference with a timestamp
"""
import os, logging, argparse, json
import sqlite3
import requests
import datetime
import time
"""
TODO: Error handling & logging
"""
# Need API from https://min-api.cryptocompare.com/
API_KEY = os.getenv("CRYPTO_API_KEY")
HEADER = {"authorization": f"Apikey {API_KEY}"}
# Taken from https://docs.python.org/3/library/sqlite3.html#registering-an-adapter-callable
def adapt_datetime(ts):
return time.mktime(ts.timetuple())
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
def setup_db(db_path):
"""
Initialises a local sqlite3 database and create the table required to hold data.
Parameters
-------------
db_path
string : A filepath to a target sqlite database
Returns
-------------
con:
Connection : Returns a connection to that database
"""
con = sqlite3.connect(db_path)
# Create table
with con:
con.execute(
"""CREATE TABLE IF NOT EXISTS CRYPTO_PRICE
(DATE timestamp, TICKER text, QTY real, PRICE real, VALUE real )"""
)
logging.info("Database and table created")
return con
def insert_into_db(connection, ticker, price, dict):
"""
Writes crypto price data to specified sqlite3 database
Parameters
-------------
connection
string : Connection to sqlite3 database output of setup_db() fn
ticker
string : String of the Ticker for a cryptocurrency e.g. BTC
price
float : Price of a cryptocurrency
dict
Dictionary : Dictionary loaded from portfolio JSON. output of parse_json() fn
"""
now = datetime.datetime.now()
with connection as con:
if ticker != "SUM":
con.execute(
"""insert into CRYPTO_PRICE
values (?,?,?,?,?)""",
(now, ticker, dict[ticker], price, price * dict[ticker]),
)
else:
con.execute(
"""insert into CRYPTO_PRICE
values (?,?,?,?,?)""",
(now, ticker, 0, price, price),
)
logging.info(f"Inserted {ticker} values into database")
def parse_json(json_path):
"""
Loads portfolio in JSON into a python dictionary.
Parameters
-------------
json_path
string : Path to portfolio JSON described in header documentation
Returns
-------------
crypto_dict
Dictionary : Dictionary loaded from portfolio json. output of parse_json() fn
"""
with open(json_path) as j:
crypto_dict = json.load(j)
return crypto_dict
def get_price(ticker):
"""
Returns the live price of a unit a cryptocurrency in GBP.
Parameters
-------------
ticker
string : String of the Ticker for a cryptocurrency e.g. BTC
Returns
-------------
price
float : Price of a cryptocurrency
"""
API_ENDPOINT = f"https://min-api.cryptocompare.com/data/price?fsym={ticker}&tsyms=GBP"
response = requests.get(API_ENDPOINT, headers=HEADER)
price = response.json()["GBP"]
return price
def main(json_path, connection):
crypto_dict = parse_json(json_path)
wallet_dict = dict()
for key_ in crypto_dict.keys():
price = get_price(key_)
print(f"{key_}: £{round(price*crypto_dict[key_],2)}")
wallet_dict[key_] = price * crypto_dict[key_]
insert_into_db(connection, key_, price, crypto_dict)
insert_into_db(connection, "SUM", sum(wallet_dict.values()), crypto_dict)
print(f"Total: £{sum(wallet_dict.values())}")
return sum(wallet_dict.values())
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO, format="[%(levelname)s: %(asctime)s] %(filename)s, %(funcName)s, line %(lineno)d : %(message)s"
)
parser = argparse.ArgumentParser()
parser.add_argument(
"--filepath_in", required=False, type=str, default=os.getcwd(), help="Filepath to json holding volumes of crypto"
)
parser.add_argument(
"--db_path", required=False, type=str, default=f"{os.getcwd()}/crypto.db", help="Filepath to sqlite database"
)
args = parser.parse_args()
FILEPATH_IN = args.filepath_in
con = setup_db(args.db_path)
main(FILEPATH_IN, con)
con.close()
avg_line_length 26.480226 | max_line_length 123 | alphanum_fraction 0.631961 | count_classes 0 | score_classes 0 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 2,696 | score_documentation 0.574963
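The script expects a ticker-to-volume portfolio JSON and the CRYPTO_API_KEY environment variable for the CryptoCompare endpoint. A hedged sketch of preparing an input file and driving the module's functions directly (the tickers and volumes are illustrative):

```python
# Hedged sketch: write an example portfolio and value it with the
# functions above. Requires CRYPTO_API_KEY to be set for the price API.
import json

portfolio = {"BTC": 0.5, "ETH": 2.0}   # illustrative holdings
with open("portfolio.json", "w") as f:
    json.dump(portfolio, f)

con = setup_db("crypto.db")
total = main("portfolio.json", con)    # prints per-ticker and total value
con.close()
```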
hexsha 5853d100285433e6202ec4adff867b94b7270769 | size 1,685 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | np43s.py | Muraru-taketa/100_knocks | d34c0157d15a0fda45ac60e41e93bd6b73cebb58 | MIT | null | null | null |
| issues | np43s.py | Muraru-taketa/100_knocks | d34c0157d15a0fda45ac60e41e93bd6b73cebb58 | MIT | null | null | null |
| forks | np43s.py | Muraru-taketa/100_knocks | d34c0157d15a0fda45ac60e41e93bd6b73cebb58 | MIT | null | null | null |

content:
# np43.py
# 43. Extract clauses containing a noun that depend on clauses containing a verb
"""When a clause (bunsetsu) containing a noun depends on a clause containing
a verb, extract the pair in tab-separated form. Do not output symbols such
as punctuation.
"""
import re

# delimiter characters
separator = re.compile('\t|,')

# dependency (kakari-uke) pattern
kakari = re.compile(r'''(?:\*\s\d+\s)  # not captured
                        (-?\d+)        # number (index of the dependency target)
                     ''', re.VERBOSE)


class Morph:
    def __init__(self, morph):
        # split on tabs and commas
        tab = separator.split(morph)
        self.surface = tab[0]  # surface form
        self.base = tab[7]     # base form
        self.pos = tab[1]      # part of speech (pos)
        self.pos1 = tab[2]     # POS subdivision 1 (pos1)


class Chunk:
    def __init__(self, morphs, dst):
        self.morphs = morphs
        self.srcs = []   # list of indices of the clauses that depend on this one
        self.dst = dst   # index of the clause this one depends on
        self.phrase = ''.join([morph.surface for morph in morphs if morph.pos != '記号'])


# assign dependency sources and append the Chunk list to the list of sentences
def append_sentence(chunks, sentences):
    # assign dependency sources
    for i, chunk in enumerate(chunks):
        if chunk.dst != -1:
            chunks[chunk.dst].srcs.append(i)
    sentences.append(chunks)
    return sentences


import np41sss

sentences = np41sss.Ai_morphs()
sentence = sentences[1]
for chunk in sentence:
    if int(chunk.dst) != -1:
        modifier = ''.join([morph.surface if morph.pos != '記号' else '' for morph in chunk.morphs])
        modifier_pos = [morph.pos for morph in chunk.morphs]  # used to exclude symbols etc. within chunk.morphs
        modifiee = ''.join([morph.surface if morph.pos != '記号' else '' for morph in sentence[int(chunk.dst)].morphs])
        modifiee_pos = [morph.pos for morph in sentence[int(chunk.dst)].morphs]
        if '名詞' in modifier_pos and '動詞' in modifiee_pos:  # keep pairs containing a noun and a verb
            print(modifier, modifiee, sep='\t')  # tab-separated
avg_line_length 30.636364 | max_line_length 113 | alphanum_fraction 0.619585 | count_classes 630 | score_classes 0.295636 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 884 | score_documentation 0.414829
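`Morph` splits one MeCab/CaboCha token line on tabs and commas, so index 0 is the surface form, 1 the POS, 2 the first POS subdivision, and 7 the base form. A hedged sketch with one typical MeCab-format token line (the example token is illustrative, not taken from the repo's input):

```python
# One MeCab-format token line: surface \t pos,pos1,pos2,pos3,conj,conj,base,reading,pron
line = "吾輩\t名詞,代名詞,一般,*,*,*,吾輩,ワガハイ,ワガハイ"
m = Morph(line)  # uses the Morph class defined above
print(m.surface, m.pos, m.pos1, m.base)  # 吾輩 名詞 代名詞 吾輩
```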
hexsha 585453c1a7dceaddf108fc0199e9890c1f5860d6 | size 4,026 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | backend/presentation/Viewsets/comment_view.py | Weida-W/CMPUT404-project-socialdistribution | 41d8a7f7f013723d2a3878156953fbc11c2e6156 | W3C-20150513 | null | null | null |
| issues | backend/presentation/Viewsets/comment_view.py | Weida-W/CMPUT404-project-socialdistribution | 41d8a7f7f013723d2a3878156953fbc11c2e6156 | W3C-20150513 | 75 | 2021-01-13T23:48:48.000Z | 2021-04-16T19:39:38.000Z |
| forks | backend/presentation/Viewsets/comment_view.py | Weida-W/CMPUT404-project-socialdistribution | 41d8a7f7f013723d2a3878156953fbc11c2e6156 | W3C-20150513 | 12 | 2021-01-13T23:22:35.000Z | 2021-04-28T08:13:38.000Z |

content:
from presentation.models import Author, Follower, Post, Comment
from django.shortcuts import get_object_or_404
from presentation.Serializers.comment_serializer import CommentSerializer
from rest_framework import viewsets, status
from django.http import JsonResponse
from rest_framework.response import Response
import uuid
from urllib.parse import urlparse
from . import urlutil

'''
URL: ://service/author/{author_id}/posts/{post_id}/comments access
GET get comments of the post
POST if you post an object of "type":"comment", it will add your comment to the post
'''


def getAuthorIDFromRequestURL(request, id):
    host = urlutil.getSafeURL(request.build_absolute_uri())
    author_id = f"{host}/author/{id}"
    return author_id


def getPostIDFromRequestURL(request, id):
    post_id = f"/posts/{id}"
    return post_id


def getCommentIDFromRequestURL(request, id):
    comment_id = f"/comments/{id}"
    return comment_id


class CommentViewSet(viewsets.ModelViewSet):
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()

    # GET a list of comments of the post
    def list(self, request, *args, **kwargs):
        author_id = getAuthorIDFromRequestURL(
            request, self.kwargs['author_id'])
        post_id = getPostIDFromRequestURL(
            request, self.kwargs['post_id'])
        post_id = author_id + post_id
        queryset = Comment.objects.filter(post=post_id)
        if queryset.exists():
            comments = list(queryset.values())
            # May have mistakes here, do we need to change the comment model?
            return JsonResponse(comments, safe=False)
        else:
            return JsonResponse([], safe=False)

    # GET a single comment using comment_id
    def retrieve(self, request, *args, **kwargs):
        comment_id = request.build_absolute_uri()[:-1]
        # use filter() rather than get(): get() returns a model instance
        # (which has no .exists()) and raises DoesNotExist on no match
        queryset = Comment.objects.filter(id=comment_id)
        if queryset.exists():
            serializer = CommentSerializer(queryset.first())
            return Response(serializer.data, 200)
        else:
            return Response({"msg": "No comment for given id"}, 404)

    # POST a new comment under a post
    def create(self, request, *args, **kwargs):
        request_data = request.data.copy()
        # assume the id of the commenter is part of the data
        # CHANGE THIS LATER!
        commenter_id = request_data.get('author', None)
        author_id = getAuthorIDFromRequestURL(
            request, self.kwargs['author_id'])
        post_id = getPostIDFromRequestURL(
            request, self.kwargs['post_id'])
        post_id = author_id + post_id
        comment = request_data.get('comment', None)
        content_type = request_data.get('contentType', None)
        # create comment id
        cuuid = str(uuid.uuid4().hex)
        comment_id = f"{post_id}/comments/{cuuid}"
        comment_data = {'type': 'comment', 'author': commenter_id, 'comment': comment, 'contentType': content_type,
                        'post': post_id, 'id': comment_id}
        serializer = self.serializer_class(data=comment_data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, 200)
        else:
            return Response(serializer.errors,
                            status=400)

    def delete(self, request, *args, **kwargs):
        author_id = getAuthorIDFromRequestURL(
            request, self.kwargs['author_id'])
        post_id = getPostIDFromRequestURL(
            request, self.kwargs['post_id'])
        post_id = author_id + post_id
        comments = get_object_or_404(Comment, post=post_id)
        comment_id = getCommentIDFromRequestURL(
            request, self.kwargs['comment_id'])
        comment_id = post_id + comment_id
        comment = get_object_or_404(Comment, id=comment_id)
        # Possible mistake?
        try:
            comment.delete()
        except ValueError:
            return Response("No such a comment. Deletion fails.", 500)
        return Response("Delete successful")
avg_line_length 37.981132 | max_line_length 115 | alphanum_fraction 0.655986 | count_classes 3,093 | score_classes 0.766733 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 799 | score_documentation 0.198066
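`create` expects `author`, `comment`, and `contentType` in the request body and builds the comment id as `{post_id}/comments/{uuid}`. A hedged sketch of the client-side POST (the host, author id, and post id are placeholders):

```python
# Hedged sketch of a client POSTing a comment to the endpoint above;
# host, author id, and post id are illustrative placeholders.
import requests

url = "http://localhost:8000/author/abc123/posts/p456/comments/"
payload = {
    "author": "http://localhost:8000/author/def789",
    "comment": "Nice post!",
    "contentType": "text/plain",
}
print(requests.post(url, json=payload).status_code)  # 200 on success
```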
hexsha 5854bedf049dafa402041ca2798dee49d6f30c6d | size 11,520 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | bundle/vim-pandoc-master/python3/vim_pandoc/command.py | ian-mitchell-001/my-vim-configs | 198747234df311179185ce9fb8424bb1c1c64771 | Unlicense | null | null | null |
| issues | bundle/vim-pandoc-master/python3/vim_pandoc/command.py | ian-mitchell-001/my-vim-configs | 198747234df311179185ce9fb8424bb1c1c64771 | Unlicense | null | null | null |
| forks | bundle/vim-pandoc-master/python3/vim_pandoc/command.py | ian-mitchell-001/my-vim-configs | 198747234df311179185ce9fb8424bb1c1c64771 | Unlicense | null | null | null |

content:
# encoding=utf-8

import vim
import re
import sys
import os.path
import argparse
import shlex
from subprocess import Popen, PIPE
from itertools import chain

from vim_pandoc.utils import plugin_enabled_modules, ensure_string
from vim_pandoc.bib.vim_completer import find_bibfiles
from vim_pandoc.helpparser import PandocInfo


class PandocCommand(object):
    def __init__(self):
        self.pandoc_info = PandocInfo(vim.vars["pandoc#command#path"])
        self.formats_table = {}
        self.build_formats_table()
        self._output_file_path = None
        self._run_command = None
        self._out = None

    def build_formats_table(self):
        for i in self.pandoc_info.output_formats:
            if i in ("asciidoc", "plain"):
                self.formats_table[i] = "txt"
            elif i in ("beamer", "pdf"):
                self.formats_table[i] = "pdf"
            elif i in ("dzslides", "html", "html5", "mediawiki", "revealjs", "s5", "slideous", "slidy"):
                self.formats_table[i] = "html"
            elif i in ("markdown", "gfm", "markdown_github", "markdown_mmd", "markdown_phpextra", "markdown_strict"):
                self.formats_table[i] = "md"
            elif i in ("odt", "opendocument"):
                self.formats_table[i] = "odt"
            elif i == "native":
                self.formats_table[i] = "hs"
            elif i == "texinfo":
                self.formats_table[i] = "info"
            elif i == "latex":
                self.formats_table[i] = "tex"
            else:
                self.formats_table[i] = i
        # parenthesized so the "pdf not present" check applies to both writers
        if ("latex" in self.formats_table or "beamer" in self.formats_table) \
                and "pdf" not in self.formats_table:
            self.formats_table["pdf"] = "pdf"

    def __call__(self, args, should_open):
        largs = shlex.split(args)
        if largs == []:
            largs = ['html']  # make sure we pass an output format
        p = self.pandoc_info.build_argument_parser()
        c_vars = vars(p.parse_args(largs))

        # Infer arguments from the vim environment
        # a) bibliographies
        if 'bibliographies' in plugin_enabled_modules():
            local_bibs = vim.eval('b:pandoc_biblio_bibs')
            found_bibs = find_bibfiles()
            if local_bibs or found_bibs and not c_vars['bibliography']:
                c_vars['bibliography'] = []
                if local_bibs:
                    c_vars['bibliography'].extend(local_bibs)
                if found_bibs:
                    c_vars['bibliography'].extend(found_bibs)

        # Now, we must determine what our input and output files are
        # a) First, let's see what the desired output format is...
        output_format = c_vars['output_format'] \
            if self.pandoc_info.is_valid_output_format(c_vars['output_format']) \
            or c_vars['output_format'] == 'pdf' \
            else "html"

        # overwrite --to with this value
        # ('pdf' is not a valid output format, we pass it to -o instead)
        if output_format != 'pdf':
            c_vars['to'] = output_format

        if output_format == 'pdf':
            # pdf engine
            if self.pandoc_info.version >= '2':
                engine_option = 'pdf_engine'
            else:
                engine_option = 'latex_engine'
            if not c_vars[engine_option]:
                try:  # try a buffer local engine
                    engine_var = ensure_string(vim.current.buffer.vars['pandoc_command_latex_engine'])
                except:  # use the global value
                    engine_var = ensure_string(vim.vars['pandoc#command#latex_engine'])
                c_vars[engine_option] = str(engine_var)

        if not c_vars['output']:
            self._output_file_path = vim.eval('expand("%:r")') + '.' \
                + self.formats_table[re.split("[-+]", output_format)[0]]
            c_vars['output'] = self._output_file_path
        else:
            self._output_file_path = os.path.expanduser(c_vars['output'][0])

        input_arg = '"' + vim.eval('expand("%")') + '"'

        # Now, we reconstruct the pandoc call
        arglist = []
        arglist.append(ensure_string(vim.vars['pandoc#compiler#command']))
        arglist.append(ensure_string(vim.vars['pandoc#compiler#arguments']))

        # Only consider enabled flags and arguments with values
        extra_arg_vars_keys = [k for k in c_vars.keys() if c_vars[k] and k != 'output_format']
        for var in extra_arg_vars_keys:
            real_var = var.replace("_", "-")
            val = c_vars[var]
            if type(val) == list and len(val) > 1:  # multiple values, repeat keys
                for vv in val:
                    if type(vv) == list and type(vv[0]) == list:
                        vv = vv[0][0]
                    elif type(vv) == list:
                        vv = vv[0]
                    elif type(val) == bool:
                        vv = None
                    if vv:
                        vv = os.path.expanduser(vv)
                        arglist.append("--" + real_var + '="' + str(vv) + '"')
                    else:
                        arglist.append("--" + real_var)
            else:
                if type(val) == list and type(val[0]) == list:
                    val = val[0][0]
                elif type(val) == list:
                    val = val[0]
                elif type(val) == bool:
                    val = None
                if val:
                    val = os.path.expanduser(val)
                    arglist.append('--' + real_var + '="' + str(val) + '"')
                else:
                    arglist.append('--' + real_var)

        arglist.append(input_arg)

        self._run_command = " ".join(arglist)

        # execute
        self.execute(should_open)

    def execute(self, should_open):
        with open("pandoc.out", 'w') as tmp:
            # for nvim
            if vim.eval("has('nvim')") == '1':
                try:
                    should_open_s = str(int(should_open))
                except:
                    should_open_s = '0'
                vim.command("call jobstart(" +
                            "['" + "','".join(shlex.split(self._run_command)) + "'], " +
                            " extend({'should_open': '" + should_open_s + "'}," +
                            " {'on_exit': 'pandoc#command#JobHandler'," +
                            "'on_stdout': 'pandoc#command#JobHandler'," +
                            "'on_stderr': 'pandoc#command#JobHandler'}))")
            # for vim versions with clientserver support
            elif vim.eval("has('clientserver')") == '1' and \
                    vim.eval("v:servername") != "" and \
                    vim.eval("executable('python')") == '1':
                async_runner = '"' + os.path.join(os.path.dirname(__file__), "async.py") + '"'
                servername_arg = "--servername=" + vim.eval("v:servername")
                open_arg = "--open" if should_open else "--noopen"
                async_command = " ".join(["python", async_runner, servername_arg, open_arg, self._run_command])
                try:
                    Popen(shlex.split(async_command), stdout=tmp, stderr=tmp)
                except:
                    vim.command('echoe "vim-pandoc: could not execute pandoc asynchronously"')
            else:
                try:  # fallback to synchronous execution
                    com = Popen(shlex.split(self._run_command), stdout=tmp, stderr=tmp)
                    com.wait()
                except:
                    vim.command('echoe "vim-pandoc: could not execute pandoc"')
                    return
                self.on_done(should_open, com.returncode)

    def on_done(self, should_open, returncode):
        if self._run_command and self._output_file_path:
            vim.command("echohl Statement")
            vim.command("echom 'vim-pandoc:ran " + self._run_command + "'")
            vim.command("echohl None")

            if vim.eval("g:pandoc#command#use_message_buffers") == '1' \
                    and returncode not in ('0', 0):
                vim.command("let split = &splitbelow")
                vim.command("set splitbelow")
                vim.command("5new pandoc\ output")
                vim.command("let &splitbelow = split")
                vim.command("setlocal wrap")
                vim.command("setlocal linebreak")
                vim.current.buffer[0] = "# Press q to close this"
                vim.current.buffer.append("> " + self._run_command)
                vim.command("normal! G")
                if vim.eval('filereadable("pandoc.out")') == '1':
                    vim.command("silent r pandoc.out")
                vim.command("setlocal buftype=nofile")
                vim.command("setlocal nobuflisted")
                # pressing q on the buffer will delete it
                vim.command("map <buffer> q :bd<cr>")
                # we will highlight some elements in the buffer
                vim.command("syn match PandocOutputMarks /^>>/")
                vim.command("syn match PandocCommand /^>.*$/hs=s+1")
                vim.command("syn match PandocInstructions /^#.*$/")
                vim.command("hi! link PandocOutputMarks Operator")
                vim.command("hi! link PandocCommand Debug")
                vim.command("hi! link PandocInstructions Comment")

            # under windows, pandoc.out is sometimes not closed by async.py
            # in time, so we wait a bit
            if sys.platform.startswith("win"):
                from time import sleep
                sleep(1)
            if os.path.exists("pandoc.out"):
                os.remove("pandoc.out")

            # open the file if needed
            # nvim's python host doesn't change the directory the same way vim does
            if vim.eval('has("nvim")') == '1':
                os.chdir(vim.eval('expand("%:p:h")'))
            if os.path.exists(os.path.abspath(self._output_file_path)) and should_open:
                # if g:pandoc#command#custom_open is defined and is a valid funcref
                if vim.eval("g:pandoc#command#custom_open") != "" \
                        and vim.eval("exists('*" + vim.eval("g:pandoc#command#custom_open") + "')") == '1':
                    custom_command = vim.eval(vim.eval("g:pandoc#command#custom_open")
                                              + "('" + self._output_file_path + "')")
                    Popen(shlex.split(custom_command))
                # otherwise use platform defaults:
                else:
                    if sys.platform == "darwin" or sys.platform.startswith("linux"):
                        if sys.platform == "darwin":
                            open_command = "open"  # OSX
                        elif sys.platform.startswith("linux"):
                            open_command = "xdg-open"  # freedesktop/linux
                        with open(os.devnull, 'wb') as fnull:
                            Popen([open_command, self._output_file_path], stderr=fnull)
                    elif sys.platform.startswith("win"):
                        Popen('cmd /c "start ' + self._output_file_path + '"')

            # we reset this
            self._output_file_path = None
            self._run_command = None
            vim.command("redraw")

            if returncode in ("0", 0):
                vim.command("echohl Statement")
                vim.command("echom 'vim-pandoc:ran successfully.'")
                vim.command("echohl None")


pandoc = PandocCommand()
avg_line_length 44.307692 | max_line_length 118 | alphanum_fraction 0.521354 | count_classes 11,168 | score_classes 0.969444 | count_generators 0 | score_generators 0 | count_decorators 0 | score_decorators 0 | count_async_functions 0 | score_async_functions 0 | count_documentation 3,163 | score_documentation 0.274566
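`build_formats_table` maps pandoc writer names to output file extensions, and `__call__` later strips extension syntax like `markdown+smart` down to the base writer with `re.split("[-+]", output_format)[0]` before the lookup. A tiny hedged reproduction of that lookup outside vim (the table here is a hand-picked subset):

```python
# Minimal reproduction of the extension lookup in __call__;
# formats_table is a hand-picked subset of build_formats_table's output.
import re

formats_table = {"latex": "tex", "beamer": "pdf", "html5": "html", "markdown": "md"}
for fmt in ("latex", "markdown+smart", "html5-ascii"):
    base = re.split("[-+]", fmt)[0]
    print(fmt, "->", "out." + formats_table[base])
```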
hexsha 58560f5398484c07794db5199083195112cafef3 | size 10,955 | ext py | lang Python

| event | repo_path | repo_name | head_hexsha | licenses | count | min_datetime | max_datetime |
|---|---|---|---|---|---|---|---|
| stars | databricks/koalas/strings.py | mercileesb/koalas | 685176c512f31166f0e472aa0f461d0f1449fb0c | Apache-2.0 | 1 | 2021-01-17T18:26:33.000Z | 2021-01-17T18:26:33.000Z |
| issues | databricks/koalas/strings.py | mercileesb/koalas | 685176c512f31166f0e472aa0f461d0f1449fb0c | Apache-2.0 | null | null | null |
| forks | databricks/koalas/strings.py | mercileesb/koalas | 685176c512f31166f0e472aa0f461d0f1449fb0c | Apache-2.0 | null | null | null |

content:
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
String functions on Koalas Series
"""
from typing import TYPE_CHECKING
import numpy as np
from pyspark.sql.types import StringType, BinaryType, BooleanType
from databricks.koalas.base import _wrap_accessor_pandas
if TYPE_CHECKING:
import databricks.koalas as ks
class StringMethods(object):
"""String methods for Koalas Series"""
def __init__(self, series: 'ks.Series'):
if not isinstance(series.spark_type, (StringType, BinaryType)):
raise ValueError(
"Cannot call StringMethods on type {}"
.format(series.spark_type))
self._data = series
self.name = self._data.name
# Methods
def capitalize(self) -> 'ks.Series':
"""
Convert Strings in the series to be capitalized.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.capitalize(),
StringType()
).alias(self.name)
def lower(self) -> 'ks.Series':
"""
Convert strings in the Series/Index to all lowercase.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.lower(),
StringType()
).alias(self.name)
def upper(self) -> 'ks.Series':
"""
Convert strings in the Series/Index to all uppercase.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.upper(),
StringType()
).alias(self.name)
def swapcase(self) -> 'ks.Series':
"""
Convert strings in the Series/Index to be swapcased.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.swapcase(),
StringType()
).alias(self.name)
def startswith(self, pattern, na=np.NaN) -> 'ks.Series':
"""
Test if the start of each string element matches a pattern.
Equivalent to :func:`str.startswith`.
Parameters
----------
pattern : str
Character sequence. Regular expressions are not accepted.
na : object, defulat NaN
Object shown if element is not a string.
Returns
-------
Series of bool
Koalas Series of booleans indicating whether the given pattern
matches the start of each string element.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.startswith(pattern, na),
BooleanType()
).alias(self.name)
def endswith(self, pattern, na=np.NaN) -> 'ks.Series':
"""
Test if the end of each string element matches a pattern.
Equivalent to :func:`str.endswith`.
Parameters
----------
pattern : str
Character sequence. Regular expressions are not accepted.
        na : object, default NaN
Object shown if element is not a string.
Returns
-------
Series of bool
Koalas Series of booleans indicating whether the given pattern
matches the end of each string element.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.endswith(pattern, na),
BooleanType()
).alias(self.name)
def strip(self, to_strip=None) -> 'ks.Series':
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified
characters from each string in the Series/Index from left and
right sides. Equivalent to :func:`str.strip`.
Parameters
----------
to_strip : str
Specifying the set of characters to be removed. All combinations
of this set of characters will be stripped. If None then
whitespaces are removed.
Returns
-------
Series of str
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.strip(to_strip),
StringType()
).alias(self.name)
def lstrip(self, to_strip=None) -> 'ks.Series':
"""
Remove leading characters.
Strip whitespaces (including newlines) or a set of specified
characters from each string in the Series/Index from left side.
Equivalent to :func:`str.lstrip`.
Parameters
----------
to_strip : str
Specifying the set of characters to be removed. All combinations
of this set of characters will be stripped. If None then
whitespaces are removed.
Returns
-------
Series of str
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.lstrip(to_strip),
StringType()
).alias(self.name)
def rstrip(self, to_strip=None) -> 'ks.Series':
"""
Remove trailing characters.
Strip whitespaces (including newlines) or a set of specified
characters from each string in the Series/Index from right side.
Equivalent to :func:`str.rstrip`.
Parameters
----------
to_strip : str
Specifying the set of characters to be removed. All combinations
of this set of characters will be stripped. If None then
whitespaces are removed.
Returns
-------
Series of str
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.rstrip(to_strip),
StringType()
).alias(self.name)
def get(self, i) -> 'ks.Series':
"""
Extract element from each string in the Series/Index at the
specified position.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series of objects
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.get(i),
StringType()
).alias(self.name)
def isalnum(self) -> 'ks.Series':
"""
Check whether all characters in each string are alphanumeric.
This is equivalent to running the Python string method
:func:`str.isalnum` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isalnum(),
BooleanType()
).alias(self.name)
def isalpha(self) -> 'ks.Series':
"""
Check whether all characters in each string are alphabetic.
This is equivalent to running the Python string method
:func:`str.isalpha` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isalpha(),
BooleanType()
).alias(self.name)
def isdigit(self) -> 'ks.Series':
"""
Check whether all characters in each string are digits.
This is equivalent to running the Python string method
:func:`str.isdigit` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isdigit(),
BooleanType()
).alias(self.name)
def isspace(self) -> 'ks.Series':
"""
Check whether all characters in each string are whitespaces.
This is equivalent to running the Python string method
:func:`str.isspace` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isspace(),
BooleanType()
).alias(self.name)
def islower(self) -> 'ks.Series':
"""
Check whether all characters in each string are lowercase.
This is equivalent to running the Python string method
:func:`str.islower` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.islower(),
BooleanType()
).alias(self.name)
def isupper(self) -> 'ks.Series':
"""
Check whether all characters in each string are uppercase.
This is equivalent to running the Python string method
:func:`str.isupper` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isupper(),
BooleanType()
).alias(self.name)
def istitle(self) -> 'ks.Series':
"""
Check whether all characters in each string are titlecase.
This is equivalent to running the Python string method
:func:`str.istitle` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.istitle(),
BooleanType()
).alias(self.name)
def isnumeric(self) -> 'ks.Series':
"""
Check whether all characters in each string are numeric.
This is equivalent to running the Python string method
:func:`str.isnumeric` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isnumeric(),
BooleanType()
).alias(self.name)
def isdecimal(self) -> 'ks.Series':
"""
Check whether all characters in each string are decimals.
This is equivalent to running the Python string method
:func:`str.isdecimal` for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
"""
return _wrap_accessor_pandas(
self,
lambda x: x.str.isdecimal(),
BooleanType()
).alias(self.name)
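# ---------------------------------------------------------------------------
# A minimal usage sketch for the accessor above (not part of the original
# module; values are illustrative). Each method returns a new Koalas Series,
# so calls can be chained like their pandas counterparts:
#
#   import databricks.koalas as ks
#   s = ks.Series(['Lion', 'tiger', None])
#   s.str.upper()            # -> 'LION', 'TIGER', None
#   s.str.startswith('t')    # -> False, True, None (missing values pass through)
#   s.str.strip().str.lower()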
| 31.033994
| 76
| 0.583204
| 10,089
| 0.920949
| 0
| 0
| 0
| 0
| 0
| 0
| 6,762
| 0.617252
|
585693264a6958d193fa10022658456c7350638b
| 807
|
py
|
Python
|
python/turbodbc_test/test_cursor_async_io.py
|
fjetter/turbodbc
|
b11f0a1bc7d67bc3cbc60f564594f0e735f524f4
|
[
"MIT"
] | null | null | null |
python/turbodbc_test/test_cursor_async_io.py
|
fjetter/turbodbc
|
b11f0a1bc7d67bc3cbc60f564594f0e735f524f4
|
[
"MIT"
] | null | null | null |
python/turbodbc_test/test_cursor_async_io.py
|
fjetter/turbodbc
|
b11f0a1bc7d67bc3cbc60f564594f0e735f524f4
|
[
"MIT"
] | null | null | null |
import pytest
import six
from turbodbc import connect
from query_fixture import query_fixture
from helpers import for_one_database, open_cursor
@for_one_database
def test_many_batches_with_async_io(dsn, configuration):
with open_cursor(configuration, use_async_io=True) as cursor:
with query_fixture(cursor, configuration, 'INSERT INTEGER') as table_name:
# insert 2^16 rows
cursor.execute("INSERT INTO {} VALUES (1)".format(table_name))
for _ in six.moves.range(16):
cursor.execute("INSERT INTO {} SELECT * FROM {}".format(table_name,
table_name))
cursor.execute("SELECT * FROM {}".format(table_name))
assert sum(1 for _ in cursor) == 2**16
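# The fixture above relies on repeated self-insertion doubling the row count:
# one seed row plus 16 rounds of "INSERT INTO t SELECT * FROM t" yields 2**16
# rows. A standalone sketch of that arithmetic (illustrative only):
#
#   rows = 1
#   for _ in range(16):
#       rows *= 2           # each round copies the whole table into itself
#   assert rows == 2 ** 16  # 65536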
| 36.681818
| 84
| 0.629492
| 0
| 0
| 0
| 0
| 658
| 0.815366
| 0
| 0
| 112
| 0.138786
|
5856c891983edcd6b2efc2d720455bfccf0cdf79
| 1,491
|
py
|
Python
|
llist_gameboard/urls.py
|
Plongesam/data-structures-game
|
a47c849ea97763eff1005273a58aa3d8ab663ff2
|
[
"Apache-2.0"
] | 2
|
2021-03-02T20:06:34.000Z
|
2021-03-31T02:51:35.000Z
|
llist_gameboard/urls.py
|
Plongesam/data-structures-game
|
a47c849ea97763eff1005273a58aa3d8ab663ff2
|
[
"Apache-2.0"
] | 68
|
2021-03-02T20:20:21.000Z
|
2021-05-13T02:21:57.000Z
|
llist_gameboard/urls.py
|
Plongesam/data-structures-game
|
a47c849ea97763eff1005273a58aa3d8ab663ff2
|
[
"Apache-2.0"
] | null | null | null |
"""
URLs for the LList Game Board app.
"""
from django.urls import path
from llist_gameboard.api import llist_api
from . import views
urlpatterns = [
# Views
path('', views.llist_game_board, name='llist-game-board'),
    # Game Play API Calls For Linked List
path('llist_api', llist_api.api_overview, name='llist-game-board-api_overview'),
path('llist_api/start_game/<str:difficulty>/<str:player_ids>/<str:data_structures>', llist_api.start_game, name='llist-game-board-start_game'),
path('llist_api/board/<str:game_id>', llist_api.board, name='llist-game-board-game_status'),
path('llist_api/dig_tunnel/<str:game_id>/<str:origin>/<str:destination>', llist_api.dig_tunnel, name='llist-game-board-dig_tunnel'),
path('llist_api/dig_chamber/<str:game_id>/<str:origin>/<str:move_ant>/<str:ant>', llist_api.dig_chamber, name='llist-game-board-dig_chamber'),
path('llist_api/fill_chamber/<str:game_id>/<str:to_fill>', llist_api.fill_chamber, name='llist-game-board-fill_chamber'),
path('llist_api/spawn_ant/<str:game_id>', llist_api.spawn_ant, name='llist-game-board-spawn_ant'),
path('llist_api/forage/<str:game_id>/<str:difficulty>/<str:dest>', llist_api.forage, name='llist-game-board-forage'),
path('llist_api/move_food/<str:game_id>/<str:start>/<str:dest>', llist_api.move_food, name='llist-game-board-move_food'),
path('llist_api/move_ant/<str:game_id>/<str:start>/<str:dest>', llist_api.move_ant, name='llist-game-board-move_ant'),
]
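# A minimal sketch of resolving one of the named routes above (illustrative
# game_id; the leading prefix depends on where this URLconf is included):
#
#   from django.urls import reverse
#   url = reverse('llist-game-board-game_status', kwargs={'game_id': 'abc123'})
#   # e.g. '/llist_api/board/abc123'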
| 59.64
| 147
| 0.733736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 923
| 0.619048
|
5857c8cf49629013e2ff3dd558ee69aaefccf283
| 208
|
py
|
Python
|
tests/test_most_invoices.py
|
swimmio/sqlalchemy_swimm
|
d24accb7792743cf586bd7062531d108e7063eba
|
[
"MIT"
] | null | null | null |
tests/test_most_invoices.py
|
swimmio/sqlalchemy_swimm
|
d24accb7792743cf586bd7062531d108e7063eba
|
[
"MIT"
] | null | null | null |
tests/test_most_invoices.py
|
swimmio/sqlalchemy_swimm
|
d24accb7792743cf586bd7062531d108e7063eba
|
[
"MIT"
] | null | null | null |
from src import most_invoices
EXPECTED_RESULT = (14, 'Berlin')
def test_most_invoices() -> None:
tested_result = most_invoices.get_city_with_most_invoices()
assert tested_result == EXPECTED_RESULT
| 23.111111
| 63
| 0.774038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.038462
|
58593da1cc559e0383548c77af9516f78e6dbe07
| 8,223
|
py
|
Python
|
VIP_modules/widgets/ResultCanvas_QTAgg.py
|
Nikolaj-K/lab-control-GUI
|
3c7811de57f110870cf4740743fd84b76d918ad3
|
[
"MIT"
] | 17
|
2017-05-24T13:31:31.000Z
|
2021-12-04T22:47:33.000Z
|
VIP_modules/widgets/ResultCanvas_QTAgg.py
|
Nikolaj-K/lab-control-GUI
|
3c7811de57f110870cf4740743fd84b76d918ad3
|
[
"MIT"
] | null | null | null |
VIP_modules/widgets/ResultCanvas_QTAgg.py
|
Nikolaj-K/lab-control-GUI
|
3c7811de57f110870cf4740743fd84b76d918ad3
|
[
"MIT"
] | 6
|
2017-11-21T01:32:33.000Z
|
2020-12-15T05:28:17.000Z
|
import random
import numpy as np
import operator
from scipy import optimize
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.figure import Figure as MatplotlibFigure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm as color_map
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import interface.auxiliary_functions as auxi
import dictionaries.constants as cs
#################################################################################
class ResultsCanvas(FigureCanvasQTAgg):
def __init__(self, canvas_ref, vip):
self._Figure = MatplotlibFigure(figsize = cs.FIG_SIZE, dpi = cs.DPI)#, tight_layout=True, frameon=True)
super(ResultsCanvas, self).__init__(self._Figure)
self.update_figure(canvas_ref, vip)
def _from_options(self, canvas_ref, vip):
self.Axes.set_position(self._get_axes_position(vip))
labels_x = self.Axes.xaxis.get_ticklabels()
labels_y = self.Axes.yaxis.get_ticklabels()
fontsize = vip.get('Options', 'R_axes_font_size')
angle = vip.get('Options', 'R_x_plot_label_rotation')
for label in labels_x+labels_y:
label.set_fontsize(fontsize)
if vip.get(canvas_ref, 'F_plot_function') == 'density':
for label in labels_x:
label.set_rotation(angle)
def _get_axes_position(self, vip):
session_keys = ['R_x_plot_position', 'R_y_plot_position', 'R_x_plot_size', 'R_y_plot_size']
f = lambda k: float(vip.get('Options', k))
return map(f, session_keys)
#################################################################################
class Canvas2dData(ResultsCanvas):
def __init__(self, canvas_ref, vip):
super(Canvas2dData, self).__init__(canvas_ref, vip)
def update_figure(self, canvas_ref, vip):
self._Figure.clear()
#from numpy.random import rand
#x, y, c, s = rand(4, 100)
#def onpick3(event):
# ind = event.ind
# print 'onpick3 scatter:', ind, np.take(x_axis, ind), np.take(y_axis, ind)
#self._Figure.canvas.mpl_connect('pick_event', onpick3)
try:
data_set = vip.get(canvas_ref, 'F_data_set')
plot_data2D = vip.plot_data[data_set]['2d_data']
########## Axes
self.Axes = self._Figure.add_axes(cs.AXES_POSITION_INIT)
x_axis = plot_data2D['axis_1']
y_axis = plot_data2D['axis_r']
self.Axes.plot(x_axis, y_axis, auxi.colour(cs.PLOT_COLOR_RANGE))
#self.Axes.set_xlim([x_axis[0], x_axis[-1]])
self.Axes.set_xlim(sorted([x_axis[0], x_axis[-1]]))
self._from_options(canvas_ref, vip)
self.Axes.set_xlabel(plot_data2D['label_1'])
self.Axes.set_ylabel(plot_data2D['label_r'])
#self.Axes.hold(False)
########## Extrema
#max_index, max_y = max(enumerate(y_axis), key=operator.itemgetter(1))
#vip.maximal_x = x_axis[max_index]
min_index, min_y = min(enumerate(y_axis), key=operator.itemgetter(1))
vip.minimal_x = x_axis[min_index]
print "* GLOBAL MINIMUM:\n{0}".format(vip.minimal_x)
if canvas_ref in ['Plot_column_1']:
########## Savitzky Golay Filter
ws = len(y_axis)/cs.SAVITZKY_GOLAY_FILTER_RANGE_DENOMINATOR
ws = ws if (ws % 2 == 1) else (ws + 1)
try:
y_axis_sg = auxi.savitzky_golay_filter(y_axis, window_size=ws, order=cs.SAVITZKY_GOLAY_FILTER_ORDER)
self.Axes.plot(x_axis, y_axis_sg, cs.FILTER_CURVE_STYLE, linewidth=cs.FILTER_LINEWIDTH)
except TypeError as exception:
print "! (update_figure) couldn't compute 'savitzky_golay_filter':"
print exception
########## Fit
try:
def lorenzian_fit(x, A, k, ke):
"""Take min_x of this session and define a fit function"""
def h(ke_):
return (k / 2 - ke_)**2 + (x - vip.minimal_x)**2
r = A * h(ke) / h(0)
return auxi.to_dB(r)
parameters, covariance = optimize.curve_fit(lorenzian_fit, x_axis, y_axis_sg)
LINE = 40 * "." + "\n"
print LINE
                print "LORENZIAN FIT AT FILTER CURVE MINIMUM:\n"
print "* PARAMETERS:\n\n [A, kappa, kappa_e]\n= {0}\n".format(parameters)
print "* PARAMETERS:\n\n kappa_e / kappa\n= {0}\n" .format(parameters[1] / parameters[0])
print "* COVARIANCE:\n\n Matrix\n= {0}\n" .format(covariance)
print "* MINIMUM: \n\n (x,y)\n= ({0}, {1})\n" .format(x_axis[min_index], y_axis[min_index])
print LINE
fit_function = lambda x: lorenzian_fit(x, *parameters)
y_axis_fit = map(fit_function, x_axis)
self.Axes.plot(x_axis, y_axis_fit, cs.FITTING_CURVE_STYLE, linewidth=cs.FITTING_LINEWIDTH, linestyle=cs.FITTING_LINESTYLE)
            except Exception:
print "! (update_figure) couldn't fit to lorenzian_fit."
else:
pass
try:
self.draw()
except ValueError:
message = "! (update_figure, ValueError) at vip.draw."
vip.GUI_feedback(message)
except KeyError:
message = "! (update_figure) The specified dataset might not exist."
vip.GUI_feedback(message)
#################################################################################
class Canvas3dData(ResultsCanvas):
def __init__(self, canvas_ref, vip):
super(Canvas3dData, self).__init__(canvas_ref, vip)
def update_figure(self, canvas_ref, vip):
self._Figure.clear()
try:
data_set = vip.get(canvas_ref, 'F_data_set')
plot_data3D = vip.plot_data[data_set]['3d_data']
########## Axes
X, Y = np.meshgrid(plot_data3D['axis_1'], plot_data3D['axis_2'])
Z = np.array(plot_data3D['axis_r'])
if vip.get(canvas_ref, 'F_plot_function') == 'density':
self.Axes = self._Figure.add_axes(cs.AXES_POSITION_INIT)
self.Axes.pcolormesh(X, Y, Z, cmap = color_map.coolwarm)
elif vip.get(canvas_ref, 'F_plot_function') == 'surface':
self.Axes = Axes3D(self._Figure)
surf = self.Axes.plot_surface(X, Y, Z, cmap = color_map.coolwarm, rstride = 1, cstride = 1, linewidth = 0.15, antialiased = False)
self.Axes.zaxis.set_major_locator(LinearLocator(10))
self.Axes.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#self.Axes.set_zlim(-1.01, 1.01)
position_color_bar = [0.015, 0.17, 0.015, 0.75]
Axes_color_bar = self._Figure.add_axes(position_color_bar)
self._Figure.colorbar(surf, cax = Axes_color_bar)
self._from_options(canvas_ref, vip)
#self.Axes.hold(False)
self.Axes.set_xlabel(plot_data3D['label_1'])
self.Axes.set_ylabel(plot_data3D['label_2'])
########## / Axes
try:
self.draw()
except ValueError:
message = "(update_figure, vip.draw, ValueError)"
vip.GUI_feedback(message)
except KeyError:
message = "The specified dataset might not exist"
vip.GUI_feedback(message)
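# ---------------------------------------------------------------------------
# Standalone sketch of the fitting step used in Canvas2dData above:
# scipy.optimize.curve_fit on a synthetic Lorentzian-style dip (names and
# values are illustrative, not part of the original module):
#
#   import numpy as np
#   from scipy import optimize
#
#   def lorentzian(x, A, k, x0):
#       return A * (k / 2) ** 2 / ((k / 2) ** 2 + (x - x0) ** 2)
#
#   x = np.linspace(-5, 5, 200)
#   y = lorentzian(x, 1.0, 0.8, 0.3) + 0.01 * np.random.randn(200)
#   params, cov = optimize.curve_fit(lorentzian, x, y, p0=[1.0, 1.0, 0.0])
#   # params is roughly [1.0, 0.8, 0.3]; cov is the covariance of the estimates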
| 46.721591
| 147
| 0.531558
| 7,479
| 0.909522
| 0
| 0
| 0
| 0
| 0
| 0
| 1,932
| 0.234951
|
585a68e41b2ee9276af7dd0a8f001bc6f258c0ac
| 4,538
|
py
|
Python
|
data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_kruskal.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_kruskal.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_kruskal.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
#!/usr/bin/env python
# Given weighted graph, perform kruskal-based clustering
from common import *
from cluster_common import *
import argparse
import csv
import pickle as pickle
from collections import defaultdict
class unionfind:
mp = {}
blacklisted_edges = set()
# blacklisted_e_nodes = set()
# blacklist_edges_adj = defaultdict(set)
def get_id(self, a):
if a not in self.mp:
self.mp[a] = a
return a
if self.mp[a] == a:
return a
else:
self.mp[a] = self.get_id(self.mp[a])
return self.mp[a]
def mergeset(self, a, b):
self.mp[self.get_id(b)] = self.get_id(a)
def mergeall(self, a):
d = self.get_id(a[0])
for b in a[1:]:
if not self.check_for_blacklist(b, d):
self.mp[self.get_id(b)] = d
def disallow(self, v1, v2):
if v2 > v1:
v1, v2 = v2, v1
self.blacklisted_edges.add((v1, v2))
# self.blacklisted_e_nodes.add(v1)
# self.blacklisted_e_nodes.add(v2)
# self.blacklist_edges[v1].add(v2)
# self.blacklist_edges[v2].add(v1)
def check_for_blacklist(self, v1, v2):
v1, v2 = self.get_id(v1), self.get_id(v2)
if v2 > v1:
v1, v2 = v2, v1
for e1, e2 in self.blacklisted_edges:
c1, c2 = self.get_id(e1), self.get_id(e2)
if c2 > c1:
c1, c2 = c2, c1
if c1 == v1 and c2 == v2:
return True
return False
def trymerge(self, v1, v2):
if self.get_id(v1) != self.get_id(v2) and not self.check_for_blacklist(v1, v2):
self.mergeset(v1, v2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('edgelist')
parser.add_argument('outfile', nargs='?')
parser.add_argument('-t', '--interconnectivity', default=0.82, type=float)
parser.add_argument('-A', '--with-analysis', action='store_true')
parser.add_argument('-a', '--authorprefeat', default='generated/Author_prefeat.pickle')
parser.add_argument('-s', '--seedset', nargs='*', default=['data/goldstd_clusters.csv', 'data/seedset_clusters.csv'])
parser.add_argument('-S', '--seededges', nargs='*', default=['data/train.csv'])
parser.add_argument('-b', '--blacklist', nargs='*', default=['data/blacklist_edges.csv', 'data/train.csv', 'data/train_extra.csv'])
args = parser.parse_args()
if args.outfile == None:
args.outfile = args.edgelist.replace('.prob','') + '.clusters'
threshold_interconnectivity = args.interconnectivity
print_err("Loading graph")
reader = csv.reader(enforce_min(skip_comments(open(args.edgelist, 'rb')), threshold_interconnectivity))
edges = []
for i, line in enumerate(reader):
line[0:2] = list(map(int, line[0:2]))
line[2] = float(line[2])
edges.append((line[2], line[0], line[1]))
if (i+1) % 10000 == 0:
print_err(i+1, "edges done")
print_err("Sorting edges by weight")
edges = sorted(edges, reverse=True)
uf = unionfind()
if args.blacklist:
for filename in args.blacklist:
with open(filename, 'rb') as f:
reader = csv.reader(skip_comments(f))
for line in reader:
line[0:3] = list(map(int, line[0:3]))
if len(line) > 2:
if line[0] != 0:
continue
line = line[1:]
uf.disallow(line[0], line[1])
if args.seedset:
print_err("Loading seedset(s)")
for filename in args.seedset:
for cl in loadClusters(filename):
if len(cl) < 2:
continue
uf.mergeall(cl)
if args.seededges:
		for filename in args.seededges:
with open(filename, 'rb') as f:
reader = csv.reader(skip_comments(f))
for line in reader:
line[0:3] = list(map(int, line[0:3]))
if line[0] != 1:
continue
line = line[1:]
uf.trymerge(line[0], line[1])
print_err("Clustering")
for i, (w, v1, v2) in enumerate(edges):
uf.trymerge(v1, v2)
if (i+1) % 10000 == 0:
print_err(i+1, "edges done")
clusters = defaultdict(list)
for v in uf.mp:
clusters[uf.get_id(v)].append(v)
clusters = [v for v in list(clusters.values()) if len(v) > 1]
clusters = sorted(clusters, key=len, reverse=True)
print_err("Writing clusters")
f_out = open(args.outfile, 'wb')
if not args.with_analysis:
for cl in clusters:
f_out.write(','.join(map(str, sorted(cl))) + '\n')
if args.with_analysis:
print_err("Loading pickled author pre-features")
authors = pickle.load(open(args.authorprefeat, 'rb'))
import networkx as nx
G_sim = nx.read_weighted_edgelist(skip_comments(open(args.edgelist, 'rb')), nodetype=int, delimiter=',')
outputClusters(clusters, f_out, G_sim, authors)
if __name__ == "__main__":
main()
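# A minimal usage sketch of the unionfind class above (illustrative values;
# note that mp and blacklisted_edges are class-level attributes, so state is
# shared across instances; fine here, where only one instance is created):
#
#   uf = unionfind()
#   uf.disallow(1, 2)    # clusters containing 1 and 2 may never merge
#   uf.trymerge(1, 3)    # 1 and 3 end up in the same cluster
#   uf.trymerge(3, 2)    # refused: 2's cluster is blacklisted against 1's
#   assert uf.get_id(1) == uf.get_id(3)
#   assert uf.get_id(1) != uf.get_id(2)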
| 29.855263
| 133
| 0.642794
| 1,252
| 0.275892
| 0
| 0
| 0
| 0
| 0
| 0
| 841
| 0.185324
|
585b50403351ad785a902fa91bf54e0474f5e68a
| 4,019
|
py
|
Python
|
third_party/gsutil/oauth2_plugin/oauth2_helper.py
|
bdero/depot_tools
|
685577439cbf9cb8c660e3da39bdcbb64c197c95
|
[
"BSD-3-Clause"
] | 20
|
2015-12-07T06:08:27.000Z
|
2021-11-08T11:06:18.000Z
|
third_party/gsutil/oauth2_plugin/oauth2_helper.py
|
bdero/depot_tools
|
685577439cbf9cb8c660e3da39bdcbb64c197c95
|
[
"BSD-3-Clause"
] | 1
|
2019-01-14T00:36:35.000Z
|
2019-01-14T00:36:35.000Z
|
third_party/gsutil/oauth2_plugin/oauth2_helper.py
|
bdero/depot_tools
|
685577439cbf9cb8c660e3da39bdcbb64c197c95
|
[
"BSD-3-Clause"
] | 23
|
2015-05-05T08:22:59.000Z
|
2021-11-10T06:24:46.000Z
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper routines to facilitate use of oauth2_client in gsutil."""
import sys
import time
import webbrowser
import oauth2_client
GSUTIL_CLIENT_ID = '909320924072.apps.googleusercontent.com'
# Google OAuth2 clients always have a secret, even if the client is an installed
# application/utility such as gsutil. Of course, in such cases the "secret" is
# actually publicly known; security depends entirely on the secrecy of refresh
# tokens, which effectively become bearer tokens.
GSUTIL_CLIENT_NOTSOSECRET = 'p3RlpR10xMFh9ZXBS/ZNLYUu'
GOOGLE_OAUTH2_PROVIDER_LABEL = 'Google'
GOOGLE_OAUTH2_PROVIDER_AUTHORIZATION_URI = (
'https://accounts.google.com/o/oauth2/auth')
GOOGLE_OAUTH2_PROVIDER_TOKEN_URI = (
'https://accounts.google.com/o/oauth2/token')
OOB_REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
def OAuth2ClientFromBotoConfig(config):
token_cache = None
token_cache_type = config.get('OAuth2', 'token_cache', 'file_system')
if token_cache_type == 'file_system':
if config.has_option('OAuth2', 'token_cache_path_pattern'):
token_cache = oauth2_client.FileSystemTokenCache(
path_pattern=config.get('OAuth2', 'token_cache_path_pattern'))
else:
token_cache = oauth2_client.FileSystemTokenCache()
elif token_cache_type == 'in_memory':
token_cache = oauth2_client.InMemoryTokenCache()
else:
raise Exception(
"Invalid value for config option OAuth2/token_cache: %s" %
token_cache_type)
proxy = None
if (config.has_option('Boto', 'proxy')
and config.has_option('Boto', 'proxy_port')):
proxy = "%s:%s" % (config.get('Boto', 'proxy'),
config.get('Boto', 'proxy_port'))
provider_label = config.get(
'OAuth2', 'provider_label', GOOGLE_OAUTH2_PROVIDER_LABEL)
provider_authorization_uri = config.get(
'OAuth2', 'provider_authorization_uri',
GOOGLE_OAUTH2_PROVIDER_AUTHORIZATION_URI)
provider_token_uri = config.get(
'OAuth2', 'provider_token_uri', GOOGLE_OAUTH2_PROVIDER_TOKEN_URI)
client_id = config.get('OAuth2', 'client_id', GSUTIL_CLIENT_ID)
client_secret = config.get(
'OAuth2', 'client_secret', GSUTIL_CLIENT_NOTSOSECRET)
return oauth2_client.OAuth2Client(
oauth2_client.OAuth2Provider(
provider_label, provider_authorization_uri, provider_token_uri),
client_id, client_secret,
proxy=proxy, access_token_cache=token_cache)
def OAuth2ApprovalFlow(oauth2_client, scopes, launch_browser=False):
approval_url = oauth2_client.GetAuthorizationUri(OOB_REDIRECT_URI, scopes)
if launch_browser:
sys.stdout.write(
'Attempting to launch a browser with the OAuth2 approval dialog at '
'URL: %s\n\n'
'[Note: due to a Python bug, you may see a spurious error message "object is not\n'
'callable [...] in [...] Popen.__del__" which can be ignored.]\n\n' % approval_url)
else:
sys.stdout.write(
'Please navigate your browser to the following URL:\n%s\n\n' %
approval_url)
if (launch_browser and
not webbrowser.open(approval_url, new=1, autoraise=True)):
sys.stdout.write(
'Launching browser appears to have failed; please navigate a browser '
'to the following URL:\n%s\n' % approval_url)
code = raw_input('Enter the authorization code: ')
refresh_token, access_token = oauth2_client.ExchangeAuthorizationCode(
code, OOB_REDIRECT_URI, scopes)
return refresh_token
| 38.644231
| 91
| 0.736502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,927
| 0.479473
|
585b570f1181a34255df0bd7a81ffc1c67034916
| 5,311
|
py
|
Python
|
csl-tracking-dependents.py
|
Marcool04/utilities
|
d9bf0aae7decdad111fc0c8cefacf10c230ce9ee
|
[
"MIT"
] | 10
|
2015-04-14T16:49:43.000Z
|
2020-06-01T14:31:04.000Z
|
csl-tracking-dependents.py
|
Marcool04/utilities
|
d9bf0aae7decdad111fc0c8cefacf10c230ce9ee
|
[
"MIT"
] | 23
|
2015-01-20T04:13:35.000Z
|
2021-09-07T18:36:00.000Z
|
csl-tracking-dependents.py
|
Marcool04/utilities
|
d9bf0aae7decdad111fc0c8cefacf10c230ce9ee
|
[
"MIT"
] | 6
|
2015-01-10T13:00:37.000Z
|
2021-09-19T09:25:22.000Z
|
# -*- coding: utf-8 -*-
# Python script to manage automatically generated dependents
# Author: Rintze M. Zelle
# Version: 2014-04-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re, inspect, shutil
from lxml import etree
# http://stackoverflow.com/questions/50499
folderPath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentFolderPath = os.path.dirname(folderPath)
path = os.path.join(parentFolderPath, 'styles')
pathGeneratedStyles = os.path.join(parentFolderPath, 'utilities', 'generate_dependent_styles', 'generated_styles', 'aacr')
pathRemovedStyles = os.path.join(parentFolderPath, 'removed-styles')
dependentStyles = []
commentMatchingStyles = []
parentMatchingStyles = []
timestampMatchingStyles = []
generatedStyles = []
for stylepath in glob.glob( os.path.join(path, 'dependent', '*.csl') ):
dependentStyles.append(os.path.join(stylepath))
for stylepath in glob.glob( os.path.join(pathGeneratedStyles, '*.csl') ):
generatedStyles.append(os.path.basename(stylepath))
#Filter dependent styles by their parent (set A), print number
#Of set A, print style ID if XML comment doesn't match that of dependent style template
#Of set A, print style ID if timestamp doesn't match that of dependent style template
#Have a toggle to move remaining styles out of root folder
#(it would be better to filter by the XML comment on the first pass, since styles from
#a set may have different parents, but XML comments aren't currently unique to a set)
for style in dependentStyles:
parser = etree.XMLParser(remove_blank_text=True)
parsedStyle = etree.parse(style, parser)
styleElement = parsedStyle.getroot()
parentLink = styleElement.find(".//{http://purl.org/net/xbiblio/csl}link[@rel='independent-parent']")
if(parentLink.attrib.get("href") == "http://www.zotero.org/styles/american-association-for-cancer-research"):
parentMatchingStyles.append(os.path.basename(style))
comments = styleElement.xpath("//comment()", namespaces={"cs": "http://purl.org/net/xbiblio/csl"})
for comment in comments:
if(comment.text == " Generated with https://github.com/citation-style-language/utilities/tree/master/generate_dependent_styles/data/aacr "):
commentMatchingStyles.append(os.path.basename(style))
timestamp = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
if(timestamp.text == "2014-04-23T12:00:00+00:00"):
timestampMatchingStyles.append(os.path.basename(style))
print("Number of dependent styles with selected parent: " + str(len(parentMatchingStyles)))
print("Number of generated styles: " + str(len(generatedStyles)))
# iterate over a copy: removing items from a list while iterating it skips elements
for style in list(parentMatchingStyles):
    badStyle = False
    if style not in commentMatchingStyles:
        print("bad comment!: " + style)
        badStyle = True
    if style not in timestampMatchingStyles:
        print("bad timestamp!: " + style)
        badStyle = True
    if style not in generatedStyles:
        print("not generated!: " + style)
        badStyle = True
    if badStyle:
        parentMatchingStyles.remove(style)
print("Number of consistent styles: " + str(len(parentMatchingStyles)))
moveStyles = False
if moveStyles == True:
#move styles out of "styles/dependent" folder
if not os.path.exists(pathRemovedStyles):
os.makedirs(pathRemovedStyles)
for style in parentMatchingStyles:
shutil.move(os.path.join(path, 'dependent', style), os.path.join(pathRemovedStyles, style))
# counter = []
# for infoNodeIndex, infoNode in enumerate(csInfo):
# # check if node is an element
# if isinstance(infoNode.tag, basestring):
# # get rid of namespace
# infoElement = infoNode.tag.replace("{http://purl.org/net/xbiblio/csl}","")
# if(infoElement == "link"):
# infoElement += "[@" + infoNode.get("rel") + "]"
# if((infoElement == "category") & (infoNode.get("citation-format") is not None)):
# infoElement += "[@citation-format]"
# if((infoElement == "category") & (infoNode.get("field") is not None)):
# infoElement += "[@field]"
# # check if node is a comment
# elif (etree.tostring(infoNode, encoding='UTF-8', xml_declaration=False) == ("<!--" + infoNode.text.encode("utf-8") + "-->")):
# # keep comments that precede any element at the top
# if(sum(counter) == 0):
# counter.append(desiredOrder.index("preceding-comment"))
# # keep a comment at the end at the end
# elif(len(counter) == (len(csInfo) - 1)):
# counter.append(desiredOrder.index("end-comment"))
# # keep other comments with preceding element
# else:
# counter.append(counter[-1])
#
# # Possible improvements:
# # * exceptions for recognizable comments (issn, category)
# else:
# print(infoNode)
#
# # Reorder attributes on cs:link
# try:
# links = styleElement.findall(".//{http://purl.org/net/xbiblio/csl}link")
# for link in links:
# rel = link.get("rel")
# del link.attrib["rel"]
# link.set("rel",rel)
# except:
# pass
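# A standalone sketch of the parent-link lookup this script performs for each
# dependent style (the file name is illustrative):
#
#   from lxml import etree
#   CSL = '{http://purl.org/net/xbiblio/csl}'
#   tree = etree.parse('some-dependent-style.csl')
#   link = tree.getroot().find('.//' + CSL + "link[@rel='independent-parent']")
#   print(link.attrib.get('href'))   # URI of the independent parent style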
| 44.630252
| 148
| 0.656562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,147
| 0.592544
|
585f06a860286b312d33973ef25ef2866dfc0808
| 642
|
py
|
Python
|
selenium_browser/__resources/constants.py
|
kkristof200/selenium_browser
|
b8144fe935073367911e90b50f078bfa985d6c0f
|
[
"MIT"
] | 1
|
2021-06-25T06:55:43.000Z
|
2021-06-25T06:55:43.000Z
|
selenium_browser/__resources/constants.py
|
kkristof200/selenium_browser
|
b8144fe935073367911e90b50f078bfa985d6c0f
|
[
"MIT"
] | null | null | null |
selenium_browser/__resources/constants.py
|
kkristof200/selenium_browser
|
b8144fe935073367911e90b50f078bfa985d6c0f
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------- class: Constants ------------------------------------------------------- #
class Constants:
# --------------------------------------------------- Public properties -------------------------------------------------- #
USER_AGENT_FILE_NAME = 'ua.txt'
GENERAL_COOKIES_FOLDER_NAME = 'cookies'
GENERAL_PROFILE_FOLDER_NAME = 'selenium-python-profile'
DEFAULT_PROFILE_ID = 'test'
DEFAULT_FIND_FUNC_TIMEOUT = 2.5
# -------------------------------------------------------------------------------------------------------------------------------- #
| 45.857143
| 132
| 0.311526
| 373
| 0.580997
| 0
| 0
| 0
| 0
| 0
| 0
| 436
| 0.679128
|
585fbd132230f1c1b7c7d02416766ecbbe4a68a2
| 2,893
|
py
|
Python
|
api/models/__init__.py
|
victorabarros/challenge-alloy-card
|
a3188fea298541130c24ebf4639d2af4700ba362
|
[
"MIT"
] | null | null | null |
api/models/__init__.py
|
victorabarros/challenge-alloy-card
|
a3188fea298541130c24ebf4639d2af4700ba362
|
[
"MIT"
] | null | null | null |
api/models/__init__.py
|
victorabarros/challenge-alloy-card
|
a3188fea298541130c24ebf4639d2af4700ba362
|
[
"MIT"
] | null | null | null |
class Game:
def __init__(self, *args, **kwargs):
self.player_0 = 'white'
self.player_1 = 'black'
self._new_pieces_game()
self.current_player_turn = self.player_0
def _new_pieces_game(self):
        self.board = {
            0: {
                0: Piece(self.player_0, "rook", 0, (0, 0)),
                1: Piece(self.player_0, "knight", 0, (0, 1)),
                2: Piece(self.player_0, "bishop", 0, (0, 2)),
                3: Piece(self.player_0, "king", 0, (0, 3)),
                4: Piece(self.player_0, "queen", 0, (0, 4)),
                5: Piece(self.player_0, "bishop", 1, (0, 5)),
                6: Piece(self.player_0, "knight", 1, (0, 6)),
                7: Piece(self.player_0, "rook", 1, (0, 7)),
            },
            # pawn ranks are built with dict comprehensions
            1: {ii: Piece(self.player_0, "pawn", ii, (1, ii)) for ii in range(8)},
            7: {
                0: Piece(self.player_1, "rook", 0, (7, 0)),
                1: Piece(self.player_1, "knight", 0, (7, 1)),
                2: Piece(self.player_1, "bishop", 0, (7, 2)),
                3: Piece(self.player_1, "king", 0, (7, 3)),
                4: Piece(self.player_1, "queen", 0, (7, 4)),
                5: Piece(self.player_1, "bishop", 1, (7, 5)),
                6: Piece(self.player_1, "knight", 1, (7, 6)),
                7: Piece(self.player_1, "rook", 1, (7, 7)),
            },
            6: {ii: Piece(self.player_1, "pawn", ii, (6, ii)) for ii in range(8)},
        }
pieces = {
self.player_0: {
'rook': {0: self.board[0][0], 1: self.board[0][7]},
'knight': {0: self.board[0][1], 1: self.board[0][6]},
'bishop': {0: self.board[0][2], 1: self.board[0][5]},
'king': {0: self.board[0][3]},
'queen': {0: self.board[0][4]},
'pawn': {}
},
self.player_1: {
'rook': {0: self.board[7][0], 1: self.board[7][7]},
'knight': {0: self.board[7][1], 1: self.board[7][6]},
'bishop': {0: self.board[7][2], 1: self.board[7][5]},
'king': {0: self.board[7][3]},
'queen': {0: self.board[7][4]},
'pawn': {}
}
}
for ii in range(0, 8):
pieces[self.player_0]["pawn"][ii] = self.board[1][ii]
            pieces[self.player_1]["pawn"][ii] = self.board[6][ii]
self.pieces = pieces
def find_piece(self, x_coordinate, y_coordinate):
piece = self.board.get(x_coordinate, {}).get(y_coordinate)
return piece
def to_dict(self):
return {'current_player_turn': self.current_player_turn,
'pieces': self.pieces}
class Piece:
def __init__(self, player, kind, ii, coordinate):
self.player = player
self.kind = kind
self.ii = ii
self.coordinate = coordinate
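# A minimal usage sketch for the models above (illustrative only):
#
#   game = Game()
#   rook = game.find_piece(0, 0)
#   print(rook.kind, rook.player, rook.coordinate)  # rook white (0, 0)
#   print(game.find_piece(4, 4))                    # None: empty square
#   print(game.current_player_turn)                 # white moves first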
| 36.620253
| 69
| 0.444867
| 1,359
| 0.469755
| 0
| 0
| 0
| 0
| 0
| 0
| 263
| 0.090909
|
58610c3f91576fd189f2c5eb7bc06289b39922a3
| 50,976
|
py
|
Python
|
spinta/manifests/tabular/helpers.py
|
atviriduomenys/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 2
|
2019-03-14T06:41:14.000Z
|
2019-03-26T11:48:14.000Z
|
spinta/manifests/tabular/helpers.py
|
sirex/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 44
|
2019-04-05T15:52:45.000Z
|
2022-03-30T07:41:33.000Z
|
spinta/manifests/tabular/helpers.py
|
sirex/spinta
|
77a10e201f8cdc63143fce7996fd0898acb1ff58
|
[
"MIT"
] | 1
|
2019-04-01T09:54:27.000Z
|
2019-04-01T09:54:27.000Z
|
from __future__ import annotations
import csv
import pathlib
import textwrap
from operator import itemgetter
from typing import Any
from typing import Callable
from typing import Dict
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from typing import cast
import openpyxl
import xlsxwriter
from lark import ParseError
from spinta import commands
from spinta import spyna
from spinta.backends import Backend
from spinta.backends.components import BackendOrigin
from spinta.components import Context
from spinta.datasets.components import Resource
from spinta.dimensions.comments.components import Comment
from spinta.dimensions.enum.components import EnumItem
from spinta.components import Model
from spinta.components import Namespace
from spinta.components import Property
from spinta.core.enums import Access
from spinta.core.ufuncs import unparse
from spinta.datasets.components import Dataset
from spinta.dimensions.enum.components import Enums
from spinta.dimensions.lang.components import LangData
from spinta.dimensions.prefix.components import UriPrefix
from spinta.exceptions import MultipleErrors
from spinta.exceptions import PropertyNotFound
from spinta.manifests.components import Manifest
from spinta.manifests.helpers import load_manifest_nodes
from spinta.manifests.tabular.components import ACCESS
from spinta.manifests.tabular.components import BackendRow
from spinta.manifests.tabular.components import BaseRow
from spinta.manifests.tabular.components import CommentData
from spinta.manifests.tabular.components import DESCRIPTION
from spinta.manifests.tabular.components import DatasetRow
from spinta.manifests.tabular.components import ParamRow
from spinta.manifests.tabular.components import EnumRow
from spinta.manifests.tabular.components import ID
from spinta.manifests.tabular.components import MANIFEST_COLUMNS
from spinta.manifests.tabular.components import ManifestColumn
from spinta.manifests.tabular.components import ManifestRow
from spinta.manifests.tabular.components import ManifestTableRow
from spinta.manifests.tabular.components import ModelRow
from spinta.manifests.tabular.components import PREPARE
from spinta.manifests.tabular.components import PROPERTY
from spinta.manifests.tabular.components import PrefixRow
from spinta.manifests.tabular.components import PropertyRow
from spinta.manifests.tabular.components import REF
from spinta.manifests.tabular.components import ResourceRow
from spinta.manifests.tabular.components import SOURCE
from spinta.manifests.tabular.components import TITLE
from spinta.manifests.tabular.components import TabularFormat
from spinta.manifests.tabular.constants import DATASET
from spinta.manifests.tabular.formats.gsheets import read_gsheets_manifest
from spinta.spyna import SpynaAST
from spinta.types.datatype import Ref
from spinta.utils.data import take
from spinta.utils.schema import NA
from spinta.utils.schema import NotAvailable
ParsedRow = Tuple[int, Dict[str, Any]]
MAIN_DIMENSIONS = [
'dataset',
'resource',
'base',
'model',
'property',
]
EXTRA_DIMENSIONS = [
'',
'prefix',
'enum',
'param',
'comment',
'ns',
'lang',
]
class TabularManifestError(Exception):
pass
def _detect_header(
path: Optional[str],
line: int, # Line number
row: Iterable[str],
) -> List[str]:
header = [h.strip().lower() for h in row]
unknown_columns = set(header[:len(DATASET)]) - set(DATASET)
if unknown_columns:
unknown_columns = ', '.join(sorted(unknown_columns, key=header.index))
raise TabularManifestError(
f"{path}:{line}: Unknown columns: {unknown_columns}."
)
return header
def _detect_dimension(
path: Optional[pathlib.Path],
line: str, # Line number with a prefix (depends on manifest format)
row: Dict[str, str],
) -> Optional[str]:
dimensions = [k for k in MAIN_DIMENSIONS if row[k]]
if len(dimensions) == 1:
return dimensions[0]
if len(dimensions) > 1:
dimensions = ', '.join(dimensions)
        raise TabularManifestError(
            f"{path}:{line}: In one row only a single dimension can be used, "
            f"but found more than one: {dimensions}"
        )
if row['type']:
if row['type'] not in EXTRA_DIMENSIONS:
raise TabularManifestError(
f"{path}:{line}:type: Unknown additional dimension name "
f"{row['type']}."
)
return row['type']
return ''
def _parse_spyna(
reader: TabularReader,
formula: str,
) -> Union[SpynaAST, NotAvailable, None]:
if formula:
try:
return spyna.parse(formula)
except ParseError as e:
reader.error(f"Error while parsing formula {formula!r}:\n{e}")
return NA
class TabularReader:
state: State
path: str
line: str
type: str
name: str
data: ManifestRow # Used when `appendable` is False
rows: List[Dict[str, Any]] # Used when `appendable` is True
appendable: bool = False # Tells if reader is appendable.
def __init__(
self,
state: State,
path: str,
line: str,
):
self.state = state
self.path = path
self.line = line
self.data = {}
self.rows = []
def __str__(self):
return f"<{type(self).__name__} name={self.name!r}>"
def read(self, row: Dict[str, str]) -> None:
raise NotImplementedError
def append(self, row: Dict[str, str]) -> None:
if any(row.values()):
self.error(
f"Updates are not supported in context of {self.type!r}."
)
def release(self, reader: TabularReader = None) -> bool:
raise NotImplementedError
def items(self) -> Iterator[ParsedRow]:
if self.appendable:
for data in self.rows:
yield self.line, data
else:
yield self.line, self.data
def enter(self) -> None:
raise NotImplementedError
def leave(self) -> None:
raise NotImplementedError
def error(self, message: str) -> None:
raise TabularManifestError(f"{self.path}:{self.line}: {message}")
class ManifestReader(TabularReader):
type: str = 'manifest'
datasets: Set[str]
namespaces: Set[str]
data: ManifestTableRow
def read(self, row: ManifestRow) -> None:
self.name = str(self.path)
self.data = {
'type': 'manifest',
}
def release(self, reader: TabularReader = None) -> bool:
return reader is None
def enter(self) -> None:
self.datasets = set()
self.namespaces = set()
self.state.manifest = self
def leave(self) -> None:
self.state.manifest = None
class DatasetReader(TabularReader):
type: str = 'dataset'
data: DatasetRow
def read(self, row: Dict[str, str]) -> None:
self.name = row['dataset']
if row['dataset'] in self.state.manifest.datasets:
self.error("Dataset already defined.")
self.data = {
'type': 'dataset',
'id': row['id'],
'name': row['dataset'],
'level': row['level'],
'access': row['access'],
'title': row['title'],
'description': row['description'],
'resources': {},
}
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
))
def enter(self) -> None:
self.state.dataset = self
def leave(self) -> None:
self.state.dataset = None
class ResourceReader(TabularReader):
type: str = 'resource'
data: Union[BackendRow, ResourceRow]
def read(self, row: Dict[str, str]) -> None:
self.name = row['resource']
if self.state.dataset is None:
self.read_backend(row)
else:
self.read_resource(row)
def read_backend(self, row: Dict[str, str]) -> None:
# Backends will be loaded using
# `spinta.manifests.helpers._load_manifest_backends`.
if 'backends' not in self.state.manifest.data:
self.state.manifest.data['backends'] = {}
backends = self.state.manifest.data['backends']
if self.name in backends:
self.error(
f"Backend {self.name!r} with the same name already defined."
)
self.data = {
'type': row['type'],
'name': self.name,
'dsn': row['source'],
'title': row['title'],
'description': row['description'],
}
backends[self.name] = self.data
def read_resource(self, row: Dict[str, str]) -> None:
dataset = self.state.dataset.data
if self.name in dataset['resources']:
            self.error("Resource with the same name is already defined in this dataset.")
self.data = {
'type': row['type'],
'backend': row['ref'],
'external': row['source'],
'prepare': _parse_spyna(self, row[PREPARE]),
'level': row['level'],
'access': row['access'],
'title': row['title'],
'description': row['description'],
}
dataset['resources'][self.name] = self.data
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
))
def enter(self) -> None:
self.state.resource = self
def leave(self) -> None:
self.state.resource = None
class BaseReader(TabularReader):
type: str = 'base'
data: BaseRow
def read(self, row: Dict[str, str]) -> None:
self.name = row['base']
dataset = self.state.dataset.data if self.state.dataset else None
self.data = {
'model': get_relative_model_name(dataset, row['base']),
'pk': row['ref'],
}
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
BaseReader,
))
def enter(self) -> None:
self.state.base = self
def leave(self) -> None:
self.state.base = None
class ModelReader(TabularReader):
type: str = 'model'
data: ModelRow
def read(self, row: Dict[str, str]) -> None:
dataset = self.state.dataset
resource = self.state.resource
base = self.state.base
name = get_relative_model_name(
dataset.data if dataset else None,
row['model'],
)
if self.state.rename_duplicates:
dup = 1
_name = name
while _name in self.state.models:
_name = f'{name}_{dup}'
dup += 1
name = _name
elif name in self.state.models:
self.error(f"Model {name!r} with the same name is already defined.")
self.name = name
self.data = {
'type': 'model',
'id': row['id'],
'name': name,
'base': base.name if base else None,
'level': row['level'],
'access': row['access'],
'title': row['title'],
'description': row['description'],
'properties': {},
'external': {
'dataset': dataset.name if dataset else '',
'resource': resource.name if dataset and resource else '',
'pk': (
[x.strip() for x in row['ref'].split(',')]
if row['ref'] else []
),
'name': row['source'],
'prepare': _parse_spyna(self, row[PREPARE]),
},
}
if resource and not dataset:
self.data['backend'] = resource.name
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
BaseReader,
ModelReader,
))
def enter(self) -> None:
self.state.model = self
self.state.models.add(self.name)
def leave(self) -> None:
self.state.model = None
def _parse_property_ref(ref: str) -> Tuple[str, List[str]]:
if '[' in ref:
ref = ref.rstrip(']')
ref_model, ref_props = ref.split('[', 1)
ref_props = [p.strip() for p in ref_props.split(',')]
else:
ref_model = ref
ref_props = []
return ref_model, ref_props
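# A quick illustration of the ref syntax handled above (a sketch, not part of
# the module): a bare model name, or a model name with explicit ref props.
#
#   >>> _parse_property_ref('Country')
#   ('Country', [])
#   >>> _parse_property_ref('Country[code, name]')
#   ('Country', ['code', 'name'])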
class PropertyReader(TabularReader):
type: str = 'property'
data: PropertyRow
enums: Set[str]
def read(self, row: Dict[str, str]) -> None:
self.name = row['property']
if self.state.model is None:
context = self.state.stack[-1]
self.error(
f"Property {self.name!r} must be defined in a model context. "
f"Now it is defined in {context.name!r} {context.type} context."
)
if row['property'] in self.state.model.data['properties']:
self.error(
f"Property {self.name!r} with the same name is already "
f"defined for this {self.state.model.name!r} model."
)
self.data = {
'type': row['type'],
'prepare': _parse_spyna(self, row[PREPARE]),
'level': row['level'],
'access': row['access'],
'uri': row['uri'],
'title': row['title'],
'description': row['description'],
}
dataset = self.state.dataset.data if self.state.dataset else None
if row['ref']:
if row['type'] in ('ref', 'backref', 'generic'):
ref_model, ref_props = _parse_property_ref(row['ref'])
self.data['model'] = get_relative_model_name(dataset, ref_model)
self.data['refprops'] = ref_props
else:
# TODO: Detect if ref is a unit or an enum.
self.data['enum'] = row['ref']
if dataset or row['source']:
self.data['external'] = {
'name': row['source'],
'prepare': self.data.pop('prepare'),
}
self.state.model.data['properties'][row['property']] = self.data
def release(self, reader: TabularReader = None) -> bool:
return reader is None or isinstance(reader, (
ManifestReader,
DatasetReader,
ResourceReader,
BaseReader,
ModelReader,
PropertyReader,
))
def enter(self) -> None:
self.state.prop = self
def leave(self) -> None:
self.state.prop = None
class AppendReader(TabularReader):
type: str = 'append'
data: ManifestRow
def read(self, row: ManifestRow) -> None:
self.name = row[REF]
self.data = row
def release(self, reader: TabularReader = None) -> bool:
return True
def enter(self) -> None:
pass
def leave(self) -> None:
self.state.stack[-1].append(self.data)
class PrefixReader(TabularReader):
type: str = 'prefix'
data: PrefixRow
def read(self, row: Dict[str, str]) -> None:
if not row['ref']:
# `ref` is a required parameter.
return
self.name = row['ref']
node = (
self.state.prop or
self.state.model or
self.state.base or
self.state.resource or
self.state.dataset or
self.state.manifest
)
if 'prefixes' not in node.data:
node.data['prefixes'] = {}
prefixes = node.data['prefixes']
if self.name in prefixes:
self.error(
f"Prefix {self.name!r} with the same name is already "
f"defined for this {node.name!r} {node.type}."
)
self.data = {
'id': row['id'],
'eid': f'{self.path}:{self.line}',
'type': self.type,
'name': self.name,
'uri': row['uri'],
'title': row['title'],
'description': row['description'],
}
prefixes[self.name] = self.data
def append(self, row: Dict[str, str]) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class NamespaceReader(TabularReader):
type: str = 'ns'
appendable: bool = True
def read(self, row: Dict[str, str]) -> None:
if not row['ref']:
# `ref` is a required parameter.
return
self.name = row['ref']
manifest = self.state.manifest
if self.name in manifest.namespaces:
self.error(
f"Namespace {self.name!r} with the same name is already "
f"defined."
)
manifest.namespaces.add(self.name)
self.rows.append({
'id': row['id'],
'type': self.type,
'name': self.name,
'title': row['title'],
'description': row['description'],
})
def append(self, row: Dict[str, str]) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class ParamReader(TabularReader):
type: str = 'param'
data: ParamRow
name: str = None
def _get_node(self) -> TabularReader:
return (
self.state.prop or
self.state.model or
self.state.base or
self.state.resource or
self.state.dataset or
self.state.manifest
)
def _get_data(self, name: str, row: ManifestRow):
return {
'name': name,
'source': [row[SOURCE]],
'prepare': [_parse_spyna(self, row[PREPARE])],
'title': row[TITLE],
'description': row[DESCRIPTION],
}
def _ensure_params_list(self, node: TabularReader, name: str) -> None:
if 'params' not in node.data:
node.data['params'] = {}
if name not in node.data['params']:
node.data['params'][name] = []
def _check_param_name(self, node: TabularReader, name: str) -> None:
if 'params' in node.data and name in node.data['params']:
self.error(
f"Parameter {name!r} with the same name already defined!"
)
def read(self, row: ManifestRow) -> None:
node = self._get_node()
self.name = row[REF]
if not self.name:
self.error("Parameter must have a name.")
self._check_param_name(node, self.name)
self._ensure_params_list(node, self.name)
self.data = self._get_data(self.name, row)
node.data['params'][self.name].append(self.data)
def append(self, row: ManifestRow) -> None:
node = self._get_node()
if row[REF]:
self.name = row[REF]
self._check_param_name(node, self.name)
self._ensure_params_list(node, self.name)
self.data = self._get_data(self.name, row)
node.data['params'][self.name].append(self.data)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, (AppendReader, LangReader))
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class EnumReader(TabularReader):
type: str = 'enum'
data: EnumRow
name: str = None
def read(self, row: ManifestRow) -> None:
if row[REF]:
self.name = row[REF]
else:
self.name = self.name or ''
if not any([
row[SOURCE],
row[PREPARE],
row[ACCESS],
row[TITLE],
row[DESCRIPTION],
]):
return
# source = row[SOURCE] if row[SOURCE] is not None else row[PREPARE]
source = str(row[SOURCE]) or row[PREPARE]
if not source:
            self.error(
                "At least one of source or prepare must be specified "
                "for an enum."
            )
self.data = {
'name': self.name,
'source': row[SOURCE],
'prepare': _parse_spyna(self, row[PREPARE]),
'access': row[ACCESS],
'title': row[TITLE],
'description': row[DESCRIPTION],
}
node = (
self.state.prop or
self.state.model or
self.state.base or
self.state.resource or
self.state.dataset or
self.state.manifest
)
if 'enums' not in node.data:
node.data['enums'] = {}
if self.name not in node.data['enums']:
node.data['enums'][self.name] = {}
enum = node.data['enums'][self.name]
if source in enum:
self.error(
f"Enum {self.name!r} item {source!r} with the same value is "
f"already defined."
)
enum[source] = self.data
def append(self, row: ManifestRow) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, (AppendReader, LangReader))
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class LangReader(TabularReader):
type: str = 'lang'
def read(self, row: ManifestRow) -> None:
reader = self.state.stack[-1]
if not isinstance(reader, (
DatasetReader,
ResourceReader,
BaseReader,
ModelReader,
PropertyReader,
EnumReader,
)):
self.error(f'Language metadata is not supported on {reader.type}.')
return
if 'lang' not in reader.data:
reader.data['lang'] = {}
lang = reader.data['lang']
self.name = row[REF]
if self.name in lang:
self.error(
f"Language {self.name!r} with the same name is already "
f"defined for this {reader.name!r} {reader.type}."
)
lang[self.name] = {
'id': row[ID],
'eid': f'{self.path}:{self.line}',
'type': self.type,
'ref': self.name,
'title': row[TITLE],
'description': row[DESCRIPTION],
}
def append(self, row: ManifestRow) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
class CommentReader(TabularReader):
type: str = 'comment'
data: CommentData
def read(self, row: ManifestRow) -> None:
reader = self.state.stack[-1]
if 'comments' not in reader.data:
reader.data['comments'] = []
comments = reader.data['comments']
comments.append({
'id': row[ID],
'parent': row[REF],
'author': row[SOURCE],
'access': row[ACCESS],
# TODO: parse datetime
'created': row[TITLE],
'comment': row[DESCRIPTION],
})
def append(self, row: ManifestRow) -> None:
self.read(row)
def release(self, reader: TabularReader = None) -> bool:
return not isinstance(reader, AppendReader)
def enter(self) -> None:
pass
def leave(self) -> None:
pass
READERS = {
# Main dimensions
'dataset': DatasetReader,
'resource': ResourceReader,
'base': BaseReader,
'model': ModelReader,
'property': PropertyReader,
# Extra dimensions
'': AppendReader,
'prefix': PrefixReader,
'ns': NamespaceReader,
'param': ParamReader,
'enum': EnumReader,
'lang': LangReader,
'comment': CommentReader,
}
class State:
stack: List[TabularReader]
backends: Dict[str, Dict[str, str]] = None
models: Set[str]
manifest: ManifestReader = None
dataset: DatasetReader = None
resource: ResourceReader = None
base: BaseReader = None
model: ModelReader = None
prop: PropertyReader = None
rename_duplicates: bool = False
def __init__(self):
self.stack = []
self.models = set()
def release(self, reader: TabularReader = None) -> Iterator[ParsedRow]:
for parent in list(reversed(self.stack)):
if parent.release(reader):
if isinstance(parent, (
ManifestReader,
NamespaceReader,
DatasetReader,
ModelReader,
)):
yield from parent.items()
self.stack.pop()
parent.leave()
else:
break
if reader:
reader.enter()
self.stack.append(reader)
def _read_tabular_manifest_rows(
path: Optional[str],
rows: Iterator[Tuple[str, List[str]]],
*,
rename_duplicates: bool = True,
) -> Iterator[ParsedRow]:
_, header = next(rows, (None, None))
if header is None:
# Looks like an empty file.
return
header = _detect_header(path, 1, header)
defaults = {k: '' for k in MANIFEST_COLUMNS}
state = State()
state.rename_duplicates = rename_duplicates
reader = ManifestReader(state, path, '1')
reader.read({})
yield from state.release(reader)
for line, row in rows:
row = dict(zip(header, row))
row = {**defaults, **row}
dimension = _detect_dimension(path, line, row)
Reader = READERS[dimension]
reader = Reader(state, path, line)
reader.read(row)
yield from state.release(reader)
yield from state.release()
def read_tabular_manifest(
format_: TabularFormat = None,
*,
path: str = None,
file: IO = None,
rename_duplicates: bool = False,
) -> Iterator[ParsedRow]:
if format_ == TabularFormat.GSHEETS:
rows = read_gsheets_manifest(path)
elif format_ == TabularFormat.CSV:
rows = _read_csv_manifest(path, file)
elif format_ == TabularFormat.ASCII:
rows = _read_txt_manifest(path, file)
elif format_ == TabularFormat.XLSX:
rows = _read_xlsx_manifest(path)
else:
raise ValueError(f"Unknown tabular manifest format {format_!r}.")
yield from _read_tabular_manifest_rows(
path,
rows,
rename_duplicates=rename_duplicates,
)
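# Added usage sketch (not part of the original module). The manifest path is
# hypothetical; the format must always be passed explicitly, as dispatched above.
def _example_read_tabular_manifest():  # illustration only, never called
    for parsed in read_tabular_manifest(TabularFormat.CSV, path='manifest.csv'):
        print(parsed)  # one ParsedRow per released reader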
def _read_txt_manifest(
path: str,
file: IO[str] = None,
) -> Iterator[Tuple[str, List[str]]]:
if file:
yield from _read_ascii_tabular_manifest(file)
else:
with pathlib.Path(path).open(encoding='utf-8-sig') as f:
yield from _read_ascii_tabular_manifest(f)
def _read_csv_manifest(
path: str,
file: IO[str] = None,
) -> Iterator[Tuple[str, List[str]]]:
if file:
rows = csv.reader(file)
for i, row in enumerate(rows, 1):
yield str(i), row
else:
with pathlib.Path(path).open(encoding='utf-8-sig') as f:
rows = csv.reader(f)
for i, row in enumerate(rows, 1):
yield str(i), row
def _read_xlsx_manifest(path: str) -> Iterator[Tuple[str, List[str]]]:
wb = openpyxl.load_workbook(path)
yield '1', DATASET
for sheet in wb:
rows = sheet.iter_rows(values_only=True)
cols = next(rows, None)
if cols is None:
continue
cols = normalizes_columns(cols)
cols = [cols.index(c) if c in cols else None for c in DATASET]
for i, row in enumerate(rows, 2):
row = [row[c] if c is not None else None for c in cols]
yield f'{sheet.title}:{i}', row
def striptable(table):
return textwrap.dedent(table).strip()
def _join_escapes(row: List[str]) -> List[str]:
res = []
for v in row:
if res and res[-1] and res[-1].endswith('\\'):
res[-1] = res[-1][:-1] + '|' + v
else:
res.append(v)
return res
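# Added example (not part of the original module): a trailing backslash in a
# cell escapes the `|` separator, so the escaped cell is merged with the next.
def _example_join_escapes():  # illustration only, never called
    assert _join_escapes(['a\\', 'b']) == ['a|b']
    assert _join_escapes(['a', 'b']) == ['a', 'b']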
def _read_ascii_tabular_manifest(
lines: Iterable[str],
*,
check_column_names: bool = True,
) -> Iterator[Tuple[str, List[str]]]:
lines = (line.strip() for line in lines)
lines = filter(None, lines)
# Read header
header = next(lines, None)
if header is None:
return
header = normalizes_columns(
header.split('|'),
check_column_names=check_column_names,
)
yield '1', header
# Find index where dimension columns end.
dim = sum(1 for h in header if h in DATASET[:6])
for i, line in enumerate(lines, 2):
row = _join_escapes(line.split('|'))
row = [x.strip() for x in row]
row = row[:len(header)]
rem = len(header) - len(row)
row = row[:dim - rem] + [''] * rem + row[dim - rem:]
assert len(header) == len(row), line
yield str(i), row
def read_ascii_tabular_rows(
manifest: str,
*,
strip: bool = False,
check_column_names: bool = True,
) -> Iterator[List[str]]:
if strip:
manifest = striptable(manifest)
rows = _read_ascii_tabular_manifest(
manifest.splitlines(),
check_column_names=check_column_names,
)
for line, row in rows:
yield row
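# Added usage sketch (not part of the original module). The table below is
# hypothetical; the first yielded row is always the normalized header.
def _example_read_ascii_tabular_rows():  # illustration only, never called
    table = striptable('''
        d | r | b | m | property | type
        datasets/gov | | | | |
    ''')
    for row in read_ascii_tabular_rows(table):
        print(row)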
def read_ascii_tabular_manifest(
manifest: str,
*,
strip: bool = False,
rename_duplicates: bool = False,
) -> Iterator[ParsedRow]:
if strip:
manifest = striptable(manifest)
rows = _read_ascii_tabular_manifest(manifest.splitlines())
yield from _read_tabular_manifest_rows(
None,
rows,
rename_duplicates=rename_duplicates,
)
def load_ascii_tabular_manifest(
context: Context,
manifest: Manifest,
manifest_ascii_table: str,
*,
strip: bool = False,
) -> None:
schemas = read_ascii_tabular_manifest(manifest_ascii_table, strip=strip)
load_manifest_nodes(context, manifest, schemas)
commands.link(context, manifest)
def get_relative_model_name(dataset: dict, name: str) -> str:
if name.startswith('/'):
return name[1:]
elif dataset is None:
return name
else:
return '/'.join([
dataset['name'],
name,
])
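# Added example (not part of the original module): names starting with '/' are
# treated as absolute, everything else is prefixed with the dataset name.
def _example_relative_model_name():  # illustration only, never called
    assert get_relative_model_name({'name': 'datasets/gov'}, '/City') == 'City'
    assert get_relative_model_name({'name': 'datasets/gov'}, 'City') == 'datasets/gov/City'
    assert get_relative_model_name(None, 'City') == 'City'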
def to_relative_model_name(model: Model, dataset: Dataset = None) -> str:
"""Convert absolute model `name` to relative."""
if dataset is None:
return model.name
if model.name.startswith(dataset.name):
prefix = dataset.name
return model.name[len(prefix) + 1:]
else:
return '/' + model.name
def tabular_eid(model: Model):
if isinstance(model.eid, int):
return model.eid
else:
return 0
class OrderBy(NamedTuple):
func: Callable[[Union[Dataset, Model, Property, EnumItem]], Any]
reverse: bool = False
def _order_datasets_by_access(dataset: Dataset):
return dataset.access or Access.private
def _order_datasets_by_name(dataset: Dataset):
return dataset.name
DATASETS_ORDER_BY = {
'access': OrderBy(_order_datasets_by_access, reverse=True),
'default': OrderBy(_order_datasets_by_name),
}
def _order_models_by_access(model: Model):
return model.access or Access.private
MODELS_ORDER_BY = {
'access': OrderBy(_order_models_by_access, reverse=True),
'default': OrderBy(tabular_eid),
}
def _order_properties_by_access(prop: Property):
return prop.access or Access.private
PROPERTIES_ORDER_BY = {
'access': OrderBy(_order_properties_by_access, reverse=True),
}
T = TypeVar('T', Dataset, Model, Property, EnumItem)
def sort(
ordering: Dict[str, OrderBy],
items: Iterable[T],
order_by: Optional[str],
) -> Iterable[T]:
order: Optional[OrderBy] = None
if order_by:
order = ordering[order_by]
elif 'default' in ordering:
order = ordering['default']
if order:
return sorted(items, key=order.func, reverse=order.reverse)
else:
return items
def _prefixes_to_tabular(
prefixes: Dict[str, UriPrefix],
*,
separator: bool = False,
) -> Iterator[ManifestRow]:
first = True
for name, prefix in prefixes.items():
yield torow(DATASET, {
'id': prefix.id,
'type': prefix.type if first else '',
'ref': name,
'uri': prefix.uri,
'title': prefix.title,
'description': prefix.description,
})
first = False
if separator and prefixes:
yield torow(DATASET, {})
def _backends_to_tabular(
backends: Dict[str, Backend],
*,
separator: bool = False,
) -> Iterator[ManifestRow]:
for name, backend in backends.items():
yield torow(DATASET, {
'type': backend.type,
'resource': name,
'source': backend.config.get('dsn'),
})
if separator and backends:
yield torow(DATASET, {})
def _namespaces_to_tabular(
namespaces: Dict[str, Namespace],
*,
separator: bool = False,
) -> Iterator[ManifestRow]:
namespaces = {
k: ns
for k, ns in namespaces.items() if not ns.generated
}
first = True
for name, ns in namespaces.items():
yield torow(DATASET, {
'type': ns.type if first else '',
'ref': name,
'title': ns.title,
'description': ns.description,
})
first = False
if separator and namespaces:
yield torow(DATASET, {})
def _order_enums_by_access(item: EnumItem):
return item.access or Access.private
ENUMS_ORDER_BY = {
'access': OrderBy(_order_enums_by_access, reverse=True),
}
def _enums_to_tabular(
enums: Optional[Enums],
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
separator: bool = False,
) -> Iterator[ManifestRow]:
if enums is None:
return
for name, enum in enums.items():
first = True
items = sort(ENUMS_ORDER_BY, enum.values(), order_by)
for item in items:
if item.access is not None and item.access < access:
continue
yield torow(DATASET, {
'type': 'enum' if first else '',
'ref': name if first else '',
'source': item.source if external else '',
'prepare': unparse(item.prepare),
'access': item.given.access,
'title': item.title,
'description': item.description,
})
if lang := list(_lang_to_tabular(item.lang)):
first = True
yield from lang
else:
first = False
if separator and enums:
yield torow(DATASET, {})
def _lang_to_tabular(
lang: Optional[LangData],
) -> Iterator[ManifestRow]:
if lang is None:
return
first = True
for name, data in sorted(lang.items(), key=itemgetter(0)):
yield torow(DATASET, {
'type': 'lang' if first else '',
'ref': name if first else '',
'title': data['title'],
'description': data['description'],
})
first = False
def _comments_to_tabular(
comments: Optional[List[Comment]],
*,
access: Access = Access.private,
) -> Iterator[ManifestRow]:
if comments is None:
return
first = True
for comment in comments:
if comment.access < access:
return
yield torow(DATASET, {
'id': comment.id,
'type': 'comment' if first else '',
'ref': comment.parent,
'source': comment.author,
'access': comment.given.access,
'title': comment.created,
'description': comment.comment,
})
first = False
def _dataset_to_tabular(
dataset: Dataset,
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
yield torow(DATASET, {
'id': dataset.id,
'dataset': dataset.name,
'level': dataset.level,
'access': dataset.given.access,
'title': dataset.title,
'description': dataset.description,
})
yield from _lang_to_tabular(dataset.lang)
yield from _prefixes_to_tabular(dataset.prefixes, separator=True)
yield from _enums_to_tabular(
dataset.ns.enums,
external=external,
access=access,
order_by=order_by,
)
def _resource_to_tabular(
resource: Resource,
*,
external: bool = True,
) -> Iterator[ManifestRow]:
backend = resource.backend
yield torow(DATASET, {
'resource': resource.name,
'source': resource.external if external else '',
'prepare': unparse(resource.prepare or NA) if external else '',
'type': resource.type,
'ref': (
backend.name
if (
external and
backend and
backend.origin != BackendOrigin.resource
)
else ''
),
'level': resource.level,
'access': resource.given.access,
'title': resource.title,
'description': resource.description,
})
yield from _lang_to_tabular(resource.lang)
def _property_to_tabular(
prop: Property,
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
if prop.name.startswith('_'):
return
if prop.access < access:
return
data = {
'property': prop.place,
'type': prop.dtype.name,
'level': prop.level,
'access': prop.given.access,
'uri': prop.uri,
'title': prop.title,
'description': prop.description,
}
if external and prop.external:
if isinstance(prop.external, list):
# data['source'] = ', '.join(x.name for x in prop.external)
# data['prepare'] = ', '.join(
# unparse(x.prepare or NA)
# for x in prop.external if x.prepare
# )
raise DeprecationWarning(
"Source can't be a list, use prepare instead."
)
elif prop.external:
data['source'] = prop.external.name
data['prepare'] = unparse(prop.external.prepare or NA)
if isinstance(prop.dtype, Ref):
model = prop.model
if model.external and model.external.dataset:
data['ref'] = to_relative_model_name(
prop.dtype.model,
model.external.dataset,
)
pkeys = prop.dtype.model.external.pkeys
rkeys = prop.dtype.refprops
if rkeys and pkeys != rkeys:
rkeys = ', '.join([p.place for p in rkeys])
data['ref'] += f'[{rkeys}]'
else:
data['ref'] = prop.dtype.model.name
elif prop.enum is not None:
data['ref'] = prop.given.enum
elif prop.unit is not None:
data['ref'] = prop.given.unit
yield torow(DATASET, data)
yield from _comments_to_tabular(prop.comments, access=access)
yield from _lang_to_tabular(prop.lang)
yield from _enums_to_tabular(
prop.enums,
external=external,
access=access,
order_by=order_by,
)
def _model_to_tabular(
model: Model,
*,
external: bool = True,
access: Access = Access.private,
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
data = {
'id': model.id,
'model': model.name,
'level': model.level,
'access': model.given.access,
'title': model.title,
'description': model.description,
}
if model.external and model.external.dataset:
data['model'] = to_relative_model_name(
model,
model.external.dataset,
)
if external and model.external:
data.update({
'source': model.external.name,
'prepare': unparse(model.external.prepare or NA),
})
if (
not model.external.unknown_primary_key and
all(p.access >= access for p in model.external.pkeys)
):
# Add `ref` only if all properties are available in the
# resulting manifest.
data['ref'] = ', '.join([
p.name for p in model.external.pkeys
])
yield torow(DATASET, data)
yield from _comments_to_tabular(model.comments, access=access)
yield from _lang_to_tabular(model.lang)
props = sort(PROPERTIES_ORDER_BY, model.properties.values(), order_by)
for prop in props:
yield from _property_to_tabular(
prop,
external=external,
access=access,
order_by=order_by,
)
def datasets_to_tabular(
manifest: Manifest,
*,
    external: bool = True,  # if False, blank out the source and prepare columns
access: Access = Access.private,
    internal: bool = False,  # include internal models with a _ prefix, like _txn
order_by: ManifestColumn = None,
) -> Iterator[ManifestRow]:
yield from _prefixes_to_tabular(manifest.prefixes, separator=True)
yield from _backends_to_tabular(manifest.backends, separator=True)
yield from _namespaces_to_tabular(manifest.namespaces, separator=True)
yield from _enums_to_tabular(
manifest.enums,
external=external,
access=access,
order_by=order_by,
separator=True,
)
seen_datasets = set()
dataset = None
resource = None
models = manifest.models if internal else take(manifest.models)
models = sort(MODELS_ORDER_BY, models.values(), order_by)
separator = False
for model in models:
if model.access < access:
continue
if model.external:
if dataset is None or dataset.name != model.external.dataset.name:
dataset = model.external.dataset
if dataset:
seen_datasets.add(dataset.name)
resource = None
separator = True
yield from _dataset_to_tabular(
dataset,
external=external,
access=access,
order_by=order_by,
)
if external and model.external and model.external.resource and (
resource is None or
resource.name != model.external.resource.name
):
resource = model.external.resource
if resource:
separator = True
yield from _resource_to_tabular(resource, external=external)
if separator:
yield torow(DATASET, {})
else:
separator = False
yield from _model_to_tabular(
model,
external=external,
access=access,
order_by=order_by,
)
datasets = sort(DATASETS_ORDER_BY, manifest.datasets.values(), order_by)
for dataset in datasets:
if dataset.name in seen_datasets:
continue
yield from _dataset_to_tabular(
dataset,
external=external,
access=access,
order_by=order_by,
)
for resource in dataset.resources.values():
yield from _resource_to_tabular(resource)
def torow(keys, values) -> ManifestRow:
return {k: values.get(k) for k in keys}
def render_tabular_manifest(
manifest: Manifest,
cols: List[ManifestColumn] = None,
*,
sizes: Dict[ManifestColumn, int] = None,
) -> str:
rows = datasets_to_tabular(manifest)
return render_tabular_manifest_rows(rows, cols, sizes=sizes)
def render_tabular_manifest_rows(
rows: Iterable[ManifestRow],
cols: List[ManifestColumn] = None,
*,
sizes: Dict[ManifestColumn, int] = None,
) -> str:
cols = cols or MANIFEST_COLUMNS
hs = 1 if ID in cols else 0 # hierarchical cols start
he = cols.index(PROPERTY) # hierarchical cols end
hsize = 1 # hierarchical column size
bsize = 3 # border size
if sizes is None:
sizes = dict(
[(c, len(c)) for c in cols[:hs]] +
[(c, 1) for c in cols[hs:he]] +
[(c, len(c)) for c in cols[he:]]
)
rows = list(rows)
for row in rows:
for i, col in enumerate(cols):
val = '' if row[col] is None else str(row[col])
if col == ID:
sizes[col] = 2
elif i < he:
size = (hsize + bsize) * (he - hs - i) + sizes[PROPERTY]
if size < len(val):
sizes[PROPERTY] += len(val) - size
elif sizes[col] < len(val):
sizes[col] = len(val)
line = []
for col in cols:
size = sizes[col]
line.append(col[:size].ljust(size))
lines = [line]
for row in rows:
if ID in cols:
line = [row[ID][:2] if row[ID] else ' ']
else:
line = []
for i, col in enumerate(cols[hs:he + 1]):
val = row[col] or ''
if val:
depth = i
break
else:
val = ''
depth = 0
line += [' ' * hsize] * depth
size = (hsize + bsize) * (he - hs - depth) + sizes[PROPERTY]
line += [val.ljust(size)]
for col in cols[he + 1:]:
val = '' if row[col] is None else str(row[col])
val = val.replace('|', '\\|')
size = sizes[col]
line.append(val.ljust(size))
lines.append(line)
lines = [' | '.join(line) for line in lines]
lines = [l.rstrip() for l in lines]
return '\n'.join(lines)
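# Added usage sketch (not part of the original module); assumes DATASET holds
# the default manifest columns, as used by torow() throughout this module.
def _example_render_rows():  # illustration only, never called
    rows = [
        torow(DATASET, {'dataset': 'datasets/gov'}),
        torow(DATASET, {'model': 'datasets/gov/City'}),
    ]
    print(render_tabular_manifest_rows(rows, cols=DATASET))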
SHORT_NAMES = {
'd': 'dataset',
'r': 'resource',
'b': 'base',
'm': 'model',
'p': 'property',
't': 'type',
}
def normalizes_columns(
cols: List[str],
*,
check_column_names: bool = True,
) -> List[ManifestColumn]:
result: List[ManifestColumn] = []
unknown: List[str] = []
invalid: List[str] = []
for col in cols:
col = col or ''
col = col.strip().lower()
col = SHORT_NAMES.get(col, col)
col = cast(ManifestColumn, col)
if col not in MANIFEST_COLUMNS:
unknown.append(col)
else:
if unknown:
result += unknown
invalid += unknown
unknown = []
result.append(col)
if check_column_names and invalid:
if len(invalid) == 1:
raise PropertyNotFound(property=invalid[0])
else:
raise MultipleErrors(
PropertyNotFound(property=col) for col in invalid
)
return result
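# Added example (not part of the original module): single-letter short names
# are expanded via SHORT_NAMES before validation.
def _example_normalizes_columns():  # illustration only, never called
    assert normalizes_columns(['d', 'r', 'b', 'm', 'property', 'type']) == [
        'dataset', 'resource', 'base', 'model', 'property', 'type',
    ]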
def write_tabular_manifest(
path: str,
rows: Union[
Manifest,
Iterable[ManifestRow],
None,
] = None,
cols: List[ManifestColumn] = None,
) -> None:
cols = cols or DATASET
if rows is None:
rows = []
elif isinstance(rows, Manifest):
rows = datasets_to_tabular(rows)
rows = ({c: row[c] for c in cols} for row in rows)
if path.endswith('.csv'):
_write_csv(pathlib.Path(path), rows, cols)
elif path.endswith('.xlsx'):
_write_xlsx(pathlib.Path(path), rows, cols)
else:
raise ValueError(f"Unknown tabular manifest format {path!r}.")
def _write_csv(
path: pathlib.Path,
rows: Iterator[ManifestRow],
cols: List[ManifestColumn],
) -> None:
    with path.open('w', newline='') as f:  # newline='' avoids blank rows on Windows
writer = csv.DictWriter(f, fieldnames=cols)
writer.writeheader()
writer.writerows(rows)
def _write_xlsx(
path: pathlib.Path,
rows: Iterator[ManifestRow],
cols: List[ManifestColumn],
) -> None:
workbook = xlsxwriter.Workbook(path, {
'strings_to_formulas': False,
'strings_to_urls': False,
})
bold = workbook.add_format({'bold': True})
formats = {
'id': workbook.add_format({
'align': 'right',
'valign': 'top',
}),
'dataset': workbook.add_format({
'bold': True,
'valign': 'top',
'font_color': '#127622',
}),
'resource': workbook.add_format({
'valign': 'top',
}),
'base': workbook.add_format({
'valign': 'top',
}),
'model': workbook.add_format({
'bold': True,
'valign': 'top',
'font_color': '#127622',
}),
'property': workbook.add_format({
'valign': 'top',
'font_color': '#127622',
}),
'type': workbook.add_format({
'valign': 'top',
}),
'ref': workbook.add_format({
'valign': 'top',
'font_color': '#127622',
}),
'source': workbook.add_format({
'valign': 'top',
'font_color': '#c9211e',
}),
'prepare': workbook.add_format({
'valign': 'top',
'font_color': '#c9211e',
}),
'level': workbook.add_format({
'valign': 'top',
}),
'access': workbook.add_format({
'valign': 'top',
}),
'uri': workbook.add_format({
'valign': 'top',
'font_color': '#284f80',
}),
'title': workbook.add_format({
'valign': 'top',
'text_wrap': True,
}),
'description': workbook.add_format({
'valign': 'top',
'text_wrap': True,
}),
}
sheet = workbook.add_worksheet()
sheet.freeze_panes(1, 0) # Freeze the first row.
sheet.set_column('A:E', 2) # id, d, r, b, m
sheet.set_column('F:F', 20) # property
sheet.set_column('I:J', 20) # source, prepare
sheet.set_column('N:N', 20) # title
sheet.set_column('O:O', 30) # description
for j, col in enumerate(cols):
sheet.write(0, j, col, bold)
for i, row in enumerate(rows, 1):
for j, col in enumerate(cols):
val = row[col]
fmt = formats.get(col)
sheet.write(i, j, val, fmt)
workbook.close()
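# Added usage sketch (not part of the original module); the output format is
# chosen from the file extension, so the path below is hypothetical.
def _example_write_manifest():  # illustration only, never called
    rows = [torow(DATASET, {'dataset': 'datasets/gov'})]
    write_tabular_manifest('manifest.csv', rows, cols=DATASET)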
| 27.779837
| 80
| 0.568464
| 19,973
| 0.391812
| 16,499
| 0.323662
| 0
| 0
| 0
| 0
| 5,897
| 0.115682
|
5861890f40c195d9d9cd8464fdd3da892466f679
| 8,080
|
py
|
Python
|
python/mxnet/context.py
|
feevos/incubator-mxnet
|
275378a49a6035fd5bdead4a74ac36b6070295a7
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/context.py
|
feevos/incubator-mxnet
|
275378a49a6035fd5bdead4a74ac36b6070295a7
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/context.py
|
feevos/incubator-mxnet
|
275378a49a6035fd5bdead4a74ac36b6070295a7
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Context management API of mxnet."""
from __future__ import absolute_import
import threading
import warnings
from .base import classproperty, with_metaclass, _MXClassPropertyMetaClass
class Context(with_metaclass(_MXClassPropertyMetaClass, object)):
"""Constructs a context.
MXNet can run operations on CPU and different GPUs.
    A context describes the device type and ID on which computation should be carried out.
One can use mx.cpu and mx.gpu for short.
See also
----------
`How to run MXNet on multiple CPU/GPUs <http://mxnet.io/faq/multi_devices.html>`
for more details.
Parameters
----------
device_type : {'cpu', 'gpu'} or Context.
String representing the device type.
device_id : int (default=0)
The device id of the device, needed for GPU.
Note
----
Context can also be used as a way to change the default context.
Examples
--------
>>> # array on cpu
>>> cpu_array = mx.nd.ones((2, 3))
>>> # switch default context to GPU(2)
>>> with mx.Context(mx.gpu(2)):
... gpu_array = mx.nd.ones((2, 3))
>>> gpu_array.context
gpu(2)
One can also explicitly specify the context when creating an array.
>>> gpu_array = mx.nd.ones((2, 3), mx.gpu(1))
>>> gpu_array.context
gpu(1)
"""
# static class variable
_default_ctx = threading.local()
devtype2str = {1: 'cpu', 2: 'gpu', 3: 'cpu_pinned', 5: 'cpu_shared'}
devstr2type = {'cpu': 1, 'gpu': 2, 'cpu_pinned': 3, 'cpu_shared': 5}
def __init__(self, device_type, device_id=0):
if isinstance(device_type, Context):
self.device_typeid = device_type.device_typeid
self.device_id = device_type.device_id
else:
self.device_typeid = Context.devstr2type[device_type]
self.device_id = device_id
self._old_ctx = None
@property
def device_type(self):
"""Returns the device type of current context.
Examples
-------
>>> mx.context.current_context().device_type
'cpu'
>>> mx.current_context().device_type
'cpu'
Returns
-------
device_type : str
"""
return Context.devtype2str[self.device_typeid]
def __hash__(self):
"""Compute hash value of context for dictionary lookup"""
return hash((self.device_typeid, self.device_id))
def __eq__(self, other):
"""Compares two contexts. Two contexts are equal if they
have the same device type and device id.
"""
return isinstance(other, Context) and \
self.device_typeid == other.device_typeid and \
self.device_id == other.device_id
def __str__(self):
return '%s(%d)' % (self.device_type, self.device_id)
def __repr__(self):
return self.__str__()
def __enter__(self):
if not hasattr(Context._default_ctx, "value"):
Context._default_ctx.value = Context('cpu', 0)
self._old_ctx = Context._default_ctx.value
Context._default_ctx.value = self
return self
def __exit__(self, ptype, value, trace):
Context._default_ctx.value = self._old_ctx
#pylint: disable=no-self-argument
@classproperty
def default_ctx(cls):
warnings.warn("Context.default_ctx has been deprecated. "
"Please use Context.current_context() instead. "
"Please use test_utils.set_default_context to set a default context",
DeprecationWarning)
if not hasattr(Context._default_ctx, "value"):
cls._default_ctx.value = Context('cpu', 0)
return cls._default_ctx.value
@default_ctx.setter
def default_ctx(cls, val):
warnings.warn("Context.default_ctx has been deprecated. "
"Please use Context.current_context() instead. "
"Please use test_utils.set_default_context to set a default context",
DeprecationWarning)
cls._default_ctx.value = val
#pylint: enable=no-self-argument
# initialize the default context in Context
Context._default_ctx.value = Context('cpu', 0)
def cpu(device_id=0):
"""Returns a CPU context.
This function is a short cut for ``Context('cpu', device_id)``.
For most operations, when no context is specified, the default context is `cpu()`.
Examples
----------
>>> with mx.cpu():
... cpu_array = mx.nd.ones((2, 3))
>>> cpu_array.context
cpu(0)
>>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu())
>>> cpu_array.context
cpu(0)
Parameters
----------
device_id : int, optional
The device id of the device. `device_id` is not needed for CPU.
This is included to make interface compatible with GPU.
Returns
-------
context : Context
The corresponding CPU context.
"""
return Context('cpu', device_id)
def cpu_pinned(device_id=0):
"""Returns a CPU pinned memory context. Copying from CPU pinned memory to GPU
is faster than from normal CPU memory.
This function is a short cut for ``Context('cpu_pinned', device_id)``.
Examples
----------
>>> with mx.cpu_pinned():
... cpu_array = mx.nd.ones((2, 3))
>>> cpu_array.context
cpu_pinned(0)
>>> cpu_array = mx.nd.ones((2, 3), ctx=mx.cpu_pinned())
>>> cpu_array.context
cpu_pinned(0)
Parameters
----------
device_id : int, optional
The device id of the device. `device_id` is not needed for CPU.
This is included to make interface compatible with GPU.
Returns
-------
context : Context
The corresponding CPU pinned memory context.
"""
return Context('cpu_pinned', device_id)
def gpu(device_id=0):
"""Returns a GPU context.
This function is a short cut for Context('gpu', device_id).
The K GPUs on a node are typically numbered as 0,...,K-1.
Examples
----------
>>> cpu_array = mx.nd.ones((2, 3))
>>> cpu_array.context
cpu(0)
>>> with mx.gpu(1):
... gpu_array = mx.nd.ones((2, 3))
>>> gpu_array.context
gpu(1)
>>> gpu_array = mx.nd.ones((2, 3), ctx=mx.gpu(1))
>>> gpu_array.context
gpu(1)
Parameters
----------
device_id : int, optional
The device id of the device, needed for GPU.
Returns
-------
context : Context
The corresponding GPU context.
"""
return Context('gpu', device_id)
def current_context():
"""Returns the current context.
By default, `mx.cpu()` is used for all the computations
and it can be overridden by using `with mx.Context(x)` statement where
x can be cpu(device_id) or gpu(device_id).
Examples
-------
>>> mx.current_context()
cpu(0)
>>> with mx.Context('gpu', 1): # Context changed in `with` block.
... mx.current_context() # Computation done here will be on gpu(1).
...
gpu(1)
>>> mx.current_context() # Back to default context.
cpu(0)
Returns
-------
default_ctx : Context
"""
if not hasattr(Context._default_ctx, "value"):
Context._default_ctx.value = Context('cpu', 0)
return Context._default_ctx.value
| 30.490566
| 91
| 0.627104
| 3,935
| 0.487005
| 0
| 0
| 1,194
| 0.147772
| 0
| 0
| 5,603
| 0.693441
|
58618a4465a15f955aaa88fd0bac8fbce9ce5c48
| 3,422
|
py
|
Python
|
theory/model/tconfig.py
|
ralfonso/theory
|
41684969313cfc545d74b306e409fd5bf21387b3
|
[
"MIT"
] | 4
|
2015-07-03T19:53:59.000Z
|
2016-04-25T03:03:56.000Z
|
theory/model/tconfig.py
|
ralfonso/theory
|
41684969313cfc545d74b306e409fd5bf21387b3
|
[
"MIT"
] | null | null | null |
theory/model/tconfig.py
|
ralfonso/theory
|
41684969313cfc545d74b306e409fd5bf21387b3
|
[
"MIT"
] | 2
|
2020-03-29T22:02:29.000Z
|
2021-07-13T07:17:19.000Z
|
# theory MPD client
# Copyright (C) 2008 Ryan Roemmich <ralfonso@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import pickle
from pylons import config
from pylons import app_globals as g
class ConfigFileError(Exception):
pass
class TConfig:
"""
    Handles the global configuration; loaded into app globals at application startup.
    Also commits the configuration to disk so it persists across app restarts.
"""
def __init__(self):
""" try to read the configuration from disk """
self.server = None
self.port = None
self.password = None
self.webpassword = ''
self.timeout = False
self.awskey = None
self.aws_secret = None
self.streams = []
self.default_search = 'Any'
conf = ConfigParser.ConfigParser()
conf.read(config['localconf'])
try:
self.server = conf.get('mpd','server')
self.port = conf.get('mpd','port')
self.password = conf.get('mpd','password')
self.awskey = conf.get('services','awskey')
self.aws_secret = conf.get('services','aws_secret')
self.webpassword = conf.get('main','webpassword')
self.timeout = conf.getboolean('main','timeout')
self.default_search = conf.get('main','default_search')
conf_stream = conf.get('ext','streams')
except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
pass
        try:
            self.streams = pickle.loads(eval(conf_stream))
        except Exception:
            # we don't really care what happened: either the option was missing
            # above or the user messed with the magic pickled string :)
            pass
def commit_config(self):
""" commit the configuration to disk """
conf = ConfigParser.ConfigParser()
conf.add_section("mpd")
conf.set("mpd", "server",self.server)
conf.set("mpd", "port",self.port)
conf.set("mpd", "password",self.password)
conf.add_section("services")
conf.set('services','awskey',self.awskey)
conf.set('services','aws_secret',self.aws_secret)
conf.add_section('main')
conf.set('main','webpassword',self.webpassword)
conf.set('main','timeout',self.timeout)
conf.set('main','default_search',self.default_search)
conf.add_section('ext')
conf.set('ext','streams',repr(pickle.dumps(self.streams)))
        try:
            with open(config['localconf'], "w") as conffile:
                conf.write(conffile)
        except IOError as e:
            raise ConfigFileError(e)
def get_stream_name(self,url):
""" search the list of streams for a particular name """
for s in self.streams:
if s[1] == url:
return s[0]
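# Added usage sketch (not part of the original module); assumes a Pylons app has
# already put a writable config file path into config['localconf'].
#
# tconf = TConfig()
# tconf.server = 'localhost'
# tconf.port = '6600'
# tconf.commit_config()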
| 34.918367
| 108
| 0.630333
| 2,607
| 0.761835
| 0
| 0
| 0
| 0
| 0
| 0
| 1,476
| 0.431327
|
5861aaa87e16980cf7f95fd4b748950ec3d44176
| 5,055
|
py
|
Python
|
tests/test_error.py
|
iotanbo/iotanbo_py_utils
|
96a2728e051b5e5ee601459b4c449b5495768ba8
|
[
"MIT"
] | null | null | null |
tests/test_error.py
|
iotanbo/iotanbo_py_utils
|
96a2728e051b5e5ee601459b4c449b5495768ba8
|
[
"MIT"
] | 14
|
2021-06-07T17:36:02.000Z
|
2021-06-07T18:02:37.000Z
|
tests/test_error.py
|
iotanbo/iotanbo_py_utils
|
96a2728e051b5e5ee601459b4c449b5495768ba8
|
[
"MIT"
] | null | null | null |
"""Test `iotanbo_py_utils.error.py`."""
from iotanbo_py_utils.error import Error
from iotanbo_py_utils.error import ErrorKind
def test_error_one_of_arithmetic_errors() -> None:
errs = (
Error(ErrorKind.ArithmeticError),
Error(ErrorKind.FloatingPointError),
Error(ErrorKind.OverflowError),
Error(ErrorKind.ZeroDivisionError),
)
for err in errs:
assert err.one_of_arithmetic_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_arithmetic_errors()
def test_error_one_of_import_errors() -> None:
errs = (
Error(ErrorKind.ImportError),
Error(ErrorKind.ModuleNotFoundError),
)
for err in errs:
assert err.one_of_import_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_import_errors()
def test_error_one_of_lookup_errors() -> None:
errs = (
Error(ErrorKind.LookupError),
Error(ErrorKind.IndexError),
Error(ErrorKind.KeyError),
)
for err in errs:
assert err.one_of_lookup_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_lookup_errors()
def test_error_one_of_name_errors() -> None:
errs = (
Error(ErrorKind.NameError),
Error(ErrorKind.UnboundLocalError),
)
for err in errs:
assert err.one_of_name_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_name_errors()
def test_error_one_of_os_errors() -> None:
errs = (
Error(ErrorKind.OSError),
Error(ErrorKind.BlockingIOError),
Error(ErrorKind.ChildProcessError),
Error(ErrorKind.ConnectionError),
Error(ErrorKind.BrokenPipeError),
Error(ErrorKind.ConnectionAbortedError),
Error(ErrorKind.ConnectionRefusedError),
Error(ErrorKind.ConnectionResetError),
Error(ErrorKind.FileExistsError),
Error(ErrorKind.FileNotFoundError),
Error(ErrorKind.InterruptedError),
Error(ErrorKind.IsADirectoryError),
Error(ErrorKind.NotADirectoryError),
Error(ErrorKind.PermissionError),
Error(ErrorKind.ProcessLookupError),
Error(ErrorKind.TimeoutError),
)
for err in errs:
assert err.one_of_os_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_os_errors()
def test_error_one_of_runtime_errors() -> None:
errs = (
Error(ErrorKind.RuntimeError),
Error(ErrorKind.NotImplementedError),
Error(ErrorKind.RecursionError),
)
for err in errs:
assert err.one_of_runtime_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_runtime_errors()
def test_error_one_of_syntax_errors() -> None:
errs = (
Error(ErrorKind.SyntaxError),
Error(ErrorKind.IndentationError),
Error(ErrorKind.TabError),
)
for err in errs:
assert err.one_of_syntax_errors()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_syntax_errors()
def test_error_one_of_value_errors() -> None:
errs = (
Error(ErrorKind.ValueError),
Error(ErrorKind.UnicodeError),
Error(ErrorKind.UnicodeDecodeError),
Error(ErrorKind.UnicodeEncodeError),
Error(ErrorKind.UnicodeTranslateError),
)
for err in errs:
assert err.one_of_value_errors()
# == negative path ==
err = Error(ErrorKind.SyntaxError)
assert not err.one_of_value_errors()
def test_error_one_of_warnings() -> None:
errs = (
Error(ErrorKind.Warning),
Error(ErrorKind.DeprecationWarning),
Error(ErrorKind.PendingDeprecationWarning),
Error(ErrorKind.RuntimeWarning),
Error(ErrorKind.SyntaxWarning),
Error(ErrorKind.UserWarning),
Error(ErrorKind.FutureWarning),
Error(ErrorKind.ImportWarning),
Error(ErrorKind.UnicodeWarning),
Error(ErrorKind.BytesWarning),
Error(ErrorKind.ResourceWarning),
)
for err in errs:
assert err.one_of_warnings()
# == negative path ==
err = Error(ErrorKind.ValueError)
assert not err.one_of_warnings()
class _CustomException(Exception):
...
def test_error_from_exception() -> None:
# error from exception, preserve kind
e = Error.from_exception(ValueError("test"))
assert e.kind == ErrorKind.ValueError
assert not e.cause
assert e.msg == "test"
# new kind replaces exception's kind
e = Error.from_exception(ValueError("test"), new_kind=ErrorKind.Warning)
assert e.kind == ErrorKind.Warning
assert e.cause == ErrorKind.ValueError
assert e.msg == "test"
# error from custom exception, preserve kind
try:
raise _CustomException()
except _CustomException as ex:
e = Error.from_exception(ex)
assert e.kind == "_CustomException"
assert not e.cause
assert e.msg == ""
| 28.240223
| 76
| 0.669041
| 42
| 0.008309
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.076954
|
58641c0b89af2618e34db3686e77a3a4237fbad3
| 651
|
py
|
Python
|
setup.py
|
wontonst/orvibo
|
72722b16caa929ae3f07b0a6789a0f18cd3ebad3
|
[
"MIT"
] | null | null | null |
setup.py
|
wontonst/orvibo
|
72722b16caa929ae3f07b0a6789a0f18cd3ebad3
|
[
"MIT"
] | null | null | null |
setup.py
|
wontonst/orvibo
|
72722b16caa929ae3f07b0a6789a0f18cd3ebad3
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='orvibo',
version='1.5.0',
    description='Python module to control Orvibo devices, such as s20 wifi sockets and AllOne IR blasters',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/cherezov/orvibo',
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
)
| 27.125
| 108
| 0.725038
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 254
| 0.390169
|
58652e385a1acfda94718bdec15c5a91dde6b8c7
| 3,053
|
py
|
Python
|
autograder-master/autograder/test_runner.py
|
Diana1320622/AILabs
|
315a6f4b8f8dd60e4f53d348e06e23b4d827d179
|
[
"MIT"
] | null | null | null |
autograder-master/autograder/test_runner.py
|
Diana1320622/AILabs
|
315a6f4b8f8dd60e4f53d348e06e23b4d827d179
|
[
"MIT"
] | null | null | null |
autograder-master/autograder/test_runner.py
|
Diana1320622/AILabs
|
315a6f4b8f8dd60e4f53d348e06e23b4d827d179
|
[
"MIT"
] | null | null | null |
import glob, os
import subprocess
from difflib import context_diff
class TestRunner(object):
def __init__(self, context):
self.context = context
self.error_count = 0
self.test_count = 0
self.success_count = 0
def run(self):
        os.chdir(self.context.repo_dir)
search = os.path.join(self.context.testcasedir, '*.in')
problem_files = glob.glob(search)
if len(problem_files) == 0:
self.warn("No problem files found. Does directory '" +
self.context.testcasedir + "' exist?")
return
file = self.context.runscript
if not os.path.isfile(file):
self.error("Could not find file '{}' to run the program.".format(file))
return
for problem_file in problem_files:
self.test_count += 1
output = self.get_output(problem_file)
expected_output = self.get_expected_output(problem_file)
if self.compare(problem_file, output, expected_output):
self.success_count += 1
else:
self.error_count += 1
def compare(self, problem_file, output, expected_output):
diff_iterator = context_diff(output, expected_output,
fromfile='program output', tofile='expected')
diff = ""
for char in diff_iterator:
diff += char
if len(diff) == 0:
self.info("Testing '" + problem_file + "'. Result: output CORRECT")
return True
else:
self.error("Testing '" + problem_file + "'. Result: output DIFFERENT")
self.error(" Expected:")
for line in expected_output.split('\n'):
self.error(" " + line)
self.error(" Actual:")
for line in output.split('\n'):
self.error(" " + line)
return False
def info(self, message):
self.context.logger.info(message)
def warn(self, message):
self.context.logger.warn(message)
def error(self, message):
self.context.logger.error(message)
def get_expected_output(self, problem_file):
expected_output_file = problem_file[:-2] + 'out'
with open(expected_output_file, 'r') as file:
return file.read()
def get_output(self, problem_file):
runscript = os.path.join(self.context.repo_dir, self.context.runscript)
out = ''
err = ''
try:
with open(problem_file, 'r') as input:
p = subprocess.Popen(runscript,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin = input)
out, err = p.communicate()
p.wait()
if len(err) > 0:
self.warn('Stderr is outputting text:')
for line in err.split('\n'):
self.warn(line)
except Exception as e:
self.error('Caught unexpected error: ' + str(e))
return out
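# Added usage sketch (not part of the original module). The context object is
# hypothetical here; the runner only relies on the attributes shown below.
#
# class FakeContext(object):
#     repo_dir = '/tmp/repo'    # contains the submitted code
#     testcasedir = 'tests'     # holds the *.in / *.out pairs
#     runscript = 'run.sh'      # executable that reads stdin, writes stdout
#     logger = logging.getLogger('autograder')
#
# runner = TestRunner(FakeContext())
# runner.run()
# print('%d/%d passed' % (runner.success_count, runner.test_count))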
| 33.184783
| 83
| 0.556174
| 2,983
| 0.977072
| 0
| 0
| 0
| 0
| 0
| 0
| 332
| 0.108745
|
5865c20624359a297b3a450c2e37573f88fc2710
| 245
|
py
|
Python
|
amazon/model/admin.py
|
Lakshmivijaykrishnan/mini-amazon
|
89ce7c5e2af127a2e8e027c87cb245fa82d184d6
|
[
"Unlicense"
] | null | null | null |
amazon/model/admin.py
|
Lakshmivijaykrishnan/mini-amazon
|
89ce7c5e2af127a2e8e027c87cb245fa82d184d6
|
[
"Unlicense"
] | null | null | null |
amazon/model/admin.py
|
Lakshmivijaykrishnan/mini-amazon
|
89ce7c5e2af127a2e8e027c87cb245fa82d184d6
|
[
"Unlicense"
] | null | null | null |
from amazon.model import db
def __search_by_admin_name(username):
query={'username': username}
matching_user = db['users'].find(query)
if matching_user.count() > 0:
return matching_user.next()
else:
return None
| 22.272727
| 43
| 0.669388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.069388
|
58672ac219aa158da24cd5ab42129bffccff6013
| 2,429
|
py
|
Python
|
tests/fixtures/test_abstracts/content_03_expected.py
|
elifesciences/elife-tools
|
ee345bf0e6703ef0f7e718355e85730abbdfd117
|
[
"MIT"
] | 9
|
2015-04-16T08:13:31.000Z
|
2020-05-18T14:03:06.000Z
|
tests/fixtures/test_abstracts/content_03_expected.py
|
elifesciences/elife-tools
|
ee345bf0e6703ef0f7e718355e85730abbdfd117
|
[
"MIT"
] | 310
|
2015-02-11T00:30:09.000Z
|
2021-07-14T23:58:50.000Z
|
tests/fixtures/test_abstracts/content_03_expected.py
|
elifesciences/elife-tools
|
ee345bf0e6703ef0f7e718355e85730abbdfd117
|
[
"MIT"
] | 9
|
2015-02-04T01:21:28.000Z
|
2021-06-15T12:50:47.000Z
|
expected = [
{
"abstract_type": None,
"content": "RET can be activated in cis or trans by its co-receptors and ligands in vitro, but the physiological roles of trans signaling are unclear. Rapidly adapting (RA) mechanoreceptors in dorsal root ganglia (DRGs) express Ret and the co-receptor Gfr\u03b12 and depend on Ret for survival and central projection growth. Here, we show that Ret and Gfr\u03b12 null mice display comparable early central projection deficits, but Gfr\u03b12 null RA mechanoreceptors recover later. Loss of Gfr\u03b11, the co-receptor implicated in activating RET in trans, causes no significant central projection or cell survival deficit, but Gfr\u03b11;Gfr\u03b12 double nulls phenocopy Ret nulls. Finally, we demonstrate that GFR\u03b11 produced by neighboring DRG neurons activates RET in RA mechanoreceptors. Taken together, our results suggest that trans and cis RET signaling could function in the same developmental process and that the availability of both forms of activation likely enhances but not diversifies outcomes of RET signaling.",
"full_content": "<p>RET can be activated in <italic>cis</italic> or <italic>trans</italic> by its co-receptors and ligands <italic>in vitro</italic>, but the physiological roles of <italic>trans</italic> signaling are unclear. Rapidly adapting (RA) mechanoreceptors in dorsal root ganglia (DRGs) express <italic>Ret</italic> and the co-receptor <italic>Gfr\u03b12</italic> and depend on <italic>Ret</italic> for survival and central projection growth. Here, we show that <italic>Ret</italic> and <italic>Gfr\u03b12</italic> null mice display comparable early central projection deficits, but <italic>Gfr\u03b12</italic> null RA mechanoreceptors recover later. Loss of <italic>Gfr\u03b11</italic>, the co-receptor implicated in activating RET <italic>in trans</italic>, causes no significant central projection or cell survival deficit, but <italic>Gfr\u03b11;Gfr\u03b12</italic> double nulls phenocopy <italic>Ret</italic> nulls. Finally, we demonstrate that GFR\u03b11 produced by neighboring DRG neurons activates RET in RA mechanoreceptors. Taken together, our results suggest that <italic>trans</italic> and <italic>cis</italic> RET signaling could function in the same developmental process and that the availability of both forms of activation likely enhances but not diversifies outcomes of RET signaling.</p>",
},
]
| 303.625
| 1,326
| 0.792096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,361
| 0.972005
|
5868165c78c75470f0c193bcf63d0eb76394d605
| 5,059
|
py
|
Python
|
NER-BiLSTM-CRF-PyTorch-main/src/eval.py
|
OscarChang46/de-identify-sensitive-data-for-cybersecurity-use-cases-e.g.-PII-and-PHI-data-in-unstructured-text-an
|
e77ca0d5c19206a349ecc94fc71febdc10824482
|
[
"MIT"
] | 22
|
2021-04-28T04:19:04.000Z
|
2022-03-20T17:17:57.000Z
|
NER-BiLSTM-CRF-PyTorch-main/src/eval.py
|
OscarChang46/de-identify-sensitive-data-for-cybersecurity-use-cases-e.g.-PII-and-PHI-data-in-unstructured-text-an
|
e77ca0d5c19206a349ecc94fc71febdc10824482
|
[
"MIT"
] | 3
|
2021-09-03T12:14:36.000Z
|
2022-03-07T10:43:51.000Z
|
NER-BiLSTM-CRF-PyTorch-main/src/eval.py
|
OscarChang46/de-identify-sensitive-data-for-cybersecurity-use-cases-e.g.-PII-and-PHI-data-in-unstructured-text-an
|
e77ca0d5c19206a349ecc94fc71febdc10824482
|
[
"MIT"
] | 9
|
2021-03-25T13:44:51.000Z
|
2022-02-19T03:56:38.000Z
|
# coding=utf-8
import optparse
import torch
import time
import pickle
from torch.autograd import Variable
from loader import *
from utils import *
# python -m visdom.server
optparser = optparse.OptionParser()
optparser.add_option(
"-t", "--test", default="data/eng.testb",
help="Test set location"
)
optparser.add_option(
'--score', default='evaluation/temp/score.txt',
help='score file location'
)
optparser.add_option(
"-g", '--use_gpu', default='1',
type='int', help='whether or not to ues gpu'
)
optparser.add_option(
'--loss', default='loss.txt',
help='loss file location'
)
optparser.add_option(
'--model_path', default='models/test',
help='model path'
)
optparser.add_option(
'--map_path', default='models/mapping.pkl',
help='model path'
)
optparser.add_option(
'--char_mode', choices=['CNN', 'LSTM'], default='CNN',
help='char_CNN or char_LSTM'
)
opts = optparser.parse_args()[0]
mapping_file = opts.map_path
with open(mapping_file, 'rb') as f:
mappings = pickle.load(f)
word_to_id = mappings['word_to_id']
tag_to_id = mappings['tag_to_id']
id_to_tag = {k[1]: k[0] for k in tag_to_id.items()}
char_to_id = mappings['char_to_id']
parameters = mappings['parameters']
word_embeds = mappings['word_embeds']
use_gpu = opts.use_gpu == 1 and torch.cuda.is_available()
assert os.path.isfile(opts.test)
assert parameters['tag_scheme'] in ['iob', 'iobes']
if not os.path.isfile(eval_script):
raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
if not os.path.exists(eval_temp):
os.makedirs(eval_temp)
lower = parameters['lower']
zeros = parameters['zeros']
tag_scheme = parameters['tag_scheme']
test_sentences = load_sentences(opts.test, lower, zeros)
update_tag_scheme(test_sentences, tag_scheme)
test_data = prepare_dataset(
test_sentences, word_to_id, char_to_id, tag_to_id, lower
)
model = torch.load(opts.model_path)
model_name = opts.model_path.split('/')[-1].split('.')[0]
if use_gpu:
model.cuda()
model.eval()
def eval(model, datas):
prediction = []
confusion_matrix = torch.zeros((len(tag_to_id) - 2, len(tag_to_id) - 2))
for data in datas:
ground_truth_id = data['tags']
words = data['str_words']
chars2 = data['chars']
caps = data['caps']
if parameters['char_mode'] == 'LSTM':
chars2_sorted = sorted(chars2, key=lambda p: len(p), reverse=True)
d = {}
for i, ci in enumerate(chars2):
for j, cj in enumerate(chars2_sorted):
                    if ci == cj and j not in d and i not in d.values():
d[j] = i
continue
chars2_length = [len(c) for c in chars2_sorted]
char_maxl = max(chars2_length)
chars2_mask = np.zeros((len(chars2_sorted), char_maxl), dtype='int')
for i, c in enumerate(chars2_sorted):
chars2_mask[i, :chars2_length[i]] = c
chars2_mask = Variable(torch.LongTensor(chars2_mask))
if parameters['char_mode'] == 'CNN':
d = {}
chars2_length = [len(c) for c in chars2]
char_maxl = max(chars2_length)
chars2_mask = np.zeros((len(chars2_length), char_maxl), dtype='int')
for i, c in enumerate(chars2):
chars2_mask[i, :chars2_length[i]] = c
chars2_mask = Variable(torch.LongTensor(chars2_mask))
dwords = Variable(torch.LongTensor(data['words']))
dcaps = Variable(torch.LongTensor(caps))
if use_gpu:
val, out = model(dwords.cuda(), chars2_mask.cuda(), dcaps.cuda(),chars2_length, d)
else:
val, out = model(dwords, chars2_mask, dcaps, chars2_length, d)
predicted_id = out
for (word, true_id, pred_id) in zip(words, ground_truth_id, predicted_id):
line = ' '.join([word, id_to_tag[true_id], id_to_tag[pred_id]])
prediction.append(line)
confusion_matrix[true_id, pred_id] += 1
prediction.append('')
predf = eval_temp + '/pred.' + model_name
scoref = eval_temp + '/score.' + model_name
with open(predf, 'w') as f:
f.write('\n'.join(prediction))
os.system('%s < %s > %s' % (eval_script, predf, scoref))
with open(scoref, 'r') as f:
for l in f.readlines():
print(l.strip())
print(("{: >2}{: >7}{: >7}%s{: >9}" % ("{: >7}" * confusion_matrix.size(0))).format(
"ID", "NE", "Total",
*([id_to_tag[i] for i in range(confusion_matrix.size(0))] + ["Percent"])
))
for i in range(confusion_matrix.size(0)):
print(("{: >2}{: >7}{: >7}%s{: >9}" % ("{: >7}" * confusion_matrix.size(0))).format(
str(i), id_to_tag[i], str(confusion_matrix[i].sum().item()),
*([confusion_matrix[i][j] for j in range(confusion_matrix.size(0))] +
["%.3f" % (confusion_matrix[i][i] * 100. / max(1, confusion_matrix[i].sum()))])
))
t = time.time()
eval(model, test_data)
print(time.time() - t)
| 32.22293
| 94
| 0.613758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 757
| 0.149634
|
586904d063488a1bde40ac6c380144e572f09389
| 789
|
py
|
Python
|
src/openbiolink/graph_creation/metadata_db_file/edge/dbMetaEdgeSiderInd.py
|
cthoyt/OpenBioLink
|
c5f85b99f9104f70493136c343e4554261e990a5
|
[
"MIT"
] | null | null | null |
src/openbiolink/graph_creation/metadata_db_file/edge/dbMetaEdgeSiderInd.py
|
cthoyt/OpenBioLink
|
c5f85b99f9104f70493136c343e4554261e990a5
|
[
"MIT"
] | null | null | null |
src/openbiolink/graph_creation/metadata_db_file/edge/dbMetaEdgeSiderInd.py
|
cthoyt/OpenBioLink
|
c5f85b99f9104f70493136c343e4554261e990a5
|
[
"MIT"
] | null | null | null |
from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType
class DbMetaEdgeSiderInd(DbMetadataEdge):
NAME = 'Edge - Sider - Indications'
URL = "http://sideeffects.embl.de/media/download/meddra_all_indications.tsv.gz"
OFILE_NAME = "SIDER_dis_drug.tsv.gz"
COLS = ['stichID', 'umlsID', 'method', 'umlsName', 'medDRAumlsType',
'medDRAumlsID', 'medDRAumlsName']
FILTER_COLS = ['umlsID', 'stichID', 'method']
HEADER = 0
DB_TYPE = DbType.DB_EDGE_SIDER_IND
def __init__(self):
        super().__init__(url=DbMetaEdgeSiderInd.URL,
                         ofile_name=DbMetaEdgeSiderInd.OFILE_NAME,
                         dbType=DbMetaEdgeSiderInd.DB_TYPE)
| 43.833333
| 90
| 0.69455
| 637
| 0.807351
| 0
| 0
| 0
| 0
| 0
| 0
| 230
| 0.291508
|
586a480b0504292a0e113f2a2851c35d28765f0b
| 377
|
py
|
Python
|
rename_files.py
|
ssinhaleite/util
|
b65a6e0d7ff270cc2bbdbc09b7894ffc77edaf8d
|
[
"MIT"
] | 1
|
2018-10-10T11:37:45.000Z
|
2018-10-10T11:37:45.000Z
|
rename_files.py
|
ssinhaleite/util
|
b65a6e0d7ff270cc2bbdbc09b7894ffc77edaf8d
|
[
"MIT"
] | null | null | null |
rename_files.py
|
ssinhaleite/util
|
b65a6e0d7ff270cc2bbdbc09b7894ffc77edaf8d
|
[
"MIT"
] | null | null | null |
import mahotas as mh
import glob
import os
# Renumber image files sequentially (00000.tif, 00001.tif, ...), re-saving each
# one into /path/renamed/ via mahotas.
files = glob.glob("/path/*")
files.sort()
if not os.path.isdir("/path/renamed/"):
os.mkdir("/path/renamed/")
for i in range(len(files)):
print("Processing " + files[i])
renamedFile = mh.imread(files[i])
mh.imsave("/path/renamed/" + str(i).zfill(5) + ".tif", renamedFile)
| 20.944444
| 71
| 0.649867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.310345
|
586b210947d0b950e98da84e776f0d34d976b0d2
| 1,749
|
py
|
Python
|
scripts/download_lookml.py
|
orf/lkml
|
2175a22e0fe8a894ef9312c73c6a062df2447795
|
[
"MIT"
] | 110
|
2019-06-25T14:26:41.000Z
|
2022-02-01T13:27:19.000Z
|
scripts/download_lookml.py
|
orf/lkml
|
2175a22e0fe8a894ef9312c73c6a062df2447795
|
[
"MIT"
] | 56
|
2019-06-26T22:11:43.000Z
|
2022-03-15T20:37:30.000Z
|
scripts/download_lookml.py
|
orf/lkml
|
2175a22e0fe8a894ef9312c73c6a062df2447795
|
[
"MIT"
] | 28
|
2019-07-08T17:34:49.000Z
|
2022-03-25T14:36:00.000Z
|
import os
import re
from base64 import b64decode
from pathlib import Path
import requests
username = os.environ["GITHUB_USERNAME"]
password = os.environ["GITHUB_PERSONAL_ACCESS_TOKEN"]
auth = requests.auth.HTTPBasicAuth(username, password)
directory = Path(__file__).resolve().parent.parent / "github"
directory.mkdir(exist_ok=True)
start_url = "https://api.github.com/search/code?q=view+language:lookml"
next_url = None
page = 1
with requests.Session() as session:
session.auth = auth
while True:
response = session.get(next_url or start_url)
response.raise_for_status()
links = response.headers["Link"]
finds = re.findall(
r"<(https://api.github.com/search/code\?"
r'q=view\+language%3Alookml&page=\d+)>; rel="next"',
links,
)
if finds:
next_url = finds[0]
else:
next_url = None
print(next_url)
urls = [item["url"] for item in response.json()["items"]]
print(f"Downloading all content from page {page}")
for url in urls:
response = session.get(url)
response.raise_for_status()
response_json = response.json()
name = response_json["name"]
encoded = response_json["content"]
content = b64decode(encoded).decode("utf-8")
if (
name.endswith(".lookml")
or content.startswith("-")
or "- view" in content
):
continue
file_path = directory / name
with file_path.open("w+") as file:
file.write(content)
if next_url is None:
break
else:
page += 1
| 26.5
| 71
| 0.573471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 313
| 0.178959
|
586cd0144d170eb0711f6999768b768351ab3215
| 579
|
py
|
Python
|
docs/conf.py
|
RTBHOUSE/carreralib
|
1daa959ef411b29601c92f86c0f6876fe8367837
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
RTBHOUSE/carreralib
|
1daa959ef411b29601c92f86c0f6876fe8367837
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
RTBHOUSE/carreralib
|
1daa959ef411b29601c92f86c0f6876fe8367837
|
[
"MIT"
] | 1
|
2020-02-25T20:40:50.000Z
|
2020-02-25T20:40:50.000Z
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
def get_version(filename):
from re import findall
with open(filename) as f:
metadata = dict(findall(r"__([a-z]+)__ = '([^']+)'", f.read()))
return metadata['version']
project = 'carreralib'
copyright = '2015-2017 Thomas Kemmer'
version = get_version(b'../carreralib/__init__.py')
release = version
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.todo'
]
exclude_patterns = ['_build']
master_doc = 'index'
html_theme = 'default'
| 20.678571
| 71
| 0.658031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.357513
|
586d9bd962737e276a73a87798d6fdc63e31cd16
| 503
|
py
|
Python
|
algos/lcs.py
|
asaini/algo-py
|
e9d18ef82d14e6304430bbd8b065430e76aa7eb8
|
[
"MIT"
] | 1
|
2015-10-01T21:17:10.000Z
|
2015-10-01T21:17:10.000Z
|
algos/lcs.py
|
asaini/algo-py
|
e9d18ef82d14e6304430bbd8b065430e76aa7eb8
|
[
"MIT"
] | null | null | null |
algos/lcs.py
|
asaini/algo-py
|
e9d18ef82d14e6304430bbd8b065430e76aa7eb8
|
[
"MIT"
] | null | null | null |
def lcs(x, y):
"""
Longest Common Subsequence
"""
n = len(x) + 1
m = len(y) + 1
table = [ [0]*m for i in range(n) ]
for i in range(n):
for j in range(m):
# If either string is empty, then lcs = 0
if i == 0 or j == 0:
table[i][j] = 0
elif x[i - 1] == y[j - 1]:
table[i][j] = 1 + table[i-1][j-1]
else:
table[i][j] = max(table[i-1][j], table[i][j-1])
return table[len(x)][len(y)]
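# For example, lcs("AGGTAB", "GXTXAYB") == 4; one longest common subsequence is "GTAB".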
if __name__ == '__main__':
x = "AGGTAB"
y = "GXTXAYB"
    print(lcs(x, y))
| 16.225806
| 51
| 0.499006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 0.208748
|
586f8df02b72779b1db755aeb20b5f85e4d788d2
| 350
|
py
|
Python
|
app/models/methods/set_device_info_method.py
|
luisalvesmartins/TAPO-P100
|
02bc929a87bbe4681739b14a716f6cef2b159fd1
|
[
"MIT"
] | null | null | null |
app/models/methods/set_device_info_method.py
|
luisalvesmartins/TAPO-P100
|
02bc929a87bbe4681739b14a716f6cef2b159fd1
|
[
"MIT"
] | 1
|
2021-06-23T09:21:40.000Z
|
2021-07-02T17:21:12.000Z
|
app/models/methods/set_device_info_method.py
|
luisalvesmartins/TAPO-P100
|
02bc929a87bbe4681739b14a716f6cef2b159fd1
|
[
"MIT"
] | null | null | null |
from models.methods import method
from typing import Any
class SetDeviceInfoMethod(method.Method):
def __init__(self, params: Any):
super().__init__("set_device_info", params)
def set_request_time_milis(self, t: float):
self.requestTimeMils = t
def set_terminal_uuid(self, uuid: str):
self.terminalUUID = uuid
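# Added usage sketch (not part of the original module); the params dict and the
# UUID below are hypothetical.
#
# m = SetDeviceInfoMethod({'device_on': True})
# m.set_request_time_milis(1623456789000.0)
# m.set_terminal_uuid('88-00-DE-AD-52-E1')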
| 25
| 51
| 0.708571
| 290
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.048571
|
586fa3eca62dcbd41023f2732e592c80e6a5d80c
| 3,077
|
py
|
Python
|
projects/InterpretationReID/train_net.py
|
SheldongChen/AMD.github.io
|
5f3018f239127949b2d3995162ffe033dcf8051a
|
[
"Apache-2.0"
] | 17
|
2021-11-01T01:14:06.000Z
|
2022-03-02T14:59:39.000Z
|
projects/InterpretationReID/train_net.py
|
SheldongChen/AMD.github.io
|
5f3018f239127949b2d3995162ffe033dcf8051a
|
[
"Apache-2.0"
] | 2
|
2021-12-22T07:56:13.000Z
|
2022-03-18T10:26:21.000Z
|
projects/InterpretationReID/train_net.py
|
SheldongChen/AMD.github.io
|
5f3018f239127949b2d3995162ffe033dcf8051a
|
[
"Apache-2.0"
] | 2
|
2022-02-18T07:42:38.000Z
|
2022-02-18T10:16:26.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: sherlockliao01@gmail.com
"""
import logging
import os
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from projects.InterpretationReID.interpretationreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from fastreid.utils.checkpoint import Checkpointer
from projects.InterpretationReID.interpretationreid.evaluation import ReidEvaluator
import projects.InterpretationReID.interpretationreid as PII
from fastreid.utils.logger import setup_logger
class Trainer(DefaultTrainer):
@classmethod
def build_train_loader(cls, cfg):
"""
Returns:
iterable
It now calls :func:`fastreid.data.build_detection_train_loader`.
Overwrite it if you'd like a different data loader.
"""
logger = logging.getLogger(__name__)
logger.info("Prepare training set")
return PII.add_build_reid_train_loader(cfg)
@classmethod
def build_test_loader(cls, cfg, dataset_name):
"""
Returns:
iterable
It now calls :func:`fastreid.data.build_detection_test_loader`.
Overwrite it if you'd like a different data loader.
"""
return PII.add_build_reid_test_loader(cfg, dataset_name)
@classmethod
def build_evaluator(cls, cfg, num_query, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return ReidEvaluator(cfg, num_query)
def setup(args):
"""
Create configs_old and perform basic setups.
"""
cfg = get_cfg()
PII.add_interpretation_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
model = Trainer.build_model(cfg)
Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.INTERPRETATION.PRETRAIN_MODEL:
#print("trainer.load_n_or_not()")
trainer.load_n_or_not()
#print("load success")
#print(trainer.model)
#for p in trainer.model.backbone_1.parameters():
#p.requires_grad=False
#for p in trainer.model.backbone_2.parameters():
#p.requires_grad=False
#for p in trainer.model.heads.parameters(): #.module
#p.requires_grad=False
#print("trainer.train()")
#print(cfg)
#print(trainer._hooks)
#setup_logger()
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
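    # Example invocations (paths are hypothetical; the flags come from
    # default_argument_parser and the handling in main() above):
    #   python train_net.py --config-file projects/InterpretationReID/configs/example.yml --num-gpus 1
    #   python train_net.py --config-file <cfg> --eval-only MODEL.WEIGHTS <checkpoint>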
| 26.756522
| 128
| 0.677283
| 986
| 0.320442
| 0
| 0
| 935
| 0.303867
| 0
| 0
| 979
| 0.318167
|
586fc6bf6803a3f84dbc2e58fa7867211fee503b
| 826
|
py
|
Python
|
asynciomeasures/collectors.py
|
Poogles/aiomeasures
|
d62482e8de56a00bce310e7c422d1a1e7a114ef7
|
[
"MIT"
] | 2
|
2018-12-27T22:01:41.000Z
|
2019-04-29T11:51:15.000Z
|
asynciomeasures/collectors.py
|
Poogles/aiomeasures
|
d62482e8de56a00bce310e7c422d1a1e7a114ef7
|
[
"MIT"
] | null | null | null |
asynciomeasures/collectors.py
|
Poogles/aiomeasures
|
d62482e8de56a00bce310e7c422d1a1e7a114ef7
|
[
"MIT"
] | null | null | null |
from collections import deque
from asynciomeasures.events import Event
class Collector(deque):
"""Caped list of metrics
"""
def flush(self, rate=None, formatter=None):
while True:
try:
metric = self.popleft()
if isinstance(metric, Event):
yield formatter(metric)
continue
if metric.value is None:
continue
if rate and metric.rate and rate < metric.rate:
continue
if formatter:
try:
yield formatter(metric)
except ValueError:
continue
else:
yield metric
except IndexError:
return
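# Hedged usage sketch (assumes metric objects expose `value` and `rate`
# attributes, as flush() expects; `str` stands in for a real formatter):
#   collector = Collector()
#   collector.append(some_metric)
#   lines = list(collector.flush(rate=1.0, formatter=str))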
| 28.482759
| 63
| 0.457627
| 752
| 0.910412
| 686
| 0.830508
| 0
| 0
| 0
| 0
| 32
| 0.038741
|
58708500568a55067da2b5aa34b23852b3efa570
| 1,576
|
py
|
Python
|
hardware/testbenches/common/drivers/state/driver.py
|
Intuity/nexus
|
0d1414fa2ea518dae9f031930c40692ebac5d154
|
[
"Apache-2.0"
] | 6
|
2021-06-28T05:52:15.000Z
|
2022-03-27T20:45:28.000Z
|
hardware/testbenches/common/drivers/state/driver.py
|
Intuity/nexus
|
0d1414fa2ea518dae9f031930c40692ebac5d154
|
[
"Apache-2.0"
] | null | null | null |
hardware/testbenches/common/drivers/state/driver.py
|
Intuity/nexus
|
0d1414fa2ea518dae9f031930c40692ebac5d154
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Peter Birch, mailto:peter@lightlogic.co.uk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cocotb_bus.drivers import Driver
from cocotb.triggers import RisingEdge
from ..driver_common import BaseDriver
class StateInitiator(BaseDriver):
""" Drivers signal state updates as an initiator """
async def _driver_send(self, transaction, sync=True, **kwargs):
""" Send queued transactions onto the interface.
Args:
transaction: Transaction to send
sync : Align to the rising clock edge before sending
**kwargs : Any other arguments
"""
# Synchronise to the rising edge
if sync: await RisingEdge(self.clock)
# Wait for reset to clear
while self.reset == 1: await RisingEdge(self.clock)
# Drive the request
self.intf.index <= transaction.index
self.intf.is_seq <= transaction.sequential
self.intf.value <= transaction.state
self.intf.update <= 1
await RisingEdge(self.clock)
self.intf.update <= 0
| 37.52381
| 74
| 0.692259
| 853
| 0.541244
| 0
| 0
| 0
| 0
| 757
| 0.48033
| 954
| 0.60533
|
5870b28965fe63f49e7e9e53bb51b1566ea6452e
| 1,463
|
py
|
Python
|
python/primsAlgo.py
|
Ayushd70/RetartedCodes
|
301ced178a0ec352b2d127e19028845de950551d
|
[
"MIT"
] | null | null | null |
python/primsAlgo.py
|
Ayushd70/RetartedCodes
|
301ced178a0ec352b2d127e19028845de950551d
|
[
"MIT"
] | null | null | null |
python/primsAlgo.py
|
Ayushd70/RetartedCodes
|
301ced178a0ec352b2d127e19028845de950551d
|
[
"MIT"
] | null | null | null |
# Prim's Algorithm in Python
INF = 9999999
# number of vertices in graph
V = 5
# create a 2d array of size 5x5
# for adjacency matrix to represent graph
G = [
[0, 9, 75, 0, 0],
[9, 0, 95, 19, 42],
[75, 95, 0, 51, 66],
[0, 19, 51, 0, 31],
[0, 42, 66, 31, 0],
]
# create a array to track selected vertex
# selected will become true otherwise false
selected = [0, 0, 0, 0, 0]
# set number of edge to 0
no_edge = 0
# the number of egde in minimum spanning tree will be
# always less than(V - 1), where V is number of vertices in
# graph
# choose 0th vertex and make it true
selected[0] = True
# print for edge and weight
print("Edge : Weight\n")
while no_edge < V - 1:
# For every vertex in the set S, find the all adjacent vertices
# , calculate the distance from the vertex selected at step 1.
# if the vertex is already in the set S, discard it otherwise
# choose another vertex nearest to selected vertex at step 1.
minimum = INF
x = 0
y = 0
for i in range(V):
if selected[i]:
for j in range(V):
if (not selected[j]) and G[i][j]:
# not in selected and there is an edge
if minimum > G[i][j]:
minimum = G[i][j]
x = i
y = j
print(str(x) + "-" + str(y) + ":" + str(G[x][y]))
selected[y] = True
no_edge += 1
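# Hedged trace (not part of the original file): for the matrix above, the
# loop prints after the header:
#   0-1:9
#   1-3:19
#   3-4:31
#   3-2:51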
| 31.12766
| 68
| 0.546822
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 746
| 0.509911
|
5870fd20f646ac17e765716a8a7674f3e6c452db
| 13,449
|
py
|
Python
|
tests/test_likelihood.py
|
sa2c/care-home-fit
|
58a2639c74b53e24f062d0dfc3e21df6d53b3077
|
[
"MIT"
] | null | null | null |
tests/test_likelihood.py
|
sa2c/care-home-fit
|
58a2639c74b53e24f062d0dfc3e21df6d53b3077
|
[
"MIT"
] | null | null | null |
tests/test_likelihood.py
|
sa2c/care-home-fit
|
58a2639c74b53e24f062d0dfc3e21df6d53b3077
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''Tests for the likelihood.py module'''
from time import perf_counter_ns
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
from scipy.stats import gamma
import likelihood
SMALL_FIT_PARAMS = {
'baseline_intensities': np.asarray([1, 2, np.nan, np.nan]),
'r_h': 1.5,
'r_c': 0.5
}
SIMPLE_DIST_PARAMS = {
'self_excitation_shape': 2,
'self_excitation_scale': 1,
'discharge_excitation_shape': 3,
'discharge_excitation_scale': 2
}
SMALL_CASES_FILE = 'tests/fixtures/small.csv'
SMALL_COVARIATES_FILE = 'tests/fixtures/small_covariates.csv'
LARGE_FIT_PARAMS = {
'baseline_intensities': np.asarray([0.3, 0.4, 0.6, 0.9]),
'r_h': 1.5,
'r_c': 0.5
}
FULL_DIST_PARAMS = {
'self_excitation_shape': 2.6,
'self_excitation_scale': 2.5,
'discharge_excitation_shape': 2.6,
'discharge_excitation_scale': 2.5
}
def test_gamma_pdf():
x = np.linspace(0, 10, 100)
shape = FULL_DIST_PARAMS['self_excitation_shape']
scale = FULL_DIST_PARAMS['self_excitation_scale']
assert_almost_equal(
gamma.pdf(x, a=shape, scale=scale),
likelihood.gamma_pdf(x, shape, scale)
)
@pytest.mark.parametrize(
"test_element,result_dtype",
[(123_456_789, np.uint32), (65_535, np.uint16), (255, np.uint8)]
)
def test_compactify(test_element, result_dtype):
'''Test that arrays compactify correctly, and to the correct data types'''
array = np.asarray([[1, 2], [3, test_element]], dtype=np.uint32)
result = likelihood.compactify(array)
assert result.dtype == result_dtype
assert_array_equal(array, result)
def test_read_and_tidy_data():
'''Test that a CSV file with care home IDs as a header row
is read, sorted, and split correctly.'''
ids, values = likelihood.read_and_tidy_data(SMALL_CASES_FILE)
assert_array_equal(ids, [14, 16, 35])
assert_array_equal(
values,
[[4, 1, 6], [4, 0, 3], [6, 66, 2]]
)
@pytest.fixture
def small_cases():
'''Get a small data file that could be cases or discharges.'''
return likelihood.read_and_tidy_data(SMALL_CASES_FILE)
@pytest.fixture
def small_covariates():
'''Get a small data file containing covariates.'''
return likelihood.read_and_tidy_data(SMALL_COVARIATES_FILE)
def test_carehome_intensity_null(small_cases, small_covariates):
'''Test that calculating the null-case intensity (based on mapping banded
carehome size to a base intensity) gives the correct result'''
_, cases = small_cases
_, covariates = small_covariates
intensity = likelihood.carehome_intensity_null(
covariates=covariates,
cases=cases,
fit_params=SMALL_FIT_PARAMS
)
assert_array_equal(intensity, [[1, 2, 2], [1, 2, 2], [1, 2, 2]])
def test_single_excitation(small_cases):
'''Test that excitation terms of the form
e_i(t) = \\sum_{s<t} f(t - s) triggers_i(s)
are correctly calculated'''
_, cases = small_cases
excitation = likelihood.single_excitation(cases, 2, 1)
assert_almost_equal(
excitation,
[[0, 0, 0], [1.472, 0.368, 2.207], [2.554, 0.271, 2.728]],
decimal=3
)
def test_cached_single_excitation(small_cases):
'''
Test that the caching of the single_excitation function works correctly.
'''
_, cases = small_cases
cases.flags.writeable = False
shape = SIMPLE_DIST_PARAMS['self_excitation_shape']
scale = SIMPLE_DIST_PARAMS['self_excitation_scale']
uncached_start = perf_counter_ns()
uncached_excitation = likelihood.single_excitation(cases, shape, scale)
uncached_end = perf_counter_ns()
first_excitation = likelihood.cached_single_excitation(
cases, shape, scale
)
assert_array_equal(uncached_excitation, first_excitation)
cached_start = perf_counter_ns()
cached_excitation = likelihood.cached_single_excitation(
cases, shape, scale
)
cached_end = perf_counter_ns()
assert_array_equal(uncached_excitation, cached_excitation)
# Cached version should be quicker
assert (cached_end - cached_start) < (uncached_end - uncached_start)
def test_carehome_intensity_no_discharges(small_cases, small_covariates):
'''Test that the behaviour of carehome_intensity in the case where
discharges are not considered.'''
_, cases = small_cases
_, covariates = small_covariates
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
intensity = likelihood.carehome_intensity(
covariates=covariates,
cases=cases,
fit_params=fit_params_no_rh,
dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(
intensity,
[[1, 2, 2], [1.736, 2.184, 3.104], [2.277, 2.135, 3.364]],
decimal=3
)
def test_carehome_intensity_with_discharges(small_cases, small_covariates):
'''Test that the behaviour of carehome_intensity is correct in the case
where discharges are considered.'''
_, cases = small_cases
_, covariates = small_covariates
discharges = cases[::-1]
intensity = likelihood.carehome_intensity(
covariates=covariates,
cases=cases,
fit_params=SMALL_FIT_PARAMS,
dist_params=SIMPLE_DIST_PARAMS,
discharges=discharges
)
assert_almost_equal(
intensity,
[[1, 2, 2], [2.077, 5.937, 3.217], [3.332, 11.240, 3.810]],
decimal=3
)
@pytest.mark.parametrize("mean, cv, expected_shape, expected_scale",
[(1, 1, 1, 1), (6.5, 0.62, 2.601, 2.499)])
def test_calculate_gamma_parameters(mean, cv, expected_shape, expected_scale):
'''Test that calculation of Scipy-style gamma parameters from "descriptive"
gamma parameters is correct.'''
shape, scale = likelihood.calculate_gamma_parameters(mean, cv)
assert_almost_equal([shape, scale], [expected_shape, expected_scale],
decimal=3)
def test_likelihood():
'''Test that the likelihood calculation is correct'''
cases = np.asarray([[3, 1, 0, 1], [1, 0, 2, 1], [0, 0, 0, 1]])
intensity = np.asarray(
[[1, 3, 1.5, 6], [4.2, 3.1, 7, 1.4], [2, 5.1, 4.2, 8.9]]
)
result = likelihood.likelihood(intensity, cases)
assert_almost_equal(result, -39.145, decimal=3)
def test_calculate_likelihood_from_files_no_discharges():
'''Test that likelihood is correctly calculated from input files
when discharges are not considered.'''
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None}
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -187.443, decimal=3)
def test_calculate_likelihood_from_files_no_cases():
'''Test that likelihood is correctly calculated from input files
when cases are not considered.'''
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_c': 0}
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
discharges_file=SMALL_CASES_FILE,
fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -189.046, decimal=3)
def test_calculate_likelihood_from_files_no_discharges_or_cases():
'''Test that likelihood is correctly calculated from input files
when neither cases nor discharges are considered.'''
fit_params_no_rh = {**SMALL_FIT_PARAMS, 'r_h': None, 'r_c': 0}
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
fit_params=fit_params_no_rh, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -196.466, decimal=3)
def test_calculate_likelihood_from_files_with_discharges():
'''Test that likelihood is correctly calculated from input files
when discharges are considered.'''
result = likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
discharges_file=SMALL_CASES_FILE,
fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS
)
assert_almost_equal(result, -182.761, decimal=3)
def test_calculate_likelihood_from_files_missing_discharges():
'''Test that an error is generated when r_h is provided but discharge data
are not'''
with pytest.raises(AssertionError):
likelihood.calculate_likelihood_from_files(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE,
fit_params=SMALL_FIT_PARAMS, dist_params=SIMPLE_DIST_PARAMS
)
@pytest.mark.parametrize(
'r_c, r_h, expect',
[(0, 0, 196.466),
(0.5, 1.5, 182.761),
(0.5, 0, 187.443),
(0, 1.5, 189.046)]
)
def test_fittable_likelihood(r_c, r_h, expect):
'''Test that the closure to give a version of intensity and likelihood that
can be fitted by scipy works correctly.'''
fittable_likelihood = likelihood.get_fittable_likelihood(
SMALL_CASES_FILE, SMALL_COVARIATES_FILE, SMALL_CASES_FILE
)
fit_params = np.asarray(
[r_c, r_h, *SMALL_FIT_PARAMS['baseline_intensities']]
)
assert_almost_equal(
fittable_likelihood(
fit_params, *map(
SIMPLE_DIST_PARAMS.get,
(('self_excitation_shape', 'self_excitation_scale',
'discharge_excitation_shape', 'discharge_excitation_scale'))
)
),
expect,
decimal=3
)
@pytest.fixture
def large_test_data():
'''Generate test data of the size expected from SAIL.'''
max_categories = 4
num_care_homes = 1000
num_cases = 2000
num_case_homes = 330
num_discharges = 3000
num_discharge_homes = 500
num_days = 181
num_covariates = 1
max_carehome_id = 32767
cases = np.zeros((num_days, num_care_homes), dtype=np.int8)
discharges = np.zeros((num_days, num_care_homes), dtype=np.int8)
covariates = np.zeros((num_covariates, num_care_homes), dtype=np.int8)
# For runs with the same version of numpy, we should get the same
# test data each time. Not guaranteed to work between versions
# because default_rng can change.
rng = np.random.default_rng(seed=0)
care_home_ids = rng.choice(
max_carehome_id, size=num_care_homes, replace=False
)
for sample_array, num_instances, num_places in (
(cases, num_cases, num_case_homes),
(discharges, num_discharges, num_discharge_homes)
):
for _ in range(num_instances):
sample_array[rng.integers(num_days), rng.integers(num_places)] += 1
covariates[0] = rng.choice(max_categories, size=num_care_homes)
for array in care_home_ids, cases, covariates, discharges:
array.flags.writeable = False
return care_home_ids, cases, covariates, discharges
def test_intensity_performance_base(large_test_data, benchmark):
'''
Test the performance of the intensity function for the base case
'''
_, cases, covariates, _ = large_test_data
kwargs = {
'fit_params': {**LARGE_FIT_PARAMS, 'r_h': None, 'r_c': None},
'covariates': covariates,
'cases': cases
}
# Ensure that numba can jit the function before timing it
likelihood.carehome_intensity_null(**kwargs)
benchmark(likelihood.carehome_intensity_null, **kwargs)
@pytest.mark.parametrize("use_cache", [True, False])
def test_intensity_performance_self(large_test_data, benchmark, use_cache):
'''
Test the performance of the intensity function with self-excitation
'''
_, cases, covariates, _ = large_test_data
if not use_cache:
# Writeable arrays are not cached
cases.flags.writeable = True
covariates.flags.writeable = True
kwargs = {
'fit_params': {**LARGE_FIT_PARAMS, 'r_h': None},
'covariates': covariates,
'cases': cases,
'dist_params': FULL_DIST_PARAMS
}
# Ensure that numba can jit the function before timing it
likelihood.carehome_intensity(**kwargs)
benchmark(likelihood.carehome_intensity, **kwargs)
@pytest.mark.parametrize("use_cache", [True, False])
def test_intensity_performance_hospitals(
large_test_data, benchmark, use_cache
):
'''
Test the performance of the intensity function with self- and
discharge excitations.'''
_, cases, covariates, discharges = large_test_data
if not use_cache:
# Writeable arrays are not cached
cases.flags.writeable = True
covariates.flags.writeable = True
discharges.flags.writeable = True
kwargs = {
'fit_params': LARGE_FIT_PARAMS,
'covariates': covariates,
'cases': cases,
'discharges': discharges,
'dist_params': FULL_DIST_PARAMS
}
# Ensure that numba can jit the function before timing it
likelihood.carehome_intensity(**kwargs)
benchmark(likelihood.carehome_intensity, **kwargs)
def test_likelihood_performance(large_test_data, benchmark):
'''
Test the performance of the calculation of likelihood from the intensity
and case distribution.'''
_, cases, covariates, discharges = large_test_data
intensity = likelihood.carehome_intensity(
fit_params=LARGE_FIT_PARAMS,
covariates=covariates,
cases=cases,
discharges=discharges,
dist_params=FULL_DIST_PARAMS
)
benchmark(likelihood.likelihood, intensity, cases)
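# Hedged note: running these tests assumes the fixture files under
# tests/fixtures/ and the pytest-benchmark plugin are available, e.g.
#   pytest tests/test_likelihood.py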
| 32.485507
| 79
| 0.693509
| 0
| 0
| 0
| 0
| 5,112
| 0.380103
| 0
| 0
| 3,412
| 0.253699
|
5871c92a8c31f58780367e72dd645d194490519d
| 266
|
py
|
Python
|
HalleyComet/bit/models.py
|
ryanduan/Halley_Comet
|
bd3263e4575c820dd14c265c2c0d4b6b44197682
|
[
"Apache-2.0"
] | null | null | null |
HalleyComet/bit/models.py
|
ryanduan/Halley_Comet
|
bd3263e4575c820dd14c265c2c0d4b6b44197682
|
[
"Apache-2.0"
] | null | null | null |
HalleyComet/bit/models.py
|
ryanduan/Halley_Comet
|
bd3263e4575c820dd14c265c2c0d4b6b44197682
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Url(models.Model):
long_url = models.CharField(max_length=200)
short_url = models.CharField(max_length=100)
visit_time = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.long_url
| 26.6
| 56
| 0.740602
| 235
| 0.883459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
587220b8fa00eda9456728bff03e4461bf290254
| 2,411
|
py
|
Python
|
data-structure/queue.py
|
66chenbiao/sleepace_verification_tool
|
6271312d9d78ee50703e27a75787510cab4c7f4d
|
[
"Apache-2.0"
] | null | null | null |
data-structure/queue.py
|
66chenbiao/sleepace_verification_tool
|
6271312d9d78ee50703e27a75787510cab4c7f4d
|
[
"Apache-2.0"
] | null | null | null |
data-structure/queue.py
|
66chenbiao/sleepace_verification_tool
|
6271312d9d78ee50703e27a75787510cab4c7f4d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
class Queue:
"""Simple Queue implementation - First in First Out"""
def __init__(self):
self.__data = []
def enqueue(self, text):
"""Add new element to queue
Arguments:
text {string} -- An element which needs to be added to an end of a queue
"""
self.__data.append(text)
def dequeue(self):
"""Gets a first element in a front of a queue
Returns:
string -- A first element in a front of a queue
"""
        if len(self.__data) == 0:
            return None
        # list.pop(0) removes and returns the front element in one step
        return self.__data.pop(0)
def front(self):
"""Checks a first element in a front of a queue
Returns:
string -- A first element in a front of a queue
"""
if len(self.__data) == 0:
return None
return self.__data[0]
def rear(self):
"""Checks a last element in a queue
Returns:
string -- A last element in a queue
"""
if len(self.__data) == 0:
return None
return self.__data[-1]
class QueueTest(unittest.TestCase):
def test_empty_queue(self):
queue = Queue()
self.assertIsNone(queue.front())
self.assertIsNone(queue.rear())
self.assertIsNone(queue.dequeue())
def test_add_one(self):
queue = Queue()
queue.enqueue("one")
self.assertEqual(queue.front(), "one", "Should be 'one'")
self.assertEqual(queue.rear(), "one", "Should be 'one'")
def test_add_three(self):
queue = Queue()
queue.enqueue("one")
queue.enqueue("two")
queue.enqueue("three")
self.assertEqual(queue.front(), "one", "Should be 'one'")
self.assertEqual(queue.rear(), "three", "Should be 'three'")
def test_add_three_get_one(self):
queue = Queue()
queue.enqueue("one")
queue.enqueue("two")
queue.enqueue("three")
taken = queue.dequeue()
self.assertEqual(queue.front(), "two", "Should be 'two'")
self.assertEqual(queue.rear(), "three", "Should be 'three'")
self.assertEqual(taken, "one", "Should be 'one'")
if __name__ == "__main__":
unittest.main()
| 27.397727
| 84
| 0.561178
| 2,340
| 0.970552
| 0
| 0
| 0
| 0
| 0
| 0
| 794
| 0.329324
|
58731f101956f7789a536c381812a3703830d466
| 262
|
py
|
Python
|
m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py
|
CCI-MOC/ABMI
|
955c12ae9d2dc7afe7323f6c25f2af120f5b281a
|
[
"Apache-2.0"
] | 108
|
2015-07-21T10:40:36.000Z
|
2021-07-01T06:54:51.000Z
|
m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py
|
CCI-MOC/ABMI
|
955c12ae9d2dc7afe7323f6c25f2af120f5b281a
|
[
"Apache-2.0"
] | 320
|
2015-07-21T01:33:20.000Z
|
2020-07-21T15:57:02.000Z
|
m2-modified/ims/common/agentless-system-crawler/crawler/plugins/emitters/http_emitter.py
|
CCI-MOC/ABMI
|
955c12ae9d2dc7afe7323f6c25f2af120f5b281a
|
[
"Apache-2.0"
] | 61
|
2015-07-20T18:26:37.000Z
|
2021-03-17T01:18:54.000Z
|
import logging
from iemit_plugin import IEmitter
from plugins.emitters.base_http_emitter import BaseHttpEmitter
logger = logging.getLogger('crawlutils')
class HttpEmitter(BaseHttpEmitter, IEmitter):
def get_emitter_protocol(self):
return 'http'
| 20.153846
| 62
| 0.793893
| 104
| 0.396947
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.068702
|
58733d23c2ee586468a2e3bd18d3eae0569b7613
| 1,946
|
py
|
Python
|
frequency_domain/dwt.py
|
StephenTaylor1998/Research
|
193dc88d368caf5a458be24456c4f6d5045d341f
|
[
"Apache-2.0"
] | null | null | null |
frequency_domain/dwt.py
|
StephenTaylor1998/Research
|
193dc88d368caf5a458be24456c4f6d5045d341f
|
[
"Apache-2.0"
] | null | null | null |
frequency_domain/dwt.py
|
StephenTaylor1998/Research
|
193dc88d368caf5a458be24456c4f6d5045d341f
|
[
"Apache-2.0"
] | 1
|
2022-03-27T14:04:46.000Z
|
2022-03-27T14:04:46.000Z
|
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pywt
class DWTForward(nn.Module):
def __init__(self, wave_name="haar"):
super(DWTForward, self).__init__()
wavelet = pywt.Wavelet(wave_name)
ll = np.outer(wavelet.dec_lo, wavelet.dec_lo)
lh = np.outer(wavelet.dec_hi, wavelet.dec_lo)
hl = np.outer(wavelet.dec_lo, wavelet.dec_hi)
hh = np.outer(wavelet.dec_hi, wavelet.dec_hi)
filts = np.stack([ll[None, :, :], lh[None, :, :],
hl[None, :, :], hh[None, :, :]],
axis=0)
self.weight = nn.Parameter(
torch.tensor(filts).to(torch.get_default_dtype()),
requires_grad=False)
def forward(self, x):
channel = x.shape[1]
filters = torch.cat([self.weight, ] * channel, dim=0)
# in tf2 self.strides = [1, 1, 2, 2, 1]
# x = tf.nn.conv3d(x, self.filter, padding='VALID', strides=self.strides)
y = F.conv2d(x, filters, groups=channel, stride=2)
return y
class DWTInverse(nn.Module):
def __init__(self, wave_name="haar"):
super(DWTInverse, self).__init__()
wavelet = pywt.Wavelet(wave_name)
ll = np.outer(wavelet.dec_lo, wavelet.dec_lo)
lh = np.outer(wavelet.dec_hi, wavelet.dec_lo)
hl = np.outer(wavelet.dec_lo, wavelet.dec_hi)
hh = np.outer(wavelet.dec_hi, wavelet.dec_hi)
filts = np.stack([ll[None, :, :], lh[None, :, :],
hl[None, :, :], hh[None, :, :]],
axis=0)
self.weight = nn.Parameter(
torch.tensor(filts).to(torch.get_default_dtype()),
requires_grad=False)
def forward(self, x):
channel = int(x.shape[1] / 4)
filters = torch.cat([self.weight, ] * channel, dim=0)
y = F.conv_transpose2d(x, filters, groups=channel, stride=2)
return y
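# Hedged round-trip sketch (shapes are illustrative): the haar filters are
# orthogonal, so the stride-2 transposed convolution undoes the forward pass.
#   x = torch.randn(1, 3, 64, 64)
#   y = DWTForward("haar")(x)      # -> (1, 12, 32, 32): 4 sub-bands per channel
#   x_rec = DWTInverse("haar")(y)  # -> (1, 3, 64, 64), equal to x up to float error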
| 38.156863
| 81
| 0.571429
| 1,843
| 0.947071
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.06372
|
58747a38ec4c868ae85caa7d4f7a021b2a655030
| 7,973
|
py
|
Python
|
facebook_messenger_conversation.py
|
davidkrantz/FacebookChatStatistics
|
01fc2a022d45ed695fa7f4ad53d6532a160379db
|
[
"MIT"
] | 35
|
2018-02-22T09:04:21.000Z
|
2022-03-21T18:28:21.000Z
|
facebook_messenger_conversation.py
|
davidkrantz/FacebookChatStatistics
|
01fc2a022d45ed695fa7f4ad53d6532a160379db
|
[
"MIT"
] | 5
|
2018-05-03T17:56:35.000Z
|
2022-02-24T08:19:58.000Z
|
facebook_messenger_conversation.py
|
davidkrantz/FacebookChatStatistics
|
01fc2a022d45ed695fa7f4ad53d6532a160379db
|
[
"MIT"
] | 12
|
2018-05-15T19:15:25.000Z
|
2022-02-24T08:20:15.000Z
|
import sys
import numpy as np
import json
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime, timedelta
import matplotlib.dates as mdates
import emoji
class FacebookMessengerConversation():
"""Module for getting stats of a Facebook Messenger conversation.
Attributes:
data (dict): The conversation of interest.
title (str) : Title of the conversation.
p (list): List of conversation participants.
"""
def __init__(self, conversation):
"""Prepares `conversation` and fetches its participants.
Args:
conversation (json): Conversation downloaded from
Facebook (see https://www.facebook.com/help/
212802592074644?helpref=uf_permalink)
"""
self.data = json.load(open(conversation))
self.title = self.data['title']
# Convert unicode characters
for p in self.data['participants']:
p['name'] = p['name'].encode('raw_unicode_escape').decode('utf-8')
for message in self.data['messages']:
message['sender_name'] = message['sender_name'].encode(
'raw_unicode_escape').decode('utf-8')
if 'content' in message:
message['content'] = message['content'].encode(
'raw_unicode_escape').decode('utf-8')
# Set names of conversation participants
nbr_participants = len(self.data['participants'])
self.p = nbr_participants * [None]
for i in range(nbr_participants):
self.p[i] = self.data['participants'][i]['name']
def get_participants(self):
"""Returns the names of the conversation participants.
Returns:
list: Contains the conversation participants
"""
return self.p
def get_time_interval(self, type):
"""Returns the start and end time of the conversation.
Args:
type (str): Decides what type should be returned. Either
'datetime' or 'str'.
Returns:
tuple: (start, end). Either as datetimes or strings.
Raises:
ValueError: If a not supported `type` was entered.
"""
start = datetime.fromtimestamp(
self.data['messages'][-1]['timestamp_ms']/1000)
end = datetime.fromtimestamp(
self.data['messages'][0]['timestamp_ms']/1000)
if type == 'datetime':
return start, end
elif type == 'str':
return start.strftime('%Y-%m-%d %H:%M:%S'), \
end.strftime('%Y-%m-%d %H:%M:%S')
else:
raise ValueError('Type not supported. Must be '\
'either datetime or str.')
def get_nbr_days(self):
"""Returns the number days between the first and last message.
Returns:
int: Days between start and end.
"""
start, end = self.get_time_interval('datetime')
return (end - start).days + 1
def get_nbr_msg(self):
"""Returns the total number of messages.
Returns:
int: Number of messages.
"""
return len(self.data['messages'])
def get_nbr_words(self):
"""Returns the total number of words.
Returns:
int: Number of words.
"""
nbr_words = 0
for message in self.data['messages']:
if 'content' in message:
nbr_words += len(message['content'].split())
return nbr_words
def get_avg_len_msg(self):
"""Returns the average length of a message.
Returns:
float: Average length of message.
"""
return round(self.get_nbr_words()/self.get_nbr_msg(), 1)
def get_avg_msg_day(self):
"""Returns the average number of messages sent each day.
Returns:
float: Average number of messages sent per day.
"""
return round(self.get_nbr_msg()/self.get_nbr_days(), 1)
def activity(self):
"""Activity by each conversation participant.
Returns:
dict: Contains a list (value) with the number of messages
sent and the percentage it corresponds to per
participant (key).
"""
nbr_msg = self.get_nbr_msg()
act = {p: 0 for p in self.p}
for message in self.data['messages']:
try:
act[message['sender_name']] += 1
except KeyError:
pass
for key in act:
nbr_msg_p = act[key]
act[key] = [nbr_msg_p, 100*round(nbr_msg_p/nbr_msg, 2)]
return act
def timeline(self):
"""Fetches data when messages are sent.
Returns:
tuple: Containing which days messages were sent and also
how many were sent per day, weekday and hour.
"""
nbr_days = self.get_nbr_days()
timeline = [None] * nbr_days
hour = list(range(24))
weekday_arr = [0, 1, 2, 3, 4, 5, 6]
nbr_times_hour = [0] * 24
nbr_times_weekday = [0] * 7
nbr_times_day = [0] * nbr_days
_, end = self.get_time_interval('datetime')
current_day = end.date()
index = len(timeline) - 1
timeline[index] = current_day
nbr_times_day[index] = 1
for message in self.data['messages']:
current = datetime.fromtimestamp(
message['timestamp_ms']/1000)
h = int(round(current.hour + current.minute/60. +\
current.second/3600))
if h == 24:
h = 0
nbr_times_hour[h] = nbr_times_hour[h] + 1
wd = current.weekday()
nbr_times_weekday[wd] = nbr_times_weekday[wd] + 1
current = current.date()
if current == current_day:
nbr_times_day[index] = nbr_times_day[index] + 1
elif current < current_day:
diff = (current_day - current).days
index = index - diff
current_day = current
timeline[index] = current_day
nbr_times_day[index] = 1
dates = [None] * len(timeline)
for i in range(0, len(timeline)):
            if timeline[i] is None:
timeline[i] = timeline[i - 1] + timedelta(days=1)
dates[i] = timeline[i].strftime('%Y-%m-%d')
return timeline, nbr_times_day, nbr_times_weekday, nbr_times_hour
def top_emojis(self, nbr):
"""Returns the top `nbr` emojis used and who sent them.
Args:
nbr (int): The number of emojis to include in top list.
Returns:
tuple: List of top emojis and dict showing how many of
these were sent by each participant.
"""
emojis = {e: 0 for e in iter(emoji.UNICODE_EMOJI['en'].values())}
emojis_p = {p: 0 for p in self.p}
for p in emojis_p:
emojis_p[p] = {e: 0 for e in iter(emoji.UNICODE_EMOJI['en'].values())}
for message in self.data['messages']:
if 'content' in message:
msg = message['content']
sender = message['sender_name']
for c in msg:
emoji_str = emoji.demojize(c)
if emoji_str in emojis and sender in emojis_p:
emojis_p[sender][emoji_str] += 1
emojis[emoji_str] += 1
top_emojis = [emoji_key for emoji_key, count in sorted(emojis.items(),
key=lambda kv: (-kv[1], kv[0]))[:nbr]]
emojis_count_p = {p: {} for p in self.p}
for p in self.p:
emojis_count_p[p] = [emojis_p[p][e] for e in top_emojis]
top_emojis = [emoji.emojize(top_emoji) for top_emoji in top_emojis]
return top_emojis, emojis_count_p
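# Hedged usage sketch ('message.json' is a hypothetical path to a Facebook
# data-export conversation in the layout __init__ expects):
#   fmc = FacebookMessengerConversation('message.json')
#   print(fmc.get_participants(), fmc.get_nbr_msg(), fmc.get_avg_msg_day())
#   top_emojis, counts_per_person = fmc.top_emojis(nbr=5)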
| 34.07265
| 82
| 0.56014
| 7,712
| 0.967265
| 0
| 0
| 0
| 0
| 0
| 0
| 2,945
| 0.369372
|
5876eec474b4c0410b6e104f6c352d08e47e01ce
| 1,758
|
py
|
Python
|
setup.py
|
stevearc/pyramid_duh
|
af14b185533d00b69dfdb8ab1cab6f1d1d8d4647
|
[
"MIT"
] | 5
|
2015-12-15T09:27:16.000Z
|
2017-12-12T12:56:04.000Z
|
setup.py
|
stevearc/pyramid_duh
|
af14b185533d00b69dfdb8ab1cab6f1d1d8d4647
|
[
"MIT"
] | null | null | null |
setup.py
|
stevearc/pyramid_duh
|
af14b185533d00b69dfdb8ab1cab6f1d1d8d4647
|
[
"MIT"
] | null | null | null |
""" Setup file """
import os
import sys
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(HERE, 'README.rst')).read()
CHANGES = open(os.path.join(HERE, 'CHANGES.rst')).read()
REQUIREMENTS = [
'pyramid',
'six',
]
TEST_REQUIREMENTS = [
'mock',
]
if sys.version_info[:2] < (2, 7):
TEST_REQUIREMENTS.extend(['unittest2'])
if __name__ == "__main__":
setup(
name='pyramid_duh',
version='0.1.2',
description='Useful utilities for every pyramid app',
long_description=README + '\n\n' + CHANGES,
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pyramid',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Utilities',
],
author='Steven Arcangeli',
author_email='arcangeli07@gmail.com',
url='http://pyramid-duh.readthedocs.org/',
keywords='pyramid util utility',
license='MIT',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=('tests',)),
install_requires=REQUIREMENTS,
tests_require=REQUIREMENTS + TEST_REQUIREMENTS,
test_suite='tests',
)
| 30.310345
| 61
| 0.580205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 745
| 0.423777
|
5877683380ccf730fab956e05f9d48490796920d
| 555
|
py
|
Python
|
telegramhelpers.py
|
olgamirete/log-public-ip
|
0e53c27e62f0709a9d0adf52c860b407a841a252
|
[
"MIT"
] | null | null | null |
telegramhelpers.py
|
olgamirete/log-public-ip
|
0e53c27e62f0709a9d0adf52c860b407a841a252
|
[
"MIT"
] | null | null | null |
telegramhelpers.py
|
olgamirete/log-public-ip
|
0e53c27e62f0709a9d0adf52c860b407a841a252
|
[
"MIT"
] | null | null | null |
import requests, os
from dotenv import load_dotenv
load_dotenv()
API_TOKEN = os.getenv('API_TOKEN')
def sendMessage(user_id: str, text: str, max_retries: int = 1):
url = f'https://api.telegram.org/bot{API_TOKEN}/sendMessage'
payload = {
"chat_id": user_id,
"text": text
}
    isOk = False
    for _ in range(max_retries):
        r = requests.get(url, params=payload)
        try:
            # the Telegram Bot API reports success in the "ok" field
            isOk = r.json().get("ok") is True
        except ValueError:
            # response body was not valid JSON; count as a failed attempt
            isOk = False
        if isOk:
            return isOk
    return isOk
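# Hedged usage sketch (the chat id is hypothetical; API_TOKEN must be set in
# the .env file loaded above):
#   if sendMessage("123456789", "public IP changed", max_retries=3):
#       print("notification delivered")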
| 24.130435
| 64
| 0.572973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.151351
|
58793e76d3fcb25dfcdd3339f4cd5621aa988f33
| 1,880
|
py
|
Python
|
datawinners/questionnaire/tests/test_questionnaire_template.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | 1
|
2015-11-02T09:11:12.000Z
|
2015-11-02T09:11:12.000Z
|
datawinners/questionnaire/tests/test_questionnaire_template.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
datawinners/questionnaire/tests/test_questionnaire_template.py
|
ICT4H/dcs-web
|
fb0f53fad4401cfac1c1789ff28b9d5bda40c975
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from mock import patch, MagicMock
from mangrove.datastore.database import DatabaseManager
from datawinners.questionnaire.library import QuestionnaireLibrary
class TestQuestionnaireTemplate(TestCase):
def test_get_category_to_doc_mappings(self):
with patch('datawinners.questionnaire.library.get_db_manager') as get_db_manager:
mock_dbm = MagicMock(spec=DatabaseManager)
get_db_manager.return_value = mock_dbm
mock_dbm.load_all_rows_in_view.return_value = [
{'key': 'Health', 'value': {'name': 'one', 'id': 'health1'}},
{'key': 'Health', 'value': {'name': 'two', 'id': 'health2'}},
{'key': 'Agriculture', 'value': {'name': 'three', 'id': 'agri1'}}
]
library = QuestionnaireLibrary()
result = library.get_template_groupings('en')
expected = [
{'category': 'Agriculture', 'templates': [{'id': 'agri1', 'name': 'three'}]},
{'category': 'Health',
'templates': [{'id': 'health1', 'name': 'one'}, {'id': 'health2', 'name': 'two'}]}]
self.assertDictEqual(expected[0], result[0])
self.assertDictEqual(expected[1], result[1])
mock_dbm.load_all_rows_in_view.assert_called_with('by_template_category_en')
def test_template_details_with_french_loaded_when_language_is_french(self):
with patch('datawinners.questionnaire.library.get_db_manager') as get_db_manager:
mock_dbm = MagicMock(spec=DatabaseManager)
get_db_manager.return_value = mock_dbm
mock_dbm.load_all_rows_in_view.return_value = []
library = QuestionnaireLibrary()
library.get_template_groupings('fr')
mock_dbm.load_all_rows_in_view.assert_called_with('by_template_category_fr')
| 47
| 100
| 0.643617
| 1,688
| 0.897872
| 0
| 0
| 0
| 0
| 0
| 0
| 430
| 0.228723
|
58795121905bb3b21c3853525b46782e8fe333ee
| 2,826
|
py
|
Python
|
chainer_chemistry/links/update/ggnn_update.py
|
pfnet/chainerchem
|
efe323aa21f63a815130d673781e7cca1ccb72d2
|
[
"MIT"
] | 184
|
2019-11-27T12:59:01.000Z
|
2022-03-29T19:18:54.000Z
|
chainer_chemistry/links/update/ggnn_update.py
|
pfnet/chainerchem
|
efe323aa21f63a815130d673781e7cca1ccb72d2
|
[
"MIT"
] | 21
|
2019-12-08T01:53:33.000Z
|
2020-10-23T01:19:56.000Z
|
chainer_chemistry/links/update/ggnn_update.py
|
pfnet/chainerchem
|
efe323aa21f63a815130d673781e7cca1ccb72d2
|
[
"MIT"
] | 45
|
2019-11-28T09:59:54.000Z
|
2022-02-07T02:42:46.000Z
|
import chainer
from chainer import functions
from chainer import links
import chainer_chemistry
from chainer_chemistry.links.connection.graph_linear import GraphLinear
from chainer_chemistry.utils import is_sparse
class GGNNUpdate(chainer.Chain):
"""GGNN submodule for update part.
Args:
in_channels (int or None): input dim of feature vector for each node
hidden_channels (int): dimension of feature vector for each node
        out_channels (int or None): output dim of feature vector for each node
When `None`, `hidden_channels` is used.
n_edge_types (int): number of types of edge
"""
def __init__(self, in_channels=None, hidden_channels=16,
out_channels=None, n_edge_types=4, **kwargs):
if out_channels is None:
out_channels = hidden_channels
super(GGNNUpdate, self).__init__()
if in_channels is None:
gru_in_channels = None
else:
gru_in_channels = in_channels + hidden_channels
with self.init_scope():
self.graph_linear = GraphLinear(
in_channels, n_edge_types * hidden_channels)
self.update_layer = links.GRU(gru_in_channels, out_channels)
self.n_edge_types = n_edge_types
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
def __call__(self, h, adj, **kwargs):
hidden_ch = self.hidden_channels
# --- Message part ---
mb, atom, in_ch = h.shape
m = functions.reshape(self.graph_linear(h),
(mb, atom, hidden_ch, self.n_edge_types))
# m: (minibatch, atom, ch, edge_type)
# Transpose
m = functions.transpose(m, (0, 3, 1, 2))
# m: (minibatch, edge_type, atom, ch)
# (minibatch * edge_type, atom, out_ch)
m = functions.reshape(m, (mb * self.n_edge_types, atom, hidden_ch))
if is_sparse(adj):
m = functions.sparse_matmul(adj, m)
else:
adj = functions.reshape(adj, (mb * self.n_edge_types, atom, atom))
m = chainer_chemistry.functions.matmul(adj, m)
# (minibatch * edge_type, atom, out_ch)
m = functions.reshape(m, (mb, self.n_edge_types, atom, hidden_ch))
m = functions.sum(m, axis=1)
# (minibatch, atom, out_ch)
# --- Update part ---
# Contraction
h = functions.reshape(h, (mb * atom, in_ch))
# Contraction
m = functions.reshape(m, (mb * atom, hidden_ch))
out_h = self.update_layer(functions.concat((h, m), axis=1))
# Expansion
out_h = functions.reshape(out_h, (mb, atom, self.out_channels))
return out_h
def reset_state(self):
self.update_layer.reset_state()
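# Hedged shape-check sketch (sizes are illustrative; Chainer links accept
# numpy arrays, so this assumes `import numpy as np`):
#   update = GGNNUpdate(in_channels=8, hidden_channels=16)
#   h = np.zeros((2, 5, 8), dtype=np.float32)       # (minibatch, atom, in_ch)
#   adj = np.zeros((2, 4, 5, 5), dtype=np.float32)  # (minibatch, edge_type, atom, atom)
#   out = update(h, adj)                            # -> (2, 5, 16)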
| 36.230769
| 79
| 0.624204
| 2,608
| 0.922859
| 0
| 0
| 0
| 0
| 0
| 0
| 657
| 0.232484
|
587a404ecb9909eff07171e4499fcb5702d3abd5
| 78
|
py
|
Python
|
samples/ast/test.py
|
Ryoich/python_zero
|
fe4a5fd8b11c8c059d82b797cd1668f96d54e541
|
[
"CC-BY-4.0"
] | 203
|
2018-12-14T10:16:33.000Z
|
2022-03-10T07:23:34.000Z
|
samples/ast/test.py
|
Ryoich/python_zero
|
fe4a5fd8b11c8c059d82b797cd1668f96d54e541
|
[
"CC-BY-4.0"
] | 39
|
2019-06-21T12:28:03.000Z
|
2022-01-17T10:41:53.000Z
|
samples/ast/test.py
|
Ryoich/python_zero
|
fe4a5fd8b11c8c059d82b797cd1668f96d54e541
|
[
"CC-BY-4.0"
] | 29
|
2018-12-30T06:48:59.000Z
|
2022-03-10T07:43:42.000Z
|
def func(a, b):
return a + b
def func2(a):
print(a)
print("Hello")
| 8.666667
| 16
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 0.089744
|
587a892ea698fcb43f251688aa0bd017aec53e6b
| 1,621
|
py
|
Python
|
badboids/test/test_simulation_parameters.py
|
RiannaK/Coursework2
|
471589593fd09c61fae39cb5975cc88fee36971c
|
[
"MIT"
] | null | null | null |
badboids/test/test_simulation_parameters.py
|
RiannaK/Coursework2
|
471589593fd09c61fae39cb5975cc88fee36971c
|
[
"MIT"
] | 2
|
2017-01-02T11:11:31.000Z
|
2017-01-02T22:09:15.000Z
|
badboids/test/test_simulation_parameters.py
|
RiannaK/Coursework2
|
471589593fd09c61fae39cb5975cc88fee36971c
|
[
"MIT"
] | null | null | null |
from numpy.testing import assert_array_almost_equal as array_assert
from badboids.boids import SimulationParameters
def test_simulation_parameters_init():
"""Tests Simulation Parameters constructor"""
# Arrange
formation_flying_distance = 800
formation_flying_strength = 0.10
alert_distance = 8
move_to_middle_strength = 0.2
delta_t = 1.5
# Act
sut = SimulationParameters(formation_flying_distance, formation_flying_strength, alert_distance,
move_to_middle_strength, delta_t)
# Assert
array_assert(sut.formation_flying_distance, formation_flying_distance)
array_assert(sut.formation_flying_strength, formation_flying_strength)
array_assert(sut.alert_distance, alert_distance)
array_assert(sut.move_to_middle_strength, move_to_middle_strength)
array_assert(sut.delta_t, delta_t)
def test_get_defaults():
"""Tests Simulation Parameters get defaults method"""
# Arrange
expected_formation_flying_distance = 10000
expected_formation_flying_strength = 0.125
expected_alert_distance = 100
expected_move_to_middle_strength = 0.01
expected_delta_t = 1.0
# Act
parameters = SimulationParameters.get_defaults()
# Assert
assert parameters.formation_flying_distance == expected_formation_flying_distance
assert parameters.formation_flying_strength == expected_formation_flying_strength
assert parameters.alert_distance == expected_alert_distance
assert parameters.move_to_middle_strength == expected_move_to_middle_strength
assert parameters.delta_t == expected_delta_t
| 33.770833
| 100
| 0.779766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.0876
|
587e704ad57d09ab05a6f91557e90faddd8fb439
| 3,247
|
py
|
Python
|
django_town/oauth2/models.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
django_town/oauth2/models.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
django_town/oauth2/models.py
|
uptown/django-town
|
4c3b078a8ce5dcc275d65faa4a1cdfb7ebc74a50
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
from django_town.core.settings import OAUTH2_SETTINGS
try:
if not OAUTH2_SETTINGS.ACCESS_TOKEN_SECRET_KEY:
raise ImportError
except KeyError:
# import traceback
# traceback.print_exc()
raise ImportError
from django.db import models
from django.conf import settings
from django.contrib import admin
from django_town.cache.model import CachingModel
from django_town.core.fields import JSONField
from django_town.utils import generate_random_from_vschar_set
class Service(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
# class ServiceSecretKey(CachingModel):
# cache_key_format = "_ut_o2ss:%(service__pk)d"
#
# service = models.ForeignKey(Service, unique=True)
# secret_key = models.CharField(max_length=OAUTH2_SETTINGS.SERVICE_SECRET_KEY_LENGTH,
# default=lambda: generate_random_from_vschar_set(
# OAUTH2_SETTINGS.SERVICE_SECRET_KEY_LENGTH))
def _generate_random_from_vschar_set_for_client_id():
return generate_random_from_vschar_set(OAUTH2_SETTINGS.CLIENT_ID_LENGTH)
def _generate_random_from_vschar_set_for_client_secret():
    return generate_random_from_vschar_set(OAUTH2_SETTINGS.CLIENT_SECRET_LENGTH)
class Client(CachingModel):
IOS_CLIENT = 1
CLIENT_TYPE = (
(0, "Web"),
(1, "iOS"),
(2, "Android"),
(3, "Win"),
)
cache_key_format = "_ut_o2c:%(client_id)s"
name = models.CharField(max_length=200)
service = models.ForeignKey(Service)
client_id = models.CharField(max_length=OAUTH2_SETTINGS.CLIENT_ID_LENGTH, unique=True,
default=_generate_random_from_vschar_set_for_client_id)
client_secret = models.CharField(max_length=OAUTH2_SETTINGS.CLIENT_SECRET_LENGTH,
default=_generate_random_from_vschar_set_for_client_secret)
redirect_uris = JSONField(blank=True)
default_redirect_uri = models.URLField()
available_scope = JSONField(blank=True)
client_type = models.IntegerField(default=IOS_CLIENT, choices=CLIENT_TYPE)
client_min_version = models.CharField(max_length=20, default="")
client_cur_version = models.CharField(max_length=20, default="")
client_store_id = models.CharField(max_length=30, default="")
def __unicode__(self):
return self.name
def _generate_random_from_vschar_set_for_secret_key():
return generate_random_from_vschar_set(OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH)
class UserClientSecretKey(CachingModel):
cache_key_format = "_ut_o2u:%(user_id)d:%(client__pk)d"
user_id = models.IntegerField()
client = models.ForeignKey(Client)
secret_key = models.CharField(max_length=OAUTH2_SETTINGS.USER_SECRET_KEY_LENGTH,
default=_generate_random_from_vschar_set_for_secret_key)
    class Meta:
        unique_together = (("user_id", "client"),)
class Scope(models.Model):
name = models.CharField(max_length=30, unique=True)
class ClientAdmin(admin.ModelAdmin):
readonly_fields = ['client_id', 'client_secret']
admin.site.register(Client, ClientAdmin)
admin.site.register(Service, admin.ModelAdmin)
| 33.132653
| 92
| 0.73206
| 1,816
| 0.559285
| 0
| 0
| 0
| 0
| 0
| 0
| 597
| 0.183862
|
587e7271e86565dcf7c4f99ca8d0228de3d2839e
| 265
|
py
|
Python
|
util_list_files.py
|
jhu-alistair/image_utilities
|
07fcf2fb78b57b3e8ac798daffa9f4d7b05d9063
|
[
"Apache-2.0"
] | null | null | null |
util_list_files.py
|
jhu-alistair/image_utilities
|
07fcf2fb78b57b3e8ac798daffa9f4d7b05d9063
|
[
"Apache-2.0"
] | null | null | null |
util_list_files.py
|
jhu-alistair/image_utilities
|
07fcf2fb78b57b3e8ac798daffa9f4d7b05d9063
|
[
"Apache-2.0"
] | null | null | null |
# List files in a directory. Useful for testing the path
from local_tools import *
from image_renamer import ImageRenamer
if confirm_config('path'):
img_path = get_config('path')
fl = ImageRenamer(img_path)
for ff in fl.image_files():
print(ff)
| 29.444444
| 56
| 0.720755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.256604
|
587f22d6d391706fced03d26fcfcf342a5722cf3
| 1,394
|
py
|
Python
|
deepmedic_config.py
|
farrokhkarimi/deepmedic_project
|
b0c916171673ce3259d2458146f2db941f0bf270
|
[
"MIT"
] | 2
|
2021-07-15T18:40:18.000Z
|
2021-08-03T17:10:12.000Z
|
deepmedic_config.py
|
farrokhkarimi/deepmedic_project
|
b0c916171673ce3259d2458146f2db941f0bf270
|
[
"MIT"
] | null | null | null |
deepmedic_config.py
|
farrokhkarimi/deepmedic_project
|
b0c916171673ce3259d2458146f2db941f0bf270
|
[
"MIT"
] | 1
|
2022-01-17T12:11:51.000Z
|
2022-01-17T12:11:51.000Z
|
import os
def deepmedic_config(config_files_path, niftis_path, test_flair_file_name, test_t1c_file_name, mask, prediction_file_name, output_path):
with open(os.path.join(config_files_path, 'model', 'modelConfig.cfg'), 'r') as f:
lines = f.readlines()
lines[8] = 'folderForOutput = "%s"\n' % output_path
with open(os.path.join(config_files_path, 'model', 'modelConfig.cfg'), 'w') as f:
f.writelines(lines)
with open(os.path.join(config_files_path, 'test', 'testConfig.cfg'), 'r') as f:
lines = f.readlines()
lines[8] = 'folderForOutput = "%s"\n' % output_path
with open(os.path.join(config_files_path, 'test', 'testConfig.cfg'), 'w') as f:
f.writelines(lines)
with open(os.path.join(config_files_path, 'test', 'testChannels_flair.cfg'), 'w') as f:
f.write(os.path.join(niftis_path, test_flair_file_name))
with open (os.path.join(config_files_path, 'test', 'testChannels_t1c.cfg'), 'w') as f:
f.write(os.path.join(niftis_path, test_t1c_file_name))
with open(os.path.join(config_files_path, 'test', 'testRoiMasks.cfg'), 'w') as f:
f.write(os.path.join(niftis_path, mask))
    with open(os.path.join(config_files_path, 'test', 'testNamesOfPredictions.cfg'), 'w') as f:
f.write(prediction_file_name)
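# Hedged usage sketch (every path and file name below is hypothetical):
#   deepmedic_config('/opt/deepmedic/configFiles', '/data/niftis',
#                    'flair.nii.gz', 't1c.nii.gz', 'mask.nii.gz',
#                    'prediction', '/data/output')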
| 48.068966
| 137
| 0.636298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 284
| 0.20373
|
587fce66d43c23ddc2eed105e1033650f3ef5080
| 174
|
py
|
Python
|
configs/models/aott.py
|
yoxu515/aot-benchmark
|
5a7665fc8e0f0e64bc8ba6028b15d9ab32f4c56a
|
[
"BSD-3-Clause"
] | 105
|
2021-11-16T12:43:59.000Z
|
2022-03-31T08:05:11.000Z
|
configs/models/aott.py
|
lingyunwu14/aot-benchmark
|
99f74f051c91ac221e44f3edab3534ae4dd233f7
|
[
"BSD-3-Clause"
] | 14
|
2021-11-18T09:52:36.000Z
|
2022-03-31T16:26:32.000Z
|
configs/models/aott.py
|
lingyunwu14/aot-benchmark
|
99f74f051c91ac221e44f3edab3534ae4dd233f7
|
[
"BSD-3-Clause"
] | 17
|
2021-11-16T13:28:29.000Z
|
2022-03-29T02:14:48.000Z
|
import os
from .default import DefaultModelConfig
class ModelConfig(DefaultModelConfig):
def __init__(self):
super().__init__()
self.MODEL_NAME = 'AOTT'
| 21.75
| 39
| 0.706897
| 122
| 0.701149
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.034483
|
58803dca4ce3bbdf62d3eb2aca41861e7c2239d8
| 1,125
|
py
|
Python
|
main.py
|
philipperemy/tensorflow-fifo-queue-example
|
1e38e6e907b7856e954caeeb01d52f7d66e54de0
|
[
"MIT"
] | 42
|
2017-03-17T07:22:30.000Z
|
2022-03-31T16:11:50.000Z
|
main.py
|
afcarl/tensorflow-fifo-queue-example
|
885bcfe417f6d3a9beb4180922cd221d95abc1ef
|
[
"MIT"
] | 2
|
2017-08-15T19:23:26.000Z
|
2017-08-18T02:49:11.000Z
|
main.py
|
afcarl/tensorflow-fifo-queue-example
|
885bcfe417f6d3a9beb4180922cd221d95abc1ef
|
[
"MIT"
] | 15
|
2017-05-04T07:27:24.000Z
|
2022-03-31T16:26:28.000Z
|
from __future__ import print_function
import time
import tensorflow as tf
from data import DataGenerator
def define_net(input_batch):
return input_batch + 20 # simplest network I could think of.
def main():
batch_size = 1
coord = tf.train.Coordinator()
with tf.name_scope('create_inputs'):
reader = DataGenerator(coord)
input_batch = reader.dequeue(batch_size)
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
init = tf.global_variables_initializer()
sess.run(init)
threads = reader.start_threads(sess)
net = define_net(input_batch)
queue_size = reader.queue_size
for step in range(10000):
print('size queue =', queue_size.eval(session=sess))
print(sess.run(net))
# Make this thread slow. You can comment this line. If you do so, you will dequeue
# faster than you enqueue, so expect the queue not to reach its maximum (32 by default)
time.sleep(1)
coord.request_stop()
print("stop requested.")
for thread in threads:
thread.join()
if __name__ == '__main__':
main()
| 25
| 95
| 0.683556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 261
| 0.232
|
58803eeb0ae16d220e9ed2f74395fa7c80ff5afa
| 1,285
|
py
|
Python
|
Chapter_3/OO_DimmerSwitch_Model1.py
|
zjwillie/Object-Oriented-Python-Code
|
017b07084c7937c870926b96a856f60b9d7077aa
|
[
"BSD-2-Clause"
] | 38
|
2021-11-16T03:04:42.000Z
|
2022-03-27T05:57:50.000Z
|
Chapter_3/OO_DimmerSwitch_Model1.py
|
zjwillie/Object-Oriented-Python-Code
|
017b07084c7937c870926b96a856f60b9d7077aa
|
[
"BSD-2-Clause"
] | null | null | null |
Chapter_3/OO_DimmerSwitch_Model1.py
|
zjwillie/Object-Oriented-Python-Code
|
017b07084c7937c870926b96a856f60b9d7077aa
|
[
"BSD-2-Clause"
] | 22
|
2021-11-11T15:57:58.000Z
|
2022-03-18T12:58:07.000Z
|
# Dimmer Switch class
class DimmerSwitch():
def __init__(self, label):
self.label = label
self.isOn = False
self.brightness = 0
def turnOn(self):
self.isOn = True
# turn the light on at self.brightness
def turnOff(self):
self.isOn = False
# turn the light off
def raiseLevel(self):
if self.brightness < 10:
self.brightness = self.brightness + 1
def lowerLevel(self):
if self.brightness > 0:
self.brightness = self.brightness - 1
# Extra method for debugging
def show(self):
print('Label:', self.label)
print('Light is on?', self.isOn)
print('Brightness is:', self.brightness)
print()
# Main code
# Create first DimmerSwitch, turn on and raise level twice
oDimmer1 = DimmerSwitch('Dimmer1')
oDimmer1.turnOn()
oDimmer1.raiseLevel()
oDimmer1.raiseLevel()
# Create second DimmerSwitch, turn on and raise level 3 times
oDimmer2 = DimmerSwitch('Dimmer2')
oDimmer2.turnOn()
oDimmer2.raiseLevel()
oDimmer2.raiseLevel()
oDimmer2.raiseLevel()
# Create third DimmerSwitch, using the default settings
oDimmer3 = DimmerSwitch('Dimmer3')
# Ask each switch to show itself
oDimmer1.show()
oDimmer2.show()
oDimmer3.show()
| 23.363636
| 61
| 0.654475
| 730
| 0.568093
| 0
| 0
| 0
| 0
| 0
| 0
| 389
| 0.302724
|
5881480162c7fded411cf84766484caa36fe07ea
| 2,101
|
py
|
Python
|
python/vosk/transcriber/cli.py
|
madkote/vosk-api
|
8cf64ee93e5cc21a6d53595c6a80fc638a45b1d7
|
[
"Apache-2.0"
] | 33
|
2019-09-03T23:21:14.000Z
|
2020-01-02T10:18:15.000Z
|
python/vosk/transcriber/cli.py
|
madkote/vosk-api
|
8cf64ee93e5cc21a6d53595c6a80fc638a45b1d7
|
[
"Apache-2.0"
] | 7
|
2019-09-11T09:40:03.000Z
|
2019-12-31T10:04:21.000Z
|
python/vosk/transcriber/cli.py
|
madkote/vosk-api
|
8cf64ee93e5cc21a6d53595c6a80fc638a45b1d7
|
[
"Apache-2.0"
] | 10
|
2019-09-05T05:30:16.000Z
|
2020-01-02T10:18:17.000Z
|
#!/usr/bin/env python3
import logging
import argparse
from pathlib import Path
from vosk import list_models, list_languages
from vosk.transcriber.transcriber import Transcriber
parser = argparse.ArgumentParser(
description = 'Transcribe audio file and save result in selected format')
parser.add_argument(
'--model', '-m', type=str,
help='model path')
parser.add_argument(
'--list-models', default=False, action='store_true',
help='list available models')
parser.add_argument(
'--list-languages', default=False, action='store_true',
help='list available languages')
parser.add_argument(
'--model-name', '-n', type=str,
help='select model by name')
parser.add_argument(
'--lang', '-l', default='en-us', type=str,
help='select model by language')
parser.add_argument(
'--input', '-i', type=str,
help='audiofile')
parser.add_argument(
'--output', '-o', default='', type=str,
help='optional output filename path')
parser.add_argument(
'--output-type', '-t', default='txt', type=str,
help='optional arg output data type')
parser.add_argument(
'--log-level', default='INFO',
help='logging level')
def main():
args = parser.parse_args()
log_level = args.log_level.upper()
logging.getLogger().setLevel(log_level)
    if args.list_models:
list_models()
return
    if args.list_languages:
list_languages()
return
if not args.input:
logging.info('Please specify input file or directory')
exit(1)
if not Path(args.input).exists():
logging.info("File/folder '%s' does not exist, please specify an existing file/directory" % (args.input))
exit(1)
transcriber = Transcriber(args)
if Path(args.input).is_dir():
transcriber.process_dir(args)
return
elif Path(args.input).is_file():
transcriber.process_file(args)
else:
logging.info('Wrong arguments')
exit(1)
if __name__ == "__main__":
main()
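# Example invocations (file names are hypothetical; the flags are the ones
# registered on the parser above):
#   python -m vosk.transcriber.cli -i audio.wav -o transcript.txt -t txt -l en-us
#   python -m vosk.transcriber.cli --list-languages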
| 28.013333
| 113
| 0.634936
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 599
| 0.285102
|
58816407aff8c18d528cadd21c391b1d398c40c2
| 2,923
|
py
|
Python
|
OCR-Flask-app-master/tesseract.py
|
ChungNPH/OCR
|
06a78fa5f2c8f5891db1969ac2076ef8d20b74a8
|
[
"MIT"
] | null | null | null |
OCR-Flask-app-master/tesseract.py
|
ChungNPH/OCR
|
06a78fa5f2c8f5891db1969ac2076ef8d20b74a8
|
[
"MIT"
] | null | null | null |
OCR-Flask-app-master/tesseract.py
|
ChungNPH/OCR
|
06a78fa5f2c8f5891db1969ac2076ef8d20b74a8
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
from imutils.object_detection import non_max_suppression
import pytesseract
from matplotlib import pyplot as plt
def ocr(images):
results = []
for image in images:
args = {"image": image, "east": "frozen_east_text_detection.pb", "min_confidence": 0.5, "width": 320,
"height": 320}
image = cv2.imread(args['image'])
orig = image.copy()
(origH, origW) = image.shape[:2]
(newW, newH) = (args["width"], args["height"])
rW = origW / float(newW)
rH = origH / float(newH)
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
net = cv2.dnn.readNet(args["east"])
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
def predictions(prob_score, geo):
(numR, numC) = prob_score.shape[2:4]
boxes = []
confidence_val = []
for y in range(0, numR):
scoresData = prob_score[0, 0, y]
x0 = geo[0, 0, y]
x1 = geo[0, 1, y]
x2 = geo[0, 2, y]
x3 = geo[0, 3, y]
anglesData = geo[0, 4, y]
for i in range(0, numC):
if scoresData[i] < args["min_confidence"]:
continue
(offX, offY) = (i * 4.0, y * 4.0)
angle = anglesData[i]
cos = np.cos(angle)
sin = np.sin(angle)
h = x0[i] + x2[i]
w = x1[i] + x3[i]
endX = int(offX + (cos * x1[i]) + (sin * x2[i]))
endY = int(offY - (sin * x1[i]) + (cos * x2[i]))
startX = int(endX - w)
startY = int(endY - h)
boxes.append((startX, startY, endX, endY))
confidence_val.append(scoresData[i])
return (boxes, confidence_val)
(boxes, confidence_val) = predictions(scores, geometry)
boxes = non_max_suppression(np.array(boxes), probs=confidence_val)
result = []
for (startX, startY, endX, endY) in boxes:
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
r = orig[startY:endY, startX:endX]
configuration = ("-l eng --oem 1 --psm 8")
text = pytesseract.image_to_string(r, config=configuration)
result.append(text)
results.append(result)
return results
print(ocr(["./images/car_wash.png"]))
| 30.134021
| 109
| 0.485118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 229
| 0.078344
|
5882bcb6d8e741c3012ffb7ce72cd027f9aee6d9
| 727
|
py
|
Python
|
Scripts/autotest/bug/migrations/0003_auto_20180128_2144.py
|
ludechu/DJevn
|
ee97447da3f6f55c92bfa1b6a20436a4f3098150
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/autotest/bug/migrations/0003_auto_20180128_2144.py
|
ludechu/DJevn
|
ee97447da3f6f55c92bfa1b6a20436a4f3098150
|
[
"bzip2-1.0.6"
] | null | null | null |
Scripts/autotest/bug/migrations/0003_auto_20180128_2144.py
|
ludechu/DJevn
|
ee97447da3f6f55c92bfa1b6a20436a4f3098150
|
[
"bzip2-1.0.6"
] | null | null | null |
# Generated by Django 2.0 on 2018-01-28 21:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bug', '0002_auto_20180110_1107'),
]
operations = [
migrations.AlterField(
model_name='bug',
name='buglevel',
field=models.CharField(choices=[('1', '1'), ('2', '2'), ('3', '3')], default='3', max_length=200, null=True, verbose_name='严重程度'),
),
migrations.AlterField(
model_name='bug',
name='bugstatus',
field=models.CharField(choices=[('激活', '激活'), ('已解决', '已解决'), ('已关闭', '已关闭')], default='激活', max_length=200, null=True, verbose_name='解决状态'),
),
]
| 30.291667
| 153
| 0.558459
| 688
| 0.883184
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.286264
|
5882ce3c784a4517fd7838c8665ee9d1be914598
| 17,439
|
py
|
Python
|
tests/test_validators.py
|
STYLER-Inc/styler-validation
|
73e6e3ac241cf26ca3ccee070f2736778d3d4849
|
[
"MIT"
] | null | null | null |
tests/test_validators.py
|
STYLER-Inc/styler-validation
|
73e6e3ac241cf26ca3ccee070f2736778d3d4849
|
[
"MIT"
] | null | null | null |
tests/test_validators.py
|
STYLER-Inc/styler-validation
|
73e6e3ac241cf26ca3ccee070f2736778d3d4849
|
[
"MIT"
] | null | null | null |
""" Tests for validators
"""
from decimal import Decimal
from unittest.mock import Mock
import random
import string
from styler_validation import validators as va
from styler_validation import messages as msg
class MyModel:
pass
class TestIsRequired:
def test_is_required(self):
val = va.is_required()
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.REQUIRED_VALUE,)
def test_valid(self):
val = va.is_required()
model = MyModel()
model.prop = 'something'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_accepts(self):
val = va.is_required(accepts={0})
model = MyModel()
model.prop = 0
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsInteger:
def test_is_integer(self):
val = va.is_integer()
model = MyModel()
model.prop = '123'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_is_none(self):
val = va.is_integer()
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_type_mismatch(self):
val = va.is_integer()
model = MyModel()
model.prop = {'123'}
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_value_mismatch(self):
val = va.is_integer()
model = MyModel()
model.prop = 'abc123'
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
class TestIsBetween:
def test_is_between(self):
val = va.is_between(min_=0, max_=10)
model = MyModel()
model.prop = 2
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_outside_interval(self):
val = va.is_between(min_=0, max_=10)
model = MyModel()
model.prop = 20
model.prop2 = -1
valid, error = val(model, 'prop')
valid2, error2 = val(model, 'prop2')
assert not valid
assert error == (msg.LESS_OR_EQUAL_THAN, 10)
assert not valid2
assert error2 == (msg.GREATER_OR_EQUAL_THAN, 0)
def test_no_interval_set(self):
val = va.is_between()
model = MyModel()
model.prop = 20
model.prop2 = -1
valid, error = val(model, 'prop')
valid2, error2 = val(model, 'prop2')
assert valid
assert error is None
assert valid2
assert error2 is None
def test_one_sided_interval(self):
val = va.is_between(min_=0)
val2 = va.is_between(max_=10)
model = MyModel()
model.prop = 20
model.prop2 = -1
valid, error = val(model, 'prop')
valid2, error2 = val(model, 'prop2')
assert valid
assert error is None
assert not valid2
assert error2 == (msg.GREATER_OR_EQUAL_THAN, 0)
valid, error = val2(model, 'prop')
valid2, error2 = val2(model, 'prop2')
assert not valid
assert error == (msg.LESS_OR_EQUAL_THAN, 10)
assert valid2
assert error2 is None
def test_none(self):
val = va.is_between(min_=0, max_=10)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_type_mismatch(self):
val = va.is_between(min_=0, max_=10)
model = MyModel()
model.prop = {'123'}
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
class TestIsInside:
def test_is_inside(self):
accepted_values = {'a', 'b'}
val = va.is_inside(accepted=accepted_values)
model = MyModel()
model.prop = 'b'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_inside(self):
accepted_values = {'a', 'b'}
val = va.is_inside(accepted=accepted_values)
model = MyModel()
model.prop = 'c'
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_none(self):
accepted_values = {'a', 'b'}
val = va.is_inside(accepted=accepted_values)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsOfType:
def test_is_of_type(self):
val = va.is_of_type(Decimal)
model = MyModel()
model.prop = Decimal('12.33')
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_type(self):
val = va.is_of_type(Decimal)
model = MyModel()
model.prop = '12.33'
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_none(self):
val = va.is_of_type(Decimal)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsMoney:
def test_is_money(self):
val = va.is_money()
model = MyModel()
model.prop = Decimal('12.33')
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_allow_zero(self):
val = va.is_money(allow_zero=False)
model = MyModel()
model.prop = Decimal('0.0')
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.NOT_ZERO,)
def test_negative(self):
val = va.is_money()
model = MyModel()
model.prop = Decimal('-12.33')
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.POSITIVE_VALUE,)
def test_none(self):
val = va.is_money()
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_type_mismatch(self):
val = va.is_money()
model = MyModel()
model.prop = {'sdfads'}
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_value_mismatch(self):
val = va.is_money()
model = MyModel()
model.prop = 'sdfads'
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
class TestIsValidTime:
def test_is_valid_time(self):
val = va.is_valid_time()
model = MyModel()
model.prop = '12:33'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_valid(self):
val = va.is_valid_time()
model = MyModel()
model.prop = '12:73'
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_TIME,)
def test_none(self):
val = va.is_valid_time()
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsGreaterThanField:
def test_is_greater_than_field(self):
val = va.is_greater_than_field('prop2')
model = MyModel()
model.prop = 333
model.prop2 = 222
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_valid(self):
val = va.is_greater_than_field('prop2')
model = MyModel()
model.prop = 11
model.prop2 = 12
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.GREATER_THAN, 'mymodel.prop2')
def test_none(self):
val = va.is_greater_than_field('prop2')
model = MyModel()
model.prop = 1
model.prop2 = None
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_default(self):
val = va.is_greater_than_field('prop2', default=True)
model = MyModel()
model.prop = 1
model.prop2 = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_allow_equal(self):
val = va.is_greater_than_field('prop2', allow_equal=True)
model = MyModel()
model.prop = 1
model.prop2 = 1
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsLessThanField:
def test_is_less_than_field(self):
val = va.is_less_than_field('prop2')
model = MyModel()
model.prop = 111
model.prop2 = 222
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_valid(self):
val = va.is_less_than_field('prop2')
model = MyModel()
model.prop = 13
model.prop2 = 12
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.LESS_THAN, 'mymodel.prop2')
def test_none(self):
val = va.is_less_than_field('prop2')
model = MyModel()
model.prop = 1
model.prop2 = None
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_default(self):
val = va.is_less_than_field('prop2', default=True)
model = MyModel()
model.prop = 1
model.prop2 = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_allow_equal(self):
val = va.is_less_than_field('prop2', allow_equal=True)
model = MyModel()
model.prop = 1
model.prop2 = 1
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsGreaterThanNumber:
def test_is_greater_than_number(self):
val = va.is_greater_than_number(10)
model = MyModel()
model.prop = 111
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_valid(self):
val = va.is_greater_than_number(10)
model = MyModel()
model.prop = 1
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.GREATER_THAN, 10)
def test_none(self):
val = va.is_greater_than_number(10)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_default(self):
val = va.is_greater_than_number(10, default=True)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_allow_equal(self):
val = va.is_greater_than_number(10, allow_equal=True)
model = MyModel()
model.prop = 10
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsLessThanNumber:
def test_is_less_than_number(self):
val = va.is_less_than_number(10)
model = MyModel()
model.prop = 1
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_not_valid(self):
val = va.is_less_than_number(10)
model = MyModel()
model.prop = 11
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.LESS_THAN, 10)
def test_none(self):
val = va.is_less_than_number(10)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_default(self):
val = va.is_less_than_number(10, default=True)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_allow_equal(self):
val = va.is_less_than_number(10, allow_equal=True)
model = MyModel()
model.prop = 10
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestIsNotEmpty:
def test_is_not_empty(self):
val = va.is_not_empty()
model = MyModel()
model.prop = 'something'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_invalid(self):
val = va.is_not_empty()
model = MyModel()
model.prop = ' '
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.NOT_EMPTY,)
def test_none(self):
val = va.is_not_empty()
model = MyModel()
model.prop = 0
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_default(self):
val = va.is_not_empty(default=True)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
class TestObjectValidator:
def test_child_object(self):
child = Mock()
child.is_valid.return_value = (True, {})
model = MyModel()
model.prop = child
val = va.object_validator()
valid, error = val(model, 'prop')
assert valid
assert error == {}
child.is_valid.assert_called_once()
def test_multiple_child_objects(self):
child1 = Mock()
child1.is_valid.return_value = (False, {'error1': 'error'})
child2 = Mock()
child2.is_valid.return_value = (True, {})
child3 = Mock()
child3.is_valid.return_value = (False, {'error3': 'error'})
model = MyModel()
model.prop = [child1, child2, child3]
val = va.object_validator()
valid, error = val(model, 'prop')
assert not valid
assert error == {
'error1': 'error',
'error3': 'error'
}
child1.is_valid.assert_called_once()
child2.is_valid.assert_called_once()
child3.is_valid.assert_called_once()
def test_none(self):
model = MyModel()
model.prop = None
val = va.object_validator()
valid, error = val(model, 'prop')
assert valid
assert error == {}
class TestIsUuid:
def test_is_uuid(self):
val = va.is_uuid()
model = MyModel()
model.prop = '42fb4cf1-bd85-469c-8266-9dfcd54796a4'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_invalid(self):
val = va.is_uuid()
model = MyModel()
model.prop = 'anything'
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
def test_none(self):
val = va.is_uuid()
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_type_mismatch(self):
val = va.is_uuid()
model = MyModel()
model.prop = 1234
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
class TestIf_:
def test_if_true(self):
validation = Mock(return_value=(True, None))
val = va.if_(lambda x: True, validation)
model = MyModel()
model.prop = '123'
valid, error = val(model, 'prop')
assert valid
assert error is None
validation.assert_called_once()
def test_if_false(self):
validation = Mock(return_value=(True, None))
val = va.if_(lambda x: False, validation)
model = MyModel()
model.prop = '123'
valid, error = val(model, 'prop')
assert valid
assert error is None
validation.assert_not_called()
class TestMaxLength:
def test_valid_max_length(self):
val = va.max_length(255)
model = MyModel()
model.prop = 'string_with_length_under_255'
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_invalid_max_length(self):
length = 255
val = va.max_length(length)
model = MyModel()
model.prop = ''.join(random.choices(string.ascii_uppercase +
string.digits, k=256))
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.STRING_TOO_LONG, length)
def test_none(self):
val = va.max_length(255)
model = MyModel()
model.prop = None
valid, error = val(model, 'prop')
assert valid
assert error is None
def test_invalid_type(self):
val = va.max_length(255)
model = MyModel()
model.prop = 1
valid, error = val(model, 'prop')
assert not valid
assert error == (msg.INVALID_VALUE,)
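# A minimal sketch of a custom validator following the same contract the
# tests above exercise: a factory returns a callable taking (model, field)
# and returning a (valid, error) tuple. `is_even` and its 'not_even' message
# are hypothetical; the error-tuple shape mirrors the library's messages.
def is_even():
    def validator(obj, field):
        value = getattr(obj, field)
        if value is None:
            return True, None  # None passes, like the built-in validators above
        if not isinstance(value, int) or value % 2 != 0:
            return False, ('not_even',)
        return True, None
    return validator
def test_custom_validator():
    val = is_even()
    model = MyModel()
    model.prop = 4
    valid, error = val(model, 'prop')
    assert valid
    assert error is None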
| 23.159363
| 68
| 0.565113
| 17,177
| 0.984976
| 0
| 0
| 0
| 0
| 0
| 0
| 838
| 0.048053
|
5883b253d513cb80bd2362de5b0a8311d18ca8c7
| 3,305
|
py
|
Python
|
blog/models.py
|
Libor03/django-final
|
e29dc3237252c7b0fcfea13d948ed54ffe6d0339
|
[
"CC0-1.0"
] | null | null | null |
blog/models.py
|
Libor03/django-final
|
e29dc3237252c7b0fcfea13d948ed54ffe6d0339
|
[
"CC0-1.0"
] | null | null | null |
blog/models.py
|
Libor03/django-final
|
e29dc3237252c7b0fcfea13d948ed54ffe6d0339
|
[
"CC0-1.0"
] | null | null | null |
from django.core.files.storage import FileSystemStorage
from django.db import models
# Create your models here.
from datetime import date
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
from django.contrib.auth.models import User #Blog author or commenter
def attachment_path(instance, filename):
    """ Returns the upload path for an attachment file. """
    # The Attachment model below stores its foreign key to Animal in the ``film`` field.
    return "media/animal/" + str(instance.film.id) + "/attachments/" + filename
def poster_path(instance, filename):
    """ Returns the upload path for an uploaded poster. """
    return "animals/" + str(instance.name) + "/foto/" + filename
class Type(models.Model):
name = models.CharField(max_length=50, unique=True, verbose_name="Type of animal", help_text='Enter a type of animal (e.g. Savec)')
class Meta:
ordering = ["name"]
def __str__(self):
return self.name
class Animal(models.Model):
name = models.CharField(max_length=50, unique=True, verbose_name="Name of animal")
poster = models.ImageField(upload_to=poster_path, verbose_name="Poster")
latin = models.CharField(max_length=50, unique=True, verbose_name="Latin name for animal")
description = models.CharField(max_length=5000, verbose_name="Description of animal")
# Metadata
class Meta:
ordering = ["name"]
# Methods
    def __str__(self):
        """The text representation includes the animal's name, poster path and Latin name"""
        return f"{self.name}, {str(self.poster)}, {str(self.latin)}"
def get_absolute_url(self):
"""Metoda vrací URL stránky, na které se vypisují podrobné informace o filmu"""
return reverse('animal-detail', args=[str(self.id)])
class Attachment(models.Model):
# Fields
    # Required attachment title - text up to 200 characters
title = models.CharField(max_length=200, verbose_name="Title")
    # Timestamp of the attachment's last update - the current time is stored automatically
last_update = models.DateTimeField(auto_now=True)
    # File upload field
    # The upload_to parameter stores the file in the folder returned by attachment_path
file = models.FileField(upload_to=attachment_path, null=True, verbose_name="File")
    # Constant predefining the available attachment types as tuples
TYPE_OF_ATTACHMENT = (
('audio', 'Audio'),
('image', 'Image'),
('text', 'Text'),
('video', 'Video'),
('other', 'Other'),
)
    # Field with predefined choices for storing the attachment type
#type = models.CharField(max_length=5, choices=TYPE_OF_ATTACHMENT, blank=True, default='image',
# help_text='Select allowed attachment type', verbose_name="Attachment type")
    # Foreign key linking the attachment to a given animal (N:1 relationship)
    # The on_delete parameter ensures referential integrity - deleting an animal
    # also removes all of its attachments (models.CASCADE)
film = models.ForeignKey(Animal, on_delete=models.CASCADE)
# Metadata
class Meta:
        # Ordered by the files' last update time, newest first
ordering = ["-last_update"]
# Methods
    def __str__(self):
        """ Text representation of the object """
        return f"{self.title}"
| 36.722222
| 135
| 0.698033
| 2,803
| 0.825626
| 0
| 0
| 0
| 0
| 0
| 0
| 1,760
| 0.518409
|
5884a5b1746cc453a881292d3b4f5da9a92c1838
| 369
|
py
|
Python
|
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E1.py
|
codeclubbentleigh/Python
|
94d6a937aa3520b201ee1641c2009bd90566d52a
|
[
"MIT"
] | 12
|
2018-11-14T03:55:58.000Z
|
2021-12-12T01:13:05.000Z
|
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E1.py
|
codeclubbentleigh/Python
|
94d6a937aa3520b201ee1641c2009bd90566d52a
|
[
"MIT"
] | null | null | null |
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E1.py
|
codeclubbentleigh/Python
|
94d6a937aa3520b201ee1641c2009bd90566d52a
|
[
"MIT"
] | 7
|
2019-10-10T06:28:58.000Z
|
2022-02-15T07:18:12.000Z
|
print(list(range(10, 0, -2)))
# if start > end and step > 0:
# an empty list generated
# if start > end and step < 0:
# a list generated from start down to no less than end with step as constant decrement
# if start < end and step > 0:
# a list generated from start up to no more than end with step as constant increment
# if start < end and step < 0:
# an empty list generated
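# The four cases demonstrated explicitly:
print(list(range(10, 0, 2)))    # start > end, step > 0 -> []
print(list(range(10, 0, -2)))   # start > end, step < 0 -> [10, 8, 6, 4, 2]
print(list(range(0, 10, 2)))    # start < end, step > 0 -> [0, 2, 4, 6, 8]
print(list(range(0, 10, -2)))   # start < end, step < 0 -> []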
| 36.9
| 81
| 0.704607
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 331
| 0.897019
|
588580ba517b618b5770a81ab628f3858c9ed41b
| 3,141
|
py
|
Python
|
DeepModels/KerasConvNetMNIST.py
|
amingolnari/Deep-Learning-Course
|
00d4fe10db8d1dde3d9b2a94fe93531e8f836cbc
|
[
"MIT"
] | 17
|
2018-12-05T06:50:34.000Z
|
2021-05-26T04:03:18.000Z
|
DeepModels/KerasConvNetMNIST.py
|
amingolnari/Deep-Learning-Course
|
00d4fe10db8d1dde3d9b2a94fe93531e8f836cbc
|
[
"MIT"
] | null | null | null |
DeepModels/KerasConvNetMNIST.py
|
amingolnari/Deep-Learning-Course
|
00d4fe10db8d1dde3d9b2a94fe93531e8f836cbc
|
[
"MIT"
] | 3
|
2018-12-08T14:59:47.000Z
|
2019-12-26T17:52:09.000Z
|
"""
github : https://github.com/amingolnari/Deep-Learning-Course
Author : Amin Golnari
Keras Version : 2.2.4
Date : 4/12/2018
Keras CNN Classification on MNIST Data
Code 301
"""
## If your GPU is AMD , you can use PlaidML Backend
# import os
# os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Conv2D, MaxPool2D
from keras.optimizers import SGD
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
# Load MNIST Data (Download for First)
def LoadData():
(Xtrain, Ytrain), (Xtest, Ytest) = mnist.load_data()
Xtrain = Xtrain.reshape(60000, 28, 28, 1).astype('float32')
Xtrain = Xtrain / 255 # Normalize to 0-1
Xtest = Xtest.reshape(10000, 28, 28, 1).astype('float32')
Xtest = Xtest / 255
    Ytrain = to_categorical(Ytrain, 10) # e.g. label 2 -> [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
Ytest = to_categorical(Ytest, 10)
return (Xtrain, Xtest), (Ytrain, Ytest)
def BuildModel():
model = Sequential()
model.add(Conv2D(filters = 128, kernel_size = (5, 5),
activation = 'relu',
padding = 'same',
input_shape = (28, 28, 1))) # Just First (Input) Layer Need Init input_shape
model.add(Conv2D(filters = 64, kernel_size = (3, 3),
padding = 'same',
activation = 'relu'))
model.add(MaxPool2D(pool_size = (2, 2), padding = 'same'))
model.add(Conv2D(filters = 64, kernel_size = (3, 3),
activation = 'relu',
padding = 'same'))
model.add(MaxPool2D(pool_size = (2, 2)))
model.add(Flatten())
model.add(Dropout(0.25))
    model.add(Dense(300, activation = 'tanh')) # Hidden Layer #1
    model.add(Dense(200, activation = 'sigmoid')) # Hidden Layer #2
model.add(Dense(10, activation = 'softmax'))# Output Layer
return model
def PlotHistory(history):
plt.title('Keras Model loss/accuracy')
plt.ylabel('loss/accuracy')
plt.xlabel('epochs')
# Accuracy
plt.plot(history.history['acc'], '.-')
plt.plot(history.history['val_acc'], '-.')
# Loss
plt.plot(history.history['loss'], '-*')
plt.plot(history.history['val_loss'], '*-')
plt.legend(['Train loss', 'Validation loss', 'Train acc', 'Validation acc'], loc='upper right')
plt.grid(True, linestyle = '-.')
plt.tick_params(labelcolor = 'b', labelsize = 'medium', width = 3)
fig = plt.gcf()
fig.savefig('images/model loss.jpg')
plt.show()
return
def main():
(Xtrain, Xtest), (Ytrain, Ytest) = LoadData()
model = BuildModel()
model.summary()
model.compile(loss = 'categorical_crossentropy',
optimizer = SGD(lr = 0.1),
metrics = ['accuracy'])
History = model.fit(Xtrain, Ytrain,
batch_size = 256,
epochs = 100,
validation_split = .3)
PlotHistory(History)
score, acc = model.evaluate(Xtest, Ytest)
print('Test Accuracy : ', acc)
if __name__ == "__main__":
main()
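# A minimal inference sketch, assuming main() above has run; note that
# `model` and `Xtest` are locals inside main, so in practice one would
# return or save the model first:
#
#   import numpy as np
#   probs = model.predict(Xtest[:1])           # shape (1, 10) of class scores
#   print('predicted digit:', np.argmax(probs))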
| 32.381443
| 97
| 0.61127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 869
| 0.276663
|
588a0eb119fc13cdd266d6e4d090c38c400126a9
| 526
|
py
|
Python
|
armstrong.py
|
Sanchr-sys/Letsupgrade_python7
|
f130cb1dff259878193fb65dd414fc936a7c760d
|
[
"Apache-2.0"
] | null | null | null |
armstrong.py
|
Sanchr-sys/Letsupgrade_python7
|
f130cb1dff259878193fb65dd414fc936a7c760d
|
[
"Apache-2.0"
] | null | null | null |
armstrong.py
|
Sanchr-sys/Letsupgrade_python7
|
f130cb1dff259878193fb65dd414fc936a7c760d
|
[
"Apache-2.0"
] | null | null | null |
start = 104200
end = 702648265
for arm1 in range(start, end + 1):
    exp = len(str(arm1))
    num_sum = 0
    c = arm1
    while c > 0:
        num = c % 10
        num_sum += num ** exp
        c //= 10
    if arm1 == num_sum:
        print("The first Armstrong number encountered is:", arm1)
        break
#####OUTPUT#####
## The first Armstrong number encountered is: 548834
## Process finished with exit code 0
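# The same check factored into a reusable predicate (a sketch; the behaviour
# matches the loop above):
def is_armstrong(n):
    exp = len(str(n))
    return n == sum(int(d) ** exp for d in str(n))
# e.g. is_armstrong(548834) -> True, is_armstrong(548835) -> False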
| 17.533333
| 70
| 0.503802
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.287072
|
588a5c56f6208237e504aa5b6fa4afab73e5300c
| 787
|
py
|
Python
|
neji.py
|
NejiViraj/Viraj
|
d5bfc60e29100e00a87596b5e16961ab97a3dc4c
|
[
"BSD-2-Clause"
] | null | null | null |
neji.py
|
NejiViraj/Viraj
|
d5bfc60e29100e00a87596b5e16961ab97a3dc4c
|
[
"BSD-2-Clause"
] | null | null | null |
neji.py
|
NejiViraj/Viraj
|
d5bfc60e29100e00a87596b5e16961ab97a3dc4c
|
[
"BSD-2-Clause"
] | null | null | null |
import requests
import json
def Neji_version():
# import neji
# print(neji.Neji_version())
# output: Hydrogen-0.0.0.1
return "Hydrogen-0.0.0.1"
def Neji_upload(put_file_directory_in_string_format):
    # import neji
    # print(neji.Neji_upload(r"C:\...\Pictures\trial\black1 - Copy (5).png"))
    # output: https://file.io/CfJlLI
    # Try file.io first; fall back to anonymousfiles.io if no link is returned.
    with open(str(put_file_directory_in_string_format), 'rb') as f:
        r = requests.post('https://file.io', files={'file': f})
    al1 = json.loads(r.text)
    if al1.get('link', '').startswith('http'):
        return al1['link']
    with open(str(put_file_directory_in_string_format), 'rb') as f:
        r2 = requests.post('https://api.anonymousfiles.io', files={'file': f})
    al2 = json.loads(r2.text)
    if al2.get('url', '').startswith('http'):
        return al2['url']
    return "inform us"
| 24.59375
| 125
| 0.645489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 316
| 0.401525
|
588aa8a3b88a98a9d49032a085a9b2d4f04e667f
| 9,731
|
py
|
Python
|
xmaintnote/ticketing.py
|
0xmc/maint-notification
|
bdf27f7b863a45d2191068c46f729db3c94386d1
|
[
"BSD-2-Clause"
] | null | null | null |
xmaintnote/ticketing.py
|
0xmc/maint-notification
|
bdf27f7b863a45d2191068c46f729db3c94386d1
|
[
"BSD-2-Clause"
] | null | null | null |
xmaintnote/ticketing.py
|
0xmc/maint-notification
|
bdf27f7b863a45d2191068c46f729db3c94386d1
|
[
"BSD-2-Clause"
] | null | null | null |
#!/bin/env python3
"""Handling events as tickets
The goal here is, provided a maintenance event, create an event if not a
duplicate. To determine if not duplicate, use some combination of values to
form a key. Methods to delete, update, and otherwise transform the ticket
should be available
A base class, Ticket, is provided to do some boiler plate things and enforce a
consistent interface.
"""
from textwrap import dedent
from jira import JIRA
class Ticket(object):
"""Base class for a ticket
Purpose of this is to provide standard methods for retrieving duplicates,
creating event, and deleting.
Implementation details should be self-contained to each subclass but not
really different from the interface perspective.
Attributes:
event (XMaintNoteEvent)
        account (str)
impact (str)
maintenance_id (str)
object_id (str)
provider (str)
key (str): String that can try to be used to be unique among
maintenances
title (str): Generated title that may be used as a ticket title
        body (str): Generated body that may be used as a ticket description
ticket: Optional to add by subclass, instance of ticket in the ticket
system
"""
def __init__(self, event, **kwargs):
"""Initializes and runs _post_init()
Event is the only required input with any kwargs being accepted and
        forwarded to ``self._post_init``. Purpose of the ``_post_init`` method
is to facilitate each type of ticketing system to mutate the event data
in however it needs without overloading ``__init__`` itself.
A key is created using the provider, account, and maintenance-id keys
of the event. How this is implemented by a ticketing system to take
advantage of is up to the subclass.
Args:
event (XMaintNoteEvent): Maintenance Event
"""
self.event = event
self.account = event['X-MAINTNOTE-ACCOUNT']
self.impact = event['X-MAINTNOTE-IMPACT']
self.maintenance_id = event['X-MAINTNOTE-MAINTENANCE-ID']
self.object_id = event['X-MAINTNOTE-OBJECT-ID']
self.provider = event['X-MAINTNOTE-PROVIDER']
self.ticket = None
factors = [
self.provider,
self.account,
self.maintenance_id,
]
self.key = '{}:{}:{}'.format(*factors)
self.title = '{provider} {impact} Maintenance for {account}'.format(
provider=self.provider,
impact=self.impact,
account=self.account,
)
body = '''
{provider} is having a maintenance of {impact}. Affected account number
is {account}.
Start time: {start_time}
End time: {end_time}
Impact: {impact}
Account: {account}
'''.format(
provider=self.provider,
impact=self.impact,
account=self.account,
start_time=str(event['DTSTART'].dt),
end_time=str(event['DTEND'].dt),
)
self.body = dedent(body)
self._post_init(**kwargs)
def _post_init(self, **kwargs):
pass
def create(self):
"""Overload to create a ticket in the system"""
        raise NotImplementedError('Subclass must overload this method')
def close(self):
"""Overload to close a ticket in the system"""
        raise NotImplementedError('Subclass must overload this method')
def exists(self):
"""Overload to determine if this event exists in ticket form already"""
        raise NotImplementedError('Subclass must overload this method')
class JiraTicket(Ticket):
"""Ticket driver for JIRA
Supports adding list of watchers to maintenance issues created, custom
finishing transition for when calling close, and custom issue types.
Priorities will be mapped according to the impact status of the
maintenance. A preferred mapping can be provided otherwise it defaults to
using the Vanilla JIRA install names, eg:
>>> {
'NO-IMPACT': {'name': 'Low'},
'REDUCED-REDUNDANCY': {'name': 'Medium'},
'DEGRADED': {'name': 'High'},
'OUTAGE': {'name': 'Highest'},
}
Example:
>>> type(event)
xmaintnote.event.XMaintNoteEvent
>>> tkt = JiraTicket(
event,
url='http://localhost',
username='admin',
password='admin',
watchers='noc',
)
>>> tkt.exists()
False
>>> tkt.create()
True
>>> tkt.exists()
True
>>> tkt.ticket
<JIRA Issue: key=u'MAINT-14', id=u'10013'>
>>> tkt.impact
vText('NO-IMPACT')
>>> tkt.ticket.fields.priority
<JIRA Priority: name=u'Low', id=u'4'>
>>> tkt.ticket.fields.labels
[u'example.com:137.035999173:WorkOrder-31415']
"""
def _post_init(
self,
url='http://localhost:8080',
username=None,
password=None,
project='MAINT',
issuetype='Task',
finished_transition='Done',
watchers=None,
pri_mapping=None,
):
"""Setup to initialize Jira client and any required settings
If username or password aren't provided, will attempt to do actions as
anonymous
Args:
url (str): URL to jira server. MUST have the URL scheme (http://)
username (str): Username (if applicable)
password (str): Password (if applicable)
project (str): JIRA project handle
issuetype (str): Issue type to file these issues as
watchers (list): List of usernames to add as watchers to the maints
finished_transition (str): Transition to move the issue into when
calling the ``.close`` method. Default: Done
pri_mapping (str): Map of maintenance impact name to JIRA priority
dict. eg, {'NO-IMPACT': {'name': 'Low'}}
"""
# If either part of the credential tuple is unprovided, default to
# anonymous
credentials = (username, password)
if not all(credentials):
basic_auth = None
else:
basic_auth = credentials
if not watchers:
watchers = []
if not pri_mapping:
pri_mapping = {
'NO-IMPACT': {'name': 'Low'},
'REDUCED-REDUNDANCY': {'name': 'Medium'},
'DEGRADED': {'name': 'High'},
'OUTAGE': {'name': 'Highest'},
}
self.jira = JIRA(url, basic_auth=basic_auth)
self.project = project
self.issuetype = issuetype
self.finished_transition = finished_transition
self.watchers = watchers
self.pri_mapping = pri_mapping
def exists(self):
"""Return bool for whether maintenance issue exists for this event
Improvements: Currently not handling the case where multiple issues are
returned which may hint that the key used isn't unique enough or people
have manually added the same label to other things. Also no exception
handling mostly because the exception return by JIRA is pretty
descriptive
Returns:
exists (bool)
"""
existing = self.jira.search_issues('labels = {}'.format(self.key))
if existing:
self.ticket = existing[0]
return True if existing else False
def create(self):
"""Create issue for event
        Pre-checks factors such as whether this is a duplicate. If so, stops
        further actions.
Returns:
success (bool)
"""
jira = self.jira
# If issue doesn't exist, create it. Else return False for inability
# Add watchers to the new ticket
if not self.exists():
options = {
'project': self.project,
'summary': self.title,
'labels': [self.key],
'description': self.body,
'issuetype': {'name': self.issuetype},
'priority': self.pri_mapping[self.impact],
}
new_issue = jira.create_issue(fields=options)
self.ticket = new_issue
[self._add_watcher(new_issue, w) for w in self.watchers]
return True
else:
return False
def close(self):
"""Return bool representing success or failure for closing issue
If issue doesn't exist, will return False because it can't close.
Returns:
success (bool)
"""
jira = self.jira
finished_transition = self.finished_transition
if self.exists():
# Fetch the transitions that we can put the current issue into.
# Search through these for the provided ``finished_transition``
# from init. If not found, raise error.
tkt = self.ticket
transitions = jira.transitions(tkt)
transition_ids = [
t['id'] for t in transitions
if t['name'] == self.finished_transition
]
if not transition_ids:
raise ValueError(
'Transition "{}" not found'.format(finished_transition)
)
t = transition_ids[0]
            jira.transition_issue(tkt, t)
            return True
else:
return False
def _add_watcher(self, issue, watcher):
"""Add watcher to issue"""
self.jira.add_watcher(issue, watcher)
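# A minimal in-memory subclass sketch showing the interface the Ticket base
# class above enforces; the dict-backed "store" is hypothetical and stands in
# for a real ticketing backend.
class DictTicket(Ticket):
    _store = {}
    def exists(self):
        self.ticket = self._store.get(self.key)
        return self.ticket is not None
    def create(self):
        if self.exists():
            return False
        self._store[self.key] = {'title': self.title, 'body': self.body}
        self.ticket = self._store[self.key]
        return True
    def close(self):
        if not self.exists():
            return False
        del self._store[self.key]
        return True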
| 33.439863
| 79
| 0.582468
| 9,273
| 0.952934
| 0
| 0
| 0
| 0
| 0
| 0
| 6,103
| 0.627171
|
588c7a9a7c5017d9a74e862c7ceb6ee60b5f425a
| 554
|
py
|
Python
|
pycell/prologue/native/set_.py
|
andybalaam/cell
|
03d0670f9ebd513a983b9327108a84f2eff8ee75
|
[
"MIT"
] | 118
|
2016-10-17T09:04:42.000Z
|
2021-12-31T03:00:55.000Z
|
pycell/prologue/native/set_.py
|
JoeyCluett/cell
|
a3203731e0c63a55955509e843fb99e38cf7cc7c
|
[
"MIT"
] | 4
|
2019-01-23T09:59:43.000Z
|
2020-11-02T11:00:38.000Z
|
pycell/prologue/native/set_.py
|
JoeyCluett/cell
|
a3203731e0c63a55955509e843fb99e38cf7cc7c
|
[
"MIT"
] | 21
|
2016-06-05T08:05:53.000Z
|
2022-01-29T10:08:47.000Z
|
def _do_set(env, name, value):
if env.contains(name):
env.set(name, value)
elif env.parent is not None:
_do_set(env.parent, name, value)
else:
raise Exception(
"Attempted to set name '%s' but it does not exist." %
name
)
def set_(env, symbol_name, value):
if symbol_name[0] != "string":
raise Exception(
"set() takes a string as its first argument, but was: %s" %
str(symbol_name)
)
_do_set(env, symbol_name[1], value)
return value
| 26.380952
| 71
| 0.561372
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.209386
|
588c8d695d5770a68e19da667343fb316670eec3
| 195
|
py
|
Python
|
tests/commands/types/test_flag_param.py
|
cicdenv/cicdenv
|
5b72fd9ef000bf07c2052471b59edaa91af18778
|
[
"MIT"
] | 8
|
2020-08-10T20:57:24.000Z
|
2021-08-08T10:46:20.000Z
|
tests/commands/types/test_flag_param.py
|
cicdenv/cicdenv
|
5b72fd9ef000bf07c2052471b59edaa91af18778
|
[
"MIT"
] | null | null | null |
tests/commands/types/test_flag_param.py
|
cicdenv/cicdenv
|
5b72fd9ef000bf07c2052471b59edaa91af18778
|
[
"MIT"
] | 1
|
2020-08-10T20:42:09.000Z
|
2020-08-10T20:42:09.000Z
|
from cicdctl.commands.types.flag import FlagParamType
def test_flag():
flag = '-no-color'
parsed = FlagParamType().convert(value=flag, param=None, context={})
assert flag == parsed
| 24.375
| 72
| 0.702564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.05641
|
588dd58a16badefdc5a90e5e3a2eb711ee8221dd
| 194
|
py
|
Python
|
modules/util/objects/query_parts/postgres_query_part.py
|
stevekineeve88/doubloon
|
4c7c9163e96877ad23663c3dd9a73ef6ccde3e22
|
[
"MIT"
] | null | null | null |
modules/util/objects/query_parts/postgres_query_part.py
|
stevekineeve88/doubloon
|
4c7c9163e96877ad23663c3dd9a73ef6ccde3e22
|
[
"MIT"
] | 8
|
2021-01-29T15:49:17.000Z
|
2021-10-14T01:03:27.000Z
|
modules/util/objects/query_parts/postgres_query_part.py
|
stevekineeve88/doubloon
|
4c7c9163e96877ad23663c3dd9a73ef6ccde3e22
|
[
"MIT"
] | null | null | null |
class PostgresQueryPart:
""" Object representing Postgres query part
"""
def get_query(self) -> str:
""" Get query
Returns:
str
"""
pass
| 17.636364
| 47
| 0.505155
| 193
| 0.994845
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.561856
|
588fc576880c0f000634f775d5b9b45b44869222
| 7,957
|
py
|
Python
|
tools/merge_messages.py
|
cclauss/personfinder
|
62417192e79c9711d0c6c7cfc042f6d6b0dc2dc2
|
[
"Apache-2.0"
] | 1
|
2021-11-18T20:09:09.000Z
|
2021-11-18T20:09:09.000Z
|
tools/merge_messages.py
|
ZhengC1/personfinder
|
7e40f2783ac89b91efd1d8497f1acc5b006361fa
|
[
"Apache-2.0"
] | null | null | null |
tools/merge_messages.py
|
ZhengC1/personfinder
|
7e40f2783ac89b91efd1d8497f1acc5b006361fa
|
[
"Apache-2.0"
] | 1
|
2022-01-05T07:06:43.000Z
|
2022-01-05T07:06:43.000Z
|
#!/usr/bin/env python
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Merge translations from a set of .po or XMB files into a set of .po files.
Usage:
../tools/merge_messages <source-dir> <template-file>
../tools/merge_messages <source-dir> <template-file> <target-dir>
../tools/merge_messages <source-po-file> <template-file> <target-po-file>
<source-dir> should be a directory containing a subdirectories named with
locale codes (e.g. pt_BR). For each locale, this script looks for the first
.po or .xml file it finds anywhere under <source-dir>/<locale-code>/ and
adds all its messages and translations to the corresponding django.po file
in the target directory, at <target-dir>/<locale-code>/LC_MESSAGES/django.po.
<template-file> is the .po output file from running:
    'find_missing_translations --format=po'
with messages in the order that corresponds to the --format=xmb output.
Make sure to run this in a tree that corresponds to the version used for
generating the xmb file or the resulting merge will be wrong. See
validate_merge for directions on verifying the merge was correct.
If <target-dir> is unspecified, it defaults to the app/locale directory of
the current app. Alternatively, you can specify a single source file and
a single target file to update.
When merging messages from a source file into a target file:
- Empty messages and messages marked "fuzzy" in the source file are ignored.
- Translations in the source file will replace any existing translations
for the same messages in the target file.
- Other translations in the source file will be added to the target file.
- If the target file doesn't exist, it will be created.
- To minimize unnecessary changes from version to version, the target file
has no "#: filename:line" comments and the messages are sorted by msgid.
"""
import babel.messages
from babel.messages import pofile
import codecs
import os
import sys
import xml.sax
class XmbCatalogReader(xml.sax.handler.ContentHandler):
"""A SAX handler that populates a babel.messages.Catalog with messages
read from an XMB file."""
def __init__(self, template):
"""template should be a Catalog containing the untranslated messages
in the same order as the corresponding messages in the XMB file."""
self.tags = []
self.catalog = babel.messages.Catalog()
self.iter = iter(template)
assert self.iter.next().id == '' # skip the blank metadata message
def startElement(self, tag, attrs):
self.tags.append(tag)
if tag == 'msg':
self.string = ''
self.message = babel.messages.Message(self.iter.next().id)
if tag == 'ph':
self.string += '%(' + attrs['name'] + ')s'
self.message.flags.add('python-format')
def endElement(self, tag):
assert self.tags.pop() == tag
if tag == 'msg':
self.message.string = self.string
self.catalog[self.message.id] = self.message
def characters(self, content):
if self.tags[-1] == 'msg':
self.string += content
def log(text):
"""Prints out Unicode text."""
print text.encode('utf-8')
def log_change(old_message, new_message):
"""Describes an update to a message."""
if not old_message:
if new_message.id:
log('+ msgid "%s"' % str(new_message.id))
else:
print >>sys.stderr, 'no message id: %s' % new_message
log('+ msgstr "%s"' % str(new_message.string.encode('ascii', 'ignore')))
if new_message.flags:
log('+ #, %s' % ', '.join(sorted(new_message.flags)))
else:
if (new_message.string != old_message.string or
new_message.flags != old_message.flags):
log(' msgid "%s"' % old_message.id)
log('- msgstr "%s"' % old_message.string)
if old_message.flags:
log('- #, %s' % ', '.join(sorted(old_message.flags)))
log('+ msgstr "%s"' % new_message.string)
if new_message.flags:
log('+ #, %s' % ', '.join(sorted(new_message.flags)))
def create_file(filename):
"""Opens a file for writing, creating any necessary parent directories."""
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
return open(filename, 'w')
def merge(source, target_filename):
"""Merges the messages from the source Catalog into a .po file at
target_filename. Creates the target file if it doesn't exist."""
if os.path.exists(target_filename):
target = pofile.read_po(open(target_filename))
for message in source:
if message.id and message.string and not message.fuzzy:
log_change(message.id in target and target[message.id], message)
# This doesn't actually replace the message! It just updates
# the fields other than the string. See Catalog.__setitem__.
target[message.id] = message
# We have to mutate the message to update the string and flags.
target[message.id].string = message.string
target[message.id].flags = message.flags
else:
for message in source:
log_change(None, message)
target = source
target_file = create_file(target_filename)
pofile.write_po(target_file, target,
no_location=True, sort_output=True, ignore_obsolete=True)
target_file.close()
def merge_file(source_filename, target_filename, template_filename):
if source_filename.endswith('.po'):
merge(pofile.read_po(open(source_filename)), target_filename)
elif source_filename.endswith('.xml'):
handler = XmbCatalogReader(pofile.read_po(open(template_filename)))
xml.sax.parse(open(source_filename), handler)
merge(handler.catalog, target_filename)
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) not in [1, 2, 3]:
print __doc__
sys.exit(1)
args = (args + [None, None])[:3]
source_path = args[0]
template_path = args[1]
target_path = args[2] or os.path.join(os.environ['APP_DIR'], 'locale')
# If a single file is specified, merge it.
if ((source_path.endswith('.po') or source_path.endswith('.xml')) and
target_path.endswith('.po')):
print target_path
merge_file(source_path, target_path, template_path)
sys.exit(0)
# Otherwise, we expect two directories.
if not os.path.isdir(source_path) or not os.path.isdir(target_path):
print __doc__
sys.exit(1)
# Find all the source files.
source_filenames = {} # {locale: po_filename}
def find_po_file(key, dir, filenames):
"""Looks for a .po file and records it in source_filenames."""
for filename in filenames:
if filename.endswith('.po') or filename.endswith('.xml'):
source_filenames[key] = os.path.join(dir, filename)
for locale in os.listdir(source_path):
os.path.walk(os.path.join(source_path, locale), find_po_file,
locale.replace('-', '_'))
# Merge them into the target files.
for locale in sorted(source_filenames.keys()):
target = os.path.join(target_path, locale, 'LC_MESSAGES', 'django.po')
print target
merge_file(source_filenames[locale], target, template_path)
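# Example invocations, matching the docstring above (file and directory
# names are hypothetical):
#
#   python merge_messages.py translations/ template.po app/locale
#   python merge_messages.py fr.po template.po app/locale/fr/LC_MESSAGES/django.po
#
# The first form scans translations/<locale>/ for the first .po or .xml file
# per locale; the second merges a single file.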
| 39.004902
| 80
| 0.662184
| 1,161
| 0.145909
| 0
| 0
| 0
| 0
| 0
| 0
| 3,621
| 0.455071
|
5890360ab5457f3e208d3176b19465a1fa0b29ad
| 621
|
py
|
Python
|
misc/derwin.py
|
ssebs/nccsv
|
f5e94dab833a5f0822308299e154f13fd68d88f6
|
[
"MIT"
] | null | null | null |
misc/derwin.py
|
ssebs/nccsv
|
f5e94dab833a5f0822308299e154f13fd68d88f6
|
[
"MIT"
] | null | null | null |
misc/derwin.py
|
ssebs/nccsv
|
f5e94dab833a5f0822308299e154f13fd68d88f6
|
[
"MIT"
] | null | null | null |
# derwin.py - testing a window within a window
import curses
def main(stdscr):
# Create container window from stdscr
sh, sw = stdscr.getmaxyx()
container_win = curses.newwin(sh-1, sw-1, 1, 1)
# Create inner window from container win
bh, bw = container_win.getmaxyx()
box_win = container_win.derwin(bh-2, bw-2, 1, 1)
# Add size of inner win
box_win.addstr(1, 1, f"{bh}x{bw}")
# Draw borders
container_win.box()
box_win.box()
# Render and wait for char
container_win.refresh()
container_win.getch()
# main
if __name__ == "__main__":
curses.wrapper(main)
| 20.7
| 52
| 0.653784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 214
| 0.344605
|
589099f22121deb7215ea93a44c6ab088a52a57b
| 1,110
|
py
|
Python
|
test/z_emulator_autoload.py
|
DXCyber409/AndroidNativeEmulator
|
11a0360a947114375757724eecd9bd9dbca43a56
|
[
"Apache-2.0"
] | 3
|
2020-05-21T09:15:11.000Z
|
2022-01-12T13:52:20.000Z
|
test/z_emulator_autoload.py
|
DXCyber409/AndroidNativeEmulator
|
11a0360a947114375757724eecd9bd9dbca43a56
|
[
"Apache-2.0"
] | null | null | null |
test/z_emulator_autoload.py
|
DXCyber409/AndroidNativeEmulator
|
11a0360a947114375757724eecd9bd9dbca43a56
|
[
"Apache-2.0"
] | null | null | null |
import sys
import logging
from unicorn import *
from unicorn.arm_const import *
from androidemu.emulator import Emulator
from UnicornTraceDebugger import udbg
logging.basicConfig(stream=sys.stdout,
level=logging.DEBUG,
format="%(asctime)s %(levelname)7s %(name)34s | %(message)s")
logger = logging.getLogger(__name__)
emulator = Emulator()
libc = emulator.load_library('jnilibs/libc.so', do_init=False)
libso = emulator.load_library('jnilibs/libnative-lib.so', do_init=False)
# data segment
data_base = 0xa00000
data_size = 0x10000 * 3
emulator.mu.mem_map(data_base, data_size)
emulator.mu.mem_write(data_base, b'123')
emulator.mu.reg_write(UC_ARM_REG_R0, data_base)
try:
dbg = udbg.UnicornDebugger(emulator.mu)
addr_start = 0xcbc66000 + 0x9B68 + 1
addr_end = 0xcbc66000 + 0x9C2C
emulator.mu.emu_start(addr_start, addr_end)
r2 = emulator.mu.reg_read(UC_ARM_REG_R2)
result = emulator.mu.mem_read(r2, 16)
print(result.hex())
except UcError as e:
list_tracks = dbg.get_tracks()
for addr in list_tracks[-100:-1]:
print(hex(addr - 0xcbc66000))
    print(e)
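# A sketch of tracing individual instructions with a plain Unicorn code hook,
# as an alternative to the UnicornTraceDebugger used above (hook_add and
# UC_HOOK_CODE come from the unicorn bindings already imported):
#
#   def trace(mu, address, size, user_data):
#       print('executing 0x%x (size %d)' % (address, size))
#   emulator.mu.hook_add(UC_HOOK_CODE, trace)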
| 29.210526
| 72
| 0.73964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.104505
|
58919030577b20ce04be8ee22121a25618dfdeb8
| 816
|
py
|
Python
|
community_ext/__init__.py
|
altsoph/community_loglike
|
ea8800217097575558f8bfb97f7737d12cad2339
|
[
"BSD-3-Clause"
] | 16
|
2018-02-14T23:14:32.000Z
|
2021-09-15T09:38:47.000Z
|
community_ext/__init__.py
|
altsoph/community_loglike
|
ea8800217097575558f8bfb97f7737d12cad2339
|
[
"BSD-3-Clause"
] | null | null | null |
community_ext/__init__.py
|
altsoph/community_loglike
|
ea8800217097575558f8bfb97f7737d12cad2339
|
[
"BSD-3-Clause"
] | 7
|
2019-05-09T10:25:24.000Z
|
2020-06-06T09:37:18.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This package implements several community detection.
Originally based on community aka python-louvain library from Thomas Aynaud
(https://github.com/taynaud/python-louvain)
"""
from .community_ext import (
partition_at_level,
modularity,
best_partition,
generate_dendrogram,
induced_graph,
load_binary,
estimate_gamma,
estimate_mu,
ilfr_mu_loglikelihood,
compare_partitions,
model_log_likelihood
)
__author__ = """Aleksey Tikhonov (altsoph@gmail.com)"""
__author__ = """Liudmila Ostroumova Prokhorenkova (ostroumova-la@yandex-team.ru)"""
# Copyright (C) 2018 by
# Aleksey Tikhonov (altsoph@gmail.com>
# Liudmila Ostroumova Prokhorenkova (ostroumova-la@yandex-team.ru)
# All rights reserved.
# BSD license.
| 24.727273
| 83
| 0.72549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 511
| 0.626225
|
589531a8cfe2795a9b90146b7a85879eaadf036f
| 895
|
py
|
Python
|
youbot_gazebo_publisher/src/listener.py
|
ingjavierpinilla/youBot-Gazebo-Publisher
|
9314f5c471cde91127d76ba205ce6259e595145a
|
[
"MIT"
] | null | null | null |
youbot_gazebo_publisher/src/listener.py
|
ingjavierpinilla/youBot-Gazebo-Publisher
|
9314f5c471cde91127d76ba205ce6259e595145a
|
[
"MIT"
] | null | null | null |
youbot_gazebo_publisher/src/listener.py
|
ingjavierpinilla/youBot-Gazebo-Publisher
|
9314f5c471cde91127d76ba205ce6259e595145a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from trajectory_msgs.msg import JointTrajectory
from control_msgs.msg import JointTrajectoryControllerState
def callback_odom(data):
print("odom\n" + str(data))
def callback_JointTrajectory(data):
print("gripper_controller/command\n" + str(data))
def callback_gripper_JointTrajectory(data):
print("gripper_controller\n" + str(data))
def listener():
rospy.init_node("listener12", anonymous=True)
# rospy.Subscriber("/odom", Odometry, callback_odom)
# rospy.Subscriber("/arm_1/gripper_controller/command", JointTrajectory, callback_JointTrajectory)
rospy.Subscriber(
"/arm_1/gripper_controller/state",
JointTrajectoryControllerState,
callback_gripper_JointTrajectory,
)
rospy.spin()
if __name__ == "__main__":
listener()
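# A matching publisher sketch for the same controller topic (topic reuse is
# hypothetical; rospy.Publisher and rospy.Rate are standard rospy APIs):
#
#   pub = rospy.Publisher("/arm_1/gripper_controller/command", JointTrajectory, queue_size=10)
#   rate = rospy.Rate(10)  # 10 Hz
#   while not rospy.is_shutdown():
#       pub.publish(JointTrajectory())
#       rate.sleep()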
| 24.861111
| 102
| 0.746369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 286
| 0.319553
|
5895c3e0dafc21f11b778b930d6d27f00014cab8
| 75,699
|
py
|
Python
|
main.py
|
hustleer/Discord-Encouragement-Bot
|
4105d1e81fa0e76ade7cfd293dd82ea610064f58
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
hustleer/Discord-Encouragement-Bot
|
4105d1e81fa0e76ade7cfd293dd82ea610064f58
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
hustleer/Discord-Encouragement-Bot
|
4105d1e81fa0e76ade7cfd293dd82ea610064f58
|
[
"Apache-2.0"
] | null | null | null |
#Botpic:https://upload.wikimedia.org/wikipedia/commons/thumb/b/b8/Red_Rose_Photography.jpg/800px-Red_Rose_Photography.jpg
#Botpic:https://commons.wikimedia.org/wiki/File:Red_Rose_Photography.jpg
#reference:https://www.youtube.com/watch?v=SPTfmiYiuok
import discord
import os
import requests
import json
import math, random
from replit import db
from keep_alive import keep_alive
import asyncpraw, asyncprawcore
#import commands
import time, asyncio, datetime
from discord.ext import tasks
from discord import Member
from discord.ext.commands import has_permissions, MissingPermissions
from prawcore import NotFound
import ffmpeg
from discord import FFmpegPCMAudio
from dotenv import load_dotenv
from youtube_search import YoutubeSearch
load_dotenv()
client = discord.Client()
# To cache the every user For on_remove_reaction to be usable
# Also enable members intent from https://discord.com/developers/ in bot secition
intents = discord.Intents.default()
intents.members = True
global playing, stream
global currently_playing_message
def say_hello():
print(time.ctime())
#await message.channel.send("hello :-)" + str(joke))
#t1 = threading.Timer(10, say_hello)
#t1.start()
#---------- To keep the bot alive --------------------------
#1. keeping the bot alive
'''
#------------------- adding a background task -----------------
status = cycle(['with Python','JetHub'])
@bot.event
async def on_ready():
change_status.start()
print("Your bot is ready")
@tasks.loop(seconds=10)
async def change_status():
await bot.change_presence(activity=discord.Game(next(status)))
#--------------------------------------------------------------
3. Setup the Uptime Robot :
create an account on uptime robot.
After creating an account, go to the dashboard and click on Add new monitor (preview)
select monitor type Http(s) (preview)
then go to to ur project on repl.it and copy the url from the top of the console and paste it in url section of the monitor (preview)
now set the monitoring interval to every 5 mins (so that it will ping the bot every 5 mins) and click on create monitor twice (preview)
That's it... Now go to your project on repl.it and hit the Run button
'''
class MySchedule:
    async def schedule_message(self,
                               author='anonymous',
                               message='please provide a message',
                               id=863298114949218324,
                               seconds=0):
print('received:')
print(author, message, id, seconds)
#await ctx.message.delete()
if author == 'anonymous':
#author = 'anonymous'
description = 'command: .anon your_message'
else:
author = author + ' <scheduled_message>'
description = "command: .schedule time_in_seconds your_message"
        await asyncio.sleep(seconds)  # non-blocking sleep so the event loop keeps running
        print('slept {} seconds'.format(seconds))
print('author : ', author)
#channel = bot.get_channel(id=ctx.channel.id)
#print('sending {}'.format(message))
#retStr = str("""```css\nThis is some colored Text```""")
#embed = discord.Embed(title="Random test")
#embed.add_field(name="Name field can't be colored as it seems",value=retStr)
#await ctx.send(embed=embed)
#message = str(ctx.message.author).split('#')[0] + ' : ' + message
embed = discord.Embed(title=author, colour=discord.Color.blue())
embed.add_field(
name=message,
value=description,
)
channel = bot.get_channel(id=id)
await channel.send(embed=embed)
reddit = asyncpraw.Reddit(
client_id="nnhGBCiBxSJysTobl6SLPQ",
client_secret=os.environ['rd_client_secret'],
password=os.environ['rd_pass'],
user_agent="praw_test",
username="Alternative-Ad-8849",
)
async def sub_exists(subreddit_name):
exists = True
if subreddit_name.startswith(('/r/', 'r/')):
subreddit_name = subreddit_name.split('r/')[-1] # -1 gets the last element in the list
try:
subreddit = await reddit.subreddit(subreddit_name, fetch=True) # by default Async PRAW doesn't make network requests when subreddit is called
# do something with subreddit
except asyncprawcore.Redirect:
exists=False
return(exists)
# Reddit will redirect to reddit.com/search if the subreddit doesn't exist
#await ctx.send(f"Subreddit {subreddit_name} doesn't exist.")
def get_nude():
    # NOTE: written for the synchronous praw API; with the asyncpraw client
    # created above, reddit.subreddit() must be awaited (see get_one below).
    memes_submissions = reddit.subreddit('BustyPetite').hot()
print('got memes')
post_to_pick = random.randint(1, 15)
print('choosen random')
for i in range(0, post_to_pick):
print('for loop:{}'.format(i))
submission = next(x for x in memes_submissions if not x.stickied)
return (submission.url)
def get_crazy(sub_reddit_name='memes'):
    # NOTE: also written for the synchronous praw API (see note on get_nude)
    memes_submissions = reddit.subreddit(sub_reddit_name).hot()
#print('got memes')
#post_to_pick = random.randint(1, 15)
#print('choosen random')
start = random.randint(100, 1000)
end = random.randint(start, start + 100)
print('start:{} end:{}'.format(start, end))
for i in range(start, end):
#print('for loop:{}'.format(i))
submission = next(x for x in memes_submissions if not x.stickied)
yield (submission.url)
def get_memes_crazy():
memes_submissions = reddit.subreddit('memes').hot()
print('got memes')
#post_to_pick = random.randint(1, 50)
print('choosen random')
for i in range(0, 50): #post_to_pick):
print('for loop:{}'.format(i))
submission = next(x for x in memes_submissions if not x.stickied)
yield (submission.url)
#return submission
async def get_one(sub_reddit='memes'):
#Working
#submission = list(reddit.subreddit(sub_reddit_name).random()#.hot(limit=None))
#submissions = list(reddit.subreddit('redditdev').hot(limit=None))
'''urls=[]
submissions = await list(reddit.subreddit('redditdev').hot(limit=None))
print(await submissions)'''
#submissions = await reddit.subreddit("memes").hot(limit=random.randint(1,150))
#for submission in submissions:
# pass
subreddit = await reddit.subreddit(sub_reddit)
async for submission in subreddit.random_rising(
limit=random.randint(1, 150)):
pass
#print(submission.title)
#urls.append([submission.title,submission.url])
#yield(submission.title, submission.url)
#print(submission.title)'''
#submissionn = random.choice(submissions)
#submission = reddit.subreddit("AskReddit").random()
#submissions = reddit.subreddit('redditdev').hot(limit=None))
#submission = random.choice(submissions)
#print('got memes')
#post_to_pick = random.randint(1, 50)
#print('choosen random')
'''for i in range(0, 50):#post_to_pick):
print('for loop:{}'.format(i))
submission = next(x for x in memes_submissions if not x.stickied)'''
#submission = await random.choice(memes_submissions)
#return(submission.url)
#print(submissionn.url)
#print(submission.title)
#return('hi')
embed = discord.Embed(title=submission.title,
url=submission.url,
description=submission.selftext,
colour=discord.Color.red())
embed.set_image(url=submission.url)
#await channel.send(embed=embed)
return (embed)
from discord.ext import commands
bot = commands.Bot(command_prefix='.', help_command=None, intents=intents)
'''
class MyHelpCommand(commands.MinimalHelpCommand):
async def send_pages(self):
destination = self.get_destination()
e = discord.Embed(colour=discord.Color.blurple(), description='')
for page in self.paginator.pages:
e.description += page
await destination.send(embed=e)
bot.help_command = MyHelpCommand()'''
# My sample help command:
@bot.command(name='help',
brief='`.help` for help',
             help='Please enter `.help` for help')
async def help(ctx, args=None):
""" Check which mods are online on current guild """
help_embed = discord.Embed(
title="Encouragement Bot Help!",
#url="https:ioee.herokuapp.com/",
description=
"Type `.help <command name>` for more details about each command. e.g. `.help joke`",
)
command_names_list = [x.name for x in bot.commands]
# If there are no arguments, just list the commands:
if not args:
help_embed.add_field(
name="List of supported commands:",
            value='All commands, with brief descriptions:',
            #value="\n".join([str(i+1)+". "+x.name for i,x in enumerate(bot.commands)]),
inline=False,
#colour=discord.Color.blue()
)
#bot.get_command(x.name).help
for i in bot.commands:
help_embed.add_field(
name='***{}***'.format(i.name),
#value='value'
value='> {}\n\n\n'.format(bot.get_command(i.name).brief),
inline=False,
#colour=discord.Color.blue()
)
#print(i.name)
#print(i)
#print(bot.get_command(i.name).help)
'''for i,command in enumerate(bot.commands):
help_embed.add_field(
name = command,
value = bot.get_command(command),
inline=True
)'''
help_embed.add_field(
name="Details",
value=
"Type `.help <command name>` for more details about each command.",
inline=False)
# If the argument is a command, get the help text from that command:
elif args in command_names_list:
help_embed.add_field(name=args,
value=str(bot.get_command(args).brief) + ' \n' +
str(bot.get_command(args).help))
# If someone is just trolling:
else:
help_embed.add_field(name="Nope.",
value="Don't think I got that command, boss!")
await ctx.send(embed=help_embed)
# My sample help command:
@bot.command(name='share_info',
brief='`.share_info` for share_info',
             help='Please enter `.share_info` for mero_share_info')
async def info(ctx, args=None):
response = requests.get('http://ioee.herokuapp.com/meroshare/')
response = response.text.strip()
print(response)
    prev_message = ''  # default so the comparison below cannot hit an unbound name
    try:
        previous_messages = await ctx.channel.history(limit=1).flatten()
        prev_message = previous_messages[0].content
        print('previous_message:')
        print(prev_message)
    except Exception:
        pass
    if (str(prev_message).strip() != response):
        print('not same messages: prev_message and response')
        await ctx.send(response)
    else:
        print('same message as previous message, so not sending')
        pass
@bot.command(name='ping',
brief=" short_help:to test if bot responding ",
help='long_help: e.g. .ping')
async def ping(ctx, subreddit='jokes', no_of_posts=1, user='.'):
#channel = bot.get_channel(id=int(channel_id))
'''for n, submission in enumerate(reddit.subreddit('memes').top('day',limit=int(no_of_posts/3))):
print('Unleash for loop:{}'.format(n))
title = submission.title
body = submission.selftext
embed = discord.Embed(
title=submission.title,
url=submission.url,
description=body,
colour=discord.Color.green())
embed.set_image(url=submission.url)
await ctx.send(embed=embed)'''
await ctx.send('pong ')
print('Ping-Pong is invoked: ', user, ctx)
@bot.command(name='embed', help='e.g.`.embed`', brief='embedding help')
async def embed(ctx):
embed = discord.Embed(title="Text Formatting",
url="https://realdrewdata.medium.com/",
description="Here are some ways to format text",
colour=discord.Color.blue())
embed.set_author(
name="RealDrewData",
url="https://twitter.com/RealDrewData",
icon_url=
"https://cdn-images-1.medium.com/fit/c/32/32/1*QVYjh50XJuOLQBeH_RZoGw.jpeg"
)
#embed.set_author(name=ctx.author.display_name, url="https://twitter.com/RealDrewData", icon_url=ctx.author.avatar_url)
embed.set_thumbnail(url="https://i.imgur.com/axLm3p6.jpeg")
embed.add_field(name="*Italics*",
value="Surround your text in asterisks (\*)",
inline=False)
embed.add_field(name="**Bold**",
value="Surround your text in double asterisks (\*\*)",
inline=False)
embed.add_field(name="__Underline__",
value="Surround your text in double underscores (\_\_)",
inline=False)
embed.add_field(name="~~Strikethrough~~",
value="Surround your text in double tildes (\~\~)",
inline=False)
embed.add_field(name="`Code Chunks`",
value="Surround your text in backticks (\`)",
inline=False)
embed.add_field(name="Blockquotes",
value="> Start your text with a greater than symbol (\>)",
inline=False)
embed.add_field(name="Secrets",
value="||Surround your text with double pipes (\|\|)||",
inline=False)
embed.set_footer(text="Learn more here: realdrewdata.medium.com")
await ctx.send(embed=embed)
@bot.command(name='schedule',
brief='to schedule message to be sent in any group.',
help='e.g. `.schedule 10 scheduled for ten seconds.')
async def schedule(ctx, seconds: int = 3, *, message='Hello There'):
#print(ctx.channel.id)
print('Seconds: ', seconds)
msg = str(message)
#print(msg)
await ctx.message.delete()
id = ctx.channel.id
author = str(ctx.message.author).split('#')[0]
#print(author)
#print(type(id))
sch = {
'1': '',
'2': '',
'3': '',
'4': '',
'5': '',
}
r = random.randint(1, 5)
sch[str(r)] = MySchedule()
await sch[str(r)].schedule_message(author=author,
message=msg,
id=id,
seconds=int(seconds))
#await schedule_message(author=author, message=msg, id=id, seconds=3)
#print(id)
#print(channel)
#await channel.send('hi')
'''@bot.command()
async def schedule(ctx, message='Hello There', seconds = 3):
#print(ctx.channel.id)
m=str(message)
id = ctx.message.id
print('\n\n\n{}\n\n'.format(m))
author = str(ctx.message.author).split('#')[0]
await ctx.message.delete()
#id=ctx.channel.id
channel = bot.get_channel(id=id)
print(id)
print(channel)
#await channel.send('hi')
#await schedule_message(author, m, id, seconds = seconds)
#print(ctx.message)
#await ctx.message.delete(ctx.message)
#await channel.send('hi')
#await ctx.send('pong')
#print('Im invoked')'''
@bot.command(name='anon',
brief='to send message anonymously',
help='e.g. `.anon Guess who!`')
async def anon(ctx, *, message='please provide a message'):
msg = str(message)
#print(msg)
await ctx.message.delete()
id = ctx.channel.id
a = {'anon': ''}
a['anon'] = MySchedule()
await a['anon'].schedule_message('anonymous', msg, id)
print('send')
print(msg, id)
#await schedule_message(author='', message=msg, id=id)
@bot.command(name="echo",
pass_context=True,
             brief='echoes/repeats the message, deleting the user\'s message',
help='e.g. `.echo I am echoed`')
async def echo(ctx, *, message='please provide a message'):
msg = message
#print(ctx.message)
try:
await ctx.message.delete()
except:
pass
#id=ctx.channel.id
await ctx.send(msg)
@echo.error
async def echo_error(ctx, error):
if isinstance(error, MissingPermissions):
text = "Sorry {}, you do not have permissions to do that!".format(
ctx.message.author)
await bot.send_message(ctx.message.channel, text)
@bot.command(name='unleash',
             brief='unleashes a subreddit to the current channel',
help='e.g.To unleash r/jokes `.unleash jokes`')
async def unleash(ctx, subreddit='none'):
if subreddit == 'none':
await ctx.send('Please enter the subreddit to be unleashed')
else:
        print(ctx.channel.id)
        if "unleash" not in db.keys():
            db['unleash'] = {}
        if await sub_exists(subreddit):
            if str(ctx.channel.id) not in db['unleash']:
                #i.e. the channel doesn't exist in the database yet
                db['unleash'][str(ctx.channel.id)] = []
            if str(subreddit) not in db['unleash'][str(ctx.channel.id)]:
                db['unleash'][str(ctx.channel.id)].append(str(subreddit))
                await ctx.send('unleashing r/{} to {}'.format(subreddit, ctx.channel))
            else:
                await ctx.send('r/{} already unleashed to {}'.format(subreddit, ctx.channel))
        else:
            await ctx.send('Sorry! Subreddit r/{} does not exist.'.format(subreddit))
@bot.command(name='contain',
             brief='to contain/stop an unleashed subreddit',
help='e.g. `.contain jokes`')
async def contain(ctx, subreddit='none'):
if subreddit == 'none':
        await ctx.send('Please enter the subreddit to be contained')
else:
print(ctx.channel.id)
if str(ctx.channel.id) in db['unleash'] and str(
subreddit) in db['unleash'][str(ctx.channel.id)]:
db['unleash'][str(ctx.channel.id)].remove(str(subreddit))
await ctx.send(
'successfully contained subreddit r/{} from {}'.format(
subreddit, ctx.channel))
else:
            await ctx.send('Subreddit r/{} is not unleashed in {}.'.format(
                subreddit, ctx.channel))
#print(ctx.channel.id)
#await ctx.send(ctx.channel.id)
@bot.command(
name='go',
brief='to see memes from r/memes or nude from r/\'BustyPetite\'',
help='e.g. `.go meme`, `.go meme crazy`, `.go nude`, `.go nude crazy`')
async def go(ctx, what='', what2=''):
    print('what:{} what2:{}'.format(what, what2))
    if what == 'nude':
        if what2 == 'crazy':
            print('1')
            async for url in get_crazy('BustyPetite'):
                print('sending nude')
                await ctx.send(url)
        else:
            print('11')
            embed = await get_one('BustyPetite')
            await ctx.send(embed=embed)
    else:
        if what == 'meme' and what2 == 'crazy':
            async for url in get_crazy():
                await ctx.send(url)
        else:
            embed = await get_one()
            await ctx.send(embed=embed)
#name='', brief='', help='e.g. `.`'
'''@bot.command(name='', brief='', help='e.g. `.`')
async def h(ctx, what='general'):
#await ctx.send('pong')
if str(what).lower()=='general':
for command in commands:
await ctx.send(command)
elif str(what).lower() == 'fuse':
for command in fuse_help_commands:
await ctx.send(command)'''
@bot.command(
name='add_user',
brief='to activate fuse user\'s auto attendance',
help='get code from https://ioee.herokuapp.com e.g. `.add_user *code*`')
async def add_user(ctx, pseudo_id):
add_fuse_user(pseudo_id)
await ctx.send("User {} has been activated successfully.".format(pseudo_id)
)
@bot.command(name='check',
brief='checks if live class has been started',
help='e.g. `.check`')
async def check(ctx, pseudo_id):
    add_fuse_user(pseudo_id)
    await ctx.send("Sorry, manual checking is unavailable for a while.")
#await test.start(True)
@bot.command(
name='remove_user',
    brief='to deactivate fuse auto-attendance for a specific user',
help='get code from https://ioee.herokuapp.com e.g. `.remove_user *code*`')
async def remove_user(ctx, pseudo_id):
    if "users" in db.keys():
        remove_fuse_user(pseudo_id)
    await ctx.send("User {} has been removed successfully.".format(pseudo_id))
#------------------------------------------
@bot.command(name='joke',
brief='to get jokes',
help='e.g. `.joke`, `.joke 10`')
async def joke(ctx, n=1):
    if n == 1:
        joke = get_joke()
        await ctx.send("\n\n\nJoke:" + str(joke))
    else:
        jokes = list(get_jokes(n))
        for joke in jokes:
            await ctx.send("\n\n\nJoke:" + str(joke))
@bot.command(name='jokes',
brief='to get jokes',
help='e.g. `.jokes`, `.jokes 10`')
async def jokes(ctx, n=5, subreddit='jokes'):
await unleash_reddit(subreddit, str(ctx.channel.id), n)
@bot.command(name='riddle', brief='to get a riddle', help='e.g. `.riddle`')
async def riddle(ctx):
riddle = get_riddles()
await ctx.send(riddle)
@bot.command(name='quote', brief='to get an inspiring quote', help='e.g. `.quote`')
async def quote(ctx):
quote = get_quote()
await ctx.send(quote)
@bot.command(name='inspire',
brief='To get inspirational message',
help='e.g. `.inspire`')
async def inspire(ctx):
inspiration = get_quote()
await ctx.send(inspiration)
@bot.command(name='puns', brief='To get puns', help='e.g. `.puns`')
async def puns(ctx):
puns = get_puns()
await ctx.send(puns)
@bot.command(name='one_liners',
brief='to get one liner jokes',
help='e.g. `.one_liners`')
async def one_liners(ctx):
    #add_fuse_user()
    await ctx.send("One liners are coming very very soon!")
@bot.command(name='meme',
brief='to display meme from r/memes',
help='e.g. `.meme`')
async def meme(ctx, what='memes'):
embed = await get_one(what)
await ctx.send(embed=embed)
#for i,j in get_one():
# await ctx.send(i)
# await ctx.send(j)
@bot.command(name='memes',
brief='to display memes from r/memes',
help='e.g. `.memes 3`')
async def memes(ctx, n=5, subreddit='memes'):
await unleash_reddit(subreddit, str(ctx.channel.id), n)
#embed = await get_one(what)
#await ctx.send(embed=embed)
@bot.command(name='reddit',
brief='to display subreddit from r/subreddit',
help='e.g. `.reddit motivation 3`')
async def reddit_posts(ctx, subreddit='motivation', n=3):
await unleash_reddit(subreddit, str(ctx.channel.id), n)
@bot.command(name='deactivate',
brief='to deactivate the bot',
help='e.g. `.deactivate`')
async def deactivate(ctx):
db["responding"] = False
await ctx.send(
"Encouragement bot is deactivated.\nPlease enter: .activate to activate."
)
@bot.command(name='activate', brief='to activate bot', help='e.g. `.activate`')
async def activate(ctx):
db["responding"] = True
#await ctx.send("YaY.. I'm turned on baby...")"
await ctx.send("Encouragement bot is enabled.. sorry for being rude.")
@bot.command(
name='mute',
brief=
    'to mute successful/unsuccessful attendance attempt logs of fuse auto-attend.',
help='e.g. `.mute successful`, `.mute unsuccessful`')
async def mute(ctx, what):
if what == 'unsuccessful':
db["unsuccessful_logs"] = False
await ctx.send("unsuccessful attending_logs are muted.")
    elif what == 'successful':
        db["successful_logs"] = False
        await ctx.send(
            "successful attending_logs are muted. To unmute please enter: .unmute successful"
        )
@bot.command(
name='unmute',
brief=
    'to unmute successful/unsuccessful attendance attempt logs of fuse auto-attend.',
help='e.g. `.unmute successful`, `.unmute unsuccessful`')
async def unmute(ctx, what):
    if what == 'unsuccessful':
        db["unsuccessful_logs"] = True
        await ctx.send("unsuccessful attending_logs are unmuted.")
    elif what == 'successful':
        db["successful_logs"] = True
        await ctx.send(
            "successful attending_logs are unmuted. To mute please enter: .mute successful"
        )
#print('\n\nwhat==\'\'', end=' ')
#print(what=='')
# db["responding"] = False
# await message.channel.send("Encouragement bot is deactivated.\nPlease Enter: .activate to activate")
@bot.command(name='list',
brief='to list the current encouraging messages',
help='e.g. `.list`')
async def list(ctx, what='encouragements'):
if what == 'users':
users = []
if "users" in db.keys():
users = list(db["users"])
await ctx.send('Users: ' + str(users))
else:
encouragements = []
if "encouragements" in db.keys():
encouragements = list(db["encouragements"])
await ctx.send(encouragements)
@bot.command(name='delete',
brief='To delete encouragement message',
             help='e.g. `.delete Every passing second is making you better`')
async def delete(ctx, *, index):
    encouragements = []
    if "encouragements" in db.keys():
        # the command framework already strips the '.delete' prefix for us
        delete_encouragment(index.strip())
        encouragements = list(db["encouragements"])
    await ctx.send(encouragements)
@bot.command(name='new',
brief='To add new encouraging message to database',
             help='e.g. `.new Every passing second is making you better`')
async def new(ctx, *, msg):
encouraging_message = msg.strip()
update_encouragements(encouraging_message)
await ctx.send("New encouraging message added.")
@bot.command(name='avatar',
brief='To see avatar of specific member in the group',
help='e.g. `.avatar @Encouragement Bot`')
async def avatar(ctx, *, avamember: discord.Member = None):
    avamember = avamember or ctx.author  # default to the caller when no member is mentioned
    await ctx.send(avamember.avatar_url)
# _______________________________________________________________________
# ---------------------------- For Music Bot : https://medium.com/pythonland/build-a-discord-bot-in-python-that-plays-music-and-send-gifs-856385e605a1
# _______________________________________________________________________
import os, youtube_dl
import ffmpeg
@bot.command(
name='join',
help='Tells the bot to join the voice channel before playing music ')
async def join(ctx):
if not ctx.message.author.voice:
await ctx.send("{} is not connected to a voice channel".format(
ctx.message.author.name))
return
else:
channel = ctx.message.author.voice.channel
await channel.connect()
@bot.command(name='leave', help='To make the bot leave the voice channel')
async def leave(ctx):
voice_client = ctx.message.guild.voice_client
if voice_client.is_connected():
await voice_client.disconnect()
else:
await ctx.send("The bot is not connected to a voice channel.")
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = ""
@classmethod
async def from_url(cls, url, *, loop=None, stream=False, download=False):
SAVE_PATH = os.path.join(os.getcwd(), 'downloads')
ydl_opts = {
'format': 'bestaudio/best',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address':
'0.0.0.0', # bind to ipv4 since ipv6 addresses cause issues sometimes
'preferredcodec': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'webm',
'preferredquality': '192',
}],
'outtmpl':SAVE_PATH + '/%(title)s.%(ext)s',
}
#results = YoutubeSearch(url, max_results=3).to_dict()
#vid_url = 'https://www.youtube.com' + results[0]['url_suffix']
#thumbnails = results[0]['thumbnails']
#title = results[0]['title']
#print('vid_url:{}, thumbnails:{}, title:{}, download:{},url:{}'.format(vid_url, thumbnails, title, download, url))
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
data = ydl.extract_info(f"ytsearch:{url}", download=download)['entries'][0]
URL = data['url']
thumbnails = data['thumbnails']
title = data['title']
vid_url = data['webpage_url']
print(URL)
#Renaming files if downloaded
        if download:
files = os.listdir(os.path.join(os.getcwd(), 'downloads'))
for file_name in files:
if not file_name.endswith('.part'):
# To download files as .mp3
#mp3_format = os.path.join(os.getcwd(), 'downloads', file_name.replace(file_name.split('.')[-1], 'mp3'))
file_name = os.path.join(os.getcwd(), 'downloads', file_name)
os.rename(file_name, title + '.mp3')
return(URL,thumbnails, title, vid_url)
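
# Usage sketch for the classmethod above (names are illustrative; assumes it is
# awaited from inside a running event loop, e.g. a command callback):
#   URL, thumbnails, title, vid_url = await YTDLSource.from_url('gangnam style')
#   player.play(discord.FFmpegPCMAudio(URL))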
@bot.command(name='p',
brief='To play song note: Please enter: `.join` first',
help="example: `.play gangnam style`")
async def play(ctx, *, url):
global playing
playing = url
if not ctx.message.author.voice:
await ctx.send("{} is not connected to a voice channel".format(
ctx.message.author.name))
return
else:
channel = ctx.message.author.voice.channel
try:
global player
player = await channel.connect()
except:
pass
#joined the channel
try:
server = ctx.message.guild
voice_channel = server.voice_client
#print('voice_channel : ' + str(voice_channel))
async with ctx.typing():
URL, thumbnails, title, vid_url = await YTDLSource.from_url(url, loop=bot.loop)
#to stop playing if already playing another
player.stop()
player.play(discord.FFmpegPCMAudio(URL))
print('vid_url:{}, thumbnails:{}, title:{}, URL:{},url:{}'.format(vid_url, thumbnails, title, URL, url))
embed=discord.Embed(title=title,
#description=stream['longDesc'],
color=0x00FFFF,
url=vid_url)
embed.set_author(name=ctx.message.author)
embed.set_thumbnail(url=thumbnails[0]['url'])
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=f'Added by {ctx.author}')
message = await ctx.send(embed=embed)
emos=['⏸️','⏹️', '⬇️']#['⏮️', '⏸️', '⏹️', '⏭️', '⬇️']
for emoji in emos:
await message.add_reaction(emoji)
except Exception as e:
print(e)
await ctx.send("The bot is not connected to a voice channel.")
#Downloads a video by name/url and returns the full filename
async def download_from_youtube(url):
SAVE_PATH = os.path.join(os.getcwd(), 'downloads')
ydl_opts = {
'format': 'bestaudio/best',
'preferredcodec': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'webm',
'preferredquality': '192',
}],'outtmpl':SAVE_PATH + '/%(title)s.%(ext)s',
}
print(' downloading!!! ')
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
try:
ydl.download([url])
except:
video = ydl.extract_info(f"ytsearch:{url}", download=True)['entries'][0]
else:
video = ydl.extract_info(url, download=False)
#return video
#print('type_of'+str(type(video)))
    # Did not work: the filename we extracted did not match the actual file name
'''file_name=str(video['title'] + '-' +video['id'] + '.' +video['formats'][3]['ext'])
file_name = file_name.replace('/','_')
'''
files = os.listdir(os.path.join(os.getcwd(), 'downloads'))
for file_name in files:
if not file_name.endswith('.part'):
# To download files as .mp3
mp3_format = os.path.join(os.getcwd(), 'downloads', file_name.replace(file_name.split('.')[-1], 'mp3'))
file_name = os.path.join(os.getcwd(), 'downloads', file_name)
os.rename(file_name, mp3_format)
print('file_name: {}'.format(file_name))
print('mp3_format: {}'.format(mp3_format))
return(mp3_format)
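
# Usage sketch (hypothetical ctx from a command callback): the helper blocks while
# youtube_dl downloads, then returns the renamed .mp3 path:
#   path = await download_from_youtube('gangnam style')
#   await ctx.send(file=discord.File(path))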
@bot.command(name='d',
             brief='To download a song: `.d song name`',
help="example: `.d gangnam style`")
async def d(ctx, *, url:str):
if not 'downloads' in os.listdir():
os.mkdir('downloads')
print('Try download')
async with ctx.typing():
URL, thumbnails, title, vid_url = await YTDLSource.from_url(url, loop=bot.loop, download=True)
full_downloaded_file_name = title + '.mp3'
await ctx.send(file=discord.File(full_downloaded_file_name))
os.remove(full_downloaded_file_name)
print(' downloaded!!! ')
@bot.command(name='pause', help='This command pauses the song')
async def pause(ctx):
    voice_client = ctx.message.guild.voice_client
    if voice_client.is_playing():
        voice_client.pause()  # VoiceClient.pause() is a plain method, not a coroutine
    else:
        await ctx.send("The bot is not playing anything at the moment.")
@bot.command(name='resume', help='Resumes the song')
async def resume(ctx):
    voice_client = ctx.message.guild.voice_client
    if voice_client.is_paused():
        voice_client.resume()  # VoiceClient.resume() is a plain method, not a coroutine
    else:
        await ctx.send(
            "The bot was not playing anything before this. Use play_song command"
        )
@bot.command(name='stop', help='Stops the song')
async def stop(ctx):
    await ctx.message.add_reaction('🛑')
    voice_client = ctx.message.guild.voice_client
    if voice_client.is_playing():
        voice_client.stop()  # VoiceClient.stop() is a plain method, not a coroutine
        #os.remove(
    else:
        await ctx.send("The bot is not playing anything at the moment.")
#To make leave voice channel if bot is alone in voice channel
@bot.event
async def on_voice_state_update(member, before, after):
print('\n\n Fired on_voice_state_update function \n\n')
voice_state = member.guild.voice_client
if voice_state is None:
        # Exit if the bot is not connected to a voice channel
return
if len(voice_state.channel.members) == 1:
await voice_state.disconnect()
@bot.command(aliases=['donation', 'support'])
async def donate(ctx, url: str = 'http://stream.radioparadise.com/rock-128'):
embed=discord.Embed(title='Support:',
                        description='''Thank you :-) \nesewa/khalti id:\n 9840445934 \n\n Patreon:\nhttps://www.patreon.com/join/7095305? \n\n Coinbase:\n https://commerce.coinbase.com/checkout/63a4b635-8510-459f-b091-a4f0697993e6
\n\n
And please vote for me here: https://top.gg/bot/862191340355715093/vote
''',
color=0x00FFFF,
#url=stream['url']
)
embed.set_author(
name=ctx.message.author,
)
#embed.set_thumbnail(url=stream['image'])
#embed.pfp = author.avatar_url
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=f'Added by {ctx.author}')
message = await ctx.send(embed=embed)
#_______________________________________________________________________
# ----------------------------- ---------------------------------------
# _______________________________________________________________________
# ----------------------------- FM Player -----------------------------
from discord import FFmpegPCMAudio
from discord.ext.commands import Bot
from dotenv import load_dotenv
load_dotenv()
#To be implemented
global streams
streams = None
def start_load_streams():
global streams
try:
streams[0]
except:
with open('test_fm_list.json','r') as F:
streams = json.load(F)
#To get current, next, previous streams
def get_stream(which=None, current=None):
global streams
try:
streams[0]
print('Streams already defined')
except:
with open('test_fm_list.json','r') as F:
streams = json.load(F)
streams = streams['stream_links']
print(streams)
#global streams_url
#streams=streams['stream_links']
#streams_url = [i['url'] for i in streams]
finally:
if current==None:
current={
"name": "Radio Nepal",
"city" : "kathmandu",
"url": "https://radionepal.news/live/audio/mp3",
"image": "https://radionepal.gov.np/wp-content/themes/rdnp/images/logo-en.png",
"desc": "am/sw/fm radio",
"longDesc": "Radio Nepal, oldest radio of nepal."
}
if which=='next':
nxt = streams.index(current) + 1
        # Triggered to get the next station at the end of the stations list
if nxt >= len(streams):
nxt -= len(streams)
current = streams[nxt]
print(nxt)
elif which=='prev':
prev = streams.index(current) - 1
print(prev)
        # Triggered to get the previous station at the beginning of the stations list
if prev < 0:
prev += len(streams)
print('current:{}, prev:{}'.format(streams.index(current),prev))
current = streams[prev]
return(current)
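
# Worked example of the circular index arithmetic above: with 3 stations and the
# current one at index 2, 'next' gives 2 + 1 = 3 >= len(streams), wrapping to
# 3 - 3 = 0; from index 0, 'prev' gives 0 - 1 = -1 < 0, wrapping to -1 + 3 = 2.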
@bot.command(aliases=['fm', 'radio'])
async def playfm(ctx, url: str = 'http://stream.radioparadise.com/rock-128'):
global playing
playing = "fm"
global currently_playing_message
global stream
stream = get_stream()
#url = "https://radio-streaming-serv-1.hamropatro.com/radio/8050/radio.mp3"
#url = 'https://radionepal.news/live/audio/mp3'
#global channel
channel = ctx.message.author.voice.channel
global player
try:
player = await channel.connect()
except:
pass
player.play(FFmpegPCMAudio(stream['url']))
#global message
embed=discord.Embed(title=stream['name'],
description=stream['longDesc'],
color=0x00FFFF,
url=stream['url'])
embed.set_author(
name=ctx.message.author,
)
#icon_url=ctx.message.author.avatar_url)
embed.set_thumbnail(url=stream['image'])
#embed.pfp = author.avatar_url
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=f'Added by {ctx.author}')
currently_playing_message = await ctx.send(embed=embed)
#emojis = [':track_previous:', ':pause_button:', ':stop_button:', ':track_next:', ':record_button:', ':arrow_down:']
emos=['⏮️', '⏸️', '⏹️', '⏭️']#, '⏺️', '⬇️']
for emoji in emos:
await currently_playing_message.add_reaction(emoji)
def get_embed(reaction, user, stream):
embed=discord.Embed(title=stream['name'],
#description=stream['longDesc'],
color=0x00FFFF,
url=stream['url'])
embed.set_author(
name=user,
)
#icon_url=ctx.message.author.avatar_url)
embed.set_thumbnail(url=stream['image'])
#embed.pfp = author.avatar_url
embed.timestamp = datetime.datetime.utcnow()
embed.set_footer(text=f'Added by {user}')
return embed
@bot.event
async def on_reaction_add(reaction, user):  # discord.py passes exactly (reaction, user)
#embed = reaction.embeds[0]
#emoji = reaction.emoji
#print('hii')
#await reaction.message.add_reaction('♥️')
global stream
if not user.bot:
# stop emoji
if str(reaction.emoji) == "⏹️":
player.stop()
# pause emoji
elif str(reaction.emoji) == "⏸️":
if player.is_playing():
player.pause()
print('paused')
else:
player.resume()
print('resume')
# next emoji
elif str(reaction.emoji) == "⏭️":
if playing=='fm':
print('Playing next, current:{}'.format(stream))
stream = get_stream('next',stream)
player.stop()
player.play(FFmpegPCMAudio(stream['url']))
embed=get_embed(reaction, user, stream)
await currently_playing_message.edit(embed=embed)
#message.send('Hello World')
#play_next()
# previous emoji
elif str(reaction.emoji) == "⏮️":
if playing=='fm':
print('Playing next, current:{}'.format(stream))
stream = get_stream('prev', stream)
player.stop()
player.play(FFmpegPCMAudio(stream['url']))
embed=get_embed(reaction, user, stream)
await currently_playing_message.edit(embed=embed)
print('Playing next')
# download emoji
elif str(reaction.emoji) == "⬇️":
if playing!='fm':
if not 'downloads' in os.listdir():
os.mkdir('downloads')
print('Try download')
async with reaction.message.channel.typing():
URL, thumbnails, title, vid_url = await YTDLSource.from_url(playing, loop=bot.loop, download=True)
full_downloaded_file_name = title + '.mp3'
await reaction.message.channel.send(file=discord.File(full_downloaded_file_name))
os.remove(full_downloaded_file_name)
print(' downloaded!!! ')
else:
await reaction.message.add_reaction(reaction)
#print('hii')
#print(reaction)
#print(reaction.message)
#print(user)
#if user.bot:
# return
#else:
# previous_messages = await channel.history(limit=1).flatten()
# prev_message.add_reaction('♥️')
'''if emoji == "emoji 1":
fixed_channel = bot.get_channel(channel_id)
await fixed_channel.send(embed=embed)
elif emoji == "emoji 2":
#do stuff
elif emoji == "emoji 3":
#do stuff
else:
return'''
@bot.event
async def on_reaction_remove(reaction, user):
print('\nremoved reaction\n')
global stream
if not user.bot:
# stop emoji
if str(reaction.emoji) == "⏹️":
player.stop()
# pause emoji
elif str(reaction.emoji) == "⏸️":
if player.is_playing():
player.pause()
print('paused')
else:
player.resume()
print('resume')
# next emoji
elif str(reaction.emoji) == "⏭️":
if playing=='fm':
print('Playing next, current:{}'.format(stream))
stream = get_stream('next',stream)
player.stop()
player.play(FFmpegPCMAudio(stream['url']))
embed=get_embed(reaction, user, stream)
await currently_playing_message.edit(embed=embed)
#message.send('Hello World')
#play_next()
# previous emoji
elif str(reaction.emoji) == "⏮️":
if playing=='fm':
print('Playing next, current:{}'.format(stream))
stream = get_stream('prev', stream)
player.stop()
player.play(FFmpegPCMAudio(stream['url']))
embed=get_embed(reaction, user, stream)
await currently_playing_message.edit(embed=embed)
print('Playing next')
# download emoji
elif str(reaction.emoji) == "⬇️":
        if playing != 'fm':  # downloading only makes sense for YouTube playback, mirroring on_reaction_add
if not 'downloads' in os.listdir():
os.mkdir('downloads')
print('Try download')
async with reaction.message.channel.typing():
full_downloaded_file_name = await download_from_youtube(playing)
await reaction.message.channel.send(file=discord.File(full_downloaded_file_name))
os.remove(full_downloaded_file_name)
print(' downloaded!!! ')
else:
await reaction.message.add_reaction(reaction)
# _____________________________________________________
# ///////////////////// FM Player /////////////////////
# _____________________________________________________
@bot.command(aliases=['s', 'sto'])
async def stopfm(ctx):
player.stop()
@bot.command(
name='disable_unleashing',
brief='To disable/stop add unleashing all reddit posts to the server',
help='e.g. `.disable_unleashing`')
async def disable_unleashing(ctx):
try:
unleashing.stop()
await ctx.send('unleashing disabled successfully.')
except:
await ctx.send('already disabled.')
@bot.command(
name='enable_unleashing',
brief=
'To enable/start unleashing previously stopped reddit posts to the server',
help='e.g. `.enable_unleashing`')
async def enable_unleashing(ctx):
try:
unleashing.start()
await ctx.send('unleashing enabled successfully.')
except:
await ctx.send('already enabled.')
@bot.command(name='disable_autoattend',
brief='To start autoattending in fuse classroom.',
help='e.g. `.start_unleashing`')
async def disable_autoattend(ctx):
try:
auto_attend.stop()
await ctx.send('fuse auto-attend disabled successfully.')
except:
await ctx.send('already disabled.')
@bot.command(name='enable_autoattend',
brief='To enable/stopsrt autoattending in fuse classroom.',
help='e.g. `.enable_unleashing`')
async def enable_autoattend(ctx):
try:
auto_attend.start()
await ctx.send('fuse auto-attend enabled successfully.')
except:
await ctx.send('already enabled.')
@bot.command(name='video_embed_test', brief='', help='e')
async def video(ctx):
embed = discord.Embed(
title='title',
url='https://thumbs2.redgifs.com/WelcomeSweetTadpole-mobile.mp4',
description='body',
colour=discord.Color.red())
embed.set_image(
url="https://thumbs2.redgifs.com/WelcomeSweetTadpole-mobile.mp4")
embed.set_video(
url="https://www.redgifs.com/watch/blissfulimperfectyardant")
await ctx.send(embed=embed)
'''
async def my_background_task():
await client.wait_until_ready()
counter = 0
channel = client.get_channel(id=123456789) # replace with channel_id
while not client.is_closed():
counter += 1
await channel.send(counter)
print(counter)
        await asyncio.sleep(60) # task runs every 60 seconds'''
sad_words = [
"sad", "depressed", "unhappy", "angry", "miserable", "depressing", "hurt",
"pain"
]
starter_encouragements = [
"Cheer up!",
"You are a great person / bot!",
]
commandss = [
'\".h fuse\" or \".help fuse\" -> for fuse_auto_attend help',
'fuse auto-attend registration at: https://ioee.herokuapp.com/',
'\".inspire\" or \".quote\" -> to display quote ',
'\".joke\" -> to display joke',
'\".meme\" -> displays best random meme',
'\".riddle\" -> displays best random riddle',
'\".puns\" -> displays best random puns',
'\".knock knock\" -> displays knock knock joke',
'\".deactivate\" -> deactivates the bot .activate -> activates the bot',
'\".new inspirational_message\" -> Adds new inspirationsl message to db',
'\".del inspirational_message\" -> deletes inspirational message from db',
'\".list\" -> lists the current inspirational messages',
]
fuse_help_commands = [
'\".h\" or \".help\" - for general help',
'----------- ------------------------- -----------',
'fuse auto-attend registration at: https://ioee.herokuapp.com/',
'---------------------------------',
'\".add_user user_token\" -> to add user for auto-fuse attandance',
'.remove_user user_token -> to remove user',
'\".list_user\" -> to list available users',
'\".check class\" or \".snoop class\" -> checks if live class started.',
'\".mute unsuccessful\" -> to mute unsuccessful attending_logs. ie. hide \"Live Class not started\" messages',
'\".mute successful\" -> to mute successful attending_logs ie. hide messages when attended successfully',
'\".unmute unsuccessful\" -> to unmute unsuccessful attending_logs ie. show \"Live Class not started\" messages',
'\".umute successful\" -> to unmute successful attending_logs ie. show messages when attended successfully',
]
#from discord.ext import commands
#bot = commands.Bot(command_prefix='.')
#@bot.command()
#async def test(ctx):
# await ctx.send('I heard you! {0}'.format(ctx.author))
'''print('--------------Test Mode--------------------------------')
print(client.servers)
print('-------------------------------------------------------')'''
if "responding" not in db.keys():
db["responding"] = True
if "unsuccessful_logs" not in db.keys():
db["unsuccessful_logs"] = False
if "successful_logs" not in db.keys():
db["successful_logs"] = True
def get_quote():
response = requests.get("https://zenquotes.io/api/random")
json_data = json.loads(response.text)
quote = json_data[0]['q'] + " -" + json_data[0]['a']
return (quote)
def get_joke():
response = requests.get("https://imao.herokuapp.com/jokes/api/random/")
json_data = response.json()
joke = str(json_data['title']) + ' : ' + str(
json_data['body']) + ' - ' + str(json_data['author'])
return (joke)
def get_jokes(no_of_jokes):
response = requests.get("https://imao.herokuapp.com/jokes/api/{}/".format(
int(no_of_jokes)))
jokes = []
for joke in response.json()['jokes']:
jokes.append(
str(joke['title']) + ' : ' + str(joke['body']) + ' - ' +
str(joke['author']))
return (jokes)
def get_puns():
    return ('Puns are coming very very soon!')
def get_riddles():
    return ('Riddles are coming very very soon!')
def add_fuse_user(pseudoid):
if "users" in db.keys():
users = db["users"]
if pseudoid not in users:
users.append(pseudoid)
db["users"] = users
else:
db["users"] = [pseudoid]
def remove_fuse_user(pseudoid):
users = list(db["users"])
#if len(encouragements) > index:
if pseudoid in users:
#del encouragements[index]
users.remove(pseudoid)
db["users"] = users
def update_encouragements(encouraging_message):
if "encouragements" in db.keys():
encouragements = db["encouragements"]
encouragements.append(encouraging_message)
db["encouragements"] = encouragements
else:
db["encouragements"] = [encouraging_message]
def delete_encouragment(index):
encouragements = list(db["encouragements"])
#if len(encouragements) > index:
if index in encouragements:
#del encouragements[index]
encouragements.remove(index)
db["encouragements"] = encouragements
def sanitize_db():
users = list(set(list(db["users"])))
users_sanitized = []
for user in users:
users_sanitized.append(
user.replace('\'', '').replace('\"', '').strip())
db["users"] = users_sanitized
print('Users sanitized. \n Users:')
print(list(db["users"]))
def attend_each(usr):
    custom_url = 'https://ioee.herokuapp.com/attend/{}/'.format(usr)
response = requests.get(custom_url)
return (response.text)
#---------------Working------------------------
# For scraping quotes every 1 min.
@tasks.loop(minutes=1)
async def start_scraping():
with open('quotes.json','r') as f:
saved_quotes = json.load(f)
# Got saved quotes
saved_quotes = saved_quotes['quotes']
new_quotes=requests.get('https://zenquotes.io/api/quotes').json()
# To combine new and old quotes
n=0
for quote in new_quotes:
if quote not in saved_quotes:
saved_quotes.append(quote)
n+=1
total_quotes = len(saved_quotes)
with open('quotes.json','w') as file:
json.dump({'quotes' : saved_quotes}, file, indent = 4)
print('Saved {} quotes, total:{}'.format(n,total_quotes))
@tasks.loop(minutes=30)
async def auto_attend(manual_attempt=False):
intents = discord.Intents.default()
intents.members = True
#user= await client.get_user("487904509670337509")
#await client.send_message(user, "Your message goes here")
#client.get_user(487904509670337509).send('hi')
#sanitize_db()
print("Users: ")
users = list(db['users'])
print(users)
# To limit attend time from 9 am to 5:45
# i.e. (03:15 to 11:45) UTC
now = datetime.datetime.now()
    morning = now.replace(hour=3, minute=15, second=0, microsecond=0)  # matches the 03:15 UTC noted above
evening = now.replace(hour=11, minute=45, second=0, microsecond=0)
#print('hello fella')
if (now.strftime("%A") != "Saturday") and (now >= morning
and now <= evening):
channel = bot.get_channel(id=862205194283253763)
#user = client.get_user("487904509670337509")
#await user.send_message('hi')
#username = bot.get_user('861131196779331624')
#print('bot:')
#print(username)
users = []
if "users" in db.keys():
users = db["users"]
for user in users:
#response = str(attend_each(user))
custom_url = 'https://ioee.herokuapp.com/attend/{}/'.format(user)
print(custom_url)
response = requests.get(str(custom_url))
response = response.text.strip()
            print(response)
            if response == "Live Class not started" and db[
                    "unsuccessful_logs"] == False and manual_attempt == False:
                continue
elif db["successful_logs"] == False and mannual_attempt == False:
await channel.send(
"Successful attending attempt_logs are muted. to unmute please enter: .unmute unsuccessful"
)
else:
try:
previous_messages = await channel.history(
limit=1).flatten()
prev_message = previous_messages[0].content
print('previous_message:')
print(prev_message)
                except:
                    prev_message = "Your attendance is done. Discord prevented previous message view"
if (str(prev_message).strip() != response):
#print("mannual_attempt:{} db[\"successful_logs\"]:{} db[\"unsuccessful_logs\"]:{} response=={}, response:{}".format(mannual_attempt, db["successful_logs"],db["unsuccessful_logs"], response == "Live Class not started", response ) )
                    print('not same messages: prev_message and response')
await channel.send(response)
#await channel.send('user:'+str(user))
else:
print('same message as previous message, so not sending')
#print(prev_message)
#print(response)
#print(str(prev_message)==str(response))
#print(type(response))
pass
#----------- To list discord servers ---------
@tasks.loop(hours=25)
async def share_info():
intents = discord.Intents.default()
intents.members = True
channel = bot.get_channel(id=882664470692909056)
response = requests.get('http://ioee.herokuapp.com/meroshare/')
response = response.text.strip()
print(response)
try:
previous_messages = await channel.history(limit=1).flatten()
prev_message = previous_messages[0].content
print('previous_message:')
print(prev_message)
except:
pass
if (str(prev_message).strip() != response):
print('not same messages:prev_message and rseponse')
await channel.send(response)
else:
print('same message as previous message, so not sending')
pass
@tasks.loop(hours=25)
async def meroshare(manual_attempt=False):
intents = discord.Intents.default()
intents.members = True
#user= await client.get_user("487904509670337509")
#await client.send_message(user, "Your message goes here")
#client.get_user(487904509670337509).send('hi')
#sanitize_db()
#print("Users: ")
#users=list(db['users'])
#print(users)
    # To limit the run window
    # i.e. (01:00 to 10:15) UTC
now = datetime.datetime.now()
morning = now.replace(hour=1, minute=00, second=0, microsecond=0)
evening = now.replace(hour=10, minute=15, second=0, microsecond=0)
#print('hello fella')
if (now.strftime("%A") != "Saturday") and (now >= morning
and now <= evening):
channel = bot.get_channel(id=882655060050444288)
#user = client.get_user("487904509670337509")
#await user.send_message('hi')
#username = bot.get_user('861131196779331624')
#print('bot:')
#print(username)
users = []
if "users" in db.keys():
users = db["users"]
for user in users:
#response = str(attend_each(user))
custom_url = 'https://ioee.herokuapp.com/attend/{}/'.format(user)
print(custom_url)
response = requests.get(str(custom_url))
response = response.text.strip()
            print(response)
            if response == "Live Class not started" and db[
                    "unsuccessful_logs"] == False and manual_attempt == False:
                continue
elif db["successful_logs"] == False and mannual_attempt == False:
await channel.send(
"Successful attending attempt_logs are muted. to unmute please enter: .unmute unsuccessful"
)
else:
try:
previous_messages = await channel.history(limit=1
).flatten()
prev_message = previous_messages[0].content
print('previous_message:')
print(prev_message)
                except:
                    prev_message = "Your attendance is done. Discord prevented previous message view"
if (str(prev_message).strip() != response):
#print("mannual_attempt:{} db[\"successful_logs\"]:{} db[\"unsuccessful_logs\"]:{} response=={}, response:{}".format(mannual_attempt, db["successful_logs"],db["unsuccessful_logs"], response == "Live Class not started", response ) )
                    print('not same messages: prev_message and response')
await channel.send(response)
#await channel.send('user:'+str(user))
else:
print('same message as previous message, so not sending')
#print(prev_message)
#print(response)
#print(str(prev_message)==str(response))
#print(type(response))
pass
#----------- To list discord servers ---------
class OwnerCommands(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
print("OwnerCommands Is Ready")
@bot.command()
async def servers(ctx):
discord.Intents.members = True
activeservers = bot.guilds
embed = discord.Embed(
title='Servers and members',
description='',
colour=discord.Color.green(),
)
'''for guild in activeservers:
print('guild.channels')
print(guild.channels)
embed.add_field(
name = str(guild.name) + ' ({}) own({})'.format(guild.member_count, guild.owner),
value=str([i.name for i in guild.members]),
)
'''
print('members:')
    for guild in bot.guilds[1:]:
        member_names = []
        async for member in guild.fetch_members(limit=None):
            member_names.append(member.name)
        embed.add_field(name=str(guild) +
                        ' ({}) own({})'.format(guild.member_count, guild.owner),
                        value=str(member_names))
await ctx.send(embed=embed)
#print(a)
# print(channel)
#await ctx.send(guild.name)
#print(guild.name)
def setup(client):
    client.add_cog(OwnerCommands(client))
#-------------------------
#Legacy helper written for sync PRAW; unused now that unleash_reddit handles asyncpraw
async def unleash_reddit_jokes(subreddit, channel_id, no_of_posts=7):
channel = bot.get_channel(id=int(channel_id))
for n, submission in enumerate(
reddit.subreddit('jokes').top('day', limit=int(no_of_posts / 2))):
print('Unleash for loop:{}'.format(n))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.blue())
embed.set_image(url=submission.url)
await channel.send(embed=embed)
for n, submission in enumerate(
reddit.subreddit('jokes').hot(limit=no_of_posts -
int(no_of_posts / 4))):
print('Unleash for loop:{}'.format(n))
title = str(submission.title)[:256]
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.blue())
await channel.send(embed=embed)
for n, submission in enumerate(
reddit.subreddit('jokes').new(limit=no_of_posts -
math.ceil(no_of_posts / 4))):
print('Unleash for loop:{}'.format(n))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.blue())
await channel.send(embed=embed)
async def unleash_reddit(subreddit, channel_id, no_of_posts=5):
channel = bot.get_channel(id=int(channel_id))
submissions = await reddit.subreddit(subreddit)
donot_proceed = 0
#To display hot post if only one is to be fetched
if no_of_posts == 1:
donot_proceed = 1
no_of_posts = 2
async for submission in submissions.hot(limit=int(no_of_posts / 4)):
print('Unleash for loop:{}'.format(0))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.red())
embed.set_image(url=submission.url)
print('Submission_url: ', submission.url)
try:
            #To filter lengthy messages > 2500 letters
if len(str(body)) < 2500:
image_formats = ['jpg', 'jpeg', 'png']
#checks if image_format in submission.url
                if any(fmt in str(submission.url) for fmt in image_formats):
await channel.send(embed=embed)
else:
await channel.send(submission.url)
except:
pass
if donot_proceed != 1:
async for submission in submissions.top('day',
limit=int(no_of_posts / 2)):
print('Unleash for loop:{}'.format('n'))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.red())
embed.set_image(url=submission.url)
print('Submission_url: \"', submission.url, '\"')
if submission.url == '':
print('Guess What')
try:
if len(str(body)) < 2500:
image_formats = ['jpg', 'jpeg', 'png']
#checks if image_format in submission.url
                    if any(fmt in str(submission.url) for fmt in image_formats):
await channel.send(embed=embed)
else:
await channel.send(submission.url)
except:
pass
async for submission in submissions.new(limit=no_of_posts -
math.ceil(no_of_posts / 4)):
print('Unleash for loop:{}'.format(0))
title = submission.title
body = submission.selftext
embed = discord.Embed(title=title,
url=submission.url,
description=body,
colour=discord.Color.red())
embed.set_image(url=submission.url)
print('Submission_url: ', submission.url)
try:
if len(str(body)) < 2500:
image_formats = ['jpg', 'jpeg', 'png']
#checks if image_format in submission.url
                    if any(fmt in str(submission.url) for fmt in image_formats):
await channel.send(embed=embed)
else:
await channel.send(submission.url)
except:
pass
'''
async def unleash_reddit(subreddit, channel_id, no_of_posts=5):
channel = bot.get_channel(id=int(channel_id))
submissions_top = await reddit.subreddit(subreddit)
submissions_hot = await reddit.subreddit(subreddit)
submissions_new = await reddit.subreddit(subreddit)
#30% top, 40%hot, 30%new
for i in range(0, no_of_posts):
print('Unleash for loop:{}'.format(i))
if i < int(no_of_posts/3):
submission=random.choice([x async for x in submissions_top.top(limit=25)])
print(a)
''async for x in submissions_top.top(limit=15):
if not x.stickied:
submission = x
#submission = next(x async for x in submissions_top.top('all') if not x.stickied)''
elif i < int(no_of_posts/7):
#submission = next(x async for x in submissions_hot.hot('all') if not x.stickied)
submission=random.choice([x async for x in submissions_top.hot(limit=35)])
else:
#submission = next(x async for x in submissions_new.new('all') if not x.stickied)
submission=random.choice([x async for x in #submissions_top.new(limit=15)])
embed=discord.Embed(
title=submission.title,
description=submission.selftext,
#description=submission.title,
colour=discord.Color.green())
embed.set_image(url=submission.url)
await channel.send(embed=embed)'''
@tasks.loop(hours=6)
async def unleashing():
print('\nstart Unleashing')
intents = discord.Intents.default()
#discord.Intents.members = True
intents.members = True
intents.all()
    if "unleash" not in db.keys():
        # first run: nothing has been unleashed yet
        db['unleash'] = {}
    for channel_id in dict(db['unleash']).keys():
for each_subreddit in db['unleash'][str(channel_id)]:
await unleash_reddit(each_subreddit, str(channel_id), 10)
print('Unleashed')
@bot.event
async def on_ready():
print('We have logged in as \"{0.user.name}\"'.format(bot))
print(bot.user.id)
#For fuse attendance trying
#auto_attend.start()
#For viewing share_info
#share_info.start()
#for unleashing from reddit
unleashing.start()
    start_scraping.start()
game = discord.Game("Chilling out.")
streaming = discord.Streaming(name='pubg lite',
url="https://www.twitch.tv/caterpileer")
#movie.url="https://thumbs2.redgifs.com/WelcomeSweetTadpole-mobile.mp4"
await bot.change_presence(status=discord.Status.online, activity=streaming)
#await bot.process_commands(message)
@bot.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == bot.user:
return
    if message.content.startswith('.guess'):
        await message.channel.send('Guess a number between 1 to 10')

        def guess_check(m):
            return m.content.isdigit()

        answer = random.randint(1, 10)
        import asyncio  # local import in case asyncio isn't already imported at module top
        try:
            # wait_for raises asyncio.TimeoutError rather than returning None
            guess = await bot.wait_for('message', timeout=5.0, check=guess_check)
        except asyncio.TimeoutError:
            await message.channel.send(
                'Sorry, you took too long. It was {}.'.format(answer))
            return
        if int(guess.content) == answer:
            await message.channel.send('You are right!')
        else:
            await message.channel.send(
                'Sorry. It is actually {}.'.format(answer))
else:
if message.guild is None and message.author != bot.user:
#await channel.send(str(message.author) + str(message.content))
embed = discord.Embed(title=message.author,
description=message.content)
channel = bot.get_channel(id=873477235477184522)
await channel.send(embed=embed)
print(str(message.content))
        if any(word in message.content for word in sad_words):
options = starter_encouragements
if "encouragements" in db.keys():
#print(list(db["encouragements"]))
options = options #+ list(db["encouragements"])
await message.channel.send(random.choice(options))
await bot.process_commands(message)
#await message.channel.send('hello')
#if message!='fuck':
# await message.add_reaction('♥️')
# return
'''print('Author_id:')
print(message.author.id)
#message.author.send_message('hi')
print('Hello')
msg = message.content.strip()
if msg==".help" or msg == '.h':
for command in commands:
await message.channel.send(command)
elif msg==".help fuse" or msg == '.h fuse':
for command in fuse_help_commands:
await message.channel.send(command)
elif msg.startswith(".add_user"):
pseudocode = msg.split(".add_user",1)[1].replace('\"','').replace('\'','').strip()
add_fuse_user(pseudocode)
await message.channel.send("User {} has been added/activated successfully.".format(pseudocode))
elif msg.startswith(".remove_user"):
users = []
if "users" in db.keys():
pseudocode = msg.split(".remove_user",1)[1].strip()
remove_fuse_user(pseudocode)
await message.channel.send("User {} has been removed successfully.".format(pseudocode))
elif msg==".snoop class" or msg==".check class" or message == ".check live-class" or message == ".check live_class" or message == ".check live class":
await message.channel.send("Sorry mannual checking is unavailable for a while");
await test.start(True)
\'''if db["successful_logs"] == False:
await message.channel.send("Successful attending attempt_logs are muted. to unmute please enter: .unmute unsuccessful")
await message.channel.send("Checking Live Classes: ")
m = test_mannual()
await message.channel.send(m)\'''
elif msg.startswith(".deactivate"):
#switch = msg.split(".deac ",1)[1].lower()
db["responding"] = False
await message.channel.send("Encouragement bot is deactivated.\nPlease Enter: .activate to activate")
elif msg.startswith(".activate"):
db["responding"] = True
await message.channel.send("Encouragement bot is activated.\nPlease enter: .deactivate to deactivate.")
elif msg == (".mute unsuccessful") or msg == (".mute unsuccessful logs"):
db["unsuccessful_logs"] = False
await message.channel.send("unsuccessful attending_logs are muted.")
elif msg == (".unmute unsuccessful") or msg == (".unmute unsuccessful logs"):
db["unsuccessful_logs"] = True
await message.channel.send("unsuccessful attending_logs are unmuted.")
elif msg == (".mute successful") or msg == (".mute successful logs"):
db["successful_logs"] = False
await message.channel.send("successful attending_log are muted. to unmute please enter: .unmute successful")
elif msg == (".unmute successful") or msg == (".unmute successful logs"):
db["successful_logs"] = True
await message.channel.send("successful attending_logs are unmuted.")
if db["responding"]:
if msg == ".list_users" or msg == ".list_user":
users = []
if "users" in db.keys():
users = list(db["users"])
await message.channel.send('Users: '+str(users))
elif msg.startswith('.inspire') or msg.startswith('.quote'):
quote = get_quote()
await message.channel.send(quote)
elif msg.startswith('.joke'):
if msg == '.joke':
joke = get_joke()
await message.channel.send("\n\n\nJoke:" + str(joke))
else:
try:
n = int(msg.split(' ')[1].strip())
except:
n = int(msg.split(' ')[2].strip())
jokes = list(get_jokes(n))
for joke in jokes:
await message.channel.send("\n\n\nJoke:" + str(joke))
elif msg.startswith('.riddle'):
riddle = get_riddles()
await message.channel.send(riddle)
elif msg.startswith('.puns'):
puns = get_puns()
await message.channel.send(puns)
elif msg.startswith('.memes') or msg.startswith('.knock knock'):
await message.channel.send('Sorry! ' + str(msg) + ' are comming very very soon!' )
elif msg.startswith(".new"):
encouraging_message = msg.split(".new ",1)[1].strip()
update_encouragements(encouraging_message)
await message.channel.send("New encouraging message added.")
elif msg.startswith(".del"):
encouragements = []
if "encouragements" in db.keys():
index = msg.split(".del",1)[1].strip()
delete_encouragment(index)
encouragements = list(db["encouragements"])
await message.channel.send(encouragements)
elif msg.startswith(".list"):
encouragements = []
if "encouragements" in db.keys():
encouragements = list(db["encouragements"])
await message.channel.send(encouragements)'''
keep_alive()
bot.run(os.environ['TOKEN'])
#client.loop.create_task(my_background_task())
#bot.run('token')  # unreachable after the blocking bot.run above; kept commented out
# ---- Next file: mwp_solver/module/Layer/transformer_layer.py (max-stack/MWP-SS-Metrics, MIT) ----
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: Yihuai Lan
# @Time: 2021/08/29 22:05:03
# @File: transformer_layer.py
import torch
import math
from torch import nn
from torch.nn import functional as F
from transformers.activations import gelu_new as gelu_bert
from module.Attention.multi_head_attention import MultiHeadAttention
from module.Attention.multi_head_attention import EPTMultiHeadAttention
from module.Attention.group_attention import GroupAttention
from utils.utils import clones
class TransformerLayer(nn.Module):
r"""Transformer Layer, including
a multi-head self-attention,
    an external multi-head self-attention layer (only for conditional decoder) and
a point-wise feed-forward layer.
Args:
self_padding_mask (torch.bool): the padding mask for the multi head attention sublayer.
self_attn_mask (torch.bool): the attention mask for the multi head attention sublayer.
external_states (torch.Tensor): the external context for decoder, e.g., hidden states from encoder.
external_padding_mask (torch.bool): the padding mask for the external states.
Returns:
feedforward_output (torch.Tensor): the output of the point-wise feed-forward sublayer, is the output of the transformer layer
"""
def __init__(self, embedding_size, ffn_size, num_heads, attn_dropout_ratio=0.0, attn_weight_dropout_ratio=0.0, ffn_dropout_ratio=0.0, with_external=False):
super(TransformerLayer, self).__init__()
self.multi_head_attention = MultiHeadAttention(embedding_size, num_heads, attn_weight_dropout_ratio)
self.feed_forward_1 = nn.Linear(embedding_size, ffn_size)
self.feed_forward_2 = nn.Linear(ffn_size, embedding_size)
self.attn_layer_norm = nn.LayerNorm(embedding_size, eps=1e-6)
self.ffn_layer_norm = nn.LayerNorm(embedding_size, eps=1e-6)
self.attn_dropout = nn.Dropout(attn_dropout_ratio)
self.ffn_dropout = nn.Dropout(ffn_dropout_ratio)
self.with_external = with_external
if self.with_external:
self.external_multi_head_attention = MultiHeadAttention(embedding_size, num_heads, attn_weight_dropout_ratio)
self.external_layer_norm = nn.LayerNorm(embedding_size)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.feed_forward_1.weight, std=0.02)
nn.init.normal_(self.feed_forward_2.weight, std=0.02)
nn.init.constant_(self.feed_forward_1.bias, 0.)
nn.init.constant_(self.feed_forward_2.bias, 0.)
def gelu(self, x):
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def forward(self, x, kv=None, self_padding_mask=None, self_attn_mask=None, external_states=None, external_padding_mask=None):
residual = x
if kv is None:
x, self_attn_weights = self.multi_head_attention(query=x, key=x, value=x, key_padding_mask=self_padding_mask, attn_mask=self_attn_mask)
else:
x, self_attn_weights = self.multi_head_attention(query=x, key=kv, value=kv, key_padding_mask=self_padding_mask, attn_mask=self_attn_mask)
x = self.attn_dropout(x)
x = self.attn_layer_norm(residual + x)
if self.with_external:
residual = x
x, external_attn_weights = self.external_multi_head_attention(query=x, key=external_states, value=external_states, key_padding_mask=external_padding_mask)
x = self.attn_dropout(x)
x = self.external_layer_norm(residual + x)
else:
external_attn_weights = None
residual = x
x = self.feed_forward_2(self.gelu(self.feed_forward_1(x)))
x = self.ffn_dropout(x)
x = self.ffn_layer_norm(residual + x)
return x, self_attn_weights, external_attn_weights
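# A minimal smoke-test sketch for TransformerLayer (hedged: the shapes below
# are assumptions inferred from forward(), not taken from the repo's tests):
#
#     layer = TransformerLayer(embedding_size=128, ffn_size=512, num_heads=8)
#     x = torch.rand(2, 10, 128)            # [batch, seq_len, embedding_size]
#     out, self_attn, ext_attn = layer(x)   # ext_attn is None (with_external=False)
#     assert out.shape == (2, 10, 128)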
class GAEncoderLayer(nn.Module):
"""Group attentional encoder layer, encoder is made up of self-attn and feed forward.
"""
def __init__(self, size, self_attn, feed_forward, dropout):
super(GAEncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"""Follow Figure 1 (left) for connections."""
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
#self.norm = LayerNorm(size)
self.norm = nn.LayerNorm(size, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"""Apply residual connection to any sublayer with the same size."""
return x + self.dropout(sublayer(self.norm(x)))
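# In symbols, this pre-norm residual computes:
#     out = x + Dropout(sublayer(LayerNorm(x)))
# which is the "norm is first" variant mentioned in the class docstring.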
class LayerNorm(nn.Module):
"""Construct a layernorm module (See citation for details)."""
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
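# Elementwise this is the standard layer norm with learnable gain/bias:
#     y = a_2 * (x - mean(x)) / (std(x) + eps) + b_2
# (note it divides by std + eps, not sqrt(var + eps) as nn.LayerNorm does).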
class PositionwiseFeedForward(nn.Module):
"""Implements FFN equation."""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class EPTTransformerLayer(nn.Module):
"""
Class for Transformer Encoder/Decoder layer (follows the paper, 'Attention is all you need')
"""
def __init__(self, hidden_dim = None, num_decoder_heads = None, layernorm_eps = None,intermediate_dim= None):
"""
Initialize TransformerLayer class
:param ModelConfig config: Configuration of this Encoder/Decoder layer
"""
super().__init__()
# Self-attention layer
self.attn = EPTMultiHeadAttention(hidden_dim=hidden_dim, num_heads=num_decoder_heads,
layernorm_eps=layernorm_eps, dropout=0.0)
# Source-Target attention layer
self.mem = EPTMultiHeadAttention(hidden_dim=hidden_dim, num_heads=num_decoder_heads,
layernorm_eps=layernorm_eps, dropout=0.0)
# Dropout for self-attention
self.dropout_attn = nn.Dropout(0.0)
# Dropout for source-target attention
self.dropout_mem = nn.Dropout(0.0)
# Dropout for expansion before outputting
self.dropout_expand = nn.Dropout(0.0)
# Dropout for outputting
self.dropout_out = nn.Dropout(0.0)
# Linear transformation layer for expansion (H -> I) where I = vector dimension of intermediate state
self.lin_expand = nn.Linear(hidden_dim, intermediate_dim)
# Linear transformation layer for output (I -> H)
self.lin_collapse = nn.Linear(intermediate_dim, hidden_dim)
# Post Layer Normalization for self-attention
self.norm_attn = nn.LayerNorm(hidden_dim, eps=layernorm_eps)
# Post Layer Normalization for source-target attention
self.norm_mem = nn.LayerNorm(hidden_dim, eps=layernorm_eps)
# Post Layer Normalization for outputting
self.norm_out = nn.LayerNorm(hidden_dim, eps=layernorm_eps)
def forward(self, target, target_ignorance_mask=None, target_attention_mask=None,
memory=None, memory_ignorance_mask=None):
"""
Forward-computation of Transformer Encoder/Decoder layers
Args:
target (torch.Tensor): FloatTensor indicating Sequence of target vectors. Shape [batch_size, target_length, hidden_size].
target_ignorance_mask (torch.Tensor): BoolTensor indicating Mask for target tokens that should be ignored. Shape [batch_size, target_length].
target_attention_mask (torch.Tensor) : BoolTensor indicating Target-to-target Attention mask for target tokens. Shape [target_length, target_length].
memory (torch.Tensor): FloatTensor indicating Sequence of source vectors. Shape [batch_size, sequence_length, hidden_size]. This can be None when you want to use this layer as an encoder layer.
memory_ignorance_mask (torch.Tensor): BoolTensor indicating Mask for source tokens that should be ignored. Shape [batch_size, sequence_length].
Returns:
torch.FloatTensor: Decoder hidden states per each target token, shape [batch_size, sequence_length, hidden_size].
"""
        # Compute self-attention
        attended = self.attn(query=target, attention_mask=target_attention_mask,
                             key_ignorance_mask=target_ignorance_mask)
        target = target + self.dropout_attn(attended)
        target = self.norm_attn(target)
        # Compute attention over the source (memory) with targets as queries.
        if memory is not None:
            attended = self.mem(query=target, key_value=memory, key_ignorance_mask=memory_ignorance_mask)
            target = target + self.dropout_mem(attended)
            target = self.norm_mem(target)
# Pass linear transformations
output = self.lin_collapse(self.dropout_expand(gelu_bert(self.lin_expand(target))))
target = target + self.dropout_out(output)
target = self.norm_out(target)
return target
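# A hedged usage sketch (dimension choices are illustrative assumptions):
#
#     layer = EPTTransformerLayer(hidden_dim=768, num_decoder_heads=12,
#                                 layernorm_eps=1e-12, intermediate_dim=3072)
#     tgt = torch.rand(2, 7, 768)       # decoder inputs
#     mem = torch.rand(2, 15, 768)      # encoder outputs
#     out = layer(tgt, memory=mem)      # -> shape [2, 7, 768]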
| 43.831111
| 205
| 0.688806
| 9,303
| 0.943318
| 0
| 0
| 0
| 0
| 0
| 0
| 3,247
| 0.329244
|
5897a699b6d877a1d06ab69aa68b4566e5a0268c
| 6,564
|
py
|
Python
|
tests/4_ckks_basics.py
|
TimTam725/SEAL-true
|
87c3f3f345b7dc5f49380556c55a85a7efa45bb6
|
[
"MIT"
] | null | null | null |
tests/4_ckks_basics.py
|
TimTam725/SEAL-true
|
87c3f3f345b7dc5f49380556c55a85a7efa45bb6
|
[
"MIT"
] | null | null | null |
tests/4_ckks_basics.py
|
TimTam725/SEAL-true
|
87c3f3f345b7dc5f49380556c55a85a7efa45bb6
|
[
"MIT"
] | null | null | null |
import math
from seal import *
from seal_helper import *
def example_ckks_basics():
print_example_banner("Example: CKKS Basics")
parms = EncryptionParameters(scheme_type.CKKS)
poly_modulus_degree = 8192
parms.set_poly_modulus_degree(poly_modulus_degree)
parms.set_coeff_modulus(CoeffModulus.Create(
poly_modulus_degree, [60, 40, 40, 60]))
scale = pow(2.0, 40)
context = SEALContext.Create(parms)
print_parameters(context)
keygen = KeyGenerator(context)
public_key = keygen.public_key()
secret_key = keygen.secret_key()
relin_keys = keygen.relin_keys()
encryptor = Encryptor(context, public_key)
evaluator = Evaluator(context)
decryptor = Decryptor(context, secret_key)
encoder = CKKSEncoder(context)
slot_count = encoder.slot_count()
print("Number of slots: " + str(slot_count))
inputs = DoubleVector()
curr_point = 0.0
step_size = 1.0 / (slot_count - 1)
for i in range(slot_count):
inputs.append(curr_point)
curr_point += step_size
print("Input vector: ")
print_vector(inputs, 3, 7)
print("Evaluating polynomial PI*x^3 + 0.4x + 1 ...")
'''
We create plaintexts for PI, 0.4, and 1 using an overload of CKKSEncoder::encode
that encodes the given floating-point value to every slot in the vector.
'''
plain_coeff3 = Plaintext()
plain_coeff1 = Plaintext()
plain_coeff0 = Plaintext()
encoder.encode(3.14159265, scale, plain_coeff3)
encoder.encode(0.4, scale, plain_coeff1)
encoder.encode(1.0, scale, plain_coeff0)
x_plain = Plaintext()
print("-" * 50)
print("Encode input vectors.")
encoder.encode(inputs, scale, x_plain)
x1_encrypted = Ciphertext()
encryptor.encrypt(x_plain, x1_encrypted)
x3_encrypted = Ciphertext()
print("-" * 50)
print("Compute x^2 and relinearize:")
evaluator.square(x1_encrypted, x3_encrypted)
evaluator.relinearize_inplace(x3_encrypted, relin_keys)
print(" + Scale of x^2 before rescale: " +
"%.0f" % math.log(x3_encrypted.scale(), 2) + " bits")
print("-" * 50)
print("Rescale x^2.")
evaluator.rescale_to_next_inplace(x3_encrypted)
print(" + Scale of x^2 after rescale: " +
"%.0f" % math.log(x3_encrypted.scale(), 2) + " bits")
print("-" * 50)
print("Compute and rescale PI*x.")
x1_encrypted_coeff3 = Ciphertext()
evaluator.multiply_plain(x1_encrypted, plain_coeff3, x1_encrypted_coeff3)
print(" + Scale of PI*x before rescale: " +
"%.0f" % math.log(x1_encrypted_coeff3.scale(), 2) + " bits")
evaluator.rescale_to_next_inplace(x1_encrypted_coeff3)
print(" + Scale of PI*x after rescale: " +
"%.0f" % math.log(x1_encrypted_coeff3.scale(), 2) + " bits")
print("-" * 50)
print("Compute, relinearize, and rescale (PI*x)*x^2.")
evaluator.multiply_inplace(x3_encrypted, x1_encrypted_coeff3)
evaluator.relinearize_inplace(x3_encrypted, relin_keys)
print(" + Scale of PI*x^3 before rescale: " +
"%.0f" % math.log(x3_encrypted.scale(), 2) + " bits")
evaluator.rescale_to_next_inplace(x3_encrypted)
print(" + Scale of PI*x^3 after rescale: " +
"%.0f" % math.log(x3_encrypted.scale(), 2) + " bits")
print("-" * 50)
print("Compute and rescale 0.4*x.")
evaluator.multiply_plain_inplace(x1_encrypted, plain_coeff1)
print(" + Scale of 0.4*x before rescale: " +
"%.0f" % math.log(x1_encrypted.scale(), 2) + " bits")
evaluator.rescale_to_next_inplace(x1_encrypted)
print(" + Scale of 0.4*x after rescale: " +
"%.0f" % math.log(x1_encrypted.scale(), 2) + " bits")
print()
print("-" * 50)
print("Parameters used by all three terms are different.")
print(" + Modulus chain index for x3_encrypted: " +
str(context.get_context_data(x3_encrypted.parms_id()).chain_index()))
print(" + Modulus chain index for x1_encrypted: " +
str(context.get_context_data(x1_encrypted.parms_id()).chain_index()))
print(" + Modulus chain index for x1_encrypted: " +
str(context.get_context_data(plain_coeff0.parms_id()).chain_index()))
print()
print("-" * 50)
print("The exact scales of all three terms are different:")
print(" + Exact scale in PI*x^3: " + "%.10f" % x3_encrypted.scale())
print(" + Exact scale in 0.4*x: " + "%.10f" % x1_encrypted.scale())
print(" + Exact scale in 1: " + "%.10f" % plain_coeff0.scale())
print("-" * 50)
print("Normalize scales to 2^40.")
    # set_scale(): this function needs to be added to seal/ciphertext.h line 632
x3_encrypted.set_scale(pow(2.0, 40))
x1_encrypted.set_scale(pow(2.0, 40))
'''
We still have a problem with mismatching encryption parameters. This is easy
to fix by using traditional modulus switching (no rescaling). CKKS supports
modulus switching just like the BFV scheme, allowing us to switch away parts
of the coefficient modulus when it is simply not needed.
'''
print("-" * 50)
print("Normalize encryption parameters to the lowest level.")
last_parms_id = x3_encrypted.parms_id()
evaluator.mod_switch_to_inplace(x1_encrypted, last_parms_id)
evaluator.mod_switch_to_inplace(plain_coeff0, last_parms_id)
'''
All three ciphertexts are now compatible and can be added.
'''
print("-" * 50)
print("Compute PI*x^3 + 0.4*x + 1.")
encrypted_result = Ciphertext()
evaluator.add(x3_encrypted, x1_encrypted, encrypted_result)
evaluator.add_plain_inplace(encrypted_result, plain_coeff0)
'''
First print the true result.
'''
plain_result = Plaintext()
print("-" * 50)
print("Decrypt and decode PI*x^3 + 0.4x + 1.")
print(" + Expected result:")
true_result = []
for x in inputs:
true_result.append((3.14159265 * x * x + 0.4) * x + 1)
print_vector(true_result, 3, 7)
'''
Decrypt, decode, and print the result.
'''
decryptor.decrypt(encrypted_result, plain_result)
result = DoubleVector()
encoder.decode(plain_result, result)
print(" + Computed result ...... Correct.")
print_vector(result, 3, 7)
'''
While we did not show any computations on complex numbers in these examples,
the CKKSEncoder would allow us to have done that just as easily. Additions
and multiplications of complex numbers behave just as one would expect.
'''
if __name__ == '__main__':
example_ckks_basics()
| 35.673913
| 84
| 0.655088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,289
| 0.34872
|
5899ee9d789144345b8642bab6672fe498055f42
| 2,422
|
py
|
Python
|
FigureTable/NeuroPathRegions/barplots.py
|
vkola-lab/multi-task
|
6a61db4223e1812744f13028747b07e2f840cc0b
|
[
"MIT"
] | 1
|
2021-12-19T01:45:01.000Z
|
2021-12-19T01:45:01.000Z
|
FigureTable/NeuroPathRegions/barplots.py
|
vkola-lab/multi-task
|
6a61db4223e1812744f13028747b07e2f840cc0b
|
[
"MIT"
] | null | null | null |
FigureTable/NeuroPathRegions/barplots.py
|
vkola-lab/multi-task
|
6a61db4223e1812744f13028747b07e2f840cc0b
|
[
"MIT"
] | 1
|
2022-03-14T18:30:23.000Z
|
2022-03-14T18:30:23.000Z
|
from correlate import *
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rc, rcParams
rc('axes', linewidth=1)
rc('font', weight='bold', size=10)
def barplots(prefixes, regions, stains, corre, error, name, folder, ylim):
for stain in stains:
barplot(prefixes, regions, stain, corre, error, name, folder, ylim)
def barplot(prefixes, regions, stain, corre, error, name, folder, ylim):
colors = ['#bfe2e3', '#69869c', '#36896e', '#c22e00', '#c6d645', '#ffd3b6', '#b2b2b2', '#4724a9',
'#9bc84d', '#7141ae', '#d2a782', '#933b61', '#435299', '#d88770', '#765aa8', '#719795']
Val, Std = [], []
for i, prefix in enumerate(prefixes):
val, std = corre[prefix][stain], error[prefix][stain]
Val.append(val)
Std.append(std)
fig, ax = plt.subplots(dpi=300, figsize=(6, 3))
index = [i for i in range(len(regions))]
ax.bar(index, Val, yerr=Std, capsize=2, color=colors[:len(prefixes)])
ax.set_ylabel('Spearman\'s rank correlation', fontweight='bold')
ax.set_ylim(ylim[0]-0.05, ylim[1]+0.05)
ax.set_xticks(index)
ax.set_xticklabels(regions, rotation=45, ha="right")
ax.grid(which='major', axis='both', linestyle='--')
plt.savefig(folder + 'bar_{}_{}.png'.format(stain, name), bbox_inches='tight')
plt.close()
if __name__ == "__main__":
years = 2
layername = 'block2BN'
time_threshold, type = 365*years, 'COG'
folder = type + '_correlation_{}_years/'.format(years)
if not os.path.exists(folder):
os.mkdir(folder)
interval = file_interval_info(type)
y_lim = [0, 0]
corre = collections.defaultdict(dict)
error = collections.defaultdict(dict)
pool = [[0, prefixes[i], regions[i]] for i in range(len(regions))]
for i, region in enumerate(prefixes):
for stain in stains:
corr, std = get_correlation(region + '_' + stain, prefix_idx[region], time_threshold, interval, folder, type, layername, missing=0)
corre[region][stain] = corr
error[region][stain] = 0
y_lim[1] = max(y_lim[1], corr)
y_lim[0] = min(y_lim[0], corr)
pool[i][0] -= corr
pool.sort()
prefixes = [p[1] for p in pool]
regions = [p[2] for p in pool]
barplots(prefixes, regions, stains, corre, error, '{}days_{}shap_{}'.format(time_threshold, type, layername), folder, y_lim)
| 36.69697
| 143
| 0.627168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 314
| 0.129645
|
589a8463ce8f13fdbedded623d8ccbad3c17d953
| 4,549
|
py
|
Python
|
examples/distributed_autofaiss_n_indices.py
|
Rexiome/autofaiss
|
79d7c396819ffd6859edde17c6958c1c3338b29b
|
[
"Apache-2.0"
] | null | null | null |
examples/distributed_autofaiss_n_indices.py
|
Rexiome/autofaiss
|
79d7c396819ffd6859edde17c6958c1c3338b29b
|
[
"Apache-2.0"
] | null | null | null |
examples/distributed_autofaiss_n_indices.py
|
Rexiome/autofaiss
|
79d7c396819ffd6859edde17c6958c1c3338b29b
|
[
"Apache-2.0"
] | null | null | null |
"""
An example of running autofaiss by pyspark to produce N indices.
You need to install pyspark before using the following example.
"""
from typing import Dict
import faiss
import numpy as np
from autofaiss import build_index
# You'd better create a spark session before calling build_index,
# otherwise, a spark session would be created by autofaiss with the least configuration.
_, index_path2_metric_infos = build_index(
embeddings="hdfs://root/path/to/your/embeddings/folder",
distributed="pyspark",
file_format="parquet",
temporary_indices_folder="hdfs://root/tmp/distributed_autofaiss_indices",
current_memory_available="10G",
max_index_memory_usage="100G",
nb_indices_to_keep=10,
)
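# build_index returns a pair; the second element (kept above) maps each of the
# N produced index file paths to its metric infos, so sorting its keys below
# yields the ordered list of index paths on HDFS.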
index_paths = sorted(index_path2_metric_infos.keys())
###########################################
# Use case 1: merging 10 indices into one #
###########################################
merged = faiss.read_index(index_paths[0])
for rest_index_file in index_paths[1:]:
index = faiss.read_index(rest_index_file)
faiss.merge_into(merged, index, shift_ids=False)
with open("merged-knn.index", "wb") as f:
faiss.write_index(merged, faiss.PyCallbackIOWriter(f.write))
########################################
# Use case 2: searching from N indices #
########################################
K, DIM, all_distances, all_ids, NB_QUERIES = 5, 512, [], [], 2
queries = faiss.rand((NB_QUERIES, DIM))
for rest_index_file in index_paths:
index = faiss.read_index(rest_index_file)
distances, ids = index.search(queries, k=K)
all_distances.append(distances)
all_ids.append(ids)
dists_arr = np.stack(all_distances, axis=1).reshape(NB_QUERIES, -1)
knn_ids_arr = np.stack(all_ids, axis=1).reshape(NB_QUERIES, -1)
sorted_k_indices = np.argsort(-dists_arr)[:, :K]
sorted_k_dists = np.take_along_axis(dists_arr, sorted_k_indices, axis=1)
sorted_k_ids = np.take_along_axis(knn_ids_arr, sorted_k_indices, axis=1)
print(f"{K} nearest distances: {sorted_k_dists}")
print(f"{K} nearest ids: {sorted_k_ids}")
############################################
# Use case 3: on disk merging of N indices #
############################################
# using faiss.merge_ondisk (https://github.com/facebookresearch/faiss/blob/30abcd6a865afef7cf86df7e8b839a41b5161505/contrib/ondisk.py )
# https://github.com/facebookresearch/faiss/blob/151e3d7be54aec844b6328dc3e7dd0b83fcfa5bc/demos/demo_ondisk_ivf.py
# to merge indices on disk without using memory
# this is useful in particular to use a very large index with almost no memory usage.
from faiss.contrib.ondisk import merge_ondisk
import faiss
block_fnames = index_paths
empty_index = faiss.read_index(block_fnames[0], faiss.IO_FLAG_MMAP)
empty_index.ntotal = 0
merge_ondisk(empty_index, block_fnames, "merged_index.ivfdata")
faiss.write_index(empty_index, "populated.index")
pop = faiss.read_index("populated.index", faiss.IO_FLAG_ONDISK_SAME_DIR)
########################################################
# Use case 4: use N indices using HStackInvertedLists #
########################################################
# This allows using N indices as a single combined index
# without changing anything on disk or loading anything to memory
# it works well but it's slower than first using merge_ondisk,
# because every list probed at search time has to be gathered from
# N separate inverted-list files
import os
class CombinedIndex:
"""
combines a set of inverted lists into a hstack
adds these inverted lists to an empty index that contains
the info on how to perform searches
"""
def __init__(self, invlist_fnames):
ilv = faiss.InvertedListsPtrVector()
for fname in invlist_fnames:
if os.path.exists(fname):
index = faiss.read_index(fname, faiss.IO_FLAG_MMAP)
index_ivf = faiss.extract_index_ivf(index)
il = index_ivf.invlists
index_ivf.own_invlists = False
else:
raise FileNotFoundError
ilv.push_back(il)
self.big_il = faiss.HStackInvertedLists(ilv.size(), ilv.data())
ntotal = self.big_il.compute_ntotal()
self.index = faiss.read_index(invlist_fnames[0], faiss.IO_FLAG_MMAP)
index_ivf = faiss.extract_index_ivf(self.index)
index_ivf.replace_invlists(self.big_il, True)
index_ivf.ntotal = self.index.ntotal = ntotal
def search(self, x, k):
D, I = self.index.search(x, k)
return D, I
index = CombinedIndex(index_paths)
index.search(queries, K)
| 34.992308
| 135
| 0.675533
| 1,103
| 0.242471
| 0
| 0
| 0
| 0
| 0
| 0
| 1,921
| 0.422291
|
589dcbc08792dc79d40776858af24dca67ad7bfe
| 4,170
|
py
|
Python
|
rbkcli/core/handlers/callback.py
|
rubrikinc/rbkcli
|
62bbb20d15c78d2554d7258bdae655452ac826c7
|
[
"MIT"
] | 10
|
2019-07-23T13:13:16.000Z
|
2022-03-04T17:48:10.000Z
|
rbkcli/core/handlers/callback.py
|
rubrikinc/rbkcli
|
62bbb20d15c78d2554d7258bdae655452ac826c7
|
[
"MIT"
] | 19
|
2019-08-22T06:23:09.000Z
|
2021-12-28T04:04:52.000Z
|
rbkcli/core/handlers/callback.py
|
rubrikinc/rbkcli
|
62bbb20d15c78d2554d7258bdae655452ac826c7
|
[
"MIT"
] | 5
|
2019-08-06T14:29:35.000Z
|
2021-06-17T20:35:17.000Z
|
"""Callback module for rbkcli."""
import json
from rbkcli.core.handlers.inputs import InputHandler
from rbkcli.base.essentials import DotDict, RbkcliException
from rbkcli.core.handlers import ApiTargetTools
from rbkcli.core.handlers.outputs import OutputHandler
class CallBack(ApiTargetTools):
"""Class to provide rbkcli internal api calls."""
def __init__(self, operations, base_kit):
"""Initialize callback class."""
ApiTargetTools.__init__(self, base_kit)
self.operations = operations
self.base_kit = base_kit
self.validator = InputHandler(self.base_kit, self.operations)
self.formatter = OutputHandler(base_kit, self.operations)
def parseit(self, args):
"""Parse arguments provided."""
self.args = args
if not isinstance(self.args, list):
self.args = self.args.replace('rbkcli ', '')
new_args = []
self.args = self.args.split('"')
if len(self.args) % 2 != 0:
for arg in enumerate(self.args):
newarg = arg[1]
if arg[0] % 2 == 0:
newarg = arg[1].split()
if isinstance(newarg, list):
new_args = new_args + newarg
else:
new_args.append(newarg)
        else:
            print('Error ## Unbalanced quotes in the provided arguments.')
self.args = new_args
self.request = self.base_kit.parser.parse_args(self.args)
self.request = vars(self.request)
self.request = self.base_kit.parser.un_list(self.request)
return self.request, self.args
def structreit(self, args, request):
"""Reestructure arguments provided."""
self.args = args
self.request = {}
requet_1 = {}
for key in request.keys():
self.request[key] = request[key]
requet_1[key] = request[key]
# Structure the request to the needed format.
stct_request = self.base_kit.parser.create_request_structure(requet_1, self.args)
# Pass data to dot dictionary
self.stct_request = DotDict()
for key in stct_request.keys():
self.stct_request[key] = stct_request[key]
# Normalizing request dictionary
self.stct_request.endpoint = ' '.join(self.stct_request.api_endpoint)
self.stct_request.formatt = 'raw'
self.stct_request.param = self.stct_request.query
self.stct_request.data = self.stct_request.parameter
self.stct_request.structured = True
return self.stct_request
def callit(self, stct_request, args=None):
"""Call endpoint provided with arguments."""
if 'structured' not in stct_request.keys():
if args is None:
args = []
stct_request = self.structreit(args, stct_request)
self.stct_request = stct_request
self.req = self.validator.validate(self.stct_request)
api_result = self.operations.execute(self.req)
        if '{' in api_result.text or '[' in api_result.text:
            try:
                self.call_result = json.loads(api_result.text)
            except ValueError:
                # Braces present but the body is not valid JSON; keep raw text.
                self.call_result = {'result_text': api_result.text}
        else:
            self.call_result = {'result_text': api_result.text}
return self.call_result
def call_back(self, args):
"""Perform same level of parsing (even CLI) as any other request."""
self.request, self.args = self.parseit(args)
self.stct_request = self.structreit(self.args, self.request)
result = DotDict()
result.text = self.callit(self.stct_request)
result.status_code = 200
result.text = json.dumps(result.text, indent=2)
return self.formatter.outputfy(self.req, result)
def call_back_text(self, args):
"""Returns dict directly instead of API result."""
result = self.call_back(args)
return json.loads(result.text)
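# A hedged usage sketch (the argument string is illustrative; CallBack expects
# the operations and base_kit objects that the ApiTarget setup normally wires):
#
#     cbk = CallBack(operations, base_kit)
#     result_dict = cbk.call_back_text('cluster me --select id,version')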
| 37.232143
| 90
| 0.591367
| 3,894
| 0.933813
| 0
| 0
| 0
| 0
| 0
| 0
| 562
| 0.134772
|
589e413db07bdd7cf6dcd6e3ab66ffc0b716eb5c
| 1,001
|
py
|
Python
|
portrait/webapp/migrations/0001_initial.py
|
andela-sjames/Portrait
|
83074e3d16d8009a71b674b6859f7c276b8d6537
|
[
"MIT"
] | null | null | null |
portrait/webapp/migrations/0001_initial.py
|
andela-sjames/Portrait
|
83074e3d16d8009a71b674b6859f7c276b8d6537
|
[
"MIT"
] | null | null | null |
portrait/webapp/migrations/0001_initial.py
|
andela-sjames/Portrait
|
83074e3d16d8009a71b674b6859f7c276b8d6537
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-16 23:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SocialProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('provider', models.SmallIntegerField(choices=[('1', 'Facebook')])),
('social_id', models.CharField(max_length=255, unique=True)),
('photo', models.TextField(blank=True)),
('extra_data', models.TextField(blank=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='social_profile', to=settings.AUTH_USER_MODEL)),
],
),
]
| 34.517241
| 152
| 0.631369
| 842
| 0.841159
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.144855
|
589e51c361515efc2c983bdbd855621e6ab93aac
| 9,950
|
py
|
Python
|
src/greenbudget/app/group/serializers.py
|
nickmflorin/django-proper-architecture-testing
|
da7c4019697e85f921695144375d2f548f1e98ad
|
[
"MIT"
] | null | null | null |
src/greenbudget/app/group/serializers.py
|
nickmflorin/django-proper-architecture-testing
|
da7c4019697e85f921695144375d2f548f1e98ad
|
[
"MIT"
] | null | null | null |
src/greenbudget/app/group/serializers.py
|
nickmflorin/django-proper-architecture-testing
|
da7c4019697e85f921695144375d2f548f1e98ad
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers, exceptions
from greenbudget.lib.rest_framework_utils.serializers import (
EnhancedModelSerializer)
from greenbudget.app.account.models import BudgetAccount, TemplateAccount
from greenbudget.app.tagging.serializers import ColorField
from greenbudget.app.subaccount.models import (
BudgetSubAccount, TemplateSubAccount)
from .models import (
Group,
BudgetAccountGroup,
TemplateAccountGroup,
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
class GroupSerializer(EnhancedModelSerializer):
id = serializers.IntegerField(read_only=True)
name = serializers.CharField(
required=False,
allow_blank=False,
allow_null=False
)
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
updated_by = serializers.PrimaryKeyRelatedField(read_only=True)
created_at = serializers.DateTimeField(read_only=True)
updated_at = serializers.DateTimeField(read_only=True)
estimated = serializers.FloatField(read_only=True)
color = ColorField(content_type_model=Group, required=False)
class Meta:
model = Group
fields = (
'id', 'name', 'created_by', 'created_at', 'updated_by',
'updated_at', 'color', 'estimated')
def validate_name(self, value):
parent = self.context.get('parent')
if parent is None:
parent = self.instance.parent
validator = serializers.UniqueTogetherValidator(
queryset=parent.groups.all(),
fields=('name', ),
)
validator({'name': value}, self)
return value
def create(self, *args, **kwargs):
"""
Overridden to perform cleanup of empty :obj:`Group` instances.
When a :obj:`Group` is created, it can be created with children that
already belong to another :obj:`Group`. When this happens, the children
are removed from the other :obj:`Group` and included in the new
:obj:`Group`. We want to perform cleanup, and remove empty :obj:`Group`
instances that have no children.
This is partially accomplished with :obj:`track_model`. However, when
the above situation occurs, the :obj:`track_model` will not work,
because both of these implementations will fail:
(1) :obj:`track_model` on :obj:`Group`
@track_model(on_field_change_hooks={'parent': remove_empty_groups})
class BudgetAccountGroup(Group):
...
parent = models.ForeignKey(
to='budget.Budget',
...
)
The mechanics of :obj:`track_model` responsible for removing the
:obj:`Group` if it has no more children will not be triggered because
:obj:`track_model` will only trigger when the :obj:`Group` with no more
children itself is updated. In this situation, we are not updating the
:obj:`Group` that has no more children, we are updating the new
:obj:`Group` that will have the children that previously existed on the
old :obj:`Group`.
(2) :obj:`track_model` on :obj:`Account` or :obj:`SubAccount`
class BudgetAccountGroup(Group):
...
parent = models.ForeignKey(
to='budget.Budget',
...
)
@track_model(on_field_change_hooks={'group': remove_empty_groups})
class BudgetAccount(Account):
group = models.ForeignKey(
to='group.BudgetAccountGroup',
related_name='children'
)
Here, we cannot use the :obj:`track_model` on the :obj:`Account` or
:obj:`SubAccount` models to remove empty groups after the group assigned
to those models changes because for DRF we are updating the models via
the `children` attribute, which is the reverse FK accessor, and
apparently that does not trigger the post_save hooks on the primary
model:
PATCH /v1/groups/<pk>/ { children: [...] } -> Updating the `children`
on the :obj:`BudgetAccountGroup` (i.e. updating a reverse FK accessor)
will not trigger the `post_save` on :obj:`BudgetAccount`.
For the above reason, we need to address this problem without the
:obj:`track_model` behavior.
(3) `post_save` signals directly on :obj:`Group`
We cannot accomplish this at the database level (or model level) via
post_save signals. Consider we try to accomplish this with the
following signal:
@dispatch.receiver(post_save, sender=BudgetSubAccountGroup)
def remove_empty_groups(instance, **kwargs):
for sibling_group in instance.parent.groups.all():
if sibling_group.children.count() == 0:
sibling_group.delete()
If we were to do this, we would run into issues creating instances of
:obj:`Group`. Since the `children` field is a reverse FK accessor,
the :obj:`Group` has to be created before an entity can be assigned
a group. That means a :obj:`Group` will at times have no children just
        before children are assigned - and we cannot have those groups
        incidentally deleted before children are assigned.
For this reason, we need to accomplish this behavior at the request/
response interface - which is why we override this method here.
TODO:
----
We should investigate whether or not there is a better way around this
problem. At the very least, we should develop CRON tasks that should
remove remnant empty groups.
"""
instance = super().create(*args, **kwargs)
for sibling_group in instance.parent.groups.all():
if sibling_group != instance \
and sibling_group.children.count() == 0:
sibling_group.delete()
return instance
def update(self, *args, **kwargs):
"""
Overridden to perform cleanup of empty :obj:`Group` instances. See
docstring in `.create()` method for a more detailed explanation.
"""
instance = super().update(*args, **kwargs)
siblings = [
sib for sib in instance.parent.groups.all()
if sib != instance
]
for sibling_group in siblings:
if sibling_group.children.count() == 0:
sibling_group.delete()
return instance
class AbstractAccountGroupSerializer(GroupSerializer):
class Meta:
abstract = True
def validate_children(self, value):
parent = self.context.get('parent')
if parent is None:
parent = self.instance.parent
for child in value:
if child.budget != parent:
raise exceptions.ValidationError(
"The %s %s does not belong to the same %s "
"that the Group does (%s)." % (
type(child).__name__, child.pk, type(parent).__name__,
parent.pk)
)
return value
class BudgetAccountGroupSerializer(AbstractAccountGroupSerializer):
actual = serializers.FloatField(read_only=True)
variance = serializers.FloatField(read_only=True)
children = serializers.PrimaryKeyRelatedField(
many=True,
required=True,
queryset=BudgetAccount.objects.active()
)
class Meta(GroupSerializer.Meta):
model = BudgetAccountGroup
fields = GroupSerializer.Meta.fields + (
'children', 'actual', 'variance')
class TemplateAccountGroupSerializer(AbstractAccountGroupSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True,
required=True,
queryset=TemplateAccount.objects.active()
)
class Meta(GroupSerializer.Meta):
model = TemplateAccountGroup
fields = GroupSerializer.Meta.fields + ('children', )
class AbstractSubAccountGroupSerializer(GroupSerializer):
class Meta:
abstract = True
def validate_children(self, value):
parent = self.context.get('parent')
if parent is None:
parent = self.instance.parent
for child in value:
if child.parent != parent:
raise exceptions.ValidationError(
"The %s %s does not belong to the same %s "
"that the Group does (%s)." % (
type(child).__name__, child.pk, type(parent).__name__,
parent.pk)
)
# Is this check necessary? Would this otherwise be constrained
# by model restrictions?
elif child.budget != parent.budget:
raise exceptions.ValidationError(
"The %s %s does not belong to the same %s "
"that the Group does (%s)." % (
type(child).__name__, child.pk,
type(child.budget).__name__, parent.pk)
)
return value
class BudgetSubAccountGroupSerializer(AbstractSubAccountGroupSerializer):
actual = serializers.FloatField(read_only=True)
variance = serializers.FloatField(read_only=True)
children = serializers.PrimaryKeyRelatedField(
many=True,
required=True,
queryset=BudgetSubAccount.objects.active()
)
class Meta(GroupSerializer.Meta):
model = BudgetSubAccountGroup
fields = GroupSerializer.Meta.fields + (
'children', 'actual', 'variance')
class TemplateSubAccountGroupSerializer(AbstractSubAccountGroupSerializer):
children = serializers.PrimaryKeyRelatedField(
many=True,
required=True,
queryset=TemplateSubAccount.objects.active()
)
class Meta(GroupSerializer.Meta):
model = TemplateSubAccountGroup
fields = GroupSerializer.Meta.fields + ('children', )
| 37.689394
| 80
| 0.63206
| 9,420
| 0.946734
| 0
| 0
| 0
| 0
| 0
| 0
| 4,643
| 0.466633
|
589f45393545cfab296cec7cb3f2cc4a519dd5d0
| 570
|
py
|
Python
|
vaccine_tracker/email.py
|
Shishir-Bondre/content-alerts
|
37be34c781338ef3968e1257f5c0d1148bb022e5
|
[
"Apache-2.0"
] | null | null | null |
vaccine_tracker/email.py
|
Shishir-Bondre/content-alerts
|
37be34c781338ef3968e1257f5c0d1148bb022e5
|
[
"Apache-2.0"
] | null | null | null |
vaccine_tracker/email.py
|
Shishir-Bondre/content-alerts
|
37be34c781338ef3968e1257f5c0d1148bb022e5
|
[
"Apache-2.0"
] | null | null | null |
def email_subject(center_name):
return f"[Urgent Reminder] Vaccine slot is now available at {center_name}"
def email_body(email_data):
return f"Hi, \n" \
f"Vaccine slot is available for below centers \n " \
f"Center name and available data \n {email_data} \n" \
f"Please register at cowin website https://cowin.gov.in \n" \
f"Have a lovely day ahead! :) \n" \
f"Thanks, \n" \
f"Shishir Bondre \n" \
f"To unsubscribe click here http://3.6.160.147:8000/vaccine_tracker/unsubscribe/"
| 38
| 92
| 0.615789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 385
| 0.675439
|
58a0bffac08dce61ed79b44c63defce1adefa9d1
| 12,103
|
py
|
Python
|
objects/CSCG/_2d/mesh/domain/inputs/base.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | 1
|
2020-10-14T12:48:35.000Z
|
2020-10-14T12:48:35.000Z
|
objects/CSCG/_2d/mesh/domain/inputs/base.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
objects/CSCG/_2d/mesh/domain/inputs/base.py
|
mathischeap/mifem
|
3242e253fb01ca205a76568eaac7bbdb99e3f059
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
INTRO
@author: Yi Zhang. Created on Tue May 21 11:57:52 2019
Department of Aerodynamics
Faculty of Aerospace Engineering
TU Delft,
Delft, the Netherlands
"""
import inspect
from screws.freeze.main import FrozenOnly
from typing import Dict, Union
import numpy as np
from screws.decorators.classproperty.main import classproperty
class DomainInputBase(FrozenOnly):
def __init__(self, domain_name='domain without name'):
self.domain_name = domain_name
self._ndim_ = 2
self._region_corner_coordinates_ = None
self._region_edge_types_ = None
self._boundary_region_edges_ = None
self._region_interpolators_ = None
self._boundary_names_ = None
self._periodic_boundary_pairs_ = dict()
self._periodic_boundaries_ = set()
self._region_sequence_ = None
self._region_type_wr2_metric_ = None
self._internal_parameters_ = list()
INSP = inspect.getfullargspec(self.__init__)
self.__arg_names___ = INSP[0][1:]
assert INSP[1] is None and INSP[2] is None, "A domain input class can not have *args and **kwargs."
assert len(INSP[3]) == len(self.__arg_names___), "A domain input class can only have kwargs."
self._freeze_self_()
@property
def internal_parameters(self):
"""Internal parameters only affect internal metric, does not affect the domain shape."""
return self._internal_parameters_
@internal_parameters.setter
def internal_parameters(self, internal_parameters):
if isinstance(internal_parameters, list):
pass
elif isinstance(internal_parameters, str):
internal_parameters = [internal_parameters,]
elif isinstance(internal_parameters, (tuple, set)):
internal_parameters = list(internal_parameters)
else:
raise NotImplementedError(f"internal_parameters = {internal_parameters} not acceptable.")
assert isinstance(internal_parameters, list), \
f"please put internal_parameters({internal_parameters}) in list."
if len(internal_parameters) > 0:
assert all([ip in self.__arg_names___ for ip in internal_parameters])
self._internal_parameters_ = internal_parameters
@property
def domain_name(self):
""" Mesh name. """
return self._domain_name_
@domain_name.setter
def domain_name(self, dn):
assert isinstance(dn, str), " <DomainInput> : domain name needs to be str."
self._domain_name_ = dn
@property
def ndim(self):
""" dimensions n. """
return self._ndim_
@property
def region_interpolators(self):
return self._region_interpolators_
@region_interpolators.setter
def region_interpolators(self, region_interpolators):
self._region_interpolators_ = region_interpolators
def ___PRIVATE_region_name_requirement_checker___(self, regionDict):
"""
Requirements:
1). must be str
2). != domain name.
3). length > 2
4). Starts with 'R:'
5). can only have letters and _
"""
for R in regionDict:
assert isinstance(R, str), f"region name={R} wrong, need be str!"
assert R != self.domain_name, f"region name == domain.name! wrong!"
assert len(R) > 2, f"region name = {R} too short, must > 2."
assert R[0:2] == 'R:', f"regions name = {R} does not start with 'R:'"
R2 = R[2:].replace('_', '')
            assert R2.isalpha(), f"region_name = {R} wrong, can only contain letters and '_' after the 'R:' prefix."
@property
def region_corner_coordinates(self):
"""
        Store the coordinates of each region's four corners.
Returns
-------
region_coordinates : dict
A dict whose keys represent the regions names, and values represent
the coordinates of regions corner points.
In 2D: (UL, DL, UR, DR).
L: Left, R: Right, U: Upper, D: Down
"""
return self._region_corner_coordinates_
@region_corner_coordinates.setter
def region_corner_coordinates(self, _dict_):
assert isinstance(_dict_, dict), " <DomainInput> : region_coordinates needs to be a dict."
self.___PRIVATE_region_name_requirement_checker___(_dict_)
for R in _dict_:
assert np.shape(_dict_[R])[0] == 4, \
" <DomainInput> : region_coordinates[{}]={} is wrong.".format(R, _dict_[R])
self._region_corner_coordinates_ = _dict_
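    # A hedged example of the expected shape (names/points are illustrative):
    #
    #     self.region_corner_coordinates = {
    #         'R:left': ((0, 1), (0, 0), (0.5, 1), (0.5, 0)),  # (UL, DL, UR, DR)
    #     }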
@property
def region_edge_types(self):
"""
Store the regions' boundaries' types.
Returns
-------
region_boundary_type : dict
            A dict that contains the region boundary info. Each key names a
            region edge and each value describes it: value[0] gives the type,
            and value[1:] carries the remaining info to be parsed into full
            information. Region edges that are not mentioned default to the
            type ('plane',).
Notice that the value will be sent to edge_geometry. And
if this info (value[1:]) to be parsed, it will be done there in
edge_geometry. And the value is stored in the
`edge_geometry.edge_types`.
"""
return self._region_edge_types_
@region_edge_types.setter
def region_edge_types(self, _dict_):
assert self.region_corner_coordinates is not None, " <DomainInput> : please first set region_coordinates."
assert isinstance(_dict_, dict), " <DomainInput> : region_boundary_type needs to be a dict."
for item in _dict_:
R, S = item.split('-')
assert R in self.region_corner_coordinates and S in ('U', 'D', 'L', 'R'), \
" <DomainInput> : region_edge_type key {} is wrong.".format(item)
self._region_edge_types_ = _dict_
def ___PRIVATE_boundary_name_requirement_checker___(self, boundaryRegionSidesDict):
"""
Requirements:
1). != domain name.
2). Length > 2
3). Can not start with 'R:' (So it must be different from regions names).
4). Only have letters
"""
for boundary_name in boundaryRegionSidesDict.keys():
assert boundary_name != self.domain_name
assert len(boundary_name) > 2, f"boundary_name = {boundary_name} is too short (>2 must)."
assert boundary_name[0:2] != 'R:', f"boundary_name = {boundary_name} wrong."
assert boundary_name.isalpha(), f"boundary_name = {boundary_name} wrong, boundary_name can only contain letters."
@property
def boundary_region_edges(self):
"""
Store the domain boundary information.
Returns
-------
domain_boundary : dict
For example:
{'Down': ("Body_center-D", 'Body_back-D', ...),
'West': ("Body_center-R", 'Body_back-R', ...),
......}
This means we have domain boundaries: South, West and so on.
"""
return self._boundary_region_edges_
@boundary_region_edges.setter
def boundary_region_edges(self, _dict_):
assert self.region_corner_coordinates is not None, " <DomainInput> : please first set region_coordinates."
assert isinstance(_dict_, dict), " <DomainInput> : domain_boundary needs to be a dict."
self.___PRIVATE_boundary_name_requirement_checker___(_dict_)
for boundary_names in _dict_.keys():
assert isinstance(boundary_names, str) and boundary_names != '' and '-' not in boundary_names, \
" <DomainInput> : boundary_names = {} is wrong.".format(boundary_names)
assert boundary_names not in self.region_corner_coordinates.keys(), \
" <DomainInput>: boundary_names={} is taken by one of the regions.".format(boundary_names)
for item in _dict_:
if isinstance(_dict_[item], str):
_dict_[item] = (_dict_[item],)
if isinstance(_dict_[item], list) or isinstance(_dict_[item], tuple):
for item_i in _dict_[item]:
R, S = item_i.split('-')
assert R in self.region_corner_coordinates and S in ('U', 'D', 'L', 'R'), \
" <DomainInput> : domain_boundary[{}]={} is wrong.".format(item, _dict_[item])
else:
raise Exception(" <DomainInput> : boundary_region_edges input value accepts only str, tuple of list.")
self._boundary_region_edges_ = _dict_
self._boundary_names_ = list(_dict_.keys())
def ___PRIVATE_periodic_boundary_requirement_checker___(self, pBd):
"""
Here we only do a simple check. We make sure that the keys are in format of:
0). boundary_name_1=boundary_name_2.
1). A boundary name at most appear in one pair.
"""
assert isinstance(pBd, dict)
bnPOOL = set()
for pair in pBd:
assert '=' in pair
bn1, bn2 = pair.split('=')
lengthPOOL = len(bnPOOL)
assert bn1 in self._boundary_names_ and bn2 in self._boundary_names_
bnPOOL.add(bn1)
bnPOOL.add(bn2)
newLengthPOOL = len(bnPOOL)
assert newLengthPOOL == lengthPOOL + 2, "Boundary(s) used for multiple periodic pairs!"
self._periodic_boundaries_ = bnPOOL
@property
def periodic_boundary_pairs(self):
return self._periodic_boundary_pairs_
@periodic_boundary_pairs.setter
def periodic_boundary_pairs(self, pBd):
""" """
self.___PRIVATE_periodic_boundary_requirement_checker___(pBd)
self._periodic_boundary_pairs_ = pBd
@property
def periodic_boundaries(self):
"""(set) Return a set of all boundary names those involved in the periodic boundary setting."""
return self._periodic_boundaries_
@property
def periodic_boundaries_involved_regions(self):
"""The regions that involve periodic boundaries."""
regions = set()
for pb in self.periodic_boundaries:
region_sides = self.boundary_region_edges[pb]
for rs in region_sides:
rn = rs.split('-')[0]
if rn not in regions:
regions.add(rn)
return regions
@property
def region_sequence(self):
"""
        This fixes the sequence of regions by fixing their names in the property
        region_names or regions.names. This is very important for numbering:
        a bad region sequence can make the numbering wrong.
"""
return self._region_sequence_
@region_sequence.setter
def region_sequence(self, rS: tuple):
assert len(rS) == len(self.region_corner_coordinates.keys())
assert all([rSi in self.region_corner_coordinates for rSi in rS]) & \
all([rSi in rS for rSi in self.region_corner_coordinates.keys()]), \
f"region_sequence={rS} has invalid regions name(s)."
self._region_sequence_ = rS
@property
def region_type_wr2_metric(self):
return self._region_type_wr2_metric_
@region_type_wr2_metric.setter
def region_type_wr2_metric(self, rTwr2M: Union[str, Dict[str, str]]):
if isinstance(rTwr2M, str):
_D_ = dict()
for region_name in self.region_corner_coordinates:
_D_[region_name] = rTwr2M
rTwr2M = _D_
assert isinstance(rTwr2M, dict), "region_type_wr2_metric needs to be a dictionary."
for key in rTwr2M:
assert key in self.region_corner_coordinates, f"Region name={key} not valid."
self._region_type_wr2_metric_ = rTwr2M
# class properties -------------------------
@classproperty
def statistic(cls):
raise NotImplementedError()
@classproperty
def random_parameters(cls):
raise NotImplementedError()
| 36.345345
| 125
| 0.629513
| 11,703
| 0.96695
| 0
| 0
| 8,304
| 0.686111
| 0
| 0
| 4,411
| 0.364455
|
58a1f761a8b86a42b461b76e20b0ebb5fa21fa7a
| 4,098
|
py
|
Python
|
src/sage/modules/vector_symbolic_dense.py
|
sloebrich/sage
|
b8f53f72e817e78722ad1b40d70aa9071426700b
|
[
"BSL-1.0"
] | 1
|
2020-05-19T22:34:03.000Z
|
2020-05-19T22:34:03.000Z
|
src/sage/modules/vector_symbolic_dense.py
|
sbt4104/sage
|
2cbd93e0b78dec701f4b7ad9271d3b1e967bcd6c
|
[
"BSL-1.0"
] | null | null | null |
src/sage/modules/vector_symbolic_dense.py
|
sbt4104/sage
|
2cbd93e0b78dec701f4b7ad9271d3b1e967bcd6c
|
[
"BSL-1.0"
] | 3
|
2020-03-29T17:13:36.000Z
|
2021-05-03T18:11:28.000Z
|
"""
Vectors over the symbolic ring.
Implements vectors over the symbolic ring.
AUTHORS:
- Robert Bradshaw (2011-05-25): Added more element-wise simplification methods
- Joris Vankerschaver (2011-05-15): Initial version
EXAMPLES::
sage: x, y = var('x, y')
sage: u = vector([sin(x)^2 + cos(x)^2, log(2*y) + log(3*y)]); u
(cos(x)^2 + sin(x)^2, log(3*y) + log(2*y))
sage: type(u)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: u.simplify_full()
(1, log(3*y) + log(2*y))
TESTS:
Check that the outcome of arithmetic with symbolic vectors is again
a symbolic vector (:trac:`11549`)::
sage: v = vector(SR, [1, 2])
sage: w = vector(SR, [sin(x), 0])
sage: type(v)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(w)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(v + w)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(-v)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
sage: type(5*w)
<class 'sage.modules.free_module.FreeModule_ambient_field_with_category.element_class'>
Test pickling/unpickling::
sage: u = vector(SR, [sin(x^2)])
sage: loads(dumps(u)) == u
True
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2011 Joris Vankerschaver (jv@caltech.edu)
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from . import free_module_element
from sage.symbolic.all import Expression
def apply_map(phi):
"""
Returns a function that applies phi to its argument.
EXAMPLES::
sage: from sage.modules.vector_symbolic_dense import apply_map
sage: v = vector([1,2,3])
sage: f = apply_map(lambda x: x+1)
sage: f(v)
(2, 3, 4)
"""
def apply(self, *args, **kwds):
"""
Generic function used to implement common symbolic operations
elementwise as methods of a vector.
EXAMPLES::
sage: var('x,y')
(x, y)
sage: v = vector([sin(x)^2 + cos(x)^2, log(x*y), sin(x/(x^2 + x)), factorial(x+1)/factorial(x)])
sage: v.simplify_trig()
(1, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))
sage: v.canonicalize_radical()
(cos(x)^2 + sin(x)^2, log(x) + log(y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))
sage: v.simplify_rational()
(cos(x)^2 + sin(x)^2, log(x*y), sin(1/(x + 1)), factorial(x + 1)/factorial(x))
sage: v.simplify_factorial()
(cos(x)^2 + sin(x)^2, log(x*y), sin(x/(x^2 + x)), x + 1)
sage: v.simplify_full()
(1, log(x*y), sin(1/(x + 1)), x + 1)
sage: v = vector([sin(2*x), sin(3*x)])
sage: v.simplify_trig()
(2*cos(x)*sin(x), (4*cos(x)^2 - 1)*sin(x))
sage: v.simplify_trig(False)
(sin(2*x), sin(3*x))
sage: v.simplify_trig(expand=False)
(sin(2*x), sin(3*x))
"""
return self.apply_map(lambda x: phi(x, *args, **kwds))
apply.__doc__ += "\nSee Expression." + phi.__name__ + "() for optional arguments."
return apply
class Vector_symbolic_dense(free_module_element.FreeModuleElement_generic_dense):
pass
# Add elementwise methods.
for method in ['simplify', 'simplify_factorial',
'simplify_log', 'simplify_rational',
'simplify_trig', 'simplify_full', 'trig_expand',
'canonicalize_radical', 'trig_reduce']:
setattr(Vector_symbolic_dense, method, apply_map(getattr(Expression, method)))
| 34.728814
| 108
| 0.59346
| 90
| 0.021962
| 0
| 0
| 0
| 0
| 0
| 0
| 3,524
| 0.859932
|
58a347a92a051b6eeb3be14043e523039fd31e40
| 784
|
py
|
Python
|
backend/app/models/weather.py
|
francoiscolombo/webnews
|
2f4c3fa5343919e6c658d97aebec4997d4d7ea48
|
[
"MIT"
] | null | null | null |
backend/app/models/weather.py
|
francoiscolombo/webnews
|
2f4c3fa5343919e6c658d97aebec4997d4d7ea48
|
[
"MIT"
] | 4
|
2021-03-10T12:26:29.000Z
|
2022-02-27T02:00:32.000Z
|
backend/app/models/weather.py
|
francoiscolombo/webnews
|
2f4c3fa5343919e6c658d97aebec4997d4d7ea48
|
[
"MIT"
] | null | null | null |
from app import db
from app.models.serializer import Serializer
class Weather(db.Model, Serializer):
id = db.Column(db.Integer, primary_key=True)
ip = db.Column(db.String(15), index=True, unique=True)
country = db.Column(db.String(80))
flag = db.Column(db.String(512))
town = db.Column(db.String(80))
tendency = db.Column(db.String(80))
wind_speed = db.Column(db.String(20))
temperature_min = db.Column(db.String(20))
temperature_max = db.Column(db.String(20))
temperature = db.Column(db.String(20))
humidity = db.Column(db.String(40))
clouds = db.Column(db.String(80))
def __repr__(self):
return '<Weather {} : {}>'.format(self.town, self.temperature)
def serialize(self):
return Serializer.serialize(self)
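# A hedged usage sketch (field values are illustrative):
#
#     w = Weather(ip='127.0.0.1', town='Geneva', temperature='21')
#     db.session.add(w)
#     db.session.commit()
#     payload = w.serialize()  # assumed to yield a dict suitable for jsonify()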
| 32.666667
| 70
| 0.672194
| 717
| 0.914541
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.024235
|
58a3ad8eacd907942afee36829131b2e139101c4
| 894
|
py
|
Python
|
conu/backend/nspawn/constants.py
|
lslebodn/conu
|
dee6fd958471f77d1c0511b031ea136dfaf8a77a
|
[
"MIT"
] | 95
|
2018-05-19T14:35:08.000Z
|
2022-01-08T23:31:40.000Z
|
conu/backend/nspawn/constants.py
|
lslebodn/conu
|
dee6fd958471f77d1c0511b031ea136dfaf8a77a
|
[
"MIT"
] | 179
|
2017-09-12T11:14:30.000Z
|
2018-04-26T05:36:13.000Z
|
conu/backend/nspawn/constants.py
|
lslebodn/conu
|
dee6fd958471f77d1c0511b031ea136dfaf8a77a
|
[
"MIT"
] | 16
|
2018-05-09T14:15:32.000Z
|
2021-08-02T21:11:33.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
# TODO: move this line to some generic constants, instead of same in
# docker and nspawn
CONU_ARTIFACT_TAG = 'CONU.'
CONU_IMAGES_STORE = "/opt/conu-nspawn-images/"
CONU_NSPAWN_BASEPACKAGES = [
"dnf",
"iproute",
"dhcp-client",
"initscripts",
"passwd",
"systemd",
"rpm",
"bash",
"shadow-utils",
"sssd-client",
"util-linux",
"libcrypt",
"sssd-client",
"coreutils",
"glibc-all-langpacks",
"vim-minimal"]
BOOTSTRAP_IMAGE_SIZE_IN_MB = 5000
BOOTSTRAP_FS_UTIL = "mkfs.ext4"
BOOTSTRAP_PACKAGER = [
"dnf",
"-y",
"install",
"--nogpgcheck",
"--setopt=install_weak_deps=False",
"--allowerasing"]
DEFAULT_RETRYTIMEOUT = 30
DEFAULT_SLEEP = 1
| 22.35
| 68
| 0.587248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 488
| 0.545861
|
58a653052f6df764ec062ee02680225f5a15d5ec
| 805
|
py
|
Python
|
onnxruntime/python/tools/quantization/operators/qdq_base_operator.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 669
|
2018-12-03T22:00:31.000Z
|
2019-05-06T19:42:49.000Z
|
onnxruntime/python/tools/quantization/operators/qdq_base_operator.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 440
|
2018-12-03T21:09:56.000Z
|
2019-05-06T20:47:23.000Z
|
onnxruntime/python/tools/quantization/operators/qdq_base_operator.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 140
|
2018-12-03T21:15:28.000Z
|
2019-05-06T18:02:36.000Z
|
import itertools
from ..quant_utils import QuantizedValue, QuantizedValueType, attribute_to_kwarg, quantize_nparray
from .base_operator import QuantOperatorBase
class QDQOperatorBase:
def __init__(self, onnx_quantizer, onnx_node):
self.quantizer = onnx_quantizer
self.node = onnx_node
        self.disable_qdq_for_node_output = (
            onnx_node.op_type in onnx_quantizer.op_types_to_exclude_output_quantization
        )
def quantize(self):
node = self.node
if self.disable_qdq_for_node_output:
tensors_to_quantize = node.input
else:
tensors_to_quantize = itertools.chain(node.input, node.output)
for tensor_name in tensors_to_quantize:
self.quantizer.quantize_tensor(tensor_name)
| 32.2
| 106
| 0.719255
| 640
| 0.795031
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
58a66b1c1e9cbe9103fe7260fec4a45f53280f13
| 495
|
py
|
Python
|
baya/tests/models.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | null | null | null |
baya/tests/models.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | 1
|
2018-12-28T16:53:42.000Z
|
2018-12-28T16:53:42.000Z
|
baya/tests/models.py
|
hrichards/baya
|
f319cef5e95cd6a166265d51ae0ea236b6f65be3
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class Blag(models.Model):
name = models.CharField(max_length=128)
class BlagEntry(models.Model):
blag = models.ForeignKey(Blag)
title = models.CharField(max_length=255)
body = models.TextField()
class Meta:
verbose_name_plural = "Entries"
class PhotoBlagEntry(models.Model):
blag = models.ForeignKey(Blag)
title = models.CharField(max_length=255)
# and a file field for a photo but whatever
| 21.521739
| 47
| 0.711111
| 429
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 78
| 0.157576
|
58a67e64a1b78dbe199317a20cf65d4984a16e33
| 7,598
|
py
|
Python
|
code/rec_eval.py
|
dawenl/content_wmf
|
b3e0a8eeb1b28836f280997c47444786afe91d3f
|
[
"MIT"
] | 24
|
2016-09-18T10:28:07.000Z
|
2021-08-21T14:48:01.000Z
|
code/rec_eval.py
|
dawenl/content_wmf
|
b3e0a8eeb1b28836f280997c47444786afe91d3f
|
[
"MIT"
] | null | null | null |
code/rec_eval.py
|
dawenl/content_wmf
|
b3e0a8eeb1b28836f280997c47444786afe91d3f
|
[
"MIT"
] | 15
|
2015-10-29T14:46:03.000Z
|
2020-03-12T09:35:55.000Z
|
import bottleneck as bn
import numpy as np
from scipy import sparse
"""
All the data should be in the shape of (n_users, n_items)
All the latent factors should in the shape of (n_users/n_items, n_components)
1. train_data refers to the data that was used to train the model
2. heldout_data refers to the data that was used for evaluation (could be test
set or validation set)
3. vad_data refers to the data that should be excluded as validation set, which
should only be used when calculating test scores
"""
def prec_at_k(train_data, heldout_data, U, V, batch_users=5000, k=20,
vad_data=None):
n_users = train_data.shape[0]
res = list()
for user_idx in user_idx_generator(n_users, batch_users):
res.append(precision_at_k_batch(train_data, heldout_data,
U, V.T, user_idx, k=k,
vad_data=vad_data))
mn_prec = np.hstack(res)
return np.nanmean(mn_prec)
def recall_at_k(train_data, heldout_data, U, V, batch_users=5000, k=20,
vad_data=None):
n_users = train_data.shape[0]
res = list()
for user_idx in user_idx_generator(n_users, batch_users):
res.append(recall_at_k_batch(train_data, heldout_data,
U, V.T, user_idx, k=k,
vad_data=vad_data))
mrecall = np.hstack(res)
return np.nanmean(mrecall)
def ric_rank_at_k(train_data, heldout_data, U, V, batch_users=5000, k=5,
vad_data=None):
n_users = train_data.shape[0]
res = list()
for user_idx in user_idx_generator(n_users, batch_users):
res.append(mean_rrank_at_k_batch(train_data, heldout_data,
U, V.T, user_idx, k=k,
vad_data=vad_data))
mrrank = np.hstack(res)
#return mrrank[mrrank > 0].mean(), np.median(mrrank[mrrank > 0])
return mrrank[mrrank > 0].mean()
def mean_perc_rank(train_data, heldout_data, U, V, batch_users=5000,
vad_data=None):
n_users = train_data.shape[0]
mpr = 0
for user_idx in user_idx_generator(n_users, batch_users):
mpr += mean_perc_rank_batch(train_data, heldout_data, U, V.T, user_idx,
vad_data=vad_data)
mpr /= heldout_data.sum()
return mpr
def normalized_dcg(train_data, heldout_data, U, V, batch_users=5000,
vad_data=None):
n_users = train_data.shape[0]
res = list()
for user_idx in user_idx_generator(n_users, batch_users):
res.append(NDCG_binary_batch(train_data, heldout_data, U, V.T,
user_idx, vad_data=vad_data))
ndcg = np.hstack(res)
return np.nanmean(ndcg)
# helper functions #
def user_idx_generator(n_users, batch_users):
''' helper function to generate the user index to loop through the dataset
'''
for start in xrange(0, n_users, batch_users):
end = min(n_users, start + batch_users)
yield slice(start, end)
def _make_prediction(train_data, Et, Eb, user_idx, batch_users,
vad_data=None):
n_songs = train_data.shape[1]
# exclude examples from training and validation (if any)
item_idx = np.zeros((batch_users, n_songs), dtype=bool)
item_idx[train_data[user_idx].nonzero()] = True
if vad_data is not None:
item_idx[vad_data[user_idx].nonzero()] = True
X_pred = Et[user_idx].dot(Eb)
X_pred[item_idx] = -np.inf
return X_pred
def precision_at_k_batch(train_data, heldout_data, Et, Eb, user_idx,
k=20, normalize=True, vad_data=None):
batch_users = user_idx.stop - user_idx.start
X_pred = _make_prediction(train_data, Et, Eb, user_idx,
batch_users, vad_data=vad_data)
idx = bn.argpartsort(-X_pred, k, axis=1)
X_pred_binary = np.zeros_like(X_pred, dtype=bool)
X_pred_binary[np.tile(np.arange(batch_users), (k, 1)).T, idx[:, :k]] = True
X_true_binary = (heldout_data[user_idx] > 0).toarray()
tmp = (np.logical_and(X_true_binary, X_pred_binary).sum(axis=1)).astype(
np.float32)
if normalize:
precision = tmp / np.minimum(k, X_true_binary.sum(axis=1))
else:
precision = tmp / k
return precision
def recall_at_k_batch(train_data, heldout_data, Et, Eb, user_idx,
k=20, vad_data=None):
batch_users = user_idx.stop - user_idx.start
X_pred = _make_prediction(train_data, Et, Eb, user_idx,
batch_users, vad_data=vad_data)
idx = bn.argpartsort(-X_pred, k, axis=1)
X_pred_binary = np.zeros_like(X_pred, dtype=bool)
X_pred_binary[np.tile(np.arange(batch_users), (k, 1)).T, idx[:, :k]] = True
X_true_binary = (heldout_data[user_idx] > 0).toarray()
tmp = (np.logical_and(X_true_binary, X_pred_binary).sum(axis=1)).astype(
np.float32)
recall = tmp / X_true_binary.sum(axis=1)
return recall
def mean_rrank_at_k_batch(train_data, heldout_data, Et, Eb,
user_idx, k=5, vad_data=None):
'''
mean reciprocal rank@k: For each user, make predictions and rank for
all the items. Then calculate the mean reciprocal rank for the top K that
are in the held-out set.
'''
batch_users = user_idx.stop - user_idx.start
X_pred = _make_prediction(train_data, Et, Eb, user_idx,
batch_users, vad_data=vad_data)
all_rrank = 1. / (np.argsort(np.argsort(-X_pred, axis=1), axis=1) + 1)
X_true_binary = (heldout_data[user_idx] > 0).toarray()
heldout_rrank = X_true_binary * all_rrank
top_k = bn.partsort(-heldout_rrank, k, axis=1)
return -top_k[:, :k].mean(axis=1)
def NDCG_binary_batch(train_data, heldout_data, Et, Eb, user_idx,
vad_data=None):
'''
normalized discounted cumulative gain for binary relevance
'''
batch_users = user_idx.stop - user_idx.start
n_items = train_data.shape[1]
X_pred = _make_prediction(train_data, Et, Eb, user_idx,
batch_users, vad_data=vad_data)
all_rank = np.argsort(np.argsort(-X_pred, axis=1), axis=1)
# build the discount template
tp = np.hstack((1, 1. / np.log2(np.arange(2, n_items + 1))))
all_disc = tp[all_rank]
X_true_binary = (heldout_data[user_idx] > 0).tocoo()
disc = sparse.csr_matrix((all_disc[X_true_binary.row, X_true_binary.col],
(X_true_binary.row, X_true_binary.col)),
shape=all_disc.shape)
DCG = np.array(disc.sum(axis=1)).ravel()
IDCG = np.array([tp[:n].sum()
for n in heldout_data[user_idx].getnnz(axis=1)])
return DCG / IDCG
def mean_perc_rank_batch(train_data, heldout_data, Et, Eb, user_idx,
vad_data=None):
'''
mean percentile rank for a batch of users
MPR of the full set is the sum of batch MPR's divided by the sum of all the
feedbacks. (Eq. 8 in Hu et al.)
This metric not necessarily constrains the data to be binary
'''
batch_users = user_idx.stop - user_idx.start
X_pred = _make_prediction(train_data, Et, Eb, user_idx, batch_users,
vad_data=vad_data)
all_perc = np.argsort(np.argsort(-X_pred, axis=1), axis=1) / \
np.isfinite(X_pred).sum(axis=1, keepdims=True).astype(np.float32)
perc_batch = (all_perc[heldout_data[user_idx].nonzero()] *
heldout_data[user_idx].data).sum()
return perc_batch
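A usage sketch under assumed shapes follows. Note the module targets Python 2 (xrange) and an old bottleneck release whose argpartsort/partsort were later renamed, so the metric call is left commented.

import numpy as np
from scipy import sparse

rng = np.random.RandomState(0)
n_users, n_items, n_components = 100, 50, 10
train = sparse.random(n_users, n_items, density=0.10, format='csr', random_state=rng)
heldout = sparse.random(n_users, n_items, density=0.05, format='csr', random_state=rng)
U = rng.rand(n_users, n_components)  # user factors
V = rng.rand(n_items, n_components)  # item factors
# print(recall_at_k(train, heldout, U, V, batch_users=50, k=10))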
| 37.613861
| 79
| 0.633325
| 0
| 0
| 262
| 0.034483
| 0
| 0
| 0
| 0
| 1,196
| 0.15741
|
58a6b4335eac35be6ee8f5597bc84e5d66427621
| 1,295
|
py
|
Python
|
pgdrive/tests/vis_block/vis_std_t_intersection.py
|
decisionforce/pgdrive
|
19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee
|
[
"Apache-2.0"
] | 97
|
2020-12-25T06:02:17.000Z
|
2022-01-16T06:58:39.000Z
|
pgdrive/tests/vis_block/vis_std_t_intersection.py
|
decisionforce/pgdrive
|
19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee
|
[
"Apache-2.0"
] | 192
|
2020-12-25T07:58:17.000Z
|
2021-08-28T10:13:59.000Z
|
pgdrive/tests/vis_block/vis_std_t_intersection.py
|
decisionforce/pgdrive
|
19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee
|
[
"Apache-2.0"
] | 11
|
2020-12-29T11:23:44.000Z
|
2021-12-06T23:25:49.000Z
|
from pgdrive.component.blocks.curve import Curve
from pgdrive.component.blocks.first_block import FirstPGBlock
from pgdrive.component.blocks.std_t_intersection import StdTInterSection
from pgdrive.component.blocks.straight import Straight
from pgdrive.component.road.road_network import RoadNetwork
from pgdrive.tests.vis_block.vis_block_base import TestBlock
if __name__ == "__main__":
test = TestBlock(True)
from pgdrive.engine.asset_loader import initialize_asset_loader
initialize_asset_loader(test)
global_network = RoadNetwork()
first = FirstPGBlock(global_network, 3.0, 2, test.render, test.world, 1)
curve = Curve(1, first.get_socket(0), global_network, 1)
curve.construct_block(test.render, test.world)
straight = Straight(2, curve.get_socket(0), global_network, 1)
straight.construct_block(test.render, test.world)
intersection = StdTInterSection(3, straight.get_socket(0), global_network, 1)
print(intersection.construct_block(test.render, test.world))
    block_id = 4
    for socket_idx in range(intersection.SOCKET_NUM):
        block = Curve(block_id, intersection.get_socket(socket_idx), global_network, block_id + 1)
        block.construct_block(test.render, test.world)
        block_id += 1
test.show_bounding_box(global_network)
test.run()
| 40.46875
| 86
| 0.766023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.007722
|
58a742e0b8ad8aa4381262e4194d124ffb86733b
| 6,241
|
py
|
Python
|
version/v 4.0/spider/config.py
|
zhong-yan/neteasenews
|
4dda8ef13d44f08e90e3869f4a7d972fb4b9feed
|
[
"Apache-2.0"
] | null | null | null |
version/v 4.0/spider/config.py
|
zhong-yan/neteasenews
|
4dda8ef13d44f08e90e3869f4a7d972fb4b9feed
|
[
"Apache-2.0"
] | null | null | null |
version/v 4.0/spider/config.py
|
zhong-yan/neteasenews
|
4dda8ef13d44f08e90e3869f4a7d972fb4b9feed
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
BASE_URL = 'http://news.163.com/'
# Homepage AJAX content plus the UAV (drone) AJAX feed; the trailing ?callback=data_callback' can actually be omitted
# How to find the JSON links quickly? For now it is done by hand: each JSON document is added one by one
JSON_INDEX_URLS = [
'http://temp.163.com/special/00804KVA/cm_yaowen.js?callback=data_callback',
'http://house.163.com/special/00078GU7/guangzhou_xw_news_v1.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_shehui.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_guonei.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_guoji.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_dujia.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_war.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_money.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_tech.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_sports.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_ent.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_lady.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_auto.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_houseguangzhou.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_hangkong.js?callback=data_callback',
'http://temp.163.com/special/00804KVA/cm_jiankang.js?callback=data_callback',
    # UAV (drone) tag
'http://news.163.com/uav/special/000189N0/uav_index.js?callback=data_callback',
    # Sports
'http://sports.163.com/special/000587PR/newsdata_n_index.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_world.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_china.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_cba.js?callback=data_callback',
'http://sports.163.com/special/000587PR/newsdata_n_allsports.js?callback=data_callback',
# NBA
'http://sports.163.com/special/000587PK/newsdata_nba_hj.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_qsh.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_ysh.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_ketr.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_okc.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_huren.js?callback=data_callback',
'http://sports.163.com/special/000587PK/newsdata_nba_mc.js?callback=data_callback',
    # Entertainment
'http://ent.163.com/special/000380VU/newsdata_index.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_music.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_star.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_movie.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_tv.js?callback=data_callback',
'http://ent.163.com/special/000380VU/newsdata_show.js?callback=data_callback',
    # Finance
'http://money.163.com/special/002557S5/newsdata_idx_index.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_stock.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_finance.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_fund.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_licai.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_biz.js?callback=data_callback',
'http://money.163.com/special/002557S5/newsdata_idx_bitcoin.js?callback=data_callback',
    # Stocks
'http://money.163.com/special/002557S6/newsdata_gp_index.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_hkstock.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_usstock.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_ipo.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_bitcoin.js?callback=data_callback',
'http://money.163.com/special/002557S6/newsdata_gp_dy.js?callback=data_callback',
    # Technology
'http://tech.163.com/special/00097UHL/tech_datalist.js?callback=data_callback',
    # Nationwide local feeds; no URL pattern could be found, so these were copied over one by one (tedious)
'http://bendi.news.163.com/beijing/special/04388GGG/bjxinxiliu.js',
'http://bendi.news.163.com/shanghai/special/04188GP4/shxinxiliu.js',
'http://tj.news.163.com/special/04208F5D/tjxxl.js',
'http://bendi.news.163.com/jiangsu/special/04248H8U/njxxl.js',
'http://bendi.news.163.com/zhejiang/special/04098FBT/xinxiliu.js',
'http://sc.news.163.com/special/04268EVT/xinxiliu.js',
'http://bendi.news.163.com/heilongjiang/special/04238DR5/haerbin.js',
'http://bendi.news.163.com/jilin/special/04118E6D/center_news_cc.js',
'http://bendi.news.163.com/liaoning/special/04228EED/xinxiliu.js',
'http://bendi.news.163.com/neimengu/special/04138EHT/nmgxxl.js'
]
URLs = ['http://news.163.com/',
'http://news.163.com/rank/',
'http://news.163.com/photo/#Current',
'http://news.163.com/domestic/',
'http://news.163.com/world/',
'http://news.163.com/shehui/',
'http://data.163.com/special/datablog/',
'http://war.163.com/',
'http://news.163.com/air/',
'http://news.163.com/uav/',
'http://news.163.com/college',
'http://gov.163.com/',
'http://gongyi.163.com/',
'http://media.163.com/']
# config mongoDB
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_DBNAME = 'neteasenews'
# Stores hot-updated content, i.e. refreshed frequently
MONGODB_TABLE_1 = 'article'
# Stores the news ranking content, i.e. refreshed at a medium rate
MONGODB_TABLE_2 = 'newsrank'
# Stores cold-updated content, i.e. refreshed infrequently
MONGODB_TABLE_3 = 'coldpage'
# Stores image data
MONGODB_TABLE_4 = 'picture'
# config chromedriver:
prefs = {
'profile.default_content_setting_values': {
'images': 2,
# 'javascript': 2
# 'User-Agent': ua
}
}
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', prefs)
# Enable headless mode
options.add_argument('--headless')
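The feeds above are JSONP, so the data_callback(...) wrapper has to be stripped before parsing. A minimal sketch (assumes the requests package is installed; some feeds are loose JavaScript rather than strict JSON and may need further cleanup):

import json
import requests

def fetch_jsonp(url):
    text = requests.get(url, timeout=10).text.strip()
    # keep only what sits between the outermost parentheses
    payload = text[text.find('(') + 1:text.rfind(')')]
    return json.loads(payload)  # may fail on feeds that are not strict JSON

# items = fetch_jsonp(JSON_INDEX_URLS[0])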
| 54.745614
| 92
| 0.737863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5,590
| 0.861723
|
58a7437e24bf8faeb840154530f279b8c6eee778
| 2,044
|
py
|
Python
|
assignments/10_conserved/conserved.py
|
brianUA/be434-fall-2021
|
bf0bb3f1c8129599818b98b7ee25b39aa926fd1f
|
[
"MIT"
] | null | null | null |
assignments/10_conserved/conserved.py
|
brianUA/be434-fall-2021
|
bf0bb3f1c8129599818b98b7ee25b39aa926fd1f
|
[
"MIT"
] | null | null | null |
assignments/10_conserved/conserved.py
|
brianUA/be434-fall-2021
|
bf0bb3f1c8129599818b98b7ee25b39aa926fd1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Author : Brian Scott <brianscott@email.arizona.edu>
Date : 2021-11-09
Purpose: Find the similarities between sequences.
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description='Rock the Casbah',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
help='Input file',
metavar='FILE',
type=argparse.FileType('rt'))
return parser.parse_args()
# --------------------------------------------------
def main():
"""Makes it work"""
args = get_args()
sequences = []
for line in args.file:
temp_list = []
for char in line.strip():
temp_list.append(char)
sequences.append(temp_list)
out_seq = ""
for base in range(len(sequences[0])):
temp_base = []
        # for line in range(len(sequences)):
        # enumerate is used instead of range(len()) to satisfy pylint,
        # and the loop variable then has to be used as well for the same
        # reason, even though range(len()) would avoid creating an
        # unnecessary variable.
for line, value in enumerate(sequences):
temp_base.append(sequences[line][base])
# print(temp_base)
            all_same(value)  # only here so pylint sees the enumerate variable used
if all_same(temp_base):
out_seq = out_seq + "|"
else:
out_seq = out_seq + "X"
for line in sequences:
temp_line = ""
for char in line:
temp_line = temp_line + char
print(temp_line)
print(out_seq)
def all_same(list1):
"""checks if all items in list are equal"""
return all(x == list1[0] for x in list1)
# --------------------------------------------------
if __name__ == '__main__':
main()
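The same column-conservation logic can be sketched more compactly with zip, which walks the sequences column by column:

def conserved_marks(seqs):
    # '|' where every sequence agrees in a column, 'X' where they differ
    return ''.join('|' if len(set(col)) == 1 else 'X' for col in zip(*seqs))

print(conserved_marks(["GAGCCTACT", "GAGCCTTCT"]))  # ||||||X||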
| 27.621622
| 73
| 0.54501
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 829
| 0.405577
|
58a9853e032d70b843b4faffe8df15e8491bea40
| 13,999
|
py
|
Python
|
ldss_spec/tools/spec_red.py
|
dsanmartim/ldss_specred
|
8274ce0cf0eddfc7649106d7b9d0ce733e69c722
|
[
"MIT"
] | null | null | null |
ldss_spec/tools/spec_red.py
|
dsanmartim/ldss_specred
|
8274ce0cf0eddfc7649106d7b9d0ce733e69c722
|
[
"MIT"
] | null | null | null |
ldss_spec/tools/spec_red.py
|
dsanmartim/ldss_specred
|
8274ce0cf0eddfc7649106d7b9d0ce733e69c722
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# Loading a few python packages
import os
import glob
import warnings
from astropy import log
from astropy.io import fits as pyfits
import json
# Loading iraf packages
from pyraf import iraf
from pyraf.iraf import onedspec
from pyraf.iraf import twodspec, apextract
class SpecRed():
"""
    This class runs a few reduction methods over a data set defined in a Python dictionary.
"""
def __init__(self, raw_path, red_path, obs_log_file, flat_correction=True, fit_continuum=False):
project_dir = os.path.abspath(os.path.dirname(os.path.dirname((os.path.dirname(__file__)))))
os.system('export PYTHONPATH={}'.format(project_dir))
self.data_dir = '{}/ldss_spec/data/'.format(project_dir)
        # Getting raw and reduced data paths
self.raw_path = str(os.path.join(raw_path[0], ''))
self.red_path = str(os.path.join(red_path[0], ''))
self.obs_log_file = obs_log_file
self.fit_continuum = fit_continuum
# Getting observation log dictionary + sky definition
self.obs_dict = self.get_obs_log(self.obs_log_file[0])
self.database_dir = str(os.path.join(self.red_path+'database', ''))
        # fit_continuum is already stored above; store flat_correction
        # unconditionally so the attribute exists even when it is False
        self.flat_correction = flat_correction
## Getting disp solution dictionary.
        # Keys are the names of the files inside the dispsol folder
# Values are the names that files will have inside the database folder
self.disp_solution_files = {'vph_all_open': 'id_vph_all_open.0001',
'vph_blue_open': 'id_vph_blue_open.0001',
'vph_red_og590': 'id_vph_red_og590.0001'}
# Sci extraction parameters
self.spec_ext_flags = {'format': 'onedspec', 'interactive': 'yes', 'find': 'yes', 'nfind': '1', 'resize': 'no',
'recenter': 'yes', 'edit': 'yes', 'nsum': '10', 'width': '10', 'lower': '-10',
'upper': '10', 'b_naverage': '-100', 'extract': 'yes', 'extras': 'no',
'trace': 'yes', 'fittrace': 'yes', 't_function': 'chebyshev', 't_order': '3',
't_nsum': '15', 't_step': '15', 'background': 'fit', 'b_function': 'chebyshev',
'b_order': '1', 'clean': 'yes', 'readnoise': 'enoise', 'gain': 'egain',
'weights': 'variance'}
# Arc extraction parameters
self.arc_ext_flags = {'format': 'onedspec', 'interactive': 'no', 'find': 'no', 'resize': 'no',
'recenter': 'no', 'edit': 'no', 'lower': '-10.0', 'upper': '10.0',
'extract': 'yes', 'extras': 'no', 'trace': 'no', 'fittrace': 'no',
'background': 'none', 'review': 'yes'}
# Identify parameters
self.identify_flags = {'ftype': 'emission', 'fwidth': '40', 'cradius': '30', 'thresho': '10.', 'minsep': '10.'}
self.wrange = { 'vph_all': [3900.0, 10300.0], 'vph_red': [6050.0, 10300.0], 'vph_blue': [3850.0, 6580.0] }
# About warnings
warnings.filterwarnings('ignore')
log.propagate = False
def run(self, *args, **kwargs):
# cleaning up the reduction dir
self.clean_path(path=self.red_path, ext_list=['fits', 'log', 'txt', 'dat'])
# Loading dispsol content dictionary
self.disp_sol_content_dictionary = self.get_dict_of_dispsol_content(self.disp_solution_files,
dispsol_dirname='dispsol/')
# Changing to red_path dir
self.change_dir(path=self.red_path)
        # Create the database directory if it does not exist
if not os.path.exists(self.database_dir):
os.makedirs(str(self.database_dir))
# Extracting spectra and corresponding arcs
self.extract_spec(path=self.raw_path, obs_dict=self.obs_dict, out_prefix='e_')
# Obtaining and applying dispersion function
self.wcalibrate_spec(obs_dict=self.obs_dict, disp_sol_content_dict=self.disp_sol_content_dictionary,
prefix='e_', out_prefix='w')
@staticmethod
def get_obs_log(filename):
"""
It reads a json file and return it as a python dictionary.
Args:
filename (str): The filename of a json file with the obslog definitions.
Returns:
obs_log_dict (dict): A python dictionary containing the obs log definitions.
"""
filename = str(filename)
with open(filename, 'r') as f:
obs_log_dict = json.load(f)
#except FileNotFoundError as file_not_found_message:
# print(file_not_found_message)
return obs_log_dict
def get_spec_setup(self, file):
hdu = pyfits.open(file)
header = hdu[0].header
hdu.close()
# Getting spec setup
grism = header['GRISM'].replace('-', '_').lower()
filter = header['FILTER'].lower()
slit = header['APERTURE'].lower()
return slit, grism, filter
def check_spec_setup(self, sci_file, arc_file):
sci_slit, sci_grism, sci_filter = self.get_spec_setup(sci_file)
arc_slit, arc_grism, arc_filter = self.get_spec_setup(arc_file)
# Check Slits
message = 'SLIT {} from SCI file {} does not match SLIT {} from ARC file {}'.format(sci_slit.upper(), sci_file,
arc_slit.upper(), arc_file)
assert sci_slit == arc_slit, message
# Check Grisms
message = 'GRISM {} from SCI file {} does not match SLIT {} from ARC file {}'.format(sci_grism.upper(), sci_file,
arc_grism.upper(), arc_file)
assert sci_grism == arc_grism, message
# Check Filter
if sci_grism != 'vph_all':
message = 'FILTER {} from SCI file {} does not match FILTER {} from ARC file {}'.format(sci_filter.upper(), sci_file,
arc_filter.upper(), arc_file)
assert sci_filter == arc_filter, message
@staticmethod
def get_dispsol_content(fname):
# Getting disp sol content from input disp file
with open(fname, 'r') as f:
content = f.read()
f.close()
return content
def get_dict_of_dispsol_content(self, disp_solutions_files_dictionary, dispsol_dirname='dispsol'):
data_dir = '{}{}'.format(self.data_dir, dispsol_dirname)
keys = disp_solutions_files_dictionary.keys()
values = disp_solutions_files_dictionary.values()
content_dict = {}
for i, key in enumerate(keys):
content = self.get_dispsol_content(fname=data_dir + values[i])
content_dict[key] = content
return content_dict
def write_modified_disp_sol_to_database(self, arc_file, dispsol_content, database_dir):
# Getting arc_file setup
slit, grism, filter = self.get_spec_setup(arc_file)
# This is the file name format as given in the dispsol directory, which is based on the
# grism - filter combination
inp_disp_file = 'id_{}_{}.0001'.format(grism, filter)
# Output file name with the dispersion solution (as required by IRAF)
output_disp_file_name = 'id{}'.format(arc_file.replace('.fits', ''))
        # Corresponding text inside the dispersion solution file
txt_to_be_replaced = str(inp_disp_file)
txt_new = output_disp_file_name.replace('id', '')
modified_disp_content = dispsol_content.replace(txt_to_be_replaced, txt_new)
        # Writing the new content to out_disp_file in self.database_dir
with open(database_dir + output_disp_file_name, 'w') as f:
print >> f, modified_disp_content
f.close()
@staticmethod
def change_dir(path):
"""
Change to directory defined in the path variable.
Args:
path (str): absolute or relative path.
"""
# Changing system to path
if os.getcwd() is not path:
os.chdir(path)
@staticmethod
def clean_path(path, ext_list):
"""
Clean up files from path. If the path does not exist, it will be created. It's not recursive, so it
will not clean up a sub-folder within the path.
Args:
path (string): path to be cleaned.
ext_list (list): list of strings containing the extension of files to be deleted.
"""
path = str(os.path.join(path, ''))
if not os.path.exists(path):
log.info('Provided path does not exist, but it was created!')
os.makedirs(path)
elif os.path.exists(path):
log.info('Cleaning up ' + path + '.')
iraf.imdelete('*tmp*', verify='no')
for ext in ext_list:
for _file in glob.glob(os.path.join(path, '*.' + str(ext))):
os.remove(_file)
def extract_spec(self, path, obs_dict, out_prefix='e_'):
"""
This method runs *apall* over the data set inside the path folder as defined by the obs_dict dictionary.
Args:
            path (string): path that contains the data to be extracted with apall.
obs_dict (dict): dictionary containing the filename of the spectra to be extracted, the corresponding
arc file and the sky background definition. The dictionary should be something like this:
self.obs_dict = {'LTTT1788_1': {'spec': 'ccd0093c1', 'arc': 'ccd0095c1', 'sky': '-35:-20,20:35'},
'LTTT1788_2': {'spec': 'ccd0094c1', 'arc': 'ccd0095c1', 'sky': '-35:-20,20:35'},
'LTTT1788_3': {'spec': 'ccd0096c1', 'arc': 'ccd0098c1', 'sky': '-35:-20,20:35'},
'LTTT1788_4': {'spec': 'ccd0097c1', 'arc': 'ccd0098c1', 'sky': '-35:-20,20:35'}}
out_prefix (string): letter that will be added to the filename of the data extracted.
Returns:
It returns 1d extracted spectra.
"""
path = str(os.path.join(path, ''))
# Set task parameters.
twodspec.apextract.apall.unlearn()
        # Make sure that in "apextract" the dispaxis parameter is set to "2". If the command below does not work,
# do it manually.
twodspec.apextract(dispaxis=2)
for target, p in obs_dict.iteritems():
spec_in = p['spec'] + '.fits'
arc_in = p['arc'] + '.fits'
if len(obs_dict) > 0:
print('\n')
log.info('Extracting star spectrum ' + p['spec'] + '...')
apextract.apall(input=path + spec_in, output=out_prefix + spec_in, b_sample=p['sky'],
**self.spec_ext_flags)
print('\n')
log.info('Extracting arc spectrum ' + p['arc'] + '...')
apextract.apall(input=path + arc_in, output=out_prefix + arc_in, reference=path + spec_in,
**self.arc_ext_flags)
def wcalibrate_spec(self, obs_dict, disp_sol_content_dict, prefix='e_', out_prefix='w'):
"""
This method runs a couple of IRAF routines to obtain the dispersion function for a arc files and then apply it
to the corresponding spectra.
Args:
obs_dict (dict):
prefix (str):
out_prefix (str):
Returns:
"""
onedspec.identify.unlearn()
onedspec.refspec.unlearn()
onedspec.dispcor.unlearn()
for target, p in obs_dict.iteritems():
spec_in = prefix + p['spec'] + '.0001.fits'
arc_ref = prefix + p['arc'] + '.0001.fits'
            # Checking spectral setup
self.check_spec_setup(spec_in, arc_ref)
##### Copying disp solution to 'database' dir
# 1. Getting content dictionary with disp solutions of the corresponding arc
slit, grism, filter = self.get_spec_setup(arc_ref)
w1, w2 = self.wrange[grism][0], self.wrange[grism][1]
# Getting specific disp sol content of the corresponding arc file
key = '{}_{}'.format(grism,filter)
content_dict = disp_sol_content_dict[key]
            # 2. Writing solution to database dir
self.write_modified_disp_sol_to_database(arc_ref, content_dict, database_dir=self.database_dir)
##### Running iraf to get updated disp sol
print('\n')
log.info('Finding wavelength solution to reference arc ' + arc_ref + '...')
onedspec.identify(arc_ref, **self.identify_flags)
print('\n')
log.info('Associating the obtained wavelength solution with the spectrum of the star:')
log.info(spec_in + ' -----> REFSPEC = ' + arc_ref + '.')
onedspec.refspec(spec_in, reference=arc_ref, sort='', group='')
print('\n')
log.info('Applying wavelength calibration to ' + spec_in + '.')
onedspec.dispcor(spec_in, out=out_prefix + spec_in, w1=w1, w2=w2)
if self.fit_continuum:
onedspec.continuum.unlearn()
print('\n')
log.info('Fitting continuum to ' + out_prefix + spec_in + '.')
input = out_prefix + spec_in
output = 'cont_' + out_prefix + spec_in
onedspec.continuum(input=input, output=output, type='fit', function='legendre', order=15, niterate=10,
low_reject=2.0, high_reject=0.0)
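For reference, a sketch of the JSON obs-log file that get_obs_log() expects, mirroring the dictionary shown in the extract_spec docstring (file name and values are illustrative):

import json

obs_log = {
    "LTTT1788_1": {"spec": "ccd0093c1", "arc": "ccd0095c1", "sky": "-35:-20,20:35"},
    "LTTT1788_2": {"spec": "ccd0094c1", "arc": "ccd0095c1", "sky": "-35:-20,20:35"},
}
with open("obs_log.json", "w") as f:
    json.dump(obs_log, f, indent=2)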
| 37.733154
| 129
| 0.572541
| 13,683
| 0.977427
| 0
| 0
| 1,955
| 0.139653
| 0
| 0
| 5,614
| 0.401029
|
58a9c2475f1d862dde62daacb24c84dc06c0e208
| 1,667
|
py
|
Python
|
lookupService/helpers/job_scheduler.py
|
selfjell/MirMachine
|
b61b555e7d0942f6fdcc53634469fffea2b92f4c
|
[
"MIT"
] | 1
|
2021-11-11T12:47:20.000Z
|
2021-11-11T12:47:20.000Z
|
lookupService/helpers/job_scheduler.py
|
selfjell/MirMachine
|
b61b555e7d0942f6fdcc53634469fffea2b92f4c
|
[
"MIT"
] | null | null | null |
lookupService/helpers/job_scheduler.py
|
selfjell/MirMachine
|
b61b555e7d0942f6fdcc53634469fffea2b92f4c
|
[
"MIT"
] | null | null | null |
from ..models import Job
from engine.scripts.mirmachine_args import run_mirmachine
from .socket_helper import announce_status_change, announce_queue_position, announce_initiation, announce_completed
from .maintainer import clean_up_temporary_files
from django.utils import timezone
from MirMachineWebapp import user_config as config
def schedule_job(stop):
ongoing = Job.objects.filter(status='ongoing')
    # check if a job is already running
if ongoing.exists():
return
queued = Job.objects.filter(status='queued').order_by('initiated')
# check if queue is empty
if not queued.exists():
if config.AUTO_CLEANUP_TEMP_FILES:
clean_up_temporary_files()
return
next_in_line = queued[0]
next_in_line.status = 'ongoing'
next_in_line.initiated = timezone.now()
next_in_line.save()
announce_status_change(next_in_line)
announce_initiation(next_in_line)
for i in range(len(queued)):
announce_queue_position(queued[i], i+1)
try:
process, job_object = run_mirmachine(next_in_line, stop)
handle_job_end(process, job_object)
except OSError:
next_in_line.status = 'halted'
next_in_line.save()
announce_status_change(next_in_line)
except RuntimeError:
print('Interrupted, exiting thread')
return
schedule_job(stop)
def handle_job_end(process, job_object):
if process.returncode != 0:
job_object.status = 'halted'
else:
job_object.status = 'completed'
job_object.completed = timezone.now()
job_object.save()
announce_completed(job_object)
announce_status_change(job_object)
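A hedged sketch of driving schedule_job from a background thread; the exact contract of the stop argument lives in run_mirmachine, so the threading.Event here is only an assumption:

import threading

stop_event = threading.Event()
worker = threading.Thread(target=schedule_job, args=(stop_event,), daemon=True)
worker.start()
# later, on shutdown:
stop_event.set()
worker.join(timeout=5)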
| 30.309091
| 115
| 0.718056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 0.088782
|
542d0bbb398d02e9717cfe574c3d52048a5a205b
| 836
|
py
|
Python
|
Exercicios Colecoes Python/exercicio 33 - secao 7 - p1.py
|
cristinamais/exercicios_python
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
[
"MIT"
] | null | null | null |
Exercicios Colecoes Python/exercicio 33 - secao 7 - p1.py
|
cristinamais/exercicios_python
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
[
"MIT"
] | null | null | null |
Exercicios Colecoes Python/exercicio 33 - secao 7 - p1.py
|
cristinamais/exercicios_python
|
8a09b0b68ffaa62d13afb952998e890a79667c7e
|
[
"MIT"
] | null | null | null |
"""
33 - Faca um programa que leia um vetor de 15 posicoes e o compacte, ou seja, elimine as posicoes com valor zero.
Para isso, todos os elementos a frente do valor zero, devem ser movidos uma posicao para tras no vetor.
"""
"""
vetor = []
count = 0
for x in range(1, 16):
vetor.append(int(input(f'Digite o {x}/15: ')))
n = len(vetor)
for i in range(n):
if vetor[i] != 0:
vetor[count] = vetor[i]
count += 1
while n > count:
vetor[count] = 0
count += 1
print(vetor) # [5, 6, 9, 8, 10, 15, 33, 22, 66, 99, 10, 100, 0, 0, 0]
This version pushes the zeros to the back
"""
from itertools import compress
vetor = []
for x in range(1, 16):
vetor.append(int(input(f'Digite o {x}/15: ')))
# using list.count and itertools.compress
y = [0] * vetor.count(0)
y.extend(compress(vetor, vetor))
print(y)
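The trick is that compress(data, selectors) yields only the items whose selector is truthy, so passing the list as its own selector drops the zeros:

from itertools import compress

demo = [5, 0, 6, 0, 9]
print(list(compress(demo, demo)))  # [5, 6, 9] -- zeros are falsy selectors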
| 22
| 113
| 0.626794
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 650
| 0.777512
|
542d7e740031b1e39b6ee826c5f6675358cb832c
| 533
|
py
|
Python
|
multimedia/Pygame/02-plot_pixels.py
|
vicente-gonzalez-ruiz/python-tutorial
|
e6a79510a0b3663786d6476a40e79fc8e8726f61
|
[
"CC0-1.0"
] | 4
|
2017-03-06T09:49:11.000Z
|
2019-10-16T00:09:38.000Z
|
multimedia/Pygame/02-plot_pixels.py
|
vicente-gonzalez-ruiz/python-tutorial
|
e6a79510a0b3663786d6476a40e79fc8e8726f61
|
[
"CC0-1.0"
] | null | null | null |
multimedia/Pygame/02-plot_pixels.py
|
vicente-gonzalez-ruiz/python-tutorial
|
e6a79510a0b3663786d6476a40e79fc8e8726f61
|
[
"CC0-1.0"
] | 7
|
2017-11-02T11:00:30.000Z
|
2020-01-31T22:41:27.000Z
|
import pygame
import my_colors as color
pygame.init()
screen_width = 800
screen_height = 600
screen_size = (screen_width, screen_height)
screen = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Search the green pixel at the coordinates (x=10, y=100)")
running = True
while running:
screen.set_at((1, 1), color.white)
screen.set_at((10, 100), color.green)
pygame.display.update()
event = pygame.event.wait()
if event.type == pygame.QUIT:
running = False
pygame.quit()
print("Goodbye!")
| 24.227273
| 85
| 0.718574
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.125704
|