| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
Sample_Code/Problem_4.py
|
clarkbains/1405_Practice_Problems
| 0
|
12783551
|
<reponame>clarkbains/1405_Practice_Problems
while True:
print ("Please Select an option. Q to Quit")
print ("[1] - Add a course")
print ("[2] - Drop a course")
choice = input ("> ")
if choice == 'q' or choice == 'Q':
break
    try:
        choice = int(choice)
    except ValueError:
        print ("Bad Input")
        continue
if choice == 1:
print ("Enter Course Name: ")
course_name = input(">")
print ("Enter Course ID: ")
course_id = input(">")
print ("Added " + course_name + ", course id is " + course_id)
if choice == 2:
print ("Enter Course Name: ")
course_name = input(">")
print ("Removed " + course_name + " from your course")
if choice < 1 or choice > 2:
print ("Bad Input")
| 4.15625
| 4
|
xrpl/core/binarycodec/types/blob.py
|
SubCODERS/xrpl-py
| 1
|
12783552
|
"""
Codec for serializing and deserializing blob fields.
See `Blob Fields <https://xrpl.org/serialization.html#blob-fields>`_
"""
from __future__ import annotations
from typing import Type
from xrpl.core.binarycodec.binary_wrappers.binary_parser import BinaryParser
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
from xrpl.core.binarycodec.types.serialized_type import SerializedType
class Blob(SerializedType):
"""
Codec for serializing and deserializing blob fields.
See `Blob Fields <https://xrpl.org/serialization.html#blob-fields>`_
"""
def __init__(self: Blob, buffer: bytes) -> None:
"""Construct a new Blob type from a ``bytes`` value."""
super().__init__(buffer)
@classmethod
def from_parser(cls: Type[Blob], parser: BinaryParser, length_hint: int) -> Blob:
"""
Defines how to read a Blob from a BinaryParser.
Args:
parser: The parser to construct a Blob from.
length_hint: The number of bytes to consume from the parser.
Returns:
The Blob constructed from parser.
"""
return cls(parser.read(length_hint))
@classmethod
def from_value(cls: Type[Blob], value: str) -> Blob:
"""
Create a Blob object from a hex-string.
Args:
value: The hex-encoded string to construct a Blob from.
Returns:
The Blob constructed from value.
Raises:
XRPLBinaryCodecException: If the Blob can't be constructed from value.
"""
if not isinstance(value, str):
raise XRPLBinaryCodecException(
"Invalid type to construct a Blob: expected str, received "
f"{value.__class__.__name__}."
)
        return cls(bytes.fromhex(value))
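# A brief usage sketch (not part of the original module); the hex payload below
# is an arbitrary example value.
if __name__ == "__main__":
    blob = Blob.from_value("0A0B0C")          # hex string -> Blob
    print(blob)
    try:
        Blob.from_value(123)                  # non-str input raises the codec exception
    except XRPLBinaryCodecException as err:
        print(err)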
| 2.75
| 3
|
za/minstData/showImage.py
|
hth945/pytest
| 0
|
12783553
|
<filename>za/minstData/showImage.py
#%%
import cv2
from cv2 import cv2
import numpy as np
import shutil
import os
import random
from PIL import Image
import matplotlib.pyplot as plt
rootPath = '..\..\dataAndModel\data\mnist\\'
ID = random.randint(0, 10)
label_txt = rootPath + 'objtrainlab.txt'
image_info = open(label_txt).readlines()[ID].split()
print(image_info)
image_path = image_info[0]
image = cv2.imread(image_path)
for bbox in image_info[1:]:
bbox = bbox.split(",")
image = cv2.rectangle(image,(int(float(bbox[0])),
int(float(bbox[1]))),
(int(float(bbox[2])),
int(float(bbox[3]))), (255,0,0), 2)
plt.imshow(image)
plt.show()
# %%
image
# %%
| 2.84375
| 3
|
scripts/startup.py
|
red61/docker-nginx-loadbalancer
| 91
|
12783554
|
#!/usr/bin/python
'''
This script will be run on start-up to evaluate the Docker link environment
variables and automatically generate upstream and location modules for
reverse-proxying and load-balancing.
It looks for environment variables in the following formats:
<service-name>_<service-instance-id>_PORT_80_TCP_ADDR=x.x.x.x
<service-name>_PATH=<some path>
Optional/Conditional environment variables:
<service-name>_BALANCING_TYPE=[ip_hash|least_conn] (optional)
<service-name>_EXPOSE_PROTOCOL=[http|https|both] (optional - default: http)
<service-name>_HOSTNAME=<vhostname> (required if <service-name>_EXPOSE_PROTOCOL is https or both)
<env-formatted-vhostname>_SSL_CERTIFICATE=<something.pem> (required if the vhost will need ssl support)
<env-formatted-vhostname>_SSL_CERTIFICATE_KEY=<something.key> (required if the vhost will need ssl support)
And will build an nginx config file.
Example:
# automatically created environment variables (docker links)
WEBAPP_1_PORT_80_TCP_ADDR=192.168.0.2
WEBAPP_2_PORT_80_TCP_ADDR=192.168.0.3
WEBAPP_3_PORT_80_TCP_ADDR=192.168.0.4
API_1_PORT_80_TCP_ADDR=192.168.0.5
API_2_PORT_80_TCP_ADDR=192.168.0.6
# special environment variables
WEBAPP_PATH=/
WEBAPP_BALANCING_TYPE=ip_hash
WEBAPP_EXPOSE_PROTOCOL=both
WEBAPP_HOSTNAME=www.example.com
API_PATH=/api/
API_EXPOSE_PROTOCOL=https
API_HOSTNAME=www.example.com
WWW_EXAMPLE_COM_SSL_CERTIFICATE=something.pem
WWW_EXAMPLE_COM_SSL_CERTIFICATE_KEY=something.key
Generates (/etc/nginx/sites-enabled/proxy.conf):
upstream webapp {
ip_hash;
server 192.168.0.2;
server 192.168.0.3;
server 192.168.0.4;
}
upstream api {
server 192.168.0.5;
server 192.168.0.6;
}
server {
listen 80;
listen [::]:80 ipv6only=on;
server_name www.example.com;
root /usr/share/nginx/html;
location / {
proxy_pass http://webapp:80;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
server {
listen 443;
server_name www.example.com;
root html;
index index.html index.htm;
ssl on;
ssl_certificate ssl/something.pem;
ssl_certificate_key ssl/something.key;
ssl_session_timeout 5m;
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers "HIGH:!aNULL:!MD5 or HIGH:!aNULL:!MD5:!3DES";
ssl_prefer_server_ciphers on;
root /usr/share/nginx/html;
location / {
proxy_pass http://webapp:80;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
location /api/ {
proxy_pass http://api:80;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
}
}
'''
import sys
import os
import re
import argparse
import json
import textwrap
import subprocess
from jinja2 import Environment, FileSystemLoader
env = Environment(
loader=FileSystemLoader(
os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
)
)
parser = argparse.ArgumentParser(
description='Docker-based Nginx Load Balancer Startup Script',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-t',
'--test',
action='store',
choices=['conf', 'parse'],
help=textwrap.dedent('''\
Test against your environment variables
without modifying the config files.
'conf' - Preview the generated Nginx config file's contents.
'parse' - View a detailed parsing of the environment.
'''))
parser.add_argument(
'-o',
'--output-file',
action='store',
help='Location where the generated Nginx config file will be placed.',
default='/etc/nginx/sites-enabled/proxy.conf'
)
def build_conf(hosts, services):
template = env.get_template('proxy.conf')
return template.render(hosts=hosts, services=services)
def parse_env(env=os.environ):
prefix = env.get('ENV_PREFIX')
if prefix:
prefix = prefix.upper() + '_'
print 'Using prefix: %s' % (prefix)
else:
prefix = ''
print 'No fig prefix found.'
link_pattern = re.compile(r'^%s(?P<service_name>[a-zA-Z_]+)(_[\d]+)?_PORT_(?P<service_port>[\d]+)_TCP_ADDR$' % (prefix))
services = {}
hosts = {}
# find services and collect addresses
for var, value in env.iteritems():
m = link_pattern.match(var)
if m:
service_name = m.group('service_name')
service_port = int(m.group('service_port'))
if service_port != 80:
service_port = env.get('%s_REMOTE_PORT' % (service_name))
if not service_port:
continue
if service_name in services:
services[service_name]['addresses'].append(value)
else:
print 'Found service: %s' % service_name
services[service_name] = {
'addresses': [value],
'port': service_port,
'balancing_type': None,
}
# find service details
for service_name, value in services.iteritems():
path = value['location'] = env.get('%s_PATH' % (service_name))
remote_path = value['remote_path'] = env.get('%s_REMOTE_PATH' % (service_name), '/')
balancing_type = value['balancing_type'] = env.get('%s_BALANCING_TYPE' % (service_name))
expose_protocol = value['expose_protocol'] = env.get('%s_EXPOSE_PROTOCOL' % (service_name), 'http')
hostname = value['host'] = env.get('%s_HOSTNAME' % (service_name))
assert path != None, 'Could not find %s_PATH environment variable for service %s.' % (service_name, service_name)
assert balancing_type in [None, 'ip_hash', 'least_conn'], 'Invalid value for %s_BALANCING_TYPE: %s, must be "ip_hash", "least_conn", or nonexistant.' % (service_name, balancing_type)
assert expose_protocol in ['http', 'https', 'both'], 'Invalid value for %s_EXPOSE_PROTOCOL: %s, must be "http", "https", or "both"' % (service_name, expose_protocol)
assert expose_protocol == 'http' or hostname != None, 'With %s_EXPOSE_PROTOCOL=%s, you must supply %s_HOSTNAME.' % (service_name, expose_protocol, service_name)
if hostname == None:
hostname = value['host'] = '0.0.0.0'
if hosts.get(hostname) == None:
hosts[hostname] = {
'protocols': {'http': False, 'https': False},
'services': []
}
hosts[hostname]['services'].append(service_name)
if expose_protocol == 'both':
hosts[hostname]['protocols']['http'] = True
hosts[hostname]['protocols']['https'] = True
else:
hosts[hostname]['protocols'][expose_protocol] = True
for hostname, value in hosts.iteritems():
formatted_hostname = format_hostname(hostname)
access_log = value['access_log'] = env.get('%s_ACCESS_LOG' % (service_name), '/dev/stdout')
log_level = value['log_level'] = env.get('%s_LOG_LEVEL' % (service_name), 'error')
error_log = value['error_log'] = env.get('%s_ERROR_LOG' % (service_name), '/dev/stdout')
        assert access_log in ['/dev/stdout', 'off'], 'Invalid value for %s_ACCESS_LOG: %s, must be "/dev/stdout" or "off"' % (service_name, access_log)
assert log_level in [None, 'emerg', 'alert', 'crit', 'error', 'warn', 'notice', 'info', 'debug'], 'Invalid value for %s_LOG_LEVEL: %s, must be "emerg", "alert", "crit", "error", "warn", "notice", "info", "debug" or nonexistant.' % (service_name, log_level)
assert error_log in ['/dev/stdout', '/dev/null'], 'Invalid value for %s_ERROR_LOG: %s, must be "/dev/stdout" or "/dev/null"' % (service_name, error_log)
if value['protocols']['https']:
ssl_certificate = env.get('%s_SSL_CERTIFICATE' % formatted_hostname)
ssl_certificate_key = env.get('%s_SSL_CERTIFICATE_KEY' % formatted_hostname)
ssl_dhparam = env.get('%s_SSL_DHPARAM' % formatted_hostname)
ssl_ciphers = env.get('%s_SSL_CIPHERS' % formatted_hostname)
ssl_protocols = env.get('%s_SSL_PROTOCOLS' % formatted_hostname)
assert ssl_certificate, 'SSL certificate .pem not provided for https host: %s, please set %s_SSL_CERTIFICATE' % (hostname, formatted_hostname)
assert ssl_certificate_key, 'SSL certificate .key not provided for https host: %s, please set %s_SSL_CERTIFICATE_KEY' % (hostname, formatted_hostname)
assert ssl_dhparam, 'SSL dhparam .pem not provided for https host: %s, please set %s_SSL_DHPARAM' % (hostname, formatted_hostname)
assert os.path.isfile(os.path.join('/etc/nginx/', ssl_certificate)), 'SSL certificate file: %s could not be found for %s' % (ssl_certificate, hostname)
assert os.path.isfile(os.path.join('/etc/nginx/', ssl_certificate_key)), 'SSL certificate file: %s could not be found for %s' % (ssl_certificate_key, hostname)
            assert os.path.isfile(os.path.join('/etc/nginx/', ssl_dhparam)), 'SSL dhparam file: %s could not be found for %s' % (ssl_dhparam, hostname)
assert ssl_ciphers, 'SSL ciphers have not been provided for https host: %s, please set %s_SSL_CIPHERS (e.g. %s_SSL_CIPHERS="ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256")' % (hostname, formatted_hostname)
assert ssl_protocols, 'SSL protocols have not been provided for https host: %s, please set %s_SSL_PROTOCOLS (e.g. %s_SSL_PROTOCOLS=TLSv1.2)' % (hostname, formatted_hostname)
value['ssl_certificate'] = ssl_certificate
value['ssl_certificate_key'] = ssl_certificate_key
value['ssl_dhparam'] = ssl_dhparam
value['ssl_ciphers'] = ssl_ciphers
value['ssl_protocols'] = ssl_protocols
return hosts, services
def format_hostname(hostname):
return hostname.replace('.', '_').upper()
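# Quick illustration (not part of the original script): the hostname-to-prefix
# mapping used when looking up the *_SSL_CERTIFICATE variables above, e.g.
# format_hostname('www.example.com') == 'WWW_EXAMPLE_COM'.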
if __name__ == "__main__":
args = parser.parse_args()
hosts, services = parse_env()
if args.test == 'parse':
print "Services:"
print "%s\n" % json.dumps(services, sort_keys=True, indent=4, separators=(',', ': '))
print "Hosts:"
print "%s" % json.dumps(hosts, sort_keys=True, indent=4, separators=(',', ': '))
exit(0)
conf_contents = build_conf(hosts, services)
sys.stdout.flush()
if args.test == 'conf':
print "Contents of proxy.conf:%s" % conf_contents.replace('\n', '\n ')
exit(0)
f = open(args.output_file, 'w')
f.write(conf_contents)
f.close()
sys.stdout.write("Starting Nginx...\n")
sys.stdout.flush()
p = subprocess.Popen(['nginx'], stdout=subprocess.PIPE, bufsize=0)
while True:
char = p.stdout.read(1)
sys.stdout.write(char)
sys.stdout.flush()
if char == '' and p.poll() != None:
break
p.stdout.close()
| 2.421875
| 2
|
flask_mongodb/serializers/meta.py
|
juanmanuel96/flask-mongodb
| 0
|
12783555
|
<reponame>juanmanuel96/flask-mongodb<filename>flask_mongodb/serializers/meta.py<gh_stars>0
from wtforms.meta import DefaultMeta
class SerializerMeta(DefaultMeta):
def render_field(self, field, render_kw):
"""
render_field allows customization of how widget rendering is done.
The default implementation calls ``field.widget(field, **render_kw)``
"""
        return super().render_field(field, render_kw)
# -- CSRF
_csrf = False
_csrf_field_name = "serializer_csrf"
_csrf_secret = None
_csrf_context = None
_csrf_class = None
def build_csrf(self, form):
"""
Build a CSRF implementation. This is called once per form instance.
The default implementation builds the class referenced to by
:attr:`csrf_class` with zero arguments. If `csrf_class` is ``None``,
will instead use the default implementation
:class:`wtforms.csrf.session.SessionCSRF`.
:param form: The form.
:return: A CSRF implementation.
"""
if self.csrf_class is not None:
return self.csrf_class()
from wtforms.csrf.session import SessionCSRF
return SessionCSRF()
@property
def csrf(self):
return self._csrf
@property
def csrf_field_name(self):
return self._csrf_field_name
@property
def csrf_secret(self):
return self._csrf_secret
@property
def csrf_context(self):
return self._csrf_context
@property
def csrf_class(self):
return self._csrf_class
| 2.578125
| 3
|
blog/models.py
|
lokeshmeher/django-blogging-platform
| 0
|
12783556
|
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
import time
def user_media_path(instance, filename):
"""
Returns the path to where a user uploaded file is saved.
Has the form: user_<id>/YYYY/MMM/filename
"""
return 'user_{0}/{1}/{2}/{3}'.format(instance.author.id,
time.strftime('%Y'),
time.strftime('%b'),
filename)
@python_2_unicode_compatible
class Article(models.Model):
# Editable fields:
title = models.CharField(max_length=100)
content = models.TextField()
image = models.ImageField(upload_to=user_media_path, null=True, blank=True)
# Non-editable fields:
slug = models.SlugField(max_length=50, unique=True)
published_on = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE)
word_count = models.PositiveIntegerField()
# readtimes- slow: 100 wpm, avg: 130 wpm, fast: 160wpm
read_time_in_mins = models.PositiveIntegerField()
# # `word_count` and `read_time_in_mins` will be (re)assigned
# # everytime the article is saved.
# def save(self, *args, **kwargs):
# self.word_count = len(self.content.split())
# self.read_time_in_mins = self.word_count / 130 # assuming avg reading speed.
# return super(Article, self).save(*args, **kwargs)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('blog:read_post', kwargs={'slug': self.slug})
class Meta:
ordering = ['-published_on']
| 2.21875
| 2
|
getresponse/__init__.py
|
OpenAT/getresponse-python
| 3
|
12783557
|
<gh_stars>1-10
from getresponse.client import GetResponse
from getresponse.excs import UniquePropertyError
| 1.34375
| 1
|
voctogui/voctogui.py
|
0xflotus/voctomix
| 521
|
12783558
|
#!/usr/bin/env python3
import gi
# import GStreamer and GLib-Helper classes
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('GstNet', '1.0')
from gi.repository import Gtk, Gdk, Gst, GstVideo
import signal
import logging
import sys
import os
sys.path.insert(0, '.')
from vocto.debug import gst_log_messages
# check min-version
minGst = (1, 5)
minPy = (3, 0)
Gst.init([])
if Gst.version() < minGst:
raise Exception('GStreamer version', Gst.version(),
'is too old, at least', minGst, 'is required')
if sys.version_info < minPy:
raise Exception('Python version', sys.version_info,
'is too old, at least', minPy, 'is required')
Gdk.init([])
Gtk.init([])
# select Adwaita:Dark theme
settings = Gtk.Settings.get_default()
settings.set_property("gtk-theme-name", "Adwaita")
settings.set_property("gtk-application-prefer-dark-theme", True) # if you want use dark theme, set second arg to True
# main class
class Voctogui(object):
def __init__(self):
self.log = logging.getLogger('Voctogui')
from lib.args import Args
from lib.ui import Ui
# Load UI file
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui/voctogui.ui')
self.log.info('Loading ui-file from file %s', path)
if os.path.isfile(path):
self.ui = Ui(path)
else:
raise Exception("Can't find any .ui-Files to use in {}".format(path))
#
# search for a .css style sheet file and load it
#
css_provider = Gtk.CssProvider()
context = Gtk.StyleContext()
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui/voctogui.css')
self.log.info('Loading css-file from file %s', path)
if os.path.isfile(path):
css_provider.load_from_path(path)
else:
raise Exception("Can't find .css file '{}'".format(path))
context.add_provider_for_screen(
Gdk.Screen.get_default(),
css_provider,
Gtk.STYLE_PROVIDER_PRIORITY_USER
)
self.ui.setup()
def run(self):
self.log.info('Setting UI visible')
self.ui.show()
try:
self.log.info('Running.')
Gtk.main()
self.log.info('Connection lost. Exiting.')
except KeyboardInterrupt:
self.log.info('Terminated via Ctrl-C')
def quit(self):
self.log.info('Quitting.')
Gtk.main_quit()
# run mainclass
def main():
# parse command-line args
from lib import args
args.parse()
from lib.args import Args
docolor = (Args.color == 'always') \
or (Args.color == 'auto' and sys.stderr.isatty())
from lib.loghandler import LogHandler
handler = LogHandler(docolor, Args.timestamp)
logging.root.addHandler(handler)
levels = { 3 : logging.DEBUG, 2 : logging.INFO, 1 : logging.WARNING, 0 : logging.ERROR }
logging.root.setLevel(levels[Args.verbose])
gst_levels = { 3 : Gst.DebugLevel.DEBUG, 2 : Gst.DebugLevel.INFO, 1 : Gst.DebugLevel.WARNING, 0 : Gst.DebugLevel.ERROR }
gst_log_messages(gst_levels[Args.gstreamer_log])
# make killable by ctrl-c
logging.debug('setting SIGINT handler')
signal.signal(signal.SIGINT, signal.SIG_DFL)
logging.info('Python Version: %s', sys.version_info)
logging.info('GStreamer Version: %s', Gst.version())
logging.debug('loading Config')
from lib import config
config.load()
from lib.config import Config
    # establish a synchronous connection to the server
import lib.connection as Connection
Connection.establish(Config.getHost())
# fetch config from server
Config.fetchServerConfig()
# Warn when connecting to a non-local core without preview-encoders enabled
# The list-comparison is not complete
# (one could use a local hostname or the local system ip),
# but it's only here to warn that one might be making a mistake
localhosts = ['::1',
'127.0.0.1',
'localhost']
if not Config.getPreviewsEnabled() and Config.getHost() not in localhosts:
logging.warning(
'Connecting to `%s` (which looks like a remote host) '
'might not work without enabeling the preview encoders '
'(set `[previews] enabled=true` on the core) or it might saturate '
'your ethernet link between the two machines.',
Config.getHost()
)
import lib.connection as Connection
import lib.clock as ClockManager
# obtain network-clock
ClockManager.obtainClock(Connection.ip)
# switch connection to nonblocking, event-driven mode
Connection.enterNonblockingMode()
# init main-class and main-loop
# (this binds all event-hander on the Connection)
logging.debug('initializing Voctogui')
voctogui = Voctogui()
# start the Mainloop and show the Window
logging.debug('running Voctogui')
voctogui.run()
if __name__ == '__main__':
try:
main()
except RuntimeError as e:
logging.error(str(e))
sys.exit(1)
| 2.15625
| 2
|
setup.py
|
Soundphy/diapason
| 1
|
12783559
|
"""
Setup module.
"""
import re
from os.path import join as pjoin
from setuptools import setup
with open(pjoin('diapason', '__init__.py')) as f:
line = next(l for l in f if l.startswith('__version__'))
version = re.match('__version__ = [\'"]([^\'"]+)[\'"]', line).group(1)
setup(
name='diapason',
version=version,
description='Python module to deal with note sounds.',
long_description='''The diapason Python module can be used to deal with
note sounds: WAV generation, note frequency calculation...''',
url='https://github.com/Soundphy/diapason',
author='<NAME>',
author_email='<EMAIL>',
license='License :: OSI Approved :: BSD License',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Topic :: Education',
'Topic :: Artistic Software',
'Topic :: Multimedia :: Sound/Audio :: Sound Synthesis',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
keywords='diapason',
packages=['diapason'],
install_requires=['numpy', 'scipy'],
extras_require={
'dev': [],
'test': ['tox'],
'docs': ['sphinx', 'numpydoc', 'sphinx_rtd_theme'],
},
)
| 1.554688
| 2
|
0/divide_two_int.py
|
IronCore864/leetcode
| 4
|
12783560
|
class Solution(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
maxint = 2147483647
if divisor == 0:
return maxint
if dividend == 0:
return 0
if divisor == 1:
return dividend
if divisor == -1:
return -dividend if -dividend < maxint else maxint
if (dividend < 0 and divisor < 0) or (dividend > 0 and divisor > 0):
negative = False
else:
negative = True
dividend = abs(dividend)
divisor = abs(divisor)
res = 0
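        # Repeated doubling: grow `tmp` (a multiple of the divisor) and `count`
        # together until tmp would exceed the remaining dividend, then take the
        # previous multiple. Example: 10 / 3 subtracts 6 (count 2), then 3
        # (count 1), giving res = 3.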
while dividend >= divisor:
tmp = divisor
count = 1
while tmp <= dividend:
count <<= 1
tmp <<= 1
res += count >> 1
dividend -= tmp >> 1
return res if not negative else -res
s = Solution()
print s.divide(2, 2)
print s.divide(2147483647, 2)
| 3.375
| 3
|
bin/tryprev.py
|
yafeng/nf-core-dda-quant-proteomics
| 5
|
12783561
|
#!/usr/bin/env python3
import os
import sys
from Bio import SeqIO
from Bio.Seq import Seq
def insilico_trypsinized(seq) :
segments = []
seg = []
for i in range(len(seq)) :
if seq[i] in ('K','R') :
if i == len(seq)-1 :
seg.append(seq[i])
elif seq[i+1] == 'P' :
seg.append(seq[i])
else :
#found first tryptic site
if len(seg) :
segments.append(seg)
segments.append( [seq[i]] )
seg = []
else :
seg.append(seq[i])
if len(seg) :
segments.append(seg)
segs_len = sum([len(x) for x in segments])
try :
assert(segs_len == len(seq))
except Exception as e :
segged_seq = []
for s in segments :
segged_seq.extend(s)
print >> sys.stderr , "lens:" , len(seq), len(segged_seq)
print >> sys.stderr , "original_seq:"
print >> sys.stderr , "".join(seq)
print >> sys.stderr , "new_seq:"
print >> sys.stderr , "".join(segged_seq)
raise(e)
return segments
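# tryp_rev builds a "pseudo-reversed" decoy sequence: each tryptic peptide from
# insilico_trypsinized() is reversed in place while its C-terminal K/R residue
# stays put, so the decoy keeps the target's length and cleavage pattern.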
def tryp_rev(seq):
segments = insilico_trypsinized(seq)
final_seq = []
for s in segments :
if len(s) > 1 :
if s[-1] in ['R', 'K']:
new_s = s[:-1]
new_s.reverse()
new_s.append(s[-1])
else:
new_s = s
new_s.reverse()
else :
new_s = s
final_seq.extend(new_s)
seq.seq = Seq(''.join(final_seq))
seq.id = 'decoy_{}'.format(seq.name)
return seq
def main():
fa = sys.argv[1]
out = os.path.join(os.path.split(fa)[0], 'decoy_{}'.format(os.path.basename(fa)))
print(out)
with open(fa) as fp, open(out, 'w') as wfp:
seqs = SeqIO.parse(fp, 'fasta')
SeqIO.write((tryp_rev(x) for x in seqs), wfp, 'fasta')
if __name__ == '__main__':
main()
| 2.265625
| 2
|
python-algorithm/leetcode/problem_973.py
|
isudox/nerd-algorithm
| 5
|
12783562
|
<gh_stars>1-10
"""973. K Closest Points to Origin
https://leetcode.com/problems/k-closest-points-to-origin/
We have a list of points on the plane. Find the K closest points to the
origin (0, 0).
(Here, the distance between two points on a plane is the Euclidean distance.)
You may return the answer in any order. The answer is guaranteed to be unique
(except for the order that it is in.)
Example 1:
Input: points = [[1,3],[-2,2]], K = 1
Output: [[-2,2]]
Explanation:
The distance between (1, 3) and the origin is sqrt(10).
The distance between (-2, 2) and the origin is sqrt(8).
Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.
We only want the closest K = 1 points from the origin, so the answer is
just [[-2,2]].
Example 2:
Input: points = [[3,3],[5,-1],[-2,4]], K = 2
Output: [[3,3],[-2,4]]
(The answer [[-2,4],[3,3]] would also be accepted.)
Note:
1 <= K <= points.length <= 10000
-10000 < points[i][0] < 10000
-10000 < points[i][1] < 10000
"""
from typing import List
class Solution:
def k_closest(self, points: List[List[int]], k: int) -> List[List[int]]:
store = {}
for point in points:
distance = point[0] ** 2 + point[1] ** 2
if distance in store:
store[distance].append(point)
else:
store[distance] = [point]
ans = []
distances = sorted(store.keys())
for distance in distances:
for point in store[distance]:
if k == 0:
return ans
ans.append(point)
k -= 1
return ans
def k_closest_2(self, points, k):
"""God damn it!"""
points.sort(key=lambda point: point[0] ** 2 + point[1] ** 2)
return points[:k]
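# A small driver (not part of the original file) exercising both methods with
# the examples from the docstring above.
if __name__ == "__main__":
    s = Solution()
    print(s.k_closest([[1, 3], [-2, 2]], 1))             # [[-2, 2]]  (Example 1)
    print(s.k_closest_2([[3, 3], [5, -1], [-2, 4]], 2))  # [[3, 3], [-2, 4]]  (Example 2)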
| 3.8125
| 4
|
Intermediate/27/widget_examples.py
|
Matthew1906/100DaysOfPython
| 1
|
12783563
|
<reponame>Matthew1906/100DaysOfPython
from tkinter import *
window = Tk()
window.title("Widget Examples")
window.minsize(width = 500, height=400)
label = Label(text="This is a new text", font = ['Arial',12,'normal'])
button = Button(text="Click Me")
text_input = Entry()
text_input.insert(END, string = 'Some text to input')
# END -> index
text_area = Text(width = 30, height = 5) #text area
text_area.focus()
text_area.insert(END, 'Example of text area')
def spinbox_used():
print(spinbox.get())
spinbox = Spinbox(from_= 0, to=10, command = spinbox_used) # counter
# scale = Slider
def scale_used(value):
print(value)
scale = Scale(from_=0, to=100, command=scale_used)
# Checkbutton = checkbox
def checkbox_checked():
print(checked_state.get())
checked_state = IntVar()
checkbox = Checkbutton(text="is on?", variable = checked_state, command = checkbox_checked)
# Radiobutton = radio button
def radio_used():
print(radio_state.get())
radio_state = IntVar()
radio1 = Radiobutton(text = "Option 1", value = 1, variable = radio_state, command = radio_used)
radio2 = Radiobutton(text = "Option 2", value = 2, variable = radio_state, command = radio_used)
# Listbox = well you get the point
def listbox_used(event):
print(listbox.get(listbox.curselection()))
listbox = Listbox(height=4)
fruits = ['Apple','Pear','Orange','Banana']
for item in fruits:
listbox.insert(fruits.index(item),item)
listbox.bind("<<ListboxSelect>>", listbox_used)
# Pack the components
label.pack()
button.pack()
text_input.pack()
text_area.pack()
spinbox.pack()
scale.pack()
checkbox.pack()
radio1.pack()
radio2.pack()
listbox.pack()
# Loop the window
window.mainloop()
| 3.609375
| 4
|
test/functional/test_payfac_legalEntity.py
|
Vantiv/payfac-mp-sdk-python
| 1
|
12783564
|
<filename>test/functional/test_payfac_legalEntity.py
import unittest
from payfacMPSdk import payfac_legalEntity, generatedClass, utils
from dateutil.parser import parse
class TestLegalEntity(unittest.TestCase):
def test_get_by_legalEntityId(self):
response = payfac_legalEntity.get_by_legalEntityId("1000293")
self.assertEquals("1000293", response["legalEntityId"])
self.assertEquals("123456789",response["taxId"])
self.assertIsNotNone(response["transactionId"])
def test_put_by_legalEntityId(self):
legalEntityUpdateRequest = generatedClass.legalEntityUpdateRequest.factory()
address = generatedClass.address.factory()
address.set_streetAddress1("LE Street Address 1")
address.set_streetAddress2("LE Street Address 2")
address.set_city("LE City")
address.set_stateProvince("MA")
address.set_postalCode("01730")
address.set_countryCode("USA")
legalEntityUpdateRequest.set_address(address)
legalEntityUpdateRequest.set_contactPhone("9785550101")
legalEntityUpdateRequest.set_doingBusinessAs("Other Name Co.")
legalEntityUpdateRequest.set_annualCreditCardSalesVolume(10000000)
legalEntityUpdateRequest.set_hasAcceptedCreditCards("true")
principal = generatedClass.legalEntityPrincipalUpdatable.factory()
principal.set_principalId(9)
principal.set_title("CEO")
principal.set_emailAddress("<EMAIL>")
principal.set_contactPhone("9785551234")
principal.set_address(address)
backgroundCheckField = generatedClass.principalBackgroundCheckFields.factory()
backgroundCheckField.set_firstName("p first")
backgroundCheckField.set_lastName("p last")
backgroundCheckField.set_ssn("123459876")
backgroundCheckField.set_dateOfBirth(parse("1980-10-12T12:00:00-06:00"))
backgroundCheckField.set_driversLicense("892327409832")
backgroundCheckField.set_driversLicenseState("MA")
principal.set_backgroundCheckFields(backgroundCheckField)
legalEntityUpdateRequest.set_principal(principal)
backgroundCheckFields = generatedClass.legalEntityBackgroundCheckFields.factory()
backgroundCheckFields.set_legalEntityName("Company Name")
backgroundCheckFields.set_legalEntityType("INDIVIDUAL_SOLE_PROPRIETORSHIP")
backgroundCheckFields.set_taxId("123456789")
legalEntityUpdateRequest.set_backgroundCheckFields(backgroundCheckFields)
legalEntityUpdateRequest.set_legalEntityOwnershipType("PUBLIC")
legalEntityUpdateRequest.set_yearsInBusiness("10")
response = payfac_legalEntity.put_by_legalEntityId("1000293", legalEntityUpdateRequest)
self.assertEquals("1000293", response["legalEntityId"])
self.assertIsNotNone(response["transactionId"])
self.assertEquals(10, response["responseCode"])
self.assertEquals("Approved", response["responseDescription"])
principal2 = generatedClass.legalEntityPrincipalUpdatable.factory()
principal2.set_title("CEO")
principal2.set_emailAddress("<EMAIL>")
principal2.set_contactPhone("9785551234")
principal2.set_address(address)
legalEntityUpdateRequest.set_principal(principal2)
self.assertRaises(utils.PayfacSchemaError, payfac_legalEntity.put_by_legalEntityId, "1000293",legalEntityUpdateRequest)
def test_post_by_legalEntity(self):
legalEntityCreateRequest = generatedClass.legalEntityCreateRequest.factory()
legalEntityCreateRequest.set_legalEntityName("Legal Entity Name")
legalEntityCreateRequest.set_legalEntityType("CORPORATION")
legalEntityCreateRequest.set_legalEntityOwnershipType("PUBLIC")
legalEntityCreateRequest.set_doingBusinessAs("Alternate Business Name")
legalEntityCreateRequest.set_taxId("123456789")
legalEntityCreateRequest.set_contactPhone("7817659800")
legalEntityCreateRequest.set_annualCreditCardSalesVolume("80000000")
legalEntityCreateRequest.set_hasAcceptedCreditCards("true")
address = generatedClass.address.factory()
address.set_streetAddress1("Street Address 1")
address.set_streetAddress2("Street Address 2")
address.set_city("City")
address.set_stateProvince("MA")
address.set_postalCode("01730")
address.set_countryCode("USA")
legalEntityCreateRequest.set_address(address)
principal = generatedClass.legalEntityPrincipal.factory()
principal.set_title("Chief Financial Officer")
principal.set_firstName("p first")
principal.set_lastName("p last")
principal.set_emailAddress("emailAddress")
principal.set_ssn("123459876")
principal.set_contactPhone("7817659800")
principal.set_dateOfBirth(parse("1980-10-11T12:00:00-06:00"))
principal.set_driversLicense("892327409832")
principal.set_driversLicenseState("MA")
principal.set_address(address)
principal.set_stakePercent(33)
legalEntityCreateRequest.set_principal(principal)
legalEntityCreateRequest.set_yearsInBusiness("12")
response = payfac_legalEntity.post_by_legalEntity(legalEntityCreateRequest)
self.assertIsNotNone(response["legalEntityId"])
self.assertIsNotNone(response["transactionId"])
self.assertEquals(10, response["responseCode"])
self.assertEquals("Approved", response["responseDescription"])
address2 = generatedClass.address.factory()
address.set_streetAddress1("Street Address 1")
address.set_streetAddress2("Street Address 2")
legalEntityCreateRequest.set_address(address2)
self.assertRaises(utils.PayfacSchemaError, payfac_legalEntity.post_by_legalEntity, legalEntityCreateRequest)
| 2.609375
| 3
|
pinnacle/resources/baseresource.py
|
leokan92/pinnacle-modified
| 1
|
12783565
|
<gh_stars>1-10
import datetime
import json
from ..compat import basestring, integer_types
class BaseResource(object):
"""
Data structure based on a becket resource
https://github.com/phalt/beckett
"""
class Meta:
identifier = 'id' # The key with which you uniquely identify this resource.
attributes = {'id': 'id'} # Acceptable attributes that you want to display in this resource.
sub_resources = {} # sub resources are complex attributes within a resource
datetime_attributes = () # Attributes to be converted to datetime
def __init__(self, **kwargs):
self._datetime_sent = kwargs.pop('date_time_sent', None)
self.streaming_unique_id = kwargs.pop('streaming_unique_id', None)
self.publish_time = kwargs.pop('publish_time', None)
now = datetime.datetime.utcnow()
self.datetime_created = now
self._datetime_updated = now
self._sub_resource_map = getattr(self.Meta, 'sub_resources', {})
self._data = kwargs
self.set_attributes(**kwargs)
self._data['Latency'] = self.elapsed_time
def set_sub_resources(self, **kwargs):
"""
For each sub resource assigned to this resource, generate the
sub resource instance and set it as an attribute on this instance.
"""
for attribute_name, resource in self._sub_resource_map.items():
sub_attr = kwargs.get(attribute_name)
if sub_attr:
if isinstance(sub_attr, list):
value = [resource(**x) for x in sub_attr] # A list of sub resources is supported
else:
value = resource(**sub_attr) # So is a single resource
setattr(self, resource.Meta.identifier, value)
else:
setattr(self, resource.Meta.identifier, []) # [] = Empty resource
def set_attributes(self, **kwargs):
"""
Set the resource attributes from the kwargs.
Only sets items in the `self.Meta.attributes` white list.
Subclass this method to customise attributes.
"""
if self._sub_resource_map:
self.set_sub_resources(**kwargs)
for key in self._sub_resource_map.keys():
kwargs.pop(key, None) # Don't let these attributes be overridden later
for field, value in kwargs.items():
if field in self.Meta.attributes:
if field in self.Meta.datetime_attributes:
value = self.strip_datetime(value) or value
setattr(self, self.Meta.attributes[field], value)
def json(self):
return self._data
def message(self):
return json.dumps(self._data)
@staticmethod
def strip_datetime(value):
"""
Converts value to datetime if string or int.
"""
if isinstance(value, basestring):
try:
return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
return
elif isinstance(value, integer_types):
try:
return datetime.datetime.utcfromtimestamp(value / 1e3)
except (ValueError, OverflowError, OSError):
return
@property
def elapsed_time(self):
"""
Elapsed time between datetime sent and datetime created
"""
if self._datetime_sent:
return (self.datetime_created-self._datetime_sent).total_seconds()
def __getattr__(self, item):
"""
If item is an expected attribute in Meta
return None, if not raise Attribute error.
"""
if item in self.Meta.attributes.values():
return
else:
return self.__getattribute__(item)
def __repr__(self):
return '<%s>' % self.__class__.__name__
def __str__(self):
return self.__class__.__name__
| 2.796875
| 3
|
macaca/locator.py
|
macacajs/wd.py
| 35
|
12783566
|
<reponame>macacajs/wd.py
#
# https://w3c.github.io/webdriver/webdriver-spec.html#locator-strategies
# Locator according to WebDriver Protocol
#
from enum import Enum
class Locator(Enum):
"""Locator Enum defined by WebDriver Protocol."""
ID = "id"
XPATH = "xpath"
LINK_TEXT = "link text"
PARTIAL_LINK_TEXT = "partial link text"
NAME = "name"
TAG_NAME = "tag name"
CLASS_NAME = "class name"
CSS_SELECTOR = "css selector"
CONTAINS_TEXT = "text contains"
CONTAINS_DESC = "desc contains"
| 2.84375
| 3
|
pycyqle/test/test_factory.py
|
brunolange/pycyqle
| 0
|
12783567
|
import re
import unittest
from functools import partial
from pycyqle.builder import dict_build, param_build
from pycyqle.factory import Component, Factory
class FactoryTest(unittest.TestCase):
def test_param_build(self):
factory = param_build(
Factory,
name='bicycle-factory',
table='bicycle',
primary_key='id'
)
self._assert_bicycle_factory(factory)
return factory
def test_dict_build(self):
factory = dict_build(Factory, {
'name': 'bicycle-factory',
'table': 'bicycle',
'primary_key': 'id'
})
self._assert_bicycle_factory(factory)
return factory
def _assert_bicycle_factory(self, factory):
self.assertEqual(factory.name(), 'bicycle-factory')
self.assertEqual(factory.table(), 'bicycle')
self.assertEqual(factory.primary_key(), 'id')
@staticmethod
def _format_query(query):
return re.sub(r'\s?,\s?', ',', ' '.join(query.split()))
def test_query(self):
components = list(map(partial(dict_build, Component), [
{'name': 'tire', 'column': 'tire'},
{'name': 'seat', 'column': 'seat'}
]))
factory = self.test_dict_build()
factory.components(components)
self.assertEqual(len(factory.components()), len(components))
self.assertEqual(
FactoryTest._format_query(factory.query(['tire'], {})),
FactoryTest._format_query("""
SELECT bicycle.id AS "__id__"
, bicycle.tire AS tire
FROM bicycle WHERE 1=1
""")
)
new_components = list(map(partial(dict_build, Component), [
{'name': 'pedal', 'column': 'pedal'}
]))
factory.components(components + new_components)
self.assertEqual(
len(factory.components()),
len(components) + len(new_components)
)
self.assertEqual(
FactoryTest._format_query(
factory.query(['seat', 'pedal'], {
'id0': 42
})
),
FactoryTest._format_query("""
SELECT bicycle.id AS "__id__"
, bicycle.seat AS seat
, bicycle.pedal AS pedal
FROM bicycle WHERE bicycle.id IN (%(id0)s)
""")
)
if __name__ == '__main__':
unittest.main()
| 2.953125
| 3
|
pass.py
|
schaten/nut-snmp
| 0
|
12783568
|
<filename>pass.py
#!/usr/bin/python3
import sys
import subprocess
import syslog as log
class Subtree:
def __init__(self, top, data):
self.data = data
self.top = top
self.arr = []
for (i,key) in enumerate(self.data):
self.data[key]['index'] = i
self.arr.append((key, self.data[key]))
def get(self, oid):
if self.top not in oid:
return False
else:
if oid in self.data:
return self._getdata(oid)
else:
return False
def getnext(self, oid):
if oid in self.data:
a = self.data[oid]['index']
a+=1
else:
if self.top in oid or oid in self.top:
a = 0
else:
return False
if a < len(self.arr):
oid = self.arr[a][0]
return self._getdata(oid)
return False
def _getdata(self, oid):
if self.data[oid]['source'] == 'const':
res = self.data[oid]['value']
elif self.data[oid]['source'] == 'extern':
res = self._exec( self.data[oid]['cmd'] )
if 'transform' in self.data[oid]:
res = self.data[oid]['transform'](res)
return [oid, self.data[oid]['type'], res]
def _exec(self, cmd):
try:
tmp = subprocess.run(cmd, stdout=subprocess.PIPE)
return tmp.stdout
except:
return False
def mv2v(x):
return int(1000*float(x[:-1].decode("UTF-8")))
upsoid = '.1.3.6.1.4.1.2021.13.16.4'
prog = 'upsc'
myups = 'ups-fs4@localhost'
mib = {
'.1.3.6.1.4.1.2021.13.16.4.1.1.1': { 'type': 'INTEGER', 'source': 'const', 'value': '1'},
'.1.3.6.1.4.1.2021.13.16.4.1.2.1': { 'type': 'STRING', 'source': 'const', 'value': 'UPS Input Voltage'},
'.1.3.6.1.4.1.2021.13.16.4.1.3.1': { 'type': 'Gauge32', 'source': 'extern', 'cmd': [prog, myups, 'input.voltage'], 'transform': mv2v}
}
tree = Subtree(upsoid, mib)
cont = True
while cont:
tmp = input()
if tmp!='PING':
exit()
print("PONG")
command = input()
oid = input()
if command == 'set':
input();
print("not-writable")
elif command == 'get':
tmp = tree.get(oid)
if tmp:
print(tmp[0])
print(tmp[1])
print(tmp[2], flush=True)
else:
log.syslog('get failed')
print("NONE", flush=True)
elif command == 'getnext':
tmp = tree.getnext(oid)
if tmp:
print(tmp[0])
print(tmp[1])
print(tmp[2], flush=True)
else:
log.syslog('getnext failed')
print("NONE", flush=True)
| 2.4375
| 2
|
app/lib/firebase/fcm.py
|
kwahome/pycon-monitoring-workshop
| 7
|
12783569
|
import firebase_admin
from firebase_admin import credentials, messaging
file_path = './pycon-monitoring-workshop-firebase-adminsdk.json'
cred = credentials.Certificate(file_path)
default_app = firebase_admin.initialize_app(cred)
def send_message(recipients, message, dry_run=False):
if not isinstance(recipients, list):
raise TypeError(
"`recipients` expected to be a `list` but `{0}` found".format(
type(recipients)
)
)
    responses = []
    for registration_token in recipients:
        # build one message per device token; avoid shadowing the `message` argument
        fcm_message = messaging.Message(
            data=dict(message=message),
            token=registration_token,
        )
        # collect one send result per recipient instead of returning after the first
        responses.append(messaging.send(fcm_message, dry_run))
    return responses
| 2.171875
| 2
|
test.py
|
Ji-Xinyou/DIP-proj-DepthEstimation
| 0
|
12783570
|
<gh_stars>0
'''
I/O:
Input: image to be inferenced
Output: depth image
Relation: out = model(in)
path -> readimg -> image -> transform -> tensor
tensor -> model
model: get model from get_model() in train.py, load_param from .pth file
model -> output tensor -> transpose to H x W x C -> imshow & save
'''
import argparse
import cv2
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
import numpy as np
from utils import load_param
from model.model import get_model
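# A hedged sketch (not part of the original file) of the inference flow the
# docstring describes. The CLI flags, the checkpoint handling and the
# signatures assumed for get_model()/load_param() are illustrative only.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Single-image depth inference')
    parser.add_argument('--image', required=True, help='image to be inferenced')
    parser.add_argument('--ckpt', required=True, help='path to the trained .pth file')
    args = parser.parse_args()

    # path -> readimg -> image
    image = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB)

    # image -> transform -> tensor (adds a batch dimension)
    transform = transforms.Compose([transforms.ToTensor()])
    tensor = transform(image).unsqueeze(0)

    # model: get model from get_model() in train.py, load_param from .pth file
    model = get_model()                  # assumed to need no arguments here
    load_param(model, args.ckpt)         # assumed (model, checkpoint_path) signature
    model.eval()

    # model -> output tensor -> transpose to H x W x C -> imshow & save
    depth = model(tensor).squeeze(0).detach().numpy()
    depth = np.transpose(depth, (1, 2, 0))
    plt.imshow(depth.squeeze(), cmap='gray')
    plt.savefig('depth_out.png')
    plt.show()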
| 2.59375
| 3
|
qoredl_project/qoredl-core/Global_client.py
|
qore-dl/qore-dl-code
| 0
|
12783571
|
from influxdb import InfluxDBClient
class Global_Influx():
Client_all = InfluxDBClient(host='172.16.20.190',port=8086,username='voicecomm',password='<PASSWORD>')
| 1.71875
| 2
|
modoboa_radicale/urls_api.py
|
Toniob/modoboa-radicale
| 0
|
12783572
|
<reponame>Toniob/modoboa-radicale<filename>modoboa_radicale/urls_api.py
"""Radicale urls."""
from rest_framework_nested import routers
from . import viewsets
router = routers.SimpleRouter()
router.register(
r"user-calendars", viewsets.UserCalendarViewSet,
base_name="user-calendar")
router.register(
r"shared-calendars", viewsets.SharedCalendarViewSet,
base_name="shared-calendar")
router.register(
r"attendees", viewsets.AttendeeViewSet, base_name="attendee")
router.register(
r"mailboxes", viewsets.MailboxViewSet, base_name="mailbox")
router.register(
r"accessrules", viewsets.AccessRuleViewSet, base_name="access-rule")
calendars_router = routers.NestedSimpleRouter(
router, r"user-calendars", lookup="calendar")
calendars_router.register(
r"events", viewsets.UserEventViewSet, base_name="event")
shared_calendars_router = routers.NestedSimpleRouter(
router, r"shared-calendars", lookup="calendar")
shared_calendars_router.register(
r"events", viewsets.SharedEventViewSet, base_name="event")
urlpatterns = (
router.urls + calendars_router.urls + shared_calendars_router.urls
)
| 1.929688
| 2
|
custom-recipes/ClassifierTrain/recipe.py
|
dataiku/dss-plugin-nlp-crowlingo
| 1
|
12783573
|
<reponame>dataiku/dss-plugin-nlp-crowlingo
from PyCrowlingo.Errors import ModelNotFound
from dataiku.customrecipe import get_recipe_config
from utils import apply_func, get_client
text_column = get_recipe_config().get("text_column")
lang_column = get_recipe_config().get("lang_column")
class_id_column = get_recipe_config().get("class_id_column")
id_column = get_recipe_config().get("id_column")
model_id = get_recipe_config().get("model_id")
model_type = get_recipe_config().get("model_type")
train_ratio = get_recipe_config().get("train_ratio")
def init_model(client):
try:
client.model.clear(model_id)
except ModelNotFound:
client.model.create(model_id, "clf")
def call_api(client, row):
return client.classifier.create_documents(model_id, documents=[{"text": row.get(text_column),
"lang": row.get(lang_column),
"class_id": row.get(class_id_column),
"id": row.get(id_column)}]).dict()
def train_model(client):
client.model.train(model_id, model_type=model_type,
model_config={"train_ratio": train_ratio})
return str(cl_client.model.wait_training(model_id))
cl_client = get_client(get_recipe_config())
init_model(cl_client)
apply_func(call_api, client=cl_client)
train_model(cl_client)
| 2.203125
| 2
|
sklearnTUT/mysample/svdtest.py
|
mwsssxu/tutorials
| 0
|
12783574
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img
from sklearn.decomposition import TruncatedSVD
"""
打个比方说一张女人图片,我们如何判定这个女人是不是美女呢。我们会看比较关键的一些特征,比如说脸好不好看,胸好不好看,屁股怎么样,腿怎么样,至于衣服上是某个花纹还是手臂上有一个小痔还是,这些特征我们都是不关心的,就可以过滤掉。我们关心的是主成分,也就是对结果贡献系数较大的特征。SVD算法的作用就是来告诉你哪些特征是重要的,有多重要,哪些特征是不重要的,是可以忽略的。
接下来我们使用sklearn提供的TruncatedSVD模块来对美女图片进行压缩。
首先我们使用matplotlib显示一张美女png图片,png图片的格式非常简单,每一个像素有三个维度的颜色值RGB,整个图片就是一个「height x width x 3」维的矩阵。
"""
# 加载png数据矩阵
img_array = img.imread('test2.png')
shape = img_array.shape
print(shape)
# 高度、宽度、RGB通道数=3
height, width, channels = shape[0], shape[1], shape[2]
# 转换成numpy array
img_matrix = np.array(img_array)
# 存储RGB三个通道转换后的数据
planes = []
# RGB三个通道分别处理
for idx in range(channels):
# 提取通道
plane = img_matrix[:, :, idx]
# 转成二维矩阵
plane = np.reshape(plane, (height, width))
# 保留10个主成分
svd = TruncatedSVD(n_components=10)
# 拟合数据,进行矩阵分解,生成特征空间,剔去无关紧要的成分
svd.fit(plane)
# 将输入数据转换到特征空间
new_plane = svd.transform(plane)
# 再将特征空间的数据转换会数据空间
plane = svd.inverse_transform(new_plane)
# 存起来
planes.append(plane)
# 合并三个通道平面数据
img_matrix = np.dstack(planes)
# 显示处理后的图像
plt.imshow(img_matrix)
plt.show()
| 2.9375
| 3
|
PreliminaryAnalysis/Exponential parameters through time/simulation.py
|
alfredholmes/cryptocurrency_data_analysis
| 0
|
12783575
|
from scipy.stats import uniform
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.stats import exponweib
def calculate_parameters(interarrivals):
sample = np.array(interarrivals)
x = np.linspace(0, 1 - 1 / sample.shape[0], sample.shape[0])
x = x[sample > 0]
sample = sample[sample > 0]
sample = sample[x > 0]
x = x[x > 0]
m, c = minimize(lambda t: np.mean((np.log(sample) - (t[0] * np.log(-np.log(1 - x)) + t[1])) ** 2), [1, 0]).x
return 1 / m, np.exp(-c - m)
def main():
step = 4
    # exponweib requires shape parameters; the values here are assumed for the demo
    interarrivals = exponweib.rvs(1, 1.5, size=10000)
print(calculate_parameters(interarrivals))
hours = []
hour = []
params = []
time = 0
last_time = 0
for arrival in interarrivals:
if time + arrival > last_time + 1000 * 60 * 60 * step:
params.append(calculate_parameters(hour))
hours.append(hour)
hour = []
last_time = time = last_time + 1000 * 60 * 60 * step
time = time + arrival
hour.append(arrival)
fig, ax1 = plt.subplots()
ax2 = plt.twinx()
ax1.plot([p[0] for p in params])
ax2.plot([p[1] for p in params], color='orange')
plt.show()
if __name__ == '__main__':
main()
| 2.75
| 3
|
complete-dsa/string/printDups.py
|
nishantml/Data-Structure-And-Algorithms
| 0
|
12783576
|
<reponame>nishantml/Data-Structure-And-Algorithms
def print_duplicates(text):
    counts = dict()
    for c in text:
        if c not in counts:
            counts[c] = 1
        else:
            counts[c] = counts[c] + 1
    for c in counts:
        if counts[c] > 1:
            print(c)
print_duplicates('test')
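# For comparison, a sketch (not in the original) of the same check built on
# collections.Counter.
from collections import Counter

def print_duplicates_counter(text):
    # print every character that occurs more than once
    for ch, n in Counter(text).items():
        if n > 1:
            print(ch)

print_duplicates_counter('test')  # -> t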
| 3.84375
| 4
|
flask/wsgi.py
|
jSm449g4d/web_by_flask
| 0
|
12783577
|
# coding: utf-8
import sys
import os
import flask
from flask import redirect,request,render_template_string,render_template
from werkzeug.utils import secure_filename
import importlib
import zipfile
import threading
import random
from datetime import datetime
import pytz
import time
from sqlalchemy import create_engine
import json
from google.cloud import storage
import firebase_admin
from firebase_admin import auth
#Flask_Startup
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
os.chdir(os.path.join("./",os.path.dirname(__file__)))
app = flask.Flask(__name__)
wsgi_util=importlib.import_module("wsgi_util")
#prevent uploading too large file
app.config['MAX_CONTENT_LENGTH'] = 100000000
#unzip CDN contents for fallback
try:zipfile.ZipFile(os.path.join("./static/","bootstrap-4.4.1-dist.zip")).extractall("./static/")
except:print("cant unzip CDN contents")
wsgi_util.Resource_Reload()
@app.route("/")
def indexpage_show():
wsgi_util.access_counter+=1
return wsgi_util.render_template_2("index.html",
STATUS_TABLE=wsgi_util.status_table,
access_counter=str(wsgi_util.access_counter)
)
@app.route("/<name>.html")
def html_show(name):
try :return wsgi_util.render_template_2('./'+name+'.html')
except:return redirect('./'),404
@app.route("/<name>.py",methods=['GET', 'POST'])
def py_show(name):
try :return importlib.import_module(name).show(request)
except Exception as e:
return wsgi_util.render_template_2("error.html",
form_error_code="500",form_error_text=str(e)),500
application=app
if __name__ == "__main__":
app.run()
| 2.046875
| 2
|
workflow/migrations/0006_tolabookmarks.py
|
meetdatastory/Activity-CE
| 0
|
12783578
|
<reponame>meetdatastory/Activity-CE
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-12-05 03:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workflow', '0005_auto_20161129_0101'),
]
operations = [
migrations.CreateModel(
name='TolaBookmarks',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255, null=True)),
('bookmark_url', models.CharField(blank=True, max_length=255, null=True)),
('create_date', models.DateTimeField(blank=True, null=True)),
('edit_date', models.DateTimeField(blank=True, null=True)),
('program', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workflow.Program')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='workflow.TolaUser')),
],
options={
'ordering': ('name',),
},
),
]
| 1.570313
| 2
|
scripts/utils.py
|
deez79/PYTH2
| 7
|
12783579
|
<filename>scripts/utils.py
import json
import os
from datetime import datetime
# TODO: refactor the filename_parts bits...
def does_dir_path_exist(filename, dirname="Notes"):
filename_parts = filename.split('/')
newdir = "/".join(filename_parts[0:-1]+[dirname])
filename_name = filename_parts.pop().split('.')[0]
newfilepath = newdir + "/" + filename_name + ".ipynb"
if os.path.isfile(newfilepath):
return newfilepath
else:
return None
def get_ipynb_path(filename, dirname="nb"):
filename_parts = filename.split('/')
newdir = "/".join(filename_parts[0:-1]+[dirname])
filename_name = filename_parts.pop().split('.')[0]
os.makedirs(newdir, exist_ok=True)
return f"{newdir}/{filename_name}.ipynb"
def mrkdown_details(code, with_header_dict=False):
lines = code.split('\n')
header = lines[0:3]
rest = lines[4:]
header_dict = json.loads(header[1])
next_file = header_dict.get("next", None)
returnable = [next_file, rest]
if with_header_dict:
returnable.append(header_dict)
return returnable
def get_status_icon_by_date(data):
str_month, str_day = data.split('/')
month = int(str_month)
day = int(str_day)
today = datetime.today()
if month == today.month and day == today.day:
return "➡️ "
elif month < today.month or month == today.month and day < today.day:
return "✅ "
else:
return ""
| 2.859375
| 3
|
Chapter02/Activity2_01/Activity2_01.py
|
PacktWorkshops/The-Spark-Workshop
| 7
|
12783580
|
from pyspark.sql import SparkSession
if __name__ == "__main__":
# input = sample_warc_loc
spark: SparkSession = SparkSession.builder \
.appName('Activity 2.1') \
.getOrCreate()
spark.sparkContext.setLogLevel('ERROR') # avoids printing of info messages
from operator import add
from collections import defaultdict
from typing import Dict
from Chapter02.utilities02_py.helper_python import extract_raw_records, parse_raw_warc
input = "/Users/a/CC-MAIN-20191013195541-20191013222541-00000.warc"
warc_records = extract_raw_records(input, spark).flatMap(lambda record: parse_raw_warc(record))
# print(warc_records.count())
keyed_by_language = warc_records.filter(lambda rec: rec.language != '').map(lambda rec: (rec.language, 1))
language_map: Dict[str, int] = keyed_by_language.reduceByKey(add).collectAsMap()
## language_list = keyed_by_language.reduceByKey(add).collect()
## language_map: Dict[str, int] = defaultdict(int)
## for key, value in language_list:
## ... language_map[key] += value
## language_map
# warc_records.filter(lambda rec: rec.language != '').map(lambda rec: rec.language).countByValue()
sorted_language_list = [(key, language_map[key]) for key in sorted(language_map, key=language_map.get)]
sorted_language_list[0:10] # a subset of 10 of the rarest languages
sorted_language_list[len(sorted_language_list)-1] # most frequent language
uz_records = warc_records.filter(lambda rec: rec.language != '' and rec.language == 'uz').map(lambda rec: rec.target_uri)
print(uz_records.collect())
wikipages = warc_records.filter(lambda rec: 'wikipedia' in rec.target_uri).map(lambda rec: rec.target_uri)
print(wikipages.collect())
untagged = warc_records.filter(lambda record: record.language == '')
print(untagged.count())
| 2.796875
| 3
|
grama/dfply/transform.py
|
natalia-rubio/py_grama
| 0
|
12783581
|
<reponame>natalia-rubio/py_grama
from .base import *
@dfpipe
def mutate(df, **kwargs):
"""
Creates new variables (columns) in the DataFrame specified by keyword
argument pairs, where the key is the column name and the value is the
new column value(s).
Args:
df (pandas.DataFrame): data passed in through the pipe.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> mutate(x_plus_y=X.x + X.y) >> select_from('x') >> head(3)
x y z x_plus_y
0 3.95 3.98 2.43 7.93
1 3.89 3.84 2.31 7.73
2 4.05 4.07 2.31 8.12
"""
return df.assign(**kwargs)
@dfpipe
def mutate_if(df, predicate, fun):
"""
Modifies columns in place if the specified predicate is true.
Args:
df (pandas.DataFrame): data passed in through the pipe.
predicate: a function applied to columns that returns a boolean value
fun: a function that will be applied to columns where predicate returns True
Example:
diamonds >> mutate_if(lambda col: min(col) < 1 and mean(col) < 4, lambda row: 2 * row) >> head(3)
carat cut color clarity depth table price x y z
0 0.46 Ideal E SI2 61.5 55.0 326 3.95 3.98 4.86
1 0.42 Premium E SI1 59.8 61.0 326 3.89 3.84 4.62
2 0.46 Good E VS1 56.9 65.0 327 4.05 4.07 4.62
(columns 'carat' and 'z', both having a min < 1 and mean < 4, are doubled, while the
other rows remain as they were)
"""
cols = list()
for col in df:
try:
if predicate(df[col]):
cols.append(col)
except:
pass
df[cols] = df[cols].apply(fun)
return df
# df2 = df.copy()
# df2[cols] = df2[cols].apply(fun)
# return df2
@dfpipe
def transmute(df, *keep_columns, **kwargs):
"""
Creates columns and then returns those new columns and optionally specified
original columns from the DataFrame.
This works like `mutate`, but designed to discard the original columns used
to create the new ones.
Args:
*keep_columns: Column labels to keep. Can be string, symbolic, or
integer position.
Kwargs:
**kwargs: keys are the names of the new columns, values indicate
what the new column values will be.
Example:
diamonds >> transmute(x_plus_y=X.x + X.y, y_div_z=(X.y / X.z)) >> head(3)
y_div_z x_plus_y
0 1.637860 7.93
1 1.662338 7.73
2 1.761905 8.12
"""
keep_cols = []
for col in flatten(keep_columns):
try:
keep_cols.append(col.name)
except:
if isinstance(col, str):
keep_cols.append(col)
elif isinstance(col, int):
keep_cols.append(df.columns[col])
df = df.assign(**kwargs)
columns = [k for k in kwargs.keys()] + list(keep_cols)
return df[columns]
| 4.15625
| 4
|
crawler_magazine/crawlers/__init__.py
|
EvertonTomalok/magazine
| 0
|
12783582
|
from abc import ABC, abstractmethod
from crawler_magazine.downloader.asynchronous import AsyncDownloader
class CrawlerInterface(ABC):
def __init__(self, url):
self.url = url
self.downloader = AsyncDownloader()
async def get_page(self, url=None):
return await self.downloader.get(url or self.url)
@abstractmethod
def parse(self, html, *args):
"""Need to be implemented"""
@abstractmethod
def crawl(self):
"""Need to be implemented"""
| 3.234375
| 3
|
model/device.py
|
Rubber-Duck-999/HouseGuard-StatusApi
| 0
|
12783583
|
import pymysql, json
from model.sql import Connection
class DeviceModel():
def create_device(self, allowed, blocked, unknown):
sql = "INSERT INTO `device` (`allowed_devices`, `blocked_devices`, `unknown_devices`) VALUES (%s, %s, %s)"
values = (allowed, blocked, unknown)
conn = Connection()
conn.create(sql, values)
def get_device(self):
sql = "SELECT * FROM device ORDER BY device_id ASC LIMIT 1"
conn = Connection()
fail, events = conn.get(sql)
data = ''
if not fail:
for event in events:
event['created_date'] = event['created_date'].isoformat()
data = {
"length": len(events),
"device": events
}
else:
data = {
"length": 0,
"device": ''
}
return data
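if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # Requires a reachable MySQL instance configured in model.sql's Connection;
    # the device counts below are illustrative only.
    model = DeviceModel()
    model.create_device(allowed=12, blocked=3, unknown=1)
    print(json.dumps(model.get_device(), indent=2))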
| 2.796875
| 3
|
freeldep/cloud/aws/validate.py
|
MatthieuBlais/freeldep
| 0
|
12783584
|
import os
import subprocess
import uuid
class CfnStackValidation:
@classmethod
def validate_config(cls, config):
"""Validate section of the stack config"""
if "aws" not in config:
raise KeyError("aws is required in a stack definition")
else:
cls._validate_aws_config(config["aws"])
if "location" not in config:
raise KeyError("location is required in a stack definition")
elif not cls._file_exists(os.path.expanduser(config["location"])):
raise FileNotFoundError(f"Template {config['location']} does not exist")
if "template" not in config:
raise KeyError("template is required in a stack definition")
else:
cls._validate_template_config(config["template"])
if "functions" in config:
cls._validate_lambda_config(config["functions"])
@classmethod
def validate_stack(cls, template, tmp_location=f"/tmp/{uuid.uuid1().hex}.yaml"):
"""Validate CFN stack with CFNLINT"""
with open(tmp_location, "w+") as f:
f.write(template)
try:
subprocess.check_output(
f"cfn-lint {tmp_location}".split(" "), stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError:
raise RuntimeError("Your CFN stack is not valid")
os.remove(tmp_location)
@classmethod
def _validate_aws_config(cls, config):
"""Validate section of the stack config"""
if type(config) != dict:
raise ValueError("aws must be a dict")
if "region" not in config:
raise KeyError("aws.region is required in stack definition")
if "account-id" not in config:
raise KeyError("aws.account-id is required in stack definition")
if type(config["region"]) != str:
raise ValueError("aws.region must be a string")
if (
type(config["account-id"]) != str
or len(str(config["account-id"])) != 12
or len([x for x in config["account-id"] if not x.isdigit()])
):
raise ValueError("aws.account-id must be a 12 digit string")
@classmethod
def _validate_template_config(cls, config):
"""Validate section of the stack config"""
if type(config) != dict:
raise ValueError("template must be a dict")
if "name" not in config:
raise KeyError("template.name is required in stack definition")
if type(config["name"]) != str:
raise ValueError("template.name must be a string")
if "parameters" in config and type(config["parameters"]) != dict:
raise ValueError("template.parameters must be a dict")
@classmethod
def _validate_lambda_config(cls, config):
"""Validate section of the stack config"""
for lambd in config:
if "name" not in lambd:
raise KeyError("Lambdas must have a name")
if "location" not in lambd:
raise KeyError("Lambdas must have a location")
if "template-attribute" not in lambd:
raise KeyError("Lambdas must have a template-attribute")
if "bucket" not in lambd:
raise KeyError("Lambdas must have a artifact bucket location")
if (
type(lambd["name"]) != str
or type(lambd["template-attribute"]) != str
or type(lambd["bucket"]) != str
):
raise ValueError(
"One of these parameters is not a string: name, template-attribute, bucket"
)
if not os.path.isdir(lambd["location"]):
raise ValueError("Lambda package is not found")
@classmethod
def _file_exists(cls, file_path):
"""Check if a file exists"""
return os.path.exists(os.path.expanduser(file_path))
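if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # A minimal config that satisfies validate_config(); this very file stands in
    # for the CloudFormation template location so the existence check passes, and
    # the region, account id and template name are illustrative values.
    example_config = {
        "aws": {"region": "ap-southeast-1", "account-id": "123456789012"},
        "location": __file__,
        "template": {"name": "example-stack", "parameters": {"Env": "dev"}},
    }
    CfnStackValidation.validate_config(example_config)
    print("example config is structurally valid")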
| 2.453125
| 2
|
src/studio2021/functions/geometry.py
|
Design-Machine-Group/studio2021
| 1
|
12783585
|
from __future__ import print_function
from math import fabs
from math import sqrt
__author__ = ["<NAME>"]
__copyright__ = "Copyright 2020, Design Machine Group - University of Washington"
__license__ = "MIT License"
__email__ = "<EMAIL>"
__version__ = "0.1.0"
def midpoint_point_point(a, b):
return [0.5 * (a[0] + b[0]),
0.5 * (a[1] + b[1]),
0.5 * (a[2] + b[2])]
def geometric_key(xyz, precision=None, sanitize=True):
x, y, z = xyz
if not precision:
precision = '3f'
if precision == 'd':
return '{0},{1},{2}'.format(int(x), int(y), int(z))
if sanitize:
minzero = "-{0:.{1}}".format(0.0, precision)
if "{0:.{1}}".format(x, precision) == minzero:
x = 0.0
if "{0:.{1}}".format(y, precision) == minzero:
y = 0.0
if "{0:.{1}}".format(z, precision) == minzero:
z = 0.0
return '{0:.{3}},{1:.{3}},{2:.{3}}'.format(x, y, z, precision)
def distance_point_point(a, b):
"""Compute the distance between a and b.
Parameters
----------
a : sequence of float
XYZ coordinates of point a.
b : sequence of float
XYZ coordinates of point b.
Returns
-------
float
Distance between a and b.
Examples
--------
>>> distance_point_point([0.0, 0.0, 0.0], [2.0, 0.0, 0.0])
2.0
See Also
--------
distance_point_point_xy
"""
ab = subtract_vectors(b, a)
return length_vector(ab)
def centroid_points(points):
"""Compute the centroid of a set of points.
Warnings
--------
Duplicate points are **NOT** removed. If there are duplicates in the
sequence, they should be there intentionally.
Parameters
----------
points : sequence
A sequence of XYZ coordinates.
Returns
-------
list
XYZ coordinates of the centroid.
Examples
--------
>>>
"""
p = len(points)
x, y, z = zip(*points)
return [sum(x) / p, sum(y) / p, sum(z) / p]
def subtract_vectors(u, v):
"""Subtract one vector from another.
Parameters
----------
u : list
XYZ components of the first vector.
v : list
XYZ components of the second vector.
Returns
-------
list
The resulting vector.
Examples
--------
>>>
"""
return [a - b for (a, b) in zip(u, v)]
def cross_vectors(u, v):
r"""Compute the cross product of two vectors.
Parameters
----------
u : tuple, list, Vector
XYZ components of the first vector.
v : tuple, list, Vector
XYZ components of the second vector.
Returns
-------
cross : list
The cross product of the two vectors.
Notes
-----
The xyz components of the cross product of two vectors :math:`\mathbf{u}`
and :math:`\mathbf{v}` can be computed as the *minors* of the following matrix:
.. math::
:nowrap:
\begin{bmatrix}
x & y & z \\
u_{x} & u_{y} & u_{z} \\
v_{x} & v_{y} & v_{z}
\end{bmatrix}
Therefore, the cross product can be written as:
.. math::
:nowrap:
\mathbf{u} \times \mathbf{v}
=
\begin{bmatrix}
u_{y} * v_{z} - u_{z} * v_{y} \\
u_{z} * v_{x} - u_{x} * v_{z} \\
u_{x} * v_{y} - u_{y} * v_{x}
\end{bmatrix}
Examples
--------
>>> cross_vectors([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
[0.0, 0.0, 1.0]
"""
return [u[1] * v[2] - u[2] * v[1],
u[2] * v[0] - u[0] * v[2],
u[0] * v[1] - u[1] * v[0]]
def length_vector(vector):
"""Calculate the length of the vector.
Parameters
----------
vector : list
XYZ components of the vector.
Returns
-------
float
The length of the vector.
Examples
--------
>>> length_vector([2.0, 0.0, 0.0])
2.0
>>> length_vector([1.0, 1.0, 0.0]) == sqrt(2.0)
True
"""
return sqrt(length_vector_sqrd(vector))
def length_vector_sqrd(vector):
"""Compute the squared length of a vector.
Parameters
----------
vector : list
XYZ components of the vector.
Returns
-------
float
The squared length.
Examples
--------
>>> length_vector_sqrd([1.0, 1.0, 0.0])
2.0
"""
return vector[0] ** 2 + vector[1] ** 2 + vector[2] ** 2
def dot_vectors(u, v):
"""Compute the dot product of two vectors.
Parameters
----------
u : tuple, list, Vector
XYZ components of the first vector.
v : tuple, list, Vector
XYZ components of the second vector.
Returns
-------
dot : float
The dot product of the two vectors.
Examples
--------
>>> dot_vectors([1.0, 0, 0], [2.0, 0, 0])
2.0
"""
return sum(a * b for a, b in zip(u, v))
def area_polygon(polygon):
"""Compute the area of a polygon.
Parameters
----------
polygon : sequence
The XYZ coordinates of the vertices/corners of the polygon.
The vertices are assumed to be in order.
The polygon is assumed to be closed:
the first and last vertex in the sequence should not be the same.
Returns
-------
float
The area of the polygon.
Examples
--------
>>>
"""
o = centroid_points(polygon)
a = polygon[-1]
b = polygon[0]
oa = subtract_vectors(a, o)
ob = subtract_vectors(b, o)
n0 = cross_vectors(oa, ob)
area = 0.5 * length_vector(n0)
for i in range(0, len(polygon) - 1):
oa = ob
b = polygon[i + 1]
ob = subtract_vectors(b, o)
n = cross_vectors(oa, ob)
if dot_vectors(n, n0) > 0:
area += 0.5 * length_vector(n)
else:
area -= 0.5 * length_vector(n)
return area
def add_vectors(u, v):
"""Add two vectors.
Parameters
----------
u : sequence of float
XYZ components of the first vector.
v : sequence of float
XYZ components of the second vector.
Returns
-------
list
The resulting vector.
"""
return [a + b for (a, b) in zip(u, v)]
def scale_vector(vector, factor):
"""Scale a vector by a given factor.
Parameters
----------
vector : list, tuple
XYZ components of the vector.
factor : float
The scaling factor.
Returns
-------
list
The scaled vector.
Examples
--------
>>> scale_vector([1.0, 2.0, 3.0], 2.0)
[2.0, 4.0, 6.0]
>>> v = [2.0, 0.0, 0.0]
>>> scale_vector(v, 1 / length_vector(v))
[1.0, 0.0, 0.0]
"""
return [axis * factor for axis in vector]
def intersection_line_plane(line, plane, tol=1e-6):
"""Computes the intersection point of a line and a plane
Parameters
----------
line : tuple
Two points defining the line.
plane : tuple
The base point and normal defining the plane.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
point or None
"""
a, b = line
o, n = plane
ab = subtract_vectors(b, a)
cosa = dot_vectors(n, ab)
if fabs(cosa) <= tol:
# if the dot product (cosine of the angle between segment and plane)
# is close to zero the line and the normal are almost perpendicular
# hence there is no intersection
return None
# based on the ratio = -dot_vectors(n, ab) / dot_vectors(n, oa)
# there are three scenarios
# 1) 0.0 < ratio < 1.0: the intersection is between a and b
# 2) ratio < 0.0: the intersection is on the other side of a
# 3) ratio > 1.0: the intersection is on the other side of b
oa = subtract_vectors(a, o)
ratio = - dot_vectors(n, oa) / cosa
ab = scale_vector(ab, ratio)
return add_vectors(a, ab)
def intersection_segment_plane(segment, plane, tol=1e-6):
"""Computes the intersection point of a line segment and a plane
Parameters
----------
segment : tuple
Two points defining the line segment.
plane : tuple
The base point and normal defining the plane.
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
point or None
"""
a, b = segment
o, n = plane
ab = subtract_vectors(b, a)
cosa = dot_vectors(n, ab)
if fabs(cosa) <= tol:
# if the dot product (cosine of the angle between segment and plane)
# is close to zero the line and the normal are almost perpendicular
# hence there is no intersection
return None
# based on the ratio = -dot_vectors(n, ab) / dot_vectors(n, oa)
# there are three scenarios
# 1) 0.0 < ratio < 1.0: the intersection is between a and b
# 2) ratio < 0.0: the intersection is on the other side of a
# 3) ratio > 1.0: the intersection is on the other side of b
oa = subtract_vectors(a, o)
ratio = - dot_vectors(n, oa) / cosa
if 0.0 <= ratio and ratio <= 1.0:
ab = scale_vector(ab, ratio)
return add_vectors(a, ab)
return None
def normalize_vector(vector):
"""Normalise a given vector.
Parameters
----------
vector : list, tuple
XYZ components of the vector.
Returns
-------
list
The normalized vector.
Examples
--------
>>>
"""
length = length_vector(vector)
if not length:
return vector
return [vector[0] / length, vector[1] / length, vector[2] / length]
def normal_polygon(polygon, unitized=True):
"""Compute the normal of a polygon defined by a sequence of points.
Parameters
----------
polygon : list of list
A list of polygon point coordinates.
Returns
-------
list
The normal vector.
Raises
------
ValueError
If less than three points are provided.
Notes
-----
The points in the list should be unique. For example, the first and last
point in the list should not be the same.
"""
p = len(polygon)
assert p > 2, "At least three points required"
nx = 0
ny = 0
nz = 0
o = centroid_points(polygon)
a = polygon[-1]
oa = subtract_vectors(a, o)
for i in range(p):
b = polygon[i]
ob = subtract_vectors(b, o)
n = cross_vectors(oa, ob)
oa = ob
nx += n[0]
ny += n[1]
nz += n[2]
if not unitized:
return nx, ny, nz
return normalize_vector([nx, ny, nz])
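if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # A unit square in the XY plane: area 1.0, unit normal along +Z, and a
    # vertical line through its centre meeting the plane z=0 at (0.5, 0.5, 0).
    square = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
    print(area_polygon(square))           # 1.0
    print(normal_polygon(square))         # [0.0, 0.0, 1.0]
    line = ([0.5, 0.5, 1.0], [0.5, 0.5, -1.0])
    plane = ([0.0, 0.0, 0.0], [0.0, 0.0, 1.0])
    print(intersection_line_plane(line, plane))  # [0.5, 0.5, 0.0]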
| 3.203125
| 3
|
utils/db_api/db_commands.py
|
KARTASAR/DatingBot
| 12
|
12783586
|
from django_project.telegrambot.usersmanage.models import User
from asgiref.sync import sync_to_async
@sync_to_async
def select_user(telegram_id: int):
user = User.objects.filter(telegram_id=telegram_id).first()
return user
@sync_to_async
def add_user(telegram_id, name, username, age):
return User(telegram_id=int(telegram_id), fullname=name, username=username, age=int(age)).save()
@sync_to_async
def select_all_users():
users = User.objects.all()
return users
@sync_to_async
def count_users():
return User.objects.all().count()
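if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # These helpers must be awaited from async code; running them standalone also
    # requires django.setup() with the project settings configured. The user data
    # below is illustrative only.
    async def _demo():
        await add_user(telegram_id=123456, name="Test User", username="testuser", age=21)
        print("registered users:", await count_users())
    # import asyncio; asyncio.run(_demo())  # uncomment once Django is configured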
| 2.203125
| 2
|
Define_Projection_of_SHAPEFILES_in_folder.py
|
mcfoi/arcpy-toolbox
| 3
|
12783587
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Defin_Projection_GB_for_folder.py
# Created on: 2015-04-04 12:22:48.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import os
import arcpy
# Dynamic variables:
rasterFolder = arcpy.GetParameterAsText(0)
# Set the workspace environment to local file geodatabase
arcpy.env.workspace = rasterFolder
shpFiles = arcpy.ListFeatureClasses()
spatialReference = arcpy.GetParameter(1)
projection = spatialReference.exportToString()
# Process: Define Projection
if shpFiles is not None:
for shpFile in shpFiles:
arcpy.DefineProjection_management(shpFile, projection)
| 2.125
| 2
|
applications/SwimmingDEMApplication/python_scripts/derivative_recovery/standard_recoverer.py
|
AndreaVoltan/MyKratos7.0
| 2
|
12783588
|
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# importing the Kratos Library
from KratosMultiphysics import *
from KratosMultiphysics.FluidDynamicsApplication import *
from KratosMultiphysics.SwimmingDEMApplication import *
from . import recoverer
class StandardGradientRecoverer(recoverer.GradientRecoverer):
def __init__(self, pp, model_part):
recoverer.GradientRecoverer.__init__(self, pp, model_part)
def RecoverGradientOfScalar(self, scalar_variable, gradient_variable):
self.cplusplus_recovery_tool.CalculateGradient(self.model_part, scalar_variable, gradient_variable)
def RecoverGradientOfVector(self, vector_variable, gradient_variable_x, gradient_variable_y, gradient_variable_z):
self.cplusplus_recovery_tool.CalculateGradient(self.model_part, vector_variable, gradient_variable_x, gradient_variable_y, gradient_variable_z)
def RecoverGradientOfVelocity(self):
self.RecoverGradientOfVector(VELOCITY, VELOCITY_X_GRADIENT, VELOCITY_Y_GRADIENT, VELOCITY_Z_GRADIENT)
def RecoverPressureGradient(self):
self.RecoverGradientOfScalar(PRESSURE, RECOVERED_PRESSURE_GRADIENT)
class StandardMaterialAccelerationRecoverer(recoverer.MaterialAccelerationRecoverer):
def __init__(self, pp, model_part):
recoverer.MaterialAccelerationRecoverer.__init__(self, pp, model_part)
def RecoverMaterialAcceleration(self):
self.cplusplus_recovery_tool.CalculateVectorMaterialDerivative(self.model_part, VELOCITY, ACCELERATION, MATERIAL_ACCELERATION)
class StandardLaplacianRecoverer(recoverer.LaplacianRecoverer):
def __init__(self, pp, model_part):
recoverer.LaplacianRecoverer.__init__(self, pp, model_part)
def RecoverVectorLaplacian(self, vector_variable, laplacian_variable):
self.cplusplus_recovery_tool.CalculateVectorLaplacian(self.model_part, vector_variable, laplacian_variable)
| 2.09375
| 2
|
fe_utils/cluster.py
|
ml-pipes/future_engineering_utils
| 0
|
12783589
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_cluster.ipynb (unless otherwise specified).
__all__ = ['load_embedding', 'BayesClusterTrainer']
# Cell
from functools import partial
import hdbscan
from hyperopt import hp
from hyperopt import fmin, tpe, space_eval
from hyperopt import Trials
from hyperopt import STATUS_OK
import numpy as np
import os
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import homogeneity_score, v_measure_score, silhouette_score
from sklearn.metrics.cluster import completeness_score, adjusted_mutual_info_score
import time
import umap
# Cell
def load_embedding(path):
"""
dataloader for saved embeddings. Load from numpy file
"""
emb = np.load(path)
return emb
# Cell
class BayesClusterTrainer():
"""
A trainer for cluster optimization runs
Inputs:
`space`: `dict` containing relevant parameter spaces for `hdbscan` and `umap`
"""
def __init__(self, space, cost_fn_params, embeddings, labels, optimize='default' , *args, **kwargs):
self.space = space
self.cost_fn_params = cost_fn_params
self.embeddings = embeddings
self.labels = labels
self.optimize = optimize
self.logs = []
self.run = dict()
def generate_clusters(self, embeddings,
min_cluster_size,
cluster_selection_epsilon,
cluster_selection_method,
metric,
n_neighbors,
n_components,
random_state = 42):
"""
Generate HDBSCAN cluster object after reducing embedding dimensionality with UMAP
"""
umap_embeddings = (umap.UMAP(n_neighbors=n_neighbors,
n_components=n_components,
metric='cosine',
random_state=random_state)
.fit_transform(embeddings))
clusters = hdbscan.HDBSCAN(min_cluster_size = min_cluster_size,
metric=metric, cluster_selection_epsilon = cluster_selection_epsilon,
cluster_selection_method=cluster_selection_method,
gen_min_span_tree=True).fit(umap_embeddings)
return clusters
def objective(self, params, embeddings, labels):
"""
Objective function for hyperopt to minimize, which incorporates constraints
on the number of clusters we want to identify
"""
clusters = self.generate_clusters(embeddings,
n_neighbors = params['n_neighbors'],
n_components = params['n_components'],
min_cluster_size = params['min_cluster_size'],
cluster_selection_epsilon = params['cluster_selection_epsilon'],
metric = params['metric'],
cluster_selection_method = params['cluster_selection_method'],
random_state = 42)
cost = self.score_clusters(clusters, labels, params)
loss = cost
return {'loss': loss, 'status': STATUS_OK}
def score_clusters(self, clusters, y, params):
"""
Returns the label count and cost of a given cluster supplied from running hdbscan
"""
val = clusters.relative_validity_
#prevent pers from getting NaN value if no clusters exist
if len(clusters.cluster_persistence_)==0:
pers = 0.
else:
pers = clusters.cluster_persistence_.mean(0)
prob = clusters.probabilities_.mean(0)
penalty = (clusters.labels_ == -1).sum() / len(clusters.labels_)
if len(clusters.outlier_scores_)==0:
outlier = 0.0
else:
outlier = clusters.outlier_scores_.mean(0)
cluster_size = len(np.unique(clusters.labels_))
self.run['relative_validity'] = val
self.run['probability'] = prob
self.run['persistence'] = pers
self.run['penalty'] = penalty
self.run['outlier'] = outlier
self.run['cluster_size'] = cluster_size
fns = [adjusted_rand_score, homogeneity_completeness_v_measure, homogeneity_score, v_measure_score, completeness_score, adjusted_mutual_info_score]
for fn in fns:
print(f"{fn.__name__} : {fn(clusters.labels_, y)}")
self.run[f'{fn.__name__}'] = fn(clusters.labels_, y)
print("-"*20)
if self.optimize == 'rand_score':
score = -1. * adjusted_rand_score(clusters.labels_, y)
self.run['score'] = score
self.run = {**self.run, **params}
self.logs.append(self.run.copy())
self.run.clear()
print(f"SCORE: {score}")
return score
elif self.optimize == 'default':
print(f'val: {val}')
print(f'pers: {pers}')
print(f'prob: {prob}')
print(f'penalty: {penalty}')
print(f'outlier: {outlier}')
print(f'cluster size: {cluster_size}')
val_w = self.cost_fn_params['val_w']
prob_w = self.cost_fn_params['prob_w']
pers_w = self.cost_fn_params['pers_w']
penalty_w = self.cost_fn_params['penalty_w']
outlier_w = self.cost_fn_params['outlier_w']
score = -1*(val_w * val + prob_w * prob + pers_w * pers) + (penalty_w * penalty + outlier_w * outlier)
self.run['score'] = score
self.run = {**self.run, **self.cost_fn_params, **params}
self.logs.append(self.run.copy())
self.run.clear()
print(f"SCORE: {score}")
return score
def train(self, max_evals=100, algo=tpe.suggest):
"""
Perform bayesian search on hyperopt hyperparameter space to minimize objective function
"""
trials = Trials()
fmin_objective = partial(self.objective, embeddings=self.embeddings, labels=self.labels)
best = fmin(fmin_objective,
space = self.space,
algo=algo,
max_evals=max_evals,
trials=trials)
best_params = space_eval(self.space, best)
print ('best:')
print (best_params)
print("-"*20)
print("-"*20)
best_clusters = self.generate_clusters(self.embeddings,
n_neighbors = best_params['n_neighbors'],
n_components = best_params['n_components'],
min_cluster_size = best_params['min_cluster_size'],
cluster_selection_epsilon = best_params['cluster_selection_epsilon'],
metric = best_params['metric'],
cluster_selection_method = best_params['cluster_selection_method']
)
self.best_params = best_params
self.best_clusters = best_clusters
self.trials = trials
print(f'Finished training!')
def save_logs_to_csv(self, path, dataset=None):
"""
save logs to a csv file. Provide the path, optionally provide a dataset name. Creates new column.
"""
if self.optimize == 'default':
cols = ['adjusted_rand_score', 'homogeneity_completeness_v_measure',
'homogeneity_score', 'v_measure_score', 'completeness_score',
'adjusted_mutual_info_score', 'relative_validity', 'probability',
'persistence', 'penalty', 'outlier', 'score', 'cluster_size', 'val_w',
'prob_w', 'pers_w', 'penalty_w', 'outlier_w',
'cluster_selection_epsilon', 'cluster_selection_method', 'metric',
'min_cluster_size', 'n_components', 'n_neighbors']
elif self.optimize == 'rand_score':
cols = ['relative_validity', 'probability', 'persistence', 'penalty', 'outlier', 'cluster_size',
'adjusted_rand_score', 'homogeneity_completeness_v_measure', 'homogeneity_score', 'v_measure_score',
'completeness_score', 'adjusted_mutual_info_score', 'score']
df = pd.DataFrame(columns=cols)
df = df.append(self.logs)
if dataset!=None:
df['dataset'] = dataset
#df.to_csv(path, index=False)
return df
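if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # Illustrative search space, cost weights and random data only; real runs
    # would load saved embeddings via load_embedding() and use more evaluations.
    space = {
        "n_neighbors": hp.choice("n_neighbors", [5, 15, 30]),
        "n_components": hp.choice("n_components", [2, 5, 10]),
        "min_cluster_size": hp.choice("min_cluster_size", [5, 10, 25]),
        "cluster_selection_epsilon": hp.choice("cluster_selection_epsilon", [0.0, 0.1]),
        "metric": hp.choice("metric", ["euclidean"]),
        "cluster_selection_method": hp.choice("cluster_selection_method", ["eom", "leaf"]),
    }
    cost_fn_params = {"val_w": 1.0, "prob_w": 0.5, "pers_w": 0.5,
                      "penalty_w": 1.0, "outlier_w": 0.5}
    embeddings = np.random.rand(200, 32)
    labels = np.random.randint(0, 4, size=200)
    trainer = BayesClusterTrainer(space, cost_fn_params, embeddings, labels)
    # trainer.train(max_evals=5)  # uncomment to run a short search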
| 1.992188
| 2
|
sampledb/logic/schemas/validate.py
|
FlorianRhiem/sampledb
| 0
|
12783590
|
# coding: utf-8
"""
Implementation of validate(instance, schema)
"""
import re
import datetime
import typing
from ...logic import actions, objects, datatypes
from ..errors import ObjectDoesNotExistError, ValidationError, ValidationMultiError
from .utils import units_are_valid
def validate(instance: typing.Union[dict, list], schema: dict, path: typing.Optional[typing.List[str]] = None) -> None:
"""
Validates the given instance using the given schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if path is None:
path = []
if not isinstance(schema, dict):
raise ValidationError('invalid schema (must be dict)', path)
if 'type' not in schema:
raise ValidationError('invalid schema (must contain type)', path)
if schema['type'] == 'array':
return _validate_array(instance, schema, path)
elif schema['type'] == 'object':
return _validate_object(instance, schema, path)
elif schema['type'] == 'text':
return _validate_text(instance, schema, path)
elif schema['type'] == 'datetime':
return _validate_datetime(instance, schema, path)
elif schema['type'] == 'bool':
return _validate_bool(instance, schema, path)
elif schema['type'] == 'quantity':
return _validate_quantity(instance, schema, path)
elif schema['type'] == 'sample':
return _validate_sample(instance, schema, path)
elif schema['type'] == 'measurement':
return _validate_measurement(instance, schema, path)
elif schema['type'] == 'tags':
return _validate_tags(instance, schema, path)
elif schema['type'] == 'hazards':
return _validate_hazards(instance, schema, path)
else:
raise ValidationError('invalid type', path)
def _validate_array(instance: list, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given array schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, list):
raise ValidationError('instance must be list', path)
if 'minItems' in schema and len(instance) < schema['minItems']:
raise ValidationError('expected at least {} items'.format(schema['minItems']), path)
if 'maxItems' in schema and len(instance) > schema['maxItems']:
raise ValidationError('expected at most {} items'.format(schema['maxItems']), path)
errors = []
for index, item in enumerate(instance):
try:
validate(item, schema['items'], path + [str(index)])
except ValidationError as e:
errors.append(e)
if len(errors) == 1:
raise errors[0]
elif len(errors) > 1:
raise ValidationMultiError(errors)
def _validate_hazards(instance: list, schema: dict, path: typing.List[str]) -> None:
"""
Validate the given instance using the given GHS hazards schema and raise a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
if path != ['hazards']:
raise ValidationError('GHS hazards must be a top-level entry named "hazards"', path)
valid_keys = {'_type', 'hazards'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'hazards':
raise ValidationError('expected _type "hazards"', path)
if not isinstance(instance['hazards'], list):
raise ValidationError('hazards must be list', path)
errors = []
hazards = []
for index, item in enumerate(instance['hazards']):
if not isinstance(item, int):
errors.append(ValidationError('invalid hazard index type: {}'.format(type(item)), path + ['hazards', str(index)]))
elif item in hazards:
errors.append(ValidationError('duplicate hazard index: {}'.format(item), path + ['hazards', str(index)]))
elif item < 1 or item > 9:
errors.append(ValidationError('invalid hazard index: {}'.format(item), path + ['hazards', str(index)]))
else:
hazards.append(item)
if len(errors) == 1:
raise errors[0]
elif len(errors) > 1:
raise ValidationMultiError(errors)
def _validate_tags(instance: list, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given tags schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'tags'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'tags':
raise ValidationError('expected _type "tags"', path)
if not isinstance(instance['tags'], list):
raise ValidationError('tags must be list', path)
errors = []
tags = []
for index, item in enumerate(instance['tags']):
if not isinstance(item, str):
errors.append(ValidationError('invalid tag type: {}'.format(type(item)), path + ['tags', str(index)]))
elif item in tags:
errors.append(ValidationError('duplicate tag: {}'.format(item), path + ['tags', str(index)]))
elif item.lower() != item:
errors.append(ValidationError('tag not lowercase: {}'.format(item), path + ['tags', str(index)]))
elif any(c not in 'abcdefghijklmnopqrstuvwxyz0123456789_-äöüß' for c in item):
errors.append(ValidationError('tag contains invalid character: {}'.format(item), path + ['tags', str(index)]))
else:
tags.append(item)
if len(errors) == 1:
raise errors[0]
elif len(errors) > 1:
raise ValidationMultiError(errors)
def _validate_object(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
errors = []
if 'required' in schema:
for property_name in schema['required']:
if property_name not in instance:
errors.append(ValidationError('missing required property "{}"'.format(property_name), path + [property_name]))
for property_name, property_value in instance.items():
try:
if property_name not in schema['properties']:
raise ValidationError('unknown property "{}"'.format(property_name), path + [property_name])
else:
validate(property_value, schema['properties'][property_name], path + [property_name])
except ValidationError as e:
errors.append(e)
if len(errors) == 1:
raise errors[0]
elif len(errors) > 1:
raise ValidationMultiError(errors)
def _validate_text(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given text object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'text'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'text':
raise ValidationError('expected _type "text"', path)
if not isinstance(instance['text'], str):
raise ValidationError('text must be str', path)
choices = schema.get('choices', None)
if choices and instance['text'] not in choices:
raise ValidationError('The text must be one of {}.'.format(choices), path)
min_length = schema.get('minLength', 0)
max_length = schema.get('maxLength', None)
if len(instance['text']) < min_length:
raise ValidationError('The text must be at least {} characters long.'.format(min_length), path)
if max_length is not None and len(instance['text']) > max_length:
raise ValidationError('The text must be at most {} characters long.'.format(max_length), path)
if 'pattern' in schema:
if re.match(schema['pattern'], instance['text']) is None:
raise ValidationError('The text must match the pattern: {}.'.format(schema['pattern']), path)
def _validate_datetime(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given datetime object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'utc_datetime'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'datetime':
raise ValidationError('expected _type "datetime"', path)
if not isinstance(instance['utc_datetime'], str):
raise ValidationError('utc_datetime must be str', path)
try:
datetime.datetime.strptime(instance['utc_datetime'], '%Y-%m-%d %H:%M:%S')
except ValueError:
raise ValidationError('Please enter the date and time in the format: YYYY-MM-DD HH:MM:SS.', path)
def _validate_bool(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given boolean object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'value'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'bool':
raise ValidationError('expected _type "bool"', path)
if not isinstance(instance['value'], bool):
raise ValidationError('value must be bool', path)
def _validate_quantity(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given quantity object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'units', 'dimensionality', 'magnitude_in_base_units'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'quantity':
raise ValidationError('expected _type "quantity"', path)
if not isinstance(instance['units'], str):
raise ValidationError('units must be str', path)
if not units_are_valid(instance['units']):
raise ValidationError('Invalid/Unknown units', path)
if not isinstance(instance['magnitude_in_base_units'], float) and not isinstance(instance['magnitude_in_base_units'], int):
raise ValidationError('magnitude_in_base_units must be float or int', path)
try:
quantity = datatypes.Quantity(instance['magnitude_in_base_units'], units=instance['units'])
except Exception:
raise ValidationError('Unable to create quantity', path)
if not isinstance(instance['dimensionality'], str):
raise ValidationError('dimensionality must be str', path)
try:
schema_quantity = datatypes.Quantity(1.0, units=schema['units'])
except Exception:
raise ValidationError('Unable to create schema quantity', path)
if quantity.dimensionality != schema_quantity.dimensionality:
raise ValidationError('Invalid units, expected units for dimensionality "{}"'.format(str(schema_quantity.dimensionality)), path)
if str(quantity.dimensionality) != instance['dimensionality']:
raise ValidationError('Invalid dimensionality, expected "{}"'.format(str(schema_quantity.dimensionality)), path)
def _validate_sample(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given sample object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'object_id'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'sample':
raise ValidationError('expected _type "sample"', path)
if not isinstance(instance['object_id'], int):
raise ValidationError('object_id must be int', path)
try:
sample = objects.get_object(object_id=instance['object_id'])
except ObjectDoesNotExistError:
raise ValidationError('object does not exist', path)
action = actions.get_action(sample.action_id)
if action.type != actions.ActionType.SAMPLE_CREATION:
raise ValidationError('object must be sample', path)
def _validate_measurement(instance: dict, schema: dict, path: typing.List[str]) -> None:
"""
Validates the given instance using the given measurement object schema and raises a ValidationError if it is invalid.
:param instance: the sampledb object
:param schema: the valid sampledb object schema
:param path: the path to this subinstance / subschema
:raise ValidationError: if the schema is invalid.
"""
if not isinstance(instance, dict):
raise ValidationError('instance must be dict', path)
valid_keys = {'_type', 'object_id'}
required_keys = valid_keys
schema_keys = set(instance.keys())
invalid_keys = schema_keys - valid_keys
if invalid_keys:
raise ValidationError('unexpected keys in schema: {}'.format(invalid_keys), path)
missing_keys = required_keys - schema_keys
if missing_keys:
raise ValidationError('missing keys in schema: {}'.format(missing_keys), path)
if instance['_type'] != 'measurement':
raise ValidationError('expected _type "measurement"', path)
if not isinstance(instance['object_id'], int):
raise ValidationError('object_id must be int', path)
try:
measurement = objects.get_object(object_id=instance['object_id'])
except ObjectDoesNotExistError:
raise ValidationError('object does not exist', path)
action = actions.get_action(measurement.action_id)
if action.type != actions.ActionType.MEASUREMENT:
raise ValidationError('object must be measurement', path)
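if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # Validates a minimal object with a single required text property; the
    # sample/measurement types need database access and are not exercised here.
    example_schema = {
        'type': 'object',
        'properties': {'name': {'type': 'text', 'minLength': 1}},
        'required': ['name'],
    }
    example_instance = {'name': {'_type': 'text', 'text': 'example object'}}
    validate(example_instance, example_schema)
    print('instance is valid')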
| 3.078125
| 3
|
app/forms.py
|
ruppatel115/Local_Music
| 0
|
12783591
|
from flask_wtf import FlaskForm, Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField, DateField, SelectField, SelectMultipleField, \
TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User,Artist, Venue, Event, ArtistToEvent
class ArtistForm(FlaskForm):
artistName = StringField('ArtistName', validators=[DataRequired()])
HomeTown = StringField('Hometown', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired()])
submit = SubmitField('CREATE NEW ARTIST')
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'<PASSWORD>', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class addNewVenue(FlaskForm):
venue_name = StringField('Venue Name', validators=[DataRequired()])
location = StringField('Location Name', validators=[DataRequired()])
submit = SubmitField('Add New Venue')
def validate_venueName(self, venue_name):
venue = Venue.query.filter_by(venue_name=venue_name.data).first()
if venue is not None:
raise ValidationError('Venue already exists')
class addNewEvent(FlaskForm):
event_name = StringField('Event Name', validators=[DataRequired()])
datetime = DateField('Event Date', format='%Y-%m-%d', validators=[DataRequired()])
artist = SelectMultipleField("Artists", coerce=int, choices=[])
venue_name = SelectField('Venue', coerce=int, choices=[])
submit = SubmitField('Add New Event')
def validate_datetime(self, datetime):
datetime = Event.query.filter_by(datetime=datetime.data).first()
if datetime is not None:
raise ValidationError('An event already exists on that date.')
# class EditProfileForm(FlaskForm):
# username = StringField('Username', validators=[DataRequired()])
# about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
# submit = SubmitField('Submit')
#
# def __init__(self, original_username, *args, **kwargs):
# super(EditProfileForm, self).__init__(*args, **kwargs)
# self.original_username = original_username
#
# def validate_username(self, username):
# if username.data != self.original_username:
# user = User.query.filter_by(username=self.username.data).first()
# if user is not None:
# raise ValidationError('Please use a different username.')
| 2.796875
| 3
|
Heuristic based Approaches/Path Scanning Algorithm/path_scanning.py
|
Eashwar-S/Icy_Road_Project
| 0
|
12783592
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import time
# In[2]:
def createGraph(depotNodes ,requiredEdges, numNodes, show=True):
G = nx.Graph()
edges = []
pos = {}
reqPos = {}
s = [1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 7]
t = [2, 3, 4, 6, 4, 5, 5, 7, 6, 8, 7, 8]
weights = [2.3, 2, 3, 1.5, 3.2, 2.2, 3.8, 2.6, 2.2, 2.8, 1.8, 0.8]
xData = [-2, -0.5, -1, 0, 1, 1.5, 2, 2.5]
yData = [0, -2, 2.5, 0, 3, -2, 0.3, 1.5]
for i in range(len(s)):
edges.append((s[i], t[i], weights[i]))
for i in range(1, numNodes+1):
G.add_node(i)
pos[i] =(xData[i-1], yData[i-1])
node_color = ['y']*int(G.number_of_nodes())
depot_node_color = node_color
for i in range(1, len(node_color)+1):
if i in depotNodes:
depot_node_color[i-1] = 'g'
G.add_weighted_edges_from(edges)
labels = nx.get_edge_attributes(G,'weight')
nx.draw_networkx(G,pos, node_color = node_color)
nx.draw_networkx(G,pos, node_color = depot_node_color)
nx.draw_networkx_edges(G, pos, edgelist=requiredEdges, width=3, alpha=0.5,
edge_color="r")
nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)
if show:
plt.figure(1)
plt.show()
return G,pos, depot_node_color
# In[3]:
# Allocating task based on distance between base station and desired edge and UAV availability
def taskAllocation(G, depotNodes, requiredNodes, numrequiredEdges, uavsInDepotNodes):
depotNodesCost = np.zeros((len(depotNodes), numrequiredEdges))
depotPath = []
bestPathTillDesiredEdge = []
bestCostTillDesiredEdge = []
for j in range(numrequiredEdges):
for i in range(len(depotNodes)):
c1 = nx.dijkstra_path_length(G, source=depotNodes[i], target=requiredNodes[j][0])
c2 = nx.dijkstra_path_length(G, source=depotNodes[i], target=requiredNodes[j][1])
l = []
if c1 <= c2:
l = nx.dijkstra_path(G, source=depotNodes[i], target=requiredNodes[j][0])
l.append(requiredNodes[j][1])
else:
l = nx.dijkstra_path(G, source=depotNodes[i], target=requiredNodes[j][1])
l.append(requiredNodes[j][0])
depotNodesCost[i,j] = min(c1,c2)
depotNodesCost[i,j] += G.get_edge_data(requiredNodes[j][0], requiredNodes[j][1])['weight']
depotPath.append(l)
if uavsInDepotNodes[np.argmin(depotNodesCost[:,j])] > 0:
uavsInDepotNodes[np.argmin(depotNodesCost[:,j])] -= 1
else:
depotNodesCost[np.argmin(depotNodesCost[:,j]),j] = np.inf
depotPath = np.transpose(np.array(depotPath, dtype=object).reshape((len(depotNodes), numrequiredEdges)))
taskAllocatedtoBaseStations = []
print("Task Allocation Algorithm Output: ")
for i in range(numrequiredEdges):
taskAllocatedtoBaseStations.append(np.argmin(depotNodesCost[:,i]))
bestCostTillDesiredEdge.append(depotNodesCost[taskAllocatedtoBaseStations[i],i])
bestPathTillDesiredEdge.append(depotPath[taskAllocatedtoBaseStations[i],i])
print('Allocating arc ' + str(requiredNodes[i][0]) + ' - ' + str(requiredNodes[i][1]) + ' to base station - node ' + str(depotNodes[taskAllocatedtoBaseStations[i]]))
return bestPathTillDesiredEdge, bestCostTillDesiredEdge
# In[4]:
def pathScanningAlgorithm(G, numrequiredEdges,depotNodes, bestPathTillDesiredEdge, bestCostTillDesiredEdge, vehicleCapacity):
bestRoute = []
bestRouteCost = []
minCost = np.inf
for j in range(numrequiredEdges):
minCost = np.inf
l = []
for i in range(len(depotNodes)):
c1 = nx.dijkstra_path_length(G, source=bestPathTillDesiredEdge[j][-1], target=depotNodes[i])
if c1 <= minCost:
l = nx.dijkstra_path(G, source=bestPathTillDesiredEdge[j][-1], target=depotNodes[i])[1:]
minCost = c1
bestRoute.append(bestPathTillDesiredEdge[j] + l)
bestRouteCost.append(bestCostTillDesiredEdge[j] + minCost)
if bestRouteCost[j] > vehicleCapacity:
bestRoute[j] = None
bestRouteCost[j] = np.inf
print("Path Scanning Algorithm Output: ")
return bestRoute, bestRouteCost
# In[5]:
def visualizePath(depotNodes, requiredNodes, numNodes, path, pathType="solution"):
plt.figure(1)
for j in range(len(path)):
if path[j] != None:
# plt.figure(j+1)
G, pos, depot_node_color = createGraph(depotNodes, requiredNodes , numNodes, show=False)
G1 = nx.DiGraph()
pos1 = {}
node_color = []
edges = []
for i in range(len(path[j])-1):
edges.append((path[j][i], path[j][i+1], G.get_edge_data(path[j][i], path[j][i+1])['weight']))
pos1[path[j][i]] = pos[path[j][i]]
if i == len(path[j])-2:
pos1[path[j][i+1]] = pos[path[j][i+1]]
for key in pos1.keys():
node_color.append(depot_node_color[key-1])
G1.add_weighted_edges_from(edges)
nx.draw_networkx(G1,pos1, arrows=True, node_color = node_color, edge_color='b', arrowsize=12, width=1, arrowstyle='simple')
if pathType == "solution":
plt.legend(["Solution Path"], loc ="upper left")
else:
plt.legend(["Path"], loc ="upper left")
plt.show()
# In[6]:
def main():
# Initializing Parameters
vehicleCapacity = 14
numNodes = 8
requiredNodes = [[2, 4], [6, 7]]
uavsInDepotNodes = [0, 2]
totalUavs = sum(uavsInDepotNodes)
numrequiredEdges = 2
depotNodes = [1, 5]
taskAllocatedtoBaseStations = []
start = time.time()
G,pos, depot_node_color = createGraph(depotNodes, requiredNodes, numNodes, show=False)
bestPathTillDesiredEdge, bestCostTillDesiredEdge = taskAllocation(G, depotNodes, requiredNodes, numrequiredEdges, uavsInDepotNodes)
visualizePath(depotNodes, requiredNodes, numNodes, bestPathTillDesiredEdge, pathType="normal")
bestRoute, bestRouteCost = pathScanningAlgorithm(G, numrequiredEdges, depotNodes, bestPathTillDesiredEdge, bestCostTillDesiredEdge, vehicleCapacity)
visualizePath(depotNodes, requiredNodes, numNodes, bestRoute)
end = time.time()
print("Execution took "+ str(end-start) + " seconds.")
if __name__ == "__main__":
# execute only if run as a script
main()
# In[ ]:
# In[ ]:
# In[ ]:
| 3.046875
| 3
|
2to3/fix_reload.py
|
joulez/Limnoria
| 1
|
12783593
|
# Based on fix_intern.py. Original copyright:
# Copyright 2006 <NAME>.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for intern().
intern(s) -> sys.intern(s)"""
# Local imports
from lib2to3 import pytree
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, Attr, touch_import
class FixReload(fixer_base.BaseFix):
BM_compatible = True
order = "pre"
PATTERN = """
power< 'reload'
after=any*
>
"""
def transform(self, node, results):
touch_import('imp', 'reload', node)
return node
| 2.015625
| 2
|
bncbot/event.py
|
TotallyNotRobots/bnc-bot
| 3
|
12783594
|
# coding=utf-8
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from asyncirc.irc import ParamList, Message
from bncbot.bot import Command
from bncbot.conn import Conn
class Event:
def __init__(self, *, conn: 'Conn' = None, base_event: 'Event' = None,
nick: str = None, user: str = None, host: str = None,
mask: str = None, chan: str = None) -> None:
if base_event:
self.conn = base_event.conn
self.nick = base_event.nick
self.user = base_event.user
self.host = base_event.host
self.mask = base_event.mask
self.chan = base_event.chan
else:
self.conn = conn
self.nick = nick
self.user = user
self.host = host
self.mask = mask
self.chan = chan
def message(self, message: str, target: str = None) -> None:
if not target:
assert self.chan
target = self.chan
self.conn.msg(target, message)
def notice(self, message: str, target: str = None) -> None:
if not target:
assert self.nick
target = self.nick
self.conn.notice(target, message)
@property
def bnc_data(self):
return self.conn.bnc_data
@property
def bnc_queue(self):
return self.conn.bnc_queue
@property
def bnc_users(self):
return self.conn.bnc_users
@property
def event(self):
return self
@property
def loop(self):
return self.conn.loop
@property
def is_admin(self):
return self.conn.is_admin(self.mask)
class RawEvent(Event):
def __init__(self, *, conn: 'Conn' = None, base_event=None,
nick: str = None, user: str = None, host: str = None,
mask: str = None, chan: str = None, irc_rawline: 'Message' = None,
irc_command: str = None, irc_paramlist: 'ParamList' = None) -> None:
super().__init__(
conn=conn, base_event=base_event, nick=nick, user=user, host=host,
mask=mask, chan=chan
)
self.irc_rawline = irc_rawline
self.irc_command = irc_command
self.irc_paramlist = irc_paramlist
class CommandEvent(Event):
def __init__(self, *, conn: 'Conn' = None, base_event=None,
nick: str = None, user: str = None, host: str = None,
mask: str = None, chan: str = None, command: str,
text: str = None, cmd_handler: 'Command' = None) -> None:
super().__init__(
conn=conn, base_event=base_event, nick=nick, user=user, host=host,
mask=mask, chan=chan
)
self.command = command
self.text = text
self.cmd_handler = cmd_handler
def notice_doc(self):
if not self.cmd_handler.doc:
message = "{}{} requires additional arguments.".format(
self.conn.cmd_prefix, self.command
)
else:
message = "{}{} {}".format(self.conn.cmd_prefix, self.command, self.cmd_handler.doc)
self.notice(message)
| 2.40625
| 2
|
checkmetadata.py
|
jkalkhof/MediaServer
| 0
|
12783595
|
import ffmpy, subprocess, json
import argparse
def main():
# Globals in Python are global to a module, not across all modules.
# global validDatesDict
# global stripDates
# TODO: use command line parameters to determine path to scan
# https://stackabuse.com/command-line-arguments-in-python/
parser = argparse.ArgumentParser(
description='checkmetadata')
# parser.add_argument('-i','--input', help='Input file name', required=True)
# generic directory scanning operations - into internal dictionary tree data structure
parser.add_argument('-i','--input', help='Input file name')
args = parser.parse_args()
print ("Input file: %s" % args.input )
# ffprobe = ffmpy.FFprobe(global_options="-loglevel quiet -sexagesimal -of json -show_entries stream=width,height,duration -show_entries format=duration -select_streams v:0", inputs={args.input : None})
ffprobe = ffmpy.FFprobe(global_options="-show_format -of json", inputs={args.input : None})
print("ffprobe.cmd:", ffprobe.cmd) # printout the resulting ffprobe shell command
stdout, stderr = ffprobe.run(stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# std* is byte sequence, but json in Python 3.5.2 requires str
ff0string = str(stdout,'utf-8')
ffinfo = json.loads(ff0string)
print(json.dumps(ffinfo, indent=4)) # pretty print
# print("Video Dimensions: {}x{}".format(ffinfo["streams"][0]["width"], ffinfo["streams"][0]["height"]))
# print("Streams Duration:", ffinfo["streams"][0]["duration"])
# print("Format Duration: ", ffinfo["format"]["duration"])
if ("ensemble" in ffinfo["format"]["tags"]):
print("ADS ensemble: ", ffinfo["format"]["tags"]["ensemble"])
if ("boundary_condition" in ffinfo["format"]["tags"]):
print("ADS boundary_condition: ", ffinfo["format"]["tags"]["boundary_condition"])
if ("init" in ffinfo["format"]["tags"]):
print("ADS init: ", ffinfo["format"]["tags"]["init"])
if ("plot" in ffinfo["format"]["tags"]):
print("ADS plot: ", ffinfo["format"]["tags"]["plot"])
if ("plot_group" in ffinfo["format"]["tags"]):
print("ADS plot_group: ", ffinfo["format"]["tags"]["plot_group"])
if __name__ == '__main__':
main()
| 2.484375
| 2
|
sb3/querys.py
|
himalshakya1/sciencebasepy
| 0
|
12783596
|
import json
meQuery = """
me {
username
myFolderExists
myFolder {
id
}
roles
}
"""
# createUploadSession({itemIdStr}, files : [{{ {filenameStr}, {fileContentType}}}]){{
def createUploadSessionQuery(itemStr, itemIdStr, filenameStr, fileContentType):
return f'''
{{
item({itemStr}) {{
id
title
}}
createUploadSession({itemIdStr}, files : [{{ {filenameStr}}}]){{
uploads {{
name
url
}}
}}
}}'''
def getItemQuery(itemId, params):
paramStr = make_param(params)
return f'''
query {{
item(id: "{itemId}")
{paramStr}
}}
'''
def createMultipartUploadSession(s3FilePath):
return f'''
query {{
createMultipartUploadSession(object: "{s3FilePath}")
}}
'''
def getPreSignedUrlForChunk(s3FilePath, upload_id, part_number):
return f'''
query {{
getPreSignedUrlForChunk(object: "{s3FilePath}", upload_id: "{upload_id}", part_number: "{part_number}")
}}
'''
def completeMultiPartUpload(itemStr, upload_id, etag_payload):
etag_payload_array = str(etag_payload).replace("'","")
return f'''
query {{
completeMultiPartUpload(
object: "{itemStr}"
upload_id: "{upload_id}"
parts_eTags: {etag_payload_array}
)
}}
'''
def make_param(mlist):
mystr = '{ '
for x in mlist:
if type(x) == list:
mystr = mystr + make_param(x)
else:
mystr = ' ' + mystr + ' ' + x + ' '
mystr = mystr + ' }'
return mystr
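if __name__ == "__main__":
    # --- Hedged usage sketch (editor addition) ---
    # Builds a GraphQL query for a hypothetical item id; nested lists passed to
    # make_param() become nested selection blocks.
    example_params = ["id", "title", ["files", ["name", "url"]]]
    print(getItemQuery("EXAMPLE_ITEM_ID", example_params))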
| 2.296875
| 2
|
worker/plugins/levels.py
|
NepStark/mee6
| 43
|
12783597
|
from random import randint
from plugins.base import Base
from collections import defaultdict
from utils import fmt
from copy import copy
XP_REWARD_RANGE = (15, 25)
COOLDOWN_DURATION = 60
lvls_xp = [5*(i**2)+50*i+100 for i in range(200)]
def get_level_from_xp(xp):
remaining_xp = int(xp)
lvl = 0
while remaining_xp >= lvls_xp[lvl]:
remaining_xp -= lvls_xp[lvl]
lvl += 1
return lvl
class Player():
def __init__(self, guild, member):
self._guild = guild
self.guild_id = guild.id
self.member_id = member.id
self._storage = guild.storage
@property
def lvl(self):
return get_level_from_xp(self.xp)
@property
def xp(self):
return int(self._storage.get('player:{}:xp'.format(self.member_id)) or 0)
@xp.setter
def xp(self, xp):
return self._storage.set('player:{}:xp'.format(self.member_id), xp)
class Levels(Base):
players = defaultdict(dict)
def get_player(self, g, member):
cached_player = self.players[g.id].get(member.id)
if cached_player:
return cached_player
player = Player(g,
member)
self.players[g.id][member.id] = player
return player
def is_member_banned(self, guild, member):
banned_roles = guild.storage.smembers('banned_roles')
for role in member.roles:
if str(role.id) in banned_roles:
return True
return False
def on_message_create(self, guild, message):
storage = guild.storage
# check if member is banned from gaining lvls
if self.is_member_banned(guild, message.author):
return
# check member's CD
cooldown_key = 'player:{}:check'.format(message.author.id)
print(cooldown_key)
print(storage.get(cooldown_key))
if storage.get(cooldown_key):
return
# activating CD
storage.set(cooldown_key, '1', ex=COOLDOWN_DURATION)
# get the player
player = self.get_player(guild, message.author)
player_lvl = copy(player.lvl)
new_xp = randint(*XP_REWARD_RANGE)
print('adding xp to {}'.format(message.author.id))
print(player.xp)
player.xp += new_xp
print(player.xp)
# adding the player (in case of not added)
storage.sadd('players', message.author.id)
has_lvl_up = player.lvl != player_lvl
print('lvl')
print(player.lvl)
print(player_lvl)
if has_lvl_up:
announcement_enabled = storage.get('announcement_enabled')
if not announcement_enabled:
return
should_whisp = storage.get('whisp')
if should_whisp:
destination = message.author
else:
destination = message.channel
announcement_fmt = storage.get('announcement')
announcement = fmt(announcement_fmt,
player=message.author.mention,
level=player.lvl)
self.send_message(destination, announcement)
| 2.125
| 2
|
src/Observer/FileObserver.py
|
TheNexusAvenger/ROCKPro64-Fan-Controller
| 1
|
12783598
|
"""
TheNexusAvenger
Observes changes to files and handles IO operations.
"""
import threading
import time
import os
from Observer import Observer
"""
Class representing a file observer.
"""
class FileObserver(Observer.Observable):
"""
Creates a file observer object.
"""
def __init__(self,fileName):
super().__init__()
# Store the file and the last time.
self.fileName = fileName
if self.fileExists():
self.lastContents = self.readFileLines()
else:
self.lastContents = None
# Start polling in a thread.
threading.Thread(target=self.startPolling).start()
"""
Returns if the file exists.
"""
def fileExists(self):
return os.path.exists(self.fileName)
"""
Returns the contents of the file. Throws an error
if the file doesn't exist.
"""
def readFileLines(self):
# Throw an error if the file doesn't exist.
if not self.fileExists():
raise Exception("File does not exist: " + self.fileName)
# Return the contents.
with open(self.fileName) as file:
return file.read()
"""
Writes the contents of the files. Throws an error
if the file doesn't exist.
"""
def writeFileLines(self,lines):
# Throw an error if the file doesn't exist.
if not self.fileExists():
raise Exception("File does not exist: " + self.fileName)
# Write the contents.
with open(self.fileName,"w") as file:
file.writelines(str(lines))
"""
Polling function that checks for file changes.
"""
def startPolling(self):
# Set up the loop.
while True:
# Check if the file has changed.
if self.fileExists():
newContents = self.readFileLines()
# If the last modified time changed, notify the observers.
if self.lastContents != newContents:
self.lastContents = newContents
self.notify()
else:
# If the file was removed, notify the observers.
if self.lastContents != None:
self.lastContents = None
self.notify()
# Sleep for 0.1 seconds.
time.sleep(0.1)
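# Usage sketch (the path is hypothetical, and how callbacks are registered
# depends on the local Observer.Observable base class, which is not shown here):
# observer = FileObserver("/sys/class/hwmon/hwmon0/pwm1")
# print(observer.readFileLines())   # raises if the file does not exist
# observer.writeFileLines(120)      # observers are notified on the next poll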
| 3.21875
| 3
|
app/globus_portal_framework/tests/test_utils.py
|
dbmi-pitt/aurora-meta
| 0
|
12783599
|
from unittest import mock
from django.test import TestCase
from django.contrib.auth.models import AnonymousUser
import globus_sdk
from globus_portal_framework.utils import load_globus_client
from globus_portal_framework.tests.mocks import (
MockGlobusClient, mock_user, globus_client_is_loaded_with_authorizer
)
class GlobusPortalFrameworkUtilsTests(TestCase):
@mock.patch('globus_sdk.SearchClient', MockGlobusClient)
def test_load_search_client_with_anonymous_user(self):
c = load_globus_client(AnonymousUser(), globus_sdk.SearchClient,
'search.api.globus.org')
self.assertFalse(globus_client_is_loaded_with_authorizer(c))
@mock.patch('globus_sdk.SearchClient', MockGlobusClient)
def test_load_globus_client_with_real_user(self):
user = mock_user('bob', ['search.api.globus.org'])
c = load_globus_client(user, globus_sdk.SearchClient,
'search.api.globus.org')
self.assertTrue(globus_client_is_loaded_with_authorizer(c))
@mock.patch('globus_sdk.SearchClient', MockGlobusClient)
def test_load_transfer_client_with_bad_token(self):
user = mock_user('bob', ['transfer.api.globus.org'])
with self.assertRaises(ValueError):
c = load_globus_client(user, globus_sdk.SearchClient,
'search.api.globus.org')
| 2.265625
| 2
|
ripiu/cmsplugin_articles/apps.py
|
ripiu/cmsplugin_articles
| 0
|
12783600
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ArticlesConfig(AppConfig):
name = 'ripiu.cmsplugin_articles'
verbose_name = _('Articles and sections')
| 1.414063
| 1
|
tests/test_downloader.py
|
codeasap-pl/asyncio-crawler
| 0
|
12783601
|
from crawler import Downloader, DownloaderError, Worker
from .testing import WorkerTestCase, FakeApp
import asyncio
import aiohttp
from urllib.parse import urljoin
class DownloaderTestCase(WorkerTestCase):
def setUp(self):
super().setUp()
# reset class attribute
Downloader.SEQ_ID = 0
self.url_queue = asyncio.Queue()
async def asyncSetUp(self):
super().setUp()
self.http_client = aiohttp.ClientSession(
headers={"User-Agent": "Test Client"},
)
async def asyncTearDown(self):
await super().asyncTearDown()
await self.http_client.close()
class TestDownloaderBasic(DownloaderTestCase):
def test_init(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client
)
self.assertIsInstance(downloader, Worker, "Is a Worker")
def test_object_enumeration(self):
for i in range(1, 8):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client
)
self.assertEqual(downloader.seq_id, i, "sequential id")
self.assertEqual(str(downloader), "Downloader-%d" % i, "__str__")
class TestDownloaderOperations(DownloaderTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.webapp = FakeApp()
cls.webapp.start()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls.webapp.stop()
def make_url(self, uri):
return "http://%s:%d/%s" % (
self.webapp.host,
self.webapp.port,
uri.lstrip("/")
)
def test_extract_urls(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
)
page_url = "http://localhost"
paths = ["/test/%d" % i for i in range(16)]
links = ['<a href="%s"></a>' % p for p in paths]
text = "<body>%s</body>" % "".join(links)
urls = downloader.extract_urls(page_url, text)
absurls = [urljoin(page_url, p) for p in paths]
self.assertEqual(urls, absurls, "Returns absolute urls")
async def test_errors(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
)
error_codes = [400, 401, 402, 403, 404, 422, 500, 502]
for ec in error_codes:
job = {"url": self.make_url("/error/%d" % ec)}
response = await downloader.HEAD(job)
self.assertEqual(response.status, ec, "Handler error: %d" % ec)
async def test_HEAD(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
size_limit_kb=64,
)
requested_url = self.make_url("/html/1")
job = {"url": requested_url}
response = await downloader.HEAD(job)
self.assertEqual(response.status, 200, "HEAD")
async def test_GET(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
size_limit_kb=64,
)
requested_url = self.make_url("/html/10")
job = {"url": requested_url}
url, headers, content = await downloader.GET(job)
self.assertEqual(url, requested_url)
self.assertTrue(headers)
self.assertTrue(content)
async def test_size_limit(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
size_limit_kb=1,
)
requested_url = self.make_url("/html/1000")
job = {"url": requested_url}
with self.assertRaises(DownloaderError):
await downloader.GET(job)
async def test_reacts_to_stop(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
size_limit_kb=1,
)
task = asyncio.create_task(downloader())
self.stop_ev.set()
try:
await asyncio.wait_for(asyncio.gather(task), timeout=1)
except (asyncio.TimeoutError, asyncio.CancelledError):
self.fail("Did not react to stop event")
async def test_run(self):
downloader = Downloader(
self.stop_ev,
self.cache,
self.url_queue,
self.http_client,
size_limit_kb=128,
whitelist=["localhost"],
)
n = 128
for i in range(n):
url = self.make_url("/html/%d" % i)
self.url_queue.put_nowait({"url": url})
tasks = [
asyncio.create_task(downloader())
for _ in range(8)
]
total_processed = 0
wait_interval = 0.1
max_wait_loops = 3 / wait_interval
while max_wait_loops and total_processed < n:
max_wait_loops -= 1
cur = self.dbconn.cursor()
cur.execute("SELECT COUNT(*) FROM urls WHERE mtime IS NOT NULL")
total_processed = cur.fetchone()[0]
await asyncio.sleep(wait_interval)
self.stop_ev.set()
await asyncio.wait_for(asyncio.gather(*tasks), timeout=1)
self.assertEqual(total_processed, n, "Processed all")
| 2.546875
| 3
|
hebert-sentiment-analysis-inference-docker-lambda/app/app.py
|
shneydor/aws-lambda-docker-serverless-inference
| 0
|
12783602
|
from transformers import pipeline
import json
sentiment_analysis = pipeline(
"sentiment-analysis",
model="./model",
tokenizer="./model",
return_all_scores = True
)
def handler(event, context):
print('Received event: ' + json.dumps(event, indent=2))
hebrew_text = event['hebrew_text']
result = sentiment_analysis(hebrew_text)
print('result: {}'.format(result))
return json.dumps(result)
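# Local smoke-test sketch (the sample text and the direct call are illustrative;
# this assumes the ./model directory bundled in the image is available locally):
if __name__ == "__main__":
    print(handler({"hebrew_text": "השירות היה מצוין"}, None))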
| 2.640625
| 3
|
tscreensvr.py
|
ninadmhatre/tweet2image
| 1
|
12783603
|
from draw_image import make_image
from twitter_interface import get_twitter_api, fetch_statues
from config import TWITTER_HANDLERS
def run():
api = get_twitter_api()
for handler in TWITTER_HANDLERS:
statues, user = fetch_statues(api, handler, count=20)
for status in statues:
make_image(user.name, status, verbose=True)
if __name__ == '__main__':
run()
| 2.6875
| 3
|
language/python/DeepNudeImage/DCGAN/dcgan_model.py
|
LIU2016/Demo
| 1
|
12783604
|
import tensorflow as tf
import matplotlib.pyplot as plt
class Generator(tf.keras.Model):
def __init__(self):
super(Generator, self).__init__()
self.fc_a = tf.keras.layers.Dense(53 * 43 * 128, use_bias=False)
self.Conv2DT_a = tf.keras.layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
self.Conv2DT_b = tf.keras.layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='valid', use_bias=False)
self.Conv2DT_c = tf.keras.layers.Conv2DTranspose(3, (5, 5), strides=(2, 2),
padding='same', use_bias=False, activation='tanh')
self.BN_a = tf.keras.layers.BatchNormalization()
self.BN_b = tf.keras.layers.BatchNormalization()
self.BN_c = tf.keras.layers.BatchNormalization()
self.LeckyReLU_a = tf.keras.layers.LeakyReLU()
self.LeckyReLU_b = tf.keras.layers.LeakyReLU()
self.LeckyReLU_c = tf.keras.layers.LeakyReLU()
def call(self, random_noise, training=False):
# random_noise (batch_size, 53 * 43 * 256)
# x (batch_size, 53 * 43 * 128)
x = self.fc_a(random_noise)
x = self.BN_a(x, training=training)
x = self.LeckyReLU_a(x)
# (batch_size, 53, 43, 128)
x = tf.keras.layers.Reshape((53, 43, 128))(x)
# (batch_size, 53, 43, 128)
x = self.Conv2DT_a(x)
x = self.BN_b(x, training=training)
x = self.LeckyReLU_b(x)
# (batch_size, 109, 89, 64)
x = self.Conv2DT_b(x)
x = self.BN_c(x, training=training)
x = self.LeckyReLU_c(x)
# (batch_size, 218, 178, 3)
generated_image = self.Conv2DT_c(x)
return generated_image
class Discriminator(tf.keras.Model):
def __init__(self):
super(Discriminator, self).__init__()
self.Conv2D_a = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')
self.Conv2D_b = tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')
self.LeckyReLU_a = tf.keras.layers.LeakyReLU()
self.LeckyReLU_b = tf.keras.layers.LeakyReLU()
self.Dropout_a = tf.keras.layers.Dropout(0.3)
self.Dropout_b = tf.keras.layers.Dropout(0.3)
self.Flatten = tf.keras.layers.Flatten()
self.dense = tf.keras.layers.Dense(1)
def call(self, image, training=False):
# image (batch_size, 218, 178, 3)
x = self.Conv2D_a(image)
x = self.LeckyReLU_a(x)
x = self.Dropout_a(x, training=training)
x = self.Conv2D_b(x)
x = self.LeckyReLU_b(x)
x = self.Dropout_b(x, training=training)
x = self.Flatten(x)
x = self.dense(x)
return x
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
def max_min_normal_matrix(image_matrix):
image_matrix_min = image_matrix.min()
image_matrix_max = image_matrix.max()
image_matrix_normal = (image_matrix - image_matrix_min) / (image_matrix_max - image_matrix_min)
return image_matrix_normal
if __name__ == "__main__":
generator = Generator()
noise = tf.random.normal([16, 100])
print(f"Inputs noise.shape {noise.shape}")
generated_image = generator(noise, training=False)
#generator.summary()
print(f"Pass by ------------ ----generator----------------------")
print(f"Outputs generated_image.shape {generated_image.shape}")
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
plt.show()
plt.savefig("generated_image_test.png")
discriminator = Discriminator()
print(f"Pass by ------------ ----discriminator----------------------")
decision = discriminator(generated_image, training=False)
print(f"Outputs decision.shape {decision.shape}")
#discriminator.summary()
print(f"Outputs decision \n{decision}")
predictions = generated_image
fig = plt.figure(figsize=(4, 4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i + 1)
plt.imshow(max_min_normal_matrix(predictions[i].numpy()))
plt.axis('off')
save_image_path = 'image_at_epoch_{:04d}.png'.format(1)
plt.savefig(save_image_path)
| 2.59375
| 3
|
sniffing/http-password-sniffer.py
|
imsouza/pentest-tools
| 0
|
12783605
|
from scapy.all import *
def print_packages(package):
    # Work on raw bytes of the TCP payload (Python 3 port of the original Python 2 code).
    payload = bytes(package[TCP].payload)
    if payload[0:4] == b'POST':
        if b'pass' in payload.lower():
            print(package[TCP].payload)
sniff(filter='port 80',store=0, prn=print_packages)
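# Note: capturing on port 80 requires root privileges and only sees plaintext
# HTTP, e.g. (command is illustrative): sudo python3 http-password-sniffer.py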
| 2.328125
| 2
|
pydatajson/status_indicators_generator.py
|
datosgobar/pydatajson
| 13
|
12783606
|
# -*- coding: utf-8 -*-
from pydatajson.readers import read_catalog
from pydatajson.reporting import generate_datasets_summary
from pydatajson.validators\
.distribution_download_urls_validator \
import DistributionDownloadUrlsValidator
class StatusIndicatorsGenerator(object):
def __init__(self, catalog, validator=None, verify_ssl=True,
url_check_timeout=1, threads_count=1):
self.download_url_ok = None
self.catalog = read_catalog(catalog)
self.summary = generate_datasets_summary(self.catalog,
validator=validator,
verify_ssl=verify_ssl)
self.verify_url = verify_ssl
self.url_check_timeout = url_check_timeout
self.threads_count = threads_count
def datasets_cant(self):
return len(self.summary)
def distribuciones_cant(self):
return sum(ds['cant_distribuciones'] for ds in self.summary)
def datasets_meta_ok_cant(self):
return sum(ds['estado_metadatos'] == 'OK' for ds in self.summary)
def datasets_meta_error_cant(self):
return sum(ds['estado_metadatos'] == 'ERROR' for ds in self.summary)
def datasets_meta_ok_pct(self):
return self._get_dataset_percentage(self.datasets_meta_ok_cant)
def datasets_con_datos_cant(self):
return sum(ds['tiene_datos'] == 'SI' for ds in self.summary)
def datasets_sin_datos_cant(self):
return sum(ds['tiene_datos'] == 'NO' for ds in self.summary)
def datasets_con_datos_pct(self):
return self._get_dataset_percentage(self.datasets_con_datos_cant)
def distribuciones_download_url_ok_cant(self):
if self.download_url_ok:
return self.download_url_ok
validator = DistributionDownloadUrlsValidator(
self.catalog, self.verify_url, self.url_check_timeout,
self.threads_count)
self.download_url_ok = validator.validate()
return self.download_url_ok
def distribuciones_download_url_error_cant(self):
return self.distribuciones_cant() - \
self.distribuciones_download_url_ok_cant()
def distribuciones_download_url_ok_pct(self):
total = self.distribuciones_cant()
if not total:
return None
return \
round(float(self.distribuciones_download_url_ok_cant()) / total, 4)
def _get_dataset_percentage(self, indicator):
total = self.datasets_cant()
if not total:
return None
return round(float(indicator()) / total, 4)
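# Usage sketch (the catalog path is hypothetical; anything accepted by
# read_catalog should work):
# generator = StatusIndicatorsGenerator("data.json", verify_ssl=False, threads_count=4)
# print(generator.datasets_cant(), generator.datasets_meta_ok_pct())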
| 2.375
| 2
|
Leetcode/Python/_169.py
|
Xrenya/algorithms
| 1
|
12783607
|
class Solution:
def majorityElement(self, nums: List[int]) -> int:
vote = 0
result = None
n = len(nums) // 2
for num in nums:
if vote == 0:
result = num
if result == num:
vote += 1
if vote > n:
return result
else:
vote -= 1
return result
class Solution:
def majorityElement(self, nums: List[int]) -> int:
vote = 0
result = None
for num in nums:
if vote == 0:
result = num
if result == num:
vote += 1
else:
vote -= 1
return result
from collections import defaultdict
class Solution:
def majorityElement(self, nums: List[int]) -> int:
lenght = len(nums)
major = lenght // 2
hashMap = defaultdict(int)
for num in nums:
hashMap[num] += 1
if hashMap[num] > major:
return num
class Solution:
def majorityElement(self, nums: List[int]) -> int:
hashMap = {}
high = 0
output = 0
for num in nums:
if num not in hashMap:
hashMap[num] = 0
hashMap[num] += 1
if high < hashMap[num]:
high = hashMap[num]
output = num
return output
class Solution:
def majorityElement(self, nums: List[int]) -> int:
hashMap = defaultdict(int)
high = 0
output = 0
for num in nums:
hashMap[num] += 1
if high < hashMap[num]:
high = hashMap[num]
output = num
return output
| 3.4375
| 3
|
tests/conftest.py
|
SuperBOY000/escape_roomba
| 1
|
12783608
|
"""Shared configuration for unit tests:
docs.pytest.org/en/stable/fixture.html#conftest-py-sharing-fixture-functions.
"""
import argparse
import logging
from copy import copy
import discord
import pytest
import regex
from escape_roomba.format_util import fobj
import escape_roomba.context
logger_ = logging.getLogger('bot.mock')
class DiscordMockFixture:
"""Class to generate Discord client library Mocks simulating a server.
Tests can get an instance via the 'discord_mock' fixture (defined below).
Attributes:
pytest_mocker - The mocker fixture from pytest-mock
context - A default Context-like object, with a Client-like object.
"""
# Keep generated IDs globally unique (see unique_id() below).
last_id = 0x92ee70e00000000 # 2020-01-01 at midnight.
def __init__(self, pytest_mocker):
self.pytest_mocker = pytest_mocker
parser = argparse.ArgumentParser(parents=[escape_roomba.context.args])
self.context = escape_roomba.context.Context(
parsed_args=parser.parse_args([]),
inject_client=self.make_client())
self.event_queue = []
self.reset_data() # Default setup.
@classmethod
def unique_id(cls):
"""Returns a unique Discord/snowflake-style ID int."""
cls.last_id += (1000 << 22) # Advance one second.
return cls.last_id
#
# Mock object creation
#
def make_client(self):
"""Returns a new Client-like Mock object."""
client = self.pytest_mocker.Mock(spec=discord.Client, name='client')
client.guilds = []
client.user = self.make_user(name='Client User')
client.get_guild.side_effect = lambda id: next(
(g for g in client.guilds if g.id == id), None)
client.get_channel.side_effect = lambda id: next(
(c for g in client.guilds for c in g.channels if c.id == id), None)
client.get_user.side_effect = lambda id: next(
(m for g in client.guilds for m in g.members if m.id == id), None)
logger_.debug('make_client')
return client
def make_guild(self, client, name='Mock Guild'):
"""Returns a new Guild-like Mock."""
guild = self.pytest_mocker.Mock(spec=discord.Guild, name='guild')
guild.id = self.unique_id()
guild.name = name
guild.channels = []
guild.members = []
guild.default_role = self.make_role(guild, '@everyone')
guild.me = self.make_user(
guild=guild, name=client.user.name, id=client.user.id,
discriminator=client.user.discriminator)
async def create_text_channel(*args, **kwargs):
return self.sim_add_channel(*args, guild=guild, **kwargs)
guild.create_text_channel.side_effect = create_text_channel
guild.get_channel = client.get_channel
logger_.debug(f'make_guild:\n {fobj(g=guild)}')
return guild
def make_role(self, guild, name=None):
"""Returns a new Role-like Mock."""
role = self.pytest_mocker.Mock(spec=discord.Role, name='role')
role.id = self.unique_id()
role.name = name
role.guild = guild
# TODO: Add other fields and methods.
logger_.debug(f'make_role: {fobj(r=role)}')
return role
def make_user(self, guild=None, name=None, id=None, discriminator='9999'):
"""Returns a new User-like (or Member-like if guild is set) Mock."""
user = self.pytest_mocker.Mock(
spec=discord.Member if guild else discord.User, name='user')
user.id = id or self.unique_id()
user.discriminator = discriminator
if guild is None:
user.name = name or 'Mock User'
else:
user.name = name or 'Mock Member'
user.guild = guild
logger_.debug(f'make_user: {fobj(u=user)}')
return user
def make_text_channel(self, guild, name='mock-channel', category=None,
position=None, topic=None, reason=None,
overwrites=None):
# TODO: Handle all the other arguments, mangle the name...
"""Returns a new TextChannel-like Mock."""
channel = self.pytest_mocker.Mock(
spec=discord.TextChannel, name='channel')
channel.id = self.unique_id()
channel.guild = guild
channel.type = discord.ChannelType.text
channel.name = name
channel.topic = topic or f'topic for {name}'
channel.history_for_mock = [] # Messages that history() will return.
channel.position = position
channel.overwrites = overwrites or {}
# TODO: Handle category and reason.
async def history(limit=100, oldest_first=None):
limit = len(channel.history_for_mock) if limit is None else limit
history = channel.history_for_mock
slice = history[:limit] if oldest_first else history[:-limit:-1]
for m in slice:
yield m
async def fetch_message(id):
m = next((m for m in channel.history_for_mock if m.id == id), None)
if not m:
raise discord.NotFound(None, f'message {id} not found')
return m
async def send(content=None, embed=None):
return self.sim_add_message(
channel=channel, author=guild.me, content=content, embed=embed)
async def edit(name=None, overwrites=None, topic=None, reason=None):
# TODO: Handle all the other arguments (and mangle the name)...
if overwrites is not None:
channel.overwrites = overwrites
if topic is not None:
channel.topic = topic
channel.history.side_effect = history
channel.fetch_message.side_effect = fetch_message
channel.send.side_effect = send
channel.edit.side_effect = edit
# Temporary hack to support bug workaround (see thread_channel.py).
async def bulk_channel_update(*a, **kw):
pass
channel._state.http.bulk_channel_update.side_effect = \
bulk_channel_update
logger_.debug(f'make_channel:\n {fobj(c=channel)}')
return channel
def make_message(self, channel, author, content='Mock content',
embed=None):
"""Returns a new Message-like Mock."""
message = self.pytest_mocker.Mock(spec=discord.Message, name='message')
message.id = self.unique_id()
message.guild = channel.guild
message.channel = channel
message.author = author
message.content = content
message.attachments = []
message.embeds = [embed] if embed is not None else []
message.reactions = []
async def edit(*args, **kwargs):
self.sim_edit_message(message, *args, **kwargs)
async def add_reaction(emoji):
self.sim_reaction(message, str(emoji), message.guild.me, +1)
async def remove_reaction(emoji, member):
self.sim_reaction(message, str(emoji), message.guild.me, -1)
message.edit.side_effect = edit
message.add_reaction.side_effect = add_reaction
message.remove_reaction.side_effect = remove_reaction
logger_.debug(f'make_message:\n {fobj(m=message)}')
return message
def make_reaction(self, message, unicode):
logger_.debug(f'make_reaction: {unicode}\n on: {fobj(m=message)}')
assert isinstance(unicode, str)
assert regex.fullmatch(r'\p{Emoji}', unicode)
reaction = self.pytest_mocker.MagicMock(
spec=discord.Reaction, name='reaction')
reaction.emoji = self.pytest_mocker.MagicMock(
spec=discord.PartialEmoji, name='reaction.emoji')
reaction.emoji.name = unicode
reaction.emoji.__str__.return_value = unicode
reaction.__str__.return_value = unicode
reaction.count = 0
reaction.me = False
reaction.message = message
reaction.users_for_mock = {}
async def users(limit=None, oldest_first=None):
for i, m in enumerate(reaction.users_for_mock.values()):
if limit is not None and i >= limit:
break
yield m
reaction.users.side_effect = users
return reaction
#
# Helper methods to update data and generate notification events.
#
def reset_data(self, guild_count=1, members_per_guild=1,
channels_per_guild=1, messages_per_channel=1):
"""Clears the simulated server and populates it with test data.
Args:
guild_count - number of (simulated) guilds (servers) to set up
members_per_guild - number of members in each simulated guild
channels_per_guild - number of text channels in each guild
messages_per_channel - number of messages in each channel's history
"""
logger_.debug(f'reset_data: #g={guild_count} #u/g={members_per_guild} '
f'#c/g={channels_per_guild} #m/c={messages_per_channel}')
self.context.discord().guilds[:] = [] # Erase preexisting data.
for gi in range(guild_count):
guild = self.make_guild(
self.context.discord(), name=f'Mock Guild {gi}')
self.context.discord().guilds.append(guild)
for mi in range(members_per_guild):
guild.members.append(self.make_user(
guild, name=f'Mock Member {mi}', discriminator=1000 + mi))
for ci in range(channels_per_guild):
chan = self.sim_add_channel(guild, name=f'mock-channel-{ci}')
for mi in range(messages_per_channel):
# Need member for message author.
assert len(guild.members) > 0
author = guild.members[mi % len(guild.members)]
self.sim_add_message(
channel=chan, author=author,
content=f'Mock message {mi} in #mock-channel-{ci}')
self.event_queue = [] # No events for initial content.
logger_.debug('reset_data done')
def queue_event(self, event_name, *args, **kwargs):
"""Queues an event to be sent to registered listeners."""
logger_.debug(f'queue_event: {event_name}')
assert event_name.startswith('on_')
self.event_queue.append((event_name, args, kwargs))
async def async_dispatch_events(self):
"""Sends all queued events to registered handlers."""
logger_.debug(f'async_dispatch_event: {len(self.event_queue)} events')
while self.event_queue:
batch, self.event_queue = self.event_queue, []
for event_name, args, kwargs in batch:
handler = getattr(self.context.discord(), event_name, None)
logger_.debug(f'async_dispatch_event: {event_name}'
f'{"" if handler else " [unhandled]"}]')
if handler is not None:
await handler(*args, **kwargs)
logger_.debug('async_dispatch_event: batch done, '
f'{len(self.event_queue)} added')
def sim_add_message(self, channel, **kwargs):
"""Simulates a message post and queues notification events.
Args:
channel - the channel to post to
message - the message to post
"""
message = self.make_message(channel=channel, **kwargs)
logger_.debug(f'sim_add_message:\n {fobj(m=message)}')
channel.history_for_mock.append(message)
self.queue_event(f'on_message', message)
return message
def sim_edit_message(self, message, content=None, embed=None):
edited = copy(message)
edited.content = content
edited.embeds = [embed] if embed is not None else []
logger_.debug('sim_edit_message:\n'
f' before: {fobj(m=message)}\n'
f' after: {fobj(m=edited)}')
history = message.channel.history_for_mock
history[:] = [edited if m.id == message.id else m for m in history]
event = self.pytest_mocker.Mock(
spec=discord.RawMessageUpdateEvent, name='raw_edit_event')
event.message_id = message.id
event.channel_id = message.channel.id
event.data = None # TODO: Fill in if needed.
event.cached_message = message
self.queue_event('on_raw_message_edit', event)
self.queue_event('on_message_edit', message, edited)
def sim_reaction(self, message, unicode, user, delta):
"""Simulates an emoji reaction change and queues notification events.
Args:
message - the message object to modify
unicode - unicode of emoji to add/remove
user - the user adding/removing the emoji
delta - +1 to add, -1 to remove
"""
logger_.debug(f'sim_reaction: {delta:+d} {unicode}\n'
f' by: {fobj(u=user)}\n'
f' on: {fobj(m=message)}')
assert isinstance(unicode, str)
assert regex.fullmatch(r'\p{Emoji}', unicode)
assert delta in (-1, +1)
reaction = next(
(r for r in message.reactions if str(r.emoji) == unicode), None)
if reaction is None:
reaction = self.make_reaction(message, unicode)
message.reactions.append(reaction)
old_count = len(reaction.users_for_mock)
if delta > 0:
reaction.users_for_mock[user.id] = user
elif delta < 0:
reaction.users_for_mock.pop(user.id, None)
reaction.me = (message.guild.me.id in reaction.users_for_mock)
reaction.count = len(reaction.users_for_mock)
if reaction.count != old_count:
event = self.pytest_mocker.Mock(
spec=discord.RawReactionActionEvent, name='raw_reaction_event')
event.message_id = message.id
event.user_id = user.id
event.channel_id = message.channel.id
event.guild_id = message.guild.id
event.emoji = reaction.emoji
event.member = user
event.event_type = 'REACTION_' + ('ADD' if delta > 0 else 'REMOVE')
self.queue_event(f'on_raw_{event.event_type.lower()}', event)
logger_.debug('sim_reaction done:\n ' + ' '.join(
f'{str(r.emoji)}x{r.count}{"*" if r.me else ""}'
for r in message.reactions))
return reaction
def sim_add_channel(self, guild, name, *args, **kwargs):
"""Simulates guild.create_text_channel() and queues events."""
logger_.debug(f'sim_add_channel: #{name}')
channel = self.make_text_channel(guild, name, *args, **kwargs)
if channel.position is None:
channel.position = len(guild.channels)
guild.channels.insert(channel.position, channel)
for i in range(channel.position + 1, len(guild.channels)):
old = copy(guild.channels[i])
guild.channels[i].position = i
self.queue_event('on_guild_channel_update', old, guild.channels[i])
self.queue_event('on_guild_channel_create', channel)
return channel
@pytest.fixture
def discord_mock(mocker, event_loop):
"""Fixture class to generate Mocks for Discord client library objects."""
yield DiscordMockFixture(mocker) # Keep event loop until teardown.
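# Usage sketch in a test module (the test body is illustrative; it assumes an
# async-capable test runner such as pytest-asyncio):
# async def test_reaction(discord_mock):
#     guild = discord_mock.context.discord().guilds[0]
#     msg = discord_mock.sim_add_message(channel=guild.channels[0],
#                                        author=guild.members[0], content='hi')
#     discord_mock.sim_reaction(msg, '\N{THUMBS UP SIGN}', guild.members[0], +1)
#     await discord_mock.async_dispatch_events()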
| 2.25
| 2
|
pcdet/version.py
|
tuanho27/OpenPCDet
| 0
|
12783609
|
__version__ = "0.2.0+1e2b0ad"
| 1.046875
| 1
|
Z - TrashCode/algo1.py
|
khanfarhan10/100DaysofDSAAdvanced
| 2
|
12783610
|
"""
cd G:\GitLab\pdf_extraction\ModularInsurance
python algo1.py
"""
import sys
import random
sys.setrecursionlimit(1500)
print(sys.getrecursionlimit())
def merge_sort(arr,n=None,pivot_index=None):
"""Returns sorted array"""
n = len(arr) if n is None else n
if n==1:
return arr
if n==2:
a = arr[0]
b = arr[1]
srt = arr if a<b else arr[::-1]
return srt
    pivot_index = n // 2 if pivot_index is None else pivot_index
    # pivot_element = arr[pivot_index]
    left_arr = arr[:pivot_index]
    right_arr = arr[pivot_index:]
    left_sorted = merge_sort(left_arr, len(left_arr))
    right_sorted = merge_sort(right_arr, len(right_arr))
    len_left = len(left_sorted)
    len_right = len(right_sorted)
    i = 0
    j = 0
    big_old_arr = []
    while i < len_left and j < len_right:
        if left_sorted[i] > right_sorted[j]:
            big_old_arr.append(right_sorted[j])
            j += 1
        else:
            big_old_arr.append(left_sorted[i])
            i += 1
    # append whatever remains of the half that was not exhausted
    big_old_arr.extend(left_sorted[i:])
    big_old_arr.extend(right_sorted[j:])
    return big_old_arr
if __name__ == "__main__":
arr = [4,8,1,5,7]
print(merge_sort(arr))
if __name__ == "__main__2":
a = "FarhanHaiKhan"
sample_space = "<KEY>"
details = dict()
for each_letter in sample_space:
if each_letter in a:
details[each_letter] = a.count(each_letter)
else:
details[each_letter] = 0
for key in details:
val = details[key]
if not val== 0:
print(key, val)
| 3.09375
| 3
|
tools/unit_test/iter_next.py
|
HAOCHENYE/yehc_mmdet
| 1
|
12783611
|
class A(object):
def __init__(self):
self.array = [1, 2, 3]
self.index = 0
def __iter__(self):
return self
    def __next__(self):
        # return the current element, then advance; raise StopIteration when exhausted
        if self.index >= len(self.array):
            raise StopIteration
        value = self.array[self.index]
        self.index += 1
        return value
x = A()
print(next(x))
| 3.71875
| 4
|
Beginner_Day_1_14/Day5/Exc4/main.py
|
fredtheninja/100-Days-of-Code
| 0
|
12783612
|
#Write your code below this row 👇
for number in range(1,101):
if number%3 == 0 and number%5 == 0:
print("FizzBuzz")
elif number%3 == 0:
print("Fizz")
elif number%5 == 0:
print("Buzz")
else:
print(number)
| 4
| 4
|
function_examples.py
|
dipakbari4/PythonExercises
| 0
|
12783613
|
def printMyName(name):
print("Hello", name)
printMyName("Dipak")
| 2.53125
| 3
|
LeetCode/Python/1295. Find Numbers with Even Number of Digits #2.py
|
rayvantsahni/Competitive-Programming-Codes
| 1
|
12783614
|
class Solution:
def numLength(self, n):
count = 0
while n:
n //= 10
count += 1
return count
def findNumbers(self, arr: List[int]) -> int:
count = 0
for a in arr:
if not self.numLength(a) & 1:
count += 1
return count
| 3.421875
| 3
|
magmap/tests/unit_testing.py
|
clifduhn/magellanmapper
| 0
|
12783615
|
# MagellanMapper unit testing
# Author: <NAME>, 2018, 2020
"""Unit testing for the MagellanMapper package.
"""
import unittest
from magmap.cv import stack_detect
from magmap.io import cli
from magmap.io import importer
from magmap.settings import config
TEST_IMG = "test.czi"
class TestImageStackProcessing(unittest.TestCase):
def setUp(self):
config.filename = TEST_IMG
config.channel = None
cli.setup_profiles(["lightsheet,4xnuc"], None, None)
def test_load_image(self):
config.image5d = importer.read_file(
config.filename, config.series)
if config.image5d is None:
chls, import_path = importer.setup_import_multipage(
config.filename)
import_md = importer.setup_import_metadata(chls, config.channel)
config.image5d = importer.import_multiplane_images(
chls, import_path, import_md, channel=config.channel)
self.assertEqual(config.image5d.shape, (1, 51, 200, 200, 2))
def test_process_whole_image(self):
_, _, blobs = stack_detect.detect_blobs_large_image(
config.filename, config.image5d, (30, 30, 8), (70, 70, 10))
self.assertEqual(len(blobs), 54)
if __name__ == "__main__":
unittest.main(verbosity=2)
| 2.109375
| 2
|
motutils/bbox_mot.py
|
smidm/motutils
| 1
|
12783616
|
import numpy as np
import pandas as pd
import xarray as xr
from shape import BBox
from .mot import Mot
class BboxMot(Mot):
def __init__(self, **kwargs):
"""
Ground truth stored in xarray.Dataset with frame and id coordinates (frames are 0-indexed).
Example:
<xarray.Dataset>
Dimensions: (frame: 5928, id: 5)
Coordinates:
* frame (frame) int64 0 1 2 3 4 5 6 ... 5922 5923 5924 5925 5926 5927
* id (id) int64 1 2 3 4 5
Data variables:
x (frame, id) float64 434.5 277.7 179.2 180.0 ... nan nan nan nan
y (frame, id) float64 279.0 293.6 407.9 430.0 ... nan nan nan nan
width (frame, id) float64 nan nan nan nan nan ... nan nan nan nan nan
height (frame, id) float64 nan nan nan nan nan ... nan nan nan nan nan
confidence (frame, id) float64 1.0 1.0 1.0 1.0 1.0 ... 1.0 1.0 1.0 1.0 1.0
"""
super(Mot, self).__init__(**kwargs)
def init_blank(self, frames, ids):
"""
Initialize blank ground truth.
:param frames: list of frames
:param ids: list of identities
"""
self.ds = xr.Dataset(
data_vars={
"x": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"y": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"width": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"height": (["frame", "id"], np.nan * np.ones((len(frames), len(ids)))),
"confidence": (
["frame", "id"],
np.nan * np.ones((len(frames), len(ids))),
),
},
coords={"frame": frames, "id": ids},
)
def load(self, filename):
"""
Load Multiple Object Tacking Challenge trajectories file.
Format described in https://arxiv.org/abs/1603.00831, section 3.3 Data Format
Loads trajectories into a DataFrame, columns frame and id start with 1 (MATLAB indexing).
:param filename: mot filename_or_buffer or buffer
"""
df = pd.read_csv(
filename,
index_col=["frame", "id"],
names=["frame", "id", "x", "y", "width", "height", "confidence"],
converters={
"frame": lambda x: int(x) - 1,
"id": lambda x: int(x) - 1,
},
)
df[df == -1] = np.nan
ds = df.to_xarray()
# ensure that all frames are in the Dataset
self.init_blank(list(range(ds.frame.min(), ds.frame.max())), ds.id)
self.ds = ds.merge(self.ds)
def save(self, filename, make_backup=False):
import datetime
import os
if make_backup and os.path.exists(filename):
dt = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.rename(filename, filename[:-4] + "_" + dt + ".txt")
df = self.ds.to_dataframe().reset_index()
df[df.isna()] = -1
df["frame"] += 1
df["id"] += 1
df.to_csv(filename, index=False, header=False)
def get_bboxes(self, frame):
"""
Get GT bounding boxes in a frame.
The returned BBoxes include obj_id attribute.
:param frame: frame number
:return: list of bounding boxes (BBox)
"""
bboxes = []
for obj_id, obj in self.get_positions_dataframe(frame).iterrows():
if not (
np.isnan(obj.x)
or np.isnan(obj.y)
or np.isnan(obj.width)
or np.isnan(obj.height)
):
bbox = BBox.from_xywh(obj.x, obj.y, obj.width, obj.height, frame)
bbox.obj_id = obj_id
bboxes.append(bbox)
return bboxes
def get_object_distance(self, frame, obj_id, other):
"""
TODO bbox iou
:param frame:
:param obj_id:
:param other:
:return:
"""
assert False, "not implemented"
def draw_frame(self, img, frame, mapping=None):
"""
Draw objects on an image.
:param img: ndarray
:param frame: frame
:param mapping: mapping of ids, dict
:return: image
"""
if frame in self.ds.frame:
if self.colors is None:
self._init_draw()
if mapping is None:
mapping = dict(list(zip(self.ds.id.data, self.ds.id.data)))
for bbox in self.get_bboxes(frame):
bbox.draw_to_image(img, color=self.colors[mapping[bbox.obj_id]])
return img
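# Usage sketch (file names are hypothetical; gt.txt is expected in the MOT CSV
# format described in load() above, and BboxMot construction may need keyword
# arguments defined by the Mot base class, which is not shown here):
# mot = BboxMot()
# mot.load("gt.txt")
# print(mot.get_bboxes(frame=0))
# mot.save("gt_roundtrip.txt", make_backup=True)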
| 2.71875
| 3
|
filter_vulnerabilities.py
|
Thomas-Neumann/wiz-io-tools
| 0
|
12783617
|
#!/usr/bin/env python3
#
# filter out known vulnerabilities from wiz.io vulnerability report
#
# ./filter_vulnerabilities.py <report_file1> [<report_file2> ...]
#
# usage:
# ./filter_vulnerabilities.py data/vulnerability-reports/1644573599308653316.csv
# ./filter_vulnerabilities.py file1.csv file2.csv
# input file format:
# Created At,Title,Severity,Status,Resource Type,Resource external ID,Subscription ID,Project IDs,Project Names,Resolved Time,Resolution,Control ID,Resource Name,Resource Region,Resource Status,Resource Platform,Resource OS,Resource original JSON,Issue ID,Resource vertex ID,Ticket URLs
import logging
import os
import sys
sys.path.insert(0, os.path.dirname(__file__) + "/lib")
import wiz_io_tools.reports
from wiz_io_tools.reports_cli import configure_logger, parse_argv
# version string including prerelease and metadata (if appliccable)
# major.minor.patch[-prerelease][+metadata]
VERSIONSTRING="0.1.0-alpha2"
LH = logging.getLogger()
if __name__ == "__main__":
configure_logger(LH, logging.INFO)
config = parse_argv(VERSIONSTRING)
ignored_issues = [
# [ issue.title, issue.resource.external_id ]
]
fixed_issues = [
# [ issue.title, issue.resource.external_id ]
]
exemptions = [
# [ issue.title, issue.resource.external_id ]
]
error_count = 0
issues = list()
for csvfile in config["reports"]:
try:
issues.extend(wiz_io_tools.reports.parse_issues_report(csvfile))
except FileNotFoundError as e:
LH.error("Skipping '%s': %s", csvfile, e.strerror)
error_count += 1
counter_ignored = 0
counter_already_fixed = 0
counter_exempted = 0
counters_severity = {
"LOW": 0,
"MEDIUM": 0,
"HIGH": 0,
"CRITICAL": 0,
}
for issue in issues:
counters_severity[issue.severity] += 1
if issue.severity in ["LOW", "MEDIUM"]:
continue
skip_issue = False
for ignored_issue in ignored_issues:
if issue.title == ignored_issue[0] and issue.resource.external_id == ignored_issue[1]:
counter_ignored += 1
skip_issue = True
break
for exemption in exemptions:
if issue.title == exemption[0] and issue.resource.external_id == exemption[1]:
counter_exempted += 1
skip_issue = True
break
for fixed_issue in fixed_issues:
if issue.title == fixed_issue[0] and issue.resource.external_id == fixed_issue[1]:
counter_already_fixed += 1
skip_issue = True
break
if skip_issue:
continue
# add additional filter conditions here
print("{:100s} {} {} {} <{}>".format(issue.title, issue.severity, issue.resource.name, issue.resource.type, issue.resource.external_id))
issue_count = len(issues)
if issue_count == 0:
LH.info("Found no issues. Awesome!")
else:
if counters_severity["CRITICAL"] == 0 and counters_severity["HIGH"] == 0:
LH.warning("Found %i issues. (no critical, no high)", issue_count, counters_severity["CRITICAL"], counters_severity["HIGH"])
else:
LH.error("Found %i issues. (critical: %i, high: %i)", issue_count, counters_severity["CRITICAL"], counters_severity["HIGH"])
LH.info("(%i already fixed, %i exempted, %i ignored)", counter_already_fixed, counter_exempted, counter_ignored)
if error_count:
LH.warning("Encountered %i error(s)! Please verify input.", error_count)
| 2.078125
| 2
|
custom_components/magic_lights/const.py
|
justanotherariel/hass_MagicLights
| 0
|
12783618
|
DOMAIN = "magic_lights"
| 1.117188
| 1
|
ptb_data/preprocessing.py
|
windweller/nlplab
| 0
|
12783619
|
"""
Preprocess ptb.train.txt
into train.x.txt and train.y.txt type
that can be load into NLCEnv
We also prepare 10 characters and 30 characters version
like in AC paper
"""
import os
import numpy as np
import string
import random
import re
_WORD_SPLIT = re.compile(r"([.,!?\"':;)(])")
def basic_tokenizer(sentence):
"""Very basic tokenizer: split the sentence into a list of tokens."""
# used by NLCEnv
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(_WORD_SPLIT, space_separated_fragment))
return [w for w in words if w]
def char_tokenizer(sentence):
# used by NLCEnv
return list(sentence.strip())
def create_truncated_seq_ptb(keeplen=10, noise=0.3, filename="train"):
"""
Parameters
----------
keeplen: the number of characters we want to save, choices: 10, 30
noise: 0.3, 0.5
Returns
-------
"""
xs = []
ys = []
with open("./ptb."+filename+".txt", "r") as f:
lines = f.readlines()
# x = np.array(char_tokenizer(line)[:keeplen])
# # 0 is drop
# mask = np.random.choice([0,1], size=keeplen, p=[noise, 1-noise])
# np.multiply(x, mask)
for line in lines:
line = line.replace("<unk>", "U")
x = char_tokenizer(line)[:keeplen]
y = char_tokenizer(line)[:keeplen]
mask = np.random.choice([0, 1], size=keeplen, p=[noise, 1 - noise])
for i in xrange(len(x)):
if mask[i] == 0 and x[i] != " " and x[i] != "U": # not sapce, not <unk>
# should we include special chars or entire vocab? eh
x[i] = random.choice(string.lowercase)
xs.append(x)
ys.append(y)
with open("./" + filename + ".x.txt", "w") as f1:
with open("./" + filename + ".y.txt", "w") as f2:
for i in range(len(xs)):
f1.write("".join(xs[i]) + "\n")
f2.write("".join(ys[i]) + "\n")
if __name__ == '__main__':
create_truncated_seq_ptb(keeplen=30, filename="valid")
| 2.8125
| 3
|
CarlaScripts/new_static_cam_2veh_sem_go.py
|
ayushjain1144/SeeingByMoving
| 13
|
12783620
|
from subprocess import Popen, PIPE, STDOUT
import shlex
import time
import sys
import psutil
import os, signal
mod = 'aa' # first good moving camera data generated with carla version 0.97
mod = 'test' # 100 views
#mod = 'ab' # 88 views, yaw 0 to 359 - 5 scenes test
mod = 'ac' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'ad' # 51 views, yaw 0 to 359 - 100 scenes
mod = 'ae' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'af' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'ag' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'test' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'ah' # 51 views, yaw 0 to 359 - 5 scenes
mod = 'ai' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'aj' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'ak' # 51 views, yaw 0 to 359 - 300 scenes
mod = 'test' # 51 views, yaw 0 to 359 - 10 scenes
#mod = 'rotaa'#43 views, 40 vehicles,
mod = 'rot_ab'#43 views, 35 vehicles,
mod = 'rot_ac'#43 views, 35 vehicles,
mod = 'test' # 51 views, yaw 0 to 359 - 10 scenes
# mod = 'ep_aa' # 51 views, yaw 0 to 359 - 10 scenes
mod = 'hiaa'
mod = 'hiab'
mod = 'hiac'
mod = 'hiad'
mod = 'hiae'
mod = 'hiaf'
mod = 'hiag'
mod = 'hiah'
mod = 'hiai'
mod = 'hiaj'
mod = 'hiak'
mod = 'hial'
mod = 'hiam'
mod = 'hian'
mod = 'hiao'
mod = 'hiap' # The above are all 43 views, 30 vehicles
mod = 'test'
mod = 'vehaa' # two vehicles, rotate around the two vehicles, second veh atleast 5 meters away
mod = 'vehab' # no bikes, two vehicles, rotate around the two vehicles, second veh atleast 5 meters away
mod = 'test'
mod = 'mr06' # with segmentation masks
save_dir = '/hdd/gsarch/data'
carla_sim = "/hdd/carla97/CarlaUE4.sh -carla-server -windows -ResX=100 -ResY=100 -benchmark"
carla_sim_args = shlex.split(carla_sim)
cnt = 0
for i in range(0,100):
p1 = Popen(carla_sim_args, stdout=PIPE, stderr=PIPE)
time.sleep(10)
print("Number of times carla simulator started: ", cnt)
cnt+=1
p2 = Popen(["python3.5","new_static_cam_2veh_sem.py", str(i), mod, save_dir], stdout=PIPE, stderr=PIPE)
time.sleep(1)
out, err = p2.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
# for line in out.decode("utf-8").split('\\n'):
# print('\t' + line)
# print('ERROR')
# for line in err.decode("utf-8").split('\\n'):
# print('\t' + line)
p1.terminate()
time.sleep(5)
# Iterate over all running process
for proc in psutil.process_iter():
try:
# Get process name & pid from process object.
processName = proc.name()
processID = proc.pid
#print(processName , ' ::: ', processID)
if 'Carla' in processName:
print("PROCESS FOUND")
print(processName)
os.kill(processID, signal.SIGSTOP)
print("PROCESS STOPPED")
time.sleep(5)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
for proc in psutil.process_iter():
try:
# Get process name & pid from process object.
processName = proc.name()
processID = proc.pid
#print(processName , ' ::: ', processID)
if 'Carla' in processName:
print("PROCESS FOUND")
print(processName)
os.kill(processID, signal.SIGKILL)
print("PROCESS KILLED")
time.sleep(5)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
print("Done with single iteration. Terminating everything")
print("==========================================================")
p2.terminate()
time.sleep(10)
| 1.9375
| 2
|
envisage/ui/action/i_action_manager_builder.py
|
anshsrtv/envisage
| 0
|
12783621
|
# (C) Copyright 2007-2020 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" The interface for action manager builders. """
# Enthought library imports.
from traits.api import Interface, List
# Local imports.
from .action_set import ActionSet
class IActionManagerBuilder(Interface):
""" The interface for action manager builders.
An action manager builder populates action managers (i.e. menus, menu bars
and tool bars) from the menus, groups and actions defined in its action
sets.
"""
# The action sets used by the builder.
action_sets = List(ActionSet)
def initialize_action_manager(self, action_manager, root):
""" Initialize an action manager from the builder's action sets.
"""
| 1.875
| 2
|
src/py-entry-central/nightjar_central/tests/generate_test.py
|
groboclown/nightjar-mesh
| 3
|
12783622
|
"""
Test the generate module.
"""
from typing import List, Dict, Optional, Any
import unittest
import os
import platform
import shutil
import json
from .. import generate
from ..config import (
Config,
ENV__DATA_STORE_EXEC, ENV__DISCOVERY_MAP_EXEC,
)
class GenerateDataImplTest(unittest.TestCase):
"""Test the generator functions and classes."""
# These are all smashed together, because they share the same setup and teardown logic.
# Yeah, it's a lousy reason to jam them together, but it makes less duplication.
def setUp(self) -> None:
noop_cmd = 'where' if platform.system() == 'Windows' else 'echo'
python_cmd = 'python' if platform.system() == 'Windows' else 'python3'
self._runnable = [
python_cmd,
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runnable.py'),
]
self._config = Config({
ENV__DISCOVERY_MAP_EXEC: noop_cmd,
ENV__DATA_STORE_EXEC: noop_cmd,
})
self._gen_file = os.path.join(self._config.temp_dir, 'generated-discovery-map.json')
self._old_file = os.path.join(self._config.temp_dir, 'last-discovery-map.json')
self._file_index = 0
def tearDown(self) -> None:
shutil.rmtree(self._config.temp_dir)
def test_create_generator(self) -> None:
"""Test the create generator with a gateway request."""
res = generate.create_generator(self._config)
self.assertIsInstance(res, generate.GenerateDataImpl)
def test_is_generated_map_different__no_files(self) -> None:
"""Test is_generated_map_different with no files"""
self.assertFalse(os.path.isfile(self._gen_file))
self.assertFalse(os.path.isfile(self._old_file))
gen = generate.GenerateDataImpl(self._config)
res = gen.is_generated_map_different()
self.assertFalse(res)
def test_is_generated_map_different__just_new(self) -> None:
"""Test is_generated_map_different with no files"""
with open(self._gen_file, 'w') as f:
json.dump({}, f)
self.assertFalse(os.path.isfile(self._old_file))
gen = generate.GenerateDataImpl(self._config)
res = gen.is_generated_map_different()
self.assertTrue(res)
def test_is_generated_map_different__just_old(self) -> None:
"""Test is_generated_map_different with no files"""
with open(self._old_file, 'w') as f:
json.dump({}, f)
self.assertFalse(os.path.isfile(self._gen_file))
gen = generate.GenerateDataImpl(self._config)
res = gen.is_generated_map_different()
self.assertFalse(res)
def test_is_generated_map_different__same(self) -> None:
"""Test is_generated_map_different with no files"""
with open(self._gen_file, 'w') as f:
json.dump({'x': True}, f)
with open(self._old_file, 'w') as f:
json.dump({'x': True}, f)
gen = generate.GenerateDataImpl(self._config)
res = gen.is_generated_map_different()
self.assertFalse(res)
def test_is_generated_map_different__different(self) -> None:
"""Test is_generated_map_different with no files"""
with open(self._gen_file, 'w') as f:
json.dump({'x': True}, f)
with open(self._old_file, 'w') as f:
json.dump({'y': True}, f)
gen = generate.GenerateDataImpl(self._config)
res = gen.is_generated_map_different()
self.assertTrue(res)
def test_commit_discovery_map__with_error(self) -> None:
"""Test commit_discovery_map with an error"""
self._config.data_store_exec = self._get_runnable_cmd(1, self._gen_file, {
'schema-version': 'v1',
'document-version': 'x',
'namespaces': [],
})
gen = generate.GenerateDataImpl(self._config)
res = gen.commit_discovery_map()
self.assertEqual(1, res)
self.assertFalse(os.path.isfile(self._old_file))
def test_commit_discovery_map__no_gen_file(self) -> None:
"""Test commit_discovery_map with no gen file"""
self.assertFalse(os.path.isfile(self._gen_file))
gen = generate.GenerateDataImpl(self._config)
res = gen.commit_discovery_map()
self.assertEqual(0, res)
self.assertFalse(os.path.isfile(self._old_file))
def test_commit_discovery_map__success(self) -> None:
"""Test commit_discovery_map successful run."""
self.assertFalse(os.path.isfile(self._old_file))
expected = {
'schema-version': 'v1',
'document-version': 'a',
'namespaces': [],
}
self._config.data_store_exec = self._get_runnable_cmd(0, self._gen_file, expected)
gen = generate.GenerateDataImpl(self._config)
res = gen.commit_discovery_map()
self.assertEqual(0, res)
self.assertTrue(os.path.isfile(self._old_file))
with open(self._old_file, 'r') as f:
self.assertEqual(expected, json.load(f))
def test_generate_discovery_map__failure(self) -> None:
"""Test generate_discovery_map which fails to execute."""
self._config.discovery_map_exec = self._get_runnable_cmd(6, None, {})
gen = generate.GenerateDataImpl(self._config)
res = gen.generate_discovery_map()
self.assertEqual(1, res)
self.assertFalse(os.path.isfile(self._gen_file))
def test_update_discovery_map__failure_gen(self) -> None:
"""Test update_discovery_map with failure form discovery map."""
self._config.discovery_map_exec = self._get_runnable_cmd(6, None, {})
self._config.data_store_exec = self._get_runnable_cmd(12, None, {})
gen = generate.GenerateDataImpl(self._config)
res = gen.update_discovery_map()
self.assertEqual(1, res)
def test_update_discovery_map__failure_commit(self) -> None:
"""Test update_discovery_map with failure form discovery map."""
self._config.discovery_map_exec = self._get_runnable_cmd(0, None, {})
self._config.data_store_exec = self._get_runnable_cmd(6, None, {})
gen = generate.GenerateDataImpl(self._config)
res = gen.update_discovery_map()
self.assertEqual(1, res)
def test_update_discovery_map__no_change(self) -> None:
"""Test update_discovery_map with no changed contents."""
expected = {
'schema-version': 'v1',
'document-version': 'z',
'namespaces': [],
}
with open(self._old_file, 'w') as f:
json.dump(expected, f)
self._config.discovery_map_exec = self._get_runnable_cmd(0, None, expected)
# data-store should not run, so have it generate an error if it does.
self._config.data_store_exec = self._get_runnable_cmd(1, None, {})
gen = generate.GenerateDataImpl(self._config)
res = gen.update_discovery_map()
self.assertEqual(0, res)
def test_update_discovery_map__changed(self) -> None:
"""Test update_discovery_map with changed contents."""
with open(self._old_file, 'w') as f:
json.dump({
'schema-version': 'v1',
'document-version': 'old',
'namespaces': [],
}, f)
expected = {
'schema-version': 'v1',
'document-version': 'new',
'namespaces': [],
}
self._config.discovery_map_exec = self._get_runnable_cmd(0, None, expected)
self._config.data_store_exec = self._get_runnable_cmd(0, None, {})
gen = generate.GenerateDataImpl(self._config)
res = gen.update_discovery_map()
self.assertEqual(0, res)
self.assertTrue(os.path.isfile(self._old_file))
with open(self._old_file, 'r') as f:
self.assertEqual(expected, json.load(f))
def _get_runnable_cmd(
self, exit_code: int, filename: Optional[str], src_contents: Dict[str, Any],
) -> List[str]:
ret = list(self._runnable)
ret.append(str(exit_code))
self._file_index += 1
if filename:
out = filename
else:
out = os.path.join(self._config.temp_dir, '{0}-src.json'.format(self._file_index))
with open(out, 'w') as f:
json.dump(src_contents, f)
ret.append(out)
return ret
| 2.28125
| 2
|
main.py
|
Inspirateur/fm-helper
| 2
|
12783623
|
import dofus_protocol as dp
from fm_state import FMState
def packet_handle(pkt: dp.DofusPacket):
state.update(pkt)
state = FMState()
listener = dp.DofusListener(packet_handle)
| 1.90625
| 2
|
object_generator/object_generator.pyde
|
aditikhare33/generative_art
| 1
|
12783624
|
w = 800
h = 800
blue = "#1380FF"
yellow = "#F8E500"
red = "#F40000"
green = "#2ABA00"
colors = [blue, red, green, yellow]
#curr_color = colors[int(random(0, len(colors)))]
curr_color = 255
incr = 0.1
recent_window = 3.0/incr
bgColor = 255
def setup():
size(w, h)
background(bgColor)
t_x = random(0, 100)
t_y = random(0, 100)
recents = []
def draw():
global t_x, t_y, recents
#background(255)
thickness = map(noise(t_x, t_y), 0, 1, 5, 5)
strokeWeight(thickness)
x = map(noise(t_x), 0, 1, 0, w)
y = map(noise(t_y), 0, 1, 0, h)
recents.append({'x': x, 'y':y, 'th': thickness, 'col': curr_color})
if (len(recents) == 1):
point(x, y)
point(y, x)
else:
if len(recents) > recent_window*25:
recents.pop(0)
else:
show_recents()
t_x += incr
t_y += incr
#change_color()
def keyPressed():
if keyCode == 32:
saveFrame("output/pic_########.png")
show_recents()
def change_color():
global curr_color
chance = int(random(0, 100))
if chance == 1:
curr_color = colors[int(random(0, len(colors)))]
def show_recents():
background(bgColor)
scl = 5.0
for row in range(0, int(scl)):
for col in range(0, int(scl)):
push()
translate(w/scl*col, h/scl*row)
rotate(PI/4.0)
translate(w/18, -h/10)
stroke(colors[int(row*scl+col)%len(colors)])
shift_upper = (row*scl + col + 1) * recent_window
shift_lower = (row*scl + col) * recent_window
for i, item in enumerate(recents):
if (i >= 4 and i <= shift_upper and i >= shift_lower):
#stroke(item['col'])
strokeWeight(item['th']/scl)
p_item = recents[i-1]
p2_item = recents[i-4]
noFill()
curve(p2_item['x']/scl, p2_item['y']/scl, p_item['y']/scl, p_item['x']/scl, p_item['x']/scl, p_item['y']/scl, item['y']/scl, item['x']/scl)
#line(item['x']/scl, item['y']/scl, item['y']/scl, item['x']/scl)
#line(p_item['x']/scl, p_item['y']/scl, item['y']/scl, item['x']/scl)
line(p_item['x']/scl, p_item['y']/scl, item['x']/scl, item['y']/scl)
line(p_item['y']/scl, p_item['x']/scl, item['y']/scl, item['x']/scl)
pop()
if len(recents) >= recent_window*25:
fill(0)
text("PRESS SPACE FOR NEW OBJECTS", w/2.5 , h/20)
| 2.75
| 3
|
signup/models.py
|
PingaxAnalytics/koob_beta
| 0
|
12783625
|
# This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class Users(models.Model):
# Primary Store connection details
temporaryCredentialsRequestUrl = models.CharField(max_length=254,null=True)
adminAuthorizationUrl = models.CharField(max_length=254,null=True)
accessTokenRequestUrl = models.CharField(max_length=254,null=True)
apiUrl = models.CharField(max_length=254,null=True)
consumerKey = models.CharField(max_length=254,null=True)
consumerSecret = models.CharField(max_length=254,null=True)
# Store details
password = models.CharField(max_length=128)
last_login = models.DateTimeField(blank=True, null=True)
is_superuser = models.IntegerField(null=True)
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.CharField(max_length=254)
registered_at = models.IntegerField(null=True)
webstore_url = models.CharField(max_length=254,null=True)
webstore_platform = models.CharField(max_length=254,null=True)
class Stores(models.Model):
name = models.CharField(max_length=100)
| 2.0625
| 2
|
src/genie/libs/parser/iosxr/tests/ShowSpanningTreePvrsTag/cli/equal/golden_output_expected.py
|
balmasea/genieparser
| 204
|
12783626
|
expected_output = {
'pvrstag': {
'foo': {
'domain': 'foo',
'interfaces': {
'GigabitEthernet0/0/0/0': {
'interface': 'GigabitEthernet0/0/0/0',
'vlans': {
'5': {
'preempt_delay': True,
'preempt_delay_state': 'Sending startup BPDU until 13:38:03',
'sub_interface': 'GigabitEthernet0/0/0/0.5',
'sub_interface_state': 'Up',
'max_age': 20,
'root_priority': 0,
'root_bridge': '0000.0000.0000',
'root_cost': 1,
'bridge_priority': 32768,
'bridge_id': '0255.1dff.3c70',
'port_priority': 128,
'port_id': 1,
'hello_time': 2,
'active': True,
'counters': {
'bdpu_sent': 6,
'topology_changes': 0,
},
},
},
},
'GigabitEthernet0/0/0/1': {
'interface': 'GigabitEthernet0/0/0/1',
'vlans': {
'5': {
'preempt_delay': True,
'preempt_delay_state': 'Sending standard BPDU',
'sub_interface': 'GigabitEthernet0/0/0/1.5',
'sub_interface_state': 'Up',
'max_age': 20,
'root_priority': 0,
'root_bridge': '0000.0000.0000',
'root_cost': 0,
'bridge_priority': 32768,
'bridge_id': '021a.9eff.5645',
'port_priority': 128,
'port_id': 1,
'hello_time': 2,
'active': True,
'counters': {
'bdpu_sent': 7,
'topology_changes': 0,
},
},
},
},
},
},
},
}
| 1.671875
| 2
|
Chapter09/filepickle2.py
|
kaushalkumarshah/Learn-Python-in-7-Days
| 12
|
12783627
|
import pickle
pickle_file = open("emp1.dat", 'rb')  # pickle data should be read in binary mode
name_list = pickle.load(pickle_file)
skill_list = pickle.load(pickle_file)
print name_list, "\n", skill_list
pickle_file.close()
| 2.84375
| 3
|
saas/tests/biz/policy_tests.py
|
nannan00/bk-iam-saas
| 0
|
12783628
|
from typing import List
import pytest
from backend.biz.policy import InstanceBean, InstanceBeanList, PathNodeBean, PathNodeBeanList
from backend.common.error_codes import APIException
from backend.service.models import PathResourceType, ResourceTypeDict
from backend.service.models.instance_selection import InstanceSelection
@pytest.fixture()
def path_node_bean():
return PathNodeBean(
id="id", name="name", system_id="system_id", type="type", type_name="type_name", type_name_en="type_name_en"
)
@pytest.fixture()
def resource_type_dict():
return ResourceTypeDict(data={("system_id", "type"): {"name": "name_test", "name_en": "name_en_test"}})
class TestPathNodeBean:
def test_fill_empty_fields(self, path_node_bean: PathNodeBean, resource_type_dict: ResourceTypeDict):
path_node_bean.fill_empty_fields(resource_type_dict)
assert path_node_bean.type_name == "name_test" and path_node_bean.type_name_en == "name_en_test"
@pytest.mark.parametrize(
"resource_system_id, resource_type_id, expected",
[
("system_id", "type", True),
("system_id_no", "type", False),
("system_id", "type_no", False),
],
)
def test_match_resource_type(self, path_node_bean: PathNodeBean, resource_system_id, resource_type_id, expected):
assert path_node_bean.match_resource_type(resource_system_id, resource_type_id) == expected
def test_to_path_resource_type(self, path_node_bean: PathNodeBean):
assert path_node_bean.to_path_resource_type() == PathResourceType(
system_id=path_node_bean.system_id, id=path_node_bean.type
)
@pytest.fixture()
def path_node_bean_list(path_node_bean: PathNodeBean):
path_node_bean1 = path_node_bean.copy(deep=True)
path_node_bean1.id = "id1"
path_node_bean1.name = "name1"
path_node_bean1.type = "type1"
path_node_bean1.type_name = "type_name1"
path_node_bean1.type_name_en = "type_name_en1"
return PathNodeBeanList(
nodes=[
path_node_bean,
path_node_bean1,
]
)
def gen_instance_selection(chian: List, ignore_iam_path=False) -> InstanceSelection:
return InstanceSelection(
id="id",
system_id="system_id",
name="name",
name_en="name_en",
ignore_iam_path=ignore_iam_path,
resource_type_chain=chian,
)
class TestPathNodeBeanList:
def test_dict(self, path_node_bean_list: PathNodeBeanList):
assert path_node_bean_list.dict() == [
{
"id": "id",
"name": "name",
"system_id": "system_id",
"type": "type",
"type_name": "type_name",
"type_name_en": "type_name_en",
},
{
"id": "id1",
"name": "name1",
"system_id": "system_id",
"type": "type1",
"type_name": "type_name1",
"type_name_en": "type_name_en1",
},
]
def test_to_path_string(self, path_node_bean_list: PathNodeBeanList):
assert path_node_bean_list.to_path_string() == "/type,id/type1,id1/"
def test_to_path_resource_types(self, path_node_bean_list: PathNodeBeanList):
assert path_node_bean_list._to_path_resource_types() == [
PathResourceType(system_id="system_id", id="type"),
PathResourceType(system_id="system_id", id="type1"),
]
def test_display(self, path_node_bean_list: PathNodeBeanList):
assert path_node_bean_list.display() == "type:name/type1:name1"
def test_match_selection_one_node(self, path_node_bean_list: PathNodeBeanList):
path_node_bean_list.nodes.pop()
assert path_node_bean_list.match_selection("system_id", "type", None)
@pytest.mark.parametrize(
"instance_selection, expected",
[
(
gen_instance_selection(
[{"system_id": "system_id", "id": "type"}, {"system_id": "system_id", "id": "type1"}]
),
True,
),
(gen_instance_selection([{"system_id": "system_id", "id": "type"}]), False),
],
)
def test_match_selection(self, path_node_bean_list: PathNodeBeanList, instance_selection, expected):
assert path_node_bean_list.match_selection("system_id", "type", instance_selection) == expected
@pytest.mark.parametrize(
"instance_selection, start, end",
[
(
gen_instance_selection(
[{"system_id": "system_id", "id": "type"}, {"system_id": "system_id", "id": "type1"}],
ignore_iam_path=True,
),
1,
2,
),
(gen_instance_selection([{"system_id": "system_id", "id": "type"}]), 0, 2),
],
)
def test_ignore_path(self, path_node_bean_list: PathNodeBeanList, instance_selection, start, end):
assert path_node_bean_list.ignore_path(instance_selection) == path_node_bean_list.nodes[start:end]
@pytest.fixture()
def instance_bean(path_node_bean: PathNodeBean):
path_node_bean1 = path_node_bean.copy(deep=True)
path_node_bean1.id = "id1"
path_node_bean1.name = "name1"
path_node_bean1.type = "type1"
path_node_bean1.type_name = "type_name1"
path_node_bean1.type_name_en = "type_name_en1"
return InstanceBean(path=[[path_node_bean, path_node_bean1]], type="type")
def gen_paths():
return [
[
PathNodeBean(
id="id",
name="name",
system_id="system_id",
type="type",
type_name="type_name",
type_name_en="type_name_en",
),
PathNodeBean(
id="id1",
name="name1",
system_id="system_id",
type="type1",
type_name="type_name1",
type_name_en="type_name_en1",
),
]
]
class TestInstanceBean:
def test_fill_empty_fields(self, instance_bean: InstanceBean, resource_type_dict: ResourceTypeDict):
instance_bean.fill_empty_fields(resource_type_dict)
assert instance_bean.name == "name_test"
assert instance_bean.name_en == "name_en_test"
assert instance_bean.path[0][0].type_name == "name_test"
assert instance_bean.path[0][0].type_name_en == "name_en_test"
assert instance_bean.path[0][1].type_name == ""
assert instance_bean.path[0][1].type_name_en == ""
def test_iter_path_node(self, instance_bean: InstanceBean):
assert list(instance_bean.iter_path_node()) == instance_bean.path[0]
def test_get_system_id_set(self, instance_bean: InstanceBean):
assert instance_bean.get_system_id_set() == {"system_id"}
@pytest.mark.parametrize(
"paths, length",
[
(gen_paths(), 1),
([[gen_paths()[0][0]]], 2),
],
)
def test_add_paths(self, instance_bean: InstanceBean, paths, length):
instance_bean.add_paths(paths)
assert len(instance_bean.path) == length
@pytest.mark.parametrize(
"paths, length",
[
(gen_paths(), 0),
([[gen_paths()[0][0]]], 1),
],
)
def test_remove_paths(self, instance_bean: InstanceBean, paths, length):
instance_bean.remove_paths(paths)
assert len(instance_bean.path) == length
def test_is_empty(self, instance_bean: InstanceBean):
assert not instance_bean.is_empty
instance_bean.path.pop()
assert instance_bean.is_empty
def test_count(self, instance_bean: InstanceBean):
assert instance_bean.count() == 1
@pytest.mark.parametrize(
"instance_selection, length",
[
(
gen_instance_selection(
[{"system_id": "system_id", "id": "type"}, {"system_id": "system_id", "id": "type1"}]
),
1,
),
(gen_instance_selection([{"system_id": "system_id", "id": "type"}]), 0),
],
)
def test_clone_and_filter_by_instance_selections(self, instance_bean: InstanceBean, instance_selection, length):
instance_bean1 = instance_bean.clone_and_filter_by_instance_selections(
"system_id", "type", [instance_selection]
)
if instance_bean1 is not None:
assert len(instance_bean1.path) == length
else:
assert 0 == length
@pytest.mark.parametrize(
"instance_selection, raise_exception",
[
(
gen_instance_selection(
[{"system_id": "system_id", "id": "type"}, {"system_id": "system_id", "id": "type1"}]
),
False,
),
(gen_instance_selection([{"system_id": "system_id", "id": "type"}]), True),
],
)
def test_check_instance_selection(self, instance_bean: InstanceBean, instance_selection, raise_exception):
try:
instance_bean.check_instance_selection("system_id", "type", [instance_selection])
assert not raise_exception
except APIException:
assert raise_exception
def test_check_instance_selection_ignore_path(self, instance_bean: InstanceBean):
instance_selection = gen_instance_selection(
[{"system_id": "system_id", "id": "type"}, {"system_id": "system_id", "id": "type1"}], ignore_iam_path=True
)
instance_bean.check_instance_selection("system_id", "type", [instance_selection], ignore_path=True)
assert len(instance_bean.path[0]) == 1
@pytest.fixture()
def instance_bean_list(instance_bean: InstanceBean):
instance_bean1 = instance_bean.copy(deep=True)
instance_bean1.type = "type1"
return InstanceBeanList([instance_bean, instance_bean1])
class TestInstanceBeanList:
def test_get(self, instance_bean_list: InstanceBeanList):
assert instance_bean_list.get("type").type == "type"
assert instance_bean_list.get("test") is None
def test_add(self, instance_bean_list: InstanceBeanList):
instance_bean_list1 = InstanceBeanList([instance_bean_list.instances.pop()])
instance_bean_list._instance_dict.pop("type1")
assert len(instance_bean_list.instances) == 1
instance_bean_list.add(instance_bean_list1)
assert len(instance_bean_list.instances) == 2
instance_bean_list.add(instance_bean_list1)
assert instance_bean_list.instances[1].type == "type1"
assert len(instance_bean_list.instances[1].path) == 1
instance_bean_list1 = InstanceBeanList([instance_bean_list.instances.pop()])
instance_bean_list._instance_dict.pop("type1")
assert len(instance_bean_list.instances) == 1
instance_bean_list1.instances[0].type = "type"
instance_bean_list1.instances[0].path[0][-1].id = "id2"
instance_bean_list.add(instance_bean_list1)
assert len(instance_bean_list.instances) == 1
assert len(instance_bean_list.instances[0].path) == 2
def test_sub(self, instance_bean_list: InstanceBeanList):
instance_bean_list1 = InstanceBeanList([instance_bean_list.instances.pop()])
instance_bean_list._instance_dict.pop("type1")
assert len(instance_bean_list.instances) == 1
instance_bean_list1.instances[0].type = "type"
instance_bean_list1._instance_dict.pop("type1")
instance_bean_list1._instance_dict["type"] = instance_bean_list1.instances[0]
instance_bean_list.sub(instance_bean_list1)
assert len(instance_bean_list.instances) == 0
"""
PolicyBeanList sub
1. 需要sub没有关联资源类型的操作
2. 需要sub都是任意的操作
"""
| 2.0625
| 2
|
fileshare/models.py
|
bwelch21/secure-file-share
| 0
|
12783629
|
<reponame>bwelch21/secure-file-share
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
# Django automatically creates a primary key ID... No need to create one here
user = models.OneToOneField(User)
reports_owned = models.ManyToManyField('Report', blank=True)
groups_in = models.ManyToManyField('ProfileGroup', blank=True)
publickey = models.CharField(null=True, max_length=10000)
unreadmessages = models.CharField(max_length=10, default="false")
def __str__(self):
return self.user.username
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
# Create Reports model
class Report(models.Model):
owned_by = models.ForeignKey(User)
created = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now_add=True, null=True)
last_modified_by = models.CharField(default="Owner", max_length=128)
short_desc = models.CharField("Title", max_length=128, unique=True)
long_desc = models.TextField("Description")
private = models.BooleanField("Restrict access to this file?", default=False)
files = models.ManyToManyField('Documents', blank=True)
is_encrypted = models.BooleanField("Is the attached file encrypted?", default=False,
help_text="Leave blank if no file is attached.")
in_folder = models.BooleanField(default=False)
group_in = models.ManyToManyField('ProfileGroup', blank=True)
comments = models.ManyToManyField('ReportComments', blank=True)
def __str__(self):
return self.short_desc
class Documents(models.Model):
datetime = '%Y/%m/%d'
file_attached = models.FileField("Upload a file", upload_to='reports/' + datetime, blank=True, null=True)
is_encrypted = models.BooleanField(default=False)
file_hash = models.CharField(max_length=128, blank=True, null=True)
def __str__(self):
return str(self.file_attached)
class Folder(models.Model):
name = models.CharField(max_length=128, unique=True)
owned_by = models.ForeignKey(User)
created = models.DateTimeField(auto_now_add=True)
reports = models.ManyToManyField(Report, blank=True)
class ProfileGroup(models.Model):
creator = models.ForeignKey(User, null=True)
name = models.CharField(max_length=128, unique=True)
members = models.ManyToManyField('Profile', related_name='group_members', blank=True)
reports = models.ManyToManyField('Report', blank=True)
comments = models.ManyToManyField('ReportComments', blank=True)
def __str__(self):
return self.name
class Conversation(models.Model):
sender = models.ForeignKey(User, related_name="sender")
reciever = models.ForeignKey(User, related_name="reciever")
reciever_name = models.CharField(max_length=128)
recently_used = models.DateTimeField()
messages = models.ManyToManyField('Message', blank=True)
unreadmessages = models.CharField(max_length=1000, default="0")
def __str__(self):
return self.reciever_name
class Message(models.Model):
owned_by = models.ForeignKey('Conversation')
sender = models.ForeignKey(User)
time = models.DateTimeField(auto_now_add=True)
messagecontent = models.CharField(max_length=10000)
key = models.CharField(null=True, max_length=10000)
def __str__(self):
return self.messagecontent
class Activity(models.Model):
owned_by = models.ForeignKey(User)
time = models.DateTimeField(auto_now_add=True)
description = models.CharField(max_length=10000)
class ReportComments(models.Model):
creator = models.ForeignKey('Profile')
comment = models.TextField(max_length=1000, blank=False)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.comment
| 2.484375
| 2
|
Robot Code/autonomous/driveForwards.py
|
cboy116/Team-4480-Code-2018
| 4
|
12783630
|
from robotpy_ext.autonomous import StatefulAutonomous, timed_state, state
class DriveForward(StatefulAutonomous):
DEFAULT = False
MODE_NAME = 'Drive Forward'
def initialize(self):
self.drive.setAutoSetpoint(696.75*10.5*12)
@timed_state(duration=0.5, next_state='drive_forward', first=True)
def drive_wait(self):
self.drive.driveMeBoi(0, 0)
self.drive.resetEncoders()
@timed_state(duration=1.5, next_state='stop')
def drive_forward(self):
self.drive.autoForward.enable()
self.drive.driveMeBoi(0, 0)
if not self.drive.autoForward.isEnabled():
self.next_state('stop')
@state()
def stop(self):
self.drive.driveMeBoi(0, 0)
| 2.8125
| 3
|
stored/backends/local.py
|
triagemd/stored
| 3
|
12783631
|
import os
import shutil
class LocalFileStorage(object):
def __init__(self, path):
self.path = path
self.filename = os.path.basename(path)
def list(self, relative=False):
matches = []
for root, dirnames, filenames in os.walk(self.path):
for filename in filenames:
file_path = os.path.join(root, filename)
if relative:
file_path = os.path.relpath(file_path, self.path)
matches.append(file_path)
return matches
def sync_to(self, output_path):
if not os.path.exists(self.path):
return
if self.is_dir(self.path):
input_paths = self.list(relative=True)
if not os.path.exists(output_path):
os.makedirs(output_path)
output_paths = LocalFileStorage(output_path).list(relative=True)
new_paths = set(input_paths) - set(output_paths)
for path in new_paths:
LocalFileStorage(os.path.join(self.path, path)).sync_to(os.path.join(output_path, path))
else:
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
shutil.copyfile(self.path, output_path)
def sync_from(self, input_path):
if not os.path.exists(input_path):
return
if self.is_dir(input_path):
input_paths = LocalFileStorage(input_path).list(relative=True)
if not os.path.exists(self.path):
os.makedirs(self.path)
output_paths = self.list(relative=True)
new_paths = set(input_paths) - set(output_paths)
for path in new_paths:
LocalFileStorage(os.path.join(self.path, path)).sync_from(os.path.join(input_path, path))
else:
output_dir = os.path.dirname(self.path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
shutil.copyfile(input_path, self.path)
def is_dir(self, path=None):
if path is None:
path = self.path
_, extension = os.path.splitext(path)
return os.path.isdir(path) or len(extension) == 0 or path.endswith('/')
| 3.21875
| 3
|
api/views.py
|
Luanlpg/favorites_list
| 0
|
12783632
|
<gh_stars>0
from django.shortcuts import render
from django.http import Http404
from .utils import EmailService
from .serializer import ClientSerializer
from .serializer import UserSerializer
from .serializer import FavoriteSerializer
from .serializer import FavoriteIdSerializer
from .models import UserModel
from .models import ClientModel
from .models import FavoriteModel
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.views import status
import time
import requests
import json
class UserView(APIView):
"""=========================================================================\n
View que cadastra usuário.\n
========================================================================="""
serializer_class = UserSerializer
email_service = EmailService()
def post(self, request, format=None):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
time.sleep(1)
user = UserModel.objects.get(email=request.data.dict()['email'])
self.email_service.send_token(user.email, user.token)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ClientListView(APIView):
"""=========================================================================\n
View que lista e cadastra clientes.\n
========================================================================="""
serializer_class = ClientSerializer
def get(self, request, token, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
serializer = self.serializer_class(ClientModel.objects.all(), many=True)
return Response(serializer.data)
def post(self, request, token, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ClientDetailView(APIView):
"""=========================================================================\n
View que mostra, altera e apaga cliente.\n
========================================================================="""
serializer_class = ClientSerializer
def get_client(self, email):
try:
return ClientModel.objects.get(email=email)
except ClientModel.DoesNotExist:
raise Http404
def get(self, request, token, email, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
client = self.get_client(email)
serializer = self.serializer_class(client)
return Response(serializer.data)
def patch(self, request, token, email, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
client = self.get_client(email)
serializer = self.serializer_class(client, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, token, email, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
client = self.get_client(email)
for favorite in FavoriteModel.objects.filter(id_client=client.id):
favorite.delete()
client.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class FavoritesListView(APIView):
"""=========================================================================\n
View que lista e cadastra favoritos.\n
========================================================================="""
serializer_class = FavoriteSerializer
id_serializer_class = FavoriteIdSerializer
def get_client(self, email):
try:
return ClientModel.objects.get(email=email)
except ClientModel.DoesNotExist:
raise Http404
def get(self, request, token, email, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
client = self.get_client(email)
serializer = self.serializer_class(FavoriteModel.objects.filter(id_client=client.id), many=True)
return Response(serializer.data)
def post(self, request, token, email, format=None):
try:
UserModel.objects.get(token=token)
except UserModel.DoesNotExist:
return Response(json.dumps({"token":"Não autorizado."}), status=status.HTTP_404_NOT_FOUND)
client = self.get_client(email)
id_serializer = self.id_serializer_class(data=request.data)
if id_serializer.is_valid():
try:
response = requests.get(f'http://challenge-api.luizalabs.com/api/product/{request.data.dict()["id"]}')
FavoriteModel.objects.create(
id = response.json()['id'],
id_client = client.id,
title = response.json()['title'],
price = response.json()['price'],
image = response.json()['image'],
brand = response.json()['brand']
)
except Exception as e:
return Response(json.dumps({"id":"Produto não encontrado."}), status=status.HTTP_404_NOT_FOUND)
return Response(id_serializer.data, status=status.HTTP_201_CREATED)
return Response(id_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| 2.125
| 2
|
anuga/file_conversion/sts2sww_mesh.py
|
GeoscienceAustralia/anuga_core
| 136
|
12783633
|
from __future__ import print_function
from __future__ import division
from builtins import range
from past.utils import old_div
import os
import numpy as num
from anuga.file.netcdf import NetCDFFile
import pylab as P
import anuga
from anuga.abstract_2d_finite_volumes.mesh_factory import rectangular
from anuga.shallow_water.shallow_water_domain import Domain
from anuga.shallow_water.boundaries import Reflective_boundary
from anuga.coordinate_transforms.geo_reference import Geo_reference
from anuga.shallow_water.forcing import *
from anuga.utilities.numerical_tools import ensure_numeric
from anuga.file.sww import Write_sww
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
def sts2sww_mesh(basename_in, basename_out=None,
spatial_thinning=1, verbose=False):
from anuga.mesh_engine.mesh_engine import NoTrianglesError
from anuga.pmesh.mesh import Mesh
if verbose:
print("Starting sts2sww_mesh")
mean_stage=0.
zscale=1.
    if basename_in[-4:] == '.sts':
stsname = basename_in
else:
stsname = basename_in + '.sts'
if verbose: print("Reading sts NetCDF file: %s" %stsname)
infile = NetCDFFile(stsname, netcdf_mode_r)
cellsize = infile.cellsize
ncols = infile.ncols
nrows = infile.nrows
no_data = infile.no_data
refzone = infile.zone
x_origin = infile.xllcorner
y_origin = infile.yllcorner
origin = num.array([x_origin, y_origin])
x = infile.variables['x'][:]
y = infile.variables['y'][:]
times = infile.variables['time'][:]
wind_speed_full = infile.variables['wind_speed'][:]
wind_angle_full = infile.variables['wind_angle'][:]
pressure_full = infile.variables['barometric_pressure'][:]
infile.close()
number_of_points = nrows*ncols
points_utm = num.zeros((number_of_points,2),num.float)
points_utm[:,0]=x+x_origin
points_utm[:,1]=y+y_origin
thinned_indices=[]
for i in range(number_of_points):
if (old_div(i,ncols)==0 or old_div(i,ncols)==ncols-1 or (old_div(i,ncols))%(spatial_thinning)==0):
if ( i%(spatial_thinning)==0 or i%nrows==0 or i%nrows==nrows-1 ):
thinned_indices.append(i)
#Spatial thinning
points_utm=points_utm[thinned_indices]
number_of_points = points_utm.shape[0]
number_of_timesteps = wind_speed_full.shape[0]
wind_speed = num.empty((number_of_timesteps,number_of_points),dtype=float)
wind_angle = num.empty((number_of_timesteps,number_of_points),dtype=float)
barometric_pressure = num.empty((number_of_timesteps,number_of_points),dtype=float)
if verbose:
print("Total number of points: ", nrows*ncols)
print("Number of thinned points: ", number_of_points)
for i in range(number_of_timesteps):
wind_speed[i] = wind_speed_full[i,thinned_indices]
wind_angle[i] = wind_angle_full[i,thinned_indices]
barometric_pressure[i] = pressure_full[i,thinned_indices]
#P.plot(points_utm[:,0],points_utm[:,1],'ro')
#P.show()
if verbose:
print("Generating sww triangulation of gems data")
mesh = Mesh()
mesh.add_vertices(points_utm)
mesh.auto_segment(smooth_indents=True, expand_pinch=True)
mesh.auto_segment(mesh.shape.get_alpha() * 1.1)
try:
mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
except NoTrianglesError:
# This is a bit of a hack, going in and changing the data structure.
mesh.holes = []
mesh.generate_mesh(minimum_triangle_angle=0.0, verbose=False)
mesh_dic = mesh.Mesh2MeshList()
points_utm=ensure_numeric(points_utm)
assert num.alltrue(ensure_numeric(mesh_dic['generatedpointlist'])
== ensure_numeric(points_utm))
volumes = mesh_dic['generatedtrianglelist']
# Write sww intro and grid stuff.
    if basename_out is not None and basename_out[-4:] == '.sww':
swwname = basename_out
else:
swwname = basename_in + '.sww'
    if verbose: print('Output to %s' % swwname)
if verbose:
print("Writing sww wind and pressure field file")
outfile = NetCDFFile(swwname, netcdf_mode_w)
sww = Write_sww([], ['wind_speed','wind_angle','barometric_pressure'])
sww.store_header(outfile, times, len(volumes), len(points_utm),
verbose=verbose, sww_precision='d')
outfile.mean_stage = mean_stage
outfile.zscale = zscale
sww.store_triangulation(outfile, points_utm, volumes,
refzone,
new_origin=origin, #check effect of this line
verbose=verbose)
if verbose:
print('Converting quantities')
# Read in a time slice from the sts file and write it to the SWW file
#print wind_angle[0,:10]
for i in range(len(times)):
sww.store_quantities(outfile,
slice_index=i,
verbose=verbose,
wind_speed=wind_speed[i,:],
wind_angle=wind_angle[i,:],
barometric_pressure=barometric_pressure[i,:],
sww_precision=num.float)
if verbose:
sww.verbose_quantities(outfile)
outfile.close()
| 1.929688
| 2
|
web/backend/app/resources.py
|
pascalpoizat/fbpmn
| 27
|
12783634
|
from flask import request
from flask_restplus import Resource, fields, Namespace
from app import db
from app.models import Application, Constraints, CounterExample, Model, Result, UserNets, UserProps, \
Verification, get_workdir
from app.schemas import CounterExampleSchema, ModelSchema, ResultSchema, VerificationSchema
MODEL_NOT_FOUND = "Model not found."
VERIFICATION_NOT_FOUND = "Verification not found."
RESULT_NOT_FOUND = "Result not found."
COUNTER_EXAMPLE_NOT_FOUND = "Counter-example not found."
URL_ID = "/<int:id>"
def create_schema(schema_type, bool):
if bool:
return schema_type(many=bool)
else:
return schema_type()
a = Application()
models_ns = Namespace('models', description='models related operations')
verifications_ns = Namespace(
'verifications', description='verifications related operations')
results_ns = Namespace('results', description='results related operations')
counter_examples_ns = Namespace(
'counter_examples', description='counter-examples related operations')
verification_input = verifications_ns.model(
'Verification', {
'model': fields.Raw(),
'usernets': fields.List(fields.String, description='Network01Bag'),
'userdefs': fields.List(fields.String, description='User1'),
'userprops': fields.List(fields.String, description='MessageSound'),
'constraintNode': fields.String('TRUE'),
'constraintEdge': fields.String('TRUE')
})
@models_ns.route('')
class ModelList(Resource):
def get(self):
models = a.get_all_elements(Model)
return (create_schema(ModelSchema, True)).jsonify(models)
@models_ns.route(f'{URL_ID}')
class ModelById(Resource):
def get(self, id):
m = a.get_element_by_id(Model, id)
if m:
return (create_schema(ModelSchema, False)).jsonify(m)
return {'message': MODEL_NOT_FOUND}, 404
@verifications_ns.route(f'{URL_ID}/model')
class ModelByVerification(Resource):
def get(self, id):
model_id = (a.get_element_by_id(Verification, id)).model_id
return ModelById.get(self, model_id)
@counter_examples_ns.route(f'{URL_ID}/model')
class ModelByCounterExample(Resource):
def get(self, id):
ce = a.get_element_by_id(CounterExample, id)
m_id = ce.get_result().get_verification().model_id
return ModelById.get(self, m_id)
@verifications_ns.route('')
class VerificationList(Resource):
def get(self):
v = a.get_all_elements(Verification)
return (create_schema(VerificationSchema, True)).jsonify(v)
@verifications_ns.expect(verification_input)
def post(self):
data = request.get_json()
model = (data['model']['xml'])
usernets = data['usernets']
userdefs = data['userdefs']
userprops = data['userprops']
constraints = str(f'CONSTANT ConstraintNode <- {data["constraintNode"]}\n'
f' ConstraintEdge <- {data["constraintEdge"]}\n'
" Constraint <- ConstraintNodeEdge\n")
v = a.create_verification()
try:
m = v.create_model(model)
v.create_file(UserNets, usernets, m.name)
if not userdefs is None:
v.create_properties_files(userdefs, userprops, m.name)
else:
v.create_file(UserProps, userprops, m.name)
v.create_file(Constraints, constraints, m.name)
output = v.launch_check(m.name)
workdir = get_workdir(output)
xx = v.create_results_list(workdir, m.name)
v.create_counter_examples(workdir, m.name, xx)
del m, v
return output
except (AttributeError, TypeError) as e:
print(e)
v.aborted()
return ("Incorrect model")
@verifications_ns.route(f'{URL_ID}')
class VerificationById(Resource):
def get(self, id):
v = a.get_element_by_id(Verification, id)
if v:
return (create_schema(VerificationSchema, False)).jsonify(v)
return {'message': VERIFICATION_NOT_FOUND}, 404
def delete(self, id):
v = Verification.query.get(id)
db.session.delete(v)
db.session.commit()
return "Verification was successfully deleted"
@results_ns.route(f'{URL_ID}/verification')
class VerificationByResult(Resource):
def get(self, id):
verification = (a.get_element_by_id(Result, id)).verification
return (create_schema(VerificationSchema, False)).jsonify(verification)
@verifications_ns.route(f'/latest')
class LatestVerification(Resource):
def get(self):
v = a.get_latest_verification()
return (create_schema(VerificationSchema, False)).jsonify(v)
@results_ns.route('')
class ResultList(Resource):
def get(self):
r = a.get_all_elements(Result)
return (create_schema(ResultSchema, True)).jsonify(r)
@results_ns.route(f'{URL_ID}')
class ResultById(Resource):
def get(self, id):
r = a.get_element_by_id(Result, id)
if r:
return (create_schema(ResultSchema, False)).jsonify(r)
return {'message': RESULT_NOT_FOUND}, 404
@verifications_ns.route(f'{URL_ID}/results')
class ResultByVerification(Resource):
def get(self, id):
verification = a.get_element_by_id(Verification, id)
return (create_schema(ResultSchema, True)).jsonify(verification.results)
@counter_examples_ns.route('')
class CounterExampleList(Resource):
def get(self):
ce = a.get_all_elements(CounterExample)
return (create_schema(CounterExampleSchema, True)).jsonify(ce)
@counter_examples_ns.route(f'{URL_ID}')
class CounterExampleById(Resource):
def get(self, id):
ce = a.get_element_by_id(CounterExample, id)
if ce:
return (create_schema(CounterExampleSchema, False)).jsonify(ce)
return {'message': COUNTER_EXAMPLE_NOT_FOUND}, 404
@results_ns.route(f'{URL_ID}/counter_examples')
class CounterExampleByResult(Resource):
def get(self, id):
counter_example = (a.get_element_by_id(Result, id)).counter_example
if counter_example:
return (create_schema(CounterExampleSchema, False)).jsonify(counter_example)
else:
return "Record not found", 400
| 2.265625
| 2
|
can_tools/scrapers/official/PA/philadelhpia_vaccine.py
|
christopherturner/can-scrapers
| 7
|
12783635
|
import json
import pandas as pd
from us import states
from bs4 import BeautifulSoup
import urllib.parse
from can_tools.scrapers import variables
from can_tools.scrapers.official.base import TableauDashboard
from can_tools.scrapers.util import requests_retry_session
class PhiladelphiaVaccine(TableauDashboard):
state_fips = int(states.lookup("Pennsylvania").fips)
has_location = True
location_type = "county"
provider = "county"
source = (
"https://www.phila.gov/programs/coronavirus-disease-2019-covid-19/data/vaccine/"
)
source_name = "Philadelphia Department of Public Health"
baseurl = "https://healthviz.phila.gov/t/PublicHealth/"
viewPath = "COVIDVaccineDashboard/COVID_Vaccine"
data_tableau_table = "Residents Percentage {dose_type}"
variables = {
"Residents Receiving At Least 1 Dose* ": variables.INITIATING_VACCINATIONS_ALL,
"Fully Vaccinated Residents*": variables.FULLY_VACCINATED_ALL,
}
def fetch(self) -> pd.DataFrame:
# create a dict of the 2 dose type tables
# which are titled "Residents Percentage New" and "... Full"
return {
dose_type: self.get_tableau_view(dose_type=dose_type)[
self.data_tableau_table.format(dose_type=dose_type)
]
for dose_type in ["New", "Full"]
}
def normalize(self, data: pd.DataFrame) -> pd.DataFrame:
dataframes = []
for dose_type in ["New", "Full"]:
dose_data = (
data[dose_type]
.rename(
columns={
"Measure Values-alias": "value",
"Measure Names-alias": "variable",
}
)
.loc[:, ["value", "variable"]]
.query(
"variable in"
"['Residents Receiving At Least 1 Dose* ', 'Fully Vaccinated Residents*']"
)
.assign(
location=42101,
value=lambda x: pd.to_numeric(x["value"].str.replace(",", "")),
vintage=self._retrieve_vintage(),
)
.pipe(
self._rename_or_add_date_and_location,
location_column="location",
timezone="US/Eastern",
)
)
dataframes.append(dose_data)
data = (
self.extract_CMU(df=pd.concat(dataframes), cmu=self.variables)
.drop(columns={"variable"})
.reset_index(drop=True)
)
# break scraper if both init and completed variables are not included in data
vars = {"total_vaccine_initiated", "total_vaccine_completed"}
assert vars <= set(data["category"])
return data
# could not find a way to select the "Demographics New" dashboard tab in the usual manner,
# so edit request body to manually select Demographic tab/sheets
# this is the default function with only form_data["sheet_id"] altered
def get_tableau_view(self, dose_type, url=None):
def onAlias(it, value, cstring):
return value[it] if (it >= 0) else cstring["dataValues"][abs(it) - 1]
req = requests_retry_session()
fullURL = self.baseurl + "/views/" + self.viewPath
reqg = req.get(
fullURL,
params={
":language": "en",
":display_count": "y",
":origin": "viz_share_link",
":embed": "y",
":showVizHome": "n",
":jsdebug": "y",
":apiID": "host4",
"#navType": "1",
"navSrc": "Parse",
},
headers={"Accept": "text/javascript"},
)
soup = BeautifulSoup(reqg.text, "html.parser")
tableauTag = soup.find("textarea", {"id": "tsConfigContainer"})
tableauData = json.loads(tableauTag.text)
parsed_url = urllib.parse.urlparse(fullURL)
dataUrl = f'{parsed_url.scheme}://{parsed_url.hostname}{tableauData["vizql_root"]}/bootstrapSession/sessions/{tableauData["sessionid"]}'
# copy over some additional headers from tableauData
form_data = {}
form_map = {
"sheetId": "sheet_id",
"showParams": "showParams",
"stickySessionKey": "stickySessionKey",
}
for k, v in form_map.items():
if k in tableauData:
form_data[v] = tableauData[k]
# set sheet manually to access the subsheets we need
form_data["sheet_id"] = f"Demographics {dose_type}"
resp = req.post(
dataUrl,
data=form_data,
headers={"Accept": "text/javascript"},
)
# Parse the response.
# The response contains multiple chuncks of the form
# `<size>;<json>` where `<size>` is the number of bytes in `<json>`
resp_text = resp.text
data = []
while len(resp_text) != 0:
size, rest = resp_text.split(";", 1)
chunck = json.loads(rest[: int(size)])
data.append(chunck)
resp_text = rest[int(size) :]
# The following section (to the end of the method) uses code from
# https://stackoverflow.com/questions/64094560/how-do-i-scrape-tableau-data-from-website-into-r
presModel = data[1]["secondaryInfo"]["presModelMap"]
metricInfo = presModel["vizData"]["presModelHolder"]
metricInfo = metricInfo["genPresModelMapPresModel"]["presModelMap"]
data = presModel["dataDictionary"]["presModelHolder"]
data = data["genDataDictionaryPresModel"]["dataSegments"]["0"]["dataColumns"]
scrapedData = {}
for metric in metricInfo:
metricsDict = metricInfo[metric]["presModelHolder"]["genVizDataPresModel"]
columnsData = metricsDict["paneColumnsData"]
result = [
{
"fieldCaption": t.get("fieldCaption", ""),
"valueIndices": columnsData["paneColumnsList"][t["paneIndices"][0]][
"vizPaneColumns"
][t["columnIndices"][0]]["valueIndices"],
"aliasIndices": columnsData["paneColumnsList"][t["paneIndices"][0]][
"vizPaneColumns"
][t["columnIndices"][0]]["aliasIndices"],
"dataType": t.get("dataType"),
"paneIndices": t["paneIndices"][0],
"columnIndices": t["columnIndices"][0],
}
for t in columnsData["vizDataColumns"]
if t.get("fieldCaption")
]
frameData = {}
cstring = [t for t in data if t["dataType"] == "cstring"][0]
for t in data:
for index in result:
if t["dataType"] == index["dataType"]:
if len(index["valueIndices"]) > 0:
frameData[f'{index["fieldCaption"]}-value'] = [
t["dataValues"][abs(it)] for it in index["valueIndices"]
]
if len(index["aliasIndices"]) > 0:
frameData[f'{index["fieldCaption"]}-alias'] = [
onAlias(it, t["dataValues"], cstring)
for it in index["aliasIndices"]
]
df = pd.DataFrame.from_dict(frameData, orient="index").fillna(0).T
scrapedData[metric] = df
return scrapedData
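
# A minimal stand-alone sketch (not part of the original scraper) illustrating the
# `<size>;<json>` chunk framing parsed in get_tableau_view above. The sample payload
# below is made up; real Tableau responses use the same framing with larger JSON bodies.
if __name__ == "__main__":
    sample = '14;{"key": "val"}8;{"n": 1}'
    chunks, rest = [], sample
    while rest:
        size, rest = rest.split(";", 1)
        chunks.append(json.loads(rest[: int(size)]))
        rest = rest[int(size):]
    print(chunks)  # [{'key': 'val'}, {'n': 1}]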
| 2.859375
| 3
|
tests/test_toml_sort.py
|
kasium/toml-sort
| 34
|
12783636
|
<reponame>kasium/toml-sort
"""Test the toml_sort module."""
from toml_sort import TomlSort
def test_sort_toml_is_str() -> None:
    """Check that sorting a TOML string yields a string."""
sorted_result = TomlSort("[hello]").sorted()
assert isinstance(sorted_result, str)
| 2.75
| 3
|
GUI/Basic-train/Multi-Thread/MWE.py
|
muyuuuu/PyQt-learn
| 12
|
12783637
|
import sys, time
from PyQt5.QtWidgets import QMainWindow, QWidget, QHBoxLayout, QApplication, QPushButton
class mainwindow(QMainWindow):
def __init__(self):
super(mainwindow, self).__init__()
layout = QHBoxLayout()
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
btn = QPushButton("点击")
layout.addWidget(btn)
btn.clicked.connect(self.count)
def count(self):
pass
if __name__ == '__main__':
app = QApplication([])
m = mainwindow()
m.show()
sys.exit(app.exec())
| 2.828125
| 3
|
rbldap/commands/enable.py
|
butlerx/rb-ldap-python
| 0
|
12783638
|
"""enable command"""
from ldap3 import MODIFY_REPLACE
from ..accounts.clients import LDAPConnection
async def enable(rb_client: LDAPConnection, commit: bool, username: str) -> int:
"""
    Re-enable a user's LDAP account
Args:
rb_client: ldap client configured for redbrick ldap
commit: flag to commit any changes
        username: username of account to re-enable
Returns:
Returns int to indicate exit code
Raises:
UserNotFound: No user was found matching the username
"""
async with rb_client.connect() as conn:
if commit:
await conn.modify(
f"uid={username},ou=accounts,o=redbrick",
{"loginShell": [(MODIFY_REPLACE, ["/usr/local/shells/shell"])]},
)
print(f"{username} Account re-enabled")
return 0
| 2.46875
| 2
|
LEVEL1/키패드누르기/solution.py
|
seunghwanly/CODING-TEST
| 0
|
12783639
|
<reponame>seunghwanly/CODING-TEST
def solution(numbers, hand):
answer = ''
pad = [['1', '2', '3'], ['4', '5', '6'], ['7', '8', '9'], ['*', '0', '#']]
# [y][x]
leftCurr = [3, 0]
rightCurr = [3, 2]
for number in numbers:
for idx, line in enumerate(pad):
if str(number) in line:
res = line.index(str(number))
if res == 0:
answer += 'L'
leftCurr = [idx, 0]
break
elif res == 2:
answer += 'R'
rightCurr = [idx, 2]
break
else:
leftDistance = abs(leftCurr[0] - idx) + abs(leftCurr[1] - 1)
rightDistace = abs(rightCurr[0] - idx) + abs(rightCurr[1] - 1)
if leftDistance == rightDistace:
if hand == 'left':
answer += 'L'
leftCurr = [idx, 1]
break
else:
answer += 'R'
rightCurr = [idx, 1]
break
else:
if leftDistance > rightDistace:
answer += 'R'
rightCurr = [idx, 1]
break
else:
answer += 'L'
leftCurr = [idx, 1]
break
return answer
# print(solution([1, 3, 4, 5, 8, 2, 1, 4, 5, 9, 5], "right"))
print(solution([7, 0, 8, 2, 8, 3, 1, 5, 7, 6, 2], 'left'))
| 3.453125
| 3
|
ReverseWordsInAString.py
|
adesh-gadge/LeetCodePractice
| 0
|
12783640
|
<gh_stars>0
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
ans=''
words=s.split()
for i in range(len(words)):
ans+=words[len(words)-1-i]
if i!=len(words)-1:
ans+=' '
return ans
# or return " ".join(reversed(s.split()))
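
if __name__ == "__main__":
    # Quick illustrative check, not part of the original submission.
    print(Solution().reverseWords("the sky is blue"))  # -> "blue is sky the"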
| 3.296875
| 3
|
src/book.py
|
psikon/pitft-scripts
| 0
|
12783641
|
<filename>src/book.py<gh_stars>0
class Book:
    '''Class for storing all relevant information for playing an audio book and
    showing information in the interfaces. The separate chapters of an audio book
    are stored together to ensure unbroken playback.'''
def __init__(self, path, title, artist, album, chapter, chapter_playtime,
total_playtime, position, cover):
self.path = path
self.title = title
self.artist = artist
self.album = album
self.chapter = chapter
self.chapter_playtime = chapter_playtime
self.total_playtime = total_playtime
self.position = float(position)
self.cover = cover
def get_path(self):
        ''' return an array with all paths for the separate chapters '''
return self.path
def get_num_chapter(self):
''' count the number of chapters in the path array '''
return len(self.path)
def get_chapter(self):
        ''' get the chapter currently being played '''
return self.chapter
def get_title(self):
''' get title of the book '''
return self.title
def get_artist(self):
''' get artist of the book '''
return self.artist
def get_album(self):
''' if contained in id3 tags return the album name '''
        return self.album
def get_chapter_playtime(self):
        ''' get the playtime of the current chapter '''
return self.chapter_playtime
def get_total_playtime(self):
''' sum the playtime of every chapters of a book '''
return self.total_playtime
def get_pos(self):
        ''' get the current position in the chapter '''
return self.position
def set_pos(self, milliseconds):
''' set position to value '''
self.position = milliseconds
def get_cover(self):
''' return the path to the cover image '''
return self.cover
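
# A minimal usage sketch, not part of the original module; the chapter paths,
# metadata values, and cover path below are hypothetical examples.
if __name__ == '__main__':
    demo = Book(path=['/audio/book/ch01.mp3', '/audio/book/ch02.mp3'],
                title='Example Title', artist='Example Author', album='Example Album',
                chapter=1, chapter_playtime=1800, total_playtime=3600,
                position=0, cover='/audio/book/cover.jpg')
    print(demo.get_title())        # Example Title
    print(demo.get_num_chapter())  # 2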
| 3.5625
| 4
|
scripts/tweetsearch.py
|
xytosis/dataworx
| 6
|
12783642
|
# Class to store tweet information
import oauth2
import urllib2
import json
import sys
class Tweet:
def __init__(self, date, idd, text, username, location):
self.date = date
self.id = idd
self.text = text.replace("\n", " ")
self.username = username.replace("\n", " ")
self.location = location.replace("\n", "")
def __repr__(self):
return (self.username.encode("utf8") + "|#|" + str(self.id) + "|#|"
+ self.date.encode('utf8') + "|#|" + self.location.encode("utf8")
+ "|#|" + self.text.encode('utf8') + "|#|" + "0" + "\n")
TWITTER_CONSUMER_KEY = TODO
TWITTER_CONSUMER_SECRET = TODO
TWITTER_TOKEN = TODO
TWITTER_TOKEN_SECRET = TODO
# Searches for target in twitter and returns a list of tweet objects
def search(target):
url_params = {
"q": target,
"result_type": "recent"
}
url = "https://{0}{1}?".format("api.twitter.com", "/1.1/search/tweets.json")
consumer = oauth2.Consumer(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
oauth_request = oauth2.Request(method="GET", url=url, parameters=url_params)
oauth_request.update({
"oauth_nonce": oauth2.generate_nonce(),
"oauth_timestamp": oauth2.generate_timestamp(),
"oauth_token": TWITTER_TOKEN,
"oauth_consumer_key": TWITTER_CONSUMER_KEY
})
token = oauth2.Token(TWITTER_TOKEN, TWITTER_TOKEN_SECRET)
oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
signed_url = oauth_request.to_url()
try:
conn = urllib2.urlopen(signed_url, None)
except Exception, e:
return
try:
response = json.loads(conn.read())
finally:
conn.close()
tweets = []
for tweet in response["statuses"]:
tweets.append(Tweet(tweet["created_at"], tweet["id"], tweet["text"], tweet["user"]["name"]
, tweet["user"]["location"]))
return tweets
# Writes to file a collection of tweet objects. Each field in the tweet object is
# separated by |#| to make splitting easier
def main():
if len(sys.argv) != 3:
print "Usage: python tweetsearch.py <search string> <file to save at>"
return
results = search(sys.argv[1])
myfile = open(sys.argv[2], "a+")
for t in results:
myfile.write(str(t))
myfile.close()
if __name__ == '__main__':
main()
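
# A minimal sketch (not part of the original script) showing how a line written by
# main() above can be split back into its fields; the "tweets.txt" filename is a
# hypothetical example.
def read_saved_tweets(path="tweets.txt"):
    records = []
    for line in open(path):
        # Fields were joined with the "|#|" separator by Tweet.__repr__
        username, tweet_id, date, location, text, flag = line.rstrip("\n").split("|#|")
        records.append({"username": username, "id": tweet_id, "date": date,
                        "location": location, "text": text, "flag": flag})
    return records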
| 3.359375
| 3
|
src/tests/decision_tree_test.py
|
kafkasl/cart_and_random_forests
| 0
|
12783643
|
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from decision_tree import *
from decision_tree import DecisionTree
class DecisionTreeTests(unittest.TestCase):
def test_read_data(self):
result_data = [['FALSE', 'high', 'hot', 'sunny', 'no'],
['TRUE', 'high', 'hot', 'sunny', 'no'],
['FALSE', 'high', 'hot', 'overcast', 'yes'],
['FALSE', 'high', 'mild', 'rainy', 'yes'],
['FALSE', 'normal', 'cool', 'rainy', 'yes'],
['TRUE', 'normal', 'cool', 'rainy', 'no'],
['TRUE', 'normal', 'cool', 'overcast', 'yes'],
['FALSE', 'high', 'mild', 'sunny', 'no'],
['FALSE', 'normal', 'cool', 'sunny', 'yes'],
['FALSE', 'normal', 'mild', 'rainy', 'yes'],
['TRUE', 'normal', 'mild', 'sunny', 'yes'],
['TRUE', 'high', 'mild', 'overcast', 'yes'],
['FALSE', 'normal', 'hot', 'overcast', 'yes'],
['TRUE', 'high', 'mild', 'rainy', 'no']]
self.assertEquals(DecisionTree.read_libsvm_data("../resources/weatherNominalTr.txt"), result_data)
def test_gini_index_1(self):
data = [[[1, 1], [1, 1]], [[0, 0], [0, 0]]]
self.failUnless(DecisionTree([item for sublist in data for item in sublist] ).gini_index(data) == 0.0)
def test_gini_index_2(self):
data = [[[1, 1], [0, 0]], [[1, 1], [0, 0]]]
self.failUnless(DecisionTree([item for sublist in data for item in sublist] ).gini_index(data) == 1.0)
def test_gini_index_3(self):
data = [[[1, 0], [1, 0]], [[1, 1], [0.3, 1], [0, 0], [0.6, 1]]]
self.failUnless(DecisionTree([item for sublist in data for item in sublist]).gini_index(data) == 0.375)
def test_gini_index_4(self):
data = [[[1, 0], [1, 0], [1, 0], [0.3, 1]], [[0, 0], [0.6, 1]]]
result = DecisionTree([item for sublist in data for item in sublist]).gini_index(data)
print(result)
self.failUnless(result == 0.875)
def test_split_1(self):
to_split = [[1, 0], [1, 0], [1, 1], [0.3, 1], [0, 0], [0.6, 1]]
splitted = [[[0.3, 1], [0, 0]], [[1, 0], [1, 0], [1, 1], [0.6, 1]]]
self.assertEquals(DecisionTree(to_split).test_split(to_split, 0, 0.5), splitted)
# def test_split_2(self):
# to_split = [[1], [2], [3]]
# with self.assertRaises(IndexError) as context:
# DecisionTree(to_split).test_split(to_split, 1, 0.5)
# def test_get_split(self):
# dataset = [[2.771244718, 1.784783929, 0],
# [1.728571309, 1.169761413, 0],
# [3.678319846, 2.81281357, 0],
# [3.961043357, 2.61995032, 0],
# [2.999208922, 2.209014212, 0],
# [7.497545867, 3.162953546, 1],
# [9.00220326, 3.339047188, 1],
# [7.444542326, 0.476683375, 1],
# [10.12493903, 3.234550982, 1],
# [6.642287351, 3.319983761, 1]]
#
# split = DecisionTree(dataset).get_split(dataset)
# group_1 = [[2.771244718, 1.784783929, 0], [1.728571309, 1.169761413, 0], [3.678319846, 2.81281357, 0],
# [3.961043357, 2.61995032, 0], [2.999208922, 2.209014212, 0]]
# group_2 = [[7.497545867, 3.162953546, 1], [9.00220326, 3.339047188, 1], [7.444542326, 0.476683375, 1],
# [10.12493903, 3.234550982, 1], [6.642287351, 3.319983761, 1]]
# result = [0, 6.642, group_1, group_2]
# self.assertEquals([split['index'], round(split['value'], 3), split['groups'][0], split['groups'][1]], result)
def test_to_terminal_1(self):
dataset = [[2.771244718, 1.784783929, 0],
[1.728571309, 1.169761413, 0],
[3.678319846, 2.81281357, 0],
[3.961043357, 2.61995032, 0],
[2.999208922, 2.209014212, 1],
[7.497545867, 3.162953546, 1],
[9.00220326, 3.339047188, 1],
[7.444542326, 0.476683375, 1],
[10.12493903, 3.234550982, 1],
[6.642287351, 3.319983761, 1]]
self.assertEquals(TerminalNode(dataset).group, dataset)
# def test_build_tree(self):
# n0 = TerminalNode([0])
# n1 = TerminalNode([0, 0, 0, 0])
# n2 = TerminalNode([1])
# n3 = TerminalNode([1, 1, 1, 1])
# sn0 = SplitNode[0]
# dataset = [[2.771244718, 1.784783929, 0],
# [1.728571309, 1.169761413, 0],
# [3.678319846, 2.81281357, 0],
# [3.961043357, 2.61995032, 0],
# [2.999208922, 2.209014212, 0],
# [7.497545867, 3.162953546, 1],
# [9.00220326, 3.339047188, 1],
# [7.444542326, 0.476683375, 1],
# [10.12493903, 3.234550982, 0],
# [6.642287351, 3.319983761, 1]]
#
# tree = DecisionTree(dataset, 2, 1)
# tree.print(tree)
# print('Split: [X%d < %.3f]' % ((split['index']+1), split['value']))
def main():
unittest.main()
if __name__ == '__main__':
main()
| 2.71875
| 3
|
tts/setup.py
|
aws-robotics/tts-ros2
| 8
|
12783644
|
<filename>tts/setup.py
from setuptools import find_packages
from setuptools import setup
package_name = 'tts'
setup(
name=package_name,
version='2.0.2',
packages=find_packages(exclude=['test']),
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name,
['package.xml', 'launch/tts.launch.py']),
],
package_data={
'tts': ['services/data/models/polly/2016-06-10/*.json', 'services/data/*.ogg'],
},
install_requires=['setuptools'],
zip_safe=True,
author='RoboMaker',
author_email='<EMAIL>',
maintainer='RoboMaker',
maintainer_email='<EMAIL>',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description=(
'TTS'
),
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'polly_server = tts.services.amazonpolly:main',
'synthesizer_server = tts.services.synthesizer:main',
'voicer = tts.scripts.voicer:main',
],
},
)
| 1.429688
| 1
|
services/Baseline_Approach/cells_lookup_strategies/all_tokens.py
|
muhammad-abbady/JenTab
| 9
|
12783645
|
<gh_stars>1-10
import itertools
from config import MAX_TOKENS, AllToken_priority, CLEAN_CELL_SEPARATOR
from utils.util import load_stop_words
from cells_lookup_strategies.strategy import *
class AllTokensLookup(CellsStrategy):
def __init__(self):
CellsStrategy.__init__(self, name='allTokens', priority=AllToken_priority)
    # Expand the cell into all combinations of its tokens, with MAX_TOKENS as an upper limit.
def process_cell_values(self, cell):
tokens = cell['clean_val'].split(CLEAN_CELL_SEPARATOR)
# remove stop words
stop_words = load_stop_words()
        # unique tokens
tokens = list(set(tokens))
tokens = [t for t in tokens if t not in stop_words and t != ""]
if len(tokens) > MAX_TOKENS:
# use only the longest tokens
tokens.sort(key=lambda x: len(x), reverse=True)
tokens = tokens[0:MAX_TOKENS]
res = []
for L in range(0, len(tokens) + 1):
for subset in itertools.combinations(tokens, L):
if (len(subset) > 0):
res = res + [" ".join(subset)]
return res
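
# A small stand-alone illustration (not part of the lookup service) of the
# combination expansion performed above; the tokens below are made-up examples.
if __name__ == "__main__":
    demo_tokens = ["new", "york", "city"]
    combos = [" ".join(subset)
              for L in range(1, len(demo_tokens) + 1)
              for subset in itertools.combinations(demo_tokens, L)]
    print(combos)
    # ['new', 'york', 'city', 'new york', 'new city', 'york city', 'new york city']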
| 2.71875
| 3
|
xpath_blindeye/retrieve.py
|
OnBloom/xpath-blindeye
| 2
|
12783646
|
import os
import hashlib
import logging
import traceback
from typing import Union
from xml.etree.ElementTree import Element, SubElement, parse, ElementTree
from xpath_blindeye.xnode import XNode
from xpath_blindeye.util import prettify
from xpath_blindeye.config import ROOT_PATH, URL
logger = logging.getLogger("xpath-blindeye")
def retrieve():
url_md5 = hashlib.md5(URL.encode())
try:
os.mkdir('./saved_requests')
except FileExistsError:
pass
save_location = './saved_requests/{}.xml'.format(url_md5.hexdigest())
saved_root = None
try:
saved_tree = parse(save_location)
saved_root = saved_tree.getroot()
except FileNotFoundError:
pass
root_path = ROOT_PATH
root_node_name = XNode.get_node_name(root_path)
logger.info("Root node name is " + root_node_name)
xml_root = Element(root_node_name)
try:
visit_node(root_node_name, root_path, None, xml_root, saved_root)
except KeyboardInterrupt:
pass
except Exception as e:
traceback.print_exc()
finally:
print(prettify(xml_root))
result = input("\n\nOverwrite last xml save?(Y/N)")
if result.lower() != "y":
exit(0)
et = ElementTree(xml_root)
logger.info("Saving...")
et.write(save_location)
def visit_node(node_name: str, path: str, parent: Union[Element, None], xml_root: Element, saved_root: Element):
if parent is None:
node = xml_root
else:
node = SubElement(parent, node_name) # type: Element
xnode = XNode(node_name, path, parent, xml_root, saved_root)
# Get and add attributes
node.attrib = xnode.get_attributes()
# Get and add text value
node.text = xnode.get_node_text()
# Get children
child_names = xnode.get_child_node_names()
# Do last
for child_name, child_path in child_names:
visit_node(node_name=child_name, path=child_path, parent=node, xml_root=xml_root, saved_root=saved_root)
| 2.53125
| 3
|
src/python/pants/backend/scala/bsp/rules.py
|
danxmoran/pants
| 0
|
12783647
|
<filename>src/python/pants/backend/scala/bsp/rules.py
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import textwrap
from dataclasses import dataclass
from pathlib import Path
from typing import Callable
from pants.backend.scala.bsp.spec import (
ScalaBuildTarget,
ScalacOptionsItem,
ScalacOptionsParams,
ScalacOptionsResult,
ScalaMainClassesParams,
ScalaMainClassesResult,
ScalaPlatform,
ScalaTestClassesParams,
ScalaTestClassesResult,
)
from pants.backend.scala.subsystems.scala import ScalaSubsystem
from pants.backend.scala.target_types import ScalaFieldSet, ScalaSourceField
from pants.base.build_root import BuildRoot
from pants.bsp.protocol import BSPHandlerMapping
from pants.bsp.spec.base import BuildTargetIdentifier
from pants.bsp.spec.targets import DependencyModule
from pants.bsp.util_rules.lifecycle import BSPLanguageSupport
from pants.bsp.util_rules.targets import (
BSPBuildTargetsMetadataRequest,
BSPBuildTargetsMetadataResult,
BSPCompileRequest,
BSPCompileResult,
BSPDependencyModulesRequest,
BSPDependencyModulesResult,
BSPResolveFieldFactoryRequest,
BSPResolveFieldFactoryResult,
BSPResourcesRequest,
BSPResourcesResult,
)
from pants.core.util_rules.system_binaries import BashBinary, ReadlinkBinary, ReadlinkBinaryRequest
from pants.engine.addresses import Addresses
from pants.engine.fs import AddPrefix, CreateDigest, Digest, FileContent, MergeDigests, Workspace
from pants.engine.internals.native_engine import Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import Process, ProcessResult
from pants.engine.rules import _uncacheable_rule, collect_rules, rule, rule_helper
from pants.engine.target import CoarsenedTarget, CoarsenedTargets, FieldSet, Targets
from pants.engine.unions import UnionRule
from pants.jvm.bsp.compile import _jvm_bsp_compile, jvm_classes_directory
from pants.jvm.bsp.compile import rules as jvm_compile_rules
from pants.jvm.bsp.resources import _jvm_bsp_resources
from pants.jvm.bsp.resources import rules as jvm_resources_rules
from pants.jvm.bsp.spec import JvmBuildTarget, MavenDependencyModule, MavenDependencyModuleArtifact
from pants.jvm.compile import ClasspathEntry, ClasspathEntryRequest, ClasspathEntryRequestFactory
from pants.jvm.jdk_rules import DefaultJdk, JdkEnvironment, JdkRequest
from pants.jvm.resolve.common import ArtifactRequirement, ArtifactRequirements, Coordinate
from pants.jvm.resolve.coursier_fetch import (
CoursierLockfileEntry,
CoursierResolvedLockfile,
ToolClasspath,
ToolClasspathRequest,
)
from pants.jvm.resolve.key import CoursierResolveKey
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import JvmArtifactFieldSet, JvmJdkField, JvmResolveField
from pants.util.logging import LogLevel
LANGUAGE_ID = "scala"
_logger = logging.getLogger(__name__)
class ScalaBSPLanguageSupport(BSPLanguageSupport):
language_id = LANGUAGE_ID
can_compile = True
can_provide_resources = True
@dataclass(frozen=True)
class ScalaMetadataFieldSet(FieldSet):
required_fields = (ScalaSourceField, JvmResolveField, JvmJdkField)
source: ScalaSourceField
resolve: JvmResolveField
jdk: JvmJdkField
class ScalaBSPResolveFieldFactoryRequest(BSPResolveFieldFactoryRequest):
resolve_prefix = "jvm"
class ScalaBSPBuildTargetsMetadataRequest(BSPBuildTargetsMetadataRequest):
language_id = LANGUAGE_ID
can_merge_metadata_from = ("java",)
field_set_type = ScalaMetadataFieldSet
@dataclass(frozen=True)
class ThirdpartyModulesRequest:
addresses: Addresses
@dataclass(frozen=True)
class ThirdpartyModules:
resolve: CoursierResolveKey
entries: dict[CoursierLockfileEntry, ClasspathEntry]
merged_digest: Digest
@rule
async def collect_thirdparty_modules(
request: ThirdpartyModulesRequest,
classpath_entry_request: ClasspathEntryRequestFactory,
) -> ThirdpartyModules:
coarsened_targets = await Get(CoarsenedTargets, Addresses, request.addresses)
resolve = await Get(CoursierResolveKey, CoarsenedTargets, coarsened_targets)
lockfile = await Get(CoursierResolvedLockfile, CoursierResolveKey, resolve)
applicable_lockfile_entries: dict[CoursierLockfileEntry, CoarsenedTarget] = {}
for ct in coarsened_targets.coarsened_closure():
for tgt in ct.members:
if not JvmArtifactFieldSet.is_applicable(tgt):
continue
artifact_requirement = ArtifactRequirement.from_jvm_artifact_target(tgt)
entry = get_entry_for_coord(lockfile, artifact_requirement.coordinate)
if not entry:
_logger.warning(
f"No lockfile entry for {artifact_requirement.coordinate} in resolve {resolve.name}."
)
continue
applicable_lockfile_entries[entry] = ct
classpath_entries = await MultiGet(
Get(
ClasspathEntry,
ClasspathEntryRequest,
classpath_entry_request.for_targets(component=target, resolve=resolve),
)
for target in applicable_lockfile_entries.values()
)
resolve_digest = await Get(Digest, MergeDigests(cpe.digest for cpe in classpath_entries))
return ThirdpartyModules(
resolve,
dict(zip(applicable_lockfile_entries, classpath_entries)),
resolve_digest,
)
@rule_helper
async def _materialize_scala_runtime_jars(scala_version: str) -> Snapshot:
tool_classpath = await Get(
ToolClasspath,
ToolClasspathRequest(
artifact_requirements=ArtifactRequirements.from_coordinates(
[
Coordinate(
group="org.scala-lang",
artifact="scala-compiler",
version=scala_version,
),
Coordinate(
group="org.scala-lang",
artifact="scala-library",
version=scala_version,
),
]
),
),
)
return await Get(
Snapshot,
AddPrefix(tool_classpath.content.digest, f"jvm/scala-runtime/{scala_version}"),
)
@rule
def bsp_resolve_field_factory(
request: ScalaBSPResolveFieldFactoryRequest,
jvm: JvmSubsystem,
) -> BSPResolveFieldFactoryResult:
return BSPResolveFieldFactoryResult(
lambda target: target.get(JvmResolveField).normalized_value(jvm)
)
@rule
async def bsp_resolve_scala_metadata(
request: ScalaBSPBuildTargetsMetadataRequest,
bash: BashBinary,
jvm: JvmSubsystem,
scala: ScalaSubsystem,
build_root: BuildRoot,
) -> BSPBuildTargetsMetadataResult:
resolves = {fs.resolve.normalized_value(jvm) for fs in request.field_sets}
jdk_versions = {fs.jdk for fs in request.field_sets}
if len(resolves) > 1:
raise ValueError(
"Cannot provide Scala metadata for multiple resolves. Please set the "
"`resolve = jvm:$resolve` field in your `[experimental-bsp].groups_config_files` to "
"select the relevant resolve to use."
)
(resolve,) = resolves
scala_version = scala.version_for_resolve(resolve)
scala_runtime = await _materialize_scala_runtime_jars(scala_version)
#
    # Extract the JDK paths from a lawful-evil process so we can supply them to the IDE.
#
# Why lawful-evil?
# This script relies on implementation details of the Pants JVM execution environment,
# namely that the Coursier Archive Cache (i.e. where JDKs are extracted to after download)
# is stored into a predictable location on disk and symlinked into the sandbox on process
# startup. The script reads the symlink of the cache directory, and outputs the linked
# location of the JDK (according to Coursier), and we use that to calculate the permanent
# location of the JDK.
#
# Please don't do anything like this except as a last resort.
#
# The maximum JDK version will be compatible with all the specified targets
jdk_requests = [JdkRequest.from_field(version) for version in jdk_versions]
jdk_request = max(jdk_requests, key=_jdk_request_sort_key(jvm))
    jdk, readlink = await MultiGet(
Get(JdkEnvironment, JdkRequest, jdk_request),
Get(ReadlinkBinary, ReadlinkBinaryRequest()),
)
if any(i.version == DefaultJdk.SYSTEM for i in jdk_requests):
system_jdk = await Get(JdkEnvironment, JdkRequest, JdkRequest.SYSTEM)
if system_jdk.jre_major_version > jdk.jre_major_version:
jdk = system_jdk
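    # The short script below prints two lines: the resolved (non-symlinked) location of
    # the Coursier archive cache and JAVA_HOME as seen inside the sandbox. Both lines are
    # parsed further down to reconstruct the permanent JDK location outside the sandbox.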
cmd = "leak_paths.sh"
leak_jdk_sandbox_paths = textwrap.dedent(
f"""\
# Script to leak JDK cache paths out of Coursier sandbox so that BSP can use them.
{readlink.path} {jdk.coursier.cache_dir}
{jdk.java_home_command}
"""
)
leak_sandbox_path_digest = await Get(
Digest,
CreateDigest(
[
FileContent(
cmd,
leak_jdk_sandbox_paths.encode("utf-8"),
is_executable=True,
),
]
),
)
leaked_paths = await Get(
ProcessResult,
Process(
[
bash.path,
cmd,
],
input_digest=leak_sandbox_path_digest,
immutable_input_digests=jdk.immutable_input_digests,
env=jdk.env,
use_nailgun=(),
description="Report JDK cache paths for BSP",
append_only_caches=jdk.append_only_caches,
level=LogLevel.TRACE,
),
)
cache_dir, jdk_home = leaked_paths.stdout.decode().strip().split("\n")
_, sep, suffix = jdk_home.partition(jdk.coursier.cache_dir)
if sep:
coursier_java_home = cache_dir + suffix
else:
        # Partition failed: jdk_home is not under the Coursier cache, so it is most likely a system JDK.
coursier_java_home = jdk_home
scala_jar_uris = tuple(
build_root.pathlib_path.joinpath(".pants.d/bsp").joinpath(p).as_uri()
for p in scala_runtime.files
)
jvm_build_target = JvmBuildTarget(
java_home=Path(coursier_java_home).as_uri(),
java_version=f"1.{jdk.jre_major_version}",
)
return BSPBuildTargetsMetadataResult(
metadata=ScalaBuildTarget(
scala_organization="org.scala-lang",
scala_version=scala_version,
scala_binary_version=".".join(scala_version.split(".")[0:2]),
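            # e.g. a scala_version of "2.13.8" (illustrative) yields a binary version of "2.13"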
platform=ScalaPlatform.JVM,
jars=scala_jar_uris,
jvm_build_target=jvm_build_target,
),
digest=scala_runtime.digest,
)
def _jdk_request_sort_key(
jvm: JvmSubsystem,
) -> Callable[[JdkRequest,], tuple[int, ...]]:
def sort_key_function(request: JdkRequest) -> tuple[int, ...]:
if request == JdkRequest.SYSTEM:
return (-1,)
version_str = request.version if isinstance(request.version, str) else jvm.jdk
_, version = version_str.split(":")
return tuple(int(i) for i in version.split("."))
return sort_key_function
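# Illustrative ordering produced by the sort key above (version strings are hypothetical):
#   JdkRequest.SYSTEM  -> (-1,)
#   "temurin:11"       -> (11,)
#   "temurin:17.0.2"   -> (17, 0, 2)
# so max(..., key=...) prefers the highest explicitly requested version and only falls back
# to the system JDK when no explicit version is requested.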
# -----------------------------------------------------------------------------------------------
# Scalac Options Request
# See https://build-server-protocol.github.io/docs/extensions/scala.html#scalac-options-request
# -----------------------------------------------------------------------------------------------
class ScalacOptionsHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/scalacOptions"
request_type = ScalacOptionsParams
response_type = ScalacOptionsResult
@dataclass(frozen=True)
class HandleScalacOptionsRequest:
bsp_target_id: BuildTargetIdentifier
@dataclass(frozen=True)
class HandleScalacOptionsResult:
item: ScalacOptionsItem
@_uncacheable_rule
async def handle_bsp_scalac_options_request(
request: HandleScalacOptionsRequest,
build_root: BuildRoot,
workspace: Workspace,
) -> HandleScalacOptionsResult:
targets = await Get(Targets, BuildTargetIdentifier, request.bsp_target_id)
thirdparty_modules = await Get(
ThirdpartyModules, ThirdpartyModulesRequest(Addresses(tgt.address for tgt in targets))
)
resolve = thirdparty_modules.resolve
resolve_digest = await Get(
Digest, AddPrefix(thirdparty_modules.merged_digest, f"jvm/resolves/{resolve.name}/lib")
)
workspace.write_digest(resolve_digest, path_prefix=".pants.d/bsp")
classpath = tuple(
build_root.pathlib_path.joinpath(
f".pants.d/bsp/jvm/resolves/{resolve.name}/lib/{filename}"
).as_uri()
for cp_entry in thirdparty_modules.entries.values()
for filename in cp_entry.filenames
)
return HandleScalacOptionsResult(
ScalacOptionsItem(
target=request.bsp_target_id,
options=(),
classpath=classpath,
class_directory=build_root.pathlib_path.joinpath(
f".pants.d/bsp/{jvm_classes_directory(request.bsp_target_id)}"
).as_uri(),
)
)
@rule
async def bsp_scalac_options_request(request: ScalacOptionsParams) -> ScalacOptionsResult:
results = await MultiGet(
Get(HandleScalacOptionsResult, HandleScalacOptionsRequest(btgt)) for btgt in request.targets
)
return ScalacOptionsResult(items=tuple(result.item for result in results))
# -----------------------------------------------------------------------------------------------
# Scala Main Classes Request
# See https://build-server-protocol.github.io/docs/extensions/scala.html#scala-main-classes-request
# -----------------------------------------------------------------------------------------------
class ScalaMainClassesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/scalaMainClasses"
request_type = ScalaMainClassesParams
response_type = ScalaMainClassesResult
@rule
async def bsp_scala_main_classes_request(request: ScalaMainClassesParams) -> ScalaMainClassesResult:
# TODO: This is a stub. VSCode/Metals calls this RPC and expects it to exist.
return ScalaMainClassesResult(
items=(),
origin_id=request.origin_id,
)
# -----------------------------------------------------------------------------------------------
# Scala Test Classes Request
# See https://build-server-protocol.github.io/docs/extensions/scala.html#scala-test-classes-request
# -----------------------------------------------------------------------------------------------
class ScalaTestClassesHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/scalaTestClasses"
request_type = ScalaTestClassesParams
response_type = ScalaTestClassesResult
@rule
async def bsp_scala_test_classes_request(request: ScalaTestClassesParams) -> ScalaTestClassesResult:
# TODO: This is a stub. VSCode/Metals calls this RPC and expects it to exist.
return ScalaTestClassesResult(
items=(),
origin_id=request.origin_id,
)
# -----------------------------------------------------------------------------------------------
# Dependency Modules
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class ScalaBSPDependencyModulesRequest(BSPDependencyModulesRequest):
field_set_type = ScalaMetadataFieldSet
def get_entry_for_coord(
lockfile: CoursierResolvedLockfile, coord: Coordinate
) -> CoursierLockfileEntry | None:
for entry in lockfile.entries:
if entry.coord == coord:
return entry
return None
@rule
async def scala_bsp_dependency_modules(
request: ScalaBSPDependencyModulesRequest,
build_root: BuildRoot,
) -> BSPDependencyModulesResult:
thirdparty_modules = await Get(
ThirdpartyModules,
ThirdpartyModulesRequest(Addresses(fs.address for fs in request.field_sets)),
)
resolve = thirdparty_modules.resolve
resolve_digest = await Get(
Digest, AddPrefix(thirdparty_modules.merged_digest, f"jvm/resolves/{resolve.name}/lib")
)
modules = [
DependencyModule(
name=f"{entry.coord.group}:{entry.coord.artifact}",
version=entry.coord.version,
data=MavenDependencyModule(
organization=entry.coord.group,
name=entry.coord.artifact,
version=entry.coord.version,
scope=None,
artifacts=tuple(
MavenDependencyModuleArtifact(
uri=build_root.pathlib_path.joinpath(
f".pants.d/bsp/jvm/resolves/{resolve.name}/lib/{filename}"
).as_uri()
)
for filename in cp_entry.filenames
),
),
)
for entry, cp_entry in thirdparty_modules.entries.items()
]
return BSPDependencyModulesResult(
modules=tuple(modules),
digest=resolve_digest,
)
# -----------------------------------------------------------------------------------------------
# Compile Request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class ScalaBSPCompileRequest(BSPCompileRequest):
field_set_type = ScalaFieldSet
@rule
async def bsp_scala_compile_request(
request: ScalaBSPCompileRequest,
classpath_entry_request: ClasspathEntryRequestFactory,
) -> BSPCompileResult:
result: BSPCompileResult = await _jvm_bsp_compile(request, classpath_entry_request)
return result
# -----------------------------------------------------------------------------------------------
# Resources Request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class ScalaBSPResourcesRequest(BSPResourcesRequest):
field_set_type = ScalaFieldSet
@rule
async def bsp_scala_resources_request(
request: ScalaBSPResourcesRequest,
build_root: BuildRoot,
) -> BSPResourcesResult:
result: BSPResourcesResult = await _jvm_bsp_resources(request, build_root)
return result
def rules():
return (
*collect_rules(),
*jvm_compile_rules(),
*jvm_resources_rules(),
UnionRule(BSPLanguageSupport, ScalaBSPLanguageSupport),
UnionRule(BSPBuildTargetsMetadataRequest, ScalaBSPBuildTargetsMetadataRequest),
UnionRule(BSPResolveFieldFactoryRequest, ScalaBSPResolveFieldFactoryRequest),
UnionRule(BSPHandlerMapping, ScalacOptionsHandlerMapping),
UnionRule(BSPHandlerMapping, ScalaMainClassesHandlerMapping),
UnionRule(BSPHandlerMapping, ScalaTestClassesHandlerMapping),
UnionRule(BSPCompileRequest, ScalaBSPCompileRequest),
UnionRule(BSPResourcesRequest, ScalaBSPResourcesRequest),
UnionRule(BSPDependencyModulesRequest, ScalaBSPDependencyModulesRequest),
)
| 1.53125
| 2
|
src/cogs/commands.py
|
thekraftyman/discord-bot-starter
| 0
|
12783648
|
<gh_stars>0
# commands.py
# By: thekraftyman
# import packages
import discord
from discord.ext import commands
from src.lib.core_funcs import *
from random import choice
import subprocess
import asyncio
import random
import json
import os
class Bot_Commands(commands.Cog):
def __init__(self,client):
self.client = client
# ---------------------------
# Add commands below
# Sarcasm command
@commands.command(name='sarcasm', help='Returns your input... but sarcastically')
async def sarcasm(self,ctx, *args):
        # bail out early if any argument is not a string
for arg in args:
if type(arg) != str:
return
# combine the args into one arg
arg = ' '.join(args)
toreturn = '' # string to return
arg_list = list(arg) # exploded argument
odd = True # is on odd char (starting at 1)
for char in arg_list:
if not char.isalpha(): # is not a letter
toreturn = toreturn + char
continue
            if odd:  # uppercase letters at odd positions (1st, 3rd, ...)
toreturn = toreturn + char.upper()
odd = False # switch
else: # make lowercase
toreturn = toreturn + char.lower()
odd = True # switch
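        # e.g. "hello there" -> "HeLlO tHeRe" (illustrative; non-letters are copied
        # through unchanged and do not advance the alternation)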
# print out the sarcastic message and delete the invoking command
await asyncio.gather(
async_message_send_with_context(ctx, toreturn),
async_delete_message_with_context(ctx)
)
# return bot version
@commands.command(name='version', help='Returns the bot version')
async def version(self,ctx, *args):
await ctx.send(get_version())
# for choosing between inputs randomly
    @commands.command(name='choose', help='Returns one of multiple inputs randomly')
async def choose(self,ctx, *args):
for arg in args:
if type(arg) != str:
return
await ctx.send(choice(args))
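        # e.g. "<prefix>choose pizza tacos sushi" replies with one of the three options,
        # picked uniformly at random (the command prefix depends on the bot's configuration)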
    # Don't add commands below this line
# ---------------------------
# add the cog
def setup(client):
client.add_cog(Bot_Commands(client))
| 2.8125
| 3
|
statistic_scripts/job_arrival_graph.py
|
lfdversluis/wta-tools
| 3
|
12783649
|
<reponame>lfdversluis/wta-tools<filename>statistic_scripts/job_arrival_graph.py<gh_stars>1-10
import math
import os
import matplotlib
from matplotlib.ticker import ScalarFormatter
from pyspark.sql import SparkSession
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sortedcontainers import SortedDict
class JobArrivalGraph(object):
def __init__(self, workload_name, df, image_folder_location):
self.workload_name = workload_name
self.df = df
self.folder = image_folder_location
def generate_content(self):
plot_location = self.generate_graphs()
return None, plot_location
def generate_graphs(self, show=False):
filename = "job_arrival_{0}.png".format(self.workload_name)
if os.path.isfile(os.path.join(self.folder, filename)):
return filename
fig = plt.figure(figsize=(9, 7))
granularity_order = [
"Second",
"Minute",
"Hour",
"Day"
]
granularity_lambdas = {
"Second": lambda x: int(x / 1000),
"Minute": lambda x: int(x / 60 / 1000),
"Hour": lambda x: int(x / (60 * 60) / 1000),
"Day": lambda x: int(x / (60 * 60 * 24) / 1000),
}
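        # ts_submit appears to be expressed in milliseconds, hence the division by 1000
        # before bucketing submissions into seconds/minutes/hours/days.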
plot_count = 0
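        # plot_count indexes the 2x2 grid of panels (one per granularity), filled row by row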
for granularity in granularity_order:
job_arrivals = SortedDict()
for workflow in self.df.select("ts_submit").toPandas().itertuples():
submit_time = int(workflow.ts_submit)
submit_time = granularity_lambdas[granularity](submit_time)
if submit_time not in job_arrivals:
job_arrivals[submit_time] = 0
job_arrivals[submit_time] += 1
ax = plt.subplot2grid((2, 2), (int(math.floor(plot_count / 2)), (plot_count % 2)))
if sum(job_arrivals.values()) == 1:
ax.text(0.5, 0.5, 'Not available;\nTrace contains one workflow.', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes, fontsize=12)
ax.grid(False)
elif len(job_arrivals.keys()) == 1:
ax.text(0.5, 0.5, 'Not available;\nTrace is too small.', horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes, fontsize=12)
ax.grid(False)
else:
ax.plot(job_arrivals.keys(), job_arrivals.values(), color="black", linewidth=1.0)
ax.grid(True)
ax.locator_params(nbins=3, axis='y')
ax.set_xlim(0)
ax.set_ylim(0)
ax.margins(0.05)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.tick_params(axis='both', which='minor', labelsize=14)
ax.get_xaxis().get_offset_text().set_visible(False)
formatter = ScalarFormatter(useMathText=True)
formatter.set_powerlimits((-4, 5))
ax.get_xaxis().set_major_formatter(formatter)
fig.tight_layout() # Need to set this to be able to get the offset... for whatever reason
offset_text = ax.get_xaxis().get_major_formatter().get_offset()
ax.set_xlabel('Time{0} [{1}]'.format(f' {offset_text}' if len(offset_text) else "", granularity.lower()), fontsize=18)
ax.set_ylabel('Number of Workflows', fontsize=18)
plot_count += 1
# Rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
# fig.autofmt_xdate()
fig.tight_layout()
fig.savefig(os.path.join(self.folder, filename), dpi=600, format='png')
if show:
fig.show()
return filename
if __name__ == '__main__':
tasks_loc = "C:/Users/L/Downloads/Galaxy/tasks/schema-1.0"
spark = (SparkSession.builder
.master("local[5]")
.appName("WTA Analysis")
.config("spark.executor.memory", "3G")
.config("spark.driver.memory", "12G")
.getOrCreate())
task_df = spark.read.parquet(tasks_loc)
gne = JobArrivalGraph("test", task_df, ".")
gne.generate_graphs(show=True)
| 2.421875
| 2
|
test_libs/pyspec/eth2spec/test/phase_0/epoch_processing/test_process_crosslinks.py
|
prestonvanloon/eth2.0-specs
| 1
|
12783650
|
from copy import deepcopy
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.state import (
next_epoch,
next_slot
)
from eth2spec.test.helpers.block import apply_empty_block
from eth2spec.test.helpers.attestations import (
add_attestation_to_state,
fill_aggregate_attestation,
get_valid_attestation,
sign_attestation,
)
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
def run_process_crosslinks(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_crosslinks')
@with_all_phases
@spec_state_test
def test_no_attestations(spec, state):
yield from run_process_crosslinks(spec, state)
for shard in range(spec.SHARD_COUNT):
assert state.previous_crosslinks[shard] == state.current_crosslinks[shard]
@with_all_phases
@spec_state_test
def test_single_crosslink_update_from_current_epoch(spec, state):
next_epoch(spec, state)
attestation = get_valid_attestation(spec, state, signed=True)
fill_aggregate_attestation(spec, state, attestation)
add_attestation_to_state(spec, state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
assert len(state.current_epoch_attestations) == 1
shard = attestation.data.crosslink.shard
pre_crosslink = deepcopy(state.current_crosslinks[shard])
yield from run_process_crosslinks(spec, state)
assert state.previous_crosslinks[shard] != state.current_crosslinks[shard]
assert pre_crosslink != state.current_crosslinks[shard]
@with_all_phases
@spec_state_test
def test_single_crosslink_update_from_previous_epoch(spec, state):
next_epoch(spec, state)
attestation = get_valid_attestation(spec, state, signed=True)
fill_aggregate_attestation(spec, state, attestation)
add_attestation_to_state(spec, state, attestation, state.slot + spec.SLOTS_PER_EPOCH)
assert len(state.previous_epoch_attestations) == 1
shard = attestation.data.crosslink.shard
pre_crosslink = deepcopy(state.current_crosslinks[shard])
crosslink_deltas = spec.get_crosslink_deltas(state)
yield from run_process_crosslinks(spec, state)
assert state.previous_crosslinks[shard] != state.current_crosslinks[shard]
assert pre_crosslink != state.current_crosslinks[shard]
# ensure rewarded
for index in spec.get_crosslink_committee(
state,
attestation.data.target.epoch,
attestation.data.crosslink.shard):
assert crosslink_deltas[0][index] > 0
assert crosslink_deltas[1][index] == 0
@with_all_phases
@spec_state_test
def test_double_late_crosslink(spec, state):
if spec.get_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
print("warning: ignoring test, test-assumptions are incompatible with configuration")
return
next_epoch(spec, state)
state.slot += 4
attestation_1 = get_valid_attestation(spec, state, signed=True)
fill_aggregate_attestation(spec, state, attestation_1)
# add attestation_1 to next epoch
next_epoch(spec, state)
add_attestation_to_state(spec, state, attestation_1, state.slot + 1)
for _ in range(spec.SLOTS_PER_EPOCH):
attestation_2 = get_valid_attestation(spec, state)
if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard:
sign_attestation(spec, state, attestation_2)
break
next_slot(spec, state)
apply_empty_block(spec, state)
fill_aggregate_attestation(spec, state, attestation_2)
# add attestation_2 in the next epoch after attestation_1 has
# already updated the relevant crosslink
next_epoch(spec, state)
add_attestation_to_state(spec, state, attestation_2, state.slot + 1)
assert len(state.previous_epoch_attestations) == 1
assert len(state.current_epoch_attestations) == 0
crosslink_deltas = spec.get_crosslink_deltas(state)
yield from run_process_crosslinks(spec, state)
shard = attestation_2.data.crosslink.shard
# ensure that the current crosslinks were not updated by the second attestation
assert state.previous_crosslinks[shard] == state.current_crosslinks[shard]
# ensure no reward, only penalties for the failed crosslink
for index in spec.get_crosslink_committee(
state,
attestation_2.data.target.epoch,
attestation_2.data.crosslink.shard):
assert crosslink_deltas[0][index] == 0
assert crosslink_deltas[1][index] > 0
| 2.03125
| 2
|