| hexsha (stringlengths 40-40) | size (int64 4-1.02M) | ext (stringclasses 8 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4-209) | max_stars_repo_name (stringlengths 5-121) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24, nullable) | max_issues_repo_path (stringlengths 4-209) | max_issues_repo_name (stringlengths 5-121) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24, nullable) | max_forks_repo_path (stringlengths 4-209) | max_forks_repo_name (stringlengths 5-121) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24, nullable) | content (stringlengths 4-1.02M) | avg_line_length (float64 1.07-66.1k) | max_line_length (int64 4-266k) | alphanum_fraction (float64 0.01-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 56185b126c1c6cd1aee3411e9637e3f4ba5da646 | 4,933 | py | Python | glue/viewers/image/compat.py | sergiopasra/glue | c25a217a122a11818382672c99cb21f57a30636f | ["BSD-3-Clause"] | 1 | 2019-12-17T07:58:35.000Z | 2019-12-17T07:58:35.000Z | glue/viewers/image/compat.py | sergiopasra/glue | c25a217a122a11818382672c99cb21f57a30636f | ["BSD-3-Clause"] | null | null | null | glue/viewers/image/compat.py | sergiopasra/glue | c25a217a122a11818382672c99cb21f57a30636f | ["BSD-3-Clause"] | 1 | 2019-08-04T14:10:12.000Z | 2019-08-04T14:10:12.000Z |
from __future__ import absolute_import, division, print_function
import uuid
import numpy as np
from glue.viewers.image.state import ImageLayerState, ImageSubsetLayerState
from glue.viewers.scatter.state import ScatterLayerState
STATE_CLASS = {}
STATE_CLASS['ImageLayerArtist'] = ImageLayerState
STATE_CLASS['ScatterLayerArtist'] = ScatterLayerState
STATE_CLASS['SubsetImageLayerArtist'] = ImageSubsetLayerState
class DS9Compat(object):
@classmethod
def __setgluestate__(cls, rec, context):
result = cls()
for k, v in rec.items():
setattr(result, k, v)
return result
def update_image_viewer_state(rec, context):
"""
Given viewer session information, make sure the session information is
compatible with the current version of the viewers, and if not, update
the session information in-place.
"""
if '_protocol' not in rec:
# Note that files saved with protocol < 1 have bin settings saved per
# layer but they were always restricted to be the same, so we can just
# use the settings from the first layer
rec['state'] = {}
rec['state']['values'] = {}
# TODO: could generalize this into a mapping
properties = rec.pop('properties')
viewer_state = rec['state']['values']
viewer_state['color_mode'] = 'st__Colormaps'
viewer_state['reference_data'] = properties['data']
data = context.object(properties['data'])
# TODO: add an id method to unserializer
x_index = properties['slice'].index('x')
y_index = properties['slice'].index('y')
viewer_state['x_att_world'] = str(uuid.uuid4())
context.register_object(viewer_state['x_att_world'], data.world_component_ids[x_index])
viewer_state['y_att_world'] = str(uuid.uuid4())
context.register_object(viewer_state['y_att_world'], data.world_component_ids[y_index])
viewer_state['x_att'] = str(uuid.uuid4())
context.register_object(viewer_state['x_att'], data.pixel_component_ids[x_index])
viewer_state['y_att'] = str(uuid.uuid4())
context.register_object(viewer_state['y_att'], data.pixel_component_ids[y_index])
viewer_state['x_min'] = -0.5
viewer_state['x_max'] = data.shape[1] - 0.5
viewer_state['y_min'] = -0.5
viewer_state['y_max'] = data.shape[0] - 0.5
viewer_state['aspect'] = 'st__equal'
# Slicing with cubes
viewer_state['slices'] = [s if np.isreal(s) else 0 for s in properties['slice']]
# RGB mode
for layer in rec['layers'][:]:
if layer['_type'].split('.')[-1] == 'RGBImageLayerArtist':
for icolor, color in enumerate('rgb'):
new_layer = {}
new_layer['_type'] = 'glue.viewers.image.layer_artist.ImageLayerArtist'
new_layer['layer'] = layer['layer']
new_layer['attribute'] = layer[color]
new_layer['norm'] = layer[color + 'norm']
new_layer['zorder'] = layer['zorder']
new_layer['visible'] = layer['color_visible'][icolor]
new_layer['color'] = color
rec['layers'].append(new_layer)
rec['layers'].remove(layer)
viewer_state['color_mode'] = 'st__One color per layer'
layer_states = []
for layer in rec['layers']:
state_id = str(uuid.uuid4())
state_cls = STATE_CLASS[layer['_type'].split('.')[-1]]
state = state_cls(layer=context.object(layer.pop('layer')))
for prop in ('visible', 'zorder'):
value = layer.pop(prop)
value = context.object(value)
setattr(state, prop, value)
if 'attribute' in layer:
state.attribute = context.object(layer['attribute'])
else:
state.attribute = context.object(properties['attribute'])
if 'norm' in layer:
norm = context.object(layer['norm'])
state.bias = norm.bias
state.contrast = norm.contrast
state.stretch = norm.stretch
if norm.clip_hi is not None:
state.percentile = norm.clip_hi
else:
if norm.vmax is not None:
state.v_min = norm.vmin
state.v_max = norm.vmax
state.percentile = 'Custom'
if 'color' in layer:
state.global_sync = False
state.color = layer['color']
context.register_object(state_id, state)
layer['state'] = state_id
layer_states.append(state)
list_id = str(uuid.uuid4())
context.register_object(list_id, layer_states)
rec['state']['values']['layers'] = list_id
| 38.24031 | 95 | 0.588283 |
| 369fa80b40b091564f7a909c251fea12a1999b8f | 48 | py | Python | config.py | ydogukan/TRON_Discord_Bot | 87c1ec8fa8fb1291ecf332b13fe5f6b856bdf8fc | ["MIT"] | null | null | null | config.py | ydogukan/TRON_Discord_Bot | 87c1ec8fa8fb1291ecf332b13fe5f6b856bdf8fc | ["MIT"] | null | null | null | config.py | ydogukan/TRON_Discord_Bot | 87c1ec8fa8fb1291ecf332b13fe5f6b856bdf8fc | ["MIT"] | null | null | null |
queues = {}
global now_playing
now_playing = []
| 12 | 18 | 0.708333 |
| 22cbeee3d5e38481ae49455caaa4b2d29cb018ea | 2,359 | py | Python | scripts/Reader_RDM6300.py | emhal/RPi-Jukebox-RFID | 0914ef7b1b018f3c39a127ab33d95e3a306e1e06 | ["MIT"] | null | null | null | scripts/Reader_RDM6300.py | emhal/RPi-Jukebox-RFID | 0914ef7b1b018f3c39a127ab33d95e3a306e1e06 | ["MIT"] | null | null | null | scripts/Reader_RDM6300.py | emhal/RPi-Jukebox-RFID | 0914ef7b1b018f3c39a127ab33d95e3a306e1e06 | ["MIT"] | null | null | null |
"""
Support for the RDM6300 serial RFID module
1.) Connect the RDM6300 module
------------------------------
Connect the RDM6300 module to the serial GPIO pins 14 and 15.
2.) Enable GPIO serial port
---------------------------
Edit the /boot/config.txt (sudo nano /boot/config.txt) and add the following line:
enable_uart=1
3.) Install dependencies
-----------------------
Be aware not to install the "serial" module, install "pyserial" instead:
pip install pyserial
4.) Replace the default Reader.py
---------------------------------
Replace the Reader.py file with the Reader_RDM6300.py:
mv Reader.py Reader_default.py; mv Reader_RDM6300.py Reader.py
"""
import serial
import string
import atexit
class Reader:
def __init__(self):
device = '/dev/ttyS0'
baudrate = 9600
ser_timeout = 0.1
self.last_card_id = ''
atexit.register(self.cleanup)
try:
self.rfid_serial = serial.Serial(device, baudrate, timeout=ser_timeout)
except serial.SerialException as e:
print(e)
exit(1)
def readCard(self):
byte_card_id = b''
try:
while True:
try:
read_byte = self.rfid_serial.read()
if read_byte == b'\x02': # start byte
while read_byte != b'\x03': # end byte
read_byte = self.rfid_serial.read()
byte_card_id += read_byte
card_id = byte_card_id.decode('utf-8')
byte_card_id = b''  # reset as bytes, not str, so the next read can append bytes again
card_id = ''.join(x for x in card_id if x in string.printable)
# Only return UUIDs with correct length
if len(card_id) == 12 and card_id != self.last_card_id:
self.last_card_id = card_id
self.rfid_serial.reset_input_buffer()
return self.last_card_id
else: # wrong UUID length or already sent that UUID last time
self.rfid_serial.reset_input_buffer()
except ValueError as ve:
print(ve)
except serial.SerialException as se:
print(se)
def cleanup(self):
self.rfid_serial.close()
| 31.039474 | 86 | 0.529462 |
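A minimal usage sketch for the Reader class in the row above, assuming the file has been renamed to Reader.py (step 4 of its docstring) and the RDM6300 is wired to /dev/ttyS0; the polling loop itself is illustrative and not part of the original project:

from Reader import Reader

reader = Reader()
while True:
    # readCard() blocks until a new 12-character card UID is read from the serial port
    card_id = reader.readCard()
    print("Card detected:", card_id)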
| b98ca19a1ebd063a0776d50ba072626e515fcf84 | 3,600 | py | Python | src/sphinx/conf.py | cvogt/sbt | 1432d633e2652c51baa8af1c84411bd0517b6bf9 | ["MIT", "Apache-2.0", "BSD-3-Clause"] | null | null | null | src/sphinx/conf.py | cvogt/sbt | 1432d633e2652c51baa8af1c84411bd0517b6bf9 | ["MIT", "Apache-2.0", "BSD-3-Clause"] | null | null | null | src/sphinx/conf.py | cvogt/sbt | 1432d633e2652c51baa8af1c84411bd0517b6bf9 | ["MIT", "Apache-2.0", "BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os
sys.path.append(os.path.abspath('_sphinx/exts'))
extensions = ['sphinxcontrib.issuetracker', 'sphinx.ext.extlinks', 'howto', 'codeliteral', 'key']
# Project variables
project = 'sbt'
version = os.environ['sbt.partial.version']
site_version = os.environ['sbt.site.version']
release = os.environ['sbt.full.version']
scalaVersion = os.environ['scala.binary.version']
scalaRelease = os.environ['scala.full.version']
# General settings
needs_sphinx = '1.1'
nitpicky = True
default_role = 'codeliteral'
master_doc = 'home'
highlight_language = 'scala'
add_function_parentheses = False
# TODO: make this an argument
# pdf_index should be excluded when generating html
# index.rst should be excluded when generating a pdf
exclude_patterns = [ 'pdf_index.rst' ]
# HTML
html_theme = 'sbt'
html_theme_path = ['_sphinx/themes']
html_title = 'sbt Documentation'
html_domain_indices = False
html_use_index = False
html_show_sphinx = False
htmlhelp_basename = 'sbtdoc'
html_use_smartypants = False
html_copy_source = False
# if true:
# the Home link is to scala-sbt.org
# if false:
# the Home link is to home.html for the current documentation version
# TODO: pass this as an argument to sphinx
home_site = True
# Passed to Google as site:<site_search_base>
# If empty, no search box is included
site_search_base = 'http://www.scala-sbt.org/' + site_version + '/docs'
# passes variables to the template
html_context = {'home_site': home_site, 'site_search_base': site_search_base}
# Latex (PDF)
#latex_documents = [
# ('pdf_index', 'sbt.tex', html_title, '', 'manual', True),
# ('Getting-Started/index', 'sbt-Getting-Started.tex', html_title, '', 'manual', True),
#]
# Issues role
issuetracker = 'github'
issuetracker_project = 'sbt/sbt'
issuetracker_plaintext_issues = True
issuetracker_issue_pattern = r'\bgh-(\d+)\b'
issuetracker_title_template = '#{issue.id}'
# links, substitutions
typesafe_base = 'http://repo.typesafe.com/typesafe/'
typesafe_ivy_snapshots = typesafe_base + 'ivy-snapshots/'
typesafe_ivy_releases = typesafe_base + 'ivy-releases/'
launcher_release_base = typesafe_ivy_releases + 'org.scala-sbt/sbt-launch/'
launcher_snapshots_base = typesafe_ivy_snapshots + 'org.scala-sbt/sbt-launch/'
sbt_native_package_base = 'http://repo.scala-sbt.org/scalasbt/sbt-native-packages/org/scala-sbt/sbt/'
rst_epilog = """
.. |scalaVersion| replace:: %(scalaVersion)s
.. |scalaRelease| replace:: %(scalaRelease)s
.. _typesafe-snapshots: %(typesafe_ivy_snapshots)s
.. |typesafe-snapshots| replace:: Typesafe Snapshots
.. _sbt-launch.jar: %(launcher_release_base)s%(release)s/sbt-launch.jar
.. _MSI: %(sbt_native_package_base)s%(release)s/sbt.msi
.. _TGZ: %(sbt_native_package_base)s%(release)s/sbt.tgz
.. _ZIP: %(sbt_native_package_base)s%(release)s/sbt.zip
.. _DEB: %(sbt_native_package_base)s%(release)s/sbt.deb
.. _RPM: %(sbt_native_package_base)s%(release)s/sbt.rpm
.. |nightly-launcher| replace:: <%(launcher_snapshots_base)s
.. _sbt-dev mailing list: https://groups.google.com/forum/#!forum/sbt-dev
.. _adept: https://groups.google.com/group/adept-dev/topics
.. _sbt-launcher-package: https://github.com/sbt/sbt-launcher-package
.. _Stack Overflow: http://stackoverflow.com/tags/sbt
.. _source code: http://github.com/sbt/sbt
""" % {
'launcher_release_base': launcher_release_base,
'launcher_snapshots_base': launcher_snapshots_base,
'typesafe_ivy_snapshots': typesafe_ivy_snapshots,
'sbt_native_package_base': sbt_native_package_base,
'scalaRelease': scalaRelease,
'scalaVersion': scalaVersion,
'release': release
}
| 32.727273 | 101 | 0.750278 |
| 48926be6fe65f47f8f85f12dafc9f1e8f6f9f47b | 3,731 | py | Python | onadata/apps/logger/management/commands/create_image_thumbnails.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | ["BSD-2-Clause"] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/apps/logger/management/commands/create_image_thumbnails.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | ["BSD-2-Clause"] | 48 | 2019-03-18T09:26:31.000Z | 2019-05-27T08:12:03.000Z | onadata/apps/logger/management/commands/create_image_thumbnails.py | BuildAMovement/whistler-kobocat | 7f61dd0761bb0aa5b27c909bcff8c29453d3311d | ["BSD-2-Clause"] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z |
#!/usr/bin/env python
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from django.core.files.storage import get_storage_class
from django.conf import settings
from onadata.apps.logger.models.attachment import Attachment
from onadata.apps.logger.models.xform import XForm
from onadata.libs.utils.image_tools import resize, resize_local_env
from onadata.libs.utils.model_tools import queryset_iterator
from onadata.libs.utils.viewer_tools import get_path
from django.utils.translation import ugettext as _, ugettext_lazy
class Command(BaseCommand):
help = ugettext_lazy("Creates thumbnails for "
"all form images and stores them")
option_list = BaseCommand.option_list + (
make_option('-u', '--username',
help=ugettext_lazy("Username of the form user")),
make_option('-i', '--id_string',
help=ugettext_lazy("id string of the form")),
make_option('-f', '--force', action='store_false',
help=ugettext_lazy("regenerate thumbnails if they exist."))
)
def handle(self, *args, **kwargs):
attachments_qs = Attachment.objects.select_related(
'instance', 'instance__xform')
if kwargs.get('username'):
username = kwargs.get('username')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError(
"Error: username %(username)s does not exist" %
{'username': username}
)
attachments_qs = attachments_qs.filter(instance__user=user)
if kwargs.get('id_string'):
id_string = kwargs.get('id_string')
try:
xform = XForm.objects.get(id_string=id_string)
except XForm.DoesNotExist:
raise CommandError(
"Error: Form with id_string %(id_string)s does not exist" %
{'id_string': id_string}
)
attachments_qs = attachments_qs.filter(instance__xform=xform)
fs = get_storage_class('django.core.files.storage.FileSystemStorage')()
for att in queryset_iterator(attachments_qs):
filename = att.media_file.name
default_storage = get_storage_class()()
full_path = get_path(filename,
settings.THUMB_CONF['small']['suffix'])
if kwargs.get('force') is not None:
for s in ['small', 'medium', 'large']:
fp = get_path(filename,
settings.THUMB_CONF[s]['suffix'])
if default_storage.exists(fp):
default_storage.delete(fp)
if not default_storage.exists(full_path):
try:
if default_storage.__class__ != fs.__class__:
resize(filename)
else:
resize_local_env(filename)
if default_storage.exists(get_path(
filename,
'%s' % settings.THUMB_CONF['small']['suffix'])):
print (_(u'Thumbnails created for %(file)s')
% {'file': filename})
else:
print (_(u'Problem with the file %(file)s')
% {'file': filename})
except (IOError, OSError), e:
print _(u'Error on %(filename)s: %(error)s') \
% {'filename': filename, 'error': e}
| 46.061728 | 79 | 0.56044 |
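A hedged sketch of running the management command above from code rather than the shell; django.core.management.call_command is the standard way to invoke a command by name, and the username/id_string values below are placeholders:

from django.core import management

# Limit thumbnail generation to one user's form; supplying the force option
# (handle() only checks that it is not None) deletes existing thumbnails first.
management.call_command(
    'create_image_thumbnails',
    username='someuser',     # placeholder, maps to -u/--username
    id_string='my_form_id',  # placeholder, maps to -i/--id_string
    force=False,             # any non-None value triggers regeneration
)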
| df2a7bb93d330dfda915e0af01b5e654b0e6dac0 | 11,784 | py | Python | lib/py/src/server/TServer.py | Jimexist/thrift | 684ee0717472e1c084f4858f3faf650b6f17b451 | ["Apache-2.0"] | 8,514 | 2015-01-02T12:00:14.000Z | 2022-03-31T10:34:56.000Z | lib/py/src/server/TServer.py | Jimexist/thrift | 684ee0717472e1c084f4858f3faf650b6f17b451 | ["Apache-2.0"] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | lib/py/src/server/TServer.py | Jimexist/thrift | 684ee0717472e1c084f4858f3faf650b6f17b451 | ["Apache-2.0"] | 3,849 | 2015-01-01T02:13:43.000Z | 2022-03-31T06:23:34.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from six.moves import queue
import logging
import os
import threading
from thrift.protocol import TBinaryProtocol
from thrift.protocol.THeaderProtocol import THeaderProtocolFactory
from thrift.transport import TTransport
logger = logging.getLogger(__name__)
class TServer(object):
"""Base interface for a server, which must have a serve() method.
Three constructors for all servers:
1) (processor, serverTransport)
2) (processor, serverTransport, transportFactory, protocolFactory)
3) (processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory)
"""
def __init__(self, *args):
if (len(args) == 2):
self.__initArgs__(args[0], args[1],
TTransport.TTransportFactoryBase(),
TTransport.TTransportFactoryBase(),
TBinaryProtocol.TBinaryProtocolFactory(),
TBinaryProtocol.TBinaryProtocolFactory())
elif (len(args) == 4):
self.__initArgs__(args[0], args[1], args[2], args[2], args[3], args[3])
elif (len(args) == 6):
self.__initArgs__(args[0], args[1], args[2], args[3], args[4], args[5])
def __initArgs__(self, processor, serverTransport,
inputTransportFactory, outputTransportFactory,
inputProtocolFactory, outputProtocolFactory):
self.processor = processor
self.serverTransport = serverTransport
self.inputTransportFactory = inputTransportFactory
self.outputTransportFactory = outputTransportFactory
self.inputProtocolFactory = inputProtocolFactory
self.outputProtocolFactory = outputProtocolFactory
input_is_header = isinstance(self.inputProtocolFactory, THeaderProtocolFactory)
output_is_header = isinstance(self.outputProtocolFactory, THeaderProtocolFactory)
if any((input_is_header, output_is_header)) and input_is_header != output_is_header:
raise ValueError("THeaderProtocol servers require that both the input and "
"output protocols are THeaderProtocol.")
def serve(self):
pass
class TSimpleServer(TServer):
"""Simple single-threaded server that just pumps around one transport."""
def __init__(self, *args):
TServer.__init__(self, *args)
def serve(self):
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
itrans = self.inputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
# for THeaderProtocol, we must use the same protocol instance for
# input and output so that the response is in the same dialect that
# the server detected the request was in.
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
otrans = None
oprot = iprot
else:
otrans = self.outputTransportFactory.getTransport(client)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
if otrans:
otrans.close()
class TThreadedServer(TServer):
"""Threaded server that spawns a new thread per each connection."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.daemon = kwargs.get("daemon", False)
def serve(self):
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
t = threading.Thread(target=self.handle, args=(client,))
t.setDaemon(self.daemon)
t.start()
except KeyboardInterrupt:
raise
except Exception as x:
logger.exception(x)
def handle(self, client):
itrans = self.inputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
# for THeaderProtocol, we must use the same protocol instance for input
# and output so that the response is in the same dialect that the
# server detected the request was in.
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
otrans = None
oprot = iprot
else:
otrans = self.outputTransportFactory.getTransport(client)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
if otrans:
otrans.close()
class TThreadPoolServer(TServer):
"""Server with a fixed size pool of threads which service requests."""
def __init__(self, *args, **kwargs):
TServer.__init__(self, *args)
self.clients = queue.Queue()
self.threads = 10
self.daemon = kwargs.get("daemon", False)
def setNumThreads(self, num):
"""Set the number of worker threads that should be created"""
self.threads = num
def serveThread(self):
"""Loop around getting clients from the shared queue and process them."""
while True:
try:
client = self.clients.get()
self.serveClient(client)
except Exception as x:
logger.exception(x)
def serveClient(self, client):
"""Process input/output from a client for as long as possible"""
itrans = self.inputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
# for THeaderProtocol, we must use the same protocol instance for input
# and output so that the response is in the same dialect that the
# server detected the request was in.
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
otrans = None
oprot = iprot
else:
otrans = self.outputTransportFactory.getTransport(client)
oprot = self.outputProtocolFactory.getProtocol(otrans)
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
itrans.close()
if otrans:
otrans.close()
def serve(self):
"""Start a fixed number of worker threads and put client into a queue"""
for i in range(self.threads):
try:
t = threading.Thread(target=self.serveThread)
t.setDaemon(self.daemon)
t.start()
except Exception as x:
logger.exception(x)
# Pump the socket for clients
self.serverTransport.listen()
while True:
try:
client = self.serverTransport.accept()
if not client:
continue
self.clients.put(client)
except Exception as x:
logger.exception(x)
class TForkingServer(TServer):
"""A Thrift server that forks a new process for each request
This is more scalable than the threaded server as it does not cause
GIL contention.
Note that this has different semantics from the threading server.
Specifically, updates to shared variables will no longer be shared.
It will also not work on windows.
This code is heavily inspired by SocketServer.ForkingMixIn in the
Python stdlib.
"""
def __init__(self, *args):
TServer.__init__(self, *args)
self.children = []
def serve(self):
def try_close(file):
try:
file.close()
except IOError as e:
logger.warning(e, exc_info=True)
self.serverTransport.listen()
while True:
client = self.serverTransport.accept()
if not client:
continue
try:
pid = os.fork()
if pid: # parent
# add before collect, otherwise you race w/ waitpid
self.children.append(pid)
self.collect_children()
# Parent must close socket or the connection may not get
# closed promptly
itrans = self.inputTransportFactory.getTransport(client)
otrans = self.outputTransportFactory.getTransport(client)
try_close(itrans)
try_close(otrans)
else:
itrans = self.inputTransportFactory.getTransport(client)
iprot = self.inputProtocolFactory.getProtocol(itrans)
# for THeaderProtocol, we must use the same protocol
# instance for input and output so that the response is in
# the same dialect that the server detected the request was
# in.
if isinstance(self.inputProtocolFactory, THeaderProtocolFactory):
otrans = None
oprot = iprot
else:
otrans = self.outputTransportFactory.getTransport(client)
oprot = self.outputProtocolFactory.getProtocol(otrans)
ecode = 0
try:
try:
while True:
self.processor.process(iprot, oprot)
except TTransport.TTransportException:
pass
except Exception as e:
logger.exception(e)
ecode = 1
finally:
try_close(itrans)
if otrans:
try_close(otrans)
os._exit(ecode)
except TTransport.TTransportException:
pass
except Exception as x:
logger.exception(x)
def collect_children(self):
while self.children:
try:
pid, status = os.waitpid(0, os.WNOHANG)
except os.error:
pid = None
if pid:
self.children.remove(pid)
else:
break
| 36.37037 | 92 | 0.588425 |
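To make the constructor forms listed in the TServer docstring concrete, here is a minimal sketch of the 4-argument form with TSimpleServer; the calculator module and its handler are hypothetical stand-ins for whatever IDL-generated service is being exposed, while the transport and protocol factories are the stock Thrift Python classes:

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer

# Hypothetical IDL-generated module; any generated Processor is used the same way.
from calculator import CalculatorService

class CalculatorHandler(object):
    def add(self, a, b):
        return a + b

processor = CalculatorService.Processor(CalculatorHandler())
transport = TSocket.TServerSocket(host='127.0.0.1', port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

# Constructor form 2: (processor, serverTransport, transportFactory, protocolFactory)
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
server.serve()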
| 9ccc0fba52e34bd2b0a37ce9e1b9e5280862df48 | 9,934 | py | Python | federal_spending/usaspending/management/commands/import_updates.py | rlugojr/federal_spending | e06f5a0018c1a0c581e8659472acb1574919dc50 | ["CC0-1.0"] | 20 | 2015-01-15T18:59:50.000Z | 2022-01-31T03:37:53.000Z | federal_spending/usaspending/management/commands/import_updates.py | rlugojr/federal_spending | e06f5a0018c1a0c581e8659472acb1574919dc50 | ["CC0-1.0"] | null | null | null | federal_spending/usaspending/management/commands/import_updates.py | rlugojr/federal_spending | e06f5a0018c1a0c581e8659472acb1574919dc50 | ["CC0-1.0"] | 7 | 2015-01-16T15:47:06.000Z | 2020-01-09T07:14:45.000Z |
from federal_spending.usaspending.models import Contract, Grant
from federal_spending.usaspending.scripts.usaspending.contracts_loader import Loader
from federal_spending.usaspending.scripts.usaspending.fpds import FIELDS as CONTRACT_FIELDS, CALCULATED_FIELDS as CONTRACT_CALCULATED_FIELDS
from federal_spending.usaspending.scripts.usaspending.faads import FIELDS as GRANT_FIELDS, CALCULATED_FIELDS as GRANT_CALCULATED_FIELDS
from django.core.management.base import BaseCommand
from django.core import management
from django.conf import settings
from django.db import connections, connection, transaction
from django.db.models import sql
from itertools import izip
from dateutil.parser import parse
import os
import csv
import datetime
import time
from federal_spending.usaspending.management.commands.create_indexes import contracts_idx, grants_idx
from federal_spending.usaspending.scripts.usaspending.config import INDEX_COLS_BY_TABLE
def notnull(val):
if val and val != '' and 'null' not in val.strip().lower():
return True
return False
class Command(BaseCommand):
ALL_CONTRACT_FIELDS = [ x[0] for x in CONTRACT_FIELDS ] + [ x[0] for x in CONTRACT_CALCULATED_FIELDS ]
ALL_GRANT_FIELDS = [ x[0] for x in GRANT_FIELDS ] + [ x[0] for x in GRANT_CALCULATED_FIELDS ]
contracts_failed = []
grants_failed = []
contracts_idx_drop = contracts_idx[:10]
contracts_idx_add = contracts_idx[12:22]
grants_idx_drop = grants_idx[:3]
grants_idx_add = grants_idx[5:8]
@transaction.commit_manually
def handle(self, download_file='delta_downloads.txt', **options):
OUTPATH = settings.CSV_PATH + 'out/'
a="""confirm = raw_input("Clearing out the csvs in the out folder, continue? y/n")
if confirm != 'y':
return
#remove any csvs so we don't reprocess everything
for f in os.listdir(OUTPATH):
os.remove(OUTPATH + f)
print "Downloading links in {0}".format(download_file)
management.call_command('download_files', settings.PROJECT_ROOT + '/usaspending/downloads/' + download_file)
print "sleeping for a minute"
time.sleep(60)
print "processing downloaded files into proper format"
for fname in os.listdir(settings.CSV_PATH + 'datafeeds/'):
if 'Delta' in fname and 'Contracts' in fname:
management.call_command('convert_usaspending_contracts')
elif 'Delta' in fname and ('Grants' in fname or 'Loans' in fname or 'Insurance' in fname or 'Direct_Payments' in fname):
management.call_command('convert_usaspending_grants')
print "Processing transaction updates in database"
#print "Current number of rows in contract table: {0}".format(Contract.objects.all().count())
#print "Current number of rows in grant table: {0}".format(Grant.objects.all().count())
"""
c = connections['default'].cursor()
print 'deleting unnecessary indexes'
for x in self.contracts_idx_drop:
print x
c.execute(x)
for x in self.grants_idx_drop:
print x
c.execute(x)
for tab in ['usaspending_grant', 'usaspending_contract']:
for fy in settings.FISCAL_YEARS:
for i, colname in enumerate(INDEX_COLS_BY_TABLE[tab]):
if 'fiscal_year' not in colname and 'unique_transaction_id' not in colname:
del_stmt = 'drop index if exists {0}_{1}_{2}; commit;'.format(tab, fy, i)
print del_stmt
c.execute(del_stmt)
for sname in os.listdir(OUTPATH):
line_total = 0
if 'contracts' in sname:
print "processing file {0}".format(sname)
reader = csv.reader(open(OUTPATH + sname), delimiter='|')
for line in reader:
self.update_contract_row(line)
if line_total % 1000 == 0: print "... on line {0}".format(line_total)
line_total += 1
line_total = 0
if 'grants' in sname:
print "processing file {0}".format(sname)
reader = csv.reader(open(OUTPATH + sname), delimiter='|')
for line in reader:
self.update_grant_row(line)
if line_total % 1000 == 0:
print "... on line {0}".format(line_total)
transaction.commit()
line_total += 1
print 'recreating dropped indexes'
for x in self.contracts_idx_add:
print x
c.execute(x)
for x in self.grants_idx_add:
print x
c.execute(x)
#print "New number of rows in contract table: {0}".format(Contract.objects.all().count())
#print "New number of rows in grant table: {0}".format(Grant.objects.all().count())
self.write_log()
def check_fiscal_year(self, line, num):
if len(line) >= (num):
fy = line[num]
if fy and fy != '' and len(fy) == 4:
return True
else:
print "it failed! {0}".format(line[0])
return False
else:
print "length failed {0} it's only {1}".format(line[0], len(line))
return False
def update_contract_row(self, line):
c = None
status = line[1]
if status.strip().lower() == 'inactive':
#means that this update deletes a record
try:
c = Contract.objects.get(unique_transaction_id=line[0], fiscal_year=line[97])
print "Deleting {0}".format(line[0])
c.delete()
except Contract.DoesNotExist as e:
pass
return
else:
if not self.check_fiscal_year(line, 97):
self.contracts_failed.append(line)
return
try:
c = Contract.objects.get(unique_transaction_id=line[0], fiscal_year=line[97])
except Contract.DoesNotExist as e:
c = Contract(unique_transaction_id=line[0], fiscal_year=line[97])
except Contract.MultipleObjectsReturned as e:
# delete extra objects
cset = Contract.objects.filter(unique_transaction_id=line[0], fiscal_year=line[97]).order_by('-id')
for i, obj in enumerate(cset):
if i == 0:
c = obj
else:
obj.delete()
for (i, (column_name, value)) in enumerate(izip(self.ALL_CONTRACT_FIELDS, line)):
if i in [13,14,15,16, 68, 69, 158]:
if notnull(value):
#parse date fields into python date objects
try:
value = parse(value)
except OverflowError as e:
value = None
else:
value = None
if value == 'NULL': #convert CSV/Postgresql null values to python null
value = None
setattr(c, column_name, value)
c.save()
def update_grant_row(self, line):
#To Do: add logging for transactions that fail
c = None
status = line[1]
#print "processing {0}".format(line[0])
if status.strip().lower() == 'inactive':
#means that this update deletes a record
try:
c = Grant.objects.get(unique_transaction_id=line[0], fiscal_year=line[46])
print "Deleting {0}".format(line[0])
c.delete()
except Grant.DoesNotExist as e:
pass
return
else:
if not self.check_fiscal_year(line, 46):
self.contracts_failed.append(line)
return
try:
c = Grant.objects.get(unique_transaction_id=line[0], fiscal_year=line[46])
except Grant.DoesNotExist as e:
c = Grant(unique_transaction_id=line[0], fiscal_year=line[46])
except Grant.MultipleObjectsReturned as f:
print f
cset = Grant.objects.filter(unique_transaction_id=line[0], fiscal_year=line[46]).order_by('-id')
# delete extra objects
for i, obj in enumerate(cset):
print obj
if i == 0:
c = obj
else:
obj.delete()
#print connection.queries[-1]
for (i, (column_name, value)) in enumerate(izip(self.ALL_GRANT_FIELDS, line)):
if i in [21, 22, 23, 55]:
if notnull(value):
#parse date fields into python date objects
try:
value = parse(value).date()
except OverflowError as e:
value = None
else:
value = None
if value == 'NULL': #convert CSV/Postgresql null values to python null
value = None
setattr(c, column_name, value)
c.save()
#print connection.queries[-1]
def write_log(self):
today = datetime.datetime.now()
print "Writing Log"
writer = csv.writer(open(settings.LOGGING_DIRECTORY + '/failed_contracts_{0}.csv'.format(today.strftime('%Y%m%d')), 'w+'))
for line in self.contracts_failed:
writer.writerow(line)
gwriter = csv.writer(open(settings.LOGGING_DIRECTORY + '/failed_grants_{0}.csv'.format(today.strftime('%Y%m%d')), 'w+'))
for line in self.grants_failed:
gwriter.writerow(line)
| 39.895582 | 140 | 0.567042 |
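The per-column cleanup inside update_contract_row / update_grant_row above (date columns parsed with dateutil, the literal string 'NULL' turned into None) can be exercised in isolation; a minimal sketch with the notnull() test copied from the command and made-up sample values:

from dateutil.parser import parse

def notnull(val):
    # same check as the module-level helper in the command above
    return bool(val and val != '' and 'null' not in val.strip().lower())

def clean_value(value, is_date_column):
    if is_date_column:
        return parse(value) if notnull(value) else None
    return None if value == 'NULL' else value

print(clean_value('2014-10-01', True))  # datetime.datetime(2014, 10, 1, 0, 0)
print(clean_value('NULL', False))       # None
print(clean_value('ABC123', False))     # 'ABC123'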
| 336ee2de3be52b01d6cad9e93f839072131d5334 | 235 | py | Python | venv/Lib/site-packages/konlpy/user.py | movierecommend-chatbot/chat-bot | fc40c1937e8f597230578c1957305ad22f8280e4 | ["bzip2-1.0.6"] | null | null | null | venv/Lib/site-packages/konlpy/user.py | movierecommend-chatbot/chat-bot | fc40c1937e8f597230578c1957305ad22f8280e4 | ["bzip2-1.0.6"] | null | null | null | venv/Lib/site-packages/konlpy/user.py | movierecommend-chatbot/chat-bot | fc40c1937e8f597230578c1957305ad22f8280e4 | ["bzip2-1.0.6"] | null | null | null |
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
from . import utils
def addterm(term):
dicfilename = os.path.join(utils.installpath, "data", "dictionary.tsv")
with open(dicfilename, 'a') as f:
f.write(term)
| 18.076923 | 75 | 0.629787 |
| acc3e751b854448b31c3ed7fc7ffff9e3a36fa95 | 842 | py | Python | setup.py | NotFaizen/popcat_wrapper | f2ea07a07cfa8084115645272cdbdee718012b90 | ["MIT"] | null | null | null | setup.py | NotFaizen/popcat_wrapper | f2ea07a07cfa8084115645272cdbdee718012b90 | ["MIT"] | null | null | null | setup.py | NotFaizen/popcat_wrapper | f2ea07a07cfa8084115645272cdbdee718012b90 | ["MIT"] | 2 | 2021-09-10T00:27:21.000Z | 2022-02-02T19:46:48.000Z |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="popcat_wrapper",
version="1.6",
author="NotFaizen",
author_email="munavir370@gmail.com",
url = "https://github.com/NotFaizen/popcat_wrapper",
description="A wrapper designed for easy image manipulation",
long_description=long_description,
long_description_content_type="text/markdown",
keywords=['python', 'async', 'popcat', 'popcatapi', 'api', 'api wrapper','discord','wrapper'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=["aiohttp"]
)
| 35.083333 | 99 | 0.649644 |
| 159f0d39b309f0056b77a3459098dbe0a5aac1c6 | 54,356 | py | Python | estrategias/2021_Kasparov.py | rsouza/jogos_vorazes | 807031a981b18e8fed0a73b5edb35fa4d486c269 | ["MIT"] | null | null | null | estrategias/2021_Kasparov.py | rsouza/jogos_vorazes | 807031a981b18e8fed0a73b5edb35fa4d486c269 | ["MIT"] | null | null | null | estrategias/2021_Kasparov.py | rsouza/jogos_vorazes | 807031a981b18e8fed0a73b5edb35fa4d486c269 | ["MIT"] | 19 | 2016-01-15T17:24:45.000Z | 2021-01-28T18:12:50.000Z |
import pickle
import numpy as np
import random
from .jogadores import Jogador
class MeuJogador(Jogador):
'''So, my initial plan was to use a neural network, but I ended up giving up on the idea because I was having trouble importing tensorflow on a machine that does not have it installed using only a pip install of the module (I talked with Professor Renato about this in class; the new version was clashing with Cuda). I then decided to fit a k-cluster with 300 clusters on a database I built by modifying the simulador.py file to write a csv with each player's move as a function of the inputs received. I trained the k-cluster on 1000000 instances and then estimated the probability of returning "c" for each supplied parameter. The result was mediocre. Although it won a few times against the provided set of players, it was not as consistent as it would have been had I trained a reinforcement algorithm, which I did not do due to time and space constraints. This tells me a few things: 1) the sample set used was very heterogeneous; 2) the players diverged a lot in strategy, especially in later rounds. Still, as an exercise, the experience was fun!'''
def __init__(self):
# means and variances of the training data; this matters because the clustering algorithm needs the variables on the same scale, otherwise larger-valued variables would be overweighted when computing distances to the centroids
self.medias = [99.486022, 0.505942, 102.346536, 0.50406664, 0.50368208, 0, 0.86783335]
self.var = [3.33037732e+03, 3.92964236e-02, 9.92188611e+03, 4.02969574e-02, 2.70893678e-03,
0.00000000e+00, 1.08038673e-02]
# standard deviation taken as the square root of the variance
self.desvio = [(variancia)**0.5 for variancia in self.var]
# ideally these data would be imported from a file, but since I thought only a single file could be submitted, I decided to simply copy and paste the data below
# this list gives the coordinates of each cluster's centroid
self.centros = [[-1.3426829348155176,
0.6924552266171596,
-1.0275578946133381,
-2.5086053815815297,
1.1826925858274417,
1.2699594110885652],
[0.7646816750653262,
0.23567784797510313,
-1.0275578946133381,
0.7354025192197684,
-0.563189301306828,
-0.6515455280526591],
[-0.37646244020598263,
0.017061445306800357,
0.9398433574915842,
0.0025065262759503062,
-0.04139868056448892,
-0.4480920639082951],
[-1.2931779667372716,
0.07542576034266327,
-1.0275578946133381,
-0.3524564505307079,
1.0657573402534872,
1.2699594110885655],
[-1.544828221135023,
-0.09132626385313414,
1.1606741102788711,
0.04841172256894984,
1.282849835576562,
1.2699594110885655],
[-1.7238711890180138,
1.5330155915144752,
-1.0275578946133381,
-2.5086053815815292,
-9.662092783798043,
-8.33756528461756],
[1.392775957558075,
-0.0738973394704198,
0.815626059048735,
0.06916673153121014,
-0.6980909063982096,
-0.6335314192482101],
[0.9011297433309924,
0.09048803783284924,
-1.0275578946133381,
-1.4127409083741813,
-0.5983629897097297,
-0.6515455280526591],
[-1.1070392867630656,
0.1300594261719541,
-1.0275578946133381,
1.5062435520781192,
0.978411902616491,
1.2699594110885652],
[-1.0481283747499528,
0.9900499947530698,
1.1272149053111005,
1.4597523320026558,
0.7902144478614118,
1.2699594110885652],
[0.6735194014621467,
-2.446293774051437,
0.8595412655689342,
0.1721870487438844,
-0.35752341645936847,
-0.4681291474982699],
[0.2610030033192252,
-0.18195093541483157,
-1.0275578946133381,
0.09822374407837477,
-0.16947068411239485,
-0.19251934814670052],
[-0.034514153347861246,
-0.6178748103634039,
0.9298055960012528,
-2.5086053815815297,
-0.09010430882111396,
-0.3152821637029456],
[-1.1549765975188342,
-2.475971589931971,
1.147290428291763,
-0.856506668185603,
1.1427268200274077,
1.2699594110885652],
[1.6635062517359844,
-0.48673467925691055,
-1.0275578946133381,
0.280867822946266,
-0.7858897361929769,
-0.9397712689238431],
[-1.1347620688868836,
0.42713644270125356,
-1.0275578946133381,
0.7789880380405153,
0.9834571869628221,
1.2699594110885652],
[0.3553374702683278,
-0.03104102342373167,
0.9197678345109216,
-0.4663124996951078,
-0.21752551640134732,
-0.17116929326735356],
[-1.0697867982841855,
1.524256521262631,
-1.0275578946133381,
-2.01048516648728,
0.6895030184461921,
1.2699594110885652],
[-1.7065444501906275,
2.4952626277726724,
1.1807496332595337,
-2.5086053815815297,
3.5488146210623355,
1.2699594110885652],
[-1.0689204613428163,
-1.7195043363746776,
-1.0275578946133381,
0.0915821412104513,
0.9465663656657026,
1.2699594110885652],
[-0.43129647249500475,
0.08245134356632196,
0.9398433574915842,
1.3866947004554993,
-0.03495033981306848,
-0.49782513292136166],
[-1.4540691129915717,
-2.0596334952719486,
1.1692779058420126,
2.408552741705988,
1.993785271685662,
1.2699594110885655],
[0.29565648097399755,
0.6146128986109288,
-1.0275578946133381,
0.5742052829462128,
-0.24702197274710247,
-0.224544430465721],
[-1.658029581473946,
0.30657768169489186,
1.1687043194711362,
2.422784747851537,
1.9219922760929946,
1.2699594110885652],
[-1.2820393489196662,
2.371989592973655,
1.1640200307756483,
-0.1591383670536541,
1.0863468546260762,
1.2699594110885652],
[-1.1520888077142697,
1.9410151645661655,
1.1272149053111005,
2.3563687191723046,
0.8001876843599733,
1.2699594110885652],
[-0.6756034899611498,
0.09396849750466822,
-1.0275578946133381,
-0.5659365427139578,
0.13891765365474984,
0.11705644760383044],
[1.2736546281197954,
0.547882658071266,
0.8153751150114766,
-1.0640567578082067,
-0.7242152568218809,
-0.7668358244011327],
[0.9011297433309924,
-2.1741058457389997,
-1.0275578946133381,
-2.4504913564872006,
-0.4586638053291047,
-0.6195204457336388],
[0.8318227880214478,
-0.09827721499755392,
-1.0275578946133381,
-0.33348044233664137,
-0.5600951378011079,
-0.6515455280526591],
[0.2266245532648876,
0.2512588492010434,
0.9226357663653021,
0.4943479151295156,
-0.19190462242954878,
-0.23979446966525453],
[0.9236545038065943,
0.9936993755195441,
0.8314355333960067,
0.5299279304933905,
-0.6418572873660603,
-0.6515455280526591],
[1.1061628194550623,
-1.1338200593237138,
0.8260820606011635,
0.01520370822933314,
-0.5995195641119295,
-0.6515455280526591],
[-0.6236232734789913,
0.14878385137210492,
-0.043857268560877016,
-0.49121851044982023,
0.4708023006208418,
0.5013574354320751],
[-0.2433976158780169,
0.028740783877281923,
-1.0275578946133381,
0.2697984848330605,
-0.05256812793748276,
-0.3846698420608232],
[0.21672355964923834,
0.044544851028180295,
0.9197678345109216,
-0.316876435166833,
-0.1908263941921861,
-0.2672445402244149],
[-0.6192915887721449,
1.4798223475207264,
-1.0275578946133381,
0.24973530950287556,
0.06431151137365539,
0.14107525934309578],
[-0.7189203370296153,
-2.5565343125828637,
0.9799944034529091,
1.4265443176630395,
0.22908157821147035,
0.020981200646769102],
[-0.7015935982022292,
-1.200424028712869,
0.9799944034529091,
-0.018004306110283502,
0.18591799730665715,
0.06901882412529978],
[0.8716742873244359,
-2.447033670924399,
-1.0275578946133384,
-0.0777787319215934,
-0.47201461667157457,
-0.6515455280526592],
[-1.074118482991032,
-2.2682966939785643,
1.1255419450627118,
0.37218986238021173,
0.9463339576658482,
1.2699594110885657],
[-0.16099934678778033,
0.15556378972531137,
0.9425200938890058,
-1.1968888151666732,
-0.060081481936848403,
-0.2608395237606108],
[0.8924663739172993,
0.787110080190615,
0.8394657425882718,
-0.11762834912913345,
-0.6126959173983717,
-0.6515455280526591],
[0.20517240043098087,
0.724449610619481,
0.9231137550076988,
-0.25046040648759976,
-0.20257831318926492,
-0.2512319990649046],
[-1.3340195654018248,
-0.2046046324163659,
1.1606741102788711,
0.6949302517433605,
1.0774329411753885,
1.2699594110885652],
[-1.1867422853690424,
-0.22479496257641624,
1.1472904282917626,
-2.2097332525249795,
1.0121257291194936,
1.2699594110885652],
[1.1485170699220062,
-2.3288984303283544,
0.8260820606011633,
-0.8316006574308905,
-0.5410939574304531,
-0.6408705006129858],
[-1.1492010179097052,
1.6117866019357951,
-1.0275578946133381,
0.38879386955002,
0.9538308079523837,
1.2699594110885652],
[1.6721696211496773,
-0.227244080559039,
0.7993146966269468,
-2.1101092095061302,
-0.7953762374907357,
-0.9397712689238431],
[0.12142649609861438,
0.10707597596782104,
-1.0275578946133381,
-1.7116130374307308,
-0.12139454992867692,
-0.24322572848514956],
[0.19362124121272342,
-0.774408843330042,
0.9197678345109216,
1.5925843893611222,
-0.15585713293355521,
-0.2992696225434353],
[1.5682091881853601,
-0.47227473927691743,
0.7993146966269468,
1.3269202746441895,
-0.7402559986700514,
-0.7476207750097205],
[0.7934564377608069,
0.49220374094871416,
0.8394657425882717,
0.010459706180816477,
-0.5755659553297156,
-0.6515455280526591],
[-0.05617257688209397,
-0.9804425416760604,
-1.0275578946133381,
1.3767322961536141,
-0.037564765184651766,
-0.1231316697888229],
[1.3637536700222035,
0.5496792883450538,
-1.0275578946133381,
0.07165733260668143,
-0.723269661926016,
-0.6515455280526591],
[-1.5506038007441518,
1.022583889213829,
1.1606741102788714,
0.08992174049347045,
1.2503879285420196,
1.2699594110885652],
[-1.1520888077142697,
-0.6136401486109524,
-1.0275578946133381,
-2.5086053815815297,
1.0350833523671514,
1.2699594110885652],
[-1.297633413864314,
-0.11295471644362491,
-1.0275578946133381,
-0.03792911471405349,
1.0681710311035608,
1.2699594110885652],
[-0.26842512751757464,
1.3812851748041024,
0.9448622382367498,
-1.2633048438459062,
-0.09089598405743823,
-0.3393009754422107],
[-0.5716430569968328,
-2.2692036579475428,
-1.0275578946133381,
0.0006752019557508399,
0.15969833896029004,
0.06901882412529978],
[-0.02007520432503944,
-2.5565343125828637,
0.9331515164980299,
0.015203708229333238,
-0.006307139418358897,
-0.29926962254343537],
[0.771179202125596,
0.04062537404342433,
-1.0275578946133381,
-0.9312247004497403,
-0.5556257905150646,
-0.6515455280526591],
[0.5286048585421895,
-0.2775328504875179,
0.8796167885495967,
0.6295519735122405,
-0.36972265684383193,
-0.36331978718147623],
[0.13008986551230745,
1.5260028519416347,
-1.0275578946133381,
1.7005104359648762,
-0.20229914625258438,
-0.21920691674588422],
[0.3524496804637634,
0.32363834199587566,
-1.0275578946133381,
-2.475397367241913,
-0.245567401481846,
-0.203194375586374],
[-0.7674352057462966,
0.33465995640929647,
-1.0275578946133381,
-0.45635009539322285,
0.3828221514924966,
1.2699594110885652],
[0.7885059409529822,
0.5789703897855711,
-1.0275578946133381,
-0.24215840290269558,
-0.5807228945059161,
-0.6515455280526591],
[-1.312361141867592,
-0.2743548305133629,
1.1606741102788714,
-1.1761338062044129,
1.0883228354403645,
1.2699594110885652],
[0.003027114111475479,
1.3208364735812375,
-1.0275578946133381,
-0.0927223383744209,
-0.1480970355949058,
-0.2832570813839251],
[0.7654036225164673,
0.1239969161471103,
-1.0275578946133381,
-2.458793360072105,
-0.5576885661855472,
-0.6515455280526591],
[-0.6426826861891162,
-0.34351318946817366,
-1.0275578946133384,
0.6096271649084705,
0.16131642055319087,
0.232346743952304],
[1.0657337621911611,
0.4601538298803248,
0.8234053242037419,
0.03180771539914147,
-0.6595971581321957,
-0.6515455280526592],
[-0.644662884912246,
-0.24643631106396452,
-1.0275578946133381,
-0.11051234605635826,
0.1487836150044233,
0.11705644760383047],
[1.158143035937221,
-0.016530165255332064,
-1.0275578946133381,
0.29747183011607425,
-0.6693325624378762,
-0.6515455280526591],
[1.4902388634621222,
-0.08914686963825198,
-1.0275578946133381,
-1.2383988330911941,
-0.7362765564992518,
-0.7316082338502102],
[0.254842385069488,
0.12281250746587795,
0.9197678345109216,
-0.31687643516683306,
-0.19913278776836768,
-0.22881444144159033],
[-0.7535738146843877,
0.8873156152189452,
-1.0275578946133381,
0.09822374407837464,
0.33894460988231856,
1.2699594110885652],
[0.4982830655942637,
-0.5482200725755738,
-1.0275578946133381,
0.3680388605877597,
-0.34986572683693823,
-0.36331978718147623],
[-1.5549354854509985,
-1.479209729566562,
1.165692991024037,
0.04426072077649765,
1.406475746346479,
1.2699594110885652],
[0.42464442557787246,
-1.2626002077151817,
0.891662102337994,
-2.139996422411785,
-0.27605573526400173,
-0.3825348365728883],
[1.358955496193081,
-2.4175380397526527,
0.813213135613559,
0.154421922191572,
-0.608575710821447,
-0.6811071425009856],
[-1.6083595968354392,
0.5973312003504277,
-1.0275578946133381,
-0.10102434195932501,
1.384978621738683,
1.2699594110885652],
[1.3256348446019537,
-0.01064861024319939,
-1.0275578946133381,
1.1774842101159146,
-0.6926172638921644,
-0.6515455280526591],
[-1.3166928265744384,
-1.7222412183186901,
-1.0275578946133381,
1.0529541563423523,
1.1204425235754478,
1.2699594110885652],
[-0.8228807699939324,
2.013284715484412,
1.0502587338852276,
0.7291760165310904,
0.4195547134350154,
1.2699594110885652],
[0.38999094792310013,
-2.5565343125828637,
-1.0275578946133381,
-1.0142447362987819,
-0.1196287349677516,
-0.17116929326735356],
[-0.8426827572252309,
0.12483615399423027,
1.0803720183562215,
1.4550083299541392,
0.6099870402635797,
1.2699594110885655],
[-0.582231619613569,
-0.13965768297152617,
0.9599188804722467,
0.6738293259650625,
0.10948524806957706,
0.15975655736252437],
[1.510453392094073,
0.33756578216317296,
-1.0275578946133381,
-2.3010552919589258,
-0.7696320727370319,
-0.7956583984882512],
[1.5310804621266758,
-2.460531026844922,
-1.0275578946133381,
-0.06781632761970843,
-0.6501522403983054,
-0.7613458102893007],
[0.2786047697470461,
-0.022076765718730334,
-1.0275578946133381,
1.4692403360996893,
-0.20379459995916865,
-0.2260694343856743],
[1.3256348446019537,
-0.023641266251452506,
0.8193902196076093,
1.4763563391724643,
-0.6957450596099056,
-0.6515455280526591],
[-1.533277061916766,
0.5748619927292556,
1.160674110278871,
0.7330077104933539,
1.2714625957131942,
1.2699594110885655],
[-0.78822729233916,
0.03149702530619862,
-1.0275578946133381,
1.4763563391724643,
0.42815985019742664,
1.2699594110885652],
[0.8058326797803684,
0.004349525720934149,
0.8394657425882718,
-2.4338873493173923,
-0.5559695864601482,
-0.6515455280526591],
[-0.5837717741760031,
-1.158379172010415,
-1.0275578946133381,
0.021845311097256424,
0.12382638684951633,
0.15548654638665496],
[1.3776150610841122,
0.13929412873002206,
-1.0275578946133381,
-0.4165004781856829,
-0.7103414396260233,
-0.6515455280526591],
[0.28256516719330577,
-0.9834178166277914,
-1.0275578946133381,
0.13143175841799132,
-0.14656195378887696,
-0.1903843426587658],
[-0.27882117081400637,
1.5507093671237322,
0.9398433574915842,
0.1862249820783588,
-0.10137988075894862,
-0.3344972130943577],
[-1.1804416530681743,
-0.044966949119732307,
-1.0275578946133381,
2.400142919892708,
1.0142835543220172,
1.2699594110885652],
[-1.0617422409714705,
0.057771395798443814,
1.160674110278871,
0.45520989822925323,
0.9634269052556225,
1.2699594110885657],
[0.393841334329186,
1.5716241338903396,
0.8996923115302593,
-0.0788856657329139,
-0.3411495363809618,
-0.2779195676640884],
[0.24487951024374088,
0.08038310863048338,
-1.0275578946133381,
-0.41650047818568287,
-0.1759254385147931,
-0.20719751087625154],
[0.6614431895521503,
-0.17001254668089483,
-1.0275578946133381,
1.4431483248328478,
-0.5315600743594547,
-0.6515455280526591],
[1.167218946751566,
0.4311490332069274,
0.819390219607609,
0.7078280073127655,
-0.6850596253913881,
-0.6515455280526591],
[1.5104533920940728,
-1.3350567565326878,
0.809352458117278,
0.5797399520028156,
-0.6952944126078363,
-0.7956583984882509],
[-1.0069773700349105,
1.9767250429507914,
1.1355797065530433,
-1.661801015921306,
0.8316741735148729,
1.2699594110885655],
[1.5624336085762316,
-1.0640461704735713,
-1.0275578946133381,
-0.2504604064875998,
-0.7278479299436335,
-0.8436960219667817],
[-0.8228807699939323,
2.2058504438954833,
1.060296495375559,
-0.8399026610157945,
0.41309862227404104,
1.2699594110885652],
[0.08210812722108418,
0.7177725729518479,
0.924400647506459,
-2.3783277868645722,
-0.17699898156734292,
-0.2524637330002516],
[1.4440342265890926,
0.04145484433784166,
-1.0275578946133381,
-0.3749904602611622,
-0.7120791039136568,
-0.6195204457336386],
[-0.9441679417856357,
-0.2601835416729557,
-1.0275578946133381,
-0.8648086717705072,
0.9294843776764794,
1.2699594110885652],
[-1.3672291481543146,
2.367940864016002,
-1.0275578946133381,
-0.05536332224235221,
1.0828588884923558,
1.2699594110885655],
[-0.5610544943800968,
0.06500536448540889,
0.9576882668077287,
-0.07335099667631113,
0.08371090079378164,
0.1811066122418713],
[-0.009967940009064159,
0.0714227843714944,
0.9398433574915844,
0.6337029753046924,
-0.08829308718976342,
-0.2832570813839251],
[0.2441575627925999,
-2.41740435094907,
-1.0275578946133381,
0.09407274228592262,
-0.11887818837413622,
-0.2832570813839251],
[-1.2629799362095415,
-0.10268028592575491,
-1.0275578946133381,
-1.8211994847514656,
1.0470512361654252,
1.2699594110885652],
[-0.6149599040652982,
-1.0388155893964528,
-0.043857268560877016,
0.4303038874745406,
0.2103208194529656,
0.3572445649964837],
[-0.6496133817200705,
0.5260828772009764,
-0.05389503005120827,
-0.8897146825252196,
0.6628982650535189,
0.5493950589106058],
[-1.6510988859429918,
2.4448210849491523,
1.1727194240672687,
0.4801159089839655,
1.578444476622473,
1.2699594110885652],
[1.4088031909734076,
0.2843242661528951,
0.8093524581172782,
-2.4089813385626795,
-0.7371511172066532,
-0.718798200922602],
[1.4370210227780078,
-0.3205815195509771,
-1.0275578946133381,
-2.3662853201260297,
-0.6948514036905504,
-0.6240954574934987],
[0.134421550219154,
-1.3100407574713313,
0.9298055960012528,
-0.14253435988384577,
-0.08622960627828549,
-0.24322572848514956],
[-0.8209555767908895,
-0.03831517649560084,
1.0536046543820046,
0.07055039879536079,
0.4961055086294483,
1.2699594110885652],
[-0.008524045106781983,
-2.2083287522149515,
0.9197678345109216,
1.3269202746441893,
-0.056785768814103244,
-0.2992696225434353],
[-1.1226333517077132,
-0.023850029590593153,
1.1606741102788711,
-0.25710200935552313,
0.9854518342625344,
1.2699594110885655],
[-1.1001085912321114,
-1.5496505899361077,
1.1606741102788714,
-1.836143091204293,
1.0509231985707517,
1.2699594110885652],
[-1.0953831170064605,
-0.13770787467333392,
1.1424236348419052,
1.453714511213635,
0.9883638059888514,
1.2699594110885655],
[-0.008524045106781971,
-0.5631536636050155,
0.9331515164980299,
0.6793639950216654,
-0.08622960627828503,
-0.2992696225434353],
[-0.2926825618759153,
0.06311074999620205,
0.9398433574915843,
-2.259545274034404,
-0.04565349212703787,
-0.3537122624857699],
[0.5459315973695757,
-0.34641340622927147,
-1.0275578946133381,
-0.1389763583474583,
-0.36299149412963133,
-0.36331978718147623],
[-0.28286407654039647,
0.28903650884485305,
-1.0275578946133381,
0.8288000595499402,
-0.05615693662949304,
-0.41135741066000686],
[0.1446808034722116,
0.7115411806501649,
-1.0275578946133381,
-0.03897778885109397,
-0.16058395847346435,
-0.24701817244398094],
[0.25426482710857506,
-0.35837157870844566,
0.9130759935173675,
0.01520370822933314,
-0.18555936056707356,
-0.21920691674588422],
[-1.391197803532199,
-0.39218168863061365,
1.1606741102788714,
2.2733486833232623,
1.1266024637657335,
1.2699594110885652],
[0.6187039004445977,
0.632650500631836,
-1.0275578946133381,
1.5062435520781192,
-0.4417725569291072,
-0.44017998474712483],
[0.9747683833473837,
-1.5760124887565556,
-1.0275578946133381,
0.2870943256349441,
-0.5434303767458363,
-0.6515455280526591],
[-1.4119898901250627,
0.9837578770444905,
-1.0275578946133381,
0.6544579842669529,
1.1634357061347333,
1.2699594110885655],
[-1.1828918989629562,
0.029531648723111197,
-1.0275578946133381,
0.24765980860664957,
1.0133768964585121,
1.2699594110885655],
[1.3949417999114984,
0.5249533606574875,
0.812698378614055,
-0.366688456676258,
-0.7235476882120379,
-0.6195204457336386],
[1.4526975960027857,
-1.8592259015355213,
0.806006537620501,
-0.08442033478951678,
-0.6816604312740973,
-0.8436960219667817],
[-1.6834421317541122,
0.007821830734667233,
1.177403712762757,
0.7872900416254193,
2.9552899814654014,
1.2699594110885652],
[0.48702068535646276,
0.03994899773152084,
-1.0275578946133381,
0.5398903347952755,
-0.3694534103352649,
-0.36331978718147623],
[0.2730354608382434,
-0.07409803621924607,
0.9197678345109216,
0.8661590756820089,
-0.19478181970465253,
-0.21920691674588424],
[-1.240888344204624,
1.7333245267102688,
1.1606741102788714,
0.7042700057763779,
0.9748039376478912,
1.2699594110885652],
[1.620189404667519,
0.4430763562698022,
0.7993146966269468,
0.5216259269084863,
-0.8199708704849331,
-0.9397712689238432],
[-0.03327652914590508,
-1.689996112728459,
-1.0275578946133381,
-0.10339634298358333,
-0.013976782522100372,
-0.3084196460631555],
[-1.162918019481386,
-0.38013689433849146,
-1.0275578946133381,
-0.09272233837442084,
1.0159435382044668,
1.2699594110885655],
[-1.0076993174860516,
0.45235433589954915,
1.1249842916465826,
-0.842669995544096,
0.8560840647130651,
1.2699594110885655],
[-1.1451581121833152,
0.5648585215356416,
-1.0275578946133381,
-0.7851094373554273,
0.9734839504642594,
1.2699594110885652],
[-1.671890972535855,
-1.8593406171944298,
1.1807496332595337,
1.0280481455876396,
1.980738170566781,
1.2699594110885652],
[-0.5256605578010772,
0.018052424771751677,
-1.0275578946133381,
1.5223366667196256,
0.06456945228761134,
-0.06031323908612894],
[-1.3542340940337751,
2.1405031607117775,
-1.0275578946133381,
-2.093505202336322,
1.0397766401311797,
1.2699594110885652],
[1.0657337621911613,
-0.39293052667922695,
-1.0275578946133381,
-0.0014002989404751805,
-0.6247437231259602,
-0.6515455280526591],
[-0.30019081536778264,
-0.02706791836575953,
-1.0275578946133381,
-0.6489565785629993,
-0.04636927656037387,
-0.41135741066000686],
[-0.815950074462978,
-0.754356634620399,
1.0522662861832939,
-0.07777873192159342,
0.5248812292326572,
1.2699594110885652],
[0.3825652027113632,
1.539745296234652,
-1.0275578946133381,
0.36625985981956594,
-0.3250403091431625,
-0.28096957550399493],
[0.9791000680542302,
-2.5565343125828637,
-1.0275578946133381,
0.7789880380405154,
-0.4889293771695189,
-0.6515455280526591],
[0.36111304987745657,
0.8894022709834554,
0.9063841525238132,
0.0650157297387581,
-0.2881021044268904,
-0.23521945790539445],
[-0.5860820060196547,
1.083399578423982,
-1.0275578946133381,
0.08161973690856643,
0.04359929025597683,
0.14908152992285087],
[0.8132584249921052,
-0.006367160992754838,
0.8365978107338915,
0.907076093350465,
-0.5631893013068299,
-0.6515455280526591],
[0.8058326797803684,
0.13625605522584489,
-1.0275578946133381,
-2.0104851664872805,
-0.5652520769773091,
-0.6515455280526591],
[1.239001150465023,
-2.2464879637286406,
-1.0275578946133381,
-0.40819847460077874,
-0.5806278701608492,
-0.6995831515311898],
[0.7995320474795008,
-0.09407046516683165,
-1.0275578946133381,
0.38049186596511575,
-0.5545631485029985,
-0.6515455280526591],
[0.17051892277620853,
-0.5891569411496688,
-1.0275578946133381,
-2.3010552919589258,
-0.13416442805871576,
-0.2992696225434353],
[-0.9441679417856357,
2.1735597949654792,
1.1355797065530433,
0.19369678530477247,
0.8612850398554169,
1.2699594110885652],
[0.8924663739172993,
0.25698363621703574,
0.8365978107338913,
-0.27418041673018306,
-0.5953577113357741,
-0.6515455280526591],
[0.5415999126627291,
0.007730550482790062,
0.8695790270592655,
-0.3293294405441892,
-0.3978521142183613,
-0.36331978718147623],
[1.5751398837163149,
-1.1227381433381451,
0.8033298012230793,
-0.24713960505363824,
-0.7197637810116264,
-0.8052659231839574],
[-1.3542340940337751,
-2.5565343125828637,
-1.0275578946133381,
0.6959680021914737,
1.1852685608161095,
1.2699594110885652],
[-1.591032858008053,
0.6231250692587272,
-1.0275578946133381,
1.80843648256863,
1.409383718111637,
1.2699594110885652],
[1.6652389256187228,
-0.04098295318227512,
-1.0275578946133381,
-0.4065380738837979,
-0.8042303053686503,
-0.9397712689238432],
[-0.0037110620991746925,
-0.03982775344322855,
0.9275749823367351,
1.5234010261535877,
-0.12226016363404378,
-0.2885945951037619],
[-0.5456529487557535,
0.45017805145749296,
0.9599188804722466,
-0.4663124996951079,
0.04414186276566541,
0.21313169456089176],
[1.6837207803679348,
0.45266994963602986,
-1.0275578946133381,
0.48011590898396567,
-0.8291060198827781,
-0.9397712689238431],
[-0.2900835510518074,
0.9883726486825064,
-1.0275578946133381,
0.16879077455005997,
-0.0868249381793985,
-0.41135741066000686],
[1.5624336085762316,
-1.6274918565778413,
-1.0275578946133381,
0.13143175841799123,
-0.7034618248769492,
-0.8917336454453123],
[-1.1520888077142697,
-0.967080356980929,
-1.0275578946133381,
1.152578199361202,
0.9124911446133906,
1.2699594110885652],
[0.984875647663359,
-0.2914280523086143,
0.8327739015947175,
-0.366688456676258,
-0.5989739748947381,
-0.6515455280526591],
[-0.26669245363483607,
-0.10755083057503959,
0.9398433574915843,
1.018085741285755,
-0.04591251909048241,
-0.3921423612685946],
[1.3949417999114986,
0.9815115261494605,
-1.0275578946133381,
0.7540820272858028,
-0.7593435725372851,
-0.7236019632704551],
[0.5567608091366922,
-0.017261435341042936,
-1.0275578946133381,
-0.26083791096873,
-0.38192288926332185,
-0.36331978718147623],
[1.1798014594714532,
-0.0672469684348031,
0.8235726202285804,
0.5797399520028154,
-0.6543970576172359,
-0.6515455280526591],
[1.6249148788931693,
-0.11789876565168765,
0.799314696626947,
-0.027061037293815256,
-0.7963344699450562,
-0.9397712689238431],
[1.3083081057745676,
0.9079485973740771,
0.8193902196076093,
-0.018004306110283502,
-0.7249378196421484,
-0.6515455280526591],
[-1.528326565108941,
0.9765007736049169,
-1.0275578946133381,
0.024691712326366428,
1.1882018656686248,
1.2699594110885655],
[0.03479280196168347,
-0.04968572471277463,
-0.043857268560877016,
0.15633776917270387,
-0.19820850976596327,
-0.41135741066000686],
[-1.055059070280907,
-0.037716426361675257,
-1.0275578946133381,
0.8586872724555953,
0.9724279607173498,
1.2699594110885652],
[-1.608359596835439,
0.136856833718224,
1.1640200307756483,
1.4597523320026558,
1.4297355205065647,
1.2699594110885652],
[0.14741660433969364,
-1.1197396236901662,
-1.0275578946133381,
-0.2670644136574081,
-0.05452565995130234,
-0.2672445402244149],
[-0.6084623770050284,
0.6564707643040966,
-1.0275578946133381,
0.21237629337080682,
0.0754051574556909,
0.16509407108236113],
[-1.1382274166523605,
0.8088168468432789,
-1.0275578946133381,
0.06667613045573886,
0.9660920222359118,
1.2699594110885655],
[0.9964268068816163,
-1.4593644910431192,
0.8394657425882718,
-0.914620693279932,
-0.5384359932610611,
-0.6515455280526591],
[-0.6214574311255681,
0.020235727594880727,
-1.0275578946133381,
0.019354710021785237,
0.11487511316751428,
0.15308466521272845],
[-1.3474084090411684,
0.6105148589420032,
1.160674110278871,
0.03633608099090735,
1.0514031939102528,
1.2699594110885657],
[0.016228438932341136,
0.8698652743922425,
0.9369754256372039,
0.323563841382916,
-0.13194187080138886,
-0.28096957550399504],
[-0.3110755615542176,
0.8899058592543242,
0.9460204414856341,
1.0088896757763224,
-0.05791971930450673,
-0.3189773655089863],
[0.6109068679722739,
0.6702963186892553,
0.8595412655689342,
0.7291760165310903,
-0.472374476318565,
-0.43537622239927193],
[-1.5605047943598014,
0.20859679029870495,
-1.0275578946133381,
0.45876789976564064,
1.3076269918290797,
1.2699594110885655],
[0.41213066975809354,
0.1617775465825103,
-1.0275578946133381,
0.017971042757634545,
-0.29303597730297226,
-0.26724454022441485],
[0.45641011342808047,
0.08104132542322211,
0.8863086295431507,
0.5797399520028155,
-0.34935221545461687,
-0.33129470486245577],
[0.8231594186077547,
0.023015328550777866,
0.8394657425882719,
-0.033331081959337275,
-0.5649347268741596,
-0.6515455280526592],
[-0.9723238923801382,
-0.03256587476121223,
1.1481269084159573,
0.10029924497460078,
0.9420242559209953,
1.2699594110885655],
[1.4757999144393008,
-0.8598310463659219,
-1.0275578946133381,
0.5133239233235822,
-0.7208819966235369,
-0.8436960219667817],
[1.1177139786733195,
-1.02730589455306,
0.8193902196076093,
0.5299279304933906,
-0.6311039481099449,
-0.6515455280526591],
[0.5528622929005301,
-0.4576398340995877,
0.8715865793573319,
0.07165733260668132,
-0.3795372649460477,
-0.3633197871814763],
[1.5855359270127465,
1.3452126977656607,
0.7993146966269468,
-0.018004306110283502,
-0.8544033566768057,
-1.804448491537394],
[0.4073176867504863,
-0.3215229796720116,
0.8896545500399279,
-1.0516037524308506,
-0.29378152296699933,
-0.2912633519636802],
[-0.9466431901895479,
1.1717618136274204,
1.1147872006087858,
-0.04646831840138344,
0.7859031344105933,
1.2699594110885655],
[-0.9116803064842866,
0.18163013212451012,
1.1079758624546323,
2.254669175257228,
0.8217679235453906,
1.2699594110885652],
[0.8370208096696636,
-0.35133065040769873,
0.8354506379921393,
0.2410182057387261,
-0.5483373164793678,
-0.6515455280526592],
[-1.146313228105141,
0.46157659790195654,
1.1606741102788714,
0.5216259269084863,
0.981403873566058,
1.2699594110885652],
[-1.2156201834146858,
0.03501660908770618,
-1.0275578946133381,
-1.0889627685629193,
1.0324433779998843,
1.2699594110885652],
[0.6556676099430215,
-0.002952052657517315,
-1.0275578946133381,
0.04841172256894979,
-0.5384359932610568,
-0.6515455280526591],
[-0.5341017895374961,
1.034237931673806,
-1.0275578946133381,
-0.8814126789403155,
0.022826514170698545,
0.21313169456089176],
[0.5589266514901154,
0.4793981329367088,
0.8695790270592655,
0.015203708229333144,
-0.41894786459712247,
-0.36331978718147623],
[-1.52807904026855,
0.11827436050285005,
1.1626816625769374,
-0.6406545749780952,
1.3178694066664467,
1.2699594110885655],
[0.17821969558838016,
-0.013810717913595592,
-1.0275578946133381,
0.7734533689839124,
-0.146103626705686,
-0.23521945790539445],
[-0.1046086876950144,
-0.06000307741293773,
0.9398433574915844,
-0.321404800758599,
-0.07463485245694938,
-0.32838333374254486],
[-0.5866070587113936,
0.5761817916496687,
0.9617439280159433,
0.2763394573545001,
0.07393578762407182,
0.16072701440249468],
[0.5078127719493261,
0.08109639706667107,
0.8796167885495967,
0.16630017347458875,
-0.3799540241694156,
-0.36331978718147623],
[-1.7065444501906277,
2.4952626277726724,
1.1807496332595337,
2.4725967693609627,
3.5488146210623355,
1.2699594110885652],
[0.11853870629405001,
0.07558356393124473,
-1.0275578946133381,
0.28916982653117024,
-0.12901950710279836,
-0.23521945790539445],
[1.0946116602368046,
0.6061317090599673,
0.8193902196076092,
-0.38329246384606624,
-0.6728788923496287,
-0.6515455280526591],
[1.4488472095966998,
-2.308575411907902,
-1.0275578946133381,
0.5409972686065961,
-0.6246828648375461,
-0.672895582932006],
[1.6006968234867094,
-0.11987655549146968,
0.8043335773721125,
0.4863424116726437,
-0.7754207862402841,
-0.8677148337060471],
[-0.05357356605798604,
-0.10512101818093414,
0.9398433574915842,
0.011882906795371476,
-0.07958206972824605,
-0.30567463900723946],
[-0.07783100041632669,
0.16626848585976392,
0.9398433574915841,
-0.06781632761970846,
-0.0853567891690295,
-0.3393009754422109],
[1.2313003776528513,
0.017063240424407437,
-1.0275578946133381,
-0.3390151113932442,
-0.6823071224522056,
-0.6515455280526591],
[1.6115260352538259,
-0.040160449838744575,
-1.0275578946133381,
0.4801159089839657,
-0.7847211092215164,
-0.8917336454453123],
[-0.6438378021109419,
0.7219649547129713,
0.4312634419814682,
1.1110681814366814,
0.5194777493016277,
0.6294577647081572],
[-1.2343908171443543,
-0.36139915968936454,
-1.0275578946133381,
0.40539787671982824,
1.0377233267344195,
1.2699594110885652],
[1.3492622157302079,
-0.047141902820185225,
0.8139150769765195,
-0.48895432765393737,
-0.694228552595242,
-0.6253431879734606],
[1.478110146282952,
0.47962264364510654,
0.8113600104153443,
-0.018004306110283502,
-0.7534477348949812,
-0.7284057256183082],
[1.0397436539500817,
0.7606333979604605,
0.8294279810979406,
-0.018004306110283506,
-0.6655993869038007,
-0.6515455280526591],
[1.3160088785867392,
0.4201219629963734,
-1.0275578946133381,
0.45244256370095187,
-0.7112681939127629,
-0.6515455280526591],
[-1.250851219030371,
1.436429969279872,
-1.0275578946133381,
1.8748525112478636,
1.0037556565422496,
1.2699594110885655],
[-1.3224684061835672,
-0.9215228864268009,
-1.0275578946133381,
-0.28366842082721644,
1.1180958796934306,
1.2699594110885652],
[0.4939513808874172,
-2.5565343125828637,
-1.0275578946133381,
0.03180771539914146,
-0.2660627510451372,
-0.36331978718147623],
[-0.8965194100103236,
-0.1023184019218062,
-1.0275578946133381,
-0.04291031686499592,
0.7174656670335128,
1.2699594110885652],
[-1.6177449137002733,
0.453555436129951,
1.1606741102788711,
0.0006752019557509509,
1.378060022247896,
1.2699594110885655],
[1.2216744116376368,
0.9916057165380878,
-1.0275578946133381,
-0.06781632761970846,
-0.7020604416445299,
-0.6515455280526591],
[-0.30827662682056284,
0.2192268368404901,
-1.0275578946133381,
-2.2346392632796923,
-0.015666039434221447,
-0.19999186735447197],
[0.0514531277572471,
-0.037225849484349975,
-1.0275578946133381,
-0.06781632761970843,
-0.09255704520639532,
-0.26724454022441485],
[1.120189227077232,
-0.1220822840772644,
0.8251260833163698,
-2.4516773569993293,
-0.6453415845311968,
-0.6515455280526591],
[-1.654564233708469,
1.502886087517727,
1.1707118717692024,
-0.09272233837442095,
1.4968708686583923,
1.2699594110885652],
[0.518978892526975,
-0.12063401989653316,
0.8751555612205606,
-0.11762834912913335,
-0.38005093381700294,
-0.36331978718147623],
[0.9479119381649351,
0.3340393120978894,
-1.0275578946133384,
0.18124377992741617,
-0.5953686017663318,
-0.6515455280526592],
[1.592466622543701,
-0.40927332303676245,
-1.0275578946133384,
-0.1275907534310184,
-0.7487823927108653,
-0.8244809725753696],
[-0.9268412029582495,
-0.265007708253949,
-1.0275578946133381,
0.7789880380405154,
0.9330043434994983,
1.2699594110885652],
[-0.2830862142176707,
-0.07625859305400244,
-1.0275578946133381,
-0.006509224223493124,
-0.04669553189601218,
-0.41505261246604774],
[-0.6582767511337637,
-0.040644235228651654,
-1.0275578946133381,
-1.8361430912042929,
0.138917653654752,
0.11705644760383044],
[-1.188667478572085,
0.15773774586441125,
-1.0275578946133381,
0.45244256370095176,
1.0118124672038364,
1.2699594110885652],
[-0.26842512751757464,
-2.3183393054277674,
0.9415163177399727,
0.11482775124818294,
0.04936382657567376,
-0.3473072460219659],
[1.6808329905633703,
-0.14022217633923859,
-1.0275578946133381,
0.8163470541725841,
-0.8027546273889986,
-0.9397712689238431],
[1.0108657559044383,
-0.32122996046832536,
0.8260820606011635,
0.06501572973875812,
-0.5927632264086153,
-0.6515455280526591],
[-0.8440578952274043,
0.2473516134002498,
1.0602964953755587,
0.7568493618141041,
0.5411561823227141,
1.2699594110885655],
[0.2687037761313969,
-0.003372137367550041,
-1.0275578946133381,
-1.0142447362987819,
-0.19415607507068175,
-0.2992696225434353],
[1.242151466615457,
0.024394023254135564,
-1.0275578946133381,
-0.058759596436176655,
-0.6832338767389451,
-0.6515455280526591],
[1.1437040869143988,
0.22647758333896278,
-0.10408383750286443,
0.5299279304933906,
-0.6439626638384309,
-0.6515455280526591],
[0.2662285277274846,
0.5381745012486109,
0.9140319708021609,
-0.0037722999647334726,
-0.2259307956153433,
-0.22606943438567434],
[0.9271198515720718,
-2.5565343125828637,
-1.0275578946133381,
0.01520370822933314,
-0.49479632971055865,
-0.6515455280526591],
[1.1643628909008976,
-0.2063688274136757,
0.8209344906061218,
0.03563940936140493,
-0.6624267482367027,
-0.6515455280526591],
[0.8828404079020848,
-0.18253189435497028,
-1.0275578946133381,
0.015203708229333143,
-0.5647936823838688,
-0.6515455280526591],
[1.6635062517359844,
0.4292369221358457,
-1.0275578946133381,
0.03180771539914146,
-0.8206735742847678,
-0.9397712689238431],
[-0.2922493934052307,
0.30429881367989503,
-1.0275578946133381,
-0.08026933299706464,
-0.060887638996232646,
-0.3993480047903742],
[-1.4206532595387555,
0.3529609829518997,
1.165692991024037,
1.9246645327572884,
1.3007767726267443,
1.2699594110885652],
[-0.19218747667707545,
0.5446656761669962,
0.9398433574915842,
-0.09106193765744003,
-0.08636818070950533,
-0.38253483657288856],
[1.6721696211496773,
-0.03404300611490871,
-1.0275578946133381,
-0.07404283030838656,
-0.8067073362630645,
-0.9397712689238431],
[0.8534812115556805,
-0.9922777015676499,
-1.0275578946133381,
0.11897875304063502,
-0.5445626612171335,
-0.6515455280526591],
[0.14452881453512925,
-0.018967484501814683,
0.9298055960012528,
-2.20973325252498,
-0.1431964786462104,
-0.2672445402244149],
[0.3404859798448539,
0.01074468390230489,
0.9025602433846396,
0.1385477614907664,
-0.23670815292684422,
-0.18489432854693374],
[-0.16793004231873482,
0.10946617050973875,
-1.0275578946133381,
-0.04789151901593849,
-0.06235578800660215,
-0.344104737790064],
[1.382565557891937,
-0.025769095972862786,
-1.0275578946133381,
0.07450373383579142,
-0.7028943069647389,
-0.6515455280526591],
[1.2274499912467656,
-0.23441613122786464,
-1.0275578946133381,
0.46351190181415736,
-0.6611487241799926,
-0.6408705006129856],
[-1.654564233708469,
0.17985569677638485,
-1.0275578946133381,
0.38049186596511564,
1.5674141837206963,
1.2699594110885652],
[-0.35258357325059325,
0.5372258538171669,
0.939843357491584,
0.48011590898396583,
-0.06738944175643244,
-0.4593950341385374],
[0.491785538533994,
0.5869035285817917,
-1.0275578946133381,
-0.22970539752533942,
-0.3912623775292753,
-0.36331978718147623],
[-1.0629798651734266,
-0.8920509115439784,
1.1463344510069697,
0.11008374919966635,
1.0003865463973576,
1.2699594110885655],
[-0.39404398401612445,
0.08810382299680516,
0.9398433574915841,
-0.6780135911101638,
-0.04424861687873256,
-0.45939503413853755],
[0.5921362342426054,
-1.146242557894625,
0.8662331065624884,
0.21445179426703262,
-0.3951346055422824,
-0.45939503413853716],
[-0.35289297930108227,
0.010925536259622276,
0.9398433574915843,
0.3182268390783347,
-0.04229108486490991,
-0.4353762223992722],
[-1.5708183293761024,
-0.042831008845740946,
-1.0275578946133381,
-0.03460831328009182,
1.340664385339448,
1.2699594110885652],
[-0.1798662401776009,
-1.1630554832985747,
0.9420739711561021,
0.5576012757764045,
-0.01143798559091178,
-0.28859459510376184],
[1.1647436983476536,
0.47744133739690414,
-1.0275578946133381,
-0.2386004013663082,
-0.6850724376626347,
-0.6515455280526591],
[0.8318227880214477,
0.6189930822416798,
0.8394657425882718,
-0.316876435166833,
-0.596193712034522,
-0.6515455280526591],
[1.137515965904618,
0.16746719828784465,
0.8208241855347994,
0.049597723081078945,
-0.6747745746492859,
-0.6515455280526591],
[-0.9303065507237267,
0.5683212030056752,
1.1205230643175466,
0.03678891755008396,
0.747515682956305,
1.2699594110885652],
[0.7870620460507,
0.08751287018577458,
-1.0275578946133381,
-0.09272233837442091,
-0.560954627663807,
-0.6515455280526591],
[0.4939513808874172,
1.4258997517343683,
-1.0275578946133381,
-1.9606731449778554,
-0.35687099359980956,
-0.2672445402244149],
[-1.3624849220468163,
-0.26019732287078856,
1.1606741102788714,
-0.010888303037508512,
1.0858714306707582,
1.2699594110885652],
[-0.18921717859238069,
-0.36952639097820345,
0.9398433574915842,
0.24884580911877868,
-0.04725482675710389,
-0.36331978718147623],
[1.6086382454492612,
0.5936130425771036,
-1.0275578946133381,
-0.051212320449900144,
-0.8277006122831106,
-0.9397712689238431],
[0.05830766179885042,
0.09872415770581178,
0.9341074937828233,
0.07450373383579142,
-0.1179133718628538,
-0.2741070578642049],
[1.3949417999114986,
-0.2202323833423584,
0.8193902196076093,
-2.5086053815815297,
-0.6926172638921648,
-0.6515455280526591],
[0.9906512272724877,
-1.2769300272889106,
-1.0275578946133381,
1.2771082531347644,
-0.5747438345914775,
-0.6515455280526591],
[-0.11826005768022778,
-0.5956479510935556,
-1.0275578946133381,
0.015203708229333188,
-0.04506425521782632,
-0.33129470486245577],
[-1.065455113577339,
0.9469782202658675,
1.1606741102788714,
2.4725967693609627,
0.9154045143843887,
1.2699594110885652],
[-1.4812968454346072,
-0.05361401376156978,
-1.0275578946133381,
1.1027661778517772,
1.1582821561729344,
1.2699594110885652],
[-0.6091843244561694,
0.1926399798169334,
-1.0275578946133381,
0.4801159089839658,
0.10237933459748931,
0.16189156285045905]]
# This list gives the average accuracy of each cluster, computed as:
# number of "c" (hunt) plays in the cluster / number of elements in the cluster
self.accuracy = [0.30763358778625954,
0.5448634590377113,
0.7056499407348874,
0.5014644351464436,
0.5817369093231162,
0.21309036816660468,
0.056481979558902634,
0.013043478260869565,
0.6285963382737576,
0.992707672796449,
0.004991680532445923,
0.40569395017793597,
0.5124421296296297,
0.40532909828833596,
0.5558285558285558,
0.6279854620976116,
0.47768206734534063,
0.43078038173471855,
0.41393168117883455,
0.013022618231665525,
0.76793893129771,
0.0028708133971291866,
0.2601685599120557,
0.4740061162079511,
0.17678173719376392,
0.09327548806941431,
0.41829268292682925,
0.7872073578595318,
0.32897362003990244,
0.31643803766565914,
0.4247231367853936,
0.3897544853635505,
0.33668561434193267,
0.7765834932821497,
0.5605461731943946,
0.22340425531914893,
0.03389330975537872,
0.5380233551694674,
0.798125,
0.7173154362416108,
0.42102590147282887,
0.957667731629393,
0.6845726970033297,
0.011235955056179775,
0.4047568932856241,
0.6038415366146459,
0.48743987172634956,
0.46289517470881864,
0.1949286846275753,
0.9534103615355944,
0.18986486486486487,
0.5478158205430933,
0.5183743003163787,
0.07758620689655173,
0.40902612826603324,
0.6951450189155107,
0.8598207008964955,
0.9305873379099924,
0.38461538461538464,
0.5380631796996375,
0.10408560311284047,
0.4720883534136546,
0.16638935108153077,
0.9914201851433733,
0.6498257839721254,
0.051075268817204304,
0.008159866777685262,
0.5872210953346856,
0.75,
0.2566103164282618,
0.43799111718483086,
0.42420937840785167,
0.0032552083333333335,
0.6185538504078529,
0.15673981191222572,
0.608010801080108,
0.6683435141933166,
0.38020351526364476,
0.9763546798029556,
0.8388342696629213,
0.7485955056179775,
0.11692529772645255,
0.31734006734006737,
0.0205699188526137,
0.015204170286707211,
0.3747252747252747,
0.060496380558428126,
0.6889352818371608,
0.5800193986420951,
0.28110599078341014,
0.3955431754874652,
0.1964461994076999,
0.35591133004926107,
0.30368098159509205,
0.34215133242559814,
0.7472712006717045,
0.20897615708274894,
0.4872512437810945,
0.4685123212655917,
0.5026843741169822,
0.4297924297924298,
0.755165581658647,
0.49142327306444133,
0.018463642580351037,
0.18500923726576934,
0.4385212965443343,
0.8519602106495027,
0.4664905909541103,
0.10808767951625095,
0.27095808383233533,
0.009983361064891847,
0.49170834430113186,
0.4338492261607589,
1.0,
0.921747042766151,
0.8929889298892989,
0.5950611888111889,
0.8984423676012461,
0.11720096518441916,
0.5363438782902681,
0.31161364507474126,
0.8326359832635983,
0.6173988943846378,
0.5621289449015727,
0.5730935251798561,
0.5146081504702195,
0.7330710291985918,
0.6359832635983264,
0.4109396914446003,
0.10874053682037164,
0.5455615728749705,
0.10471204188481675,
0.5269814502529511,
0.820677570093458,
0.9589381999170469,
0.4303630363036304,
0.5357769545720413,
0.7739957068383931,
0.808610400682012,
0.3764564081960627,
0.9941754641427011,
0.9299655568312285,
0.5300751879699248,
0.5327852004110997,
0.4655577299412916,
0.31878999418266435,
0.145935960591133,
0.1977142857142857,
0.48074608904933813,
0.3124202467035304,
0.9819680577022154,
0.6471951784886416,
0.8828754389588928,
0.8472344161545216,
0.17105263157894737,
0.6996197718631179,
0.5493449781659389,
0.33831808585503165,
0.9179400113186191,
0.5075783091950151,
0.31774744027303753,
0.39846005774783444,
0.35416139640779154,
0.9562872551802442,
0.020612653879186944,
0.448972602739726,
0.48043184885290147,
0.0073541701487548055,
0.17417162276975362,
0.29827315541601257,
0.9057154776804339,
0.40791984732824427,
0.3641851106639839,
0.3098503740648379,
0.3705386112913692,
0.6262398557258791,
0.4777542372881356,
0.5383670963781461,
0.4950955085183273,
0.40476190476190477,
0.25924075924075923,
0.7658087067047982,
0.5026680896478122,
0.6924386920980926,
0.731462086300796,
0.7467948717948718,
0.6818663838812301,
0.5140845070422535,
0.305744888023369,
0.4764181007010835,
0.4614168247944339,
0.4318518518518519,
0.21048951048951048,
0.6226904376012966,
0.42008066227977076,
0.5179570474643567,
0.6607629427792916,
0.11895910780669144,
0.6896493594066082,
0.19865319865319866,
0.4741726492733322,
0.764037985136251,
0.5792544956532095,
0.4708209693372898,
0.8691492996241886,
0.4578106203815088,
0.9372099372099372,
0.5889253871421868,
0.5537513997760358,
0.5482832618025751,
0.012973533990659055,
0.40542035398230086,
0.46336272423228947,
0.5070445084854307,
0.5869851007887817,
0.4701335207308503,
0.9561975768872321,
1.0,
0.009992862241256246,
0.6859635210150674,
0.5163269778654394,
0.3633666037226868,
0.5140039447731756,
0.4318330071754729,
0.4681870448447681,
0.0,
0.5506361323155217,
0.45352400408580185,
0.6963912133891214,
0.4004587155963303,
0.010676965015901863,
0.5699448231093801,
0.5457276368491322,
0.018212267113250993,
0.459802538787024,
0.6549405069839628,
0.5215590284315571,
0.4848746758859118,
0.2830188679245283,
0.0,
0.8156424581005587,
0.46451284660391756,
0.6962332928311057,
0.6081235697940504,
0.5730710624740268,
0.9418672930123312,
0.42763713080168775,
0.7689984901862104,
0.5684097421203438,
0.2902843601895735,
0.5897435897435898,
0.7626561472715319,
0.27549751243781095,
0.9632224168126094,
0.5773672055427251,
0.5722150259067358,
0.5267597440372309,
0.6704918032786885,
0.2668410725964683,
0.6773872679045093,
0.4245582238332578,
0.5323590814196242,
0.5153093105602999,
0.6826884722776226,
0.5121562375448386,
0.00987713803902674,
0.7053824362606232,
0.6672519754170325,
0.48134018908608395,
0.5455265241488519,
0.9373737373737374,
0.5304246655031996,
0.5440699935191186,
0.27380952380952384,
0.4193202146690519,
0.4393162393162393,
0.03729071537290715,
0.456645056726094,
0.5629863301787592,
0.3611650485436893,
0.282472613458529,
0.7747220596840257,
0.5293098469535085,
0.45551839464882943,
0.5333013128402178,
0.5858164481525626,
0.5526072911859714,
0.7510755992624463,
0.3282329713721619,
0.5593099768806686,
0.5468113975576662,
0.41034271725826194,
0.6544583526502858,
0.5114116652578191,
0.4516000795070563,
0.5640434639446823,
0.4832914121451671,
0.5309815950920246,
0.7248707639287766,
0.3947887879984208]
# Convert the centroid coordinates to an np.array so that np.dot can be used
self.centros = np.array(self.centros)
# List storing my player's food at each round
self.comida_historico = []
def faz_previsao(self, data):
"""Funcao recebe os dados e, com base no cluster de centroid mais proximo, devolve o cluster a qual aquele dado povavelemte esta e, utilizando dessa informacao, retorna a probabilidade de aquele cluster possuir "c", que depois sera usado como parametro para decisao de jogada."""
distancia = float("inf")  # start from an arbitrarily large distance
centro_min = 0
# Find the centroid with the smallest squared distance
for centro in range(len(self.centros)):
erro = self.centros[centro] - data
if erro.dot(erro) < distancia:
distancia = erro.dot(erro)
centro_min = centro
# Return the estimated probability that this specific input yields "c"
return self.accuracy[centro_min]
def escolha_de_cacada(self, rodada, comida_atual, reputacao_atual, m, reputacoes_dos_jogadores):
self.comida_historico.append(comida_atual)
# Prepare the data the same way the model was trained
media_de_reputacao = sum(reputacoes_dos_jogadores)/len(reputacoes_dos_jogadores)
max_de_reputacao = max(reputacoes_dos_jogadores)
# Return the best possible play. If the estimated probability that a player hunts is
# below 0.5-r, we rest as well ("d"); if it is above 0.5+r, that player will probably
# hunt, so we hunt with probability prob_2; if it is around 50%, we hunt with
# probability prob_1. When we are doing well on food we play more aggressively and
# hunt less; early in the game we are careful so that our reputation drops only slightly.
if rodada<200:
if reputacao_atual>media_de_reputacao:
r = 0.1
prob_1 = 0.5
prob_2 = 0.3
else:
r = 0.3
prob_1 = 0.7
prob_2 = 0.5
# If more than 15 players are still alive
elif len(reputacoes_dos_jogadores)>15:
# If my food has been falling a lot, hunt more
ultimas_comidas = [1 for rodada in range(-10,-2) if self.comida_historico[rodada]<comida_atual]
if len(ultimas_comidas)<5:
r = 0.1
prob_1 = 0.5
prob_2 = 0.2
else:
r = 0.3
prob_1 = 0.7
prob_2 = 0.5
# With only a few players left, however, we can hunt very little
else:
r = 0.1
prob_1 = 0.3
prob_2 = 0
# Scale the data with the means and standard deviations used during training
data_da_rodada = []
for jogador in range(len(reputacoes_dos_jogadores)):
rodada_escalada = (rodada-self.medias[0])/self.desvio[0]
reputacao_escalada = (reputacoes_dos_jogadores[jogador]-self.medias[3])/self.desvio[3]
recompensa_escalada = (m-self.medias[2])/self.desvio[2]
input_escalada = (reputacao_atual-self.medias[1])/self.desvio[1]
media_escalada = (media_de_reputacao-self.medias[4])/self.desvio[4]
max_escalada = (max_de_reputacao-self.medias[6])/self.desvio[6]
data_da_rodada.append(np.array([rodada_escalada, reputacao_escalada, recompensa_escalada, input_escalada, media_escalada,
max_escalada]))
# Build the list of answers from the predictions
resposta = []
for jogador in range(len(reputacoes_dos_jogadores)):
previsao = self.faz_previsao(data_da_rodada[jogador])  # estimated chance that this player plays "c"
if previsao < (0.5 - r):
solucao = "d"
elif previsao > (0.5 + r):
if random.random() < prob_2:
solucao = "c"
else:
solucao = "d"
else:
if random.random() < prob_1:
solucao = "c"
else:
solucao = "d"
resposta.append(solucao)
return resposta
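# Illustrative sketch (not part of the original agent): a minimal, self-contained rendition
# of the ideas above -- per-cluster accuracy as the fraction of "c" plays in each cluster,
# nearest-centroid lookup as in faz_previsao, and the threshold rule of escolha_de_cacada.
# All names and numbers below are hypothetical.
if __name__ == "__main__":
    import random
    import numpy as np
    toy_centroids = np.array([[0.0, 0.0], [1.0, 1.0]])   # two cluster centres
    toy_assignments = np.array([0, 0, 1, 1, 1])           # cluster of each training sample
    toy_outcomes = ["c", "d", "c", "c", "d"]              # observed plays per sample
    # accuracy of a cluster = number of "c" in the cluster / number of elements in it
    toy_accuracy = [
        float(np.mean([o == "c" for o, a in zip(toy_outcomes, toy_assignments) if a == k]))
        for k in range(len(toy_centroids))
    ]
    # nearest-centroid prediction
    sample = np.array([0.9, 1.1])
    nearest = int(np.argmin(((toy_centroids - sample) ** 2).sum(axis=1)))
    previsao = toy_accuracy[nearest]
    # threshold decision rule
    r, prob_1, prob_2 = 0.1, 0.5, 0.3
    if previsao < 0.5 - r:
        jogada = "d"
    elif previsao > 0.5 + r:
        jogada = "c" if random.random() < prob_2 else "d"
    else:
        jogada = "c" if random.random() < prob_1 else "d"
    print(nearest, round(previsao, 2), jogada)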
| 24.551039
| 1,108
| 0.745566
|
e3237089ce1b23c5dc87d26edef94adbe690edd7
| 2,010
|
py
|
Python
|
dl/models/gcn.py
|
salemilab/DeepDynaTree
|
a1cd0b8e6cbc415dd91425667e6ef722eb4138a5
|
[
"MIT"
] | null | null | null |
dl/models/gcn.py
|
salemilab/DeepDynaTree
|
a1cd0b8e6cbc415dd91425667e6ef722eb4138a5
|
[
"MIT"
] | null | null | null |
dl/models/gcn.py
|
salemilab/DeepDynaTree
|
a1cd0b8e6cbc415dd91425667e6ef722eb4138a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gcn.py:
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch.conv import GraphConv
from dl import feat_dict
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
in_feats = len(feat_dict[args.node_feat_cols])
num_classes = len(feat_dict[args.node_label_cols.split("_cat")[0]])
h_feat = 64
self.conv1 = GraphConv(in_feats, h_feat)
self.conv2 = GraphConv(h_feat, h_feat)
self.conv3 = GraphConv(h_feat, h_feat)
self.conv4 = GraphConv(h_feat, h_feat)
self.conv5 = GraphConv(h_feat, h_feat)
self.conv6 = GraphConv(h_feat, h_feat)
self.conv7 = GraphConv(h_feat, h_feat)
self.conv8 = GraphConv(h_feat, h_feat)
self.conv9 = GraphConv(h_feat, h_feat)
self.conv10 = GraphConv(h_feat, h_feat)
self.conv11 = GraphConv(h_feat, h_feat)
self.fc = nn.Linear(h_feat, num_classes)
def forward(self, g):
info = dict()
node_feat = g.ndata["feat"]
edge_feat = g.edata["feat"]
h = self.conv1(g, node_feat)
h = F.relu(h)
h = self.conv2(g, h)
h = F.relu(h)
h = self.conv3(g, h)
h = F.relu(h)
h = self.conv4(g, h)
h = F.relu(h)
h = self.conv5(g, h)
h = F.relu(h)
h = self.conv6(g, h)
h = F.relu(h)
h = self.conv7(g, h)
h = F.relu(h)
h = self.conv8(g, h)
h = F.relu(h)
h = self.conv9(g, h)
h = F.relu(h)
h = self.conv10(g, h)
h = F.relu(h)
h = self.conv11(g, h)
h = F.relu(h)
h = self.fc(h)
return h, info
def ce_loss(self, y_pred, y_true, weight=None):
# print(y_pred.shape, y_true.shape, weight.shape)
ce = F.cross_entropy(y_pred, y_true, weight=weight, size_average=None, reduce=None, reduction='mean')
return {"loss": ce}
| 28.309859
| 109
| 0.555224
|
2f4b313f3251ebe4344d789f7f2be826e3c4fea7
| 5,424
|
py
|
Python
|
docs/conf.py
|
lesamouraipourpre/Adafruit_CircuitPython_IL0398
|
b37e709588b999e8d1fe2ba05621fe274668979a
|
[
"Unlicense",
"MIT-0",
"MIT"
] | 1
|
2021-07-13T14:58:31.000Z
|
2021-07-13T14:58:31.000Z
|
docs/conf.py
|
lesamouraipourpre/Adafruit_CircuitPython_IL0398
|
b37e709588b999e8d1fe2ba05621fe274668979a
|
[
"Unlicense",
"MIT-0",
"MIT"
] | 2
|
2019-09-03T23:21:57.000Z
|
2021-09-27T16:45:13.000Z
|
docs/conf.py
|
lesamouraipourpre/Adafruit_CircuitPython_IL0398
|
b37e709588b999e8d1fe2ba05621fe274668979a
|
[
"Unlicense",
"MIT-0",
"MIT"
] | 7
|
2019-08-21T01:40:25.000Z
|
2022-03-27T14:09:33.000Z
|
# -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = ["displayio"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit IL0398 Library"
copyright = "2019 Scott Shawcroft"
author = "Scott Shawcroft"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitIl0398Librarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitIL0398Library.tex",
"AdafruitIL0398 Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitIL0398library",
"Adafruit IL0398 Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitIL0398Library",
"Adafruit IL0398 Library Documentation",
author,
"AdafruitIL0398Library",
"One line description of project.",
"Miscellaneous",
),
]
| 29.639344
| 85
| 0.669432
|
c94621cc4618b48094f33d32a4ba2fcd11e19b83
| 205
|
py
|
Python
|
drf_haystack/__init__.py
|
advantch/drf-haystack
|
e625264508b505074349c374cd7f0ab0e63f89b6
|
[
"MIT"
] | 201
|
2015-02-14T08:17:35.000Z
|
2019-07-10T04:19:04.000Z
|
drf_haystack/__init__.py
|
advantch/drf-haystack
|
e625264508b505074349c374cd7f0ab0e63f89b6
|
[
"MIT"
] | 138
|
2015-02-17T09:28:33.000Z
|
2019-07-30T10:29:52.000Z
|
drf_haystack/__init__.py
|
advantch/drf-haystack
|
e625264508b505074349c374cd7f0ab0e63f89b6
|
[
"MIT"
] | 60
|
2015-04-01T14:51:18.000Z
|
2019-05-12T15:31:52.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__title__ = "drf-haystack"
__version__ = "1.8.11"
__author__ = "Rolf Haavard Blindheim"
__license__ = "MIT License"
VERSION = __version__
| 18.636364
| 39
| 0.731707
|
51b4192e5b540a160f4c815cd7e08861f4ef00f9
| 3,142
|
py
|
Python
|
pythonforandroid/recipes/android/__init__.py
|
wo01/python-for-android
|
df0866d95c9c508299a6f948302454beb971e3ac
|
[
"MIT"
] | 1
|
2018-12-21T03:40:18.000Z
|
2018-12-21T03:40:18.000Z
|
pythonforandroid/recipes/android/__init__.py
|
wo01/python-for-android
|
df0866d95c9c508299a6f948302454beb971e3ac
|
[
"MIT"
] | null | null | null |
pythonforandroid/recipes/android/__init__.py
|
wo01/python-for-android
|
df0866d95c9c508299a6f948302454beb971e3ac
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from pythonforandroid.recipe import CythonRecipe, IncludedFilesBehaviour
from pythonforandroid.util import current_directory
from pythonforandroid.patching import will_build
from pythonforandroid import logger
from os.path import join
class AndroidRecipe(IncludedFilesBehaviour, CythonRecipe):
# name = 'android'
version = None
url = None
src_filename = 'src'
depends = [('pygame', 'sdl2', 'genericndkbuild'),
('python2', 'python3crystax', 'python3')]
config_env = {}
def get_recipe_env(self, arch):
env = super(AndroidRecipe, self).get_recipe_env(arch)
env.update(self.config_env)
return env
def prebuild_arch(self, arch):
super(AndroidRecipe, self).prebuild_arch(arch)
ctx_bootstrap = self.ctx.bootstrap.name
# define macros for Cython, C, Python
tpxi = 'DEF {} = {}\n'
th = '#define {} {}\n'
tpy = '{} = {}\n'
# make sure bootstrap name is in unicode
if isinstance(ctx_bootstrap, bytes):
ctx_bootstrap = ctx_bootstrap.decode('utf-8')
bootstrap = bootstrap_name = ctx_bootstrap
is_sdl2 = bootstrap_name in ('sdl2', 'sdl2python3', 'sdl2_gradle')
is_pygame = bootstrap_name in ('pygame',)
is_webview = bootstrap_name in ('webview',)
if is_sdl2 or is_webview:
if is_sdl2:
bootstrap = 'sdl2'
java_ns = 'org.kivy.android'
jni_ns = 'org/kivy/android'
elif is_pygame:
java_ns = b'org.renpy.android'
jni_ns = b'org/renpy/android'
else:
logger.error((
'unsupported bootstrap for android recipe: {}'
''.format(bootstrap_name)
))
exit(1)
config = {
'BOOTSTRAP': bootstrap,
'IS_SDL2': int(is_sdl2),
'IS_PYGAME': int(is_pygame),
'PY2': int(will_build('python2')(self)),
'JAVA_NAMESPACE': java_ns,
'JNI_NAMESPACE': jni_ns,
}
# create config files for Cython, C and Python
with (
current_directory(self.get_build_dir(arch.arch))), (
open(join('android', 'config.pxi'), 'w')) as fpxi, (
open(join('android', 'config.h'), 'w')) as fh, (
open(join('android', 'config.py'), 'w')) as fpy:
for key, value in config.items():
fpxi.write(tpxi.format(key, repr(value)))
fpy.write(tpy.format(key, repr(value)))
fh.write(th.format(
key,
value if isinstance(value, int) else '"{}"'.format(value)
))
self.config_env[key] = str(value)
if is_sdl2:
fh.write('JNIEnv *SDL_AndroidGetJNIEnv(void);\n')
fh.write(
'#define SDL_ANDROID_GetJNIEnv SDL_AndroidGetJNIEnv\n'
)
elif is_pygame:
fh.write('JNIEnv *SDL_ANDROID_GetJNIEnv(void);\n')
recipe = AndroidRecipe()
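# Illustrative sketch (not part of the original recipe): what the three templates in
# prebuild_arch above produce for a single config entry. The key/value pair is hypothetical
# and mirrors the SDL2 branch; importing this module still requires the python-for-android
# package to be installed.
if __name__ == "__main__":
    _tpxi, _th, _tpy = 'DEF {} = {}\n', '#define {} {}\n', '{} = {}\n'
    key, value = 'JAVA_NAMESPACE', 'org.kivy.android'
    print(_tpxi.format(key, repr(value)), end='')         # config.pxi: DEF JAVA_NAMESPACE = 'org.kivy.android'
    print(_tpy.format(key, repr(value)), end='')          # config.py:  JAVA_NAMESPACE = 'org.kivy.android'
    print(_th.format(key, '"{}"'.format(value)), end='')  # config.h:   #define JAVA_NAMESPACE "org.kivy.android"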
| 32.729167
| 77
| 0.559835
|
40c00e8eb4445f185ade400935bee7e2ac1f7559
| 1,475
|
py
|
Python
|
build/lib/adapter/estimator/xgboost_classification.py
|
mozjay0619/scikit-optimize-adapter
|
6550b6cba1667a0f0bf2c082a6ab64981cedb3e0
|
[
"BSD-3-Clause"
] | null | null | null |
build/lib/adapter/estimator/xgboost_classification.py
|
mozjay0619/scikit-optimize-adapter
|
6550b6cba1667a0f0bf2c082a6ab64981cedb3e0
|
[
"BSD-3-Clause"
] | null | null | null |
build/lib/adapter/estimator/xgboost_classification.py
|
mozjay0619/scikit-optimize-adapter
|
6550b6cba1667a0f0bf2c082a6ab64981cedb3e0
|
[
"BSD-3-Clause"
] | null | null | null |
from .base_estimator import BaseEstimator
import xgboost as xgb
from sklearn.metrics import mean_absolute_error as mae
# add more metrics
class XgboostClassification(BaseEstimator):
def __init__(self, **kwargs):
self.n_jobs = kwargs["n_jobs"]
def fit(self, X, y, params):
learning_rate = params[0]
gamma = params[1]
max_depth = int(params[2])
n_estimators = int(params[3])
learning_rate = learning_rate / float(n_estimators)
min_child_weight = int(params[4])
colsample_bytree = params[5]
subsample = params[6]
algo = xgb.XGBRegressor(objective="binary:logistic",
learning_rate=learning_rate,
gamma=gamma,
max_depth=max_depth,
n_estimators=n_estimators,
min_child_weight=min_child_weight,
colsample_bytree=colsample_bytree,
subsample=subsample,
n_jobs=self.n_jobs,
tree_method='hist')  # 'hist' speeds up training during hyperparameter tuning
self.model = algo.fit(X, y)
def predict(self, X):
return self.model.predict(X)
def score(self, X, y, score_metric="mae"):
pred = self.model.predict(X)
return mae(pred, y)
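# Illustrative sketch (not part of the original module): how the positional `params`
# vector maps onto the estimator, in the order
# [learning_rate, gamma, max_depth, n_estimators, min_child_weight, colsample_bytree, subsample].
# The data and values are made up; running it assumes the package context is importable.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.random.rand(32, 4)
    y_demo = np.random.randint(0, 2, size=32)
    demo_params = [0.3, 0.0, 3, 50, 1, 0.8, 0.8]
    est = XgboostClassification(n_jobs=1)
    est.fit(X_demo, y_demo, demo_params)
    print(est.score(X_demo, y_demo))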
| 33.522727
| 86
| 0.527458
|
2acaeb0de8b6ff094473ca6554657ef7bc07a0ea
| 267
|
py
|
Python
|
precompute_syllables.py
|
superMDguy/nanogenmo-2018
|
26f72dc922948a0f2bc464b33b70a3a37f376b61
|
[
"MIT"
] | 1
|
2021-05-04T14:11:11.000Z
|
2021-05-04T14:11:11.000Z
|
precompute_syllables.py
|
superMDguy/nanogenmo-2018
|
26f72dc922948a0f2bc464b33b70a3a37f376b61
|
[
"MIT"
] | 1
|
2018-12-20T20:59:48.000Z
|
2019-01-19T17:11:29.000Z
|
precompute_syllables.py
|
superMDguy/nanogenmo-2018
|
26f72dc922948a0f2bc464b33b70a3a37f376b61
|
[
"MIT"
] | null | null | null |
import cmudict
from tqdm import tqdm
import re
with open('cmu_pronouncing.txt', 'w') as f:
only_stress = re.compile(r'[^012]')
for word, phonemes in tqdm(cmudict.dict().items()):
f.write(f"{word} {re.sub(only_stress, '', ''.join(phonemes[0]))}\n")
| 26.7
| 76
| 0.640449
|
64cac1588112331a9f5b080991b8bc61eee56aec
| 2,423
|
py
|
Python
|
astrodenoisepygui-package-dist.py
|
kalgecin/astro-csbdeep
|
a216642750e0357b9b59002eaff1d69c54eb2316
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T13:51:12.000Z
|
2022-01-24T13:51:12.000Z
|
astrodenoisepygui-package-dist.py
|
kalgecin/astro-csbdeep
|
a216642750e0357b9b59002eaff1d69c54eb2316
|
[
"BSD-3-Clause"
] | null | null | null |
astrodenoisepygui-package-dist.py
|
kalgecin/astro-csbdeep
|
a216642750e0357b9b59002eaff1d69c54eb2316
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from cx_Freeze import setup, Executable
build_exe_options = {
"build_exe": "astrodenoisepy\\dist-gui",
"packages": ["kivy"],
"include_files": [
"LICENSE.txt",
["astrodenoisepy\\dist-models\\main", "models\\default"],
["astrodenoisepy\\data", "astrodenoisepy\\data"],
"astrodenoisepygui.kv",
#angle
".venv\\share\\angle\\bin\\d3dcompiler_47.dll",
".venv\\share\\angle\\bin\\libEGL.dll",
".venv\\share\\angle\\bin\\libGLESv2.dll",
#glew
".venv\\share\\glew\\bin\\glew32.dll",
#sdl2
".venv\\share\\sdl2\\bin\\libFLAC-8.dll",
".venv\\share\\sdl2\\bin\\libfreetype-6.dll",
".venv\\share\\sdl2\\bin\\libjpeg-9.dll",
".venv\\share\\sdl2\\bin\\libmodplug-1.dll",
".venv\\share\\sdl2\\bin\\libmpg123-0.dll",
".venv\\share\\sdl2\\bin\\libogg-0.dll",
".venv\\share\\sdl2\\bin\\libopus-0.dll",
".venv\\share\\sdl2\\bin\\libopusfile-0.dll",
".venv\\share\\sdl2\\bin\\libpng16-16.dll",
".venv\\share\\sdl2\\bin\\libtiff-5.dll",
".venv\\share\\sdl2\\bin\\libvorbis-0.dll",
".venv\\share\\sdl2\\bin\\libvorbisfile-3.dll",
".venv\\share\\sdl2\\bin\\libwebp-7.dll",
".venv\\share\\sdl2\\bin\\LICENSE.FLAC.txt",
".venv\\share\\sdl2\\bin\\LICENSE.freetype.txt",
".venv\\share\\sdl2\\bin\\LICENSE.jpeg.txt",
".venv\\share\\sdl2\\bin\\LICENSE.modplug.txt",
".venv\\share\\sdl2\\bin\\LICENSE.mpg123.txt",
".venv\\share\\sdl2\\bin\\LICENSE.ogg-vorbis.txt",
".venv\\share\\sdl2\\bin\\LICENSE.opus.txt",
".venv\\share\\sdl2\\bin\\LICENSE.opusfile.txt",
".venv\\share\\sdl2\\bin\\LICENSE.png.txt",
".venv\\share\\sdl2\\bin\\LICENSE.tiff.txt",
".venv\\share\\sdl2\\bin\\LICENSE.webp.txt",
".venv\\share\\sdl2\\bin\\LICENSE.zlib.txt",
".venv\\share\\sdl2\\bin\\SDL2.dll",
".venv\\share\\sdl2\\bin\\SDL2_image.dll",
".venv\\share\\sdl2\\bin\\SDL2_mixer.dll",
".venv\\share\\sdl2\\bin\\SDL2_ttf.dll",
".venv\\share\\sdl2\\bin\\zlib1.dll",
]
}
import astrodenoisepyguiversion
setup(
name="astrodenoisepygui",
version=astrodenoisepyguiversion.version,
options={"build_exe": build_exe_options},
description="astrodenoisepygui",
executables=[Executable("astrodenoisepygui.py")]
)
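# Note (not part of the original script): with cx_Freeze, the frozen GUI is produced by
# running the standard build command, e.g. `python astrodenoisepygui-package-dist.py build`,
# which writes the executable tree to the "build_exe" directory configured above
# (astrodenoisepy\dist-gui).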
| 39.721311
| 65
| 0.588114
|
0859380d064e5fd3ce40f77b016f9678d66b0d7a
| 9,513
|
py
|
Python
|
test/functional/test_framework/blocktools.py
|
rednaxus/bitcoin
|
a47ac9e98bb3c1ead00843cf8068ac298543d536
|
[
"MIT"
] | 1
|
2020-10-27T09:27:31.000Z
|
2020-10-27T09:27:31.000Z
|
test/functional/test_framework/blocktools.py
|
blackzilla2126/bitcoin
|
88271184e82222f556d67511cc64230b0532f40d
|
[
"MIT"
] | 18
|
2020-10-31T01:04:18.000Z
|
2020-11-03T19:25:27.000Z
|
test/functional/test_framework/blocktools.py
|
blackzilla2126/bitcoin
|
88271184e82222f556d67511cc64230b0532f40d
|
[
"MIT"
] | 1
|
2021-09-18T04:39:58.000Z
|
2021-09-18T04:39:58.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from binascii import a2b_hex
import io
import struct
import time
import unittest
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
FromHex,
ToHex,
hash256,
hex_str_to_bytes,
ser_uint256,
sha256,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_0,
OP_1,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
hash160,
)
from .util import assert_equal
from io import BytesIO
WITNESS_SCALE_FACTOR = 4
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR
# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602
# From BIP141
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"
NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
"""Create a block (with regtest difficulty)."""
block = CBlock()
if tmpl is None:
tmpl = {}
block.nVersion = version or tmpl.get('version') or 1
block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
if tmpl and not tmpl.get('bits') is None:
block.nBits = struct.unpack('>I', a2b_hex(tmpl['bits']))[0]
else:
block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams
if coinbase is None:
coinbase = create_coinbase(height=tmpl['height'])
block.vtx.append(coinbase)
if txlist:
for tx in txlist:
if not hasattr(tx, 'calc_sha256'):
txo = CTransaction()
txo.deserialize(io.BytesIO(tx))
tx = txo
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def get_witness_script(witness_root, witness_nonce):
witness_commitment = uint256_from_str(hash256(ser_uint256(witness_root) + ser_uint256(witness_nonce)))
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
"""Add a witness commitment to the block's coinbase transaction.
According to BIP141, blocks with witness rules active must commit to the
hash of all in-block transactions including witness."""
# First calculate the merkle root of the block's
# transactions, with witnesses.
witness_nonce = nonce
witness_root = block.calc_witness_merkle_root()
# witness_nonce should go to coinbase witness.
block.vtx[0].wit.vtxinwit = [CTxInWitness()]
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
# witness commitment is the last OP_RETURN output in coinbase
block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
block.vtx[0].rehash()
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
def script_BIP34_coinbase_height(height):
if height <= 16:
res = CScriptOp.encode_op_n(height)
# Append dummy to increase scriptSig size above 2 (see bad-cb-length consensus rule)
return CScript([res, OP_1])
return CScript([CScriptNum(height)])
def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0):
"""Create a coinbase transaction.
If pubkey is passed in, the coinbase output will be a P2PK output;
otherwise an anyone-can-spend output.
If extra_output_script is given, make a 0-value output to that
script. This is useful to pad block weight/sigops as needed. """
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50 * COIN
halvings = int(height / 150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.nValue += fees
if pubkey is not None:
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
if extra_output_script is not None:
coinbaseoutput2 = CTxOut()
coinbaseoutput2.nValue = 0
coinbaseoutput2.scriptPubKey = extra_output_script
coinbase.vout.append(coinbaseoutput2)
coinbase.calc_sha256()
return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
"""Return one-input, one-output transaction object
spending the prevtx's n-th output with the given amount.
Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
"""
tx = CTransaction()
assert n < len(prevtx.vout)
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
tx.vout.append(CTxOut(amount, script_pub_key))
tx.calc_sha256()
return tx
def create_transaction(node, txid, to_address, *, amount):
""" Return signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx)))
return tx
def create_raw_transaction(node, txid, to_address, *, amount):
""" Return raw signed transaction spending the first output of the
input txid. Note that the node must be able to sign for the
output that is being spent, and the node must not be running
multiple wallets.
"""
rawtx = node.createrawtransaction(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
signresult = node.signrawtransactionwithwallet(rawtx)
assert_equal(signresult["complete"], True)
return signresult['hex']
def get_legacy_sigopcount_block(block, accurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, accurate)
return count
def get_legacy_sigopcount_tx(tx, accurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(accurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(accurate)
return count
def witness_script(use_p2wsh, pubkey):
"""Create a scriptPubKey for a pay-to-witness TxOut.
This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
1-of-1 multisig for the given pubkey. Returns the hex encoding of the
scriptPubKey."""
if not use_p2wsh:
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return pkscript.hex()
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
"""Return a transaction (in hex) that spends the given utxo to a segwit output.
Optionally wrap the segwit output using P2SH."""
if use_p2wsh:
program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program)
else:
addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey)
if not encode_p2sh:
assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
"""Create a transaction spending a given utxo to a segwit output.
The output corresponds to the given pubkey: use_p2wsh determines whether to
use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
sign=True will have the given node sign the transaction.
insert_redeem_script will be added to the scriptSig, if given."""
tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransactionwithwallet(tx_to_witness)
assert "errors" not in signed or len(["errors"]) == 0
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = FromHex(CTransaction(), tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = ToHex(tx)
return node.sendrawtransaction(tx_to_witness)
class TestFrameworkBlockTools(unittest.TestCase):
def test_create_coinbase(self):
height = 20
coinbase_tx = create_coinbase(height=height)
assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height)
| 37.305882
| 108
| 0.706612
|
de530323577d058947b6914880479ebcbffb6ac7
| 10,902
|
py
|
Python
|
contextPred/chem/finetune.py
|
thomasly/slgnn
|
caa1e7814498da41ad025b4e62c569fe511848ff
|
[
"MIT"
] | 2
|
2020-08-31T00:55:31.000Z
|
2020-09-01T19:59:30.000Z
|
contextPred/chem/finetune.py
|
thomasly/slgnn
|
caa1e7814498da41ad025b4e62c569fe511848ff
|
[
"MIT"
] | null | null | null |
contextPred/chem/finetune.py
|
thomasly/slgnn
|
caa1e7814498da41ad025b4e62c569fe511848ff
|
[
"MIT"
] | null | null | null |
import argparse
from .loader import MoleculeDataset
from torch_geometric.data import DataLoader
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
import numpy as np
from .model import GNN, GNN_graphpred
from sklearn.metrics import roc_auc_score
from .splitters import scaffold_split, random_split, random_scaffold_split
import pandas as pd
import os
import shutil
from tensorboardX import SummaryWriter
criterion = nn.BCEWithLogitsLoss(reduction="none")
def train(args, model, device, loader, optimizer):
model.train()
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y = batch.y.view(pred.shape).to(torch.float64)
# Whether y is non-null or not.
is_valid = y ** 2 > 0
# Loss matrix
loss_mat = criterion(pred.double(), (y + 1) / 2)
# loss matrix after removing null target
loss_mat = torch.where(
is_valid,
loss_mat,
torch.zeros(loss_mat.shape).to(loss_mat.device).to(loss_mat.dtype),
)
optimizer.zero_grad()
loss = torch.sum(loss_mat) / torch.sum(is_valid)
loss.backward()
optimizer.step()
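# Illustrative sketch (not part of the original script): the label convention used in
# train() above. y is +1/-1 with 0 marking a missing target, so (y + 1) / 2 maps labels to
# 0/1 and y ** 2 > 0 masks out missing entries. Tiny worked example with made-up numbers:
def _label_mask_demo():
    _y = torch.tensor([[-1.0, 0.0, 1.0]])
    _pred = torch.tensor([[-2.0, 0.3, 1.5]])
    _mask = _y ** 2 > 0                          # only the first and last targets are valid
    _loss = criterion(_pred.double(), ((_y + 1) / 2).double())
    _loss = torch.where(_mask, _loss, torch.zeros_like(_loss))
    return torch.sum(_loss) / torch.sum(_mask)   # mean BCE over the two valid entries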
def eval(args, model, device, loader):
model.eval()
y_true = []
y_scores = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch.x, batch.edge_index, batch.edge_attr, batch.batch)
y_true.append(batch.y.view(pred.shape))
y_scores.append(pred)
y_true = torch.cat(y_true, dim=0).cpu().numpy()
y_scores = torch.cat(y_scores, dim=0).cpu().numpy()
roc_list = []
for i in range(y_true.shape[1]):
# AUC is only defined when there is at least one positive data.
if np.sum(y_true[:, i] == 1) > 0 and np.sum(y_true[:, i] == -1) > 0:
is_valid = y_true[:, i] ** 2 > 0
roc_list.append(
roc_auc_score((y_true[is_valid, i] + 1) / 2, y_scores[is_valid, i])
)
if len(roc_list) < y_true.shape[1]:
print("Some target is missing!")
print("Missing ratio: %f" % (1 - float(len(roc_list)) / y_true.shape[1]))
return sum(roc_list) / len(roc_list) # y_true.shape[1]
def main():
# Training settings
parser = argparse.ArgumentParser(
description="PyTorch implementation of pre-training of graph neural networks"
)
parser.add_argument(
"--device", type=int, default=0, help="which gpu to use if any (default: 0)"
)
parser.add_argument(
"--batch_size",
type=int,
default=32,
help="input batch size for training (default: 32)",
)
parser.add_argument(
"--epochs",
type=int,
default=100,
help="number of epochs to train (default: 100)",
)
parser.add_argument(
"--lr", type=float, default=0.001, help="learning rate (default: 0.001)"
)
parser.add_argument(
"--lr_scale",
type=float,
default=1,
help="relative learning rate for the feature extraction layer (default: 1)",
)
parser.add_argument(
"--decay", type=float, default=0, help="weight decay (default: 0)"
)
parser.add_argument(
"--num_layer",
type=int,
default=5,
help="number of GNN message passing layers (default: 5).",
)
parser.add_argument(
"--emb_dim", type=int, default=300, help="embedding dimensions (default: 300)"
)
parser.add_argument(
"--dropout_ratio", type=float, default=0.5, help="dropout ratio (default: 0.5)"
)
parser.add_argument(
"--graph_pooling",
type=str,
default="mean",
help="graph level pooling (sum, mean, max, set2set, attention)",
)
parser.add_argument(
"--JK",
type=str,
default="last",
help="how the node features across layers are combined. last, sum, max or concat",
)
parser.add_argument("--gnn_type", type=str, default="gin")
parser.add_argument(
"--dataset",
type=str,
default="tox21",
help="root directory of dataset. For now, only classification.",
)
parser.add_argument(
"--input_model_file",
type=str,
default="",
help="filename to read the model (if there is any)",
)
parser.add_argument(
"--save_model_to", type=str, default="", help="path to save the finetuned model"
)
parser.add_argument("--filename", type=str, default="", help="output filename")
parser.add_argument(
"--seed", type=int, default=42, help="Seed for splitting the dataset."
)
parser.add_argument(
"--runseed",
type=int,
default=0,
help="Seed for minibatch selection, random initialization.",
)
parser.add_argument(
"--split",
type=str,
default="scaffold",
help="random or scaffold or random_scaffold",
)
parser.add_argument(
"--eval_train", type=int, default=0, help="evaluating training or not"
)
parser.add_argument(
"--num_workers",
type=int,
default=4,
help="number of workers for dataset loading",
)
args = parser.parse_args()
torch.manual_seed(args.runseed)
np.random.seed(args.runseed)
device = (
torch.device("cuda:" + str(args.device))
if torch.cuda.is_available()
else torch.device("cpu")
)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.runseed)
# Bunch of classification tasks
if args.dataset == "tox21":
num_tasks = 12
elif args.dataset == "hiv":
num_tasks = 1
elif args.dataset == "pcba":
num_tasks = 128
elif args.dataset == "muv":
num_tasks = 17
elif args.dataset == "bace":
num_tasks = 1
elif args.dataset == "bbbp":
num_tasks = 1
elif args.dataset == "toxcast":
num_tasks = 617
elif args.dataset == "sider":
num_tasks = 27
elif args.dataset == "clintox":
num_tasks = 2
elif args.dataset in ["jak1", "jak2", "jak3"]:
num_tasks = 1
else:
raise ValueError("Invalid dataset name.")
# set up dataset
dataset = MoleculeDataset(
"contextPred/chem/dataset/" + args.dataset, dataset=args.dataset
)
print(dataset)
if args.split == "scaffold":
smiles_list = pd.read_csv(
"contextPred/chem/dataset/" + args.dataset + "/processed/smiles.csv",
header=None,
)[0].tolist()
train_dataset, valid_dataset, test_dataset = scaffold_split(
dataset,
smiles_list,
null_value=0,
frac_train=0.8,
frac_valid=0.1,
frac_test=0.1,
)
print("scaffold")
elif args.split == "random":
train_dataset, valid_dataset, test_dataset = random_split(
dataset,
null_value=0,
frac_train=0.8,
frac_valid=0.1,
frac_test=0.1,
seed=args.seed,
)
print("random")
elif args.split == "random_scaffold":
smiles_list = pd.read_csv(
"contextPred/chem/dataset/" + args.dataset + "/processed/smiles.csv",
header=None,
)[0].tolist()
train_dataset, valid_dataset, test_dataset = random_scaffold_split(
dataset,
smiles_list,
null_value=0,
frac_train=0.8,
frac_valid=0.1,
frac_test=0.1,
seed=args.seed,
)
print("random scaffold")
else:
raise ValueError("Invalid split option.")
print(train_dataset[0])
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
)
val_loader = DataLoader(
valid_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
)
test_loader = DataLoader(
test_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
)
# set up model
model = GNN_graphpred(
args.num_layer,
args.emb_dim,
num_tasks,
JK=args.JK,
drop_ratio=args.dropout_ratio,
graph_pooling=args.graph_pooling,
gnn_type=args.gnn_type,
)
if not args.input_model_file == "":
model.from_pretrained(args.input_model_file)
model.to(device)
# set up optimizer
# different learning rate for different part of GNN
model_param_group = []
model_param_group.append({"params": model.gnn.parameters()})
if args.graph_pooling == "attention":
model_param_group.append(
{"params": model.pool.parameters(), "lr": args.lr * args.lr_scale}
)
model_param_group.append(
{"params": model.graph_pred_linear.parameters(), "lr": args.lr * args.lr_scale}
)
optimizer = optim.Adam(model_param_group, lr=args.lr, weight_decay=args.decay)
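    # With the parameter groups above, the GNN body trains at the base --lr,
    # while the (attention) pooling layer and the prediction head use
    # --lr * --lr_scale; the default lr_scale of 1 keeps all groups equal.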
print(optimizer)
train_acc_list = []
val_acc_list = []
test_acc_list = []
if not args.filename == "":
# fname = "runs/finetune_cls_runseed" + str(args.runseed) + "/" + args.filename
# # delete the directory if there exists one
# if os.path.exists(fname):
# shutil.rmtree(fname)
# print("removed the existing file.")
writer = SummaryWriter(args.filename)
for epoch in range(1, args.epochs + 1):
print("====epoch " + str(epoch))
train(args, model, device, train_loader, optimizer)
print("====Evaluation")
if args.eval_train:
train_acc = eval(args, model, device, train_loader)
else:
print("omit the training accuracy computation")
train_acc = 0
val_acc = eval(args, model, device, val_loader)
test_acc = eval(args, model, device, test_loader)
print("train: %f val: %f test: %f" % (train_acc, val_acc, test_acc))
val_acc_list.append(val_acc)
test_acc_list.append(test_acc)
train_acc_list.append(train_acc)
if not args.filename == "":
writer.add_scalar("data/train auc", train_acc, epoch)
writer.add_scalar("data/val auc", val_acc, epoch)
writer.add_scalar("data/test auc", test_acc, epoch)
print("")
if not args.filename == "":
writer.close()
if not args.save_model_to == "":
torch.save(model.gnn.state_dict(), args.save_model_to)
if __name__ == "__main__":
main()
| 29.786885
| 95
| 0.593469
|
bb3abd3741997f065425533cb5360b15fac3df61
| 2,874
|
py
|
Python
|
CAIL2020/sfksz/tools/eval_tool.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 71
|
2020-07-16T01:49:27.000Z
|
2022-03-27T16:55:00.000Z
|
CAIL2020/sfksz/tools/eval_tool.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 11
|
2020-09-18T14:26:25.000Z
|
2022-02-09T23:49:33.000Z
|
CAIL2020/sfksz/tools/eval_tool.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 16
|
2020-07-15T07:24:30.000Z
|
2022-03-19T05:41:11.000Z
|
import logging
import os
import torch
from torch.autograd import Variable
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from timeit import default_timer as timer
logger = logging.getLogger(__name__)
def gen_time_str(t):
t = int(t)
minute = t // 60
second = t % 60
return '%2d:%02d' % (minute, second)
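# For example, gen_time_str(125) returns ' 2:05' -- minutes are space-padded to
# two characters and seconds are zero-padded.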
def output_value(epoch, mode, step, time, loss, info, end, config):
try:
delimiter = config.get("output", "delimiter")
except Exception as e:
delimiter = " "
s = ""
s = s + str(epoch) + " "
while len(s) < 7:
s += " "
s = s + str(mode) + " "
while len(s) < 14:
s += " "
s = s + str(step) + " "
while len(s) < 25:
s += " "
s += str(time)
while len(s) < 40:
s += " "
s += str(loss)
while len(s) < 48:
s += " "
s += str(info)
s = s.replace(" ", delimiter)
if not (end is None):
print(s, end=end, flush=True)
else:
print(s, flush=True)
def valid(model, dataset, epoch, writer, config, gpu_list, output_function, mode="valid"):
model.eval()
acc_result = None
total_loss = 0
cnt = 0
total_len = len(dataset)
start_time = timer()
output_info = ""
output_time = config.getint("output", "output_time")
step = -1
more = ""
if total_len < 10000:
more = "\t"
for step, data in enumerate(dataset):
for key in data.keys():
if isinstance(data[key], torch.Tensor):
if len(gpu_list) > 0:
data[key] = Variable(data[key].cuda())
else:
data[key] = Variable(data[key])
results = model(data, config, gpu_list, acc_result, "valid")
loss, acc_result = results["loss"], results["acc_result"]
total_loss += float(loss)
cnt += 1
if step % output_time == 0:
delta_t = timer() - start_time
output_value(epoch, mode, "%d/%d" % (step + 1, total_len), "%s/%s" % (
gen_time_str(delta_t), gen_time_str(delta_t * (total_len - step - 1) / (step + 1))),
"%.3lf" % (total_loss / (step + 1)), output_info, '\r', config)
if step == -1:
logger.error("There is no data given to the model in this epoch, check your data.")
raise NotImplementedError
delta_t = timer() - start_time
output_info = output_function(acc_result, config)
output_value(epoch, mode, "%d/%d" % (step + 1, total_len), "%s/%s" % (
gen_time_str(delta_t), gen_time_str(delta_t * (total_len - step - 1) / (step + 1))),
"%.3lf" % (total_loss / (step + 1)), output_info, None, config)
writer.add_scalar(config.get("output", "model_name") + "_eval_epoch", float(total_loss) / (step + 1),
epoch)
model.train()
| 29.030303
| 105
| 0.549408
|
e11d1a30dc9a72975b031e3eae91c45ca3bcf30d
| 687
|
py
|
Python
|
files/question histoire pygame/thorpy_reaction_(show_place_mouse_onclick).py
|
HenraL/NSI_1ereG6_Programme_Python
|
9f46b848fa2331daca57e5e2e11cba41da45a67f
|
[
"Unlicense"
] | 1
|
2021-06-15T13:44:47.000Z
|
2021-06-15T13:44:47.000Z
|
files/question histoire pygame/thorpy_reaction_(show_place_mouse_onclick).py
|
HenraL/NSI_1ereG6_Programme_Python
|
9f46b848fa2331daca57e5e2e11cba41da45a67f
|
[
"Unlicense"
] | null | null | null |
files/question histoire pygame/thorpy_reaction_(show_place_mouse_onclick).py
|
HenraL/NSI_1ereG6_Programme_Python
|
9f46b848fa2331daca57e5e2e11cba41da45a67f
|
[
"Unlicense"
] | null | null | null |
import thorpy, pygame
def my_func_reaction(event):  # Reaction functions must take an event as the first arg
print("My reaction displays the pos of event:", event.pos)
#We declare a Reaction. Note that we do not filter the event here.
my_reaction = thorpy.Reaction(reacts_to=pygame.MOUSEBUTTONDOWN,
reac_func=my_func_reaction)
application = thorpy.Application(size=(300, 300), caption="Reaction tuto")
background = thorpy.Background(color=(255,255,255))
background.add_reaction(my_reaction) #add my_reaction to background's reactions
menu = thorpy.Menu(background) #create a menu for auto events handling
menu.play() #launch the menu
application.quit()
| 36.157895
| 80
| 0.754003
|
bc1ef50c3683fccefcc1680447d2bc2b43077777
| 27,227
|
py
|
Python
|
ax/plot/helper.py
|
stevemandala/Ax
|
8e289a154e3a2ed237bf27ddb90e09963c0d6a97
|
[
"MIT"
] | null | null | null |
ax/plot/helper.py
|
stevemandala/Ax
|
8e289a154e3a2ed237bf27ddb90e09963c0d6a97
|
[
"MIT"
] | null | null | null |
ax/plot/helper.py
|
stevemandala/Ax
|
8e289a154e3a2ed237bf27ddb90e09963c0d6a97
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
from ax.core.generator_run import GeneratorRun
from ax.core.observation import ObservationFeatures
from ax.core.parameter import ChoiceParameter, FixedParameter, RangeParameter
from ax.core.types import TParameterization
from ax.modelbridge.base import ModelBridge
from ax.modelbridge.transforms.ivw import IVW
from ax.plot.base import DECIMALS, PlotData, PlotInSampleArm, PlotOutOfSampleArm, Z
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import not_none
logger = get_logger(name="PlotHelper")
# Typing alias
RawData = List[Dict[str, Union[str, float]]]
TNullableGeneratorRunsDict = Optional[Dict[str, GeneratorRun]]
def _format_dict(param_dict: TParameterization, name: str = "Parameterization") -> str:
"""Format a dictionary for labels.
Args:
param_dict: Dictionary to be formatted
name: String name of the thing being formatted.
Returns: stringified blob.
"""
if len(param_dict) >= 10:
blob = "{} has too many items to render on hover ({}).".format(
name, len(param_dict)
)
else:
blob = "<br><em>{}:</em><br>{}".format(
name, "<br>".join("{}: {}".format(n, v) for n, v in param_dict.items())
)
return blob
def _wrap_metric(metric_name: str) -> str:
"""Put a newline on "::" for metric names.
Args:
metric_name: metric name.
Returns: wrapped metric name.
"""
if "::" in metric_name:
return "<br>".join(metric_name.split("::"))
else:
return metric_name
def _format_CI(estimate: float, sd: float, relative: bool, zval: float = Z) -> str:
"""Format confidence intervals given estimate and standard deviation.
Args:
estimate: point estimate.
sd: standard deviation of point estimate.
relative: if True, '%' is appended.
zval: z-value associated with desired CI (e.g. 1.96 for 95% CIs)
Returns: formatted confidence interval.
"""
return "[{lb:.{digits}f}{perc}, {ub:.{digits}f}{perc}]".format(
lb=estimate - zval * sd,
ub=estimate + zval * sd,
digits=DECIMALS,
perc="%" if relative else "",
)
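# Illustrative output, assuming the module constants Z ~= 1.96 and DECIMALS == 2:
# _format_CI(1.23, 0.10, relative=True) -> '[1.03%, 1.43%]'.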
def arm_name_to_tuple(arm_name: str) -> Union[Tuple[int, int], Tuple[int]]:
tup = arm_name.split("_")
if len(tup) == 2:
try:
return (int(tup[0]), int(tup[1]))
except ValueError:
return (0,)
return (0,)
def resize_subtitles(figure: Dict[str, Any], size: int):
for ant in figure["layout"]["annotations"]:
ant["font"].update(size=size)
return figure
def _filter_dict(
param_dict: TParameterization, subset_keys: List[str]
) -> TParameterization:
"""Filter a dictionary to keys present in a given list."""
return {k: v for k, v in param_dict.items() if k in subset_keys}
def _get_in_sample_arms(
model: ModelBridge,
metric_names: Set[str],
fixed_features: Optional[ObservationFeatures] = None,
) -> Tuple[Dict[str, PlotInSampleArm], RawData, Dict[str, TParameterization]]:
"""Get in-sample arms from a model with observed and predicted values
for specified metrics.
Returns a PlotInSampleArm object in which repeated observations are merged
with IVW, and a RawData object in which every observation is listed.
Fixed features input can be used to override fields of the insample arms
when making model predictions.
Args:
model: An instance of the model bridge.
metric_names: Restrict predictions to these metrics. If None, uses all
metrics in the model.
fixed_features: Features that should be fixed in the arms this function
will obtain predictions for.
Returns:
A tuple containing
- Map from arm name to PlotInSampleArm.
- List of the data for each observation like::
{'metric_name': 'likes', 'arm_name': '0_0', 'mean': 1., 'sem': 0.1}
- Map from arm name to parameters
"""
observations = model.get_training_data()
# Calculate raw data
raw_data = []
arm_name_to_parameters = {}
for obs in observations:
arm_name_to_parameters[obs.arm_name] = obs.features.parameters
for j, metric_name in enumerate(obs.data.metric_names):
if metric_name in metric_names:
raw_data.append(
{
"metric_name": metric_name,
"arm_name": obs.arm_name,
"mean": obs.data.means[j],
"sem": np.sqrt(obs.data.covariance[j, j]),
}
)
# Check that we have one ObservationFeatures per arm name since we
# key by arm name and the model is not Multi-task.
# If "TrialAsTask" is present, one of the arms is also chosen.
if ("TrialAsTask" not in model.transforms.keys()) and (
len(arm_name_to_parameters) != len(observations)
):
logger.error(
"Have observations of arms with different features but same"
" name. Arbitrary one will be plotted."
)
# Merge multiple measurements within each Observation with IVW to get
# un-modeled prediction
t = IVW(None, [], [])
obs_data = t.transform_observation_data([obs.data for obs in observations], [])
# Start filling in plot data
in_sample_plot: Dict[str, PlotInSampleArm] = {}
for i, obs in enumerate(observations):
if obs.arm_name is None:
raise ValueError("Observation must have arm name for plotting.")
# Extract raw measurement
obs_y = {} # Observed metric means.
obs_se = {} # Observed metric standard errors.
# Use the IVW data, not obs.data
for j, metric_name in enumerate(obs_data[i].metric_names):
if metric_name in metric_names:
obs_y[metric_name] = obs_data[i].means[j]
obs_se[metric_name] = np.sqrt(obs_data[i].covariance[j, j])
# Make a prediction.
if model.training_in_design[i]:
features = obs.features
if fixed_features is not None:
features.update_features(fixed_features)
pred_y, pred_se = _predict_at_point(model, features, metric_names)
else:
# Use raw data for out-of-design points
pred_y = obs_y
pred_se = obs_se
in_sample_plot[not_none(obs.arm_name)] = PlotInSampleArm(
name=not_none(obs.arm_name),
y=obs_y,
se=obs_se,
parameters=obs.features.parameters,
y_hat=pred_y,
se_hat=pred_se,
context_stratum=None,
)
return in_sample_plot, raw_data, arm_name_to_parameters
def _predict_at_point(
model: ModelBridge, obsf: ObservationFeatures, metric_names: Set[str]
) -> Tuple[Dict[str, float], Dict[str, float]]:
"""Make a prediction at a point.
Returns mean and standard deviation in format expected by plotting.
Args:
model: ModelBridge
obsf: ObservationFeatures for which to predict
metric_names: Limit predictions to these metrics.
Returns:
A tuple containing
- Map from metric name to prediction.
- Map from metric name to standard error.
"""
y_hat = {}
se_hat = {}
f_pred, cov_pred = model.predict([obsf])
for metric_name in f_pred:
if metric_name in metric_names:
y_hat[metric_name] = f_pred[metric_name][0]
se_hat[metric_name] = np.sqrt(cov_pred[metric_name][metric_name][0])
return y_hat, se_hat
def _get_out_of_sample_arms(
model: ModelBridge,
generator_runs_dict: Dict[str, GeneratorRun],
metric_names: Set[str],
fixed_features: Optional[ObservationFeatures] = None,
) -> Dict[str, Dict[str, PlotOutOfSampleArm]]:
"""Get out-of-sample predictions from a model given a dict of generator runs.
Fixed features input can be used to override fields of the candidate arms
when making model predictions.
Args:
model: The model.
generator_runs_dict: a mapping from generator run name to generator run.
metric_names: metrics to include in the plot.
Returns:
A mapping from name to a mapping from arm name to plot.
"""
out_of_sample_plot: Dict[str, Dict[str, PlotOutOfSampleArm]] = {}
for generator_run_name, generator_run in generator_runs_dict.items():
out_of_sample_plot[generator_run_name] = {}
for arm in generator_run.arms:
# This assumes context is None
obsf = ObservationFeatures.from_arm(arm)
if fixed_features is not None:
obsf.update_features(fixed_features)
# Make a prediction
try:
pred_y, pred_se = _predict_at_point(model, obsf, metric_names)
except Exception:
# Check if it is an out-of-design arm.
if not model.model_space.check_membership(obsf.parameters):
# Skip this point
continue
else:
# It should have worked
raise
arm_name = arm.name_or_short_signature
out_of_sample_plot[generator_run_name][arm_name] = PlotOutOfSampleArm(
name=arm_name,
parameters=obsf.parameters,
y_hat=pred_y,
se_hat=pred_se,
context_stratum=None,
)
return out_of_sample_plot
def get_plot_data(
model: ModelBridge,
generator_runs_dict: Dict[str, GeneratorRun],
metric_names: Optional[Set[str]] = None,
fixed_features: Optional[ObservationFeatures] = None,
) -> Tuple[PlotData, RawData, Dict[str, TParameterization]]:
"""Format data object with metrics for in-sample and out-of-sample
arms.
Calculate both observed and predicted metrics for in-sample arms.
Calculate predicted metrics for out-of-sample arms passed via the
`generator_runs_dict` argument.
In PlotData, in-sample observations are merged with IVW. In RawData, they
are left un-merged and given as a list of dictionaries, one for each
observation and having keys 'arm_name', 'mean', and 'sem'.
Args:
model: The model.
generator_runs_dict: a mapping from generator run name to generator run.
metric_names: Restrict predictions to this set. If None, all metrics
in the model will be returned.
fixed_features: Fixed features to use when making model predictions.
Returns:
A tuple containing
- PlotData object with in-sample and out-of-sample predictions.
- List of observations like::
{'metric_name': 'likes', 'arm_name': '0_1', 'mean': 1., 'sem': 0.1}.
- Mapping from arm name to parameters.
"""
metrics_plot = model.metric_names if metric_names is None else metric_names
in_sample_plot, raw_data, cond_name_to_parameters = _get_in_sample_arms(
model=model, metric_names=metrics_plot, fixed_features=fixed_features
)
out_of_sample_plot = _get_out_of_sample_arms(
model=model,
generator_runs_dict=generator_runs_dict,
metric_names=metrics_plot,
fixed_features=fixed_features,
)
# pyre-fixme[16]: `Optional` has no attribute `arm_name`.
status_quo_name = None if model.status_quo is None else model.status_quo.arm_name
plot_data = PlotData(
metrics=list(metrics_plot),
in_sample=in_sample_plot,
out_of_sample=out_of_sample_plot,
status_quo_name=status_quo_name,
)
return plot_data, raw_data, cond_name_to_parameters
def get_range_parameter(model: ModelBridge, param_name: str) -> RangeParameter:
"""
Get the range parameter with the given name from the model.
Throws if parameter doesn't exist or is not a range parameter.
Args:
model: The model.
param_name: The name of the RangeParameter to be found.
Returns: The RangeParameter named `param_name`.
"""
range_param = model.model_space.parameters.get(param_name)
if range_param is None:
raise ValueError(f"Parameter `{param_name}` does not exist.")
if not isinstance(range_param, RangeParameter):
raise ValueError(f"{param_name} is not a RangeParameter")
return range_param
def get_range_parameters(model: ModelBridge) -> List[RangeParameter]:
"""
Get a list of range parameters from a model.
Args:
model: The model.
Returns: List of RangeParameters.
"""
return [
parameter
for parameter in model.model_space.parameters.values()
if isinstance(parameter, RangeParameter)
]
def get_grid_for_parameter(parameter: RangeParameter, density: int) -> np.ndarray:
"""Get a grid of points along the range of the parameter.
Will be a log-scale grid if parameter is log scale.
Args:
parameter: Parameter for which to generate grid.
density: Number of points in the grid.
"""
is_log = parameter.log_scale
if is_log:
grid = np.linspace(
np.log10(parameter.lower), np.log10(parameter.upper), density
)
grid = 10 ** grid
else:
grid = np.linspace(parameter.lower, parameter.upper, density)
return grid
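# For instance, a log-scale parameter with lower=1, upper=100 and density=3
# yields approximately [1., 10., 100.], while a linear parameter with the same
# bounds yields [1.0, 50.5, 100.0].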
def get_fixed_values(
model: ModelBridge,
slice_values: Optional[Dict[str, Any]] = None,
trial_index: Optional[int] = None,
) -> TParameterization:
"""Get fixed values for parameters in a slice plot.
If there is an in-design status quo, those values will be used. Otherwise,
the mean of RangeParameters or the mode of ChoiceParameters is used.
Any value in slice_values will override the above.
Args:
model: ModelBridge being used for plotting
slice_values: Map from parameter name to value at which is should be
fixed.
Returns: Map from parameter name to fixed value.
"""
if trial_index is not None:
if slice_values is None:
slice_values = {}
slice_values["TRIAL_PARAM"] = str(trial_index)
# Check if status_quo is in design
if model.status_quo is not None and model.model_space.check_membership(
# pyre-fixme[16]: `Optional` has no attribute `features`.
model.status_quo.features.parameters
):
setx = model.status_quo.features.parameters
else:
observations = model.get_training_data()
setx = {}
for p_name, parameter in model.model_space.parameters.items():
# Exclude out of design status quo (no parameters)
vals = [
obs.features.parameters[p_name]
for obs in observations
if (
len(obs.features.parameters) > 0
and parameter.validate(obs.features.parameters[p_name])
)
]
if isinstance(parameter, FixedParameter):
setx[p_name] = parameter.value
elif isinstance(parameter, ChoiceParameter):
setx[p_name] = Counter(vals).most_common(1)[0][0]
elif isinstance(parameter, RangeParameter):
setx[p_name] = parameter.cast(np.mean(vals))
if slice_values is not None:
# slice_values has type Dictionary[str, Any]
setx.update(slice_values)
return setx
# Utility methods ported from JS
def contour_config_to_trace(config):
# Load from config
arm_data = config["arm_data"]
density = config["density"]
grid_x = config["grid_x"]
grid_y = config["grid_y"]
f = config["f"]
lower_is_better = config["lower_is_better"]
metric = config["metric"]
rel = config["rel"]
sd = config["sd"]
xvar = config["xvar"]
yvar = config["yvar"]
green_scale = config["green_scale"]
green_pink_scale = config["green_pink_scale"]
blue_scale = config["blue_scale"]
# format data
res = relativize_data(f, sd, rel, arm_data, metric)
f_final = res[0]
sd_final = res[1]
# calculate max of abs(outcome), used for colorscale
f_absmax = max(abs(min(f_final)), max(f_final))
# transform to nested array
f_plt = []
for ind in range(0, len(f_final), density):
f_plt.append(f_final[ind : ind + density])
sd_plt = []
for ind in range(0, len(sd_final), density):
sd_plt.append(sd_final[ind : ind + density])
CONTOUR_CONFIG = {
"autocolorscale": False,
"autocontour": True,
"contours": {"coloring": "heatmap"},
"hoverinfo": "x+y+z",
"ncontours": int(density / 2),
"type": "contour",
"x": grid_x,
"y": grid_y,
}
if rel:
        f_scale = list(reversed(green_pink_scale)) if lower_is_better else green_pink_scale
else:
f_scale = green_scale
f_trace = {
"colorbar": {
"x": 0.45,
"y": 0.5,
"ticksuffix": "%" if rel else "",
"tickfont": {"size": 8},
},
"colorscale": [(i / (len(f_scale) - 1), rgb(v)) for i, v in enumerate(f_scale)],
"xaxis": "x",
"yaxis": "y",
"z": f_plt,
# zmax and zmin are ignored if zauto is true
"zauto": not rel,
"zmax": f_absmax,
"zmin": -f_absmax,
}
sd_trace = {
"colorbar": {
"x": 1,
"y": 0.5,
"ticksuffix": "%" if rel else "",
"tickfont": {"size": 8},
},
"colorscale": [
(i / (len(blue_scale) - 1), rgb(v)) for i, v in enumerate(blue_scale)
],
"xaxis": "x2",
"yaxis": "y2",
"z": sd_plt,
}
f_trace.update(CONTOUR_CONFIG)
sd_trace.update(CONTOUR_CONFIG)
# get in-sample arms
arm_text = list(arm_data["in_sample"].keys())
arm_x = [
arm_data["in_sample"][arm_name]["parameters"][xvar] for arm_name in arm_text
]
arm_y = [
arm_data["in_sample"][arm_name]["parameters"][yvar] for arm_name in arm_text
]
# configs for in-sample arms
base_in_sample_arm_config = {
"hoverinfo": "text",
"legendgroup": "In-sample",
"marker": {"color": "black", "symbol": 1, "opacity": 0.5},
"mode": "markers",
"name": "In-sample",
"text": arm_text,
"type": "scatter",
"x": arm_x,
"y": arm_y,
}
f_in_sample_arm_trace = {"xaxis": "x", "yaxis": "y"}
sd_in_sample_arm_trace = {"showlegend": False, "xaxis": "x2", "yaxis": "y2"}
f_in_sample_arm_trace.update(base_in_sample_arm_config)
sd_in_sample_arm_trace.update(base_in_sample_arm_config)
traces = [f_trace, sd_trace, f_in_sample_arm_trace, sd_in_sample_arm_trace]
# iterate over out-of-sample arms
for i, generator_run_name in enumerate(arm_data["out_of_sample"].keys()):
symbol = i + 2 # symbols starts from 2 for candidate markers
ax = []
ay = []
atext = []
for arm_name in arm_data["out_of_sample"][generator_run_name].keys():
ax.append(
arm_data["out_of_sample"][generator_run_name][arm_name]["parameters"][
xvar
]
)
ay.append(
arm_data["out_of_sample"][generator_run_name][arm_name]["parameters"][
yvar
]
)
atext.append("<em>Candidate " + arm_name + "</em>")
traces.append(
{
"hoverinfo": "text",
"legendgroup": generator_run_name,
"marker": {"color": "black", "symbol": symbol, "opacity": 0.5},
"mode": "markers",
"name": generator_run_name,
"text": atext,
"type": "scatter",
"xaxis": "x",
"x": ax,
"yaxis": "y",
"y": ay,
}
)
traces.append(
{
"hoverinfo": "text",
"legendgroup": generator_run_name,
"marker": {"color": "black", "symbol": symbol, "opacity": 0.5},
"mode": "markers",
"name": "In-sample",
"showlegend": False,
"text": atext,
"type": "scatter",
"x": ax,
"xaxis": "x2",
"y": ay,
"yaxis": "y2",
}
)
return traces
def axis_range(grid: List[float], is_log: bool) -> List[float]:
if is_log:
return [math.log10(min(grid)), math.log10(max(grid))]
else:
return [min(grid), max(grid)]
def _relativize(m_t: float, sem_t: float, m_c: float, sem_c: float) -> List[float]:
r_hat = (m_t - m_c) / abs(m_c) - sem_c ** 2 * m_t / abs(m_c) ** 3
variance = (sem_t ** 2 + (m_t / m_c * sem_c) ** 2) / m_c ** 2
return [r_hat, math.sqrt(variance)]
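# _relativize applies the delta method to the ratio of two noisy means: the
# first term of r_hat is the naive relative change (m_t - m_c) / |m_c|, the
# second term corrects the bias introduced by noise in the control mean, and
# the second return value is the propagated standard error of that ratio.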
def relativize_data(
f: List[float], sd: List[float], rel: bool, arm_data: Dict[Any, Any], metric: str
) -> List[List[float]]:
# if relative, extract status quo & compute ratio
f_final = [] if rel else f
sd_final = [] if rel else sd
if rel:
f_sq = arm_data["in_sample"][arm_data["status_quo_name"]]["y"][metric]
sd_sq = arm_data["in_sample"][arm_data["status_quo_name"]]["se"][metric]
for i in range(len(f)):
res = _relativize(f[i], sd[i], f_sq, sd_sq)
f_final.append(100 * res[0])
sd_final.append(100 * res[1])
return [f_final, sd_final]
def rgb(arr: List[int]) -> str:
return "rgb({},{},{})".format(*arr)
def infer_is_relative(
model: ModelBridge, metrics: List[str], non_constraint_rel: bool
) -> Dict[str, bool]:
"""Determine whether or not to relativize a metric.
Metrics that are constraints will get this decision from their `relative` flag.
Other metrics will use the `default_rel`.
Args:
model: model fit on metrics.
metrics: list of metric names.
non_constraint_rel: whether or not to relativize non-constraint metrics
Returns:
Dict[str, bool] containing whether or not to relativize each input metric.
"""
relative = {}
constraint_relativity = {}
if model._optimization_config:
constraints = not_none(model._optimization_config).outcome_constraints
constraint_relativity = {
constraint.metric.name: constraint.relative for constraint in constraints
}
for metric in metrics:
if metric not in constraint_relativity:
relative[metric] = non_constraint_rel
else:
relative[metric] = constraint_relativity[metric]
return relative
def slice_config_to_trace(
arm_data,
arm_name_to_parameters,
f,
fit_data,
grid,
metric,
param,
rel,
setx,
sd,
is_log,
visible,
):
# format data
res = relativize_data(f, sd, rel, arm_data, metric)
f_final = res[0]
sd_final = res[1]
# get data for standard deviation fill plot
sd_upper = []
sd_lower = []
for i in range(len(sd)):
sd_upper.append(f_final[i] + 2 * sd_final[i])
sd_lower.append(f_final[i] - 2 * sd_final[i])
grid_rev = list(reversed(grid))
sd_lower_rev = list(reversed(sd_lower))
sd_x = grid + grid_rev
sd_y = sd_upper + sd_lower_rev
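    # Concatenating the upper band with the reversed lower band traces a closed
    # polygon; sd_trace below fills it with fill='toself' to draw a +/- 2 SD
    # confidence band around the model prediction.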
# get data for observed arms and error bars
arm_x = []
arm_y = []
arm_sem = []
for row in fit_data:
parameters = arm_name_to_parameters[row["arm_name"]]
plot = True
for p in setx.keys():
if p != param and parameters[p] != setx[p]:
plot = False
if plot:
arm_x.append(parameters[param])
arm_y.append(row["mean"])
arm_sem.append(row["sem"])
arm_res = relativize_data(arm_y, arm_sem, rel, arm_data, metric)
arm_y_final = arm_res[0]
arm_sem_final = [x * 2 for x in arm_res[1]]
# create traces
f_trace = {
"x": grid,
"y": f_final,
"showlegend": False,
"hoverinfo": "x+y",
"line": {"color": "rgba(128, 177, 211, 1)"},
"visible": visible,
}
arms_trace = {
"x": arm_x,
"y": arm_y_final,
"mode": "markers",
"error_y": {
"type": "data",
"array": arm_sem_final,
"visible": True,
"color": "black",
},
"line": {"color": "black"},
"showlegend": False,
"hoverinfo": "x+y",
"visible": visible,
}
sd_trace = {
"x": sd_x,
"y": sd_y,
"fill": "toself",
"fillcolor": "rgba(128, 177, 211, 0.2)",
"line": {"color": "rgba(128, 177, 211, 0.0)"},
"showlegend": False,
"hoverinfo": "none",
"visible": visible,
}
traces = [sd_trace, f_trace, arms_trace]
# iterate over out-of-sample arms
for i, generator_run_name in enumerate(arm_data["out_of_sample"].keys()):
ax = []
ay = []
asem = []
atext = []
for arm_name in arm_data["out_of_sample"][generator_run_name].keys():
parameters = arm_data["out_of_sample"][generator_run_name][arm_name][
"parameters"
]
plot = True
for p in setx.keys():
if p != param and parameters[p] != setx[p]:
plot = False
if plot:
ax.append(parameters[param])
ay.append(
arm_data["out_of_sample"][generator_run_name][arm_name]["y_hat"][
metric
]
)
asem.append(
arm_data["out_of_sample"][generator_run_name][arm_name]["se_hat"][
metric
]
)
atext.append("<em>Candidate " + arm_name + "</em>")
out_of_sample_arm_res = relativize_data(ay, asem, rel, arm_data, metric)
ay_final = out_of_sample_arm_res[0]
asem_final = [x * 2 for x in out_of_sample_arm_res[1]]
traces.append(
{
"hoverinfo": "text",
"legendgroup": generator_run_name,
"marker": {"color": "black", "symbol": i + 1, "opacity": 0.5},
"mode": "markers",
"error_y": {
"type": "data",
"array": asem_final,
"visible": True,
"color": "black",
},
"name": generator_run_name,
"text": atext,
"type": "scatter",
"xaxis": "x",
"x": ax,
"yaxis": "y",
"y": ay_final,
"visible": visible,
}
)
return traces
| 32.413095
| 88
| 0.592684
|
681c2dbcba09fc1f35e2ca0aae6848e63780e95e
| 678
|
py
|
Python
|
temboo/core/Library/Yahoo/Weather/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Yahoo/Weather/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Yahoo/Weather/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Yahoo.Weather.GetTemperature import GetTemperature, GetTemperatureInputSet, GetTemperatureResultSet, GetTemperatureChoreographyExecution
from temboo.Library.Yahoo.Weather.GetWeather import GetWeather, GetWeatherInputSet, GetWeatherResultSet, GetWeatherChoreographyExecution
from temboo.Library.Yahoo.Weather.GetWeatherByAddress import GetWeatherByAddress, GetWeatherByAddressInputSet, GetWeatherByAddressResultSet, GetWeatherByAddressChoreographyExecution
from temboo.Library.Yahoo.Weather.GetWeatherByCoordinates import GetWeatherByCoordinates, GetWeatherByCoordinatesInputSet, GetWeatherByCoordinatesResultSet, GetWeatherByCoordinatesChoreographyExecution
| 135.6
| 201
| 0.917404
|
66d5412d318d71e6cdc64b643cf0ad07e785f465
| 1,671
|
py
|
Python
|
setup.py
|
arnaud-morvan/papyrus
|
04ce5730e0af229cbded40fc96dfee132300f4f7
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
arnaud-morvan/papyrus
|
04ce5730e0af229cbded40fc96dfee132300f4f7
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
arnaud-morvan/papyrus
|
04ce5730e0af229cbded40fc96dfee132300f4f7
|
[
"BSD-2-Clause"
] | null | null | null |
from setuptools import setup, find_packages
import os
version = '2.3'
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
TODO = open(os.path.join(here, 'TODO.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
install_requires = [
'pyramid>=1.1a3',
'geojson>=1.1.0',
'GeoAlchemy2>=0.2.4',
'six',
]
if os.environ.get('READTHEDOCS') != 'True':
install_requires.append('Shapely>=1.2')
setup(name='papyrus',
version=version,
description="Geospatial Extensions for Pyramid",
classifiers=[
'Framework :: Pyramid',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
keywords='FOSS4G, Pylons, Pyramid',
author='Eric Lemoine',
author_email='eric.lemoine@gmail.com',
url='https://github.com/elemoine/papyrus',
license='BSD',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points="""
# -*- Entry points: -*-
""",
long_description=README + '\n\n' + TODO + '\n\n' + CHANGES,
)
| 32.134615
| 72
| 0.59605
|
24ce098b8e56d95ed789f5a04072350ecc609c5c
| 65,757
|
py
|
Python
|
reviewboard/testing/testcase.py
|
LloydFinch/reviewboard
|
563c1e8d4dfd860f372281dc0f380a0809f6ae15
|
[
"MIT"
] | 2
|
2020-06-19T14:57:49.000Z
|
2020-06-19T15:17:40.000Z
|
reviewboard/testing/testcase.py
|
LloydFinch/reviewboard
|
563c1e8d4dfd860f372281dc0f380a0809f6ae15
|
[
"MIT"
] | null | null | null |
reviewboard/testing/testcase.py
|
LloydFinch/reviewboard
|
563c1e8d4dfd860f372281dc0f380a0809f6ae15
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import os
import re
import warnings
from contextlib import contextmanager
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Permission, User
from django.core.cache import cache
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.urlresolvers import ResolverMatch
from django.test.client import RequestFactory
from django.utils import six, timezone
from djblets.siteconfig.models import SiteConfiguration
from djblets.testing.testcases import (FixturesCompilerMixin,
TestCase as DjbletsTestCase)
from oauthlib.common import generate_token
from oauth2_provider.models import AccessToken
from reviewboard import scmtools, initialize
from reviewboard.accounts.models import ReviewRequestVisit
from reviewboard.admin.siteconfig import load_site_config
from reviewboard.attachments.models import (FileAttachment,
FileAttachmentHistory)
from reviewboard.diffviewer.differ import DiffCompatVersion
from reviewboard.diffviewer.models import (DiffCommit, DiffSet, DiffSetHistory,
FileDiff)
from reviewboard.notifications.models import WebHookTarget
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import (Comment,
FileAttachmentComment,
GeneralComment,
Group,
Review,
ReviewRequest,
ReviewRequestDraft,
Screenshot,
ScreenshotComment,
StatusUpdate)
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.webapi.models import WebAPIToken
class TestCase(FixturesCompilerMixin, DjbletsTestCase):
"""The base class for Review Board test cases.
This class provides a number of convenient functions for creating
common objects for testing, such as review requests and comments. They're
populated with default data that can be overridden by the callers.
This also overcomes an annoyance with default Django unit tests where
the cache is not cleared across tests, leading to inconsistent results
and useless testing.
"""
local_site_name = 'local-site-1'
local_site_id = 1
ws_re = re.compile(r'\s+')
DEFAULT_FILEDIFF_DATA_DIFF = (
b'--- README\trevision 123\n'
b'+++ README\trevision 123\n'
b'@@ -1 +1 @@\n'
b'-Hello, world!\n'
b'+Hello, everybody!\n'
)
DEFAULT_GIT_FILEDIFF_DATA_DIFF = (
b'diff --git a/README b/README\n'
b'index 94bdd3e..197009f 100644\n'
b'--- README\n'
b'+++ README\n'
b'@@ -2 +2 @@\n'
b'-blah blah\n'
b'+blah!\n'
)
DEFAULT_GIT_README_DIFF = (
b'diff --git a/readme b/readme\n'
b'index d6613f5..5b50866 100644\n'
b'--- a/readme\n'
b'+++ b/readme\n'
b'@@ -1 +1,3 @@\n'
b'Hello there\n'
b'+\n'
b'+Oh hi!\n'
)
DEFAULT_GIT_FILEMODE_DIFF = (
b'diff --git a/testing b/testing\n'
b'old mode 100755\n'
b'new mode 100644\n'
b'index e69de29..bcae657\n'
b'--- a/testing\n'
b'+++ b/testing\n'
b'@@ -0,0 +1 @@\n'
b'+ADD\n'
b'diff --git a/testing2 b/testing2\n'
b'old mode 100644\n'
b'new mode 100755\n'
)
DEFAULT_GIT_FILE_NOT_FOUND_DIFF = (
b'diff --git a/missing-file b/missing-file\n'
b'index d6613f0..5b50866 100644\n'
b'--- a/missing-file\n'
b'+++ b/missing-file\n'
b'@@ -1 +1,3 @@\n'
b'Hello there\n'
b'+\n'
b'+Oh hi!\n'
)
DEFAULT_GIT_BINARY_IMAGE_DIFF = (
b'diff --git a/logo.png b/logo.png\n'
b'index 86b520c..86b520d\n'
b'Binary files a/logo.png and b/logo.png differ\n'
)
def setUp(self):
super(TestCase, self).setUp()
siteconfig = SiteConfiguration.objects.get_current()
siteconfig.set('mail_from_spoofing', 'never')
siteconfig.save(update_fields=('settings',))
initialize(load_extensions=False)
self._local_sites = {}
# Clear the cache so that previous tests don't impact this one.
cache.clear()
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_local_site_or_none(self, name):
"""Returns a LocalSite matching the name, if provided, or None."""
if name:
return self.get_local_site(name=name)
else:
return None
def get_local_site(self, name):
if name not in self._local_sites:
self._local_sites[name] = LocalSite.objects.get(name=name)
return self._local_sites[name]
def create_http_request(self, path='/', user=None, method='get',
with_local_site=False, local_site=None,
resolver_match=None,
view=None, **kwargs):
"""Create an HttpRequest for testing.
This wraps :py:class:`~django.test.client.RequestFactory`,
automatically handing some common fields normally set by middleware,
including the user, resolver match, and Local Site.
Args:
path (unicode, optional):
The path for the HTTP request, relative to the server root.
user (django.contrib.auth.models.User, optional):
The user authenticated for the request. If not provided,
:py:class:`~django.contrib.auth.models.AnonymousUser` will
be used.
method (unicode, optional):
The method on :py:class:`~django.test.client.RequestFactory`
used to create the request.
with_local_site (bool, optional):
If set, the default Local Site will be assigned to the
request, if ``local_site`` is not provided in the call.
local_site (reviewboard.site.models.LocalSite, optional):
The Local Site to assign to the request.
resolver_match (django.core.urlresolvers.ResolverMatch, optional):
A custom resolver match to set for the request. This may be
used by views to determine which URL entry was invoked. If
not provided, a blank one pointing to the provided ``view``
will be used.
view (callable, optional):
The view used for a default
:py:class:`~django.core.urlresolvers.ResolverMatch`.
**kwargs (dict):
Additional keyword arguments to pass to the request factory
method.
Returns:
django.http.HttpRequest:
The resulting HTTP request.
Raises:
ValueError:
One or more of the values provided was invalid.
"""
factory = RequestFactory()
try:
factory_method = getattr(factory, method)
except AttributeError:
raise ValueError('Invalid RequestFactory method "%s"' % method)
if local_site is None:
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
if resolver_match is None:
resolver_match = ResolverMatch(func=view,
args=[],
kwargs={})
request = factory_method(path, **kwargs)
request.local_site = local_site
request.resolver_match = resolver_match
request.user = user or AnonymousUser()
return request
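    # A hypothetical call (argument values are only examples, not taken from
    # the test suite):
    #
    #     request = self.create_http_request(path='/r/1/', method='post',
    #                                        user=some_user)
    #
    # returns an HttpRequest with user, local_site and resolver_match already
    # populated, mirroring what the middleware would normally provide.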
def create_user(self, username='test-user', password='',
email='test@example.com', perms=None, **kwargs):
"""Create a User for testing.
Args:
username (unicode, optional):
The username.
password (unicode, optional):
The user's password.
email (unicode, optional):
The user's e-mail address.
perms (list of tuple, optional):
A list of permissions to assign. Each item is a tuple
of ``(app_label, permission_name)``.
**kwargs (dict):
Additional attributes for the user.
Returns:
django.contrib.auth.models.User:
The new User object.
"""
user = User.objects.create(username=username,
password=password,
email=email,
**kwargs)
if perms:
user.user_permissions.add(*[
Permission.objects.get(codename=perm_name,
content_type__app_label=perm_app_label)
for perm_app_label, perm_name in perms
])
return user
def create_webapi_token(self, user, note='Sample note',
policy={'access': 'rw'},
with_local_site=False,
**kwargs):
"""Creates a WebAPIToken for testing."""
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
return WebAPIToken.objects.generate_token(user=user,
note=note,
policy=policy,
local_site=local_site)
@contextmanager
def assert_warns(self, cls=DeprecationWarning, message=None):
"""A context manager for asserting code generates a warning.
This method only supports code which generates a single warning.
Tests which make use of code generating multiple warnings will
need to manually catch their warnings.
"""
with warnings.catch_warnings(record=True) as w:
# Some warnings such as DeprecationWarning are filtered by
# default, stop filtering them.
warnings.simplefilter('always')
# Now that we've done that, some warnings may come in that we
# really don't want. We want to turn those back off.
try:
from django.utils.deprecation import RemovedInDjango20Warning
warnings.filterwarnings('ignore',
category=RemovedInDjango20Warning)
except ImportError:
pass
self.assertEqual(len(w), 0)
yield
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, cls))
if message is not None:
self.assertEqual(message, six.text_type(w[-1].message))
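    # Sketch of the intended use (deprecated_helper is a placeholder for any
    # code expected to emit exactly one warning of the given class):
    #
    #     with self.assert_warns(DeprecationWarning, 'helper is deprecated'):
    #         deprecated_helper()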
def create_diff_file_attachment(self, filediff, from_modified=True,
review_request=None,
orig_filename='filename.png',
caption='My Caption',
mimetype='image/png',
**kwargs):
"""Creates a diff-based FileAttachment for testing.
The FileAttachment is tied to the given FileDiff. It's populated
with default data that can be overridden by the caller.
"""
file_attachment = FileAttachment.objects.create_from_filediff(
filediff=filediff,
from_modified=from_modified,
caption=caption,
orig_filename=orig_filename,
mimetype=mimetype,
**kwargs)
filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
'logo.png')
with open(filename, 'rb') as f:
file_attachment.file.save(os.path.basename(filename), File(f),
save=True)
if review_request:
review_request.file_attachments.add(file_attachment)
return file_attachment
def create_diffcommit(self,
repository=None,
diffset=None,
commit_id='r1',
parent_id='r0',
diff_contents=DEFAULT_GIT_FILEDIFF_DATA_DIFF,
parent_diff_contents=None,
author_name='Author',
author_email='author@example.com',
author_date=None,
commit_message='Commit message',
committer_name='Committer',
committer_email='committer@example.com',
committer_date=None,
**kwargs):
"""Create a DiffCommit for testing.
This also creates a
:py:class:`reviewboard.diffviewer.models.filediff.FileDiff` attached to
the commit.
Args:
repository (reviewboard.scmtools.models.Repository, optional):
The repository the commit is associated with.
diffset (reviewboard.diffviewer.models.diffset.DiffSet, optional):
The parent diffset.
commit_id (unicode, optional):
The commit ID.
parent_id (unicode, optional):
The commit ID of the parent commit.
diff_contents (bytes, optional):
The contents of the diff.
parent_diff_contents (bytes, optional):
The contents of the parent diff, if any.
author_name (unicode, optional):
The name of the commit's author.
author_email (unicode, optional):
The e-mail address of the commit's author.
author_date (datetime.datetime, optional):
The date the commit was authored.
commit_message (unicode, optional):
The commit message.
committer_name (unicode, optional):
The name of the committer, if any.
committer_email (unicode, optional):
The e-mail address of the committer, if any.
committer_date (datetime.datetime, optional):
The date the commit was committed, if any.
**kwargs (dict):
Keyword arguments to be passed to the
:py:class:`~reviewboard.diffviewer.models.diffcommit.
DiffCommit` initializer.
Returns:
reviewboard.diffviewer.models.diffcommit.DiffCommit:
The resulting DiffCommit.
"""
assert isinstance(diff_contents, bytes)
if diffset is None:
diffset = self.create_diffset(repository=repository)
else:
repository = diffset.repository
if author_date is None:
author_date = timezone.now()
if not committer_date and committer_name and committer_email:
committer_date = author_date
if ((not committer_name and committer_email) or
(committer_name and not committer_email)):
raise ValueError(
'Either both or neither of committer_name and committer_email '
'must be provided.')
if parent_diff_contents:
assert isinstance(parent_diff_contents, bytes)
parent_diff_file_name = 'parent_diff'
else:
parent_diff_file_name = None
return DiffCommit.objects.create_from_data(
repository=repository,
diff_file_name='diff',
diff_file_contents=diff_contents,
parent_diff_file_name=parent_diff_file_name,
parent_diff_file_contents=parent_diff_contents,
diffset=diffset,
commit_id=commit_id,
parent_id=parent_id,
author_name=author_name,
author_email=author_email,
author_date=author_date,
commit_message=commit_message,
request=None,
committer_name=committer_name,
committer_email=committer_email,
committer_date=committer_date,
check_existence=False,
**kwargs)
def create_diffset(self, review_request=None, revision=1, repository=None,
draft=False, name='diffset'):
"""Creates a DiffSet for testing.
The DiffSet defaults to revision 1. This can be overriden by the
caller.
DiffSets generally are tied to a ReviewRequest, but it's optional.
"""
if review_request:
repository = review_request.repository
diffset = DiffSet.objects.create(
name=name,
revision=revision,
repository=repository,
diffcompat=DiffCompatVersion.DEFAULT)
if review_request:
if draft:
review_request_draft = \
self.create_review_request_draft(review_request)
review_request_draft.diffset = diffset
review_request_draft.save()
else:
review_request.diffset_history.diffsets.add(diffset)
return diffset
def create_diff_comment(self, review, filediff, interfilediff=None,
text='My comment', issue_opened=False,
issue_status=None, first_line=1, num_lines=5,
extra_fields=None, reply_to=None, **kwargs):
"""Create a Comment for testing.
The comment is tied to the given Review and FileDiff (and, optionally,
an interfilediff). It's populated with default data that can be
overridden by the caller.
Args:
review (reviewboard.reviews.models.review.Review):
The review associated with the comment.
filediff (reviewboard.diffviewer.models.filediff.FileDiff):
The FileDiff associated with the comment.
interfilediff (reviewboard.diffviewer.models.filediff.FileDiff,
optional):
The FileDiff used for the end of an interdiff range associated
with the comment.
text (unicode):
The text for the comment.
issue_opened (bool, optional):
Whether an issue is to be opened for the comment.
issue_status (unicode, optional):
The issue status to set, if an issue is opened. Defaults to
being an open issue.
first_line (int, optional):
The first line (0-based) of the comment range.
num_lines (int, optional):
The number of lines in the comment.
extra_fields (dict, optional):
Extra data to set on the comment.
reply_to (reviewboard.reviews.models.diff_comment.Comment,
optional):
The comment this comment replies to.
**kwargs (dict):
Additional model attributes to set on the comment.
Returns:
reviewboard.reviews.models.diff_comment.Comment:
The resulting comment.
"""
if issue_opened and not issue_status:
issue_status = Comment.OPEN
comment = Comment(
filediff=filediff,
interfilediff=interfilediff,
first_line=first_line,
num_lines=num_lines,
text=text,
issue_opened=issue_opened,
issue_status=issue_status,
reply_to=reply_to,
**kwargs)
if extra_fields:
comment.extra_data = extra_fields
comment.save()
review.comments.add(comment)
return comment
def create_file_attachment(self, review_request,
attachment_history=None,
draft=False,
active=True,
**kwargs):
"""Create a FileAttachment for testing.
The attachment is tied to the given
:py:class:`~reviewboard.reviews.models.review_request.ReviewRequest`.
It's populated with default data that can be overridden by the caller.
Args:
review_request (reviewboard.reviews.models.review_request.
ReviewRequest):
The review request that ultimately owns the file attachment.
attachment_history (reviewboard.attachments.models.
FileAttachmentHistory,
optional):
An attachment history managing the file attachment.
draft (bool or
reviewboard.reviews.models.review_request_draft.
ReviewRequestDraft,
                   optional):
A draft to associate the attachment with. This can also be
a boolean, for legacy reasons, which will attempt to look up
or create a draft for the review request.
            active (bool, optional):
Whether this attachment is considered active (not deleted).
**kwargs (dict):
Additional keyword arguments to pass to
:py:meth:`create_file_attachment_base`.
Returns:
reviewboard.attachments.models.FileAttachment:
The resulting file attachment.
"""
file_attachment = self.create_file_attachment_base(
attachment_history=attachment_history,
**kwargs)
if draft:
if isinstance(draft, ReviewRequestDraft):
review_request_draft = draft
else:
review_request_draft = \
self.create_review_request_draft(review_request)
if active:
attachments = review_request_draft.file_attachments
else:
attachments = review_request_draft.inactive_file_attachments
else:
if active:
attachments = review_request.file_attachments
else:
attachments = review_request.inactive_file_attachments
attachments.add(file_attachment)
return file_attachment
def create_user_file_attachment(self, user, has_file=False, **kwargs):
"""Create a user FileAttachment for testing.
The :py:class:`reviewboard.attachments.models.FileAttachment` is tied
to the given :py:class:`django.contrib.auth.models.User`. It's
populated with default data that can be overridden by the caller.
Notably, by default the FileAttachment will be created without a file
or a local_site.
Args:
user (django.contrib.auth.models.User):
The user who owns the file attachment.
has_file (bool, optional):
``True`` if an actual file object should be included in the
model. This is ``False`` by default.
**kwargs (dict):
Additional keyword arguments to pass to
:py:meth:`create_file_attachment_base`.
Returns:
reviewboard.attachments.models.FileAttachment:
The new file attachment instance.
"""
return self.create_file_attachment_base(user=user,
has_file=has_file,
**kwargs)
def create_file_attachment_comment(self, review, file_attachment,
diff_against_file_attachment=None,
text='My comment', issue_opened=False,
issue_status=None, extra_fields=None,
reply_to=None, **kwargs):
"""Create a FileAttachmentComment for testing.
The comment is tied to the given Review and FileAttachment. It's
populated with default data that can be overridden by the caller.
Args:
review (reviewboard.reviews.models.review.Review):
The review associated with the comment.
file_attachment (reviewboard.attachments.models.FileAttachment):
The file attachment associated with the comment.
diff_against_file_attachment (reviewboard.attachments.models.
FileAttachment, optional):
The file attachment being diff against, for comments on
attachment diffs.
text (unicode):
The text for the comment.
issue_opened (bool, optional):
Whether an issue is to be opened for the comment.
issue_status (unicode, optional):
The issue status to set, if an issue is opened. Defaults to
being an open issue.
extra_fields (dict, optional):
Extra data to set on the comment.
reply_to (reviewboard.reviews.models.file_attachment_comment.
FileAttachmentComment, optional):
The comment this comment replies to.
**kwargs (dict):
Additional model attributes to set on the comment.
Returns:
reviewboard.reviews.models.file_attachment_comment.FileAttachmentComment:
The resulting comment.
"""
if issue_opened and not issue_status:
issue_status = FileAttachmentComment.OPEN
comment = FileAttachmentComment(
file_attachment=file_attachment,
diff_against_file_attachment=diff_against_file_attachment,
text=text,
issue_opened=issue_opened,
issue_status=issue_status,
reply_to=reply_to,
**kwargs)
if extra_fields:
comment.extra_data = extra_fields
comment.save()
review.file_attachment_comments.add(comment)
return comment
def create_file_attachment_history(self, review_request=None,
display_position=None, **kwargs):
"""Create a FileAttachmentHistory for testing.
Args:
review_request (reviewboard.reviews.models.review_request.
ReviewRequest, optional):
The optional review request to attach the history to.
display_position (int, optional):
The display position on the review request. If not provided,
a proper position will be computed.
**kwargs (dict):
Additional fields to set on the model.
Returns:
reviewboard.attachments.models.FileAttachmentHistory:
The new file attachment instance.
"""
if display_position is None:
if review_request is None:
display_position = 0
else:
display_position = \
FileAttachmentHistory.compute_next_display_position(
review_request)
attachment_history = FileAttachmentHistory.objects.create(
display_position=display_position,
**kwargs)
if review_request is not None:
review_request.file_attachment_histories.add(attachment_history)
return attachment_history
def create_filediff(self, diffset, source_file='/test-file',
dest_file='/test-file', source_revision='123',
dest_detail='124', status=FileDiff.MODIFIED,
diff=DEFAULT_FILEDIFF_DATA_DIFF, commit=None,
save=True):
"""Create a FileDiff for testing.
The FileDiff is tied to the given DiffSet. It's populated with
default data that can be overridden by the caller.
Args:
diffset (reviewboard.diffviewer.models.diffset.DiffSet):
The parent diff set that will own this file.
source_file (unicode, optional):
The source filename.
dest_file (unicode, optional):
The destination filename, which will be the same as
``source_file`` unless the file was moved/renamed/copied.
source_revision (unicode, optional):
The source revision.
dest_detail (unicode, optional):
The destination revision or other detail as found in the
parsed diff. This may be a timestamp or some other value.
status (unicode, optional):
The status of the file. This is the operation performed
as indicated in the diff.
diff (bytes, optional):
The diff contents.
commit (reviewboard.diffviewer.models.diffcommit.DiffCommit,
optional):
The commit to attach the FileDiff to.
save (bool, optional):
Whether to automatically save the resulting object.
Returns:
reviewboard.diffviewer.models.filediff.FileDiff:
The resulting FileDiff.
"""
filediff = FileDiff(
diffset=diffset,
source_file=source_file,
dest_file=dest_file,
source_revision=source_revision,
dest_detail=dest_detail,
status=status,
diff=diff,
commit=commit)
if save:
filediff.save()
return filediff
def create_repository(self, with_local_site=False, name='Test Repo',
tool_name='Git', path=None, local_site=None,
extra_data=None, **kwargs):
"""Create a Repository for testing.
The Repository may optionally be attached to a
:py:class:`~reviewboard.site.models.LocalSite`. It's also populated
with default data that can be overridden by the caller.
Args:
with_local_site (bool, optional):
Whether to create the repository using a Local Site. This
will choose one based on :py:attr:`local_site_name`.
If ``local_site`` is provided, this argument is ignored.
name (unicode, optional):
The name of the repository.
tool_name (unicode, optional):
The name of the registered SCM Tool for the repository.
path (unicode, optional):
The path for the repository. If not provided, one will be
computed.
local_site (reviewboard.site.models.LocalSite, optional):
The explicit Local Site to attach.
extra_data (dict, optional):
Explicit extra_data to attach to the repository.
**kwargs (dict):
Additional fields to set on the repository.
Returns:
reviewboard.scmtools.models.Repository:
The new repository.
"""
if not local_site:
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
testdata_dir = os.path.join(os.path.dirname(scmtools.__file__),
'testdata')
if not path:
if tool_name in ('Git', 'Test',
'TestToolSupportsPendingChangeSets'):
path = os.path.join(testdata_dir, 'git_repo')
elif tool_name == 'Subversion':
path = 'file://' + os.path.join(testdata_dir, 'svn_repo')
elif tool_name == 'Mercurial':
path = os.path.join(testdata_dir, 'hg_repo.bundle')
elif tool_name == 'CVS':
path = os.path.join(testdata_dir, 'cvs_repo')
elif tool_name == 'Perforce':
path = 'localhost:1666'
else:
raise NotImplementedError
repository = Repository(name=name,
local_site=local_site,
tool=Tool.objects.get(name=tool_name),
path=path,
**kwargs)
if extra_data is not None:
repository.extra_data = extra_data
repository.save()
return repository
def create_review_request(self,
with_local_site=False,
create_repository=False,
create_with_history=False,
publish=False,
id=None,
local_id=1001,
local_site=None,
repository=None,
time_added=None,
last_updated=None,
status=ReviewRequest.PENDING_REVIEW,
submitter='doc',
summary='Test Summary',
description='Test Description',
testing_done='Testing',
branch='my-branch',
depends_on=None,
target_people=None,
target_groups=None,
**kwargs):
"""Create a ReviewRequest for testing.
The :py:class:`~reviewboard.reviews.models.review_request.
ReviewRequest` may optionally be attached to a
:py:class:`~reviewboard.site.models.LocalSite`. It's also
populated with default data that can be overridden by the caller.
Args:
with_local_site (bool, optional):
Whether to create this review request on a default
:term:`local site`.
This is ignored if ``local_site`` is provided.
create_repository (bool, optional):
Whether to create a new repository in the database for this
review request.
This can't be set if ``repository`` is provided.
create_with_history (bool, optional):
Whether or not the review request should support multiple
commits.
publish (bool, optional):
Whether to publish the review request after creation.
id (int, optional):
An explicit database ID to set for the review request.
local_id (int, optional):
The ID specific to the :term:`local site`, if one is used.
local_site (reviewboard.site.models.LocalSite, optional):
The LocalSite to associate the review request with.
If not provided, the LocalSite with the name specified in
:py:attr:`local_site_name` will be used.
repository (reviewboard.scmtools.models.Repository, optional):
An explicit repository to set for the review request.
time_added (datetime.datetime, optional):
An explicit creation timestamp to set for the review request.
last_updated (datetime.datetime, optional):
An explicit last updated timestamp to set for the review
request.
status (unicode, optional):
The status of the review request. This must be one of the
values listed in :py:attr:`~reviewboard.reviews.models.
review_request.ReviewRequest.STATUSES`.
submitter (unicode or django.contrib.auth.models.User, optional):
The submitter of the review request. This can be a username
(which will be looked up) or an explicit user.
summary (unicode, optional):
The summary for the review request.
description (unicode, optional):
The description for the review request.
testing_done (unicode, optional):
The Testing Done text for the review request.
branch (unicode, optional):
The branch for the review request.
depends_on (list of reviewboard.reviews.models.review_request.
ReviewRequest, optional):
A list of review requests to set as dependencies.
target_people (list of django.contrib.auth.models.User, optional):
A list of users to set as target reviewers.
target_groups (list of reviewboard.reviews.models.group.Group,
optional):
A list of review groups to set as target reviewers.
**kwargs (dict):
Additional fields to set on the review request.
Returns:
reviewboard.reviews.models.review_request.ReviewRequest:
The resulting review request.
Raises:
ValueError:
An invalid value was provided during initialization.
"""
if not local_site:
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
if not local_site:
local_id = None
if create_repository:
assert not repository
repository = \
self.create_repository(with_local_site=with_local_site)
if not isinstance(submitter, User):
submitter = User.objects.get(username=submitter)
review_request = ReviewRequest(
summary=summary,
description=description,
branch=branch,
testing_done=testing_done,
local_site=local_site,
local_id=local_id,
submitter=submitter,
diffset_history=DiffSetHistory.objects.create(),
repository=repository,
status=status,
**kwargs)
review_request.created_with_history = create_with_history
# Set this separately to avoid issues with CounterField updates.
review_request.id = id
review_request.save()
if depends_on:
review_request.depends_on = depends_on
if target_people:
review_request.target_people = target_people
if target_groups:
review_request.target_groups = target_groups
if publish:
review_request.publish(review_request.submitter)
if time_added and last_updated:
ReviewRequest.objects.filter(pk=review_request.pk).update(
time_added=time_added,
last_updated=last_updated)
review_request.time_added = time_added
review_request.last_updated = last_updated
elif time_added:
ReviewRequest.objects.filter(pk=review_request.pk).update(
time_added=time_added)
review_request.time_added = time_added
elif last_updated:
ReviewRequest.objects.filter(pk=review_request.pk).update(
last_updated=last_updated)
review_request.last_updated = last_updated
return review_request
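    # A minimal usage sketch (illustrative only; the argument values are
    # hypothetical) showing how the factory methods in this mixin are
    # typically composed inside a test:
    #
    #     review_request = self.create_review_request(create_repository=True,
    #                                                  publish=True)
    #     review = self.create_review(review_request, user='doc',
    #                                 ship_it=True, publish=True)
    #     self.create_general_comment(review, text='Looks good to me')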
def create_review_request_draft(self, review_request):
"""Create a ReviewRequestDraft for testing.
Args:
review_request (reviewboard.reviews.models.review_request.
ReviewRequest)
The review request for the draft.
Returns:
reviewboard.reviews.models.review_request_draft.ReviewRequestDraft:
The newly-created draft.
"""
return ReviewRequestDraft.create(review_request)
def create_visit(self, review_request, visibility, user='doc',
username=None, timestamp=None):
"""Create a ReviewRequestVisit for testing.
The ReviewRequestVisit is tied to the given ReviewRequest and User.
It's populated with default data that can be overridden by the caller.
The provided user may either be a username or a User object.
"""
        if not isinstance(user, User):
user = User.objects.get(username=user)
return ReviewRequestVisit.objects.create(
review_request=review_request,
visibility=visibility,
user=user)
def create_review(self, review_request, user='dopey',
body_top='Test Body Top', body_bottom='Test Body Bottom',
ship_it=False, publish=False, timestamp=None, **kwargs):
"""Creates a Review for testing.
The Review is tied to the given ReviewRequest. It's populated with
default data that can be overridden by the caller.
The provided user may either be a username or a User object.
If publish is True, Review.publish() will be called.
Args:
review_request (reviewboard.reviews.models.review_request.
ReviewRequest):
The review request the review is filed against.
user (unicode or django.contrib.auth.models.User, optional):
The username or User object owning the review.
body_top (unicode, optional):
The text for the ``body_top`` field.
body_bottom (unicode, optional):
The text for the ``body_bottom`` field.
ship_it (bool, optional):
The Ship It state for the review.
publish (bool, optional):
Whether to publish the review immediately after creation.
timestamp (datetime.datetime, optional):
The timestamp for the review.
**kwargs (dict):
Additional attributes to set in the review.
Returns:
reviewboard.reviews.models.review.Review:
The resulting review.
"""
if not isinstance(user, User):
user = User.objects.get(username=user)
review = Review.objects.create(
review_request=review_request,
user=user,
body_top=body_top,
body_bottom=body_bottom,
ship_it=ship_it,
**kwargs)
if publish:
review.publish()
if timestamp:
Review.objects.filter(pk=review.pk).update(timestamp=timestamp)
review.timestamp = timestamp
return review
def create_review_group(self, name='test-group', with_local_site=False,
local_site=None, visible=True, invite_only=False,
is_default_group=False):
"""Creates a review group for testing.
The group may optionally be attached to a LocalSite. It's also
populated with default data that can be overridden by the caller.
"""
if not local_site and with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
return Group.objects.create(
name=name,
local_site=local_site,
visible=visible,
invite_only=invite_only,
is_default_group=is_default_group)
def create_reply(self, review, user='grumpy', body_top='Test Body Top',
timestamp=None, publish=False, **kwargs):
"""Create a review reply for testing.
The reply is tied to the given Review. It's populated with default
data that can be overridden by the caller.
To reply to a ``body_top`` or ``body_bottom`` field, pass either
``body_top_reply_to=`` or ``body_bottom_reply_to=`` to this method.
This will be passed to the review's constructor.
Args:
review (reviewboard.reviews.models.review.Review):
The review being replied to.
user (django.contrib.auth.models.User or unicode, optional):
Either the user model or the username of the user who is
replying to the review.
body_top (unicode, optional):
The body top text.
timestamp (datetime.datetime, optional):
The timestamp of the review.
publish (bool, optional):
Whether the review should be published. By default it's in
draft form.
**kwargs (dict):
Additional arguments to pass to the
:py:class:`~reviewboard.reviews.models.review.Review`
constructor.
Returns:
reviewboard.reviews.models.review.Review:
The resulting review.
"""
if not isinstance(user, User):
user = User.objects.get(username=user)
reply = Review.objects.create(
review_request=review.review_request,
user=user,
body_top=body_top,
base_reply_to=review,
**kwargs)
if publish:
reply.publish()
if timestamp:
Review.objects.filter(pk=reply.pk).update(timestamp=timestamp)
reply.timestamp = timestamp
return reply
def create_screenshot(self, review_request, caption='My caption',
draft=False, active=True, **kwargs):
"""Create a Screenshot for testing.
The screenshot is tied to the given
:py:class:`~reviewboard.reviews.models.review_request.ReviewRequest`.
It's populated with default data that can be overridden by the caller.
Args:
review_request (reviewboard.reviews.models.review_request.
ReviewRequest):
The review request that ultimately owns the screenshot.
caption (unicode, optional):
The caption to use for the screenshot.
draft (bool or
reviewboard.reviews.models.review_request_draft.
ReviewRequestDraft):
A draft to associate the screenshot with. This can also be
a boolean, for legacy reasons, which will attempt to look up
or create a draft for the review request.
active (bool):
Whether this screenshot is considered active (not deleted).
**kwargs (dict):
Additional fields to set on the screenshot.
Returns:
reviewboard.reviews.models.screenshot.Screenshot:
The resulting screenshot.
"""
screenshot = Screenshot(caption=caption, **kwargs)
filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
'logo.png')
with open(filename, 'rb') as f:
screenshot.image.save(os.path.basename(filename), File(f),
save=True)
if draft:
if isinstance(draft, ReviewRequestDraft):
review_request_draft = draft
else:
review_request_draft = \
self.create_review_request_draft(review_request)
if active:
screenshots = review_request_draft.screenshots
else:
screenshots = review_request_draft.inactive_screenshots
else:
if active:
screenshots = review_request.screenshots
else:
screenshots = review_request.inactive_screenshots
screenshots.add(screenshot)
return screenshot
def create_screenshot_comment(self, review, screenshot, text='My comment',
x=1, y=1, w=5, h=5, issue_opened=False,
issue_status=None, extra_fields=None,
reply_to=None, **kwargs):
"""Create a ScreenshotComment for testing.
        The comment is tied to the given Review and Screenshot. It's
        populated with default data that can be overridden by the caller.
Args:
review (reviewboard.reviews.models.review.Review):
The review associated with the comment.
screenshot (reviewboard.reviews.models.screenshot.Screenshot):
The screenshot associated with the comment.
text (unicode):
The text for the comment.
x (int, optional):
The X location for the comment on the screenshot.
y (int, optional):
The Y location for the comment on the screenshot.
w (int, optional):
The width for the comment on the screenshot.
h (int, optional):
The height for the comment on the screenshot.
issue_opened (bool, optional):
Whether an issue is to be opened for the comment.
issue_status (unicode, optional):
The issue status to set, if an issue is opened. Defaults to
being an open issue.
extra_fields (dict, optional):
Extra data to set on the comment.
            reply_to (reviewboard.reviews.models.screenshot_comment.
                      ScreenshotComment, optional):
The comment this comment replies to.
**kwargs (dict):
Additional model attributes to set on the comment.
Returns:
reviewboard.reviews.models.screenshot_comment.ScreenshotComment:
The resulting comment.
"""
if issue_opened and not issue_status:
issue_status = ScreenshotComment.OPEN
comment = ScreenshotComment(
screenshot=screenshot,
text=text,
x=x,
y=y,
w=w,
h=h,
issue_opened=issue_opened,
issue_status=issue_status,
reply_to=reply_to,
**kwargs)
if extra_fields:
comment.extra_data = extra_fields
comment.save()
review.screenshot_comments.add(comment)
return comment
def create_file_attachment_base(self,
caption='My Caption',
orig_filename='logo.png',
mimetype='image/png',
uuid='test-uuid',
has_file=True,
file_content=None,
user=None,
with_local_site=False,
local_site_name=None,
local_site=None,
**kwargs):
"""Base helper to create a FileAttachment object.
When creating a
:py:class:`reviewboard.attachments.models.FileAttachment` that will be
        associated with a review request, a user and local_site should not be
specified.
This is not meant to be called directly by tests. Callers should
        generally use one of:
* :py:meth:`create_file_attachment`
* :py:meth:`create_user_file_attachment`
Args:
caption (unicode, optional):
The caption for the file attachment.
orig_filename (unicode, optional):
The original name of the file to set in the model.
mimetype (unicode, optional):
The mimetype of the file attachment.
uuid (unicode, optional):
The UUID used to prefix the filename and reference the
file attachment.
has_file (bool, optional):
``True`` if an actual file object should be included in the
model.
This will set the file content based on ``file_content``, if
one is provided. If not provided, the Review Board logo is used
as the file content.
file_content (bytes, optional):
The file content. This is only set if passing
``has_file=True``.
            user (django.contrib.auth.models.User, optional):
The user who owns the file attachment.
with_local_site (bool, optional):
``True`` if the file attachment should be associated with a
local site. If this is set, one of ``local_site_name`` or
``local_site`` should be provided as well.
local_site_name (unicode, optional):
The name of the local site to associate this attachment with.
local_site (reviewboard.site.models.LocalSite, optional):
The local site to associate this attachment with.
kwargs (dict):
Additional keyword arguments to pass into the FileAttachment
constructor.
Returns:
reviewboard.attachments.models.FileAttachment:
The new file attachment instance.
"""
if with_local_site:
local_site = self.get_local_site(name=local_site_name)
filename = kwargs.get('filename', '%s-%s' % (uuid, orig_filename))
file_attachment = FileAttachment(
caption=caption,
mimetype=mimetype,
user=user,
uuid=uuid,
local_site=local_site,
orig_filename=orig_filename,
**kwargs)
if has_file:
if file_content is None:
logo_path = os.path.join(settings.STATIC_ROOT, 'rb',
'images', 'logo.png')
with open(logo_path, 'rb') as fp:
file_content = fp.read()
assert isinstance(file_content, bytes), (
'file_content must be passed as bytes, not %s'
% type(file_content))
file_attachment.file.save(filename,
ContentFile(file_content),
save=True)
file_attachment.save()
return file_attachment
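    # Illustrative sketch only (the caption value is hypothetical): the public
    # helpers route through create_file_attachment_base(). For example,
    #
    #     attachment = self.create_user_file_attachment(
    #         user, has_file=True, caption='Example caption')
    #
    # stores the bundled logo.png as the file content, because has_file=True
    # and no explicit file_content was given.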
def create_general_comment(self, review, text='My comment',
issue_opened=False, issue_status=None,
extra_fields=None, reply_to=None, **kwargs):
"""Create a GeneralComment for testing.
The comment is tied to the given Review. It is populated with
default data that can be overridden by the caller.
Args:
review (reviewboard.reviews.models.review.Review):
The review associated with the comment.
text (unicode):
The text for the comment.
issue_opened (bool, optional):
Whether an issue is to be opened for the comment.
issue_status (unicode, optional):
The issue status to set, if an issue is opened. Defaults to
being an open issue.
extra_fields (dict, optional):
Extra data to set on the comment.
reply_to (reviewboard.reviews.models.general_comment.
GeneralComment, optional):
The comment this comment replies to.
**kwargs (dict):
Additional model attributes to set on the comment.
Returns:
reviewboard.reviews.models.general_comment.GeneralComment:
The resulting comment.
"""
if issue_opened and not issue_status:
issue_status = GeneralComment.OPEN
comment = GeneralComment(
text=text,
issue_opened=issue_opened,
issue_status=issue_status,
reply_to=reply_to,
**kwargs)
if extra_fields:
comment.extra_data = extra_fields
comment.save()
review.general_comments.add(comment)
return comment
def create_status_update(self, review_request, user='dopey',
service_id='service', summary='Status Update',
state=StatusUpdate.PENDING,
review=None, change_description=None,
timestamp=None):
"""Create a status update for testing.
It is populated with default data that can be overridden by the caller.
Args:
review_request (reviewboard.reviews.models.ReviewRequest):
The review request to associate with the new status update.
user (django.contrib.auth.models.User or unicode):
Either the user model or the username of the user who should
own the status update.
service_id (unicode):
The ID to fill in for the new model.
summary (unicode):
The summary to fill in for the new model.
state (unicode):
The state for the new model. This must be one of the valid
choices for the state field.
review (reviewboard.reviews.models.review.Review, optional):
The review associated with this status update.
change_description (reviewboard.changedescs.models.
ChangeDescription, optional):
The change description for this status update.
timestamp (datetime.datetime):
The timestamp for the status update.
Returns:
reviewboard.reviews.models.StatusUpdate:
The new status update.
"""
if not isinstance(user, User):
user = User.objects.get(username=user)
status_update = StatusUpdate.objects.create(
review_request=review_request,
change_description=change_description,
service_id=service_id,
summary=summary,
state=state,
review=review,
user=user)
if timestamp:
StatusUpdate.objects.filter(pk=status_update.pk).update(
timestamp=timestamp)
status_update.timestamp = timestamp
return status_update
def create_webhook(self, enabled=False, events=WebHookTarget.ALL_EVENTS,
url='http://example.com',
encoding=WebHookTarget.ENCODING_JSON,
use_custom_content=False, custom_content='',
secret='', apply_to=WebHookTarget.APPLY_TO_ALL,
repositories=None, with_local_site=False,
local_site=None, extra_fields=None):
"""Create a webhook for testing.
It is populated with default data that can be overridden by the caller.
Args:
enabled (bool):
Whether or not the webhook is enabled when it is created.
events (unicode):
A comma-separated list of events that the webhook will trigger
on.
url (unicode):
The URL that requests will be made against.
encoding (unicode):
The encoding of the payload to send.
use_custom_content (bool):
Determines if custom content will be sent for the payload (if
``True``) or if it will be auto-generated (if ``False``).
custom_content (unicode):
The custom content to send when ``use_custom_content`` is
``True``.
secret (unicode):
An HMAC secret to sign the payload with.
apply_to (unicode):
The types of repositories the webhook will apply to.
repositories (list):
A list of repositories that the webhook will be limited to if
``apply_to`` is ``WebHookTarget.APPLY_TO_SELECTED_REPOS``.
with_local_site (bool):
Determines if this should be created with a local site.
local_site (reviewboard.site.models.LocalSite):
An optional local site. If ``with_local_site`` is ``True`` and
this argument is ``None``, the local site will be looked up.
extra_fields (dict):
Extra data to be imported into the webhook.
Returns:
WebHookTarget: A webhook constructed with the given arguments.
"""
if not local_site:
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
webhook = WebHookTarget.objects.create(
enabled=enabled,
events=events.split(','),
url=url,
encoding=encoding,
use_custom_content=use_custom_content,
custom_content=custom_content,
secret=secret,
apply_to=apply_to,
local_site=local_site)
if repositories:
webhook.repositories = repositories
if extra_fields:
webhook.extra_data = extra_fields
webhook.save(update_fields=['extra_data'])
return webhook
def create_oauth_application(
self, user, local_site=None, with_local_site=False,
redirect_uris='http://example.com',
authorization_grant_type=Application.GRANT_CLIENT_CREDENTIALS,
client_type=Application.CLIENT_PUBLIC,
**kwargs):
"""Create an OAuth application.
Args:
user (django.contrib.auth.models.User):
                The user who will own the application.
local_site (reviewboard.site.models.LocalSite, optional):
The LocalSite for the application to be associated with, if
any.
redirect_uris (unicode, optional):
A whitespace-separated list of allowable redirect URIs.
authorization_grant_type (unicode, optional):
The grant type for the application.
client_type (unicode, optional):
The application client type.
**kwargs (dict):
Additional keyword arguments to pass to the
:py:class:`~reviewboard.oauth.models.Application` initializer.
Returns:
reviewboard.oauth.models.Application:
The created application.
"""
if not local_site:
if with_local_site:
local_site = self.get_local_site(self.local_site_name)
else:
local_site = None
return Application.objects.create(
user=user,
local_site=local_site,
authorization_grant_type=authorization_grant_type,
redirect_uris=redirect_uris,
client_type=client_type,
extra_data='{}',
**kwargs)
def create_oauth_token(self, application, user, scope='', expires=None,
**kwargs):
"""Create an OAuth2 access token for testing.
Args:
application (reviewboard.oauth.models.Application):
The application the token should be associated with.
user (django.contrib.auth.models.User):
The user who should own the token.
scope (unicode, optional):
The scopes of the token. This argument defaults to the empty
scope.
expires (datetime.timedelta, optional):
How far into the future the token expires. If not provided,
this argument defaults to one hour.
Returns:
oauth2_provider.models.AccessToken:
The created access token.
"""
if expires is None:
expires = timedelta(hours=1)
return AccessToken.objects.create(
application=application,
token=generate_token(),
expires=timezone.now() + expires,
scope=scope,
user=user,
)
@contextmanager
def siteconfig_settings(self, settings, reload_settings=True):
"""Temporarily sets siteconfig settings for a test.
Args:
settings (dict):
The new siteconfig settings to set.
reload_settings (bool, optional):
Whether to reload and recompute all settings, applying them
to Django and other objects.
Context:
The current site configuration will contain the new settings for
this test.
"""
try:
with super(TestCase, self).siteconfig_settings(settings):
if reload_settings:
load_site_config()
yield
finally:
if reload_settings:
load_site_config()
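    # Illustrative sketch only (the siteconfig key below is hypothetical):
    #
    #     def test_feature(self):
    #         with self.siteconfig_settings({'some_feature_enabled': True}):
    #             ...  # code under test sees the temporary siteconfig value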
| 36.390149
| 85
| 0.567118
|
87e25da31bde304141f5902a30ed0de46184dbf2
| 273
|
py
|
Python
|
day5/d5p4.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | 2
|
2021-12-22T07:43:14.000Z
|
2021-12-24T12:07:33.000Z
|
day5/d5p4.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | null | null | null |
day5/d5p4.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | 1
|
2021-12-22T07:43:26.000Z
|
2021-12-22T07:43:26.000Z
|
def make_abba(a, b):
    # Combine the two strings in "abba" order: a + b + b + a.
    return a + b + b + a
result = make_abba('Hi', 'Bye')
print(result)
result = make_abba('Yo', 'Alice')
print(result)
result = make_abba('x', '')
print(result)
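# Expected output of the three calls above:
#   HiByeByeHi
#   YoAliceAliceYo
#   xx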
| 21
| 35
| 0.575092
|
08bcb9abe4d7956bfe9c1ebe44296bce9e3a337f
| 14,774
|
py
|
Python
|
A1 - Search and Games/fishing_game_core/app.py
|
NickSmyr/ai-player-agents
|
f8972d02c53a2ba566b541b1270a0637e3d3e5c7
|
[
"MIT"
] | null | null | null |
A1 - Search and Games/fishing_game_core/app.py
|
NickSmyr/ai-player-agents
|
f8972d02c53a2ba566b541b1270a0637e3d3e5c7
|
[
"MIT"
] | null | null | null |
A1 - Search and Games/fishing_game_core/app.py
|
NickSmyr/ai-player-agents
|
f8972d02c53a2ba566b541b1270a0637e3d3e5c7
|
[
"MIT"
] | null | null | null |
import json
import sys
from datetime import datetime
from io import UnsupportedOperation
from os.path import join
from pathlib import Path
import numpy as np
from kivy.app import App
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.lang import Builder
from fishing_game_core.communicator import Communicator
from fishing_game_core.player_utils import Player
from fishing_game_core.sequences import Sequences
from fishing_game_core.shared import SettingLoader
from fishing_game_core.shared import TYPE_TO_SCORE
from fishing_game_core.widgets import Boat, TimeBoard, Stats, FishingDerby, Fish
home = str(Path.home())
class Fishes(SettingLoader):
def __init__(self):
super().__init__()
self.seq_types_fishes = None
self.observations_sequence = None
self.main_widget = None
self.fishes = {}
def init_fishes(self):
"""
Initialize fishes and their parameters
:return:
"""
# Generate fishes exactly according to the custom specification.
self.fishes.clear()
init_fishes = self.observations_sequence['init_fishes']
for i in range(len(init_fishes)):
            init_x, init_y = init_fishes[str(i)]['init_pos']
            score = init_fishes[str(i)]['score']
            obs_seq = self.observations_sequence['sequence'][str(i)]
            name = "fish" + str(i)
# Get the right fish type from the score.
type_fish = None
for key, value in TYPE_TO_SCORE.items():
if value == score:
type_fish = key
fish = Fish(init_state=(init_x, init_y),
type_fish=type_fish,
name=name,
observations_sequence=obs_seq,
settings=self.settings)
self.main_widget.ids.fish_layout.add_widget(fish)
self.fishes[name] = fish
class PrintScoresAbstract:
def __init__(self):
self.time = 0
self.total_time = 0
self.main_widget = None
self.players = {}
class PrintScore2Players(PrintScoresAbstract):
def print_score(self):
if hasattr(self, 'latest_msg') and self.latest_msg is not None and self.latest_msg['search_time'] is not None:
search_time = self.latest_msg['search_time']
# print("Elapsed time:", str(self.time) + '/' + str(self.total_time),
# "s\tScore:", self.players[0].score - self.players[1].score, '\tSearch time:', '%.2E' % search_time)
return
# print("Elapsed time:", str(self.time) + '/' + str(self.total_time),
# "s\tScore:", self.players[0].score - self.players[1].score)
class PrintScore1Player(PrintScoresAbstract):
def print_score(self):
# print("Elapsed time:", str(self.time) + '/' + str(self.total_time),
# "s\tScore:", self.players[0].score)
pass
class GamesWithBoats:
def __init__(self):
self.settings = None
self.main_widget = None
self.players = None
def introduce_boats_to_screen(self, n_boats):
"""
Introduce and draw the boats on the screen
:type n_boats: int. Number of boats to draw.
:return:
"""
colors = [[0, 0.5, 0, 1], [1, 0, 0, 1]]
space_subdivisions = 20
for i in range(1, n_boats + 1):
if not hasattr(self, 'observations_sequence'): # sanity check
raise Exception('wrong settings specification for boats...')
init_players = self.observations_sequence['init_players']
init_pos = init_players[str(i-1)]
init_pos_x_boat = init_pos[0]
init_pos_y_hook = init_pos[1]
boat = Boat(init_pos_x_boat, space_subdivisions=space_subdivisions,
source=f"fishing_game_core/images/fishing{i}.png",
init_hook=init_pos_y_hook)
boat.line_rod.color = colors[i - 1]
self.main_widget.ids.boats_layout.add_widget(boat)
self.main_widget.ids.hooks_layout.add_widget(boat.hook)
self.main_widget.ids.line_rods_layout.add_widget(boat.line_rod)
self.players[i - 1].boat = boat
class FishingDerbyApp(App, SettingLoader, Communicator):
def __init__(self):
App.__init__(self)
SettingLoader.__init__(self)
Communicator.__init__(self)
# Use the main kivy file to draw the board
Builder.load_file('fishing_game_core/main.kv')
# Create class variables and set default values
self.fishes = {} # Dictionary of fishes
self._cnt_steps = 0 # Count of the number of steps taken so far
self.move_x = [] # Next moves of the fishes in the x axis
self.move_y = [] # Next moves of the fishes in the y axis
self.action = "stay" # Actions received from player
self.time = 0 # Seconds since start
self.total_time = 60 # Total time of the game
self.players = [] # Players list
self.main_widget = None # Main widget of the game
self.time_board = None # Time board widget
# PID of the player loop in order to be able to kill it when the game is over
self.player_loop_pid = None
self.observations_sequence = None
self.update_scheduled = None
self.timer_scheduled = None
# Steps counter is a number that goes from 0 to frames_per_action
"""
@property
def cnt_steps(self):
frames_per_action = 10
return self._cnt_steps % frames_per_action
@cnt_steps.setter
def cnt_steps(self, val):
self._cnt_steps = val
"""
def set_player_loop_pid(self, pid):
self.player_loop_pid = pid
def create_players(self):
"""Always 2 players, not necessarily 2 boats"""
self.players = [Player(), Player()]
def update(self, dt):
raise NotImplementedError
def init_clock(self):
"""
Initialize the timer
:return:
"""
n_seq = self.observations_sequence["params"]["n_seq"]
self.total_time = n_seq * 10 * 1.0 / self.settings.frames_per_second
self.time_board = TimeBoard(seconds=int(self.total_time))
self.time_board.pos_hint['center_x'] = 0.5
self.main_widget.add_widget(self.time_board)
self.timer_scheduled = Clock.schedule_interval(self.update_clock, 1.0)
def check_fish_near(self, boat):
"""
Catch a random fish that is on the same position as the boat if possible
:param boat: Boat. It must not have a caught fish.
:return:
"""
indices = np.random.permutation(len(self.fishes))
keys = list(self.fishes.keys())
for f in indices:
fish = self.fishes[keys[f]]
if fish.position == boat.hook.position and fish.caught is None:
return fish
def new_action(self, msg):
"""
Assign the new action coming from the message
:param msg: dict. Message coming from the receiver.
:return:
"""
self.action = msg["action"]
def send_state_or_display_stats(self):
"""
        Send a message telling the player that the game state has been updated. If the game has ended, display the stats screen instead.
"""
msg = {
"game_over": self.main_widget.game_over
}
if self.main_widget.game_over:
self.timer_scheduled.cancel()
self.update_scheduled.cancel()
self.display_stats()
self.sender(msg)
return False
self.update_specific(msg)
return True
def update_clock(self, dl):
"""
Increase the clock by 1 second. If the remaining time is 0, the game is over.
:param dl: delta-time. Not used.
:return:
"""
if self.time_board.seconds == 0:
self.main_widget.game_over = True
else:
self.time_board.seconds -= 1
self.time += 1.0
def fishes_next_move(self):
"""
Calculate and store, for every fish, the infinitesimal moving step for the position changing process.
After that, increase each fish's updates counter.
:return:
"""
self.move_x.clear()
self.move_y.clear()
for fish in self.fishes.values():
move_x, move_y = fish.next_movement_and_flip_horizontally()
self.move_x += [move_x / self.settings.frames_per_action]
self.move_y += [move_y / self.settings.frames_per_action]
fish.updates_cnt += 1
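    # Worked example: if settings.frames_per_action is 10 and a fish's next
    # movement is one grid cell in x, each of the next 10 frames advances the
    # fish by 1 / 10 = 0.1 of a cell (applied in
    # update_fishes_position_and_increase_steps below).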
def check_fishes_caught(self):
"""
For every boat in the game, do one of:
1) if no fish is caught by it, check whether any can be caught
2) if a fish has been caught and the player is at the surface, finish pulling the rod
:return:
"""
for player_number, player in enumerate(self.players):
boat = player.boat
if boat is None:
continue
elif boat.has_fish is None:
fish_near = self.check_fish_near(boat)
if fish_near is not None:
self.main_widget.ids.fish_layout.remove_widget(fish_near)
self.main_widget.ids.fish_layout.add_widget(fish_near)
boat.has_fish = fish_near
fish_near.caught = boat
if boat.has_fish is not None and boat.hook.position.y == 19:
self.main_widget.finish_pulling_fish(player_number)
def load_observations(self):
"""
Load the observations file stated in the settings
:return:
"""
try:
sequences = Sequences()
sequences.load(self.settings.observations_file)
self.observations_sequence = sequences.data
except AttributeError:
print("Observations file not provided", file=sys.stderr)
def init_specific(self):
"""
Specific initialization of App. Abstract.
:return:
"""
raise NotImplementedError
def update_specific(self, msg):
"""
Specific action to perform in the loop with the message from the player controlled.
:param msg:
:return:
"""
raise NotImplementedError
def update_fishes_position_and_increase_steps(self):
"""
Change the position of every fish by the amount inside move_x and move_y lists.
After that, increase the updates counter of the game.
:return:
"""
for i, fish in enumerate(self.fishes.values()):
fish.increase_x_y(self.move_x[i], self.move_y[i])
self._cnt_steps += 1
def calculate_strategy_for_next_frame_action(self):
pass
    def display_stats(self):
        scores_file = join(home, ".fishing_derby_scores")
        stats = Stats(self.players, self.settings, self.fishes)
        # Read any previously saved scores, then rewrite the file so that it
        # always holds a single valid JSON document.
        try:
            with open(scores_file) as f:
                stats_file = json.load(f)
        except (IOError, ValueError):
            stats_file = dict()
        stats_dict = stats.get_stats()
        stats_file[datetime.now().timestamp()] = stats_dict
        with open(scores_file, "w") as f:
            json.dump(stats_file, f)
stats.load(stats_dict)
stats.open()
def build(self):
"""Initialize the Kivy screen"""
# Set sky color
Window.clearcolor = 63 / 255, 191 / 255, 191 / 255, 0.3
# Create main widget
self.create_players()
self.main_widget = FishingDerby(fishes=self.fishes,
players=self.players,
settings=self.settings)
self.init_clock()
self.init_specific()
# Run initial update
self.fishes_next_move()
self.update_scheduled = Clock.schedule_interval(
self.update, 1.0 / self.settings.frames_per_second)
# Kivy receives main widget and draws it
return self.main_widget
class FishingDerbyHumanApp(FishingDerbyApp, Fishes, PrintScore1Player, GamesWithBoats):
def __init__(self):
super().__init__()
# Keyboard events
self._keyboard = None
self.last_action = None
def update_clock(self, dl):
super().update_clock(dl)
self.print_score()
def _keyboard_closed(self):
self._keyboard.unbind(
on_key_down=self._key_down_function, on_key_up=self._key_up_function)
self._keyboard = None
def _key_down_function(self, keyboard, key_code, text, modifiers):
self.last_action = key_code[1] if key_code[1] in [
'up', 'down', 'right', 'left'] else 'stay'
def _key_up_function(self, keyboard, key_code):
self.last_action = 'stay'
def update_specific(self, msg):
msg = {"action": self.last_action}
self.new_action(msg)
def build(self):
"""Initialize the Kivy screen"""
# Set sky color
Window.clearcolor = 63 / 255, 191 / 255, 191 / 255, 0.3
# Create main widget
self.load_observations()
self.create_players()
self.main_widget = FishingDerby(fishes=self.fishes,
players=self.players,
settings=self.settings)
self.init_clock()
self.init_specific()
# Run initial update
self.fishes_next_move()
self.update_scheduled = Clock.schedule_interval(
self.update, 1.0 / self.settings.frames_per_second)
# Attach the keyboard
self._keyboard = self.main_widget.keyboard
self._keyboard.bind(on_key_down=self._key_down_function,
on_key_up=self._key_up_function)
# Kivy receives main widget and draws it
return self.main_widget
def update(self, dt):
if self._cnt_steps % self.settings.frames_per_action == 0 and self._cnt_steps > 0:
# Check if a fish is to be caught by any of the players
self.check_fishes_caught()
# Check if game is about to timeout
if self.time >= self.total_time:
self.main_widget.game_over = True
self.send_state_or_display_stats()
self.fishes_next_move()
self.update_fishes_position_and_increase_steps()
self.execute_action()
def init_specific(self):
self.init_fishes()
self.introduce_boats_to_screen(1)
def execute_action(self):
if self.players[0].boat.has_fish:
self.main_widget.act("up", player=0)
else:
self.main_widget.act(self.action, player=0)
| 34.438228
| 119
| 0.607554
|
2d8c76401c7b11b1b733325ce1fb21cba63a5b30
| 1,036
|
py
|
Python
|
Alt/saliencymapper/api.py
|
MalcolmGomes/SoftwareEng2
|
2cc9417a30cd1350980bc6e272d1024e866397a6
|
[
"MIT"
] | null | null | null |
Alt/saliencymapper/api.py
|
MalcolmGomes/SoftwareEng2
|
2cc9417a30cd1350980bc6e272d1024e866397a6
|
[
"MIT"
] | null | null | null |
Alt/saliencymapper/api.py
|
MalcolmGomes/SoftwareEng2
|
2cc9417a30cd1350980bc6e272d1024e866397a6
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, send_file
from flask_restful import Resource, Api
from saliency_mapper import *
import os
import requests
import torch
import time
import sys
from torchvision import models
from torchvision import transforms
app = Flask(__name__)
api = Api(app)
class SaliencyMapAPI(Resource):
def get(self):
filename = "malcolm.jpg"
img = Image.open(filename)
output_path = generate_saliency_map(img, filename)
return send_file(output_path, attachment_filename=filename)
def post(self):
image_url = request.form["image_url"]
filename = image_url.split('/')[-1]
r = requests.get(image_url, allow_redirects=True)
        with open(filename, 'wb') as f:
            f.write(r.content)
img = Image.open(filename)
output_path = generate_saliency_map(img, filename)
os.remove(filename)
return send_file(output_path, attachment_filename=filename)
# return {
# 'img': output_path
# }
api.add_resource(SaliencyMapAPI, '/')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
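# Illustrative client sketch (not part of the service; assumes the server is
# reachable at localhost:80 as configured above, and the image URL is a
# placeholder):
#
#     import requests
#     resp = requests.post('http://localhost:80/',
#                          data={'image_url': 'https://example.com/photo.jpg'})
#     with open('saliency_map.jpg', 'wb') as out:
#         out.write(resp.content)  # the generated saliency map image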
| 25.268293
| 61
| 0.732625
|
029c948456cb89b831f5a1ffdc142e7ca4638310
| 10,364
|
py
|
Python
|
verde/vector.py
|
djhoese/verde
|
ad14acf94717ee5c6672559f40576f65989753a5
|
[
"BSD-3-Clause"
] | null | null | null |
verde/vector.py
|
djhoese/verde
|
ad14acf94717ee5c6672559f40576f65989753a5
|
[
"BSD-3-Clause"
] | null | null | null |
verde/vector.py
|
djhoese/verde
|
ad14acf94717ee5c6672559f40576f65989753a5
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Vector gridding using elasticity Green's functions from Sandwell and Wessel
(2016).
"""
import numpy as np
from sklearn.utils.validation import check_is_fitted
from .base import check_fit_input
from .spline import Spline
from .coordinates import get_region
class Vector2D(Spline):
r"""
Elastically coupled interpolation of 2-component vector data.
Uses the Green's functions based on elastic deformation from
[SandwellWessel2016]_. The interpolation is done by estimating point forces
that generate an elastic deformation that fits the observed vector data.
The deformation equations are based on a 2D elastic sheet with a constant
Poisson's ratio. The data can then be predicted at any desired location.
The east and north data components are coupled through the elastic
deformation equations. This coupling is controlled by the Poisson's ratio,
which is usually between -1 and 1. The special case of Poisson's ratio -1
leads to an uncoupled interpolation, meaning that the east and north
components don't interfere with each other.
The point forces are traditionally placed under each data point. The force
locations are set the first time :meth:`~verde.Vector2D.fit` is called.
Subsequent calls will fit using the same force locations as the first call.
This configuration results in an exact prediction at the data points but
can be unstable.
[SandwellWessel2016]_ stabilize the solution using Singular Value
Decomposition but we use ridge regression instead. The regularization can
be controlled using the *damping* argument. Alternatively, we also allow
forces to be placed on a regular grid using the *spacing*, *shape*, and/or
*region* arguments. Regularization or forces on a grid will result in a
least-squares estimate at the data points, not an exact solution. Note that
the least-squares solution is required for data weights to have any effect.
The Jacobian (design, sensitivity, feature, etc) matrix for the spline
is normalized using :class:`sklearn.preprocessing.StandardScaler` without
centering the mean so that the transformation can be undone in the
estimated forces.
Parameters
----------
poisson : float
The Poisson's ratio for the elastic deformation Green's functions.
Default is 0.5. A value of -1 will lead to uncoupled interpolation of
the east and north data components.
fudge : float
The positive fudge factor applied to the Green's function to avoid
singularities.
damping : None or float
The positive damping regularization parameter. Controls how much
smoothness is imposed on the estimated forces. If None, no
regularization is used.
shape : None or tuple
If not None, then should be the shape of the regular grid of forces.
See :func:`verde.grid_coordinates` for details.
spacing : None or float or tuple
If not None, then should be the spacing of the regular grid of forces.
See :func:`verde.grid_coordinates` for details.
region : None or tuple
If not None, then the boundaries (``[W, E, S, N]``) used to generate a
regular grid of forces. If None is given, then will use the boundaries
of data given to the first call to :meth:`~verde.Vector2D.fit`.
Attributes
----------
    force_ : array
The estimated forces that fit the observed data.
force_coords_ : tuple of arrays
The easting and northing coordinates of the forces.
region_ : tuple
The boundaries (``[W, E, S, N]``) of the data used to fit the
interpolator. Used as the default region for the
:meth:`~verde.Vector2D.grid` and :meth:`~verde.Vector2D.scatter`
methods.
See also
--------
verde.vector2d_jacobian : Jacobian matrix for the 2D elastic deformation
"""
def __init__(
self,
poisson=0.5,
fudge=1e-5,
damping=None,
shape=None,
spacing=None,
region=None,
):
self.poisson = poisson
super().__init__(
fudge=fudge, damping=damping, shape=shape, spacing=spacing, region=region
)
def fit(self, coordinates, data, weights=None):
"""
Fit the gridder to the given 2-component vector data.
The data region is captured and used as default for the
:meth:`~verde.Vector2D.grid` and :meth:`~verde.Vector2D.scatter`
methods.
All input arrays must have the same shape.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting
and northing will be used, all subsequent coordinates will be
ignored.
data : tuple of array
A tuple ``(east_component, north_component)`` of arrays with the
vector data values at each point.
weights : None or tuple array
If not None, then the weights assigned to each data point. Must be
one array per data component. Typically, this should be 1 over the
data uncertainty squared.
Returns
-------
self
Returns this estimator instance for chaining operations.
"""
coordinates, data, weights = check_fit_input(
coordinates, data, weights, unpack=False
)
if len(data) != 2:
raise ValueError(
"Need two data components. Only {} given.".format(len(data))
)
# Capture the data region to use as a default when gridding.
self.region_ = get_region(coordinates[:2])
self.force_coords_ = self._get_force_coordinates(coordinates)
if any(w is not None for w in weights):
weights = np.concatenate([i.ravel() for i in weights])
else:
weights = None
self._check_weighted_exact_solution(weights)
data = np.concatenate([i.ravel() for i in data])
jacobian = vector2d_jacobian(
coordinates[:2], self.force_coords_, self.poisson, self.fudge
)
self.force_ = self._estimate_forces(jacobian, data, weights)
return self
def predict(self, coordinates):
"""
Evaluate the fitted gridder on the given set of points.
Requires a fitted estimator (see :meth:`~verde.Vector2D.fit`).
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting
and northing will be used, all subsequent coordinates will be
ignored.
Returns
-------
data : tuple of arrays
A tuple ``(east_component, north_component)`` of arrays with the
predicted vector data values at each point.
"""
check_is_fitted(self, ["force_", "force_coords_"])
jac = vector2d_jacobian(
coordinates[:2], self.force_coords_, self.poisson, self.fudge
)
cast = np.broadcast(*coordinates[:2])
npoints = cast.size
components = jac.dot(self.force_).reshape((2, npoints))
return tuple(comp.reshape(cast.shape) for comp in components)
def vector2d_jacobian(
coordinates, force_coordinates, poisson, fudge=1e-5, dtype="float32"
):
"""
Make the Jacobian matrix for the 2D coupled elastic deformation.
Follows [SandwellWessel2016]_.
The Jacobian is segmented into 4 parts, each relating a force component to
a data component::
| J_ee J_ne |*|f_e| = |d_e|
| J_ne J_nn | |f_n| |d_n|
The forces and data are assumed to be stacked into 1D arrays with the east
component on top of the north component.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
force_coordinates : tuple of arrays
Arrays with the coordinates of each vertical force. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
    poisson : float
The Poisson's ratio for the elastic deformation Green's functions.
A value of -1 will lead to uncoupled interpolation of
the east and north data components (the ``J_ne`` component of the
Jacobian is null).
fudge : float
The positive fudge factor applied to the Green's function to avoid
singularities.
dtype : str or numpy dtype
The type of the Jacobian array.
Returns
-------
jacobian : 2D array
The (n_data*2, n_forces*2) Jacobian matrix.
See also
--------
verde.Vector2D : Coupled gridder for 2-component vector data
"""
force_coordinates = [np.atleast_1d(i).ravel() for i in force_coordinates[:2]]
coordinates = [np.atleast_1d(i).ravel() for i in coordinates[:2]]
npoints = coordinates[0].size
nforces = force_coordinates[0].size
# Reshaping the data coordinates to a column vector will automatically
# build a distance matrix between each data point and force.
east, north = (
datac.reshape((npoints, 1)) - forcec
for datac, forcec in zip(coordinates, force_coordinates)
)
distance = np.hypot(east, north, dtype=dtype)
# The fudge factor helps avoid singular matrices when the force and
# computation point are too close
distance += fudge
# Pre-compute common terms for the Green's functions of each component
ln_r = (3 - poisson) * np.log(distance)
over_r2 = (1 + poisson) / distance ** 2
jac = np.empty((npoints * 2, nforces * 2), dtype=dtype)
jac[:npoints, :nforces] = ln_r + over_r2 * north ** 2 # J_ee
jac[npoints:, nforces:] = ln_r + over_r2 * east ** 2 # J_nn
jac[:npoints, nforces:] = -over_r2 * east * north # J_ne
jac[npoints:, :nforces] = jac[:npoints, nforces:] # J is symmetric
return jac
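# A minimal usage sketch (illustrative only; the coordinates and data below
# are synthetic):
#
#     east, north = np.meshgrid(np.linspace(0, 10, 20), np.linspace(-5, 5, 20))
#     data = (np.cos(east), np.sin(north))        # 2-component vector data
#     grd = Vector2D(poisson=0.5, damping=1e-8).fit((east, north), data)
#     pred_east, pred_north = grd.predict((east, north))
#
# Internally, the fit builds the (2 * n_data, 2 * n_forces) Jacobian returned
# by vector2d_jacobian() above.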
| 40.015444
| 85
| 0.65988
|
2ae10782e28b42c62b64daf4ca0ae47ffbf83626
| 1,335
|
py
|
Python
|
app/core/tests/test_admin.py
|
AliSayyah/recipe-app-api
|
42c689ba2fd709dad35508d5f09452daa33f81ea
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
AliSayyah/recipe-app-api
|
42c689ba2fd709dad35508d5f09452daa33f81ea
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
AliSayyah/recipe-app-api
|
42c689ba2fd709dad35508d5f09452daa33f81ea
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import Client
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='ali.sayyah79@gmail.com',
password='test123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='ali.sayyah78@gmail.com',
password='test123',
name='testUser'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.785714
| 68
| 0.643446
|
eebe676f07082cced8c7fbd96dd3cb896c4776b4
| 5,125
|
py
|
Python
|
src/pykeen/models/unimodal/pair_re.py
|
rpatil524/pykeen
|
b76239ab68f15bbf52af744c2821c73c2115b5aa
|
[
"MIT"
] | null | null | null |
src/pykeen/models/unimodal/pair_re.py
|
rpatil524/pykeen
|
b76239ab68f15bbf52af744c2821c73c2115b5aa
|
[
"MIT"
] | null | null | null |
src/pykeen/models/unimodal/pair_re.py
|
rpatil524/pykeen
|
b76239ab68f15bbf52af744c2821c73c2115b5aa
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Implementation of PairRE."""
from typing import Any, ClassVar, Mapping, Optional
from torch.nn import functional
from torch.nn.init import uniform_
from ..nbase import ERModel
from ...constants import DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE
from ...nn.emb import EmbeddingSpecification
from ...nn.modules import PairREInteraction
from ...typing import Hint, Initializer, Normalizer
__all__ = [
'PairRE',
]
class PairRE(ERModel):
r"""An implementation of PairRE from [chao2020]_.
---
citation:
author: Chao
year: 2020
link: http://arxiv.org/abs/2011.03798
github: alipay/KnowledgeGraphEmbeddingsViaPairedRelationVectors_PairRE
"""
#: The default strategy for optimizing the model's hyper-parameters
hpo_default: ClassVar[Mapping[str, Any]] = dict(
embedding_dim=DEFAULT_EMBEDDING_HPO_EMBEDDING_DIM_RANGE,
p=dict(type=int, low=1, high=2),
)
#: The default entity normalizer parameters
#: The entity representations are normalized to L2 unit length
#: cf. https://github.com/alipay/KnowledgeGraphEmbeddingsViaPairedRelationVectors_PairRE/blob/0a95bcd54759207984c670af92ceefa19dd248ad/biokg/model.py#L232-L240 # noqa: E501
default_entity_normalizer_kwargs: ClassVar[Mapping[str, Any]] = dict(
p=2,
dim=-1,
)
def __init__(
self,
embedding_dim: int = 200,
p: int = 1,
power_norm: bool = False,
entity_initializer: Hint[Initializer] = uniform_,
entity_initializer_kwargs: Optional[Mapping[str, Any]] = None,
entity_normalizer: Hint[Normalizer] = functional.normalize,
entity_normalizer_kwargs: Optional[Mapping[str, Any]] = None,
relation_initializer: Hint[Initializer] = uniform_,
relation_initializer_kwargs: Optional[Mapping[str, Any]] = None,
**kwargs,
) -> None:
r"""Initialize PairRE via the :class:`pykeen.nn.modules.PairREInteraction` interaction.
:param embedding_dim: The entity embedding dimension $d$.
:param p: The $l_p$ norm.
:param power_norm: Should the power norm be used?
:param kwargs: Remaining keyword arguments passed through to :class:`pykeen.models.ERModel`.
"""
entity_normalizer_kwargs = _resolve_kwargs(
kwargs=entity_normalizer_kwargs,
default_kwargs=self.default_entity_normalizer_kwargs,
)
# update initializer settings, cf.
# https://github.com/alipay/KnowledgeGraphEmbeddingsViaPairedRelationVectors_PairRE/blob/0a95bcd54759207984c670af92ceefa19dd248ad/biokg/model.py#L45-L49
# https://github.com/alipay/KnowledgeGraphEmbeddingsViaPairedRelationVectors_PairRE/blob/0a95bcd54759207984c670af92ceefa19dd248ad/biokg/model.py#L29
# https://github.com/alipay/KnowledgeGraphEmbeddingsViaPairedRelationVectors_PairRE/blob/0a95bcd54759207984c670af92ceefa19dd248ad/biokg/run.py#L50
entity_initializer_kwargs = self._update_embedding_init_with_default(
entity_initializer_kwargs,
embedding_dim=embedding_dim,
)
relation_initializer_kwargs = self._update_embedding_init_with_default(
relation_initializer_kwargs,
# in the original implementation the embeddings are initialized in one parameter
embedding_dim=2 * embedding_dim,
)
super().__init__(
interaction=PairREInteraction,
interaction_kwargs=dict(p=p, power_norm=power_norm),
entity_representations=EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=entity_initializer,
initializer_kwargs=entity_initializer_kwargs,
normalizer=entity_normalizer,
normalizer_kwargs=entity_normalizer_kwargs,
),
relation_representations=[
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=relation_initializer,
initializer_kwargs=relation_initializer_kwargs,
),
EmbeddingSpecification(
embedding_dim=embedding_dim,
initializer=relation_initializer,
initializer_kwargs=relation_initializer_kwargs,
),
],
**kwargs,
)
@staticmethod
def _update_embedding_init_with_default(
init_kwargs: Optional[Mapping[str, Any]],
embedding_dim: int,
) -> Mapping[str, float]:
"""Update kwargs by dimension-based default init range."""
init_kwargs = dict(init_kwargs or {})
embedding_range = 14 / embedding_dim
init_kwargs.setdefault("a", -embedding_range)
init_kwargs.setdefault("b", embedding_range)
return init_kwargs
def _resolve_kwargs(kwargs: Optional[Mapping[str, Any]], default_kwargs: Mapping[str, Any]) -> Mapping[str, Any]:
kwargs = dict(kwargs or {})
for k, v in default_kwargs.items():
kwargs.setdefault(k, v)
return kwargs
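# Worked example of the defaults computed above (illustrative): with
# embedding_dim=200, embedding_range = 14 / 200 = 0.07, so entity embeddings
# default to uniform_(a=-0.07, b=0.07); relation embeddings are initialized
# with 2 * embedding_dim = 400, giving a range of 14 / 400 = 0.035.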
| 40.674603
| 177
| 0.677268
|
a8c79c79f3f09c0650c3ca9075b2f343a1ca6b1b
| 1,640
|
py
|
Python
|
constant/base.py
|
MacHu-GWU/constant-project
|
de44b1973d0457b856d47d6e17b3997b3611179e
|
[
"MIT"
] | null | null | null |
constant/base.py
|
MacHu-GWU/constant-project
|
de44b1973d0457b856d47d6e17b3997b3611179e
|
[
"MIT"
] | null | null | null |
constant/base.py
|
MacHu-GWU/constant-project
|
de44b1973d0457b856d47d6e17b3997b3611179e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Similar to ``collections.namedtuple``, ``nameddict`` is a data container class.
It provides convenient methods for iterating over attributes and values, and for converting to and from ``list`` and ``dict``.
"""
try:
from .pkg import nameddict, name_convention
except ImportError:
from constant.pkg import nameddict, name_convention
SEP = "____"
class Base(nameddict.Base):
"""nameddict base class.
"""
__attrs__ = None
"""该属性非常重要, 定义了哪些属性被真正视为 ``attributes``, 换言之, 就是在
:meth:`~Base.keys()`, :meth:`~Base.values()`, :meth:`~Base.items()`,
:meth:`~Base.to_list()`, :meth:`~Base.to_dict()`, :meth:`~Base.to_OrderedDict()`,
:meth:`~Base.to_json()`, 方法中要被包括的属性。
"""
def items(self):
return [(key, value) for key, value in super(Base, self).items()
if SEP not in key]
def _getattr_by_key_value(self, key):
"""High order function for self.getattr_by_field(value).
"""
def getattr_by_key_value(value):
return getattr(
self, "%s____%s" % (key, name_convention.to_index_key(value)))
return getattr_by_key_value
def __getattr__(self, attr):
"""
>>> obj.getattr_by_name("John") == obj.name____John
True
>>> obj.name____John.name == "John"
True
"""
if attr.startswith("getattr_by_"):
key = attr.replace("getattr_by_", "")
return self._getattr_by_key_value(key)
else:
return object.__getattribute__(self, attr)
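# Illustrative sketch (not part of the original module; ``obj`` is a hypothetical
# instance): keys embedding the separator, e.g. ``name____John``, are hidden from
# ``items()`` by the filter above, while ``getattr_by_<key>(value)`` resolves them
# dynamically:
#
# >>> obj.getattr_by_name("John") is obj.name____John
# True
# >>> "name____John" in dict(obj.items())
# False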
#--- Unittest ---
if __name__ == "__main__":
"""
"""
| 26.031746
| 85
| 0.590854
|
41d5809adb7d350a2852159d1a56f85a78d56f9c
| 6,362
|
py
|
Python
|
code/zoltar-scripts/upload_covid19_forecasts_to_zoltar.py
|
Yannael/covid19-forecast-hub
|
a16c9a147df5536ea75f2870b1f8a0227ef6d3ee
|
[
"MIT"
] | null | null | null |
code/zoltar-scripts/upload_covid19_forecasts_to_zoltar.py
|
Yannael/covid19-forecast-hub
|
a16c9a147df5536ea75f2870b1f8a0227ef6d3ee
|
[
"MIT"
] | null | null | null |
code/zoltar-scripts/upload_covid19_forecasts_to_zoltar.py
|
Yannael/covid19-forecast-hub
|
a16c9a147df5536ea75f2870b1f8a0227ef6d3ee
|
[
"MIT"
] | null | null | null |
from zoltpy.quantile_io import json_io_dict_from_quantile_csv_file
from zoltpy import util
from zoltpy.connection import ZoltarConnection
from zoltpy.covid19 import VALID_TARGET_NAMES, covid19_row_validator, validate_quantile_csv_file
import os
import sys
import yaml
import hashlib
import pickle
# meta info
project_name = 'COVID-19 Forecasts'
project_obj = None
project_timezeros = []
conn = util.authenticate()
url = 'https://github.com/reichlab/covid19-forecast-hub/tree/master/data-processed/'
try:
with open('./code/zoltar-scripts/validated_file_db.p', 'rb') as f:
l = pickle.load(f)
f.close()
except Exception as ex:
l = []
db = dict(l)
# Get all existing timezeros and models in the project
project_obj = [project for project in conn.projects if project.name == project_name][0]
project_timezeros = [timezero.timezero_date for timezero in project_obj.timezeros]
models = [model for model in project_obj.models]
model_names = [model.name for model in models]
# Function to read metadata file to get model name
def metadata_dict_for_file(metadata_file):
with open(metadata_file, encoding="utf8") as metadata_fp:
metadata_dict = yaml.safe_load(metadata_fp)
return metadata_dict
# Function to upload all forecasts in a specific directory
def upload_covid_all_forecasts(path_to_processed_model_forecasts, dir_name):
global models
global model_names
# Get all forecasts in the directory of this model
forecasts = os.listdir(path_to_processed_model_forecasts)
# Get model name or create a new model if it's not in the current Zoltar project
try:
metadata = metadata_dict_for_file(path_to_processed_model_forecasts+'metadata-'+dir_name+'.txt')
except Exception as ex:
return ex
model_name = metadata['model_name']
if model_name not in model_names:
model_config = {
'name': metadata['model_name'],
'abbreviation': metadata['team_abbr'] + '-' + metadata['model_abbr'],
'team_name': metadata['team_name'],
'description': metadata['methods'],
'home_url': url + dir_name,
'aux_data_url': 'NA',
}
try:
project_obj.create_model(model_config)
models = project_obj.models
model_names = [model.name for model in models]
except Exception as ex:
return ex
model = [model for model in models if model.name == model_name][0]
# Get names of existing forecasts to avoid re-upload
existing_forecasts = [forecast.source for forecast in model.forecasts]
# Batch upload
json_io_dict_batch = []
forecast_filename_batch = []
timezero_date_batch = []
for forecast in forecasts:
over_write = False
# Check if forecast is already on zoltar
with open(path_to_processed_model_forecasts+forecast, "rb") as f:
# Get the current hash of a processed file
checksum = hashlib.md5(f.read()).hexdigest()
f.close()
# Check this hash against the previous version of hash
if db.get(forecast, None) != checksum:
print(forecast)
db[forecast] = checksum
if forecast in existing_forecasts:
over_write = True
else:
continue
# Skip metadata text file
if '.txt' in forecast:
continue
with open(path_to_processed_model_forecasts+forecast) as fp:
# Get the timezero and create it on Zoltar if it does not already exist
time_zero_date = forecast.split(dir_name)[0][:-1]
if time_zero_date not in project_timezeros:
try:
project_obj.create_timezero(time_zero_date)
project_timezeros.append(time_zero_date)
except Exception as ex:
return ex
# Validate covid19 file
errors_from_validation = validate_quantile_csv_file(path_to_processed_model_forecasts+forecast)
# Upload forecast
if "no errors" == errors_from_validation:
quantile_json, error_from_transformation = json_io_dict_from_quantile_csv_file(fp, VALID_TARGET_NAMES, covid19_row_validator)
if len(error_from_transformation) > 0:
return error_from_transformation
else:
try:
util.upload_forecast(conn, quantile_json, forecast,
project_name, model_name, time_zero_date, overwrite=over_write)
except Exception as ex:
print(ex)
return ex
json_io_dict_batch.append(quantile_json)
timezero_date_batch.append(time_zero_date)
forecast_filename_batch.append(forecast)
else:
return errors_from_validation
fp.close()
# # Batch upload for better performance
# if len(json_io_dict_batch) > 0:
# try:
# util.upload_forecast_batch(conn, json_io_dict_batch, forecast_filename_batch, project_name, model_name, timezero_date_batch, overwrite = over_write)
# except Exception as ex:
# return ex
return "Pass"
# Example Run: python3 ./code/zoltar-scripts/upload_covid19_forecasts_to_zoltar.py
if __name__ == '__main__':
list_of_model_directories = os.listdir('./data-processed/')
output_errors = {}
for directory in list_of_model_directories:
# if "CovidActNow-SEIR_CAN" not in directory:
# continue
if "." in directory:
continue
output = upload_covid_all_forecasts('./data-processed/'+directory+'/',directory)
if output != "Pass":
output_errors[directory] = output
# List all files that did not get uploaded and their errors
if len(output_errors) > 0:
for directory, errors in output_errors.items():
print("\n* ERROR IN '", directory, "'")
print(errors)
sys.exit("\n ERRORS FOUND EXITING BUILD...")
else:
print("✓ no errors")
with open('./code/zoltar-scripts/validated_file_db.p', 'wb') as fw:
pickle.dump(db, fw)
fw.close()
| 39.515528
| 172
| 0.649167
|
2afcc43ccfa943fff4102358ceb907ab6f08151c
| 1,031
|
py
|
Python
|
www/SYS/FILE.py
|
ranyxr/infoVis
|
307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b
|
[
"MIT"
] | 2
|
2020-05-27T11:12:41.000Z
|
2020-12-17T19:33:41.000Z
|
www/SYS/FILE.py
|
ranyxr/infoVis
|
307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b
|
[
"MIT"
] | null | null | null |
www/SYS/FILE.py
|
ranyxr/infoVis
|
307c2ffc4c7d6cf87ed000310a1f2b6233bd7a3b
|
[
"MIT"
] | 3
|
2020-03-18T19:20:24.000Z
|
2020-12-17T17:37:24.000Z
|
from SYS import DIR
import os
"picture-info.csv"
_meta_data_fnm = "metadatas.csv"
_meta_data_uri = os.path.join(DIR.raw_data, _meta_data_fnm)
meta_data_uri = _meta_data_uri
_meta_level_meta_fnm = "picture-description.csv"
_meta_level_meta_uri = os.path.join(DIR.raw_data, _meta_level_meta_fnm)
pic_desc_uri = _meta_level_meta_uri
_data_dump_fnm = "datadump.csv"
_data_dump_uri = os.path.join(DIR.raw_data, _data_dump_fnm)
data_dump_uri = _data_dump_uri
_reproductions_fnm = "picture-info.csv"
_reproductions_uri = os.path.join(DIR.raw_data, _reproductions_fnm)
pic_info_uri = _reproductions_uri
cleaned_data1_fnm = DIR.clean_data1
cleaned_data1_uri = DIR.clean_data1
cleaned_data2_fnm = DIR.clean_data2
cleaned_data2_uri = DIR.clean_data2
word_cloud_data1_fnm = DIR.word_cloud_data1
word_cloud_data1_uri = DIR.word_cloud_data1
result_stack_data_uri = DIR.result_stack_data
result_stack_x_axis_uri = DIR.result_stack_x_axis
result_stack_art_type_uri = DIR.result_stack_art_type
result_word_cloud_uri = DIR.result_word_cloud
| 30.323529
| 71
| 0.845781
|
1ab40d03f981526d9e031f31a0832b6f0bb50bd7
| 81,522
|
py
|
Python
|
sectionproperties/pre/nastran_sections.py
|
audunarn/section-properties
|
6dd68d0cb6b31adcaffeb5ea9f78f985a8955f95
|
[
"MIT"
] | 2
|
2021-01-18T08:04:55.000Z
|
2021-03-06T01:23:39.000Z
|
sectionproperties/pre/nastran_sections.py
|
audunarn/section-properties
|
6dd68d0cb6b31adcaffeb5ea9f78f985a8955f95
|
[
"MIT"
] | null | null | null |
sectionproperties/pre/nastran_sections.py
|
audunarn/section-properties
|
6dd68d0cb6b31adcaffeb5ea9f78f985a8955f95
|
[
"MIT"
] | null | null | null |
from sectionproperties.pre.sections import (
Geometry, RectangularSection, CustomSection, MergedSection
)
from sectionproperties.pre.pre import create_mesh
import numpy as np
class BARSection(Geometry):
"""Constructs a BAR section with the center at the origin *(0, 0)*, with two parameters
defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ [5]_ for definition of
parameters. Added by JohnDN90.
:param float DIM1: Width (x) of bar
:param float DIM2: Depth (y) of bar
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a BAR cross-section with a depth of 1.5 and width of 2.0, and
generates a mesh with a maximum triangular area of 0.001::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.BARSection(DIM1=2.0, DIM2=1.5)
mesh = geometry.create_mesh(mesh_sizes=[0.001])
.. figure:: ../images/sections/bar_geometry.png
:align: center
:scale: 75 %
BAR section geometry.
.. figure:: ../images/sections/bar_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, shift=[0, 0]):
"""Inits the BARSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
# assign control point
control_points = [[0., 0.]]
# shift = [-0.5*DIM1+shift[0], -0.5*DIM2+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[-0.5*DIM1, -0.5*DIM2], [0.5*DIM1, -0.5*DIM2],
[0.5*DIM1, 0.5*DIM2], [-0.5*DIM1, 0.5*DIM2]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
D = (0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
E = (-0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
F = (-0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
return C, D, E, F
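# Illustrative sketch (not part of the original module): querying the stress
# recovery points defined by ``getStressPoints``; ``_demo_bar_stress_points`` is a
# hypothetical helper and the dimensions are those of the docstring example.
def _demo_bar_stress_points():
    bar = BARSection(DIM1=2.0, DIM2=1.5)
    corners = bar.getStressPoints()  # C, D, E, F relative to the section origin
    shifted = bar.getStressPoints(shift=(0.1, 0.2))  # relative to a shifted origin, e.g. the centroid
    return corners, shifted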
class BOXSection(Geometry):
""" Constructs a BOX section with the center at the origin *(0, 0)*, with four parameters
defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ [5]_ for definition of
parameters. Added by JohnDN90.
:param float DIM1: Width (x) of box
:param float DIM2: Depth (y) of box
:param float DIM3: Thickness of box in y direction
:param float DIM4: Thickness of box in x direction
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a BOX cross-section with a depth of 3.0 and width of 4.0, and
generates a mesh with a maximum triangular area of 0.001::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.BOXSection(DIM1=4.0, DIM2=3.0, DIM3=0.375, DIM4=0.5)
mesh = geometry.create_mesh(mesh_sizes=[0.001])
.. figure:: ../images/sections/box_geometry.png
:align: center
:scale: 75 %
BOX section geometry.
.. figure:: ../images/sections/box_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the BOXSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(2.0*DIM4 < DIM1, "Invalid geometry specified.")
np.testing.assert_(2.0*DIM3 < DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0., 0.5*DIM2 - 0.5*DIM3]]
super().__init__(control_points, shift)
# specify a hole in the centre of the Box
self.holes = [[0., 0.]]
# construct the points and facets
self.points = [
[-0.5*DIM1, -0.5*DIM2], [0.5*DIM1, -0.5*DIM2], [0.5*DIM1, 0.5*DIM2],
[-0.5*DIM1, 0.5*DIM2], [-0.5*DIM1 + DIM4, -0.5*DIM2 + DIM3],
[0.5*DIM1 - DIM4, -0.5*DIM2 + DIM3], [0.5*DIM1 - DIM4, 0.5*DIM2 - DIM3],
[-0.5*DIM1 + DIM4, 0.5*DIM2 - DIM3]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
D = (0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
E = (-0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
F = (-0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
return C, D, E, F
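# Illustrative sketch (not part of the original module): the ``np.testing.assert_``
# checks above reject physically impossible dimensions by raising an AssertionError;
# ``_demo_box_dimension_check`` is a hypothetical helper with demonstration values.
def _demo_box_dimension_check():
    try:
        BOXSection(DIM1=1.0, DIM2=1.0, DIM3=0.75, DIM4=0.75)  # 2 * DIM4 >= DIM1
    except AssertionError as err:
        return err  # "Invalid geometry specified."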
class BOX1Section(Geometry):
"""Constructs a BOX1 section with the center at the origin *(0, 0)*, with six parameters
defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more details. Added by
JohnDN90.
:param float DIM1: Width (x) of box
:param float DIM2: Depth (y) of box
:param float DIM3: Thickness of top wall
:param float DIM4: Thickness of bottom wall
:param float DIM5: Thickness of left wall
:param float DIM6: Thickness of right wall
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a BOX1 cross-section with a depth of 3.0 and width of 4.0, and
generates a mesh with a maximum triangular area of 0.007::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.BOX1Section(
DIM1=4.0, DIM2=3.0, DIM3=0.375, DIM4=0.5, DIM5=0.25, DIM6=0.75
)
mesh = geometry.create_mesh(mesh_sizes=[0.007])
.. figure:: ../images/sections/box1_geometry.png
:align: center
:scale: 75 %
BOX1 section geometry.
.. figure:: ../images/sections/box1_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, DIM5, DIM6, shift=[0, 0]):
"""Inits the Box1Section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
DIM5 *= 1.0
DIM6 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
self.DIM5 = DIM5
self.DIM6 = DIM6
# Ensure dimensions are physically relevant
np.testing.assert_(DIM5+DIM6 < DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM3+DIM4 < DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1, 0.5*DIM4]]
shift = [-0.5*DIM1+shift[0], -0.5*DIM2+shift[1]]
super().__init__(control_points, shift)
# specify a hole in the centre of the Box
self.holes = [[DIM6 + 0.5*(DIM1-DIM5), DIM4+0.5*(DIM2-DIM3)]]
# construct the points and facets
self.points = [
[0., 0.], [DIM1, 0.], [DIM1, DIM2], [0., DIM2], [DIM6, DIM4], [DIM1-DIM5, DIM4],
[DIM1-DIM5, DIM2-DIM3], [DIM6, DIM2-DIM3]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
""" Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
D = (0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
E = (-0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
F = (-0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
return C, D, E, F
class CHANSection(Geometry):
""" Constructs a CHAN (C-Channel) section with the web's middle center at the origin *(0, 0)*,
with four parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for
more details. Added by JohnDN90.
:param float DIM1: Width (x) of the CHAN-section
:param float DIM2: Depth (y) of the CHAN-section
:param float DIM3: Thickness of web (vertical portion)
:param float DIM4: Thickness of flanges (top/bottom portion)
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a CHAN cross-section with a depth of 4.0 and width of 2.0, and
generates a mesh with a maximum triangular area of 0.008::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.CHANSection(DIM1=2.0, DIM2=4.0, DIM3=0.25, DIM4=0.5)
mesh = geometry.create_mesh(mesh_sizes=[0.008])
.. figure:: ../images/sections/chan_geometry.png
:align: center
:scale: 75 %
CHAN section geometry.
.. figure:: ../images/sections/chan_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the CHANSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(2.0*DIM4 < DIM2, "Invalid geometry specified.")
np.testing.assert_(DIM3 < DIM1, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1, 0.5*DIM4]]
shift = [-0.5*DIM3+shift[0], -0.5*DIM2+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[0., 0.], [DIM1, 0.], [DIM1, DIM4], [DIM3, DIM4], [DIM3, DIM2-DIM4], [DIM1, DIM2-DIM4],
[DIM1, DIM2], [0., DIM2]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
""" Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (self.DIM1-0.5*self.DIM3-shift[0], 0.5*self.DIM2-shift[1])
D = (self.DIM1-0.5*self.DIM3-shift[0], -0.5*self.DIM2-shift[1])
E = (-0.5*self.DIM3-shift[0], -0.5*self.DIM2-shift[1])
F = (-0.5*self.DIM3-shift[0], 0.5*self.DIM2-shift[1])
return C, D, E, F
class CHAN1Section(Geometry):
""" Constructs a CHAN1 (C-Channel) section with the web's middle center at the origin *(0, 0)*,
with four parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for
more details. Added by JohnDN90.
:param float DIM1: Width (x) of channels
:param float DIM2: Thickness (x) of web
:param float DIM3: Spacing between channels (length of web)
:param float DIM4: Depth (y) of CHAN1-section
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a CHAN1 cross-section with a depth of 4.0 and width of 1.75, and
generates a mesh with a maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.CHAN1Section(DIM1=0.75, DIM2=1.0, DIM3=3.5, DIM4=4.0)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/chan1_geometry.png
:align: center
:scale: 75 %
CHAN1 section geometry.
.. figure:: ../images/sections/chan1_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the CHAN1Section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 > DIM3, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1, 0.5*DIM4]]
shift = [-0.5*DIM2+shift[0], -0.5*DIM4+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
tf = 0.5 * (DIM4 - DIM3)
self.points = [
[0, 0], [DIM1+DIM2, 0], [DIM1+DIM2, tf], [DIM2, tf], [DIM2, tf+DIM3],
[DIM2+DIM1, tf+DIM3], [DIM2+DIM1, DIM4], [0, DIM4]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
""" Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM2+self.DIM1-shift[0], 0.5*self.DIM4-shift[1])
D = (0.5*self.DIM2+self.DIM1-shift[0], -0.5*self.DIM4-shift[1])
E = (-0.5*self.DIM2-shift[0], -0.5*self.DIM4-shift[1])
F = (-0.5*self.DIM2-shift[0], 0.5*self.DIM4-shift[1])
return C, D, E, F
class CHAN2Section(Geometry):
""" Constructs a CHAN2 (C-Channel) section with the bottom web's middle center at the origin
*(0, 0)*, with four parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_
[4]_ for more details. Added by JohnDN90.
:param float DIM1: Thickness of channels
:param float DIM2: Thickness of web
:param float DIM3: Depth (y) of CHAN2-section
:param float DIM4: Width (x) of CHAN2-section
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a CHAN2 cross-section with a depth of 2.0 and width of 4.0, and
generates a mesh with a maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.CHAN2Section(DIM1=0.375, DIM2=0.5, DIM3=2.0, DIM4=4.0)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/chan2_geometry.png
:align: center
:scale: 75 %
CHAN2 section geometry.
.. figure:: ../images/sections/chan2_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the CHAN2Section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 > 2.0*DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM3 > DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM4, 0.5*DIM2]]
shift = [-0.5*DIM4+shift[0], -0.5*DIM2+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[0., 0.], [DIM4, 0.], [DIM4, DIM3], [DIM4-DIM1, DIM3], [DIM4-DIM1, DIM2], [DIM1, DIM2],
[DIM1, DIM3], [0., DIM3]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
""" Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM4-shift[0], self.DIM3-0.5*self.DIM2-shift[1])
D = (0.5*self.DIM4-shift[0], -0.5*self.DIM2-shift[1])
E = (-0.5*self.DIM4-shift[0], -0.5*self.DIM2-shift[1])
F = (-0.5*self.DIM4-shift[0], self.DIM3-0.5*self.DIM2-shift[1])
return C, D, E, F
class CROSSSection(Geometry):
""" Constructs Nastran's cruciform/cross section with the intersection's middle center at the
origin *(0, 0)*, with four parameters defining dimensions. See Nastran documentation [1]_ [2]_
[3]_ [4]_ for more details. Added by JohnDN90.
:param float DIM1: Twice the width of horizontal member protruding from the vertical center
member
:param float DIM2: Thickness of the vertical member
:param float DIM3: Depth (y) of the CROSS-section
:param float DIM4: Thickness of the horizontal members
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a cruciform cross-section with a depth of 3.0 and width of
1.875, and generates a mesh with a maximum triangular area of 0.008::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.CROSSSection(DIM1=1.5, DIM2=0.375, DIM3=3.0, DIM4=0.25)
mesh = geometry.create_mesh(mesh_sizes=[0.008])
.. figure:: ../images/sections/cross_geometry.png
:align: center
:scale: 75 %
Cruciform/cross section geometry.
.. figure:: ../images/sections/cross_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the CROSSSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 < DIM3, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1+0.5*DIM2, 0.5*DIM3]]
shift = [-(0.5*DIM1+0.5*DIM2)+shift[0], -(0.5*DIM3)+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
d = 0.5*(DIM3 - DIM4)
self.points = [
[0.5*DIM1, 0], [0.5*DIM1+DIM2, 0], [0.5*DIM1+DIM2, d], [DIM1+DIM2, d],
[DIM1+DIM2, d+DIM4], [0.5*DIM1+DIM2, d+DIM4], [0.5*DIM1+DIM2, DIM3], [0.5*DIM1, DIM3],
[0.5*DIM1, d+DIM4], [0, d+DIM4], [0, d], [0.5*DIM1, d]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], 0.5*self.DIM3-shift[1])
D = (0.5*(self.DIM1+self.DIM2)-shift[0], -shift[1])
E = (-shift[0], -0.5*self.DIM3-shift[1])
F = (-0.5*(self.DIM1+self.DIM2)-shift[0], -shift[1])
return C, D, E, F
class FCROSSSection(Geometry):
""" Constructs a flanged cruciform/cross section with the intersection's middle center at the
origin *(0, 0)*, with eight parameters defining dimensions. Added by JohnDN90.
:param float DIM1: Depth (y) of flanged cruciform
:param float DIM2: Width (x) of flanged cruciform
:param float DIM3: Thickness of vertical web
:param float DIM4: Thickness of horizontal web
:param float DIM5: Length of flange attached to vertical web
:param float DIM6: Thickness of flange attached to vertical web
:param float DIM7: Length of flange attached to horizontal web
:param float DIM8: Thickness of flange attached to horizontal web
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example demonstrates the creation of a flanged cross section::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.FCROSSSection(
DIM1=9.0, DIM2=6.0, DIM3=0.75, DIM4=0.625, DIM5=2.1, DIM6=0.375, DIM7=4.5, DIM8=0.564
)
mesh = geometry.create_mesh(mesh_sizes=[0.03])
.. figure:: ../images/sections/fcross_geometry.png
:align: center
:scale: 75 %
Flanged Cruciform/cross section geometry.
.. figure:: ../images/sections/fcross_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, DIM5, DIM6, DIM7, DIM8, shift=[0, 0]):
"""Inits the FCROSSSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
DIM5 *= 1.0
DIM6 *= 1.0
DIM7 *= 1.0
DIM8 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
self.DIM5 = DIM5
self.DIM6 = DIM6
self.DIM7 = DIM7
self.DIM8 = DIM8
# Ensure dimensions are physically relevant
# TODO: Finish dimension checks.
np.testing.assert_(DIM5 > DIM3, "Invalid geometry specified.")
np.testing.assert_(DIM7 > DIM4, "Invalid geometry specified.")
np.testing.assert_(DIM7 < DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM5 < DIM2, "Invalid geometry specified.")
np.testing.assert_(DIM8 < (0.5*DIM2-0.5*DIM3), "Invalid geometry specified.")
np.testing.assert_(DIM6 < (0.5*DIM1-0.5*DIM4), "Invalid geometry specified.")
# assign control point
control_points = [[0.0, 0.0]]
shift = [shift[0], shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[0.5*DIM3, -0.5*DIM4], [0.5*DIM2-DIM8, -0.5*DIM4], [0.5*DIM2-DIM8, -0.5*DIM7],
[0.5*DIM2, -0.5*DIM7], [0.5*DIM2, 0.5*DIM7], [0.5*DIM2-DIM8, 0.5*DIM7],
[0.5*DIM2-DIM8, 0.5*DIM4], [0.5*DIM3, 0.5*DIM4], [0.5*DIM3, 0.5*DIM1-DIM6],
[0.5*DIM5, 0.5*DIM1-DIM6], [0.5*DIM5, 0.5*DIM1], [-0.5*DIM5, 0.5*DIM1],
[-0.5*DIM5, 0.5*DIM1-DIM6], [-0.5*DIM3, 0.5*DIM1-DIM6], [-0.5*DIM3, 0.5*DIM4],
[-0.5*DIM2+DIM8, 0.5*DIM4], [-0.5*DIM2+DIM8, 0.5*DIM7], [-0.5*DIM2, 0.5*DIM7],
[-0.5*DIM2, -0.5*DIM7], [-0.5*DIM2+DIM8, -0.5*DIM7], [-0.5*DIM2+DIM8, -0.5*DIM4],
[-0.5*DIM3, -0.5*DIM4], [-0.5*DIM3, -0.5*DIM1+DIM6], [-0.5*DIM5, -0.5*DIM1+DIM6],
[-0.5*DIM5, -0.5*DIM1], [0.5*DIM5, -0.5*DIM1], [0.5*DIM5, -0.5*DIM1+DIM6],
[0.5*DIM3, -0.5*DIM1+DIM6]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 12], [12, 13], [13, 14], [14, 15], [15, 16], [16, 17], [17, 18],
[18, 19], [19, 20], [20, 21], [21, 22], [22, 23], [23, 24], [24, 25], [25, 26],
[26, 27], [27, 0]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], 0.5*self.DIM1-shift[1])
D = (0.5*self.DIM2-shift[0], -shift[1])
E = (-shift[0], -0.5*self.DIM1-shift[1])
F = (-0.5*self.DIM2-shift[0], -shift[1])
return C, D, E, F
class DBOXSection(Geometry):
""" Constructs a DBOX section with the center at the origin *(0, 0)*, with ten parameters
defining dimensions. See MSC Nastran documentation [1]_ for more details. Added by JohnDN90.
:param float DIM1: Width (x) of the DBOX-section
:param float DIM2: Depth (y) of the DBOX-section
:param float DIM3: Width (x) of left-side box
:param float DIM4: Thickness of left wall
:param float DIM5: Thickness of center wall
:param float DIM6: Thickness of right wall
:param float DIM7: Thickness of top left wall
:param float DIM8: Thickness of bottom left wall
:param float DIM9: Thickness of top right wall
:param float DIM10: Thickness of bottom right wall
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a DBOX cross-section with a depth of 3.0 and width of 8.0, and
generates a mesh with a maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.DBOXSection(
DIM1=8.0, DIM2=3.0, DIM3=3.0, DIM4=0.5, DIM5=0.625, DIM6=0.75, DIM7=0.375, DIM8=0.25,
DIM9=0.5, DIM10=0.375
)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/dbox_geometry.png
:align: center
:scale: 75 %
DBOX section geometry.
.. figure:: ../images/sections/dbox_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, DIM5, DIM6, DIM7, DIM8, DIM9, DIM10, shift=[0, 0]):
"""Inits the DBOXSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
DIM5 *= 1.0
DIM6 *= 1.0
DIM7 *= 1.0
DIM8 *= 1.0
DIM9 *= 1.0
DIM10 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
self.DIM5 = DIM5
self.DIM6 = DIM6
self.DIM7 = DIM7
self.DIM8 = DIM8
self.DIM9 = DIM9
self.DIM10 = DIM10
# Ensure dimensions are physically relevant
np.testing.assert_((DIM4+DIM5+DIM6) < DIM1, "Invalid geometry specified.")
np.testing.assert_((DIM4+0.5*DIM5) < DIM3, "Invalid geometry specified.")
np.testing.assert_((DIM7+DIM8) < DIM2, "Invalid geometry specified.")
np.testing.assert_((DIM9+DIM10) < DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM3, 0.5*DIM8]]
shift = [-0.5*DIM1+shift[0], -0.5*DIM2+shift[1]]
super().__init__(control_points, shift)
# specify a hole in the centre of the Box
d2 = 0.5*(DIM1 - DIM6 - DIM3 - 0.5*DIM5)
self.holes = [
[DIM4 + 0.5*(DIM3 - DIM4 - 0.5*DIM5), DIM8 + 0.5*(DIM2 - DIM8 - DIM7)],
[DIM3 + 0.5*DIM5 + d2, DIM10 + 0.5*(DIM2 - DIM10 - DIM9)]
]
# construct the points and facets
self.points = [
[0., 0.], [DIM1, 0.], [DIM1, DIM2], [0., DIM2], [DIM4, DIM8], [DIM3-DIM5/2., DIM8],
[DIM3-DIM5/2., DIM2-DIM7], [DIM4, DIM2-DIM7], [DIM3+DIM5/2., DIM10],
[DIM1-DIM6, DIM10], [DIM1-DIM6, DIM2-DIM9], [DIM3+DIM5/2., DIM2-DIM9]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [8, 9], [9, 10],
[10, 11], [11, 8]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""
Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5 * self.DIM1 - shift[0], 0.5 * self.DIM2 - shift[1])
D = (0.5 * self.DIM1 - shift[0], -0.5 * self.DIM2 - shift[1])
E = (-0.5 * self.DIM1 - shift[0], -0.5 * self.DIM2 - shift[1])
F = (-0.5 * self.DIM1 - shift[0], 0.5 * self.DIM2 - shift[1])
return C, D, E, F
class GBOXSection(Geometry):
""" Constructs a GBOX section with the center at the origin *(0, 0)*, with six parameters
defining dimensions. See ASTROS documentation [5]_ for more details. Added by JohnDN90.
:param float DIM1: Width (x) of the GBOX-section
:param float DIM2: Depth (y) of the GBOX-section
:param float DIM3: Thickness of top flange
:param float DIM4: Thickness of bottom flange
:param float DIM5: Thickness of webs
:param float DIM6: Spacing between webs
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a GBOX cross-section with a depth of 2.5 and width of 6.0, and
generates a mesh with a maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.GBOXSection(
DIM1=6.0, DIM2=2.5, DIM3=0.375, DIM4=0.25, DIM5=0.625, DIM6=1.0
)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/gbox_geometry.png
:align: center
:scale: 75 %
GBOX section geometry.
.. figure:: ../images/sections/gbox_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, DIM5, DIM6, shift=[0, 0]):
"""Inits the GBOXSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
DIM5 *= 1.0
DIM6 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
self.DIM5 = DIM5
self.DIM6 = DIM6
# Ensure dimensions are physically relevant
np.testing.assert_((DIM3+DIM4) < DIM2, "Invalid geometry specified.")
np.testing.assert_((2.0*DIM5+DIM6) < DIM1, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1, 0.5*DIM4]]
shift = [-(0.5*DIM1)+shift[0], -(DIM4 + 0.5*(DIM2-DIM3-DIM4))+shift[1]]
super().__init__(control_points, shift)
# specify a hole in the centre of the GBOX
self.holes = [[0.5*DIM1, 0.5*DIM2]]
# construct the points and facets
d = 0.5*(DIM1 - DIM6 - 2.0 * DIM5)
self.points = [
[0., 0.], [DIM1, 0.], [DIM1, DIM4], [d + 2. * DIM5 + DIM6, DIM4],
[d + 2. * DIM5 + DIM6, DIM2 - DIM3], [DIM1, DIM2 - DIM3], [DIM1, DIM2], [0., DIM2],
[0., DIM2 - DIM3], [d, DIM2 - DIM3], [d, DIM4], [0., DIM4], [d + DIM5, DIM4],
[d + DIM5 + DIM6, DIM4], [d + DIM5 + DIM6, DIM2 - DIM3], [d + DIM5, DIM2 - DIM3]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0], [12, 13], [13, 14], [14, 15], [15, 12]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
D = (0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
E = (-0.5*self.DIM1-shift[0], -0.5*self.DIM2-shift[1])
F = (-0.5*self.DIM1-shift[0], 0.5*self.DIM2-shift[1])
return C, D, E, F
class HSection(Geometry):
"""Constructs a H section with the middle web's middle center at the origin *(0, 0)*, with four
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more details.
Added by JohnDN90.
:param float DIM1: Spacing between vertical flanges (length of web)
:param float DIM2: Twice the thickness of the vertical flanges
:param float DIM3: Depth (y) of the H-section
:param float DIM4: Thickness of the middle web
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an H cross-section with a depth of 3.5 and width of 2.75, and
generates a mesh with a maximum triangular area of 0.005::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.HSection(DIM1=2.0, DIM2=0.75, DIM3=3.5, DIM4=0.25)
mesh = geometry.create_mesh(mesh_sizes=[0.005])
.. figure:: ../images/sections/h_geometry.png
:align: center
:scale: 75 %
H section geometry.
.. figure:: ../images/sections/h_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the HSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 < DIM3, "Invalid geometry specified.")
d1 = 0.5 * (DIM3 - DIM4)
d2 = 0.5 * DIM2
# assign control point
control_points = [[0.5*d2, 0.5*DIM3]]
shift = [-0.5*(DIM2+DIM1)+shift[0], -0.5*DIM3+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[0, 0], [d2, 0], [d2, d1], [d2+DIM1, d1], [d2+DIM1, 0], [DIM1+DIM2, 0],
[DIM1+DIM2, DIM3], [DIM1+DIM2-d2, DIM3], [DIM1+DIM2-d2, d1+DIM4], [d2, d1+DIM4],
[d2, DIM3], [0, DIM3]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*(self.DIM1+self.DIM2)-shift[0], 0.5*self.DIM3-shift[1])
D = (0.5*(self.DIM1+self.DIM2)-shift[0], -0.5*self.DIM3-shift[1])
E = (-0.5*(self.DIM1+self.DIM2)-shift[0], -0.5*self.DIM3-shift[1])
F = (-0.5*(self.DIM1+self.DIM2)-shift[0], 0.5*self.DIM3-shift[1])
return C, D, E, F
class HATSection(Geometry):
"""Constructs a Hat section with the top most section's middle center at the origin *(0, 0)*,
with four parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for
more details. Note that HAT in ASTROS is actually HAT1 in this code. Added by JohnDN90.
:param float DIM1: Depth (y) of HAT-section
:param float DIM2: Thickness of HAT-section
:param float DIM3: Width (x) of top most section
:param float DIM4: Width (x) of bottom sections
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a HAT cross-section with a depth of 1.25 and width of 2.5, and
generates a mesh with a maximum triangular area of 0.001::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.HATSection(DIM1=1.25, DIM2=0.25, DIM3=1.5, DIM4=0.5)
mesh = geometry.create_mesh(mesh_sizes=[0.001])
.. figure:: ../images/sections/hat_geometry.png
:align: center
:scale: 75 %
HAT section geometry.
.. figure:: ../images/sections/hat_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the HATSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(2.0*DIM2 < DIM1, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM4, 0.5*DIM2]]
shift = [-DIM4-0.5*DIM3+shift[0], -DIM1+0.5*DIM2+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[0., 0.], [DIM4+DIM2, 0.], [DIM4+DIM2, DIM1-DIM2], [DIM4+DIM3-DIM2, DIM1-DIM2],
[DIM4+DIM3-DIM2, 0.], [2*DIM4+DIM3, 0.], [2.*DIM4+DIM3, DIM2], [DIM4+DIM3, DIM2],
[DIM4+DIM3, DIM1], [DIM4, DIM1], [DIM4, DIM2], [0., DIM2]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the origin by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM3 - shift[0], 0.5*self.DIM2 - shift[1])
D = (0.5*self.DIM3 + self.DIM4 - shift[0], -self.DIM1 + self.DIM2 - shift[1])
E = (-0.5*self.DIM3 - self.DIM4 - shift[0], -self.DIM1 + self.DIM2 - shift[1])
F = (-0.5*self.DIM3 - shift[0], 0.5*self.DIM2 - shift[1])
return C, D, E, F
class HAT1Section(Geometry):
""" Constructs a HAT1 section with the bottom plate's bottom center at the origin *(0, 0)*,
with five parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [5]_ for
definition of parameters. Note that in ASTROS, HAT1 is called HAT. Added by JohnDN90.
:param float DIM1: Width(x) of the HAT1-section
:param float DIM2: Depth (y) of the HAT1-section
:param float DIM3: Width (x) of hat's top flange
:param float DIM4: Thickness of hat stiffener
:param float DIM5: Thickness of bottom plate
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a HAT1 cross-section with a depth of 2.0 and width of 4.0, and
generates a mesh with a maximum triangular area of 0.005::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.HAT1Section(DIM1=4.0, DIM2=2.0, DIM3=1.5, DIM4=0.1875, DIM5=0.375)
mesh = geometry.create_mesh(mesh_sizes=[0.005])
.. figure:: ../images/sections/hat1_geometry.png
:align: center
:scale: 75 %
HAT1 section geometry.
.. figure:: ../images/sections/hat1_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, DIM5, shift=[0, 0]):
"""Inits the HAT1Section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
DIM5 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
self.DIM5 = DIM5
# Ensure dimensions are physically relevant
np.testing.assert_((2.0*DIM4+DIM5) < DIM2, "Invalid geometry specified.")
np.testing.assert_(DIM3 < DIM1, "Invalid geometry specified.")
shift = [-0.5*DIM1+shift[0], shift[1]]
# create bottom rectangular plate
bottom_plate = RectangularSection(d=DIM5, b=DIM1, shift=shift)
# create the hat stiffener
d1 = DIM2 - DIM5
d2 = DIM4
d4 = 0.5*(DIM1 - DIM3)
# specify a hole in the combined plate and hat structure
holes = [[0.5*DIM1, 0.5*DIM2]]
# assign control point
control_points = [[0.5*d4, DIM5 + 0.5*DIM4]]
super().__init__(control_points, shift)
# construct the points and facets
points = [
[0., DIM5 + 0.], [d4 + d2, DIM5 + 0.], [d4 + d2, DIM5 + d1 - d2],
[d4 + DIM3 - d2, DIM5 + d1 - d2], [d4 + DIM3 - d2, DIM5 + 0.],
[2. * d4 + DIM3, DIM5 + 0.], [2. * d4 + DIM3, DIM5 + d2], [d4 + DIM3, DIM5 + d2],
[d4 + DIM3, DIM5 + d1], [d4, DIM5 + d1], [d4, DIM5 + d2], [0, DIM5 + d2]
]
facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0]
]
hat = CustomSection(points, facets, holes, control_points, shift=shift)
# Create a list of the sections to merge
section_list = [bottom_plate, hat]
# Merge the three sections into one geometry
geometry = MergedSection(section_list)
# Clean the geometry and print information to the terminal
geometry.clean_geometry(verbose=False)
self.control_points = geometry.control_points
self.shift = geometry.shift
self.points = geometry.points
self.facets = geometry.facets
self.holes = geometry.holes
def create_mesh(self, mesh_sizes):
"""Creates a quadratic triangular mesh from the Geometry object. This is overloaded here to
allow specifying only one mesh_size which is used for both regions in the Hat1 section.
:param mesh_sizes: A list of maximum element areas corresponding to each region within the
cross-section geometry.
:type mesh_sizes: list[float]
:return: Object containing generated mesh data
:rtype: :class:`meshpy.triangle.MeshInfo`
:raises AssertionError: If the number of mesh sizes does not match the number of regions
"""
# duplicate the single mesh size so that it applies to both regions (plate and hat)
mesh_sizes *= 2
msg = "Number of mesh_sizes ({0}), should match the number of regions ({1})".format(
len(mesh_sizes), len(self.control_points)
)
assert len(mesh_sizes) == len(self.control_points), msg
return create_mesh(self.points, self.facets, self.holes, self.control_points, mesh_sizes)
def getStressPoints(self, shift=(0., 0.)):
""" Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the origin by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-0.5*self.DIM1 - shift[0], -shift[1])
D = (0.5*self.DIM1 - shift[0], -shift[1])
E = (-0.5*self.DIM3 - shift[0], self.DIM2 - shift[1])
F = (0.5*self.DIM3 - shift[0], self.DIM2 - shift[1])
return C, D, E, F
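# Illustrative sketch (not part of the original module): HAT1 consists of two
# regions (bottom plate and hat), but ``create_mesh`` above duplicates a single
# mesh size so the caller only has to supply one; ``_demo_hat1_mesh`` is a
# hypothetical helper using the dimensions of the docstring example.
def _demo_hat1_mesh():
    hat1 = HAT1Section(DIM1=4.0, DIM2=2.0, DIM3=1.5, DIM4=0.1875, DIM5=0.375)
    return hat1.create_mesh(mesh_sizes=[0.005])  # one size, used for both regions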
class HEXASection(Geometry):
""" Constructs a HEXA (hexagon) section with the center at the origin *(0, 0)*, with three
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more details.
Added by JohnDN90.
:param float DIM1: Spacing between bottom right point and right most point
:param float DIM2: Width (x) of hexagon
:param float DIM3: Depth (y) of hexagon
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a hexagonal cross-section with a depth of 1.5 and width of 2.0,
and generates a mesh with a maximum triangular area of 0.005::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.HEXASection(DIM1=0.5, DIM2=2.0, DIM3=1.5)
mesh = geometry.create_mesh(mesh_sizes=[0.005])
.. figure:: ../images/sections/hexa_geometry.png
:align: center
:scale: 75 %
HEXA section geometry.
.. figure:: ../images/sections/hexa_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, shift=[0, 0]):
"""Inits the HEXASection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
# Ensure dimensions are physically relevant
np.testing.assert_(DIM2 > DIM1, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM2, 0.5*DIM3]]
shift = [-0.5*DIM2+shift[0], -0.5*DIM3+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [
[DIM1, 0.], [DIM2-DIM1, 0.], [DIM2, 0.5*DIM3], [DIM2-DIM1, DIM3], [DIM1, DIM3],
[0., 0.5*DIM3]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], 0.5*self.DIM3-shift[1])
D = (-shift[0], -0.5*self.DIM3-shift[1])
E = (0.5*self.DIM2-shift[0], -shift[1])
F = (-0.5*self.DIM2-shift[0], -shift[1])
return C, D, E, F
class NISection(Geometry):
"""Constructs Nastran's I section with the bottom flange's middle center at the origin
*(0, 0)*, with six parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_
[4]_ for definition of parameters. Added by JohnDN90.
:param float DIM1: Depth(y) of the I-section
:param float DIM2: Width (x) of bottom flange
:param float DIM3: Width (x) of top flange
:param float DIM4: Thickness of web
:param float DIM5: Thickness of bottom flange
:param float DIM6: Thickness of top flange
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a Nastran I cross-section with a depth of 5.0, and generates a
mesh with a maximum triangular area of 0.008::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.NISection(
DIM1=5.0, DIM2=2.0, DIM3=3.0, DIM4=0.25, DIM5=0.375, DIM6=0.5
)
mesh = geometry.create_mesh(mesh_sizes=[0.008])
.. figure:: ../images/sections/ni_geometry.png
:align: center
:scale: 75 %
Nastran's I section geometry.
.. figure:: ../images/sections/ni_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, DIM5, DIM6, shift=[0, 0]):
"""Inits the NISection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
DIM5 *= 1.0
DIM6 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
self.DIM5 = DIM5
self.DIM6 = DIM6
# Ensure dimensions are physically relevant
np.testing.assert_((DIM5 + DIM6) < DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM4 < DIM3, "Invalid geometry specified.")
np.testing.assert_(DIM4 < DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM2, 0.5*DIM5]]
shift = [-0.5*DIM2+shift[0], -0.5*DIM1+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
db = 0.5*(DIM2 - DIM4)
dt = 0.5*(DIM3 - DIM4)
self.points = [
[0., 0.], [DIM2, 0.], [DIM2, DIM5], [db+DIM4, DIM5], [db + DIM4, DIM1-DIM6],
[db+DIM4+dt, DIM1-DIM6], [db+DIM4+dt, DIM1], [db-dt, DIM1], [db-dt, DIM1-DIM6],
[db, DIM1-DIM6], [db, DIM5], [0, DIM5]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM3-shift[0], 0.5*self.DIM1-shift[1])
D = (0.5*self.DIM3-shift[0], -0.5*self.DIM1-shift[1])
E = (-0.5*self.DIM3-shift[0], -0.5*self.DIM1-shift[1])
F = (-0.5*self.DIM3-shift[0], 0.5*self.DIM1-shift[1])
return C, D, E, F
class I1Section(Geometry):
"""Constructs a I1 section with the web's middle center at the origin *(0, 0)*, with four
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more details.
Added by JohnDN90.
:param float DIM1: Twice distance from web end to flange end
:param float DIM2: Thickness of web
:param float DIM3: Length of web (spacing between flanges)
:param float DIM4: Depth (y) of the I1-section
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates an I1 cross-section with a depth of
5.0 and width of 1.75, and generates a mesh with a maximum triangular area of
0.02::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.I1Section(DIM1=1.0, DIM2=0.75, DIM3=4.0, DIM4=5.0)
mesh = geometry.create_mesh(mesh_sizes=[0.02])
.. figure:: ../images/sections/i1_geometry.png
:align: center
:scale: 75 %
I1 section geometry.
.. figure:: ../images/sections/i1_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the I1section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 > DIM3, "Invalid geometry specified.")
shift = [-0.5*(DIM1+DIM2)+shift[0], -0.5*DIM4+shift[1]]
# assign control point
control_points = [[0.5*(DIM1+DIM2), 0.5*DIM4]]
super().__init__(control_points, shift)
# construct the points and facets
t = 0.5*(DIM4 - DIM3)
self.points = [
[0., 0.], [DIM1+DIM2, 0.], [DIM1+DIM2, t], [0.5*DIM1+DIM2, t], [0.5*DIM1+DIM2, t+DIM3],
[DIM1+DIM2, t+DIM3], [DIM1+DIM2, DIM4], [0., DIM4], [0., t+DIM3], [0.5*DIM1, t+DIM3],
[0.5*DIM1, t], [0., t]
]
self.facets = [
[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8], [8, 9], [9, 10],
[10, 11], [11, 0]
]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*(self.DIM1+self.DIM2)-shift[0], 0.5*self.DIM4-shift[1])
D = (0.5*(self.DIM1+self.DIM2)-shift[0], -0.5*self.DIM4-shift[1])
E = (-0.5*(self.DIM1+self.DIM2)-shift[0], -0.5*self.DIM4-shift[1])
F = (-0.5*(self.DIM1+self.DIM2)-shift[0], 0.5*self.DIM4-shift[1])
return C, D, E, F
class LSection(Geometry):
"""Constructs a L section with the intersection's center at the origin *(0, 0)*, with four
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ for more details.
Added by JohnDN90.
:param float DIM1: Width (x) of the L-section
:param float DIM2: Depth (y) of the L-section
:param float DIM3: Thickness of flange (horizontal portion)
:param float DIM4: Thickness of web (vertical portion)
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
    The following example creates an L cross-section with a depth of 6.0 and width of 3.0, and
generates a mesh with a maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.LSection(DIM1=3.0, DIM2=6.0, DIM3=0.375, DIM4=0.625)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/l_geometry.png
:align: center
:scale: 75 %
L section geometry.
.. figure:: ../images/sections/l_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the LSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 < DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM3 < DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1, 0.5*DIM3]]
shift = [-0.5*DIM4+shift[0], -0.5*DIM3+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
self.points = [[0, 0], [DIM1, 0], [DIM1, DIM3], [DIM4, DIM3], [DIM4, DIM2], [0, DIM2]]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM4-shift[0], self.DIM2-0.5*self.DIM3-shift[1])
D = (self.DIM1-0.5*self.DIM4-shift[0], -0.5*self.DIM3-shift[1])
E = (-0.5*self.DIM4-shift[0], -0.5*self.DIM3-shift[1])
F = (-0.5*self.DIM4-shift[0], self.DIM2-0.5*self.DIM3-shift[1])
return C, D, E, F
class RODSection(Geometry):
"""Constructs a circular rod section with the center at the origin *(0, 0)*, with one parameter
defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more details. Added by
JohnDN90.
:param float DIM1: Radius of the circular rod section
:param int n: Number of points discretising the circle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a circular rod with a radius of 3.0 and 50 points discretising
the boundary, and generates a mesh with a maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.RODSection(DIM1=3.0, n=50)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/rod_geometry.png
:align: center
:scale: 75 %
Rod section geometry.
.. figure:: ../images/sections/rod_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, n, shift=[0, 0]):
"""Inits the RODSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
self.DIM1 = DIM1
# assign control point
control_points = [[0, 0]]
super().__init__(control_points, shift)
# loop through each point on the circle
d = 2.0*DIM1
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of the point
x = 0.5 * d * np.cos(theta)
y = 0.5 * d * np.sin(theta)
# append the current point to the points list
self.points.append([x, y])
# if we are not at the last point
if i != n - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the circle
else:
self.facets.append([i, 0])
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], self.DIM1-shift[1])
D = (self.DIM1-shift[0], -shift[1])
E = (-shift[0], -self.DIM1-shift[1])
F = (-self.DIM1-shift[0], -shift[1])
return C, D, E, F
class TSection(Geometry):
"""Constructs a T section with the top flange's middle center at the origin *(0, 0)*, with four
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ [5]_ for more
details. Added by JohnDN90.
:param float DIM1: Width (x) of top flange
:param float DIM2: Depth (y) of the T-section
:param float DIM3: Thickness of top flange
:param float DIM4: Thickness of web
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a T cross-section with a depth of 4.0 and width of 3.0, and
generates a mesh with a maximum triangular area of 0.001::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.TSection(DIM1=3.0, DIM2=4.0, DIM3=0.375, DIM4=0.25)
mesh = geometry.create_mesh(mesh_sizes=[0.001])
.. figure:: ../images/sections/t_geometry.png
:align: center
:scale: 75 %
T section geometry.
.. figure:: ../images/sections/t_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the TSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 < DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM3 < DIM2, "Invalid geometry specified.")
d = DIM2
b = DIM1
t_f = DIM3
t_w = DIM4
r = 0
n_r = 1
shift = [-DIM1/2.0+shift[0], -(DIM2-DIM3/2.0)+shift[1]]
# assign control point
control_points = [[b * 0.5, d - t_f * 0.5]]
super().__init__(control_points, shift)
# add first two points
self.points.append([b * 0.5 - t_w * 0.5, 0])
self.points.append([b * 0.5 + t_w * 0.5, 0])
# construct the top right radius
pt = [b * 0.5 + t_w * 0.5 + r, d - t_f - r]
self.draw_radius(pt, r, np.pi, n_r, False)
# add next four points
self.points.append([b, d - t_f])
self.points.append([b, d])
self.points.append([0, d])
self.points.append([0, d - t_f])
# construct the top left radius
pt = [b * 0.5 - t_w * 0.5 - r, d - t_f - r]
self.draw_radius(pt, r, 0.5 * np.pi, n_r, False)
# build the facet list
for i in range(len(self.points)):
# if we are not at the last point
if i != len(self.points) - 1:
self.facets.append([i, i + 1])
# if we are at the last point, complete the loop
else:
self.facets.append([len(self.points) - 1, 0])
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""
Returns the coordinates of the stress evaluation points relative to the origin
of the cross-section. The shift parameter can be used to make the coordinates
relative to the centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], 0.5*self.DIM3-shift[1])
D = (0.5*self.DIM1-shift[0], 0.5*self.DIM3-shift[1])
E = (-shift[0], 0.5*self.DIM3-self.DIM2-shift[1])
F = (-0.5*self.DIM1-shift[0], 0.5*self.DIM3-shift[1])
return C, D, E, F
class T1Section(Geometry):
"""Constructs a T1 section with the right flange's middle center at the origin *(0, 0)*, with
four parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more
details. Added by JohnDN90.
:param float DIM1: Depth (y) of T1-section
:param float DIM2: Length (x) of web
:param float DIM3: Thickness of right flange
:param float DIM4: Thickness of web
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a T1 cross-section with a depth of 3.0 and width of 3.875, and
generates a mesh with a maximum triangular area of 0.001::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.T1Section(DIM1=3.0, DIM2=3.5, DIM3=0.375, DIM4=0.25)
mesh = geometry.create_mesh(mesh_sizes=[0.001])
.. figure:: ../images/sections/t1_geometry.png
:align: center
:scale: 75 %
T1 section geometry.
.. figure:: ../images/sections/t1_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the T1section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 < DIM1, "Invalid geometry specified.")
shift = [-0.5*DIM3+shift[0], -0.5*DIM1+shift[1]]
# assign control point
control_points = [[0.5*DIM3, 0.5*DIM1]]
super().__init__(control_points, shift)
# construct the points and facets
d1 = (DIM1 - DIM4) / 2.0
self.points = [
[0, 0], [DIM3, 0], [DIM3, DIM1], [0, DIM1], [0, d1 + DIM4], [-DIM2, d1 + DIM4],
[-DIM2, d1], [0, d1]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM3-shift[0], -shift[1])
D = (0.5*self.DIM3-shift[0], -0.5*self.DIM1-shift[1])
E = (-0.5*self.DIM3-self.DIM2-shift[0], -shift[1])
F = (0.5*self.DIM3-shift[0], 0.5*self.DIM1-shift[1])
return C, D, E, F
class T2Section(Geometry):
"""Constructs a T2 section with the bottom flange's middle center at the origin *(0, 0)*, with
four parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more
details. Added by JohnDN90.
:param float DIM1: Width (x) of T2-section
:param float DIM2: Depth (y) of T2-section
:param float DIM3: Thickness of bottom flange
:param float DIM4: Thickness of web
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a T2 cross-section with a depth of 4.0 and width of 3.0, and
generates a mesh with a maximum triangular area of 0.005::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.T2Section(DIM1=3.0, DIM2=4.0, DIM3=0.375, DIM4=0.5)
mesh = geometry.create_mesh(mesh_sizes=[0.005])
.. figure:: ../images/sections/t2_geometry.png
:align: center
:scale: 75 %
T2 section geometry.
.. figure:: ../images/sections/t2_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the T2Section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 < DIM1, "Invalid geometry specified.")
np.testing.assert_(DIM3 < DIM2, "Invalid geometry specified.")
# assign control point
control_points = [[0.5*DIM1, 0.5*DIM3]]
shift = [-0.5*DIM1+shift[0], -0.5*DIM3+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
d1 = 0.5*(DIM1 - DIM4)
self.points = [
[0., 0.], [DIM1, 0.], [DIM1, DIM3], [DIM1-d1, DIM3], [DIM1-d1, DIM2], [d1, DIM2],
[d1, DIM3], [0, DIM3]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM4-shift[0], self.DIM2-0.5*self.DIM3-shift[1])
D = (0.5*self.DIM1-shift[0], -0.5*self.DIM3-shift[1])
E = (-0.5*self.DIM1-shift[0], -0.5*self.DIM3-shift[1])
F = (-0.5*self.DIM4-shift[0], self.DIM2-0.5*self.DIM3-shift[1])
return C, D, E, F
class TUBESection(Geometry):
"""Constructs a circular tube section with the center at the origin *(0, 0)*, with two
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more
details. Added by JohnDN90.
:param float DIM1: Outer radius of the circular tube section
:param float DIM2: Inner radius of the circular tube section
:param int n: Number of points discretising the circle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a circular tube cross-section with an outer radius of 3.0 and an
inner radius of 2.5, and generates a mesh with 37 points discretising the boundaries and a
maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.TUBESection(DIM1=3.0, DIM2=2.5, n=37)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/tube_geometry.png
:align: center
:scale: 75 %
TUBE section geometry.
.. figure:: ../images/sections/tube_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, n, shift=[0, 0]):
"""Inits the TUBESection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
# Ensure dimensions are physically relevant
np.testing.assert_(DIM2 < DIM1, "Invalid geometry specified.")
d = 2.0*DIM1
t = DIM1-DIM2
# assign control point
control_points = [[d * 0.5 - t * 0.5, 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the CHS
self.holes = [[0., 0.]]
# loop through each point of the CHS
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d * np.cos(theta)
y_outer = 0.5 * d * np.sin(theta)
x_inner = (0.5 * d - t) * np.cos(theta)
y_inner = (0.5 * d - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], self.DIM1-shift[1])
D = (self.DIM1-shift[0], -shift[1])
E = (-shift[0], -self.DIM1-shift[1])
F = (-self.DIM1-shift[0], -shift[1])
return C, D, E, F
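# Editor note (hedged): TUBE2Section below builds the same geometry as TUBESection
# above; the only difference is that DIM2 is read as the wall thickness (t = DIM2)
# rather than as the inner radius (t = DIM1 - DIM2).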
class TUBE2Section(Geometry):
"""Constructs a circular TUBE2 section with the center at the origin *(0, 0)*, with two
parameters defining dimensions. See MSC Nastran documentation [1]_ for more details. Added by
JohnDN90.
:param float DIM1: Outer radius of the circular tube section
:param float DIM2: Thickness of wall
:param int n: Number of points discretising the circle
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
The following example creates a circular TUBE2 cross-section with an outer radius of 3.0 and a
    wall thickness of 0.5, and generates a mesh with 37 points discretising the boundaries and a
maximum triangular area of 0.01::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.TUBE2Section(DIM1=3.0, DIM2=0.5, n=37)
mesh = geometry.create_mesh(mesh_sizes=[0.01])
.. figure:: ../images/sections/tube2_geometry.png
:align: center
:scale: 75 %
TUBE2 section geometry.
.. figure:: ../images/sections/tube2_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, n, shift=[0, 0]):
"""Inits the TUBE2Section class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
# Ensure dimensions are physically relevant
np.testing.assert_(DIM2 < DIM1, "Invalid geometry specified.")
d = 2.0*DIM1
t = DIM2
# assign control point
control_points = [[d * 0.5 - t * 0.5, 0]]
super().__init__(control_points, shift)
# specify a hole in the centre of the section
self.holes = [[0., 0.]]
# loop through each point of the section
for i in range(n):
# determine polar angle
theta = i * 2 * np.pi * 1.0 / n
# calculate location of outer and inner points
x_outer = 0.5 * d * np.cos(theta)
y_outer = 0.5 * d * np.sin(theta)
x_inner = (0.5 * d - t) * np.cos(theta)
y_inner = (0.5 * d - t) * np.sin(theta)
# append the current points to the points list
self.points.append([x_outer, y_outer])
self.points.append([x_inner, y_inner])
# if we are not at the last point
if i != n - 1:
self.facets.append([i * 2, i * 2 + 2])
self.facets.append([i * 2 + 1, i * 2 + 3])
# if we are at the last point, complete the circle
else:
self.facets.append([i * 2, 0])
self.facets.append([i * 2 + 1, 1])
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (-shift[0], self.DIM1-shift[1])
D = (self.DIM1-shift[0], -shift[1])
E = (-shift[0], -self.DIM1-shift[1])
F = (-self.DIM1-shift[0], -shift[1])
return C, D, E, F
class ZSection(Geometry):
"""Constructs a Z section with the web's middle center at the origin *(0, 0)*, with four
parameters defining dimensions. See Nastran documentation [1]_ [2]_ [3]_ [4]_ for more details.
Added by JohnDN90.
:param float DIM1: Width (x) of horizontal members
:param float DIM2: Thickness of web
:param float DIM3: Spacing between horizontal members (length of web)
:param float DIM4: Depth (y) of Z-section
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: list[float, float]
    The following example creates a Z cross-section with a depth of 4.0 and width of
2.75, and generates a mesh with a maximum triangular area of 0.005::
import sectionproperties.pre.nastran_sections as nsections
geometry = nsections.ZSection(DIM1=1.125, DIM2=0.5, DIM3=3.5, DIM4=4.0)
mesh = geometry.create_mesh(mesh_sizes=[0.005])
.. figure:: ../images/sections/z_geometry.png
:align: center
:scale: 75 %
Z section geometry.
.. figure:: ../images/sections/z_mesh.png
:align: center
:scale: 75 %
Mesh generated from the above geometry.
"""
def __init__(self, DIM1, DIM2, DIM3, DIM4, shift=[0, 0]):
"""Inits the ZSection class."""
# force dimensions to be floating point values
DIM1 *= 1.0
DIM2 *= 1.0
DIM3 *= 1.0
DIM4 *= 1.0
self.DIM1 = DIM1
self.DIM2 = DIM2
self.DIM3 = DIM3
self.DIM4 = DIM4
# Ensure dimensions are physically relevant
np.testing.assert_(DIM4 > DIM3, "Invalid geometry specified.")
# assign control point
control_points = [[DIM1+0.5*DIM2, 0.5*DIM4]]
shift = [-0.5*(DIM1+DIM2)+shift[0], -0.5*DIM4+shift[1]]
super().__init__(control_points, shift)
# construct the points and facets
t = 0.5*(DIM4 - DIM3)
self.points = [
[DIM1, 0.], [2.*DIM1+DIM2, 0.], [2.*DIM1+DIM2, t], [DIM1+DIM2, t], [DIM1+DIM2, DIM4],
[0., DIM4], [0., DIM4-t], [DIM1, DIM4-t]
]
self.facets = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 0]]
self.shift_section()
def getStressPoints(self, shift=(0., 0.)):
"""Returns the coordinates of the stress evaluation points relative to the origin of the
cross-section. The shift parameter can be used to make the coordinates relative to the
centroid or the shear center.
:param shift: Vector that shifts the cross-section by *(x, y)*
:type shift: tuple(float, float)
:returns: Stress evaluation points relative to shifted origin - C, D, E, F
"""
C = (0.5*self.DIM2-shift[0], 0.5*self.DIM4-shift[1])
D = (0.5*self.DIM2+self.DIM1-shift[0], -0.5*self.DIM4-shift[1])
E = (-0.5*self.DIM2-shift[0], -0.5*self.DIM4-shift[1])
F = (-0.5*self.DIM2-self.DIM1-shift[0], 0.5*self.DIM4-shift[1])
return C, D, E, F
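if __name__ == "__main__":
    # Hedged usage sketch (editor addition, not part of the library): it assumes the
    # Geometry base class defined earlier in this module and simply exercises one of
    # the sections above together with its stress evaluation points.
    z_section = ZSection(DIM1=1.125, DIM2=0.5, DIM3=3.5, DIM4=4.0)
    # Points about the section origin, then about an assumed offset of (1.0, 2.0)
    print(z_section.getStressPoints())
    print(z_section.getStressPoints(shift=(1.0, 2.0)))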
| 36.475168
| 99
| 0.590466
|
477cd6d865b307291a4a798405b8d7b2ff819de1
| 17,248
|
py
|
Python
|
tests/providers/google/cloud/hooks/test_dataproc.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | null | null | null |
tests/providers/google/cloud/hooks/test_dataproc.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | 1
|
2019-05-14T14:32:40.000Z
|
2019-05-14T14:32:40.000Z
|
tests/providers/google/cloud/hooks/test_dataproc.py
|
dorranh/airflow
|
1a9a2cadcf8606cfcb729d1323dd33dfacc64633
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import mock
from google.cloud.dataproc_v1beta2.types import JobStatus # pylint: disable=no-name-in-module
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.dataproc import DataprocHook, DataProcJobBuilder
from airflow.version import version
AIRFLOW_VERSION = "v" + version.replace(".", "-").replace("+", "-")
JOB = {"job": "test-job"}
JOB_ID = "test-id"
TASK_ID = "test-task-id"
GCP_LOCATION = "global"
GCP_PROJECT = "test-project"
CLUSTER = {"test": "test"}
CLUSTER_NAME = "cluster-name"
PARENT = "parent"
NAME = "name"
BASE_STRING = "airflow.providers.google.cloud.hooks.base.{}"
DATAPROC_STRING = "airflow.providers.google.cloud.hooks.dataproc.{}"
def mock_init(*args, **kwargs):
pass
class TestDataprocHook(unittest.TestCase):
def setUp(self):
with mock.patch(
BASE_STRING.format("CloudBaseHook.__init__"), new=mock_init
):
self.hook = DataprocHook(gcp_conn_id="test")
@mock.patch(DATAPROC_STRING.format("DataprocHook._get_credentials"))
@mock.patch(
DATAPROC_STRING.format("DataprocHook.client_info"),
new_callable=mock.PropertyMock,
)
@mock.patch(DATAPROC_STRING.format("ClusterControllerClient"))
def test_get_cluster_client(
self, mock_client, mock_client_info, mock_get_credentials
):
self.hook.get_cluster_client(location=GCP_LOCATION)
mock_client.assert_called_once_with(
credentials=mock_get_credentials.return_value,
client_info=mock_client_info.return_value,
client_options={
"api_endpoint": "{}-dataproc.googleapis.com:443".format(GCP_LOCATION)
},
)
@mock.patch(DATAPROC_STRING.format("DataprocHook._get_credentials"))
@mock.patch(
DATAPROC_STRING.format("DataprocHook.client_info"),
new_callable=mock.PropertyMock,
)
@mock.patch(DATAPROC_STRING.format("WorkflowTemplateServiceClient"))
def test_get_template_client(
self, mock_client, mock_client_info, mock_get_credentials
):
_ = self.hook.get_template_client
mock_client.assert_called_once_with(
credentials=mock_get_credentials.return_value,
client_info=mock_client_info.return_value,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook._get_credentials"))
@mock.patch(
DATAPROC_STRING.format("DataprocHook.client_info"),
new_callable=mock.PropertyMock,
)
@mock.patch(DATAPROC_STRING.format("JobControllerClient"))
def test_get_job_client(self, mock_client, mock_client_info, mock_get_credentials):
self.hook.get_job_client(location=GCP_LOCATION)
mock_client.assert_called_once_with(
credentials=mock_get_credentials.return_value,
client_info=mock_client_info.return_value,
client_options={
"api_endpoint": "{}-dataproc.googleapis.com:443".format(GCP_LOCATION)
},
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_cluster_client"))
def test_create_cluster(self, mock_client):
self.hook.create_cluster(
project_id=GCP_PROJECT, region=GCP_LOCATION, cluster=CLUSTER
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.create_cluster.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
cluster=CLUSTER,
metadata=None,
request_id=None,
retry=None,
timeout=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_cluster_client"))
def test_delete_cluster(self, mock_client):
self.hook.delete_cluster(
project_id=GCP_PROJECT, region=GCP_LOCATION, cluster_name=CLUSTER_NAME
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.delete_cluster.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
cluster_name=CLUSTER_NAME,
cluster_uuid=None,
metadata=None,
request_id=None,
retry=None,
timeout=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_cluster_client"))
def test_diagnose_cluster(self, mock_client):
self.hook.diagnose_cluster(
project_id=GCP_PROJECT, region=GCP_LOCATION, cluster_name=CLUSTER_NAME
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.diagnose_cluster.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
cluster_name=CLUSTER_NAME,
metadata=None,
retry=None,
timeout=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_cluster_client"))
def test_get_cluster(self, mock_client):
self.hook.get_cluster(
project_id=GCP_PROJECT, region=GCP_LOCATION, cluster_name=CLUSTER_NAME
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.get_cluster.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
cluster_name=CLUSTER_NAME,
metadata=None,
retry=None,
timeout=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_cluster_client"))
def test_list_clusters(self, mock_client):
filter_ = "filter"
self.hook.list_clusters(
project_id=GCP_PROJECT, region=GCP_LOCATION, filter_=filter_
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.list_clusters.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
filter_=filter_,
page_size=None,
metadata=None,
retry=None,
timeout=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_cluster_client"))
def test_update_cluster(self, mock_client):
update_mask = "update-mask"
self.hook.update_cluster(
project_id=GCP_PROJECT,
location=GCP_LOCATION,
cluster=CLUSTER,
cluster_name=CLUSTER_NAME,
update_mask=update_mask,
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.update_cluster.assert_called_once_with(
project_id=GCP_PROJECT,
region=GCP_LOCATION,
cluster=CLUSTER,
cluster_name=CLUSTER_NAME,
update_mask=update_mask,
graceful_decommission_timeout=None,
metadata=None,
request_id=None,
retry=None,
timeout=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_template_client"))
def test_create_workflow_template(self, mock_client):
template = {"test": "test"}
mock_client.region_path.return_value = PARENT
self.hook.create_workflow_template(
location=GCP_LOCATION, template=template, project_id=GCP_PROJECT
)
mock_client.region_path.assert_called_once_with(GCP_PROJECT, GCP_LOCATION)
mock_client.create_workflow_template.assert_called_once_with(
parent=PARENT, template=template, retry=None, timeout=None, metadata=None
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_template_client"))
def test_instantiate_workflow_template(self, mock_client):
template_name = "template_name"
mock_client.workflow_template_path.return_value = NAME
self.hook.instantiate_workflow_template(
location=GCP_LOCATION, template_name=template_name, project_id=GCP_PROJECT
)
mock_client.workflow_template_path.assert_called_once_with(
GCP_PROJECT, GCP_LOCATION, template_name
)
mock_client.instantiate_workflow_template.assert_called_once_with(
name=NAME,
version=None,
parameters=None,
request_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_template_client"))
def test_instantiate_inline_workflow_template(self, mock_client):
template = {"test": "test"}
mock_client.region_path.return_value = PARENT
self.hook.instantiate_inline_workflow_template(
location=GCP_LOCATION, template=template, project_id=GCP_PROJECT
)
mock_client.region_path.assert_called_once_with(GCP_PROJECT, GCP_LOCATION)
mock_client.instantiate_inline_workflow_template.assert_called_once_with(
parent=PARENT,
template=template,
request_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_job"))
def test_wait_for_job(self, mock_get_job):
mock_get_job.side_effect = [
mock.MagicMock(status=mock.MagicMock(state=JobStatus.RUNNING)),
mock.MagicMock(status=mock.MagicMock(state=JobStatus.ERROR)),
]
with self.assertRaises(AirflowException):
self.hook.wait_for_job(
job_id=JOB_ID,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
wait_time=0,
)
calls = [
mock.call(location=GCP_LOCATION, job_id=JOB_ID, project_id=GCP_PROJECT),
mock.call(location=GCP_LOCATION, job_id=JOB_ID, project_id=GCP_PROJECT),
]
        mock_get_job.assert_has_calls(calls)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_job_client"))
def test_get_job(self, mock_client):
self.hook.get_job(location=GCP_LOCATION, job_id=JOB_ID, project_id=GCP_PROJECT)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.get_job.assert_called_once_with(
region=GCP_LOCATION,
job_id=JOB_ID,
project_id=GCP_PROJECT,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_job_client"))
def test_submit_job(self, mock_client):
self.hook.submit_job(location=GCP_LOCATION, job=JOB, project_id=GCP_PROJECT)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.submit_job.assert_called_once_with(
region=GCP_LOCATION,
job=JOB,
project_id=GCP_PROJECT,
request_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.wait_for_job"))
@mock.patch(DATAPROC_STRING.format("DataprocHook.submit_job"))
def test_submit(self, mock_submit_job, mock_wait_for_job):
mock_submit_job.return_value.reference.job_id = JOB_ID
with self.assertWarns(DeprecationWarning):
self.hook.submit(project_id=GCP_PROJECT, job=JOB, region=GCP_LOCATION)
mock_submit_job.assert_called_once_with(
location=GCP_LOCATION, project_id=GCP_PROJECT, job=JOB
)
mock_wait_for_job.assert_called_once_with(
location=GCP_LOCATION, project_id=GCP_PROJECT, job_id=JOB_ID
)
@mock.patch(DATAPROC_STRING.format("DataprocHook.get_job_client"))
def test_cancel_job(self, mock_client):
self.hook.cancel_job(
location=GCP_LOCATION, job_id=JOB_ID, project_id=GCP_PROJECT
)
mock_client.assert_called_once_with(location=GCP_LOCATION)
mock_client.return_value.cancel_job.assert_called_once_with(
region=GCP_LOCATION,
job_id=JOB_ID,
project_id=GCP_PROJECT,
retry=None,
timeout=None,
metadata=None,
)
class TestDataProcJobBuilder(unittest.TestCase):
def setUp(self) -> None:
self.job_type = "test"
self.builder = DataProcJobBuilder(
project_id=GCP_PROJECT,
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
job_type=self.job_type,
properties={"test": "test"},
)
@mock.patch(DATAPROC_STRING.format("uuid.uuid4"))
def test_init(self, mock_uuid):
mock_uuid.return_value = "uuid"
properties = {"test": "test"}
job = {
"job": {
"labels": {"airflow-version": AIRFLOW_VERSION},
"placement": {"cluster_name": CLUSTER_NAME},
"reference": {"job_id": TASK_ID + "_uuid", "project_id": GCP_PROJECT},
"test": {"properties": properties},
}
}
builder = DataProcJobBuilder(
project_id=GCP_PROJECT,
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
job_type="test",
properties=properties,
)
self.assertDictEqual(job, builder.job)
def test_add_labels(self):
labels = {"key": "value"}
self.builder.add_labels(labels)
self.assertIn("key", self.builder.job["job"]["labels"])
self.assertEqual("value", self.builder.job["job"]["labels"]["key"])
def test_add_variables(self):
variables = ["variable"]
self.builder.add_variables(variables)
self.assertEqual(
variables, self.builder.job["job"][self.job_type]["script_variables"]
)
def test_add_args(self):
args = ["args"]
self.builder.add_args(args)
self.assertEqual(args, self.builder.job["job"][self.job_type]["args"])
def test_add_query(self):
query = ["query"]
self.builder.add_query(query)
self.assertEqual(
{"queries": [query]}, self.builder.job["job"][self.job_type]["query_list"]
)
def test_add_query_uri(self):
query_uri = "query_uri"
self.builder.add_query_uri(query_uri)
self.assertEqual(
query_uri, self.builder.job["job"][self.job_type]["query_file_uri"]
)
def test_add_jar_file_uris(self):
jar_file_uris = ["jar_file_uris"]
self.builder.add_jar_file_uris(jar_file_uris)
self.assertEqual(
jar_file_uris, self.builder.job["job"][self.job_type]["jar_file_uris"]
)
def test_add_archive_uris(self):
archive_uris = ["archive_uris"]
self.builder.add_archive_uris(archive_uris)
self.assertEqual(
archive_uris, self.builder.job["job"][self.job_type]["archive_uris"]
)
def test_add_file_uris(self):
file_uris = ["file_uris"]
self.builder.add_file_uris(file_uris)
self.assertEqual(file_uris, self.builder.job["job"][self.job_type]["file_uris"])
def test_add_python_file_uris(self):
python_file_uris = ["python_file_uris"]
self.builder.add_python_file_uris(python_file_uris)
self.assertEqual(
python_file_uris, self.builder.job["job"][self.job_type]["python_file_uris"]
)
def test_set_main_error(self):
with self.assertRaises(Exception):
self.builder.set_main("test", "test")
def test_set_main_class(self):
main = "main"
self.builder.set_main(main_class=main, main_jar=None)
self.assertEqual(main, self.builder.job["job"][self.job_type]["main_class"])
def test_set_main_jar(self):
main = "main"
self.builder.set_main(main_class=None, main_jar=main)
self.assertEqual(
main, self.builder.job["job"][self.job_type]["main_jar_file_uri"]
)
def test_set_python_main(self):
main = "main"
self.builder.set_python_main(main)
self.assertEqual(
main, self.builder.job["job"][self.job_type]["main_python_file_uri"]
)
@mock.patch(DATAPROC_STRING.format("uuid.uuid4"))
def test_set_job_name(self, mock_uuid):
uuid = "test_uuid"
mock_uuid.return_value = uuid
name = "name"
self.builder.set_job_name(name)
name += "_" + uuid[:8]
self.assertEqual(name, self.builder.job["job"]["reference"]["job_id"])
def test_build(self):
self.assertEqual(self.builder.job, self.builder.build())
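if __name__ == "__main__":
    # Hedged convenience entry point (editor addition): lets this test module be run
    # directly with the standard library test runner.
    unittest.main()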
| 37.577342
| 94
| 0.659439
|
661e7836a9d7a82b1ef6be7a3b4f25e82394b3ce
| 1,643
|
py
|
Python
|
src/python/zquantum/core/bitstring_distribution/distance_measures/clipped_negative_log_likelihood.py
|
alexjuda2/z-quantum-core
|
c258100dbd091f0b22495b77b36399426ae9abac
|
[
"Apache-2.0"
] | 24
|
2020-04-15T17:36:59.000Z
|
2022-01-25T05:02:14.000Z
|
src/python/zquantum/core/bitstring_distribution/distance_measures/clipped_negative_log_likelihood.py
|
alexjuda2/z-quantum-core
|
c258100dbd091f0b22495b77b36399426ae9abac
|
[
"Apache-2.0"
] | 177
|
2020-04-23T15:19:59.000Z
|
2022-03-30T18:06:17.000Z
|
src/python/zquantum/core/bitstring_distribution/distance_measures/clipped_negative_log_likelihood.py
|
alexjuda2/z-quantum-core
|
c258100dbd091f0b22495b77b36399426ae9abac
|
[
"Apache-2.0"
] | 19
|
2020-06-24T10:56:02.000Z
|
2021-09-30T13:02:21.000Z
|
import math
from typing import TYPE_CHECKING, Dict
if TYPE_CHECKING:
from zquantum.core.bitstring_distribution import BitstringDistribution
def compute_clipped_negative_log_likelihood(
target_distribution: "BitstringDistribution",
measured_distribution: "BitstringDistribution",
distance_measure_parameters: Dict,
) -> float:
"""Compute the value of the clipped negative log likelihood between a target
bitstring distribution and a measured bitstring distribution.
See Equation (4) in https://advances.sciencemag.org/content/5/10/eaaw9918?rss=1
Args:
target_distribution: The target bitstring probability distribution.
measured_distribution: The measured bitstring probability distribution.
distance_measure_parameters:
epsilon (float): The small parameter needed to regularize log computation
when argument is zero. The default value is 1e-9.
Returns:
The value of the clipped negative log likelihood
"""
epsilon = distance_measure_parameters.get("epsilon", 1e-9)
value = 0.0
target_keys = target_distribution.distribution_dict.keys()
measured_keys = measured_distribution.distribution_dict.keys()
all_keys = set(target_keys).union(measured_keys)
for bitstring in all_keys:
target_bitstring_value = target_distribution.distribution_dict.get(bitstring, 0)
measured_bitstring_value = measured_distribution.distribution_dict.get(
bitstring, 0
)
value += target_bitstring_value * math.log(
max(epsilon, measured_bitstring_value)
)
return -value
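if __name__ == "__main__":
    # Hedged worked example (editor addition): the function only reads the
    # ``distribution_dict`` attribute, so a SimpleNamespace stand-in is used here
    # instead of a real BitstringDistribution.
    from types import SimpleNamespace

    target = SimpleNamespace(distribution_dict={"00": 0.5, "11": 0.5})
    measured = SimpleNamespace(distribution_dict={"00": 0.4, "01": 0.1, "11": 0.5})
    # Bitstrings absent from the measured distribution are clipped to epsilon,
    # keeping the logarithm finite.
    print(compute_clipped_negative_log_likelihood(target, measured, {"epsilon": 1e-9}))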
| 36.511111
| 88
| 0.736458
|
e219461fe40ed8a26ec6ec4adcd87016828bf126
| 1,858
|
py
|
Python
|
easy/python3/c0168_690_employee-importance/00_leetcode_0168.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
easy/python3/c0168_690_employee-importance/00_leetcode_0168.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | null | null | null |
easy/python3/c0168_690_employee-importance/00_leetcode_0168.py
|
drunkwater/leetcode
|
8cc4a07763e71efbaedb523015f0c1eff2927f60
|
[
"Ruby"
] | 3
|
2018-02-09T02:46:48.000Z
|
2021-02-20T08:32:03.000Z
|
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#690. Employee Importance
#You are given a data structure of employee information, which includes the employee's unique id, his importance value and his direct subordinates' id.
#For example, employee 1 is the leader of employee 2, and employee 2 is the leader of employee 3. They have importance value 15, 10 and 5, respectively. Then employee 1 has a data structure like [1, 15, [2]], and employee 2 has [2, 10, [3]], and employee 3 has [3, 5, []]. Note that although employee 3 is also a subordinate of employee 1, the relationship is not direct.
#Now given the employee information of a company, and an employee id, you need to return the total importance value of this employee and all his subordinates.
#Example 1:
#Input: [[1, 5, [2, 3]], [2, 3, []], [3, 3, []]], 1
#Output: 11
#Explanation:
#Employee 1 has importance value 5, and he has two direct subordinates: employee 2 and employee 3. They both have importance value 3. So the total importance value of employee 1 is 5 + 3 + 3 = 11.
#Note:
#One employee has at most one direct leader and may have several subordinates.
#The maximum number of employees won't exceed 2000.
#"""
## Employee info
#class Employee:
# def __init__(self, id, importance, subordinates):
# # It's the unique id of each node.
# # unique id of this employee
# self.id = id
# # the importance value of this employee
# self.importance = importance
# # the id of direct subordinates
# self.subordinates = subordinates
#"""
#class Solution:
# def getImportance(self, employees, id):
# """
# :type employees: Employee
# :type id: int
# :rtype: int
# """
# Time Is Money
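# --- Hedged solution sketch (editor addition, not part of the template above) ---
# A straightforward approach is a DFS over subordinate ids; the Employee class
# below mirrors the commented prototype so the snippet is self-contained.
class Employee:
    def __init__(self, id, importance, subordinates):
        self.id = id
        self.importance = importance
        self.subordinates = subordinates


class Solution:
    def getImportance(self, employees, id):
        by_id = {e.id: e for e in employees}

        def dfs(eid):
            e = by_id[eid]
            return e.importance + sum(dfs(s) for s in e.subordinates)

        return dfs(id)


if __name__ == "__main__":
    staff = [Employee(1, 5, [2, 3]), Employee(2, 3, []), Employee(3, 3, [])]
    print(Solution().getImportance(staff, 1))  # expected output: 11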
| 48.894737
| 371
| 0.700215
|
7a23de5c5144bde724c8cecbda2be9d2ac255baf
| 8,652
|
py
|
Python
|
pyro/distributions/transforms/planar.py
|
chiragnagpal/pyro
|
9b67c84798f39310345a6cf80f602195c0571166
|
[
"Apache-2.0"
] | null | null | null |
pyro/distributions/transforms/planar.py
|
chiragnagpal/pyro
|
9b67c84798f39310345a6cf80f602195c0571166
|
[
"Apache-2.0"
] | null | null | null |
pyro/distributions/transforms/planar.py
|
chiragnagpal/pyro
|
9b67c84798f39310345a6cf80f602195c0571166
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Transform, constraints
from pyro.distributions.conditional import ConditionalTransformModule
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.util import copy_docs_from
from pyro.nn import DenseNN
@copy_docs_from(Transform)
class ConditionedPlanar(Transform):
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, params):
super().__init__(cache_size=1)
self._params = params
self._cached_logDetJ = None
    # This method ensures that dot(u_hat, w) > -1, required for invertibility
def u_hat(self, u, w):
alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)
a_prime = -1 + F.softplus(alpha)
return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x => y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
bias, u, w = self._params() if callable(self._params) else self._params
# x ~ (batch_size, dim_size, 1)
# w ~ (batch_size, 1, dim_size)
# bias ~ (batch_size, 1)
act = torch.tanh(torch.matmul(w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + bias)
u_hat = self.u_hat(u, w)
y = x + u_hat * act
psi_z = (1. - act.pow(2)) * w
self._cached_logDetJ = torch.log(
torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. As noted above, this implementation is incapable of
inverting arbitrary values `y`; rather it assumes `y` is the result of a
previously computed application of the bijector to some `x` (which was
cached on the forward call)
"""
raise KeyError("ConditionedPlanar object expected to find key in intermediates cache but didn't")
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log Jacobian
"""
x_old, y_old = self._cached_x_y
if x is not x_old or y is not y_old:
# This call to the parent class Transform will update the cache
# as well as calling self._call and recalculating y and log_detJ
self(x)
return self._cached_logDetJ
@copy_docs_from(ConditionedPlanar)
class Planar(ConditionedPlanar, TransformModule):
r"""
A 'planar' bijective transform with equation,
:math:`\mathbf{y} = \mathbf{x} + \mathbf{u}\tanh(\mathbf{w}^T\mathbf{z}+b)`
where :math:`\mathbf{x}` are the inputs, :math:`\mathbf{y}` are the outputs,
and the learnable parameters are :math:`b\in\mathbb{R}`,
:math:`\mathbf{u}\in\mathbb{R}^D`, :math:`\mathbf{w}\in\mathbb{R}^D` for
input dimension :math:`D`. For this to be an invertible transformation, the
condition :math:`\mathbf{w}^T\mathbf{u}>-1` is enforced.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
>>> transform = Planar(10)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using the planar transform can be
scored.
:param input_dim: the dimension of the input (and output) variable.
:type input_dim: int
References:
[1] Danilo Jimenez Rezende, Shakir Mohamed. Variational Inference with
Normalizing Flows. [arXiv:1505.05770]
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, input_dim):
super().__init__(self._params)
self.bias = nn.Parameter(torch.Tensor(1,))
self.u = nn.Parameter(torch.Tensor(input_dim,))
self.w = nn.Parameter(torch.Tensor(input_dim,))
self.input_dim = input_dim
self.reset_parameters()
def _params(self):
return self.bias, self.u, self.w
def reset_parameters(self):
stdv = 1. / math.sqrt(self.u.size(0))
self.w.data.uniform_(-stdv, stdv)
self.u.data.uniform_(-stdv, stdv)
self.bias.data.zero_()
@copy_docs_from(ConditionalTransformModule)
class ConditionalPlanar(ConditionalTransformModule):
r"""
A conditional 'planar' bijective transform using the equation,
:math:`\mathbf{y} = \mathbf{x} + \mathbf{u}\tanh(\mathbf{w}^T\mathbf{z}+b)`
where :math:`\mathbf{x}` are the inputs with dimension :math:`D`,
:math:`\mathbf{y}` are the outputs, and the pseudo-parameters
:math:`b\in\mathbb{R}`, :math:`\mathbf{u}\in\mathbb{R}^D`, and
:math:`\mathbf{w}\in\mathbb{R}^D` are the output of a function, e.g. a NN,
with input :math:`z\in\mathbb{R}^{M}` representing the context variable to
condition on. For this to be an invertible transformation, the condition
:math:`\mathbf{w}^T\mathbf{u}>-1` is enforced.
Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`
this provides a way to create richer variational approximations.
Example usage:
>>> from pyro.nn.dense_nn import DenseNN
>>> input_dim = 10
>>> context_dim = 5
>>> batch_size = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [1, input_dim, input_dim]
>>> hypernet = DenseNN(context_dim, [50, 50], param_dims)
>>> transform = ConditionalPlanar(hypernet)
>>> z = torch.rand(batch_size, context_dim)
>>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,
... [transform]).condition(z)
>>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using the planar transform can be
scored.
:param nn: a function inputting the context variable and outputting a triplet of
real-valued parameters of dimensions :math:`(1, D, D)`.
:type nn: callable
References:
[1] Variational Inference with Normalizing Flows [arXiv:1505.05770]
Danilo Jimenez Rezende, Shakir Mohamed
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, nn):
super().__init__()
self.nn = nn
def _params(self, context):
return self.nn(context)
def condition(self, context):
params = partial(self._params, context)
return ConditionedPlanar(params)
def planar(input_dim):
"""
A helper function to create a :class:`~pyro.distributions.transforms.Planar`
object for consistency with other helpers.
:param input_dim: Dimension of input variable
:type input_dim: int
"""
return Planar(input_dim)
def conditional_planar(input_dim, context_dim, hidden_dims=None):
"""
A helper function to create a
:class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care
of constructing a dense network with the correct input/output dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param context_dim: Dimension of context variable
:type context_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [input_dim * 10, input_dim * 10]
:type hidden_dims: list[int]
"""
if hidden_dims is None:
hidden_dims = [input_dim * 10, input_dim * 10]
nn = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
return ConditionalPlanar(nn)
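if __name__ == "__main__":
    # Hedged smoke test (editor addition): runs a Planar transform forward on a small
    # random batch and reads back the cached log-determinant of the Jacobian.
    x = torch.randn(3, 10)
    transform = planar(10)
    y = transform(x)
    print(y.shape, transform.log_abs_det_jacobian(x, y).shape)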
| 35.170732
| 106
| 0.67337
|
ef6ded6a2a27334d79faf61095e7a422571f9097
| 668
|
py
|
Python
|
src/ndc/create_hist.py
|
TEI-EAJ/auto_aozora_tei
|
5535abef680a1e186f8a7dc6efc30a1dcf4efeec
|
[
"CC0-1.0"
] | 3
|
2019-02-12T13:28:22.000Z
|
2021-07-25T20:58:07.000Z
|
src/ndc/create_hist.py
|
TEI-EAJ/auto_aozora_tei
|
5535abef680a1e186f8a7dc6efc30a1dcf4efeec
|
[
"CC0-1.0"
] | null | null | null |
src/ndc/create_hist.py
|
TEI-EAJ/auto_aozora_tei
|
5535abef680a1e186f8a7dc6efc30a1dcf4efeec
|
[
"CC0-1.0"
] | 1
|
2019-02-12T22:04:00.000Z
|
2019-02-12T22:04:00.000Z
|
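# Editor note (hedged): this script appears to walk the four-level NDC classification
# tree in data/ndc.json, attach the per-class counts found in data/subjects.json to
# the leaf nodes, and write the enriched tree to docs/ndc/hist.json.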
import json
with open('data/ndc.json', 'r') as f:
data = json.load(f)
with open('data/subjects.json', 'r') as f:
subjects = json.load(f)
arr = data["children"]
for lev1 in arr:
for lev2 in lev1["children"]:
for lev3 in lev2["children"]:
for lev4 in lev3["children"]:
id = lev4["id"]
id = id.split("/")[-1].replace("ndc", "")
id = id.upper()
if id in subjects:
lev4["value"] = subjects[id]
with open('../../docs/ndc/hist.json', 'w') as outfile:
json.dump(data, outfile, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
| 24.740741
| 98
| 0.523952
|
febc05664ea50c97d67b8f34fd7a95fd90ce24a7
| 6,471
|
py
|
Python
|
torchseq/models/samplers/parallel_nucleus.py
|
tomhosking/torchseq
|
1b08c16822a553ecb77b96289fb21eb0a13d9c6b
|
[
"Apache-2.0"
] | 17
|
2021-02-25T14:24:06.000Z
|
2021-12-12T07:12:26.000Z
|
torchseq/models/samplers/parallel_nucleus.py
|
tomhosking/torchseq
|
1b08c16822a553ecb77b96289fb21eb0a13d9c6b
|
[
"Apache-2.0"
] | null | null | null |
torchseq/models/samplers/parallel_nucleus.py
|
tomhosking/torchseq
|
1b08c16822a553ecb77b96289fb21eb0a13d9c6b
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torchseq.utils.tokenizer import Tokenizer
from torchseq.utils.functions import onehot
# FROM: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
"""Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k >0: keep only top k tokens with highest probability (top-k filtering).
top_p >0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
"""
# assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
orig_shape = logits.shape
logits = logits.view(-1, orig_shape[-1])
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits.reshape(orig_shape)
class ParallelNucleusSampler(nn.Module):
def __init__(self, config, device):
super(ParallelNucleusSampler, self).__init__()
self.config = config
self.device = device
def forward(self, model, batch, tgt_field):
curr_batch_size = batch[[k for k in batch.keys() if k[-5:] != "_text"][0]].size()[0]
max_output_len = self.config.eval.data.get("max_out_len", 32)
prevent_repetition = (
self.config.nucleus_sampling.prevent_repetition
if "prevent_repetition" in self.config.nucleus_sampling.data
else True
)
if not self.config.eval.data.get("shifted_decoding", True):
print("Unshifted decoding not supported by nucleus decoder!")
beam_width = self.config.nucleus_sampling.beam_width # number of total hypotheses to maintain
prob_cutoff = self.config.nucleus_sampling.cutoff
# Create vector of SOS + placeholder for first prediction
output_seq = torch.LongTensor(curr_batch_size, beam_width, 1).fill_(Tokenizer().bos_id).to(self.device)
scores = torch.FloatTensor(curr_batch_size, beam_width, 1).fill_(1).to(self.device)
output_done = torch.BoolTensor(curr_batch_size, beam_width).fill_(False).to(self.device)
padding = torch.LongTensor(curr_batch_size, beam_width).fill_(Tokenizer().pad_id).to(self.device)
pad_probs = (
torch.FloatTensor(curr_batch_size, beam_width, self.config.prepro.vocab_size)
.fill_(float("0"))
.to(self.device)
)
pad_probs[:, :, Tokenizer().pad_id] = float("1")
def _tile_batch(x):
return x.repeat_interleave(beam_width, dim=0)
batch_tiled = {k: (_tile_batch(x) if k[-5:] != "_text" and k[0] != "_" else x) for k, x in batch.items()}
seq_ix = 0
memory = {}
while torch.sum(output_done) < curr_batch_size * beam_width and seq_ix < max_output_len:
new_logits, memory = model(batch_tiled, output_seq.view(curr_batch_size * beam_width, -1), memory)
new_logits = new_logits.view(curr_batch_size, beam_width, -1, self.config.prepro.vocab_size)
output_done = (output_seq[:, :, -1] == Tokenizer().pad_id) | (output_seq[:, :, -1] == Tokenizer().eos_id)
new_logits = top_k_top_p_filtering(logits=new_logits, top_p=prob_cutoff)
if prevent_repetition:
one_hot_prev = onehot(output_seq[:, :, -1], N=self.config.prepro.vocab_size)
new_logits[:, :, -1, :] = new_logits[:, :, -1, :] + (one_hot_prev * float("-1e-16"))
new_probs = torch.where(
output_done.unsqueeze(-1), pad_probs, nn.functional.softmax(new_logits[:, :, -1, :], -1)
)
sampled_indices = (
torch.multinomial(new_probs.view(curr_batch_size * beam_width, -1).cpu(), 1)
.view(curr_batch_size, beam_width, -1)
.to(self.device)
)
sampled_scores = new_probs.gather(index=sampled_indices, dim=-1)
new_output = torch.cat([output_seq, sampled_indices], dim=-1)
scores = torch.cat([scores, sampled_scores], dim=-1)
# Use pad for the output for elements that have completed
if seq_ix > 0:
output_done = (new_output[:, :, -2] == Tokenizer().eos_id) | (
new_output[:, :, -2] == Tokenizer().pad_id
)
new_output[:, :, -1] = torch.where(output_done, padding, new_output[:, :, -1])
output_seq = new_output
seq_ix += 1
# Take top-1 beam:
hypothesis_len = torch.sum(output_seq != Tokenizer().pad_id, dim=-1)
# Length penalty needs to be applied to *overall* score, not score for this token
len_alpha = self.config.nucleus_sampling.length_alpha
length_penalty = torch.pow((5 + hypothesis_len).float(), len_alpha) / pow(5.0 + 1.0, len_alpha)
beam_scores = (
torch.log(scores).where(output_seq != Tokenizer().pad_id, torch.FloatTensor([0.0]).to(self.device)).sum(-1)
/ length_penalty
)
sorted_scores, sorted_indices = torch.sort(beam_scores, descending=True)
output_seq = torch.gather(output_seq, 1, sorted_indices.unsqueeze(-1).expand(-1, -1, output_seq.shape[2]))
output = output_seq
return output, sorted_scores, torch.sum(output_seq != Tokenizer().pad_id, dim=-1), memory
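if __name__ == "__main__":
    # Hedged demo (editor addition): shows the effect of nucleus (top-p) filtering on
    # a small random logit matrix; entries outside the top-p nucleus are set to -inf.
    demo_logits = torch.randn(2, 10)
    filtered = top_k_top_p_filtering(demo_logits.clone(), top_p=0.9)
    print((filtered == -float("Inf")).sum(dim=-1))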
| 44.321918
| 119
| 0.643332
|
20bebc733d7674e861ad7c07684a3007af031c69
| 3,160
|
py
|
Python
|
activelearning/settings.py
|
evanlouie/activelearning
|
7ee6e9d2d795f85a441ad70e70ac0d8de9c25e31
|
[
"MIT"
] | null | null | null |
activelearning/settings.py
|
evanlouie/activelearning
|
7ee6e9d2d795f85a441ad70e70ac0d8de9c25e31
|
[
"MIT"
] | null | null | null |
activelearning/settings.py
|
evanlouie/activelearning
|
7ee6e9d2d795f85a441ad70e70ac0d8de9c25e31
|
[
"MIT"
] | 1
|
2019-01-03T18:03:18.000Z
|
2019-01-03T18:03:18.000Z
|
"""
Django settings for activelearning project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "!y4%@!k#=%=&k%!(2em8$rrb-mjt2kup1pa(d@=(x#6y_uloh^"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"ensemble.apps.EnsembleConfig",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "activelearning.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "activelearning.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = "/static/"
# Media Files
MEDIA_ROOT = "media"
MEDIA_URL = "/media/"
| 25.901639
| 90
| 0.705696
|
2a9ee1efeb8c721c00cc33cda209834c6a1353bd
| 783
|
py
|
Python
|
bacchus/celery.py
|
hudecof/bacchus
|
1d7bafa2331535b27b336b42f07f8fe328f6d131
|
[
"Apache-2.0"
] | 1
|
2020-04-15T14:31:48.000Z
|
2020-04-15T14:31:48.000Z
|
bacchus/celery.py
|
hudecof/bacchus
|
1d7bafa2331535b27b336b42f07f8fe328f6d131
|
[
"Apache-2.0"
] | 4
|
2019-04-13T08:35:51.000Z
|
2019-04-13T15:08:47.000Z
|
bacchus/celery.py
|
hudecof/bacchus
|
1d7bafa2331535b27b336b42f07f8fe328f6d131
|
[
"Apache-2.0"
] | 1
|
2019-03-25T07:48:29.000Z
|
2019-03-25T07:48:29.000Z
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bacchus.settings')
app = Celery('bacchus')
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
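# Example (editor sketch): with namespace='CELERY', a Django setting such as
# CELERY_BROKER_URL = 'redis://localhost:6379/0' is picked up by Celery as broker_url.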
app.conf.timezone = 'Europe/Istanbul'
app.conf.enable_utc = False
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
| 29
| 67
| 0.773946
|
998662d5fc197c662c69592197835dcd1578e87b
| 2,468
|
py
|
Python
|
js/analysis.py
|
Omar-Ceesay/2048-AI
|
520a319b2f3cdf56e0f298e49e08cb7f0864b52c
|
[
"MIT"
] | null | null | null |
js/analysis.py
|
Omar-Ceesay/2048-AI
|
520a319b2f3cdf56e0f298e49e08cb7f0864b52c
|
[
"MIT"
] | null | null | null |
js/analysis.py
|
Omar-Ceesay/2048-AI
|
520a319b2f3cdf56e0f298e49e08cb7f0864b52c
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from os import listdir
from os.path import isfile, join
import json
file_path = "./data"
all_data_files = [f for f in listdir(file_path) if isfile(join(file_path, f))]
all_data = []
for filename in all_data_files:
f = open("./data/"+filename, "r")
json_data = json.loads(f.read())
for entry in json_data:
all_data.append(entry)
mc_data = {
"10": {
"count": 0,
"items": []
},
"50": {
"count": 0,
"items": []
},
"60": {
"count": 0,
"items": []
},
"100": {
"count": 0,
"items": []
},
"150": {
"count": 0,
"items": []
},
"200": {
"count": 0,
"items": []
},
"250": {
"count": 0,
"items": []
},
"300": {
"count": 0,
"items": []
}
}
for entry in all_data:
sim_number_as_string = str(entry["simulation_number"])
mc_data[sim_number_as_string]["count"] += 1
mc_data[sim_number_as_string]["items"].append(entry)
count = 0
total_score = 0
total_time = 0
data = []
scores = []
average_scores = []
wins = 0
get_these = ["50", "100", "150", "200", "250", "300"]
for simulation_number in get_these:
total_score = 0
total_time = 0
wins = 0
count = 0
scores = []
for entry in mc_data[simulation_number]["items"]:
count += 1
total_score += entry["score"]
scores.append(entry["score"])
if (entry["highestTile"] >= 2048):
wins += 1
if "time" in entry:
# I have some messed up times in here
if (entry["time"] < 150000):
total_time += entry["time"]
print("*"*20)
print("Total number of samples: " + str(count))
print("Total wins: " + str(wins))
print("Average number of wins: " + str(round(wins/count, 3)))
average_scores.append(total_score/count)
print("Average score for "+ simulation_number +" simulations is " + str(total_score/count))
print("Average time for "+ simulation_number +" simulations is " + str((total_time/count)/1000) + " seconds")
print("*"*20)
data.append(scores)
np.random.seed(19680801)
# Data for plotting
fig1, ax1 = plt.subplots()
ax1.set_title('Final score vs number of simulations')
ax1.boxplot(data, labels=get_these)
plt.plot([1,2,3,4,5,6], average_scores)
# fig.savefig("test.png")
plt.show()
| 22.642202
| 113
| 0.561588
|
b7b81266e65b2023132e0196ca92bab0cd73f2d4
| 617
|
py
|
Python
|
workload-traces/mysql/tpcc/extract_lock_names.py
|
spcl/vldb19-distributed-locking
|
efa9ffa4065cf17ccdf0b59672c173eb2d23934c
|
[
"MIT"
] | 8
|
2019-11-04T19:05:40.000Z
|
2022-01-19T06:05:21.000Z
|
workload-traces/mysql/tpcc/extract_lock_names.py
|
spcl/vldb19-distributed-locking
|
efa9ffa4065cf17ccdf0b59672c173eb2d23934c
|
[
"MIT"
] | null | null | null |
workload-traces/mysql/tpcc/extract_lock_names.py
|
spcl/vldb19-distributed-locking
|
efa9ffa4065cf17ccdf0b59672c173eb2d23934c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import fileinput
import json
from collections import OrderedDict
locks = set()
for line in fileinput.input():
data = json.loads(line)
# Get only lock events
if data.get('action') not in ['lock', 'unlock']:
continue
    # Fall back to the home warehouse when the ware_house field is missing
ware_house = data['ware_house'] if 'ware_house' in data else data['home_ware_house']
ware_house = int(ware_house)
locks.add((ware_house, data['object_name']))
for l in locks:
data = OrderedDict([('ware_house', l[0]), ('object_name', l[1])])
print(json.dumps(data))
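# Example (editor sketch, assumed trace line): an input record such as
#   {"action": "lock", "ware_house": 3, "object_name": "district_7"}
# contributes the pair (3, "district_7") to the set and is printed once as
#   {"ware_house": 3, "object_name": "district_7"}
# no matter how many times it appears in the trace.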
| 24.68
| 88
| 0.672609
|
a9a988c3221786da4c41b780858f77a389d50793
| 550
|
py
|
Python
|
thumbor/detectors/glasses_detector/__init__.py
|
jairhenrique/thumbor
|
fa29ba0efab2dd420c6840616a079756fd75293a
|
[
"MIT"
] | 6,837
|
2015-01-01T14:33:12.000Z
|
2022-03-31T22:21:05.000Z
|
thumbor/detectors/glasses_detector/__init__.py
|
jairhenrique/thumbor
|
fa29ba0efab2dd420c6840616a079756fd75293a
|
[
"MIT"
] | 1,055
|
2015-01-03T22:22:05.000Z
|
2022-03-31T21:56:17.000Z
|
thumbor/detectors/glasses_detector/__init__.py
|
jairhenrique/thumbor
|
fa29ba0efab2dd420c6840616a079756fd75293a
|
[
"MIT"
] | 744
|
2015-01-05T03:49:31.000Z
|
2022-03-30T02:35:16.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.detectors.local_detector import CascadeLoaderDetector
class Detector(CascadeLoaderDetector):
def __init__(self, context, index, detectors):
super().__init__(context, index, detectors)
self.load_cascade_file(__file__, context.config.GLASSES_DETECTOR_CASCADE_FILE)
| 30.555556
| 86
| 0.761818
|
4aa954cf6c840968e8d972c9934f8f658bedecb6
| 4,394
|
py
|
Python
|
src/addit/ncf.py
|
HajimeKawahara/addit
|
7b19006bd338d7887f1a600b66fc74fc72c21c70
|
[
"MIT"
] | 3
|
2021-06-28T15:40:50.000Z
|
2021-07-13T17:53:06.000Z
|
src/addit/ncf.py
|
HajimeKawahara/addit
|
7b19006bd338d7887f1a600b66fc74fc72c21c70
|
[
"MIT"
] | 4
|
2021-07-13T22:02:56.000Z
|
2021-08-13T00:47:02.000Z
|
src/addit/ncf.py
|
HajimeKawahara/addit
|
7b19006bd338d7887f1a600b66fc74fc72c21c70
|
[
"MIT"
] | 1
|
2021-08-10T09:43:36.000Z
|
2021-08-10T09:43:36.000Z
|
"""Neighbouring Contribution Function (new version updated Aug 10th 2021)
* Assume a given x-grid xv[k] and a value x. For the 1D case, the Neighbouring Contribution Function gives ncf(k,x) = (xv[s+1]-x)/dv for k=s, (x-xv[s])/dv for k=s+1, and 0 elsewhere, where s is the largest index with xv[s] <= x and dv = xv[s+1]-xv[s].
* For a 2D case, NCF gives the non-zero values for 4 points around (x,y)
"""
import jax.numpy as jnp
import numpy as np
from jax import jit
from jax import vmap
from jax.lax import scan
# NOTE: jax.ops.index_add / jax.ops.index were removed in newer JAX releases;
# this module targets the older API (the modern equivalent is a.at[idx].add(v)).
from jax.ops import index_add
from jax.ops import index as joi
def getix(x,xv):
    """Return the fractional grid position of x on the grid xv.

    index is the index of the grid point just below x and cont is the fractional
    distance to it, so x corresponds to xv[index]*(1-cont) + xv[index+1]*cont.
    """
    indarr=jnp.arange(len(xv))
    pos = jnp.interp(x,xv,indarr)
    index = (pos).astype(int)
    cont = (pos-index)
    return cont,index
@jit
def inc1D(w,x,xv):
cx,ix=getix(x,xv)
a=jnp.zeros(len(xv))
a=index_add(a,joi[ix],w*(1.0-cx))
a=index_add(a,joi[ix+1],w*cx)
return a
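# Editor example (sketch, assumed values): for xv = jnp.array([0.0, 1.0, 2.0]) and x = 0.25,
# getix gives cont = 0.25 and index = 0, so inc1D(1.0, 0.25, xv) returns [0.75, 0.25, 0.0]:
# the weight is split linearly between the two neighbouring grid points.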
@jit
def inc2D(w,x,y,xv,yv):
"""integrated neighbouring contribution function for 2D (memory reduced sum).
Args:
w: weight (N)
x: x values (N)
y: y values (N)
xv: x grid
yv: y grid
Returns:
integrated neighbouring contribution function
Note:
This function computes \sum_n w_n fx_n \otimes fy_n,
where w_n is the weight, fx_n and fy_n are the n-th NCFs for 1D.
A direct sum uses huge RAM.
        In this function, the sum is accumulated in place with index_add to avoid that.
Example:
>>> N=10000
>>> xv=jnp.linspace(0,1,11) #grid
>>> yv=jnp.linspace(0,1,13) #grid
>>> w=np.logspace(1.0,3.0,N)
>>> x=np.random.rand(N)
>>> y=np.random.rand(N)
>>> val=inc2D(w,x,y,xv,yv)
        >>> #the comparison with the direct sum
>>> valdirect=jnp.sum(nc2D(x,y,xv,yv)*w,axis=2)
>>> #maximum deviation
>>> print(jnp.max(jnp.abs((val-valdirect)/jnp.mean(valdirect)))*100,"%") #%
>>> 5.196106e-05 %
>>> #mean deviation
>>> print(jnp.sqrt(jnp.mean((val-valdirect)**2))/jnp.mean(valdirect)*100,"%") #%
>>> 1.6135311e-05 %
"""
cx,ix=getix(x,xv)
cy,iy=getix(y,yv)
a=jnp.zeros((len(xv),len(yv)))
a=index_add(a,joi[ix,iy],w*(1-cx)*(1-cy))
a=index_add(a,joi[ix,iy+1],w*(1-cx)*cy)
a=index_add(a,joi[ix+1,iy],w*cx*(1-cy))
a=index_add(a,joi[ix+1,iy+1],w*cx*cy)
return a
@jit
def inc3D(w,x,y,z,xv,yv,zv):
"""The lineshape distribution matrix = integrated neighbouring contribution for 3D (memory reduced sum).
Args:
w: weight (N)
x: x values (N)
y: y values (N)
z: z values (N)
xv: x grid
yv: y grid
zv: z grid
Returns:
lineshape distribution matrix (integrated neighbouring contribution for 3D)
Note:
This function computes \sum_n w_n fx_n \otimes fy_n \otimes fz_n,
where w_n is the weight, fx_n, fy_n, and fz_n are the n-th NCFs for 1D.
A direct sum uses huge RAM.
        In this function, the sum is accumulated in place with index_add to avoid that.
Example:
>>> N=10000
>>> xv=jnp.linspace(0,1,11) #grid
>>> yv=jnp.linspace(0,1,13) #grid
>>> zv=jnp.linspace(0,1,17) #grid
>>> w=np.logspace(1.0,3.0,N)
>>> x=np.random.rand(N)
>>> y=np.random.rand(N)
>>> z=np.random.rand(N)
>>> val=inc3D(w,x,y,z,xv,yv,zv)
        >>> #the comparison with the direct sum
>>> valdirect=jnp.sum(nc3D(x,y,z,xv,yv,zv)*w,axis=3)
>>> #maximum deviation
>>> print(jnp.max(jnp.abs((val-valdirect)/jnp.mean(valdirect)))*100,"%") #%
>>> 5.520862e-05 %
>>> #mean deviation
>>> print(jnp.sqrt(jnp.mean((val-valdirect)**2))/jnp.mean(valdirect)*100,"%") #%
>>> 8.418057e-06 %
"""
cx,ix=getix(x,xv)
cy,iy=getix(y,yv)
cz,iz=getix(z,zv)
a=jnp.zeros((len(xv),len(yv),len(zv)))
a=index_add(a,joi[ix,iy,iz],w*(1-cx)*(1-cy)*(1-cz))
a=index_add(a,joi[ix,iy+1,iz],w*(1-cx)*cy*(1-cz))
a=index_add(a,joi[ix+1,iy,iz],w*cx*(1-cy)*(1-cz))
a=index_add(a,joi[ix+1,iy+1,iz],w*cx*cy*(1-cz))
a=index_add(a,joi[ix,iy,iz+1],w*(1-cx)*(1-cy)*cz)
a=index_add(a,joi[ix,iy+1,iz+1],w*(1-cx)*cy*cz)
a=index_add(a,joi[ix+1,iy,iz+1],w*cx*(1-cy)*cz)
a=index_add(a,joi[ix+1,iy+1,iz+1],w*cx*cy*cz)
return a
| 32.548148
| 239
| 0.565316
|
2aebd98258592fe26c2e32d74e3485c8f047ecb3
| 6,920
|
py
|
Python
|
plugins/action/node_deployment.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/node_deployment.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
plugins/action/node_deployment.py
|
steinzi/ansible-ise
|
0add9c8858ed8e0e5e7219fbaf0c936b6d7cc6c0
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.module_utils.ise import (
ISESDK,
ise_argument_spec,
ise_compare_equality,
get_dict_result,
)
from ansible_collections.cisco.ise.plugins.module_utils.exceptions import (
InconsistentParameters,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
state=dict(type="str", default="present", choices=["present", "absent"]),
fdqn=dict(type="str"),
userName=dict(type="str"),
password=dict(type="str", no_log=True),
administration=dict(type="dict"),
generalSettings=dict(type="dict"),
profileConfiguration=dict(type="dict"),
hostname=dict(type="str"),
))
required_if = [
("state", "present", ["hostname"], True),
("state", "absent", ["hostname"], True),
]
required_one_of = []
mutually_exclusive = []
required_together = []
class NodeDeployment(object):
def __init__(self, params, ise):
self.ise = ise
self.new_object = dict(
fdqn=params.get("fdqn"),
user_name=params.get("userName"),
password=params.get("password"),
administration=params.get("administration"),
general_settings=params.get("generalSettings"),
profile_configuration=params.get("profileConfiguration"),
hostname=params.get("hostname"),
)
def get_object_by_name(self, name):
try:
result = self.ise.exec(
family="node_deployment",
function="get_node_details",
params={"hostname": name}
).response.get('response', {})
result = get_dict_result(result, 'name', name)
except Exception as e:
result = None
return result
def get_object_by_id(self, id):
# NOTICE: Does not have a get by id method or it is in another action
result = None
return result
def exists(self):
prev_obj = None
id_exists = False
name_exists = False
o_id = self.new_object.get("id")
name = self.new_object.get("hostname")
if o_id:
prev_obj = self.get_object_by_id(o_id)
id_exists = prev_obj is not None and isinstance(prev_obj, dict)
if not id_exists and name:
prev_obj = self.get_object_by_name(name)
name_exists = prev_obj is not None and isinstance(prev_obj, dict)
if name_exists:
_id = prev_obj.get("id")
if id_exists and name_exists and o_id != _id:
raise InconsistentParameters("The 'id' and 'name' params don't refer to the same object")
it_exists = prev_obj is not None and isinstance(prev_obj, dict)
return (it_exists, prev_obj)
def requires_update(self, current_obj):
requested_obj = self.new_object
obj_params = [
("fdqn", "fdqn"),
("userName", "user_name"),
("password", "password"),
("administration", "administration"),
("generalSettings", "general_settings"),
("profileConfiguration", "profile_configuration"),
("hostname", "hostname"),
]
# Method 1. Params present in request (Ansible) obj are the same as the current (ISE) params
# If any does not have eq params, it requires update
return any(not ise_compare_equality(current_obj.get(ise_param),
requested_obj.get(ansible_param))
for (ise_param, ansible_param) in obj_params)
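    # Example (editor sketch): if the node already registered in ISE has the same fdqn,
    # credentials and settings as the task arguments, every ise_compare_equality(...) call
    # is True, requires_update returns False, and the module reports the object as
    # already present instead of issuing an update.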
def create(self):
result = self.ise.exec(
family="node_deployment",
function="register_node",
params=self.new_object,
).response
return result
def update(self):
result = self.ise.exec(
family="node_deployment",
function="update_node",
params=self.new_object
).response
return result
def delete(self):
result = self.ise.exec(
family="node_deployment",
function="delete_node",
params=self.new_object
).response
return result
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = True
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(self._task.args)
obj = NodeDeployment(self._task.args, ise)
state = self._task.args.get("state")
response = None
if state == "present":
(obj_exists, prev_obj) = obj.exists()
if obj_exists:
if obj.requires_update(prev_obj):
response = obj.update()
ise.object_updated()
else:
response = prev_obj
ise.object_already_present()
else:
response = obj.create()
ise.object_created()
elif state == "absent":
(obj_exists, prev_obj) = obj.exists()
if obj_exists:
response = obj.delete()
ise.object_deleted()
else:
ise.object_already_absent()
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
| 34.427861
| 128
| 0.606936
|
6d476c15a429eadd7dbd717a575d515102beb574
| 39,738
|
py
|
Python
|
robot_sim/robots/ur3_dual/ur3_dual.py
|
liang324/wrs
|
46eadec355c61a9c7bac1fa0f3cf419b2aac19aa
|
[
"MIT"
] | null | null | null |
robot_sim/robots/ur3_dual/ur3_dual.py
|
liang324/wrs
|
46eadec355c61a9c7bac1fa0f3cf419b2aac19aa
|
[
"MIT"
] | null | null | null |
robot_sim/robots/ur3_dual/ur3_dual.py
|
liang324/wrs
|
46eadec355c61a9c7bac1fa0f3cf419b2aac19aa
|
[
"MIT"
] | null | null | null |
import os
import math
import numpy as np
import basis.robot_math as rm
import modeling.model_collection as mc
import modeling.collision_model as cm
import robot_sim._kinematics.jlchain as jl
import robot_sim.manipulators.ur3.ur3 as ur
import robot_sim.end_effectors.grippers.robotiq85.robotiq85 as rtq
from panda3d.core import CollisionNode, CollisionBox, Point3
import robot_sim.robots.robot_interface as ri
class UR3Dual(ri.RobotInterface):
def __init__(self, pos=np.zeros(3), rotmat=np.eye(3), name='ur3dual', enable_cc=True):
super().__init__(pos=pos, rotmat=rotmat, name=name)
this_dir, this_filename = os.path.split(__file__)
# left side
self.lft_body = jl.JLChain(pos=pos, rotmat=rotmat, homeconf=np.zeros(12), name='lft_body_jl')
self.lft_body.jnts[0]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[1]['loc_pos'] = np.array([-0.0, 0.0, 0.0])
self.lft_body.jnts[2]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[3]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[4]['loc_pos'] = np.array([-0.0, 0.0, 0.0])
self.lft_body.jnts[5]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[6]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[7]['loc_pos'] = np.array([-0.0, 0.0, 0.0])
self.lft_body.jnts[8]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[9]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[10]['loc_pos'] = np.array([-0.0, 0.0, 0.0])
self.lft_body.jnts[11]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[12]['loc_pos'] = np.array([0.0, 0.0, 0.0])
self.lft_body.jnts[13]['loc_pos'] = np.array([.0, .258485281374, 1.61051471863])
self.lft_body.jnts[13]['loc_rotmat'] = rm.rotmat_from_euler(-3.0 * math.pi / 4.0, 0, math.pi, 'rxyz')
# body
self.lft_body.lnks[0]['name'] = "ur3_dual_lft_body"
self.lft_body.lnks[0]['loc_pos'] = np.array([0, 0, 0])
self.lft_body.lnks[0]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_base.stl"),
cdprimit_type="user_defined", expand_radius=.005,
userdefined_cdprimitive_fn=self._base_combined_cdnp)
self.lft_body.lnks[0]['rgba'] = [.3, .3, .3, 1.0]
# columns
self.lft_body.lnks[1]['name'] = "ur3_dual_back_rgt_column"
self.lft_body.lnks[1]['loc_pos'] = np.array([-0.41, -0.945, 0])
self.lft_body.lnks[1]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column2400x60x60.stl"))
self.lft_body.lnks[2]['name'] = "ur3_dual_back_lft_column"
self.lft_body.lnks[2]['loc_pos'] = np.array([-0.41, 0.945, 0])
self.lft_body.lnks[2]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column2400x60x60.stl"))
self.lft_body.lnks[3]['name'] = "ur3_dual_front_rgt_column"
self.lft_body.lnks[3]['loc_pos'] = np.array([0.73, -0.945, 0])
self.lft_body.lnks[3]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column2400x60x60.stl"))
self.lft_body.lnks[4]['name'] = "ur3_dual_front_lft_column"
self.lft_body.lnks[4]['loc_pos'] = np.array([0.73, 0.945, 0])
self.lft_body.lnks[4]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column2400x60x60.stl"))
# x_rows
self.lft_body.lnks[5]['name'] = "ur3_dual_up_rgt_xrow"
self.lft_body.lnks[5]['loc_pos'] = np.array([-0.38, -0.945, 2.37])
self.lft_body.lnks[5]['loc_rotmat'] = rm.rotmat_from_euler(0, math.pi / 2, 0)
self.lft_body.lnks[5]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1080x60x60.stl"))
self.lft_body.lnks[6]['name'] = "ur3_dual_bottom_rgt_xrow"
self.lft_body.lnks[6]['loc_pos'] = np.array([-0.38, -0.945, 1.07])
self.lft_body.lnks[6]['loc_rotmat'] = rm.rotmat_from_euler(0, math.pi / 2, 0)
self.lft_body.lnks[6]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1080x60x60.stl"))
self.lft_body.lnks[7]['name'] = "ur3_dual_up_lft_xrow"
self.lft_body.lnks[7]['loc_pos'] = np.array([-0.38, 0.945, 2.37])
self.lft_body.lnks[7]['loc_rotmat'] = rm.rotmat_from_euler(0, math.pi / 2, 0)
self.lft_body.lnks[7]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1080x60x60.stl"))
self.lft_body.lnks[8]['name'] = "ur3_dual_bottom_lft_xrow"
self.lft_body.lnks[8]['loc_pos'] = np.array([-0.38, 0.945, 1.07])
self.lft_body.lnks[8]['loc_rotmat'] = rm.rotmat_from_euler(0, math.pi / 2, 0)
self.lft_body.lnks[8]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1080x60x60.stl"))
# y_rows
self.lft_body.lnks[9]['name'] = "ur3_dual_back_up_yrow"
self.lft_body.lnks[9]['loc_pos'] = np.array([-0.41, -0.915, 2.37])
self.lft_body.lnks[9]['loc_rotmat'] = rm.rotmat_from_euler(-math.pi / 2, 0, 0)
self.lft_body.lnks[9]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1830x60x60.stl"))
self.lft_body.lnks[10]['name'] = "ur3_dual_back_bottom_yrow"
self.lft_body.lnks[10]['loc_pos'] = np.array([-0.41, -0.915, 0.35])
self.lft_body.lnks[10]['loc_rotmat'] = rm.rotmat_from_euler(-math.pi / 2, 0, 0)
self.lft_body.lnks[10]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1830x60x60.stl"))
self.lft_body.lnks[11]['name'] = "ur3_dual_front_up_yrow"
self.lft_body.lnks[11]['loc_pos'] = np.array([0.73, -0.915, 2.37])
self.lft_body.lnks[11]['loc_rotmat'] = rm.rotmat_from_euler(-math.pi / 2, 0, 0)
self.lft_body.lnks[11]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_column1830x60x60.stl"))
# table TODO update using vision sensors
self.lft_body.lnks[12]['name'] = "ur3_dual_table"
self.lft_body.lnks[12]['loc_pos'] = np.array([0.36, 0.0, 1.046])
self.lft_body.lnks[12]['loc_rotmat'] = rm.rotmat_from_euler(0, 0, math.pi / 2)
self.lft_body.lnks[12]['collisionmodel'] = cm.CollisionModel(
os.path.join(this_dir, "meshes", "ur3_dual_table1820x54x800.stl"))
self.lft_body.lnks[12]['rgba'] = [.9, .77, .52, 1.0]
self.lft_body.reinitialize()
lft_arm_homeconf = np.zeros(6)
lft_arm_homeconf[0] = math.pi / 3.0
lft_arm_homeconf[1] = -math.pi * 1.0 / 3.0
lft_arm_homeconf[2] = -math.pi * 2.0 / 3.0
lft_arm_homeconf[3] = math.pi
lft_arm_homeconf[4] = -math.pi / 2.0
self.lft_arm = ur.UR3(pos=self.lft_body.jnts[-1]['gl_posq'],
rotmat=self.lft_body.jnts[-1]['gl_rotmatq'],
homeconf=lft_arm_homeconf,
enable_cc=False)
# lft hand ftsensor
self.lft_ft_sensor = jl.JLChain(pos=self.lft_arm.jnts[-1]['gl_posq'],
rotmat=self.lft_arm.jnts[-1]['gl_rotmatq'],
homeconf=np.zeros(0), name='lft_ft_sensor_jl')
self.lft_ft_sensor.jnts[1]['loc_pos'] = np.array([.0, .0, .0484])
self.lft_ft_sensor.lnks[0]['name'] = "ur3_dual_lft_ft_sensor"
self.lft_ft_sensor.lnks[0]['loc_pos'] = np.array([0, 0, 0])
self.lft_ft_sensor.lnks[0]['collisionmodel'] = cm.gen_stick(spos=self.lft_ft_sensor.jnts[0]['loc_pos'],
epos=self.lft_ft_sensor.jnts[1]['loc_pos'],
thickness=.067, rgba=[.2, .3, .3, 1], sections=24)
self.lft_ft_sensor.reinitialize()
# lft hand
self.lft_hnd = rtq.Robotiq85(pos=self.lft_ft_sensor.jnts[-1]['gl_posq'],
rotmat=self.lft_ft_sensor.jnts[-1]['gl_rotmatq'],
enable_cc=False)
        # right side
self.rgt_body = jl.JLChain(pos=pos, rotmat=rotmat, homeconf=np.zeros(0), name='rgt_body_jl')
self.rgt_body.jnts[1]['loc_pos'] = np.array([.0, -.258485281374, 1.61051471863]) # right from robot_s view
self.rgt_body.jnts[1]['loc_rotmat'] = rm.rotmat_from_euler(3.0 * math.pi / 4.0, .0, .0) # left from robot_s view
self.rgt_body.lnks[0]['name'] = "ur3_dual_rgt_body"
self.rgt_body.lnks[0]['loc_pos'] = np.array([0, 0, 0])
self.rgt_body.lnks[0]['meshfile'] = None
self.rgt_body.lnks[0]['rgba'] = [.3, .3, .3, 1.0]
self.rgt_body.reinitialize()
rgt_arm_homeconf = np.zeros(6)
rgt_arm_homeconf[0] = -math.pi * 1.0 / 3.0
rgt_arm_homeconf[1] = -math.pi * 2.0 / 3.0
rgt_arm_homeconf[2] = math.pi * 2.0 / 3.0
rgt_arm_homeconf[4] = math.pi / 2.0
self.rgt_arm = ur.UR3(pos=self.rgt_body.jnts[-1]['gl_posq'],
rotmat=self.rgt_body.jnts[-1]['gl_rotmatq'],
homeconf=rgt_arm_homeconf,
enable_cc=False)
# rgt hand ft sensor
self.rgt_ft_sensor = jl.JLChain(pos=self.rgt_arm.jnts[-1]['gl_posq'],
rotmat=self.rgt_arm.jnts[-1]['gl_rotmatq'],
homeconf=np.zeros(0), name='rgt_ft_sensor_jl')
self.rgt_ft_sensor.jnts[1]['loc_pos'] = np.array([.0, .0, .0484])
self.rgt_ft_sensor.lnks[0]['name'] = "ur3_dual_rgt_ft_sensor"
self.rgt_ft_sensor.lnks[0]['loc_pos'] = np.array([0, 0, 0])
self.rgt_ft_sensor.lnks[0]['collisionmodel'] = cm.gen_stick(spos=self.rgt_ft_sensor.jnts[0]['loc_pos'],
epos=self.rgt_ft_sensor.jnts[1]['loc_pos'],
thickness=.067, rgba=[.2, .3, .3, 1], sections=24)
self.rgt_ft_sensor.reinitialize()
# TODO replace using copy
self.rgt_hnd = rtq.Robotiq85(pos=self.rgt_ft_sensor.jnts[-1]['gl_posq'],
rotmat=self.rgt_ft_sensor.jnts[-1]['gl_rotmatq'],
enable_cc=False)
# tool center point
# lft
self.lft_arm.tcp_jntid = -1
self.lft_arm.tcp_loc_pos = self.lft_ft_sensor.jnts[-1]['loc_pos'] + self.lft_hnd.jaw_center_pos
self.lft_arm.tcp_loc_rotmat = self.lft_ft_sensor.jnts[-1]['loc_rotmat'].dot(self.lft_hnd.jaw_center_rotmat)
# rgt
self.rgt_arm.tcp_jntid = -1
        self.rgt_arm.tcp_loc_pos = self.rgt_ft_sensor.jnts[-1]['loc_pos'] + self.rgt_hnd.jaw_center_pos
        self.rgt_arm.tcp_loc_rotmat = self.rgt_ft_sensor.jnts[-1]['loc_rotmat'].dot(self.rgt_hnd.jaw_center_rotmat)
# a list of detailed information about objects in hand, see CollisionChecker.add_objinhnd
self.lft_oih_infos = []
self.rgt_oih_infos = []
# collision detection
if enable_cc:
self.enable_cc()
# component map
self.manipulator_dict['rgt_arm'] = self.rgt_arm
self.manipulator_dict['lft_arm'] = self.lft_arm
self.manipulator_dict['rgt_hnd'] = self.rgt_arm # specify which hand is a gripper installed to
self.manipulator_dict['lft_hnd'] = self.lft_arm # specify which hand is a gripper installed to
self.manipulator_dict['rgt_ftsensor'] = self.rgt_arm # specify which hand is a gripper installed to
self.manipulator_dict['lft_ftsensor'] = self.lft_arm # specify which hand is a gripper installed to
self.hnd_dict['rgt_hnd'] = self.rgt_hnd
self.hnd_dict['lft_hnd'] = self.lft_hnd
self.hnd_dict['rgt_arm'] = self.rgt_hnd
self.hnd_dict['lft_arm'] = self.lft_hnd
self.hnd_dict['rgt_ftsensor'] = self.rgt_hnd
self.hnd_dict['lft_ftsensor'] = self.lft_hnd
self.ft_sensor_dict['rgt_ftsensor'] = self.rgt_ft_sensor
self.ft_sensor_dict['lft_ftsensor'] = self.lft_ft_sensor
self.ft_sensor_dict['rgt_arm'] = self.rgt_ft_sensor
self.ft_sensor_dict['lft_arm'] = self.lft_ft_sensor
self.ft_sensor_dict['rgt_hnd'] = self.rgt_ft_sensor
self.ft_sensor_dict['lft_hnd'] = self.lft_ft_sensor
@staticmethod
def _base_combined_cdnp(name, radius):
collision_node = CollisionNode(name)
collision_primitive_c0 = CollisionBox(Point3(0.18, 0.0, 0.105),
x=.61 + radius, y=.41 + radius, z=.105 + radius)
collision_node.addSolid(collision_primitive_c0)
collision_primitive_c1 = CollisionBox(Point3(0.0, 0.0, 0.4445),
x=.321 + radius, y=.321 + radius, z=.2345 + radius)
collision_node.addSolid(collision_primitive_c1)
collision_primitive_c2 = CollisionBox(Point3(0.0, 0.0, 0.8895),
x=.05 + radius, y=.05 + radius, z=.6795 + radius)
collision_node.addSolid(collision_primitive_c2)
collision_primitive_c3 = CollisionBox(Point3(0.0, 0.0, 1.619),
x=.1 + radius, y=.275 + radius, z=.05 + radius)
collision_node.addSolid(collision_primitive_c3)
collision_primitive_l0 = CollisionBox(Point3(0.0, 0.300, 1.669),
x=.1 + radius, y=.029 + radius, z=.021 + radius)
collision_node.addSolid(collision_primitive_l0)
collision_primitive_r0 = CollisionBox(Point3(0.0, -0.300, 1.669),
x=.1 + radius, y=.029 + radius, z=.021 + radius)
collision_node.addSolid(collision_primitive_r0)
return collision_node
def enable_cc(self):
# TODO when pose is changed, oih info goes wrong
super().enable_cc()
self.cc.add_cdlnks(self.lft_body, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
self.cc.add_cdlnks(self.lft_arm, [1, 2, 3, 4, 5, 6])
self.cc.add_cdlnks(self.lft_ft_sensor, [0])
self.cc.add_cdlnks(self.lft_hnd.lft_outer, [0, 1, 2, 3, 4])
self.cc.add_cdlnks(self.lft_hnd.rgt_outer, [1, 2, 3, 4])
self.cc.add_cdlnks(self.rgt_arm, [1, 2, 3, 4, 5, 6])
self.cc.add_cdlnks(self.rgt_ft_sensor, [0])
self.cc.add_cdlnks(self.rgt_hnd.lft_outer, [0, 1, 2, 3, 4])
self.cc.add_cdlnks(self.rgt_hnd.rgt_outer, [1, 2, 3, 4])
# lnks used for cd with external stationary objects
activelist = [self.lft_arm.lnks[2],
self.lft_arm.lnks[3],
self.lft_arm.lnks[4],
self.lft_arm.lnks[5],
self.lft_arm.lnks[6],
self.lft_ft_sensor.lnks[0],
self.lft_hnd.lft_outer.lnks[0],
self.lft_hnd.lft_outer.lnks[1],
self.lft_hnd.lft_outer.lnks[2],
self.lft_hnd.lft_outer.lnks[3],
self.lft_hnd.lft_outer.lnks[4],
self.lft_hnd.rgt_outer.lnks[1],
self.lft_hnd.rgt_outer.lnks[2],
self.lft_hnd.rgt_outer.lnks[3],
self.lft_hnd.rgt_outer.lnks[4],
self.rgt_arm.lnks[2],
self.rgt_arm.lnks[3],
self.rgt_arm.lnks[4],
self.rgt_arm.lnks[5],
self.rgt_arm.lnks[6],
self.rgt_ft_sensor.lnks[0],
self.rgt_hnd.lft_outer.lnks[0],
self.rgt_hnd.lft_outer.lnks[1],
self.rgt_hnd.lft_outer.lnks[2],
self.rgt_hnd.lft_outer.lnks[3],
self.rgt_hnd.lft_outer.lnks[4],
self.rgt_hnd.rgt_outer.lnks[1],
self.rgt_hnd.rgt_outer.lnks[2],
self.rgt_hnd.rgt_outer.lnks[3],
self.rgt_hnd.rgt_outer.lnks[4]]
self.cc.set_active_cdlnks(activelist)
# lnks used for arm-body collision detection
fromlist = [self.lft_body.lnks[0], # body
self.lft_body.lnks[1], # back-rgt column
self.lft_body.lnks[2], # back-lft column
self.lft_body.lnks[3], # head-rgt row
self.lft_body.lnks[4], # head-lft row
self.lft_body.lnks[5], # up right x_row
self.lft_body.lnks[6], # bottom right x_row
self.lft_body.lnks[7], # up left row
self.lft_body.lnks[8], # bottom left row
self.lft_body.lnks[9], # back up y_row
self.lft_body.lnks[10], # back bottom y_row
self.lft_body.lnks[11], # head up y_row
self.lft_body.lnks[12], # table
self.lft_arm.lnks[1],
self.rgt_arm.lnks[1]]
intolist = [self.lft_arm.lnks[3],
self.lft_arm.lnks[4],
self.lft_arm.lnks[5],
self.lft_arm.lnks[6],
self.lft_ft_sensor.lnks[0],
self.lft_hnd.lft_outer.lnks[0],
self.lft_hnd.lft_outer.lnks[1],
self.lft_hnd.lft_outer.lnks[2],
self.lft_hnd.lft_outer.lnks[3],
self.lft_hnd.lft_outer.lnks[4],
self.lft_hnd.rgt_outer.lnks[1],
self.lft_hnd.rgt_outer.lnks[2],
self.lft_hnd.rgt_outer.lnks[3],
self.lft_hnd.rgt_outer.lnks[4],
self.rgt_arm.lnks[3],
self.rgt_arm.lnks[4],
self.rgt_arm.lnks[5],
self.rgt_arm.lnks[6],
self.rgt_ft_sensor.lnks[0],
self.rgt_hnd.lft_outer.lnks[0],
self.rgt_hnd.lft_outer.lnks[1],
self.rgt_hnd.lft_outer.lnks[2],
self.rgt_hnd.lft_outer.lnks[3],
self.rgt_hnd.lft_outer.lnks[4],
self.rgt_hnd.rgt_outer.lnks[1],
self.rgt_hnd.rgt_outer.lnks[2],
self.rgt_hnd.rgt_outer.lnks[3],
self.rgt_hnd.rgt_outer.lnks[4]]
self.cc.set_cdpair(fromlist, intolist)
# lnks used for arm-body collision detection -- extra
fromlist = [self.lft_body.lnks[0]] # body
intolist = [self.lft_arm.lnks[2],
self.rgt_arm.lnks[2]]
self.cc.set_cdpair(fromlist, intolist)
# arm-arm collision
fromlist = [self.lft_arm.lnks[3],
self.lft_arm.lnks[4],
self.lft_arm.lnks[5],
self.lft_arm.lnks[6],
self.lft_ft_sensor.lnks[0],
self.lft_hnd.lft_outer.lnks[0],
self.lft_hnd.lft_outer.lnks[1],
self.lft_hnd.lft_outer.lnks[2],
self.lft_hnd.lft_outer.lnks[3],
self.lft_hnd.lft_outer.lnks[4],
self.lft_hnd.rgt_outer.lnks[1],
self.lft_hnd.rgt_outer.lnks[2],
self.lft_hnd.rgt_outer.lnks[3],
self.lft_hnd.rgt_outer.lnks[4]]
intolist = [self.rgt_arm.lnks[3],
self.rgt_arm.lnks[4],
self.rgt_arm.lnks[5],
self.rgt_arm.lnks[6],
self.rgt_ft_sensor.lnks[0],
self.rgt_hnd.lft_outer.lnks[0],
self.rgt_hnd.lft_outer.lnks[1],
self.rgt_hnd.lft_outer.lnks[2],
self.rgt_hnd.lft_outer.lnks[3],
self.rgt_hnd.lft_outer.lnks[4],
self.rgt_hnd.rgt_outer.lnks[1],
self.rgt_hnd.rgt_outer.lnks[2],
self.rgt_hnd.rgt_outer.lnks[3],
self.rgt_hnd.rgt_outer.lnks[4]]
self.cc.set_cdpair(fromlist, intolist)
def get_hnd_on_manipulator(self, manipulator_name):
if manipulator_name == 'rgt_arm':
return self.rgt_hnd
elif manipulator_name == 'lft_arm':
return self.lft_hnd
else:
raise ValueError("The given jlc does not have a hand!")
def fix_to(self, pos, rotmat):
super().fix_to(pos, rotmat)
self.pos = pos
self.rotmat = rotmat
self.lft_body.fix_to(self.pos, self.rotmat)
self.lft_arm.fix_to(pos=self.lft_body.jnts[-1]['gl_posq'], rotmat=self.lft_body.jnts[-1]['gl_rotmatq'])
self.lft_ft_sensor.fix_to(pos=self.lft_arm.jnts[-1]['gl_posq'], rotmat=self.lft_arm.jnts[-1]['gl_rotmatq'])
self.lft_hnd.fix_to(pos=self.lft_ft_sensor.jnts[-1]['gl_posq'],
rotmat=self.lft_ft_sensor.jnts[-1]['gl_rotmatq'])
self.rgt_body.fix_to(self.pos, self.rotmat)
self.rgt_arm.fix_to(pos=self.rgt_body.jnts[-1]['gl_posq'], rotmat=self.rgt_body.jnts[-1]['gl_rotmatq'])
self.rgt_ft_sensor.fix_to(pos=self.rgt_arm.jnts[-1]['gl_posq'], rotmat=self.rgt_arm.jnts[-1]['gl_rotmatq'])
self.rgt_hnd.fix_to(pos=self.rgt_ft_sensor.jnts[-1]['gl_posq'],
rotmat=self.rgt_ft_sensor.jnts[-1]['gl_rotmatq'])
def fk(self, component_name, jnt_values):
"""
        :param jnt_values: ndarray, 1x6 or 1x12 depending on component_name
        :param component_name: 'lft_arm', 'rgt_arm', or 'both_arm'
:return:
author: weiwei
date: 20201208toyonaka, 20210403osaka
"""
def update_oih(component_name='rgt_arm'):
# inline function for update objects in hand
if component_name == 'rgt_arm':
oih_info_list = self.rgt_oih_infos
elif component_name == 'lft_arm':
oih_info_list = self.lft_oih_infos
for obj_info in oih_info_list:
gl_pos, gl_rotmat = self.cvt_loc_tcp_to_gl(component_name, obj_info['rel_pos'], obj_info['rel_rotmat'])
obj_info['gl_pos'] = gl_pos
obj_info['gl_rotmat'] = gl_rotmat
def update_component(component_name, jnt_values):
self.manipulator_dict[component_name].fk(jnt_values=jnt_values)
self.ft_sensor_dict[component_name].fix_to(pos=self.manipulator_dict[component_name].jnts[-1]['gl_posq'],
rotmat=self.manipulator_dict[component_name].jnts[-1][
'gl_rotmatq'])
self.get_hnd_on_manipulator(component_name).fix_to(
pos=self.ft_sensor_dict[component_name].jnts[-1]['gl_posq'],
rotmat=self.ft_sensor_dict[component_name].jnts[-1]['gl_rotmatq'])
update_oih(component_name=component_name)
super().fk(component_name, jnt_values)
# examine length
if component_name == 'lft_arm' or component_name == 'rgt_arm':
if not isinstance(jnt_values, np.ndarray) or jnt_values.size != 6:
raise ValueError("An 1x6 npdarray must be specified to move a single arm!")
update_component(component_name, jnt_values)
elif component_name == 'both_arm':
if (jnt_values.size != 12):
raise ValueError("A 1x12 npdarrays must be specified to move both arm!")
update_component('lft_arm', jnt_values[0:6])
update_component('rgt_arm', jnt_values[6:12])
elif component_name == 'all':
raise NotImplementedError
else:
raise ValueError("The given component name is not available!")
def rand_conf(self, component_name):
"""
override robot_interface.rand_conf
:param component_name:
:return:
author: weiwei
date: 20210406
"""
if component_name == 'lft_arm' or component_name == 'rgt_arm':
return super().rand_conf(component_name)
elif component_name == 'both_arm':
return np.hstack((super().rand_conf('lft_arm'), super().rand_conf('rgt_arm')))
else:
raise NotImplementedError
def hold(self, objcm, jaw_width=None, hnd_name='lft_hnd'):
"""
the objcm is added as a part of the robot_s to the cd checker
:param jaw_width:
:param objcm:
:return:
"""
if hnd_name == 'lft_hnd':
rel_pos, rel_rotmat = self.lft_arm.cvt_gl_to_loc_tcp(objcm.get_pos(), objcm.get_rotmat())
intolist = [self.lft_body.lnks[0], # body
self.lft_body.lnks[1], # back-rgt column
self.lft_body.lnks[2], # back-lft column
self.lft_body.lnks[3], # head-rgt row
self.lft_body.lnks[4], # head-lft row
self.lft_body.lnks[5], # up right x_row
self.lft_body.lnks[6], # bottom right x_row
self.lft_body.lnks[7], # up left row
self.lft_body.lnks[8], # bottom left row
self.lft_body.lnks[9], # back up y_row
self.lft_body.lnks[10], # back bottom y_row
self.lft_body.lnks[11], # head up y_row
self.lft_body.lnks[12], # table
self.lft_arm.lnks[1],
self.lft_arm.lnks[2],
self.lft_arm.lnks[3],
self.lft_arm.lnks[4],
self.rgt_arm.lnks[1],
self.rgt_arm.lnks[2],
self.rgt_arm.lnks[3],
self.rgt_arm.lnks[4],
self.rgt_arm.lnks[5],
self.rgt_arm.lnks[6],
self.rgt_ft_sensor.lnks[0],
self.rgt_hnd.rgt_outer.lnks[1],
self.rgt_hnd.rgt_outer.lnks[2],
self.rgt_hnd.rgt_outer.lnks[3],
self.rgt_hnd.rgt_outer.lnks[4]]
self.lft_oih_infos.append(self.cc.add_cdobj(objcm, rel_pos, rel_rotmat, intolist))
elif hnd_name == 'rgt_hnd':
rel_pos, rel_rotmat = self.rgt_arm.cvt_gl_to_loc_tcp(objcm.get_pos(), objcm.get_rotmat())
intolist = [self.lft_body.lnks[0], # body
self.lft_body.lnks[1], # back-rgt column
self.lft_body.lnks[2], # back-lft column
self.lft_body.lnks[3], # head-rgt row
self.lft_body.lnks[4], # head-lft row
self.lft_body.lnks[5], # up right x_row
self.lft_body.lnks[6], # bottom right x_row
self.lft_body.lnks[7], # up left row
self.lft_body.lnks[8], # bottom left row
self.lft_body.lnks[9], # back up y_row
self.lft_body.lnks[10], # back bottom y_row
self.lft_body.lnks[11], # head up y_row
self.lft_body.lnks[12], # table
self.lft_arm.lnks[1],
self.lft_arm.lnks[2],
self.lft_arm.lnks[3],
self.lft_arm.lnks[4],
self.lft_arm.lnks[5],
self.lft_arm.lnks[6],
self.lft_ft_sensor.lnks[0],
self.lft_hnd.rgt_outer.lnks[1],
self.lft_hnd.rgt_outer.lnks[2],
self.lft_hnd.rgt_outer.lnks[3],
self.lft_hnd.rgt_outer.lnks[4],
self.rgt_arm.lnks[1],
self.rgt_arm.lnks[2],
self.rgt_arm.lnks[3],
self.rgt_arm.lnks[4]]
self.rgt_oih_infos.append(self.cc.add_cdobj(objcm, rel_pos, rel_rotmat, intolist))
else:
raise ValueError("hnd_name must be lft_hnd or rgt_hnd!")
if jaw_width is not None:
self.jaw_to(hnd_name, jaw_width)
return rel_pos, rel_rotmat
def get_loc_pose_from_hio(self, hio_pos, hio_rotmat, component_name='lft_arm'):
"""
get the loc pose of an object from a grasp pose described in an object's local frame
:param hio_pos: a grasp pose described in an object's local frame -- pos
:param hio_rotmat: a grasp pose described in an object's local frame -- rotmat
:return:
author: weiwei
date: 20210302
"""
if component_name == 'lft_arm':
arm = self.lft_arm
elif component_name == 'rgt_arm':
arm = self.rgt_arm
hnd_pos = arm.jnts[-1]['gl_posq']
hnd_rotmat = arm.jnts[-1]['gl_rotmatq']
hnd_homomat = rm.homomat_from_posrot(hnd_pos, hnd_rotmat)
hio_homomat = rm.homomat_from_posrot(hio_pos, hio_rotmat)
oih_homomat = rm.homomat_inverse(hio_homomat)
gl_obj_homomat = hnd_homomat.dot(oih_homomat)
return self.cvt_gl_to_loc_tcp(component_name, gl_obj_homomat[:3, 3], gl_obj_homomat[:3, :3])
def get_gl_pose_from_hio(self, hio_pos, hio_rotmat, component_name='lft_arm'):
"""
        get the global pose of an object from a grasp pose described in the object's local frame
:param hio_pos: a grasp pose described in an object's local frame -- pos
:param hio_rotmat: a grasp pose described in an object's local frame -- rotmat
:return:
author: weiwei
date: 20210302
"""
if component_name == 'lft_arm':
arm = self.lft_arm
elif component_name == 'rgt_arm':
arm = self.rgt_arm
hnd_pos = arm.jnts[-1]['gl_posq']
hnd_rotmat = arm.jnts[-1]['gl_rotmatq']
hnd_homomat = rm.homomat_from_posrot(hnd_pos, hnd_rotmat)
hio_homomat = rm.homomat_from_posrot(hio_pos, hio_rotmat)
oih_homomat = rm.homomat_inverse(hio_homomat)
gl_obj_homomat = hnd_homomat.dot(oih_homomat)
return gl_obj_homomat[:3, 3], gl_obj_homomat[:3, :3]
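    # Editor note (sketch): both *_pose_from_hio methods compose homogeneous transforms.
    # With T_world_hand built from the arm's last joint and T_hand_obj = inverse(T_obj_hand)
    # built from (hio_pos, hio_rotmat), the object pose is T_world_obj = T_world_hand @ T_hand_obj;
    # get_gl_pose_from_hio returns it in world coordinates, while get_loc_pose_from_hio
    # converts it to the arm's local TCP frame via cvt_gl_to_loc_tcp.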
def get_oih_cm_list(self, hnd_name='lft_hnd'):
"""
oih = object in hand list
:param hnd_name:
:return:
"""
if hnd_name == 'lft_hnd':
oih_infos = self.lft_oih_infos
elif hnd_name == 'rgt_hnd':
oih_infos = self.rgt_oih_infos
else:
raise ValueError("hnd_name must be lft_hnd or rgt_hnd!")
return_list = []
for obj_info in oih_infos:
objcm = obj_info['collisionmodel']
objcm.set_pos(obj_info['gl_pos'])
objcm.set_rotmat(obj_info['gl_rotmat'])
return_list.append(objcm)
return return_list
def get_oih_glhomomat_list(self, hnd_name='lft_hnd'):
"""
oih = object in hand list
:param hnd_name:
:return:
author: weiwei
date: 20210302
"""
if hnd_name == 'lft_hnd':
oih_infos = self.lft_oih_infos
elif hnd_name == 'rgt_hnd':
oih_infos = self.rgt_oih_infos
else:
raise ValueError("hnd_name must be lft_hnd or rgt_hnd!")
return_list = []
for obj_info in oih_infos:
            return_list.append(rm.homomat_from_posrot(obj_info['gl_pos'], obj_info['gl_rotmat']))
return return_list
def get_oih_relhomomat(self, objcm, hnd_name='lft_hnd'):
"""
TODO: useless? 20210320
oih = object in hand list
:param objcm
:param hnd_name:
:return:
author: weiwei
date: 20210302
"""
if hnd_name == 'lft_hnd':
oih_info_list = self.lft_oih_infos
elif hnd_name == 'rgt_hnd':
oih_info_list = self.rgt_oih_infos
else:
raise ValueError("hnd_name must be lft_hnd or rgt_hnd!")
for obj_info in oih_info_list:
if obj_info['collisionmodel'] is objcm:
return rm.homomat_from_posrot(obj_info['rel_pos']), obj_info['rel_rotmat']
def release(self, hnd_name, objcm, jaw_width=None):
"""
the objcm is added as a part of the robot_s to the cd checker
:param jaw_width:
:param objcm:
:param hnd_name:
:return:
"""
if hnd_name == 'lft_hnd':
oih_infos = self.lft_oih_infos
elif hnd_name == 'rgt_hnd':
oih_infos = self.rgt_oih_infos
else:
raise ValueError("hnd_name must be lft_hnd or rgt_hnd!")
if jaw_width is not None:
self.jaw_to(hnd_name, jaw_width)
for obj_info in oih_infos:
if obj_info['collisionmodel'] is objcm:
self.cc.delete_cdobj(obj_info)
oih_infos.remove(obj_info)
break
def release_all(self, jaw_width=None, hnd_name='lft_hnd'):
"""
release all objects from the specified hand
:param jaw_width:
:param hnd_name:
:return:
author: weiwei
date: 20210125
"""
if hnd_name == 'lft_hnd':
oih_infos = self.lft_oih_infos
elif hnd_name == 'rgt_hnd':
oih_infos = self.rgt_oih_infos
else:
raise ValueError("hnd_name must be lft_hnd or rgt_hnd!")
if jaw_width is not None:
self.jaw_to(hnd_name, jaw_width)
for obj_info in oih_infos:
self.cc.delete_cdobj(obj_info)
oih_infos.clear()
def gen_stickmodel(self,
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=False,
toggle_jntscs=False,
toggle_connjnt=False,
name='ur3dual'):
stickmodel = mc.ModelCollection(name=name)
self.lft_body.gen_stickmodel(tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs).attach_to(stickmodel)
self.lft_arm.gen_stickmodel(tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt).attach_to(stickmodel)
self.lft_hnd.gen_stickmodel(toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt).attach_to(stickmodel)
self.rgt_body.gen_stickmodel(tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs).attach_to(stickmodel)
self.rgt_arm.gen_stickmodel(tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt).attach_to(stickmodel)
self.rgt_hnd.gen_stickmodel(toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt).attach_to(stickmodel)
self.lft_ft_sensor.gen_stickmodel(toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt).attach_to(stickmodel)
self.rgt_ft_sensor.gen_stickmodel(toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
toggle_connjnt=toggle_connjnt).attach_to(stickmodel)
return stickmodel
def gen_meshmodel(self,
tcp_jntid=None,
tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=False,
toggle_jntscs=False,
rgba=None,
name='xarm_gripper_meshmodel'):
meshmodel = mc.ModelCollection(name=name)
self.lft_body.gen_meshmodel(tcp_loc_pos=None,
tcp_loc_rotmat=None,
toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
self.lft_arm.gen_meshmodel(tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
self.lft_hnd.gen_meshmodel(toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
self.rgt_arm.gen_meshmodel(tcp_jntid=tcp_jntid,
tcp_loc_pos=tcp_loc_pos,
tcp_loc_rotmat=tcp_loc_rotmat,
toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
self.rgt_hnd.gen_meshmodel(toggle_tcpcs=False,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
self.lft_ft_sensor.gen_meshmodel(toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
self.rgt_ft_sensor.gen_meshmodel(toggle_tcpcs=toggle_tcpcs,
toggle_jntscs=toggle_jntscs,
rgba=rgba).attach_to(meshmodel)
for obj_info in self.lft_oih_infos:
objcm = obj_info['collisionmodel']
objcm.set_pos(obj_info['gl_pos'])
objcm.set_rotmat(obj_info['gl_rotmat'])
objcm.copy().attach_to(meshmodel)
for obj_info in self.rgt_oih_infos:
objcm = obj_info['collisionmodel']
objcm.set_pos(obj_info['gl_pos'])
objcm.set_rotmat(obj_info['gl_rotmat'])
objcm.copy().attach_to(meshmodel)
return meshmodel
if __name__ == '__main__':
import visualization.panda.world as wd
import modeling.geometric_model as gm
base = wd.World(cam_pos=[2, 0, 3], lookat_pos=[0, 0, 1])
gm.gen_frame().attach_to(base)
u3d = UR3Dual()
u3d.show_cdprimit()
# u3d.fk(.85)
u3d_meshmodel = u3d.gen_meshmodel(toggle_tcpcs=True)
u3d_meshmodel.attach_to(base)
u3d.gen_stickmodel().attach_to(base)
base.run()
| 52.424802
| 121
| 0.555614
|
5a2a509d3147a39a338049132f2f0356fb20e1f4
| 4,513
|
py
|
Python
|
lib/roi_data_layer/roidb.py
|
aaamourao/faster-rcnn.pytorch
|
e5f476360fbdc8af9ae6a4981b3eaac3b744cee9
|
[
"MIT"
] | null | null | null |
lib/roi_data_layer/roidb.py
|
aaamourao/faster-rcnn.pytorch
|
e5f476360fbdc8af9ae6a4981b3eaac3b744cee9
|
[
"MIT"
] | null | null | null |
lib/roi_data_layer/roidb.py
|
aaamourao/faster-rcnn.pytorch
|
e5f476360fbdc8af9ae6a4981b3eaac3b744cee9
|
[
"MIT"
] | null | null | null |
"""Transform a roidb into a trainable roidb by adding a bunch of metadata."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import datasets
import numpy as np
from model.utils.config import cfg
from datasets.factory import get_imdb
import PIL
import pdb
def prepare_roidb(imdb):
"""Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function precomputes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
roidb = imdb.roidb
if not (imdb.name.startswith('coco')):
cache_file = os.path.join(imdb.cache_path, imdb.name + '_sizes.pkl')
if os.path.exists(cache_file):
print('Image sizes loaded from %s' % cache_file)
with open(cache_file, 'rb') as f:
sizes = pickle.load(f)
else:
print('Extracting image sizes... (It may take long time)')
sizes = [PIL.Image.open(imdb.image_path_at(i)).size
for i in range(imdb.num_images)]
with open(cache_file, 'wb') as f:
pickle.dump(sizes, f)
print('Done!!')
for i in range(len(imdb.image_index)):
roidb[i]['img_id'] = imdb.image_id_at(i)
roidb[i]['image'] = imdb.image_path_at(i)
if not (imdb.name.startswith('coco')):
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
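# Example (editor sketch): for a ROI whose gt_overlaps row is [0.0, 0.7, 0.2]
# (background, class 1, class 2), max_overlaps is 0.7 and max_classes is 1;
# a row of all zeros must map to class 0, which the assertions above check.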
def rank_roidb_ratio(roidb):
# rank roidb based on the ratio between width and height.
ratio_large = 2 # largest ratio to preserve.
ratio_small = 0.5 # smallest ratio to preserve.
ratio_list = []
for i in range(len(roidb)):
width = roidb[i]['width']
height = roidb[i]['height']
ratio = width / float(height)
if ratio > ratio_large:
roidb[i]['need_crop'] = 1
ratio = ratio_large
elif ratio < ratio_small:
roidb[i]['need_crop'] = 1
ratio = ratio_small
else:
roidb[i]['need_crop'] = 0
ratio_list.append(ratio)
ratio_list = np.array(ratio_list)
ratio_index = np.argsort(ratio_list)
return ratio_list[ratio_index], ratio_index
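# Example (editor sketch): a 1000x500 image has ratio 2.0 and is kept as-is with
# need_crop=0; a 1600x400 image has ratio 4.0, which is clamped to 2.0 with
# need_crop=1 so the batch loader knows the image should be cropped.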
def filter_roidb(roidb):
# filter the image without bounding box.
print('before filtering, there are %d images...' % (len(roidb)))
i = 0
while i < len(roidb):
if len(roidb[i]['boxes']) == 0:
del roidb[i]
i -= 1
i += 1
print('after filtering, there are %d images...' % (len(roidb)))
return roidb
def combined_roidb(imdb_names, training=True):
"""
Combine multiple roidbs
"""
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
print('Preparing training data...')
prepare_roidb(imdb)
#ratio_index = rank_roidb_ratio(imdb)
print('done')
return imdb.roidb
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print('Loaded dataset `{:s}`'.format(imdb.name))
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print('Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD))
roidb = get_training_roidb(imdb)
return roidb
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
for r in roidbs[1:]:
roidb.extend(r)
tmp = get_imdb(imdb_names.split('+')[1])
imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
else:
imdb = get_imdb(imdb_names)
if training:
roidb = filter_roidb(roidb)
ratio_list, ratio_index = rank_roidb_ratio(roidb)
return imdb, roidb, ratio_list, ratio_index
| 30.910959
| 77
| 0.662087
|
bce455de8873952dd4d49cf03dcb314bc8611f78
| 1,751
|
py
|
Python
|
test/feature/test_scale_space_detector.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | 1
|
2019-11-21T13:18:56.000Z
|
2019-11-21T13:18:56.000Z
|
test/feature/test_scale_space_detector.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | null | null | null |
test/feature/test_scale_space_detector.py
|
tdchaitanya/kornia
|
6dd16563f66f979c7a95846ef86678894b7d54fd
|
[
"Apache-2.0"
] | 2
|
2020-01-08T17:11:34.000Z
|
2020-10-14T00:44:18.000Z
|
import pytest
import kornia.testing as utils # test utils
import kornia
from torch.testing import assert_allclose
from torch.autograd import gradcheck
from kornia.feature.scale_space_detector import *
class TestScaleSpaceDetector:
def test_shape(self):
inp = torch.rand(1, 1, 32, 32)
n_feats = 10
det = ScaleSpaceDetector(n_feats)
lafs, resps = det(inp)
assert lafs.shape == torch.Size([1, n_feats, 2, 3])
assert resps.shape == torch.Size([1, n_feats])
def test_shape_batch(self):
inp = torch.rand(7, 1, 32, 32)
n_feats = 10
det = ScaleSpaceDetector(n_feats)
lafs, resps = det(inp)
assert lafs.shape == torch.Size([7, n_feats, 2, 3])
assert resps.shape == torch.Size([7, n_feats])
def test_print(self):
sift = ScaleSpaceDetector()
sift.__repr__()
def test_toy(self):
inp = torch.zeros(1, 1, 33, 33)
inp[:, :, 13:-13, 13:-13] = 1.0
n_feats = 1
det = ScaleSpaceDetector(n_feats,
resp_module=kornia.feature.BlobHessian(),
mr_size=3.0)
lafs, resps = det(inp)
expected_laf = torch.tensor([[[[6.0548, 0.0000, 16.0], [0.0, 6.0548, 16.0]]]])
expected_resp = torch.tensor([[0.0806]])
assert_allclose(expected_laf, lafs)
assert_allclose(expected_resp, resps)
def test_gradcheck(self):
batch_size, channels, height, width = 1, 1, 31, 21
patches = torch.rand(batch_size, channels, height, width)
patches = utils.tensor_to_gradcheck_var(patches) # to var
assert gradcheck(ScaleSpaceDetector(2), (patches),
raise_exception=True)
| 35.02
| 86
| 0.6008
|
8b861a4d6360161dcb30202dddad4ded13612036
| 4,504
|
py
|
Python
|
slot/a/__init__.py
|
LorentzB/dl
|
c2af8498ba868abcd2ddb08eb9e4b4bb79594ba2
|
[
"Apache-2.0"
] | 45
|
2018-12-30T14:19:37.000Z
|
2021-01-28T08:16:41.000Z
|
slot/a/__init__.py
|
LorentzB/dl
|
c2af8498ba868abcd2ddb08eb9e4b4bb79594ba2
|
[
"Apache-2.0"
] | 23
|
2019-01-07T22:32:00.000Z
|
2019-10-04T10:23:02.000Z
|
slot/a/__init__.py
|
LorentzB/dl
|
c2af8498ba868abcd2ddb08eb9e4b4bb79594ba2
|
[
"Apache-2.0"
] | 36
|
2019-01-11T21:38:02.000Z
|
2021-01-28T08:16:53.000Z
|
from slot import *
from ability import Ability
from collections import defaultdict
class Amulet(AmuletBase):
a = []
def __init__(self):
self.mod = []
self.conf = Conf()
self.mmax = {
'a' : 0.20, # attack
's' : 0.40, # skill damage
'cc' : 0.15, # crit chance
'cd' : 0.25, # crit damage
'fs' : 0.50, # force strike
'bt' : 0.30, # buff time
'sp' : 0.15, # skill haste
'bk' : 0.30, # break killer
'od' : 0.15, # od killer
'lo' : 0.60, # lastoffence
'bc' : 0.15, # buffchain
'sts' : 0.06, # striker strength
            'sls' : 0.06, # slayer strength
'dc' : 3, # dragon's claw
'ds' : 3, # dragon's skill
'prep' : 100, # skill prep
'resist' : 10000, # resist
'da' : 0.18, # dragon damage
'dt' : 0.20, # dragon time
'eprep' : 5, # energy prep
}
from core.afflic import AFFLICT_LIST
for afflic in AFFLICT_LIST:
self.mmax['k_'+afflic] = 0.25
self.mmax['k_burn'] = 0.30
self.base_a = self.a
def setup(self, c):
abilities = self.base_a
if self.a2:
abilities += self.a2.base_a
self.att += self.a2.att
sorted_abilities = defaultdict(lambda: [])
for ab in abilities:
name = ab[0]
sorted_abilities[name].append(ab)
self.a = []
for name, ab_list in sorted_abilities.items():
if name in self.mmax:
max_value = self.mmax[name]
for ab in sorted(ab_list, key=lambda x: '' if len(x) < 3 or x[2] in ('flame', 'water', 'wind', 'light', 'shadow') else x[2]):
if len(ab) > 2:
new_ab = (ab[0], min(ab[1], max_value), *ab[2:])
else:
new_ab = (ab[0], min(ab[1], max_value))
self.a.append(new_ab)
max_value -= ab[1]
if max_value <= 0:
break
else:
self.a.extend(ab_list)
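        # Example (editor sketch): with mmax['a'] = 0.20, two abilities ('a', 0.15) and
        # ('a', 0.10) are kept as ('a', 0.15) and ('a', 0.05), so the combined attack
        # bonus from amulets never exceeds the 20% cap.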
# def oninit(self, adv):
# super(Amulet, self).oninit(adv)
# for i in self.a:
# i.oninit(adv)
# def merge(self, a, b):
# k = b[0]
# if k not in a:
# a[k] = b
# else:
# a[k] = (b[0],a[k][1]+b[1])
# def merge_cond(self, a, b):
# k = b[0]+b[2]
# if k not in a:
# a[k] = b
# else:
# a[k] = (b[0],a[k][1]+b[1],b[2])
# def setup(self, c):
# super(Amulet,self).setup(c)
# if self.a2:
# self.on(c)
# self.a2.on(c)
# self.att += self.a2.att
# self.tmp = self.a + self.a2.a
# self.a = {}
# else:
# self.on(c)
# self.tmp = self.a
# self.a = {}
# for i in self.tmp:
# if len(i)==2 or (len(i)==3 and not isinstance(i[2], str)):
# k = i[0]
# if k not in self.mmax:
# self.merge(self.a, i)
# elif self.mmax[k] > 0:
# if self.mmax[k] > i[1]:
# self.merge(self.a, i)
# self.mmax[k] -= i[1]
# else :
# i = (i[0],self.mmax[k])
# self.merge(self.a, i)
# self.mmax[k] = 0
# for i in self.tmp:
# if len(i)==3 and isinstance(i[2], str):
# k = i[0]
# if k not in self.mmax:
# self.merge_cond(self.a, i)
# elif self.mmax[k] > 0:
# if self.mmax[k] > i[1]:
# self.merge_cond(self.a, i)
# self.mmax[k] -= i[1]
# else:
# i = (i[0],self.mmax[k],i[2])
# self.merge_cond(self.a, i)
# self.mmax[k] = 0
# tmp = []
# for k,i in self.a.items():
# tmp.append(i)
# self.a = tmp
# def on(self, c):
# return
from slot.a.all import *
| 32.637681
| 141
| 0.37833
|
f8ce34cf1949eddb3fbe5b13b838cd17283a54af
| 397
|
py
|
Python
|
magma/config.py
|
Kuree/magma
|
be2439aa897768c5810be72e3a55a6f772ac83cf
|
[
"MIT"
] | null | null | null |
magma/config.py
|
Kuree/magma
|
be2439aa897768c5810be72e3a55a6f772ac83cf
|
[
"MIT"
] | null | null | null |
magma/config.py
|
Kuree/magma
|
be2439aa897768c5810be72e3a55a6f772ac83cf
|
[
"MIT"
] | null | null | null |
__COMPILE_DIR = 'normal'
def set_compile_dir(target):
global __COMPILE_DIR
assert target in ['normal', 'callee_file_dir']
__COMPILE_DIR = target
def get_compile_dir():
return __COMPILE_DIR
__DEBUG_MODE = False
def set_debug_mode(value=True):
global __DEBUG_MODE
assert value in {True, False}
__DEBUG_MODE = value
def get_debug_mode():
return __DEBUG_MODE
| 15.88
| 50
| 0.725441
|
c09f30d44726e2b15a51003e0329ee3d1c49e5a9
| 13,739
|
py
|
Python
|
python/oneflow/nn/modules/constant.py
|
L-Net-1992/oneflow
|
4dc08d65caea36fdd137841ac95551218897e730
|
[
"Apache-2.0"
] | 1
|
2022-03-14T11:17:56.000Z
|
2022-03-14T11:17:56.000Z
|
python/oneflow/nn/modules/constant.py
|
L-Net-1992/oneflow
|
4dc08d65caea36fdd137841ac95551218897e730
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/nn/modules/constant.py
|
L-Net-1992/oneflow
|
4dc08d65caea36fdd137841ac95551218897e730
|
[
"Apache-2.0"
] | 1
|
2021-12-15T02:14:49.000Z
|
2021-12-15T02:14:49.000Z
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Optional, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.common_types import _size_any_t
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _single, _handle_size_arg
class _ConstantBase(Module):
def __init__(
self,
size: Union[_size_any_t, flow.Size],
value: Union[float, int],
dtype: Optional[flow.dtype],
device: Union[flow.device, str] = None,
placement: flow.placement = None,
sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
requires_grad: bool = False,
) -> None:
super().__init__()
assert size is not None, "shape must not be None!"
assert isinstance(
size, (int, tuple, list, flow.Size)
), "shape should be int or tuple int!"
self.device = device
if isinstance(self.device, str):
self.device = flow.device(self.device)
self.requires_grad = requires_grad
size = _single(size)
if dtype is None:
dtype = flow.float32
if placement is None:
if device is None:
self.device = flow.device("cpu")
else:
assert device is None
self.placement = placement
self.sbp = sbp
if placement is not None:
assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
if isinstance(self.sbp, flow.sbp.sbp):
self.sbp = (self.sbp,)
else:
for elem in sbp:
assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
assert len(self.sbp) == len(placement.ranks.shape)
else:
assert sbp is None, "sbp: %s" % sbp
self.shape = size
self.value = value
self.dtype = dtype
def forward(self):
if self.placement is not None:
res = flow._C.global_constant(
self.shape,
self.value,
dtype=self.dtype,
placement=self.placement,
sbp=self.sbp,
)
else:
res = flow._C.constant(
self.shape, self.value, dtype=self.dtype, device=self.device
)
res.requires_grad = self.requires_grad
return res
class Ones(_ConstantBase):
def __init__(
self,
size,
dtype=None,
device=None,
placement=None,
sbp=None,
requires_grad=False,
):
super().__init__(size, 1, dtype, device, placement, sbp, requires_grad)
def ones_op(
*size: Union[_size_any_t, flow.Size, List[int]],
dtype: Optional[flow.dtype] = None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False,
):
"""
Returns a tensor filled with the scalar value 1,
with the shape defined by the variable argument `size`.
Args:
size (an integer or tuple of integer values): defining the shape of the output tensor. Can be \\
a variable number of arguments or a collection like a list or tuple.
dtype (flow.dtype, optional): the desired data type of returned tensor.
device (flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor type
placement (flow.placement, optional): the desired placement of returned global tensor. Default: if None, the returned tensor is local one using the argument `device`.
sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned global tensor. Default: if None, the returned tensor is local one using the argument `device`.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> y = flow.ones(5)
>>> y
tensor([1., 1., 1., 1., 1.], dtype=oneflow.float32)
>>> y = flow.ones(2,3) # construct local tensor
>>> y
tensor([[1., 1., 1.],
[1., 1., 1.]], dtype=oneflow.float32)
>>> placement = flow.placement("cpu", ranks=[0])
>>> y = flow.ones(4, 5, placement=placement, sbp=flow.sbp.broadcast) # construct global tensor
>>> y.is_global
True
"""
size = _handle_size_arg(size)
return Ones(size, dtype, device, placement, sbp, requires_grad)()
class Zeros(_ConstantBase):
def __init__(
self,
size,
dtype=None,
device=None,
placement=None,
sbp=None,
requires_grad=False,
):
super().__init__(size, 0, dtype, device, placement, sbp, requires_grad)
def zeros_op(
*size: Union[_size_any_t, flow.Size, List[int]],
dtype: Optional[flow.dtype] = None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False,
):
"""
Returns a tensor filled with the scalar value 0,
with the shape defined by the variable argument `size`.
Args:
    size (an integer or tuple of integer values): defining the shape of the output tensor. Can be \\
a variable number of arguments or a collection like a list or tuple.
dtype (flow.dtype, optional): the desired data type of returned tensor.
device (flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor type
placement (flow.placement, optional): the desired placement of returned global tensor. Default: if None, the returned tensor is local one using the argument `device`.
sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned global tensor. Default: if None, the returned tensor is local one using the argument `device`.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> y = flow.zeros(5)
>>> y
tensor([0., 0., 0., 0., 0.], dtype=oneflow.float32)
>>> y = flow.zeros(2,3)
>>> y
tensor([[0., 0., 0.],
[0., 0., 0.]], dtype=oneflow.float32)
"""
size = _handle_size_arg(size)
return Zeros(size, dtype, device, placement, sbp, requires_grad)()
class Full(_ConstantBase):
def __init__(
self,
size,
value,
dtype,
device=None,
placement=None,
sbp=None,
requires_grad=False,
):
super().__init__(size, value, dtype, device, placement, sbp, requires_grad)
def full_op(
size: Union[_size_any_t, flow.Size],
value: Union[float, int],
dtype: Optional[flow.dtype] = None,
device: Union[flow.device, str, None] = None,
placement: flow.placement = None,
sbp: flow._oneflow_internal.sbp.sbp = None,
requires_grad: bool = False,
):
"""
Creates a tensor of size `size` filled with fill_value.
The tensor’s dtype is inferred from `value`.
Args:
size(int...): a list, tuple, or oneflow.Size of integers defining the shape of the output tensor.
fill_value(Scalar): the value to fill the output tensor with.
dtype (flow.dtype, optional): the desired data type of returned tensor.
device (flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor type
placement (flow.placement, optional): the desired placement of returned global tensor. Default: if None, the returned tensor is local one using the argument `device`.
sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned global tensor. Default: if None, the returned tensor is local one using the argument `device`.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> y = flow.full((5,),5)
>>> y
tensor([5, 5, 5, 5, 5], dtype=oneflow.int64)
>>> y = flow.full((2,3),5.0) # construct local tensor
>>> y
tensor([[5., 5., 5.],
[5., 5., 5.]], dtype=oneflow.float32)
>>> placement = flow.placement("cpu", ranks=[0])
>>> y = flow.full((2,3),5.0, placement=placement, sbp=flow.sbp.broadcast) # construct global tensor
>>> y.is_global
True
"""
size = _handle_size_arg(size)
if dtype is None:
dtype = flow.tensor(value).dtype
return Full(size, value, dtype, device, placement, sbp, requires_grad)()
def new_ones_op(
x, size=None, dtype=None, device=None, placement=None, sbp=None, requires_grad=False
):
if isinstance(device, str):
device = flow.device(device)
    if size is not None:
size = _single(size)
new_size = size
new_dtype = dtype
new_device = device
new_placement = placement
new_sbp = sbp
new_requires_grad = requires_grad
if size is None:
new_size = x.shape
if dtype is None:
new_dtype = x.dtype
if device is None:
new_device = x.device if x.is_local else None
if placement is None:
new_placement = x.placement if x.is_global else None
if sbp is None:
new_sbp = x.sbp if x.is_global else None
if new_placement is not None:
assert device is None
assert new_sbp is not None
assert isinstance(
new_size, (int, tuple, flow.Size)
), f"size parameter not correct, please check!"
assert isinstance(
new_dtype, flow.dtype
), f"dtype parameter not correct, please check!"
if new_placement is not None:
assert isinstance(
new_placement, flow.placement
), f"device parameter not correct, please check!"
assert isinstance(
new_sbp, flow.sbp.sbp
), f"device parameter not correct, please check!"
else:
assert isinstance(
new_device, (str, flow.device)
), f"device parameter not correct, please check!"
assert isinstance(
new_requires_grad, bool
), f"requires_grad parameter not correct, please check!"
if placement is not None:
res = flow._C.global_constant(
new_size, 1.0, dtype=new_dtype, placement=placement, sbp=sbp
)
else:
res = flow._C.constant(new_size, 1.0, dtype=new_dtype, device=new_device)
res.requires_grad = new_requires_grad
return res
def new_zeros_op(
x, size=None, dtype=None, device=None, placement=None, sbp=None, requires_grad=False
):
if isinstance(device, str):
device = flow.device(device)
if size is None or len(size) == 0:
new_size = x.shape
else:
new_size = _handle_size_arg(size)
new_dtype = dtype
new_device = device
new_placement = placement
new_sbp = sbp
new_requires_grad = requires_grad
if dtype is None:
new_dtype = x.dtype
if device is None:
new_device = x.device if x.is_local else None
if placement is None:
new_placement = x.placement if x.is_global else None
if sbp is None:
new_sbp = x.sbp if x.is_global else None
if new_placement is not None:
assert (
device is None
), "argument 'device' must be None when argument 'placement' exist"
assert (
new_sbp is not None
), "argument 'sbp' must not be None when argument 'placement' exist"
assert isinstance(
new_size, (int, tuple, list, flow.Size)
), f"argument 'size' must be tuple of ints, not %s" % (type(new_size))
assert isinstance(
new_dtype, flow.dtype
), f"argument 'dtype' must be flow.dtype, not %s" % (type(new_dtype))
if new_placement is not None:
assert isinstance(
new_placement, flow.placement
), f"argument 'placement' must be flow.placement, not %s" % (
type(new_placement)
)
assert isinstance(
new_sbp, (flow.sbp.sbp, tuple)
), f"argument 'sbp' must be flow.sbp.sbp, not %s" % (type(new_sbp))
else:
assert isinstance(
new_device, (str, flow.device)
), f"argument 'device' must be flow.device, not %s" % (type(new_device))
assert isinstance(
new_requires_grad, bool
), f"argument 'requires_grad' must be bool, not %s" % (type(new_requires_grad))
if new_placement is not None:
res = flow._C.global_constant(
new_size, 0.0, dtype=new_dtype, placement=new_placement, sbp=new_sbp
)
else:
res = flow._C.constant(new_size, 0.0, dtype=new_dtype, device=new_device)
res.requires_grad = new_requires_grad
return res
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 36.442971
| 196
| 0.628576
|
dd8b44021ee9134ebd2a4d988f8f4d28a476fdbd
| 3,543
|
py
|
Python
|
lz4steg.py
|
jvarho/lz77steg
|
09b78ed226139e554d21fc52b599502661d81a3a
|
[
"MIT"
] | null | null | null |
lz4steg.py
|
jvarho/lz77steg
|
09b78ed226139e554d21fc52b599502661d81a3a
|
[
"MIT"
] | null | null | null |
lz4steg.py
|
jvarho/lz77steg
|
09b78ed226139e554d21fc52b599502661d81a3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2014, Jan Varho <jan@varho.org>
# Some rights reserved, see COPYING
import argparse
import sys
from lz77steg import LZ77Steg, _hash
class LZ4Steg(LZ77Steg):
TOK_LITERAL = 1
TOK_MATCH = 2
def init(self, cover):
super(LZ4Steg, self).init(cover)
self.end = self.get_littleendian(4)
def get_tokens(self):
'''Generator for tokens, must be implemented'''
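        # A brief sketch of the LZ4 sequence layout this generator walks
        # (informal, matching the reads below): a token byte whose high nibble
        # is the literal length and low nibble the match length, optional
        # 255-valued extension bytes for either length, the literals, a 2-byte
        # little-endian match offset, and a minimum match length of 4 added on.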
while self.pos < self.end:
token = self.get_cbyte()
llen = token >> 4
mlen = token & 0xf
if llen == 15:
while self.cover[self.cpos] == 255:
llen += self.get_cbyte()
llen += self.get_cbyte()
if llen:
yield (self.TOK_LITERAL, llen)
if self.pos == self.end:
return
opos = self.cpos
moff = self.get_littleendian(2)
if mlen == 15:
while self.cover[self.cpos] == 255:
mlen += self.get_cbyte()
mlen += self.get_cbyte()
mlen += 4
yield (self.TOK_MATCH, mlen, moff, opos)
def is_match(self, t):
'''Is token a match token?'''
return t[0] == self.TOK_MATCH
def update_window(self, t):
'''Update window with token'''
if t[0] == self.TOK_LITERAL:
self.update_window_literal(t[1])
elif t[0] == self.TOK_MATCH:
self.update_window_match(t[1], t[2])
else:
raise TypeError
def list_possible_matches_t(self, t):
'''Return a list of possible matches for t'''
tt, mlen, moff, opos = t
return self.list_possible_matches(mlen, moff)
def update_match(self, t, nmatch):
'''Updates cover token to new match, must be implemented'''
self.cover[t[3]] = nmatch & 0xff
self.cover[t[3] + 1] = nmatch >> 8
def get_index(self, mlist, t):
'''Get the index of the match'''
return mlist.index(t[2])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LZ4 steganography')
action = parser.add_mutually_exclusive_group()
action.add_argument('-d', '--decode', action='store_true')
action.add_argument('-m', '--message')
output = parser.add_mutually_exclusive_group()
output.add_argument('-i', '--inplace', action='store_true')
output.add_argument('-o', '--output')
parser.add_argument('FILE')
args = parser.parse_args()
with open(args.FILE) as f:
cover = f.read()
if args.decode:
message = LZ4Steg().retrieve(cover, nullterm=True)
print message
elif args.message:
assert len(args.message)
cover = LZ4Steg().store(cover, args.message, nullterm=True)
if args.output:
with open(args.output, 'wb') as f:
f.write(cover)
elif args.inplace:
with open(args.FILE, 'wb') as f:
f.write(cover)
else:
sys.stdout.write(cover)
else:
s = LZ4Steg()
cap, pcap = s.scan(cover)
clen = len(cover)
su = 'Size uncompressed %d' % s.end
dl = len(su) - len('Size uncompressed ')
d = '%' + str(dl) + 'd'
print su
print ('Size compressed '+d+' (%.2f%%)') % (clen, clen * 100. / s.end)
print ('Storage '+d+' (%.2f%%)') % (cap, cap * 100. / clen)
print ('Storage potential '+d+' (%.2f%%)') % (pcap, pcap * 100. / clen)
| 29.525
| 80
| 0.544736
|
c31eead604399b77c2a3111c2ca12b71f51b722a
| 12,036
|
py
|
Python
|
zerver/data_import/gitter.py
|
pranayshahxyz/zulip
|
3da483487af79fde9dce2d21124dfa39b94936a5
|
[
"Apache-2.0"
] | 1
|
2020-04-09T18:34:44.000Z
|
2020-04-09T18:34:44.000Z
|
zerver/data_import/gitter.py
|
pranayshahxyz/zulip
|
3da483487af79fde9dce2d21124dfa39b94936a5
|
[
"Apache-2.0"
] | null | null | null |
zerver/data_import/gitter.py
|
pranayshahxyz/zulip
|
3da483487af79fde9dce2d21124dfa39b94936a5
|
[
"Apache-2.0"
] | null | null | null |
import os
import dateutil.parser
import logging
import subprocess
import ujson
from django.conf import settings
from django.forms.models import model_to_dict
from django.utils.timezone import now as timezone_now
from typing import Any, Dict, List, Set, Tuple
from zerver.models import UserProfile, Recipient
from zerver.lib.export import MESSAGE_BATCH_CHUNK_SIZE
from zerver.data_import.import_util import ZerverFieldsT, build_zerver_realm, \
build_avatar, build_subscription, build_recipient, build_usermessages, \
build_defaultstream, process_avatars, build_realm, build_stream, \
build_message, create_converted_data_files, make_subscriber_map
# stubs
GitterDataT = List[Dict[str, Any]]
realm_id = 0
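# Each element of GitterDataT is one exported gitter message. A minimal sketch
# of the shape this converter assumes (field names taken from the accesses
# below; values are purely illustrative):
#
#   {
#       "fromUser": {"id": "...", "displayName": "...", "username": "...",
#                    "avatarUrl": "..."},
#       "sent": "2018-01-01T00:00:00.000Z",
#       "text": "hello @someone",
#       "mentions": [{"userId": "...", "screenName": "someone"}],
#   }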
def gitter_workspace_to_realm(domain_name: str, gitter_data: GitterDataT,
realm_subdomain: str) -> Tuple[ZerverFieldsT,
List[ZerverFieldsT],
Dict[str, int]]:
"""
Returns:
1. realm, Converted Realm data
    2. avatars, which is a list mapping avatars to zulip avatar records.json
3. user_map, which is a dictionary to map from gitter user id to zulip user id
"""
NOW = float(timezone_now().timestamp())
zerver_realm = build_zerver_realm(realm_id, realm_subdomain, NOW, 'Gitter') # type: List[ZerverFieldsT]
realm = build_realm(zerver_realm, realm_id, domain_name)
zerver_userprofile, avatars, user_map = build_userprofile(int(NOW), domain_name, gitter_data)
zerver_stream, zerver_defaultstream = build_stream_and_defaultstream(int(NOW))
zerver_recipient, zerver_subscription = build_recipient_and_subscription(
zerver_userprofile, zerver_stream)
realm['zerver_userprofile'] = zerver_userprofile
realm['zerver_stream'] = zerver_stream
realm['zerver_defaultstream'] = zerver_defaultstream
realm['zerver_recipient'] = zerver_recipient
realm['zerver_subscription'] = zerver_subscription
return realm, avatars, user_map
def build_userprofile(timestamp: Any, domain_name: str,
gitter_data: GitterDataT) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT],
Dict[str, int]]:
"""
Returns:
1. zerver_userprofile, which is a list of user profile
    2. avatar_list, which is a list mapping avatars to zulip avatar records.json
    3. user_map, which is a dictionary mapping gitter user id to zulip user id
"""
logging.info('######### IMPORTING USERS STARTED #########\n')
zerver_userprofile = []
avatar_list = [] # type: List[ZerverFieldsT]
user_map = {} # type: Dict[str, int]
user_id = 0
for data in gitter_data:
if data['fromUser']['id'] not in user_map:
user_data = data['fromUser']
user_map[user_data['id']] = user_id
email = get_user_email(user_data, domain_name)
build_avatar(user_id, realm_id, email, user_data['avatarUrl'],
timestamp, avatar_list)
# Build userprofile object
userprofile = UserProfile(
full_name=user_data['displayName'],
short_name=user_data['username'],
id=user_id,
email=email,
delivery_email=email,
avatar_source='U',
pointer=-1,
date_joined=timestamp,
last_login=timestamp)
userprofile_dict = model_to_dict(userprofile)
# Set realm id separately as the corresponding realm is not yet a Realm model
# instance
userprofile_dict['realm'] = realm_id
zerver_userprofile.append(userprofile_dict)
user_id += 1
logging.info('######### IMPORTING USERS FINISHED #########\n')
return zerver_userprofile, avatar_list, user_map
def get_user_email(user_data: ZerverFieldsT, domain_name: str) -> str:
# TODO Get user email from github
email = ("%s@users.noreply.github.com" % (user_data['username'],))
return email
def build_stream_and_defaultstream(timestamp: Any) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT]]:
logging.info('######### IMPORTING STREAM STARTED #########\n')
# We have only one stream for gitter export
stream_name = 'from gitter'
stream_description = "Imported from gitter"
stream_id = 0
stream = build_stream(timestamp, realm_id, stream_name, stream_description,
stream_id)
defaultstream = build_defaultstream(realm_id=realm_id, stream_id=stream_id,
defaultstream_id=0)
logging.info('######### IMPORTING STREAMS FINISHED #########\n')
return [stream], [defaultstream]
def build_recipient_and_subscription(
zerver_userprofile: List[ZerverFieldsT],
zerver_stream: List[ZerverFieldsT]) -> Tuple[List[ZerverFieldsT],
List[ZerverFieldsT]]:
"""
Returns:
1. zerver_recipient, which is a list of mapped recipient
2. zerver_subscription, which is a list of mapped subscription
"""
zerver_recipient = []
zerver_subscription = []
recipient_id = subscription_id = 0
# For stream
# We have only one recipient, because we have only one stream
# Hence 'recipient_id'=0 corresponds to 'stream_id'=0
recipient = build_recipient(0, recipient_id, Recipient.STREAM)
zerver_recipient.append(recipient)
for user in zerver_userprofile:
subscription = build_subscription(recipient_id, user['id'], subscription_id)
zerver_subscription.append(subscription)
subscription_id += 1
recipient_id += 1
# For users
for user in zerver_userprofile:
recipient = build_recipient(user['id'], recipient_id, Recipient.PERSONAL)
subscription = build_subscription(recipient_id, user['id'], subscription_id)
zerver_recipient.append(recipient)
zerver_subscription.append(subscription)
recipient_id += 1
subscription_id += 1
return zerver_recipient, zerver_subscription
def convert_gitter_workspace_messages(gitter_data: GitterDataT, output_dir: str,
subscriber_map: Dict[int, Set[int]],
user_map: Dict[str, int],
user_short_name_to_full_name: Dict[str, str],
chunk_size: int=MESSAGE_BATCH_CHUNK_SIZE) -> None:
"""
Messages are stored in batches
"""
logging.info('######### IMPORTING MESSAGES STARTED #########\n')
message_id = 0
recipient_id = 0 # Corresponding to stream "gitter"
low_index = 0
upper_index = low_index + chunk_size
dump_file_id = 1
while True:
message_json = {}
zerver_message = []
zerver_usermessage = [] # type: List[ZerverFieldsT]
message_data = gitter_data[low_index: upper_index]
if len(message_data) == 0:
break
for message in message_data:
message_time = dateutil.parser.parse(message['sent']).timestamp()
mentioned_user_ids = get_usermentions(message, user_map,
user_short_name_to_full_name)
rendered_content = None
topic_name = 'imported from gitter'
user_id = user_map[message['fromUser']['id']]
zulip_message = build_message(topic_name, float(message_time), message_id, message['text'],
rendered_content, user_id, recipient_id)
zerver_message.append(zulip_message)
build_usermessages(
zerver_usermessage=zerver_usermessage,
subscriber_map=subscriber_map,
recipient_id=recipient_id,
mentioned_user_ids=mentioned_user_ids,
message_id=message_id,
is_private=False,
)
message_id += 1
message_json['zerver_message'] = zerver_message
message_json['zerver_usermessage'] = zerver_usermessage
message_filename = os.path.join(output_dir, "messages-%06d.json" % (dump_file_id,))
logging.info("Writing Messages to %s\n" % (message_filename,))
write_data_to_file(os.path.join(message_filename), message_json)
low_index = upper_index
upper_index = chunk_size + low_index
dump_file_id += 1
logging.info('######### IMPORTING MESSAGES FINISHED #########\n')
def get_usermentions(message: Dict[str, Any], user_map: Dict[str, int],
user_short_name_to_full_name: Dict[str, str]) -> List[int]:
mentioned_user_ids = []
if 'mentions' in message:
for mention in message['mentions']:
if mention.get('userId') in user_map:
gitter_mention = '@%s' % (mention['screenName'],)
if mention['screenName'] not in user_short_name_to_full_name:
logging.info("Mentioned user %s never sent any messages, so has no full name data" %
(mention['screenName'],))
full_name = mention['screenName']
else:
full_name = user_short_name_to_full_name[mention['screenName']]
zulip_mention = ('@**%s**' % (full_name,))
message['text'] = message['text'].replace(gitter_mention, zulip_mention)
mentioned_user_ids.append(user_map[mention['userId']])
return mentioned_user_ids
def do_convert_data(gitter_data_file: str, output_dir: str, threads: int=6) -> None:
# Subdomain is set by the user while running the import commands
realm_subdomain = ""
domain_name = settings.EXTERNAL_HOST
os.makedirs(output_dir, exist_ok=True)
# output directory should be empty initially
if os.listdir(output_dir):
raise Exception("Output directory should be empty!")
# Read data from the gitter file
with open(gitter_data_file) as fp:
gitter_data = ujson.load(fp)
realm, avatar_list, user_map = gitter_workspace_to_realm(
domain_name, gitter_data, realm_subdomain)
subscriber_map = make_subscriber_map(
zerver_subscription=realm['zerver_subscription'],
)
# For user mentions
user_short_name_to_full_name = {}
for userprofile in realm['zerver_userprofile']:
user_short_name_to_full_name[userprofile['short_name']] = userprofile['full_name']
convert_gitter_workspace_messages(
gitter_data, output_dir, subscriber_map, user_map,
user_short_name_to_full_name)
avatar_folder = os.path.join(output_dir, 'avatars')
avatar_realm_folder = os.path.join(avatar_folder, str(realm_id))
os.makedirs(avatar_realm_folder, exist_ok=True)
avatar_records = process_avatars(avatar_list, avatar_folder, realm_id, threads)
attachment = {"zerver_attachment": []} # type: Dict[str, List[Any]]
# IO realm.json
create_converted_data_files(realm, output_dir, '/realm.json')
# IO emoji records
create_converted_data_files([], output_dir, '/emoji/records.json')
# IO avatar records
create_converted_data_files(avatar_records, output_dir, '/avatars/records.json')
# IO uploads records
create_converted_data_files([], output_dir, '/uploads/records.json')
# IO attachments records
create_converted_data_files(attachment, output_dir, '/attachment.json')
subprocess.check_call(["tar", "-czf", output_dir + '.tar.gz', output_dir, '-P'])
logging.info('######### DATA CONVERSION FINISHED #########\n')
logging.info("Zulip data dump created at %s" % (output_dir,))
def write_data_to_file(output_file: str, data: Any) -> None:
with open(output_file, "w") as f:
f.write(ujson.dumps(data, indent=4))
| 42.083916
| 108
| 0.640329
|
44d7a50a47788884bd8198a0fa340ce382afe25b
| 3,823
|
py
|
Python
|
dhcp/forms.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 22
|
2015-01-16T01:36:32.000Z
|
2020-06-08T00:46:18.000Z
|
dhcp/forms.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 9
|
2019-03-15T11:39:32.000Z
|
2019-04-30T00:59:50.000Z
|
dhcp/forms.py
|
jlin/inventory
|
c098c98e570c3bf9fadfd811eb75e1213f6ea428
|
[
"BSD-3-Clause"
] | 13
|
2015-01-13T20:56:22.000Z
|
2022-02-23T06:01:17.000Z
|
from django import forms
from django.forms.extras.widgets import SelectDateWidget
import models
class AddDHCPScopeForm(forms.Form):
scope_name = forms.CharField(max_length=32, required=True, widget=forms.TextInput(attrs={'size':'48'}))
scope_description = forms.CharField(max_length=32, required=True, widget=forms.TextInput(attrs={'size':'48'}))
class DHCPScopeOverrideForm(forms.ModelForm):
dhcp_scope = forms.CharField(max_length=32, required=True, widget=forms.TextInput(attrs={'size':'48'}))
#override_text = forms.CharField(max_length=32, required=True, widget=forms.Textarea(attrs={'rows':'60', 'cols':'80'}))
class Meta:
model = models.DHCPOverride
class EditDHCPScopeForm(forms.Form):
SUBNET_CHOICES = (
('255.255.224.0', '255.255.224.0'),
('255.255.240.0', '255.255.240.0'),
('255.255.248.0', '255.255.248.0'),
('255.255.252.0', '255.255.252.0'),
('255.255.254.0', '255.255.254.0'),
('255.255.255.0', '255.255.255.0'),
('255.255.255.128', '255.255.255.128'),
('255.255.255.192', '255.255.255.192'),
('255.255.255.224', '255.255.255.224'),
('255.255.255.240', '255.255.255.240'),
('255.255.255.248', '255.255.255.248'),
('255.255.255.252', '255.255.255.252'),
('255.255.255.254', '255.255.255.254')
)
YES_NO_CHOICES = (
(0, 'No'),
(1, 'Yes'),
)
CHOICES = (
('True', 'True'),
('False', 'False'),
)
scope_name = forms.CharField(max_length=32,widget=forms.TextInput(attrs={'size':'48'}))
domain_name = forms.CharField(max_length=64, required=False, widget=forms.TextInput(attrs={'size':'48'}))
router = forms.CharField(max_length=64, required=False, widget=forms.TextInput(attrs={'size':'48'}))
scope_start = forms.CharField(max_length=64, required=True, widget=forms.TextInput(attrs={'size':'48'}))
scope_end = forms.CharField(max_length=64, required=True, widget=forms.TextInput(attrs={'size':'48'}))
scope_netmask = forms.CharField(max_length=64, required=True, widget=forms.Select(choices=SUBNET_CHOICES))
pool_start = forms.CharField(max_length=64, required=True, widget=forms.TextInput(attrs={'size':'48'}))
pool_end = forms.CharField(max_length=64, required=True, widget=forms.TextInput(attrs={'size':'48'}))
ntp_server1 = forms.CharField(max_length=64, required=False, widget=forms.TextInput(attrs={'size':'48'}))
ntp_server2 = forms.CharField(max_length=64, required=False, widget=forms.TextInput(attrs={'size':'48'}))
dns_server1 = forms.CharField(max_length=64, required=False, widget=forms.TextInput(attrs={'size':'48'}))
dns_server2 = forms.CharField(max_length=64, required=False, widget=forms.TextInput(attrs={'size':'48'}))
allow_booting = forms.CharField(max_length=64, required=False, widget=forms.Select(choices=CHOICES))
allow_bootp = forms.CharField(max_length=64, required=False, widget=forms.Select(choices=CHOICES))
class Meta:
fields = [
"scope_name",
"scope_start",
'scope_netmask',
'scope_notes',
'filename',
'pool_start',
'pool_end',
'pool_deny_dynamic_bootp_agents',
'allow_booting',
'allow_bootp',
'filename',
'option_subnet_mask',
'ntp_server1',
'ntp_server2',
'dns_server1',
'dns_server2',
'router',
'domain_name',
'option_routers'
]
| 50.302632
| 123
| 0.588805
|
faeafd869188374215d860a3e647c2b440b2371f
| 418
|
py
|
Python
|
vtkplotter_examples/volumetric/probePlaneUGrid.py
|
ismarou/vtkplotter-examples
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
[
"MIT"
] | 4
|
2020-07-30T02:38:29.000Z
|
2021-09-12T14:30:18.000Z
|
vtkplotter_examples/volumetric/probePlaneUGrid.py
|
ismarou/vtkplotter-examples
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
[
"MIT"
] | null | null | null |
vtkplotter_examples/volumetric/probePlaneUGrid.py
|
ismarou/vtkplotter-examples
|
1eefcc026be169ab7a77a5bce6dec8044c33b554
|
[
"MIT"
] | null | null | null |
"""Probe a vtkUnStructuredGrid with a plane"""
from vtkplotter import *
# the same could be done with vtkRectilinearGrid, etc.
data = loadUnStructuredGrid(datadir+"ugrid.vtk")
# create the outline of the data
outermesh = Mesh(data).alpha(0.2).wireframe()
orig = data.GetCenter()
pl = probePlane(data, origin=orig, normal=(0.1,0.2,1))
#pl.printInfo()
#pl.pointColors('scalars', cmap='hot')
show(pl, outermesh, axes=1)
| 24.588235
| 54
| 0.727273
|
58c6d8c54c31cdf1aae4643f65e4ab4e67bc8c3f
| 731
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/ListViewItemMouseHoverEventArgs.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/ListViewItemMouseHoverEventArgs.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/ListViewItemMouseHoverEventArgs.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
class ListViewItemMouseHoverEventArgs(EventArgs):
"""
Provides data for the System.Windows.Forms.ListView.ItemMouseHover event.
ListViewItemMouseHoverEventArgs(item: ListViewItem)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return ListViewItemMouseHoverEventArgs()
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y] """
pass
@staticmethod
def __new__(self,item):
""" __new__(cls: type,item: ListViewItem) """
pass
Item=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the item the mouse pointer is currently hovering over.
Get: Item(self: ListViewItemMouseHoverEventArgs) -> ListViewItem
"""
| 28.115385
| 76
| 0.70725
|
79813757233743f26c579fa95ec121acc5f9ff67
| 261
|
py
|
Python
|
pyvisdk/enums/host_unresolved_vmfs_resolution_spec_vmfs_uuid_resolution.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/enums/host_unresolved_vmfs_resolution_spec_vmfs_uuid_resolution.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/enums/host_unresolved_vmfs_resolution_spec_vmfs_uuid_resolution.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
HostUnresolvedVmfsResolutionSpecVmfsUuidResolution = Enum(
'forceMount',
'resignature',
)
| 18.642857
| 58
| 0.524904
|
30ddcd586781074a0952c680467261ec209612bf
| 5,871
|
py
|
Python
|
C/.ycm_extra_conf.py
|
Kwpolska/roman_numerals
|
887e648a39fa73583f4b7cf330436f94bf88325e
|
[
"BSD-3-Clause"
] | null | null | null |
C/.ycm_extra_conf.py
|
Kwpolska/roman_numerals
|
887e648a39fa73583f4b7cf330436f94bf88325e
|
[
"BSD-3-Clause"
] | null | null | null |
C/.ycm_extra_conf.py
|
Kwpolska/roman_numerals
|
887e648a39fa73583f4b7cf330436f94bf88325e
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c99',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
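#
# For reference, a single entry in a CMake-generated compile_commands.json
# looks roughly like this (paths illustrative only):
#
#   {
#     "directory": "/path/to/build",
#     "command": "/usr/bin/cc -std=c99 -Wall -o foo.o -c /path/to/src/foo.c",
#     "file": "/path/to/src/foo.c"
#   }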
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
| 35.79878
| 80
| 0.720491
|
5ce744a9d09ba9ccc548371f8d8c425687b14425
| 3,558
|
py
|
Python
|
sktime/forecasting/compose/_ensemble.py
|
MFaroukB/sktime
|
29932fc071ab04797bc2f5c00cd533726b31eb46
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/forecasting/compose/_ensemble.py
|
MFaroukB/sktime
|
29932fc071ab04797bc2f5c00cd533726b31eb46
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/forecasting/compose/_ensemble.py
|
MFaroukB/sktime
|
29932fc071ab04797bc2f5c00cd533726b31eb46
|
[
"BSD-3-Clause"
] | 1
|
2021-04-30T08:12:18.000Z
|
2021-04-30T08:12:18.000Z
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning"]
__all__ = ["EnsembleForecaster"]
import pandas as pd
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._meta import _HeterogenousEnsembleForecaster
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
class EnsembleForecaster(
_OptionalForecastingHorizonMixin, _HeterogenousEnsembleForecaster
):
"""Ensemble of forecasters
Parameters
----------
forecasters : list of (str, estimator) tuples
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for fit. None means 1 unless
in a joblib.parallel_backend context.
-1 means using all processors.
aggfunc : str, {'mean', 'median', 'min', 'max'}, (default='mean')
The function to aggregate prediction from individual forecasters.
"""
_required_parameters = ["forecasters"]
def __init__(self, forecasters, n_jobs=None, aggfunc="mean"):
super(EnsembleForecaster, self).__init__(forecasters=forecasters, n_jobs=n_jobs)
self.aggfunc = aggfunc
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
            The forecasting horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
self._is_fitted = False
self._set_y_X(y, X)
self._set_fh(fh)
names, forecasters = self._check_forecasters()
self._fit_forecasters(forecasters, y, X, fh)
self._is_fitted = True
return self
def update(self, y, X=None, update_params=True):
"""Update fitted parameters
Parameters
----------
y : pd.Series
X : pd.DataFrame
update_params : bool, optional (default=True)
Returns
-------
self : an instance of self
"""
self.check_is_fitted()
self._update_y_X(y, X)
for forecaster in self.forecasters_:
forecaster.update(y, X, update_params=update_params)
return self
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""return the predicted reduction
Parameters
----------
fh : int, list or np.array, optional (default=None)
X : pd.DataFrame
return_pred_int : boolean, optional (default=False)
        alpha : float, optional (default=DEFAULT_ALPHA)
Returns
-------
y_pred : pd.Series
Aggregated predictions.
"""
if return_pred_int:
raise NotImplementedError()
y_pred = pd.concat(self._predict_forecasters(fh, X), axis=1)
valid_aggfuncs = ("median", "mean", "min", "max")
if self.aggfunc not in valid_aggfuncs:
raise ValueError(f"Invalid `aggfunc`. Please use one of {valid_aggfuncs}")
if self.aggfunc == "median":
return y_pred.median(axis=1)
elif self.aggfunc == "min":
return y_pred.min(axis=1)
elif self.aggfunc == "max":
return y_pred.max(axis=1)
else:
return y_pred.mean(axis=1)
| 31.767857
| 88
| 0.61692
|
e88f5c7e5c5526a23be42528470ea313e6284e45
| 420
|
py
|
Python
|
app/commands.py
|
nattesharan/delhivery
|
3ef419d403b27fc490a8557590d81f50a8dbfc4f
|
[
"MIT"
] | 2
|
2019-06-02T04:32:54.000Z
|
2021-01-05T12:27:50.000Z
|
app/commands.py
|
nattesharan/delhivery
|
3ef419d403b27fc490a8557590d81f50a8dbfc4f
|
[
"MIT"
] | null | null | null |
app/commands.py
|
nattesharan/delhivery
|
3ef419d403b27fc490a8557590d81f50a8dbfc4f
|
[
"MIT"
] | null | null | null |
from flask_script import Command
from app.settings import ROLES
from delhivery.models import DelhiveryHierarchy
class CreateRoles(Command):
def run(self):
print("Creating roles in database")
for role in ROLES:
DelhiveryHierarchy.objects.create(name=role['name'], role=role['role'], features=role['features'])
def add_command(manager):
manager.add_command('initroles', CreateRoles())
| 35
| 110
| 0.730952
|
15552e06d8192e717651395e0c5336c7682d37ce
| 15,273
|
py
|
Python
|
src/gluonts/model/san/_layers.py
|
unibeck/gluon-ts
|
73d0e9de689ab4bb014cdb4423642dff030e7678
|
[
"Apache-2.0"
] | 1
|
2019-10-15T01:47:40.000Z
|
2019-10-15T01:47:40.000Z
|
src/gluonts/model/san/_layers.py
|
unibeck/gluon-ts
|
73d0e9de689ab4bb014cdb4423642dff030e7678
|
[
"Apache-2.0"
] | null | null | null |
src/gluonts/model/san/_layers.py
|
unibeck/gluon-ts
|
73d0e9de689ab4bb014cdb4423642dff030e7678
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import math
from typing import List, Optional, Tuple
import mxnet as mx
# Third-party import
import numpy as np
from mxnet import init
from mxnet.gluon import HybridBlock, Parameter, nn
from mxnet.gluon.contrib.nn import HybridConcurrent
from gluonts.core.component import validated
from gluonts.mx import Tensor
from gluonts.mx.block.feature import FeatureEmbedder
def _torch_gather(F, data: Tensor, idx: Tensor, axis: int):
"""
Pytorch-style gather_nd
"""
ndim = 4
if axis < 0:
axis = ndim + axis
mx_idx = []
for dim in range(ndim):
if dim == axis:
d_idx = F.broadcast_like(idx, data)
else:
d_idx = F.contrib.arange_like(data, axis=dim)
for _ in range(dim):
d_idx = F.expand_dims(data=d_idx, axis=0)
for _ in range(ndim - dim - 1):
d_idx = F.expand_dims(data=d_idx, axis=-1)
d_idx = F.broadcast_like(d_idx, data)
mx_idx.append(d_idx)
mx_idx = F.stack(*mx_idx, axis=0)
return F.gather_nd(data, mx_idx)
class SinusoidalPositionalEmbedding(HybridBlock):
@validated()
def __init__(self, d_embed: int, **kwargs):
super(SinusoidalPositionalEmbedding, self).__init__(**kwargs)
if d_embed % 2 != 0:
raise ValueError(
"sinusoidal embedding must have an even dimension"
)
self.d_embed = d_embed
def hybrid_forward(self, F, pos_seq: Tensor) -> Tensor:
inv_freq = F.arange(0, self.d_embed, 2)
inv_freq = F.exp((inv_freq / self.d_embed) * -math.log(1e4))
pos_seq = F.reshape(data=pos_seq, shape=(0, 0, 1))
pos_seq = F.broadcast_mul(pos_seq, inv_freq)
return F.concat(F.sin(pos_seq), F.cos(pos_seq), dim=-1)
class CausalConv1D(HybridBlock):
@validated()
def __init__(
self,
channels: int,
kernel_size: int,
activation: str = "tanh",
**kwargs,
):
super(CausalConv1D, self).__init__(**kwargs)
self.kernel_size = kernel_size
self.channels = channels
with self.name_scope():
self.net = nn.Conv1D(
channels,
kernel_size,
use_bias=False,
activation="tanh",
weight_initializer=init.Xavier(),
)
def hybrid_forward(self, F, x: Tensor, *args) -> Tensor:
pad = (
F.zeros_like(x)
.slice_axis(axis=1, begin=0, end=1)
.tile(reps=(1, self.kernel_size - 1, 1))
)
x = F.concat(pad, x, dim=1)
x = F.swapaxes(x, dim1=1, dim2=2)
x = self.net(x)
x = F.swapaxes(x, dim1=1, dim2=2)
return x
class SelfAttention(HybridBlock):
@validated()
def __init__(
self,
d_hidden: int,
kernel_sizes: List[int],
n_head: int = 1,
bias: bool = True,
bidirectional: bool = False,
dist_enc: Optional[str] = None,
share_values: bool = False,
dropout: float = 0.0,
temperature: float = 1.0,
**kwargs,
):
"""
Self-attention module with q,k,v from the same input
Parameters
----------
d_hidden : int
hidden dimension
        kernel_sizes : List[int]
kernel sizes of convolutions to generate queries and keys
n_head : int, optional
number of attention heads, by default 1
bias : bool, optional
add bias term in input and output projections, by default True
bidirectional : bool, optional
if False, add a mask to avoid backward attention, by default False
dist_enc : Optional[str], optional
add relative distance embeddings to dot-product attention, can be
'add' (linearly combine key and dist),
'dot' (dot product between key and dist),
or None (disabled),
by default None
share_values : bool, optional
            if True, a value representation is shared by all attention heads, by default False
ref. https://arxiv.org/abs/1912.09363
dropout : float, optional
dropout rate, by default 0.0
temperature : float, optional
softmax temperature, by default 1.0
"""
super(SelfAttention, self).__init__(**kwargs)
n_groups = len(kernel_sizes)
assert (
d_hidden % n_head == 0
), f"hidden dim {d_hidden} cannot be split into {n_head} heads."
assert (
d_hidden % n_groups == 0
), f"hidden dim {d_hidden} cannot be split into {n_groups} groups."
assert (
n_head % n_groups == 0
), f"num_heads {n_head} cannot be allocated for {n_groups} groups."
self.d_hidden = d_hidden
self.kernel_sizes = kernel_sizes
self.n_groups = n_groups
self.d_group = self.d_hidden // self.n_groups
self.n_head = n_head
self.d_head = self.d_hidden // self.n_head
self.bias = bias
self.dist_enc = dist_enc
self.bidirectional = bidirectional
self.share_values = share_values
self.temperature = temperature
with self.name_scope():
self.qk_proj = HybridConcurrent(axis=-1, prefix="qk_proj_")
for ksize in self.kernel_sizes:
self.qk_proj.add(
CausalConv1D(
channels=self.d_group * 2,
kernel_size=ksize,
prefix=f"conv{ksize}_",
)
)
self.v_proj = nn.Dense(
units=self.d_head if self.share_values else d_hidden,
use_bias=bias,
flatten=False,
weight_initializer=init.Xavier(),
prefix="v_proj_",
)
self.out_proj = nn.Dense(
units=d_hidden,
use_bias=bias,
flatten=False,
weight_initializer=init.Xavier(),
prefix="out_proj_",
)
if self.dist_enc is not None:
assert self.dist_enc in [
"dot",
"add",
], f"distance encoding type {self.dist_enc} is not supported"
self.posemb = SinusoidalPositionalEmbedding(d_hidden)
self.pos_proj = nn.Dense(
units=d_hidden,
use_bias=bias,
flatten=False,
weight_initializer=init.Xavier(),
prefix="pos_proj_",
)
if self.dist_enc == "add":
self._ctt_bias_weight = Parameter(
"_ctt_bias_weight",
shape=(1, n_head, 1, self.d_head),
init=init.Xavier(),
)
self._pos_bias_weight = Parameter(
"_pos_bias_weight",
shape=(1, n_head, 1, self.d_head),
init=init.Xavier(),
)
self.dropout = nn.Dropout(dropout)
def _split_head(self, F, x: Tensor) -> Tensor:
"""
Split hidden state into multi-heads
Args
----------
x : Tensor [batch, length, d_hidden]
Returns
-------
Tensor [batch, n_head, length, d_head]
"""
x = F.reshape(data=x, shape=(0, 0, -4, self.n_head, self.d_head))
x = F.swapaxes(data=x, dim1=1, dim2=2)
return x
def _merge_head(self, F, x: Tensor) -> Tensor:
"""
Merge multi-heads into one hidden state
Args
----------
x : Tensor [batch, n_head, length, d_head]
Returns
-------
Tensor [batch, length, d_hidden]
"""
x = F.swapaxes(data=x, dim1=1, dim2=2)
x = F.reshape(data=x, shape=(0, 0, self.d_hidden))
return x
def _compute_qkv(self, F, x: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
qk = self.qk_proj(x)
qk = F.split(qk, num_outputs=self.n_groups * 2, axis=-1)
q = F.concat(*qk[0::2], dim=-1)
k = F.concat(*qk[1::2], dim=-1)
q = self._split_head(F, q)
k = self._split_head(F, k)
v = self.v_proj(x)
if self.share_values:
v = F.broadcast_like(v.expand_dims(axis=1), k)
else:
v = self._split_head(F, v)
return q, k, v
def _apply_mask(
self, F, score: Tensor, key_mask: Optional[Tensor]
) -> Tensor:
if not self.bidirectional:
k_idx = F.contrib.arange_like(score, axis=-1)
k_idx = (
k_idx.expand_dims(axis=0)
.expand_dims(axis=0)
.expand_dims(axis=0)
)
q_idx = F.contrib.arange_like(score, axis=-2)
q_idx = (
q_idx.expand_dims(axis=-1)
.expand_dims(axis=0)
.expand_dims(axis=0)
)
unidir_mask = F.broadcast_lesser_equal(k_idx, q_idx)
unidir_mask = F.broadcast_like(unidir_mask, score)
score = F.where(unidir_mask, score, F.ones_like(score) * 1e-9)
if key_mask is not None:
mem_mask = key_mask.squeeze(axis=-1)
mem_mask = mem_mask.expand_dims(axis=1) # head
mem_mask = mem_mask.expand_dims(axis=2) # query
mem_mask = F.broadcast_like(mem_mask, score)
score = F.where(mem_mask, score, F.ones_like(score) * 1e-9)
return score
def _compute_attn_score(
self,
F,
q: Tensor,
k: Tensor,
mask: Optional[Tensor],
_ctt_bias_weight: Optional[Tensor],
_pos_bias_weight: Optional[Tensor],
) -> Tensor:
score = F.batch_dot(lhs=q, rhs=k, transpose_b=True)
if self.dist_enc is not None:
# score_{ij} = <q_i, k_j> + s_{ij}
# idx.shape = [klen, klen]
# idx[i][j] = i-j
idx = F.contrib.arange_like(k, axis=2)
idx = F.broadcast_sub(
idx.expand_dims(axis=1), idx.expand_dims(axis=0)
)
# idx[i][j] = |i-j|
idx = idx.abs()
# idx.shape = [1, 1, klen, klen]
idx = idx.expand_dims(axis=0).expand_dims(axis=0)
# dist representation r for attention
# r.shape = [1, klen, d_hidden]
r = F.contrib.arange_like(k, axis=2).expand_dims(axis=0)
r = self.posemb(r)
r = self.pos_proj(r)
# r.shape = [1, n_head, klen, d_head]
r = self._split_head(F, r)
# r.shape = [batch, n_head, klen, d_head]
r = r.broadcast_like(k)
if self.dist_enc == "add":
# transformer-xl style: https://arxiv.org/abs/1901.02860
# s_{ij} = <q_i, r_{|i-j|}> + <u, k_j> + <v, r_{|i-j|}>
# u = _content_bias_weight
# v = _position_bias_weight
# qr_{ij} = <q_i, r_j>
# qr'_{ij} = qr_{i,idx[i][j]} = qr_{i,|i-j|}
qr = F.batch_dot(lhs=q, rhs=r, transpose_b=True)
qr = _torch_gather(F, data=qr, idx=idx, axis=-1)
# rk_{ij} = <v, r_i> + <u, k_j>
# rk'_{ij} = rk_{idx[i][j], j} = rk_{|i-j|, j}
u = F.broadcast_to(_ctt_bias_weight, k)
v = F.broadcast_to(_pos_bias_weight, r)
rk = F.batch_dot(u, k, transpose_b=True) + F.batch_dot(
v, r, transpose_b=True
)
rk = _torch_gather(F, data=rk, idx=idx, axis=-2)
# s_{ij} = qr_{i,|i-j|} + rk_{|i-j|, j}
s = qr + rk
else:
# s_{ij} = <r_{|i-j|}, (q_i+k_j)>
# = <q_i, r_{|i-j|}> + <r_{|i-j|}, k_j>
# qr_{ij} = <q_i, r_j>
# qr'_{ij} = qr_{i,idx[i][j]} = qr_{i,|i-j|}
qr = F.batch_dot(lhs=q, rhs=r, transpose_b=True)
qr = _torch_gather(F, data=qr, idx=idx, axis=-1)
# rk_{ij} = <r_i, k_j>
# rk'_{ij} = rk_{idx[i][j], j} = rk_{|i-j|, j}
rk = F.batch_dot(lhs=r, rhs=k, transpose_b=True)
rk = _torch_gather(F, data=rk, idx=idx, axis=-2)
# s_{ij} = qr_{i,|i-j|} + rk_{|i-j|,j}
s = qr + rk
# add relative positional bias to content-based attention score
score = score + s
score = self._apply_mask(F, score, mask)
score = score / (math.sqrt(self.d_head) * self.temperature)
score = F.softmax(score, axis=-1)
score = self.dropout(score)
return score
def _compute_attn_output(self, F, score: Tensor, v: Tensor) -> Tensor:
v = F.batch_dot(score, v)
v = self._merge_head(F, v)
v = self.out_proj(v)
return v
def hybrid_forward(
self,
F,
x: Tensor,
mask: Tensor,
_ctt_bias_weight: Optional[Tensor] = None,
_pos_bias_weight: Optional[Tensor] = None,
) -> Tensor:
q, k, v = self._compute_qkv(F, x)
score = self._compute_attn_score(
F, q, k, mask, _ctt_bias_weight, _pos_bias_weight
)
v = self._compute_attn_output(F, score, v)
return v
class PosFFN(HybridBlock):
@validated()
def __init__(
self,
d_model: int,
d_hidden: int,
activation: str = "softrelu",
pre_ln: bool = True,
dropout: float = 0.0,
**kwargs,
):
super(PosFFN, self).__init__(**kwargs)
self.pre_ln = pre_ln
with self.name_scope():
self.linear1 = nn.Dense(
units=d_hidden,
use_bias=True,
flatten=False,
activation=activation,
weight_initializer=init.Xavier(),
)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Dense(
units=d_model,
use_bias=True,
flatten=False,
weight_initializer=init.Xavier(),
)
self.lnorm = nn.LayerNorm(axis=-1)
def hybrid_forward(self, F, x: Tensor) -> Tensor:
if self.pre_ln:
y = self.lnorm(x)
else:
y = x
y = self.linear1(y)
y = self.dropout(y)
y = self.linear2(y)
y = y + x
if not self.pre_ln:
y = self.lnorm(y)
return y
| 35.191244
| 95
| 0.521443
|
b9f14e24661a562864244ce1401a8c6a7371104d
| 2,287
|
py
|
Python
|
logic/reserved/save_row.py
|
rdevost/pymixup
|
9004fbdc7939033014b0eefa669056014647a0c8
|
[
"MIT"
] | 5
|
2017-01-02T15:12:31.000Z
|
2021-09-03T15:51:39.000Z
|
logic/reserved/save_row.py
|
rdevost/pymixup
|
9004fbdc7939033014b0eefa669056014647a0c8
|
[
"MIT"
] | null | null | null |
logic/reserved/save_row.py
|
rdevost/pymixup
|
9004fbdc7939033014b0eefa669056014647a0c8
|
[
"MIT"
] | 1
|
2021-09-03T15:51:41.000Z
|
2021-09-03T15:51:41.000Z
|
from peewee import IntegrityError, DoesNotExist
from data.base import obfuscatedb
from data.save_row import save_row
from logic.reserved import reserved_prefixes
def save_reserved(reserved_row, **kwargs):
"""Save a Reserved row."""
from logic.identifier import get_identifier_by_name, \
get_identifier_by_obfuscated, save_identifier, get_identifier
try:
        for name, value in kwargs.items():  # .items(): works on both Python 2 and 3
getattr(reserved_row, name) # Make sure column exists
setattr(reserved_row, name, value)
except AttributeError:
raise
with obfuscatedb.atomic():
try:
reserved_id = save_row(reserved_row, **kwargs)
except IntegrityError:
raise
####################
# Update identifiers
####################
if reserved_row.name[0] in [reserved_prefixes.reserved_dir,
reserved_prefixes.non_obfuscated_dir]:
identifier_name = reserved_row.name[1:]
elif reserved_row.name[0] in [reserved_prefixes.reserved_file,
reserved_prefixes.non_obfuscated_file]:
identifier_name = reserved_row.name[1:-3]
else:
identifier_name = reserved_row.name
# Reassign identifier obfuscated name if it exists for another name
try:
identifier_row = get_identifier_by_obfuscated(identifier_name)
except DoesNotExist:
pass
else:
if identifier_row.name != identifier_name:
identifier_row.obfuscated_name = None
save_identifier(identifier_row)
# Unobfuscate name in identifiers
try:
identifier_row = get_identifier_by_name(identifier_name)
except DoesNotExist:
identifier_row = get_identifier(None)
save_identifier(
identifier_row,
name=identifier_name,
obfuscated_name=identifier_name)
else:
if identifier_row.obfuscated_name != identifier_name:
save_identifier(
identifier_row,
name=identifier_name,
obfuscated_name=identifier_name)
return reserved_id
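# --- Illustrative sketch (not part of the original module) ---
# The prefix handling above reduces to simple string slicing. A dependency-free
# version is shown below; the prefix characters are placeholders, since the
# actual values live in logic.reserved.reserved_prefixes, and the [1:-3] slice
# assumes a one-character marker plus a three-character suffix such as '.py'.
def _strip_reserved_prefix(name, dir_prefixes=('#',), file_prefixes=('%',)):
    if name[0] in dir_prefixes:
        return name[1:]       # drop the directory marker
    if name[0] in file_prefixes:
        return name[1:-3]     # drop the file marker and the extension
    return name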
| 35.184615
| 77
| 0.609532
|
f01e4e692f228accacfe185a6c2aef5402234333
| 10,821
|
py
|
Python
|
tensorflow_transform/coders/example_proto_coder_test.py
|
devidipak/transform
|
56efe455b29fa3d0a29ce2f8872adc41ed6012c3
|
[
"Apache-2.0"
] | 2
|
2021-07-19T02:00:30.000Z
|
2021-07-19T02:00:37.000Z
|
tensorflow_transform/coders/example_proto_coder_test.py
|
devidipak/transform
|
56efe455b29fa3d0a29ce2f8872adc41ed6012c3
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_transform/coders/example_proto_coder_test.py
|
devidipak/transform
|
56efe455b29fa3d0a29ce2f8872adc41ed6012c3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow-transform ExampleProtoCoder tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
# Note that this needs to happen before any non-python imports, so we do it
# pretty early on.
if any(arg == '--proto_implementation_type=python' for arg in sys.argv):
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'
elif any(arg == '--proto_implementation_type=cpp' for arg in sys.argv):
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
elif any(arg.startswith('--proto_implementation_type') for arg in sys.argv):
raise ValueError('Unexpected value for --proto_implementation_type')
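# --- Illustrative note (not part of the original test) ---
# For example, `python example_proto_coder_test.py --proto_implementation_type=cpp`
# sets PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION to 'cpp' here, before the protobuf
# modules below are imported and read that environment variable.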
import numpy as np
import tensorflow as tf
from tensorflow_transform import test_case
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_schema
from google.protobuf.internal import api_implementation
from google.protobuf import text_format
_FEATURE_SPEC = {
'scalar_feature_1': tf.FixedLenFeature([], tf.int64),
'scalar_feature_2': tf.FixedLenFeature([], tf.int64),
'scalar_feature_3': tf.FixedLenFeature([], tf.float32),
'varlen_feature_1': tf.VarLenFeature(tf.float32),
'varlen_feature_2': tf.VarLenFeature(tf.string),
'1d_vector_feature': tf.FixedLenFeature([1], tf.string),
'2d_vector_feature': tf.FixedLenFeature([2, 2], tf.float32),
'sparse_feature': tf.SparseFeature('idx', 'value', tf.float32, 10),
}
_ENCODE_DECODE_CASES = [
dict(
testcase_name='multiple_columns',
feature_spec=_FEATURE_SPEC,
ascii_proto="""\
features {
feature { key: "scalar_feature_1" value { int64_list { value: [ 12 ] } } }
feature { key: "varlen_feature_1"
value { float_list { value: [ 89.0 ] } } }
feature { key: "scalar_feature_2" value { int64_list { value: [ 12 ] } } }
feature { key: "scalar_feature_3"
value { float_list { value: [ 1.0 ] } } }
feature { key: "1d_vector_feature"
value { bytes_list { value: [ 'this is a ,text' ] } } }
feature { key: "2d_vector_feature"
value { float_list { value: [ 1.0, 2.0, 3.0, 4.0 ] } } }
feature { key: "varlen_feature_2"
value { bytes_list { value: [ 'female' ] } } }
feature { key: "value" value { float_list { value: [ 12.0, 20.0 ] } } }
feature { key: "idx" value { int64_list { value: [ 1, 4 ] } } }
}""",
instance={
'scalar_feature_1': 12,
'scalar_feature_2': 12,
'scalar_feature_3': 1.0,
'varlen_feature_1': [89.0],
'1d_vector_feature': [b'this is a ,text'],
'2d_vector_feature': [[1.0, 2.0], [3.0, 4.0]],
'varlen_feature_2': [b'female'],
'sparse_feature': ([1, 4], [12.0, 20.0])
}),
dict(
testcase_name='multiple_columns_ndarray',
feature_spec=_FEATURE_SPEC,
ascii_proto="""\
features {
feature { key: "scalar_feature_1" value { int64_list { value: [ 13 ] } } }
feature { key: "varlen_feature_1" value { float_list { } } }
feature { key: "scalar_feature_2"
value { int64_list { value: [ 214 ] } } }
feature { key: "scalar_feature_3"
value { float_list { value: [ 2.0 ] } } }
feature { key: "1d_vector_feature"
value { bytes_list { value: [ 'this is another ,text' ] } } }
feature { key: "2d_vector_feature"
value { float_list { value: [ 9.0, 8.0, 7.0, 6.0 ] } } }
feature { key: "varlen_feature_2"
value { bytes_list { value: [ 'male' ] } } }
feature { key: "value" value { float_list { value: [ 13.0, 21.0 ] } } }
feature { key: "idx" value { int64_list { value: [ 2, 5 ] } } }
}""",
instance={
'scalar_feature_1': np.array(13),
'scalar_feature_2': np.int32(214),
'scalar_feature_3': np.array(2.0),
'varlen_feature_1': np.array([]),
'1d_vector_feature': np.array([b'this is another ,text']),
'2d_vector_feature': np.array([[9.0, 8.0], [7.0, 6.0]]),
'varlen_feature_2': np.array([b'male']),
'sparse_feature': (np.array([2, 5]), np.array([13.0, 21.0]))
}),
]
_ENCODE_ONLY_CASES = [
dict(
testcase_name='unicode',
feature_spec={'unicode_feature': tf.FixedLenFeature([], tf.string)},
ascii_proto="""\
features {
feature { key: "unicode_feature" value { bytes_list { value: [ "Hello κόσμε" ] } } }
}""",
instance={'unicode_feature': u'Hello κόσμε'}),
]
_DECODE_ONLY_CASES = [
]
_DECODE_ERROR_CASES = [
dict(
        testcase_name='too_few_values',
feature_spec={
'2d_vector_feature': tf.FixedLenFeature([2, 2], tf.int64),
},
ascii_proto="""\
features {
feature {
key: "2d_vector_feature"
value { int64_list { value: [ 1, 2, 3 ] } }
}
}""",
error_msg='got wrong number of values'),
]
_ENCODE_ERROR_CASES = [
dict(
        testcase_name='too_few_values',
feature_spec={
'2d_vector_feature': tf.FixedLenFeature([2, 2], tf.int64),
},
instance={'2d_vector_feature': [1, 2, 3]},
error_msg='got wrong number of values'),
]
def _ascii_to_example(ascii_proto):
return text_format.Merge(ascii_proto, tf.train.Example())
def _ascii_to_binary(ascii_proto):
return _ascii_to_example(ascii_proto).SerializeToString()
def _binary_to_example(serialized_proto):
return tf.train.Example.FromString(serialized_proto)
class ExampleProtoCoderTest(test_case.TransformTestCase):
def assertSerializedProtosEqual(self, a, b):
np.testing.assert_equal(_binary_to_example(a), _binary_to_example(b))
@test_case.named_parameters(*(_ENCODE_DECODE_CASES + _DECODE_ONLY_CASES))
def test_decode(self, feature_spec, ascii_proto, instance, **kwargs):
schema = dataset_schema.from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
serialized_proto = _ascii_to_binary(ascii_proto)
np.testing.assert_equal(coder.decode(serialized_proto), instance)
@test_case.named_parameters(*(_ENCODE_DECODE_CASES + _DECODE_ONLY_CASES))
def test_decode_non_serialized(self, feature_spec, ascii_proto, instance,
**kwargs):
schema = dataset_schema.from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(
schema, serialized=False, **kwargs)
proto = _ascii_to_example(ascii_proto)
np.testing.assert_equal(coder.decode(proto), instance)
@test_case.named_parameters(*(_ENCODE_DECODE_CASES + _ENCODE_ONLY_CASES))
def test_encode(self, feature_spec, ascii_proto, instance, **kwargs):
schema = dataset_schema.from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
serialized_proto = _ascii_to_binary(ascii_proto)
self.assertSerializedProtosEqual(coder.encode(instance), serialized_proto)
@test_case.named_parameters(*(_ENCODE_DECODE_CASES + _ENCODE_ONLY_CASES))
def test_encode_non_serialized(self, feature_spec, ascii_proto, instance,
**kwargs):
schema = dataset_schema.from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(
schema, serialized=False, **kwargs)
proto = _ascii_to_example(ascii_proto)
np.testing.assert_equal(coder.encode(instance), proto)
@test_case.named_parameters(*_DECODE_ERROR_CASES)
def test_decode_error(self,
feature_spec,
ascii_proto,
error_msg,
error_type=ValueError,
**kwargs):
schema = dataset_schema.from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
serialized_proto = _ascii_to_binary(ascii_proto)
with self.assertRaisesRegexp(error_type, error_msg):
coder.decode(serialized_proto)
@test_case.named_parameters(*_ENCODE_ERROR_CASES)
def test_encode_error(self,
feature_spec,
instance,
error_msg,
error_type=ValueError,
**kwargs):
schema = dataset_schema.from_feature_spec(feature_spec)
coder = example_proto_coder.ExampleProtoCoder(schema, **kwargs)
with self.assertRaisesRegexp(error_type, error_msg):
coder.encode(instance)
def test_example_proto_coder_picklable(self):
schema = dataset_schema.from_feature_spec(_FEATURE_SPEC)
coder = example_proto_coder.ExampleProtoCoder(schema)
ascii_proto = """
features {
feature { key: "scalar_feature_1" value { int64_list { value: [ 12 ] } } }
feature { key: "varlen_feature_1"
value { float_list { value: [ 89.0 ] } } }
feature { key: "scalar_feature_2" value { int64_list { value: [ 12 ] } } }
feature { key: "scalar_feature_3"
value { float_list { value: [ 2.0 ] } } }
feature { key: "1d_vector_feature"
value { bytes_list { value: [ 'this is a ,text' ] } } }
feature { key: "2d_vector_feature"
value { float_list { value: [ 1.0, 2.0, 3.0, 4.0 ] } } }
feature { key: "varlen_feature_2"
value { bytes_list { value: [ 'female' ] } } }
feature { key: "value" value { float_list { value: [ 12.0, 20.0 ] } } }
feature { key: "idx" value { int64_list { value: [ 1, 4 ] } } }
}
"""
instance = {
'scalar_feature_1': 12,
'scalar_feature_2': 12,
'scalar_feature_3': 2.0,
'varlen_feature_1': [89.0],
'1d_vector_feature': [b'this is a ,text'],
'2d_vector_feature': [[1.0, 2.0], [3.0, 4.0]],
'varlen_feature_2': [b'female'],
'sparse_feature': ([1, 4], [12.0, 20.0])
}
serialized_proto = _ascii_to_binary(ascii_proto)
for _ in range(2):
coder = pickle.loads(pickle.dumps(coder))
np.testing.assert_equal(coder.decode(serialized_proto), instance)
self.assertSerializedProtosEqual(coder.encode(instance), serialized_proto)
if __name__ == '__main__':
test_case.main()
| 39.349091
| 86
| 0.651049
|
abe3a1d1717f7c2c95e1e56375fac48249fea210
| 23,334
|
py
|
Python
|
Main.py
|
dydx-git/Calcy
|
fba4510220599a1a148dad15da0cbb508905034c
|
[
"MIT"
] | null | null | null |
Main.py
|
dydx-git/Calcy
|
fba4510220599a1a148dad15da0cbb508905034c
|
[
"MIT"
] | null | null | null |
Main.py
|
dydx-git/Calcy
|
fba4510220599a1a148dad15da0cbb508905034c
|
[
"MIT"
] | null | null | null |
#<--------------------------------------------------------------------------------------IMPORTS------------------------------------------------------------------------>
from tkinter import *
import add, subtract, multiply, divide, sin, cos, tan, asin, acos, atan, sinh, cosh, tanh, asinh, acosh, atanh, log, ln, x2, xy, e, fact, x3, sqrt, cubert, mod
#<---------------------------------------------------------------------------------------GLOBALS------------------------------------------------------------------------>
root= Tk()
root.title('Calcy')
num1=StringVar()
ans = None
common = ''
condition = ''
isOpAllowed = False
listOfNumbers = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
isDegreeOn = True
mem = None
num1.set("Start Calculation")
isExtraOff = False
finalans = None
#<---------------------------------------------------------------------------------------ENTRY MANAGEMENT---------------------------------------------------------------->
def Clear():
global ans, common, isOpAllowed, condition
ans = None
condition = ''
common = ''
isOpAllowed = False
num1.set('0')
history = ''
def Backspace(event):
global common, ans
if common=='':
pass
else:
common = str(common)[:-1]
num1.set(common)
#<---------------------------------------------------------------------------------------SETTERS------------------------------------------------------------------------->
def SetAns():
global ans, finalans
print('this is ans:', ans)
print(common)
try:
i = str(ans).index('.')
tempans = str(ans)[-1: i: -1]
print('this is ans:', ans)
if '0.' in str(ans):
num1.set(round(ans,12))
else:
for digit in tempans:
if digit in listOfNumbers:
num1.set(round(ans,12))
elif tempans == '0':
finalans = str(ans)[:i]
num1.set(finalans)
else:
finalans = str(ans)[:i]
num1.set(finalans)
except:
pass
def SetAdd(event):
global condition
if condition == '':
GetAdd()
else:
decider[condition]()
condition = '+'
SetAns()
global isOpAllowed
isOpAllowed = True
def SetSubtract(event):
global condition, isOpAllowed
if condition == '':
GetSubtract()
else:
decider[condition]()
condition = '-'
SetAns()
isOpAllowed = True
def SetMultiply(event):
global condition, isOpAllowed
if condition == '':
GetMultiply()
else:
decider[condition]()
condition = '*'
SetAns()
isOpAllowed = True
def SetDivide(event):
global condition, isOpAllowed
if condition == '':
GetDivide()
else:
decider[condition]()
condition = '/'
SetAns()
isOpAllowed = True
def SetMod():
global condition, isOpAllowed
if condition == '':
GetMod()
else:
decider[condition]()
condition = 'mod'
SetAns()
isOpAllowed = True
def SetSin():
GetSin()
SetAns()
def SetCos():
GetCos()
SetAns()
def SetTan():
GetTan()
SetAns()
def SetASin():
GetASin()
SetAns()
def SetACos():
GetACos()
SetAns()
def SetATan():
GetATan()
SetAns()
def SetSinh():
GetSinh()
SetAns()
def SetCosh():
GetCosh()
SetAns()
def SetTanh():
GetTanh()
SetAns()
def SetASinh():
GetASinh()
SetAns()
def SetACosh():
GetACosh()
SetAns()
def SetATanh():
GetATanh()
SetAns()
def SetLog(event):
GetLog()
SetAns()
def SetLn(event):
GetLn()
SetAns()
def SetX2(event):
GetX2()
SetAns()
def SetX3():
GetX3()
SetAns()
def SetXy(event):
global condition, isOpAllowed
if condition == '':
GetXy()
else:
decider[condition]()
condition = 'xy'
SetAns()
isOpAllowed = True
SetAns()
def SetE(event):
GetE()
SetAns()
def SetSqrt():
GetSqrt()
SetAns()
def SetCubert():
GetCubert()
SetAns()
def SetFact(event):
GetFact()
num1.set(ans)
def SetEquals(event):
    global condition, isOpAllowed, ans, common  # include common so the reset below affects the shared buffer
decider[condition]()
SetAns()
condition = ''
ans = None
common = ''
#<---------------------------------------------------------------------------------------GETTERS----------------------------------------------------------------------->
def GetAdd():
global ans, common
ans, common = add.Add(ans, common)
def GetSubtract():
global ans, common
ans, common = subtract.Subtract(ans, common)
def GetMultiply():
global ans, common
ans, common = multiply.Multiply(ans, common)
def GetDivide():
global ans, common
ans, common = divide.Divide(ans, common)
def GetSin():
global ans, common, isDegreeOn
ans, common = sin.Sin(ans,common, isDegreeOn)
def GetCos():
global ans, common, isDegreeOn
ans, common = cos.Cos(ans,common, isDegreeOn)
def GetTan():
global ans, common, isDegreeOn
ans, common = tan.Tan(ans, common, isDegreeOn)
def GetASin():
global ans, common, isDegreeOn
ans, common = asin.ASin(ans,common, isDegreeOn)
def GetACos():
global ans, common, isDegreeOn
ans, common = acos.ACos(ans,common, isDegreeOn)
def GetATan():
global ans, common, isDegreeOn
ans, common = atan.ATan(ans,common, isDegreeOn)
def GetSinh():
global ans, common, isDegreeOn
ans, common = sinh.Sinh(ans,common, isDegreeOn)
def GetCosh():
global ans, common, isDegreeOn
ans, common = cosh.Cosh(ans,common, isDegreeOn)
def GetTanh():
global ans, common, isDegreeOn
ans, common = tanh.Tanh(ans,common, isDegreeOn)
def GetASinh():
global ans, common, isDegreeOn
ans, common = asinh.ASinh(ans,common, isDegreeOn)
def GetACosh():
global ans, common, isDegreeOn
ans, common = acosh.ACosh(ans,common, isDegreeOn)
def GetATanh():
global ans, common, isDegreeOn
ans, common = atanh.ATanh(ans,common, isDegreeOn)
def GetLog():
global ans, common
ans, common = log.Log(ans, common)
def GetLn():
global ans, common
ans, common = ln.Ln(ans, common)
def GetX2():
global ans, common
ans, common = x2.X2(ans, common)
def GetX3():
global ans, common
ans, common = x3.X3(ans, common)
def GetXy():
global ans, common
ans, common = xy.Xy(ans, common)
def GetE():
global ans, common
ans, common = e.E(ans, common)
def GetSqrt():
global ans, common
ans, common = sqrt.Sqrt(ans, common)
def GetCubert():
global ans, common
ans, common = cubert.Cubert(ans, common)
def GetFact():
global ans, common
ans, common = fact.Fact(ans, common)
def GetMod():
global ans, common
ans, common = mod.Mod(ans, common)
#<--------------------------------------------------------------------------------------MEMORY MANAGER----------------------------------------------------------------->
def MC():
global mem
mem = None
def MR():
global mem, common
if mem != None:
common = mem
else:
pass
print('this is MR:', common)
num1.set(common)
def MS():
global mem, common, ans
if common != '':
mem = int(common)
else:
print(ans)
mem = str(ans)
#<------------------------------------------------------------------------------------DEGREE/RADIAN------------------------------------------------------------------- >
def DegRad():
global isDegreeOn
if isDegreeOn == False:
isDegreeOn = True
degRadButton.config(image=imgDeg)
elif isDegreeOn == True:
isDegreeOn = False
degRadButton.config(image=imgRad)
#<----------------------------------------------------------------------------------EXTRAS MANAGER---------------------------------------------------------------------->
def ActivateExtra():
global isExtraOff
if isExtraOff == False:
asinButton.grid(row=3,column=0)
acosButton.grid(row=4,column=0)
atanButton.grid(row=5,column=0)
asinhButton.grid(row=7, column=0)
acoshButton.grid(row=7, column=1)
atanhButton.grid(row=7, column=2)
lnButton.grid(row=7, column=3)
x3Button.grid(row=2, column = 0)
cubertButton.grid(row=2, column=4)
modButton.grid(row=8, column=0)
hisLabel.grid(row=0, column=2, columnspan=3, pady=(60,0))
xtraButton.config(image=imgXtra2)
isExtraOff = True
else:
acosButton.grid_forget()
asinButton.grid_forget()
atanButton.grid_forget()
acoshButton.grid_forget()
asinhButton.grid_forget()
atanhButton.grid_forget()
lnButton.grid_forget()
x3Button.grid_forget()
cubertButton.grid_forget()
modButton.grid_forget()
hisLabel.place(x=10,y=10)
xtraButton.config(image=imgXtra1)
isExtraOff = False
#<---------------------------------------------------------------------------------------ENTRY------------------------------------------------------------------------->
txtDisplay = Entry(root, textvariable = num1, width=17,justify="right", fg='white', borderwidth=0);
txtDisplay.config(readonlybackground='#0b486b', font = ("Segoe UI", 26))
txtDisplay.focus();
txtDisplay.config(state='readonly')
txtDisplay.grid(columnspan=7,row=0,ipady=6, pady=(0,24))
#<--------------------------------------------------------------------------------------DECORATING BUTTONS------------------------------------------------------------->
img1 = PhotoImage(file="assets/nb1.png")
img2 = PhotoImage(file="assets/nb2.png")
img3 = PhotoImage(file="assets/nb3.png")
img4 = PhotoImage(file="assets/nb4.png")
img5 = PhotoImage(file="assets/nb5.png")
img6 = PhotoImage(file="assets/nb6.png")
img7 = PhotoImage(file="assets/nb7.png")
img8 = PhotoImage(file="assets/nb8.png")
img9 = PhotoImage(file="assets/nb9.png")
imgPlus = PhotoImage(file="assets/+.png")
imgMinus = PhotoImage(file="assets/-.png")
imgMultiply = PhotoImage(file="assets/x.png")
imgDivide = PhotoImage(file="assets/dvd.png")
imgMC = PhotoImage(file='assets/MC.png')
imgMR = PhotoImage(file='assets/MR.png')
imgMS = PhotoImage(file='assets/MS.png')
imgBKSPC = PhotoImage(file='assets/backspace.png')
imgX2 = PhotoImage(file='assets/x2.png')
imgX3 = PhotoImage(file='assets/x3.png')
imgxy = PhotoImage(file='assets/xy.png')
imgSin = PhotoImage(file='assets/sin.png')
imgCos = PhotoImage(file='assets/cos.png')
imgTan = PhotoImage(file='assets/tan.png')
imgAsin = PhotoImage(file='assets/asin.png')
imgAcos = PhotoImage(file='assets/acos.png')
imgAtan = PhotoImage(file='assets/atan.png')
imgAsinh = PhotoImage(file='assets/asinh.png')
imgAcosh = PhotoImage(file='assets/acosh.png')
imgAtanh = PhotoImage(file='assets/atanh.png')
imgSinh = PhotoImage(file='assets/sinh.png')
imgCosh = PhotoImage(file='assets/cosh.png')
imgTanh = PhotoImage(file='assets/tanh.png')
imgDec = PhotoImage(file='assets/dec.png')
img0 = PhotoImage(file='assets/nb0.png')
imgDeg = PhotoImage(file='assets/deg.png')
imgRad = PhotoImage(file='assets/rad.png')
imgLog = PhotoImage(file='assets/log.png')
imgLn = PhotoImage(file='assets/ln.png')
imgE = PhotoImage(file='assets/e.png')
imgPi = PhotoImage(file='assets/pi.png')
imgC = PhotoImage(file='assets/c.png')
imgFact = PhotoImage(file='assets/fact.png')
imgSqrt = PhotoImage(file='assets/sqrt.png')
imgCubert = PhotoImage(file='assets/cubert.png')
imgPM = PhotoImage(file='assets/PM.png')
imgEquals = PhotoImage(file='assets/equals.png')
imgXtra1 = PhotoImage(file='assets/blue.png')
imgXtra2 = PhotoImage(file='assets/red.png')
imgdydx = PhotoImage(file='assets/dydx.png')
imgMod = PhotoImage(file='assets/mod.png')
dydxLabel = Label(root, image=imgdydx, bg='#232323')
dydxLabel.grid(row=8, column=2, columnspan=2)
#<---------------------------------------------------------------------------------------DEFINING BUTTONS------------------------------------------------------------------------->
oneButton = Button(root, height = '32', width='32', borderwidth=0, image=img1, bg='#232323', highlightthickness=0,command = lambda: clck('', 1))
twoButton = Button(root, height = '32', width='32', borderwidth=0, image=img2, bg='#232323', highlightthickness=0,command = lambda: clck('', 2))
threeButton = Button(root, height = '32', width='32', borderwidth=0, image=img3, bg='#232323', highlightthickness=0,command = lambda: clck('', 3))
fourButton = Button(root, height = '32', width='32', borderwidth=0, image=img4, bg='#232323', highlightthickness=0,command = lambda: clck('', 4))
sevenButton = Button(root, height = '32', width='32', borderwidth=0, image=img7, bg='#232323', highlightthickness=0,command = lambda: clck('', 7))
eightButton = Button(root, height = '32', width='32', borderwidth=0, image=img8, bg='#232323', highlightthickness=0,command = lambda: clck('', 8))
nineButton = Button(root, height = '32', width='32', borderwidth=0, image=img9, bg='#232323', highlightthickness=0,command = lambda: clck('', 9))
fiveButton = Button(root, height = '32', width='32', borderwidth=0, image=img5, bg='#232323', highlightthickness=0,command = lambda: clck('', 5))
sixButton = Button(root, height = '32', width='32', borderwidth=0, image=img6, bg='#232323', highlightthickness=0,command = lambda: clck('', 6))
zeroButton = Button(root, height = '32', width='32', borderwidth=0, image=img0, bg='#232323', highlightthickness=0,command = lambda: clck('', 0))
clearButton = Button(root, height = '46', width='46', borderwidth=0, image=imgC, bg='#232323', highlightthickness=0,command=Clear)
addButton = Button(root, text="+", height = '32', width='32',borderwidth=0, image=imgPlus, bg='#232323')
subButton = Button(root, text="-", height = '32', width='32',borderwidth=0, image=imgMinus, bg='#232323')
mcButton = Button(root, text='MC',height = '46', width='67', borderwidth=0, image=imgMC, bg='#232323', highlightthickness=0, command=MC);
mrButton = Button(root, text='MR', height = '46', width='67', borderwidth=0, image=imgMR, bg='#232323', highlightthickness=0, command=MR);
msButton = Button(root, text='M+', height = '46', width='67',borderwidth=0, image=imgMS, bg='#232323', highlightthickness=0, command=MS);
multiplyButton = Button(root, text="*",height = '46', width='46' ,borderwidth=0, image=imgMultiply, bg='#232323')
divideButton = Button(root, text="/",height = '46', width='46',borderwidth=0, image=imgDivide, bg='#232323')
sinButton = Button(root, text='sin', height = '36', width='36',borderwidth=0, image=imgSin, bg='#232323', highlightthickness=0, command=SetSin);
cosButton = Button(root, text='cos', height = '36', width='36',borderwidth=0, image=imgCos, bg='#232323', highlightthickness=0, command=SetCos);
tanButton = Button(root, text='tan', height = '36', width='36',borderwidth=0, image=imgTan, bg='#232323', highlightthickness=0, command=SetTan);
asinButton = Button(root, text='asin', height = '36', width='36',borderwidth=0, image=imgAsin, bg='#232323', highlightthickness=0, command=SetASin);
decButton = Button(root, text='.', height = '36', width='36',borderwidth=0, image=imgDec, bg='#232323', highlightthickness=0, command =lambda: clck('', '.'))
degRadButton = Button(root, text='deg', borderwidth=0, bg='#232323', highlightthickness=0, command=DegRad, image=imgDeg)
xtraButton = Button(root,borderwidth=0, bg='#232323', highlightthickness=0, command=ActivateExtra, image=imgXtra1);
acosButton = Button(root, text='acos', height = '36', width='36',borderwidth=0, image=imgAcos, bg='#232323', highlightthickness=0, command=SetACos);
atanButton = Button(root, text='atan', height = '36', width='36',borderwidth=0, image=imgAtan, bg='#232323', highlightthickness=0, command=SetATan);
sinhButton = Button(root, text='sinh', height = '36', width='36',borderwidth=0, image=imgSinh, bg='#232323', highlightthickness=0, command=SetSinh);
coshButton = Button(root, text='cosh', height = '36', width='36',borderwidth=0, image=imgCosh, bg='#232323', highlightthickness=0, command=SetCosh);
tanhButton = Button(root, text='tanh', height = '36', width='36',borderwidth=0, image=imgTanh, bg='#232323', highlightthickness=0, command=SetTanh);
asinhButton = Button(root, text='asinh', height = '36', width='36',borderwidth=0, image=imgAsinh, bg='#232323', highlightthickness=0, command=SetASinh);
acoshButton = Button(root, text='acosh', height = '36', width='36',borderwidth=0, image=imgAcosh, bg='#232323', highlightthickness=0, command=SetACosh);
atanhButton = Button(root, text='atanh', height = '36', width='36',borderwidth=0, image=imgAtanh, bg='#232323', highlightthickness=0, command=SetATanh);
logButton = Button(root, text='log', height = '36', width='36',borderwidth=0, image=imgLog, bg='#232323', highlightthickness=0);
lnButton = Button(root, text='ln', height = '36', width='36',borderwidth=0, image=imgLn, bg='#232323', highlightthickness=0);
backspaceButton = Button(root, width = '46', height = '46', image = imgBKSPC, bg = '#232323',borderwidth=0, highlightthickness=0)
x2Button = Button(root, width = '27', height = '29', image = imgX2, bg = '#232323',borderwidth=0, highlightthickness=0)
x3Button = Button(root, width = '27', height = '29', image = imgX3, bg = '#232323',borderwidth=0, highlightthickness=0, command=SetX3)
xyButton = Button(root, width = '27', height = '29', image = imgxy, bg = '#232323',borderwidth=0, highlightthickness=0)
eButton = Button(root, width = '27', height = '29', image = imgE, bg = '#232323',borderwidth=0, highlightthickness=0)
piButton = Button(root, width = '27', height = '29', image = imgPi, bg = '#232323',borderwidth=0, highlightthickness=0, command= lambda: clck('', 3.14159265358))
factButton = Button(root, width = '36', height = '36', image = imgFact, bg = '#232323',borderwidth=0, highlightthickness=0)
sqrtButton = Button(root, width = '36', height = '36', image = imgSqrt, bg = '#232323',borderwidth=0, highlightthickness=0, command=SetSqrt)
cubertButton = Button(root, width = '36', height = '36', image = imgCubert, bg = '#232323',borderwidth=0, highlightthickness=0, command=SetCubert)
equalsButton = Button(root, width = '36', height = '72', image = imgEquals, bg = '#232323',borderwidth=0, highlightthickness=0)
pmButton = Button(root, width = '36', height = '36', image = imgPM, bg = '#232323',borderwidth=0, highlightthickness=0, command= lambda: clck('', '-'))
modButton = Button(root, width = '36', height = '36', image = imgMod, bg = '#232323',borderwidth=0, highlightthickness=0, command= SetMod)
#<---------------------------------------------------------------------------------------BUTTON PLACEMENTS-------------------------------------------------------------------->
mcButton.grid(row = 1, column = 0)
mrButton.grid(row = 1, column = 1)
msButton.grid(row = 1, column = 2)
clearButton.grid(row = 1, column = 3)
backspaceButton.grid(row=1, column = 4)
x2Button.grid(row=2, column = 0, ipady=12)
eButton.grid(row=2, column = 1, ipady=12)
xyButton.grid(row=2, column = 3, ipady=12)
sinButton.grid(row=3,column=0)
cosButton.grid(row=4,column=0)
tanButton.grid(row=5, column=0)
xtraButton.grid(row=6, column=0)
sevenButton.grid(row=3, column=1, ipady=8)
eightButton.grid(row=3, column=2)
nineButton.grid(row=3, column=3)
sixButton.grid(row=4, column=3, ipady=12)
fiveButton.grid(row=4, column=2)
fourButton.grid(row=4, column=1)
threeButton.grid(row=5, column=3, ipady=12)
twoButton.grid(row=5, column=2)
oneButton.grid(row=5, column=1)
decButton.grid(row=6, column=1)
zeroButton.grid(row=6, column=2)
degRadButton.grid(row=6, column=3)
sinhButton.grid(row=7, column=0)
coshButton.grid(row=7, column=1)
tanhButton.grid(row=7, column=2, ipady=12)
logButton.grid(row=7, column=3)
piButton.grid(row=2, column =2)
addButton.grid(row=3, column=4)
subButton.grid(row=4, column=4)
multiplyButton.grid(row=5, column=4)
divideButton.grid(row=6, column=4)
factButton.grid(row=8, column=0, ipady=8)
sqrtButton.grid(row=2, column=4)
pmButton.grid(row=8, column= 1)
equalsButton.grid(row=7, column=4, rowspan=2)
#<---------------------------------------------------------------------------------------CLICK/DECIDER------------------------------------------------------------------------>
def clck (event, number):
global condition
global isOpAllowed
global ans
print(isOpAllowed)
print('Condition', condition)
global common
print(number)
if '-' in common:
print('this is common', common)
common = list(common)
common[0] = ''
common = ''.join(common)
num1.set(common)
elif number == '-':
common = str(number)+ common
num1.set(common)
else:
common+= str(number)
if common[0] == '.':
common = '0' + common
num1.set(common)
elif common.count('.')>1 :
num1.set('Not allowed')
common = ''
else:
num1.set(common)
decider = {'+': GetAdd, '-': GetSubtract, '*':GetMultiply, '/':GetDivide, 'xy': GetXy, 'mod': GetMod}
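# The `decider` mapping above dispatches the pending operator to its Get* handler,
# e.g. decider['+']() calls GetAdd(); SetEquals uses it to finish whichever
# operation is still outstanding. (Descriptive note, not part of the original script.)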
#<---------------------------------------------------------------------------------------KEYBOARD BUTTON BINDERS------------------------------------------------------------------>
root.bind("1", lambda event, arg = 1 : clck(event, arg))
root.bind("2", lambda event, arg = 2 : clck(event, arg))
root.bind("3", lambda event, arg = 3 : clck(event, arg))
root.bind("4", lambda event, arg = 4 : clck(event, arg))
root.bind("5", lambda event, arg = 5 : clck(event, arg))
root.bind("6", lambda event, arg = 6 : clck(event, arg))
root.bind("7", lambda event, arg = 7 : clck(event, arg))
root.bind("8", lambda event, arg = 8 : clck(event, arg))
root.bind("9", lambda event, arg = 9 : clck(event, arg))
root.bind("0", lambda event, arg = 0 : clck(event, arg))
root.bind("<plus>", SetAdd)
root.bind("<minus>", SetSubtract)
root.bind("<asterisk>", SetMultiply)
root.bind("<slash>", SetDivide)
root.bind("<BackSpace>", Backspace)
root.bind("<period>", lambda event, arg = '.' : clck(event, arg))
root.bind("<Return>", SetEquals)
#<--------------------------------------------------------------------------------------MOUSE BUTTON BINDERS------------------------------------------------------------------>
addButton.bind("<Button-1>", SetAdd)
subButton.bind("<Button-1>", SetSubtract)
multiplyButton.bind("<Button-1>", SetMultiply)
divideButton.bind("<Button-1>", SetDivide)
logButton.bind("<Button-1>", SetLog)
lnButton.bind("<Button-1>", SetLn)
x2Button.bind("<Button-1>", SetX2)
eButton.bind("<Button-1>", SetE)
backspaceButton.bind("<Button-1>", Backspace)
factButton.bind("<Button-1>", SetFact)
xyButton.bind("<Button-1>", SetXy)
equalsButton.bind("<Button-1>", SetEquals)
root.configure(background='#232323')
root.mainloop()
| 39.085427
| 180
| 0.577955
|
f6eff9e897288c7563a293b83e2a0c82907ad90e
| 632
|
py
|
Python
|
jass/mongo_utils.py
|
crim-ca/JASS
|
8a2d0bdd4cb50021c890fbb3059e75fa6f9adebb
|
[
"MIT",
"Python-2.0",
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2017-01-18T13:05:21.000Z
|
2017-01-18T13:05:21.000Z
|
jass/mongo_utils.py
|
crim-ca/JASS
|
8a2d0bdd4cb50021c890fbb3059e75fa6f9adebb
|
[
"MIT",
"Python-2.0",
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
jass/mongo_utils.py
|
crim-ca/JASS
|
8a2d0bdd4cb50021c890fbb3059e75fa6f9adebb
|
[
"MIT",
"Python-2.0",
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2017-05-18T18:38:53.000Z
|
2017-05-18T18:38:53.000Z
|
#!/usr/bin/env python
# coding:utf-8
"""
Various utilities for mongoDB usage.
"""
from bson.objectid import ObjectId
def changeDocIdToString(mongoDoc):
"""
Changes the _id to string.
Will crash if mongoDoc is not a valid Mongo Document
"""
if(mongoDoc is not None):
mongoDoc['_id'] = str(mongoDoc['_id'])
def changeDocIdToMongoId(jsonDoc):
"""
Changes the _id to ObjectId.
Will crash if jsonDoc is not a simple JSON object with _id field
"""
if(jsonDoc is not None):
jsonDoc['_id'] = ObjectId(jsonDoc['_id'])
def isObjectId(strId):
return ObjectId.is_valid(strId)
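# --- Hypothetical usage sketch (not part of the original module) ---
# doc = collection.find_one()   # 'collection' is a placeholder pymongo collection
# changeDocIdToString(doc)      # doc['_id'] becomes a plain string for JSON output
# changeDocIdToMongoId(doc)     # ...and an ObjectId again for further queries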
| 20.387097
| 68
| 0.664557
|
051ca1df8e3e82a4bdd0c9d489e4643cfa3fbb5b
| 4,386
|
py
|
Python
|
generate.py
|
CodeProcessor/python-project-template
|
5dd9c461aca011458d2f52ccd0b67f3ffa90254b
|
[
"MIT"
] | null | null | null |
generate.py
|
CodeProcessor/python-project-template
|
5dd9c461aca011458d2f52ccd0b67f3ffa90254b
|
[
"MIT"
] | null | null | null |
generate.py
|
CodeProcessor/python-project-template
|
5dd9c461aca011458d2f52ccd0b67f3ffa90254b
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) CUBE Content Governance Global Limited - All Rights Reserved
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
Written by Dulan Jayasuriya <dulan.jayasuriya@cube.global>, 11 February 2022
"""
import json
import os.path
from datetime import datetime
class Generator:
def __init__(self, config_path='config.json'):
self.configs = json.load(open(config_path))
self.project_name = self.configs['project_name']
self.library_name = self.configs['library_name']
def write_content(self, file_path, content):
"""
Write content to file
:param file_path:
:param content:
:return:
"""
_dir = os.path.dirname(file_path)
if not os.path.exists(_dir):
os.makedirs(_dir)
with open(file_path, 'w') as f:
f.write(content)
def get_banner(self):
banner = f"\"\"\"\n\
Copyright (C) {self.configs['library_author_company']} - All Rights Reserved \n\
Unauthorized copying of this file, via any medium is strictly prohibited \n\
Proprietary and confidential \n\
Written by {self.configs['library_author']} <{self.configs['library_author_email']}>, {datetime.now().strftime('%d %B %Y')} \n\
\"\"\"\n\n"
return banner
def get_setup_content(self):
content = f"import {self.library_name} \n\
from setuptools import setup, find_packages \n\
\n\
\n\
setup(\n\
name='{self.library_name}', \n\
version={self.library_name}.__version__, \n\
description='{self.configs['library_description']}', \n\
url='{self.configs['library_author_url']}', \n\
author='{self.configs['library_author']}', \n\
author_email='{self.configs['library_author_email']}', \n\
license='{self.configs['library_license']}', \n\
packages=find_packages(), \n\
zip_safe=False\n\
)\n"
return content
def generate(self):
"""
Main function
:return:
"""
self.write_content(os.path.join(self.project_name, 'README.md'), f"#{self.project_name}")
self.write_content(os.path.join(self.project_name, 'LICENSE'), f"#{self.project_name}")
self.write_content(os.path.join(self.project_name, self.library_name, '__init__.py'),
self.get_banner() + f"__version__ = '{self.configs['library_version']}'\n")
self.write_content(os.path.join(self.project_name, 'setup.py'), self.get_banner() + self.get_setup_content())
self.write_content(os.path.join(self.project_name, 'requirements.txt'), "")
self.write_content(os.path.join(self.project_name, "tests", 'README.md'),
"#Package integration and unit tests.")
self.write_content(os.path.join(self.project_name, "docs", 'README.md'), "#Package reference documentation.")
if self.configs["add_sample_data"]:
self.write_content(os.path.join(self.project_name, self.library_name, 'hello.py'),
self.get_banner() + self.get_sample_hello())
self.write_content(os.path.join(self.project_name, "tests", 'test_hello.py'),
self.get_banner() + self.get_sample_test())
def get_sample_hello(self):
content = "class Hello:\n" \
" def __init__(self, name):\n" \
" self.name = name\n" \
" \n" \
" def say_hello(self):\n" \
" return f'Hello {self.name}'\n" \
" "
return content
def get_sample_test(self):
content = f"from {self.library_name}.hello import Hello \n\
\n\
\n\
def test_say_hello():\n\
h = Hello('{self.configs['library_author']}')\n\
assert h.say_hello() == 'Hello {self.configs['library_author']}'\n\
"
return content
def clean(self):
"""
Clean the project
:return:
"""
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generate a new project')
parser.add_argument('--config_file', help='Path to the config file', default='config.json')
args = parser.parse_args()
# get input filename from user
# generate the project
gen = Generator(args.config_file)
gen.generate()
print('Generated!')
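# --- Illustrative config sketch (not part of the original script) ---
# A minimal config.json consistent with the keys read above; all values are
# placeholders, and "add_sample_data" toggles the hello.py / test_hello.py stubs.
# {
#   "project_name": "my_project",
#   "library_name": "my_library",
#   "library_version": "0.1.0",
#   "library_description": "Example library",
#   "library_author": "Jane Doe",
#   "library_author_email": "jane@example.com",
#   "library_author_company": "Example Ltd",
#   "library_author_url": "https://example.com",
#   "library_license": "MIT",
#   "add_sample_data": true
# }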
| 36.247934
| 127
| 0.618103
|
f58d2ccac4af29972b878702f522bd7071a45924
| 2,106
|
py
|
Python
|
generateDates.py
|
Husterknupp/2020-oster-squash
|
43e8742c89ad1225119e8d2c4d2dba6a2914dd0d
|
[
"MIT"
] | 1
|
2020-03-06T16:06:00.000Z
|
2020-03-06T16:06:00.000Z
|
generateDates.py
|
Husterknupp/2020-oster-squash
|
43e8742c89ad1225119e8d2c4d2dba6a2914dd0d
|
[
"MIT"
] | 1
|
2021-06-10T18:36:46.000Z
|
2021-06-10T18:36:46.000Z
|
generateDates.py
|
Husterknupp/2020-oster-squash
|
43e8742c89ad1225119e8d2c4d2dba6a2914dd0d
|
[
"MIT"
] | 1
|
2020-03-05T23:38:21.000Z
|
2020-03-05T23:38:21.000Z
|
# 2017-09-14T03:21:47.070-04:00
# VERSION 3
days = {}
for day in range(7, 12):
timeFrames = []
for hour in range(14, 18):
timeFrames.append(hour)
days[day] = timeFrames
for day, hours in days.items():
if day < 10:
print(f'{{day: "2020-04-0{day}", hours: {hours}}}')
else:
print(f'{{day: "2020-04-{day}", hours: {hours}}}')
"""
{day: "2020-04-07", hours: [14, 15, 16, 17]}
{day: "2020-04-08", hours: [14, 15, 16, 17]}
{day: "2020-04-09", hours: [14, 15, 16, 17]}
{day: "2020-04-10", hours: [14, 15, 16, 17]}
{day: "2020-04-11", hours: [14, 15, 16, 17]}
"""
# VERSION 2
# for day in range(7, 12):
# for hour in range(14, 18):
# if day < 10:
# print(f'{{day: "2020-04-0{day}", hour: "{hour}"}}')
# else:
# print(f'{{day: "2020-04-{day}", hour: "{hour}"}}')
"""
{day: "2020-04-07", hour: "14"}
{day: "2020-04-07", hour: "15"}
{day: "2020-04-07", hour: "16"}
{day: "2020-04-07", hour: "17"}
{day: "2020-04-08", hour: "14"}
{day: "2020-04-08", hour: "15"}
{day: "2020-04-08", hour: "16"}
{day: "2020-04-08", hour: "17"}
{day: "2020-04-09", hour: "14"}
{day: "2020-04-09", hour: "15"}
{day: "2020-04-09", hour: "16"}
{day: "2020-04-09", hour: "17"}
{day: "2020-04-10", hour: "14"}
{day: "2020-04-10", hour: "15"}
{day: "2020-04-10", hour: "16"}
{day: "2020-04-10", hour: "17"}
{day: "2020-04-11", hour: "14"}
{day: "2020-04-11", hour: "15"}
{day: "2020-04-11", hour: "16"}
{day: "2020-04-11", hour: "17"}
"""
"""
2020-04-07T14:00:00.000+01:00
2020-04-07T15:00:00.000+01:00
2020-04-07T16:00:00.000+01:00
2020-04-07T17:00:00.000+01:00
2020-04-08T14:00:00.000+01:00
2020-04-08T15:00:00.000+01:00
2020-04-08T16:00:00.000+01:00
2020-04-08T17:00:00.000+01:00
2020-04-09T14:00:00.000+01:00
2020-04-09T15:00:00.000+01:00
2020-04-09T16:00:00.000+01:00
2020-04-09T17:00:00.000+01:00
2020-04-10T14:00:00.000+01:00
2020-04-10T15:00:00.000+01:00
2020-04-10T16:00:00.000+01:00
2020-04-10T17:00:00.000+01:00
2020-04-11T14:00:00.000+01:00
2020-04-11T15:00:00.000+01:00
2020-04-11T16:00:00.000+01:00
2020-04-11T17:00:00.000+01:00
"""
| 26.325
| 65
| 0.575024
|
5b9d1385a38db639b838aaf556603ad2f7af03c9
| 895
|
py
|
Python
|
lightkit/nn/_protocols.py
|
borchero/lightkit
|
725cde3dff1cfbccf78bf10b9e922145a43959ca
|
[
"MIT"
] | 1
|
2022-01-26T07:58:04.000Z
|
2022-01-26T07:58:04.000Z
|
lightkit/nn/_protocols.py
|
borchero/lightkit
|
725cde3dff1cfbccf78bf10b9e922145a43959ca
|
[
"MIT"
] | null | null | null |
lightkit/nn/_protocols.py
|
borchero/lightkit
|
725cde3dff1cfbccf78bf10b9e922145a43959ca
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-class-docstring,missing-function-docstring
from typing import Generic, Iterator, OrderedDict, Protocol, Tuple, Type, TypeVar
import torch
from torch import nn
from lightkit.utils import PathType
C = TypeVar("C", covariant=True)
M = TypeVar("M", bound="ConfigurableModule") # type: ignore
class ConfigurableModule(Protocol, Generic[C]):
@property
def config(self) -> C:
...
@classmethod
def load(cls: Type[M], path: PathType) -> M:
...
def save(self, path: PathType, compile_model: bool = False) -> None:
...
def save_config(self, path: PathType) -> None:
...
def named_children(self) -> Iterator[Tuple[str, nn.Module]]:
...
def state_dict(self) -> OrderedDict[str, torch.Tensor]:
...
def load_state_dict(self, state_dict: OrderedDict[str, torch.Tensor]) -> None:
...
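# --- Illustrative note (not part of the original module) ---
# ConfigurableModule is a structural Protocol: for static type checking, any
# nn.Module that exposes `config`, `load`, `save`, `save_config` and the usual
# state_dict machinery matches it, without inheriting from this class.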
| 26.323529
| 82
| 0.64581
|
7b5a18350f540be7cf4eaf8053f1a8c17a766f2c
| 1,120
|
py
|
Python
|
setup.py
|
kbarnhart/corebreakout
|
fa8dc7575b330b7c1ce47a35b44deca7856bd05c
|
[
"MIT"
] | 20
|
2019-12-09T23:56:32.000Z
|
2021-08-11T18:57:59.000Z
|
setup.py
|
kbarnhart/corebreakout
|
fa8dc7575b330b7c1ce47a35b44deca7856bd05c
|
[
"MIT"
] | 13
|
2019-11-05T00:13:39.000Z
|
2021-08-20T19:08:13.000Z
|
setup.py
|
kbarnhart/corebreakout
|
fa8dc7575b330b7c1ce47a35b44deca7856bd05c
|
[
"MIT"
] | 12
|
2019-12-12T17:35:44.000Z
|
2021-10-05T05:45:49.000Z
|
#!/usr/bin/env python3
import os
from setuptools import find_packages
try:
from setuptools import setup
except ImportError:
raise UserWarning('`distutils` is not supported since you must use Python>=3.6')
try:
import tensorflow
except ImportError:
raise UserWarning('`tensorflow` or `tensorflow-gpu` must be installed manually!')
PACKAGE_PATH = os.path.abspath(os.path.join(__file__, os.pardir))
# Mostly a duplication of requirements.txt
# with the addition of pip-only package `imgaug`
install_requires = [
'numpy<=1.16.4',
'scipy',
'dill',
'Pillow',
'cython',
'matplotlib',
'scikit-image',
'keras>=2.0.8,<=2.2.5',
'opencv-python',
'h5py',
'imgaug',
'IPython[all]'
]
setup(name='corebreakout',
version='0.2',
description='Segmentation and depth-alignment of geological core sample images via Mask-RCNN',
url='https://github.com/rgmyr/corebreakout',
author='Ross Meyer',
author_email='ross.meyer@utexas.edu',
packages=find_packages(PACKAGE_PATH),
install_requires=install_requires,
zip_safe=False
)
| 23.829787
| 100
| 0.68125
|
e4bc7012aeb298951ce85df5077a9bd74ccebb69
| 213
|
py
|
Python
|
project_system/config/desktop.py
|
pradyotr/frappe-project-sys
|
ec4bc793e445ddd8f37f286e30f329369b51bb11
|
[
"MIT"
] | null | null | null |
project_system/config/desktop.py
|
pradyotr/frappe-project-sys
|
ec4bc793e445ddd8f37f286e30f329369b51bb11
|
[
"MIT"
] | null | null | null |
project_system/config/desktop.py
|
pradyotr/frappe-project-sys
|
ec4bc793e445ddd8f37f286e30f329369b51bb11
|
[
"MIT"
] | null | null | null |
from frappe import _
def get_data():
return [
{
"module_name": "Project System",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Project System")
}
]
| 16.384615
| 44
| 0.596244
|
14be349ed04493c43141190033835920909e1dae
| 369
|
py
|
Python
|
projects/migrations/0004_rename_name_userprofile_username.py
|
sling254/msHackthorn
|
82fb627c9e521e1a24c583b28c63df44db7860d9
|
[
"MIT"
] | null | null | null |
projects/migrations/0004_rename_name_userprofile_username.py
|
sling254/msHackthorn
|
82fb627c9e521e1a24c583b28c63df44db7860d9
|
[
"MIT"
] | null | null | null |
projects/migrations/0004_rename_name_userprofile_username.py
|
sling254/msHackthorn
|
82fb627c9e521e1a24c583b28c63df44db7860d9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-12-11 20:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0003_auto_20211211_2255'),
]
operations = [
migrations.RenameField(
model_name='userprofile',
old_name='name',
new_name='username',
),
]
| 19.421053
| 48
| 0.590786
|
9f6503581e582d13b54be9ade22689a2e1f432d8
| 2,120
|
py
|
Python
|
utils/models_utils.py
|
ermekaitygulov/STIT
|
93dca8d589b555fa99a5c5438a8517a52d8898c3
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 6
|
2022-03-11T23:42:12.000Z
|
2022-03-28T09:39:25.000Z
|
utils/models_utils.py
|
bycloudai/STIT-Windows
|
cadb2a01457bfd1c90bcd8d220587b48e1c2327a
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
utils/models_utils.py
|
bycloudai/STIT-Windows
|
cadb2a01457bfd1c90bcd8d220587b48e1c2327a
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
import copy
import pickle
from argparse import Namespace
import torch
from configs import paths_config, global_config
from models.e4e.psp import pSp
from training.networks import Generator
def save_tuned_G(generator, pivots, quads, run_id):
generator = copy.deepcopy(generator).cpu()
pivots = copy.deepcopy(pivots).cpu()
torch.save({'generator': generator, 'pivots': pivots, 'quads': quads},
f'{paths_config.checkpoints_dir}/model_{run_id}.pt')
def load_tuned_G(run_id):
new_G_path = f'{paths_config.checkpoints_dir}/model_{run_id}.pt'
with open(new_G_path, 'rb') as f:
checkpoint = torch.load(f)
new_G, pivots, quads = checkpoint['generator'], checkpoint['pivots'], checkpoint['quads']
new_G = new_G.float().to(global_config.device).eval().requires_grad_(False)
pivots = pivots.float().to(global_config.device)
return new_G, pivots, quads
def load_old_G():
return load_g(paths_config.stylegan2_ada_ffhq)
def load_g(file_path):
with open(file_path, 'rb') as f:
old_G = pickle.load(f)['G_ema'].to(global_config.device).eval()
old_G = old_G.float()
return old_G
def initialize_e4e_wplus():
ckpt = torch.load(paths_config.e4e, map_location='cpu')
opts = ckpt['opts']
opts['checkpoint_path'] = paths_config.e4e
opts = Namespace(**opts)
e4e_inversion_net = pSp(opts)
e4e_inversion_net = e4e_inversion_net.eval().to(global_config.device).requires_grad_(False)
return e4e_inversion_net
def load_from_pkl_model(tuned):
model_state = {'init_args': tuned.init_args, 'init_kwargs': tuned.init_kwargs
, 'state_dict': tuned.state_dict()}
gen = Generator(*model_state['init_args'], **model_state['init_kwargs'])
gen.load_state_dict(model_state['state_dict'])
gen = gen.eval().cuda().requires_grad_(False)
return gen
def load_generators(run_id):
tuned, pivots, quads = load_tuned_G(run_id=run_id)
original = load_old_G()
gen = load_from_pkl_model(tuned)
orig_gen = load_from_pkl_model(original)
del tuned, original
return gen, orig_gen, pivots, quads
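# --- Hypothetical usage sketch (not part of the original module) ---
# The run id below is a placeholder; it must match a checkpoint previously
# written by save_tuned_G under paths_config.checkpoints_dir.
# gen, orig_gen, pivots, quads = load_generators(run_id='my_run')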
| 31.641791
| 95
| 0.713208
|
1cfc4917b463b78e9058131327b07dce7af2d133
| 39,145
|
py
|
Python
|
job_search-ad_targeting-search_engine-advanced_use_cases.py
|
MDRCS/Redis
|
5315ce48d4a771f14129efe8bd2deefcec465f24
|
[
"MIT"
] | null | null | null |
job_search-ad_targeting-search_engine-advanced_use_cases.py
|
MDRCS/Redis
|
5315ce48d4a771f14129efe8bd2deefcec465f24
|
[
"MIT"
] | null | null | null |
job_search-ad_targeting-search_engine-advanced_use_cases.py
|
MDRCS/Redis
|
5315ce48d4a771f14129efe8bd2deefcec465f24
|
[
"MIT"
] | null | null | null |
import math
import re
import unittest
import uuid
import redis
# Search Engine - Use case :
AVERAGE_PER_1K = {}
# <start id="tokenize-and-index"/>
STOP_WORDS = set('''able about across after all almost also am among
an and any are as at be because been but by can cannot could dear did
do does either else ever every for from get got had has have he her
hers him his how however if in into is it its just least let like
likely may me might most must my neither no nor not of off often on
only or other our own rather said say says she should since so some
than that the their them then there these they this tis to too twas us
wants was we were what when where which while who whom why will with
would yet you your'''.split()) #A
WORDS_RE = re.compile("[a-z']{2,}") #B
def tokenize(content):
words = set() #C
for match in WORDS_RE.finditer(content.lower()): #D
word = match.group().strip("'") #E
if len(word) >= 2: #F
words.add(word) #F
return words - STOP_WORDS #G
def index_document(conn, docid, content):
words = tokenize(content) #H
pipeline = conn.pipeline(True)
for word in words: #I
pipeline.sadd('idx:' + word, docid) #I
return len(pipeline.execute()) #J
# <end id="tokenize-and-index"/>
#A We pre-declare our known stop words, these were fetched from http://www.textfixer.com/resources/
#B A regular expression that extracts words as we defined them
#C Our Python set of words that we have found in the document content
#D Iterate over all of the words in the content
#E Strip any leading or trailing single-quote characters
#F Keep any words that are still at least 2 characters long
#G Return the set of words that remain that are also not stop words
#H Get the tokenized words for the content
#I Add the documents to the appropriate inverted index entries
#J Return the number of unique non-stop words that were added for the document
#END
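# --- Illustrative sketch (not part of the original listing) ---
# tokenize() keeps only non-stop words of length >= 2, e.g.:
# >>> sorted(tokenize("The quick brown fox, the lazy dog"))
# ['brown', 'dog', 'fox', 'lazy', 'quick']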
# <start id="_1314_14473_9158"/>
def _set_common(conn, method, names, ttl=30, execute=True):
id = str(uuid.uuid4()) #A
pipeline = conn.pipeline(True) if execute else conn #B
names = ['idx:' + name for name in names] #C
getattr(pipeline, method)('idx:' + id, *names) #D
pipeline.expire('idx:' + id, ttl) #E
if execute:
pipeline.execute() #F
return id #G
def intersect(conn, items, ttl=30, _execute=True): #H
return _set_common(conn, 'sinterstore', items, ttl, _execute) #H
def union(conn, items, ttl=30, _execute=True): #I
return _set_common(conn, 'sunionstore', items, ttl, _execute) #I
def difference(conn, items, ttl=30, _execute=True): #J
return _set_common(conn, 'sdiffstore', items, ttl, _execute) #J
# <end id="_1314_14473_9158"/>
#A Create a new temporary identifier
#B Set up a transactional pipeline so that we have consistent results for each individual call
#C Add the 'idx:' prefix to our terms
#D Set up the call for one of the operations
#E Instruct Redis to expire the SET in the future
#F Actually execute the operation
#G Return the id for the caller to process the results
#H Helper function to perform SET intersections
#I Helper function to perform SET unions
#J Helper function to perform SET differences
#END
# <start id="parse-query"/>
QUERY_RE = re.compile("[+-]?[a-z']{2,}") #A
#query = "look at that query behind this result +see +request -code -cql"
def parse(query):
unwanted = set() #B
all = [] #C
current = set() #D
for match in QUERY_RE.finditer(query.lower()): #E
word = match.group() #F
print(match)
print(word)
prefix = word[:1] #F
print(prefix)
if prefix in '+-': #F
word = word[1:] #F
else: #F
prefix = None #F
word = word.strip("'") #G
if len(word) < 2 or word in STOP_WORDS: #G
continue #G
if prefix == '-': #H
unwanted.add(word) #H
continue #H
if current and not prefix: #I
all.append(list(current)) #I
current = set() #I
current.add(word) #J
if current: #K
all.append(list(current)) #K
return all, list(unwanted) #L
# <end id="parse-query"/>
#A Our regular expression for finding wanted, unwanted, and synonym words
#B A unique set of unwanted words
#C Our final result of words that we are looking to intersect
#D The current unique set of words to consider as synonyms
#E Iterate over all words in the search query
#F Discover +/- prefixes, if any
#G Strip any leading or trailing single quotes, and skip anything that is a stop word
#H If the word is unwanted, add it to the unwanted set
#I Set up a new synonym set if we have no synonym prefix and we already have words
#J Add the current word to the current set
#K Add any remaining words to the final intersection
#END
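# --- Illustrative sketch (not part of the original listing) ---
# parse() groups unprefixed and '+'-prefixed words into synonym lists and
# collects '-'-prefixed words as unwanted (ordering inside a synonym group is
# set-dependent), e.g.:
# >>> parse("connect +connection chat -proxy")
# ([['connect', 'connection'], ['chat']], ['proxy'])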
# <start id="search-query"/>
def parse_and_search(conn, query, ttl=30):
all, unwanted = parse(query) #A
if not all: #B
return None #B
to_intersect = []
for syn in all: #D
if len(syn) > 1: #E
to_intersect.append(union(conn, syn, ttl=ttl)) #E
else: #F
to_intersect.append(syn[0]) #F
if len(to_intersect) > 1: #G
intersect_result = intersect(conn, to_intersect, ttl=ttl) #G
else: #H
intersect_result = to_intersect[0] #H
if unwanted: #I
unwanted.insert(0, intersect_result) #I
return difference(conn, unwanted, ttl=ttl) #I
return intersect_result #J
# <end id="search-query"/>
#A Parse the query
#B If there are no words in the query that are not stop words, we don't have a result
#D Iterate over each list of synonyms
#E If the synonym list is more than one word long, then perform the union operation
#F Otherwise use the individual word directly
#G If we have more than one word/result to intersect, intersect them
#H Otherwise use the individual word/result directly
#I If we have any unwanted words, remove them from our earlier result and return it
#J Otherwise return the intersection result
#END
# <start id="sorted-searches"/>
def search_and_sort(conn, query, id=None, ttl=300, sort="-updated", #A
start=0, num=20): #A
desc = sort.startswith('-') #B
sort = sort.lstrip('-') #B
by = "kb:doc:*->" + sort #B
alpha = sort not in ('updated', 'id', 'created') #I
if id and not conn.expire(id, ttl): #C
id = None #C
if not id: #D
id = parse_and_search(conn, query, ttl=ttl) #D
pipeline = conn.pipeline(True)
pipeline.scard('idx:' + id) #E
pipeline.sort('idx:' + id, by=by, alpha=alpha, #F
desc=desc, start=start, num=num) #F
results = pipeline.execute()
return results[0], results[1], id #G
# <end id="sorted-searches"/>
#A We will optionally take an previous result id, a way to sort the results, and options for paginating over the results
#B Determine which attribute to sort by, and whether to sort ascending or descending
#I We need to tell Redis whether we are sorting by a number or alphabetically
#C If there was a previous result, try to update its expiration time if it still exists
#D Perform the search if we didn't have a past search id, or if our results expired
#E Fetch the total number of results
#F Sort the result list by the proper column and fetch only those results we want
#G Return the number of items in the results, the results we wanted, and the id of the results so that we can fetch them again later
#END
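# --- Illustrative note (not part of the original listing) ---
# With sort="-updated" the SORT runs numerically and descending over the
# `updated` field of each kb:doc:* hash; a field outside (updated, id, created),
# e.g. sort="title", would instead be sorted alphabetically and ascending.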
# <start id="zset_scored_composite"/>
def search_and_zsort(conn, query, id=None, ttl=300, update=1, vote=0, #A
start=0, num=20, desc=True): #A
if id and not conn.expire(id, ttl): #B
id = None #B
if not id: #C
id = parse_and_search(conn, query, ttl=ttl) #C
scored_search = {
id: 0, #I
'sort:update': update, #D
'sort:votes': vote #D
}
id = zintersect(conn, scored_search, ttl) #E
pipeline = conn.pipeline(True)
pipeline.zcard('idx:' + id) #F
if desc: #G
pipeline.zrevrange('idx:' + id, start, start + num - 1) #G
else: #G
pipeline.zrange('idx:' + id, start, start + num - 1) #G
results = pipeline.execute()
return results[0], results[1], id #H
# <end id="zset_scored_composite"/>
#A Like before, we'll optionally take a previous result id for pagination if the result is still available
#B We will refresh the search result's TTL if possible
#C If our search result expired, or if this is the first time we've searched, perform the standard SET search
#I We use the 'id' key for the intersection, but we don't want it to count towards weights
#D Set up the scoring adjustments for balancing update time and votes. Remember: votes can be adjusted to 1, 10, 100, or higher depending on the sorting result desired.
#E Intersect using our helper function that we define in listing 7.7
#F Fetch the size of the result ZSET
#G Handle fetching a "page" of results
#H Return the results and the id for pagination
#END
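# --- Usage sketch (illustrative, not part of the original listing) ----------
#   total, page, result_id = search_and_zsort(conn, 'content', update=1, vote=100)
# Raising the 'vote' weight pushes heavily-voted documents above recently
# updated ones when the two composite scores are combined.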
# <start id="zset_helpers"/>
def _zset_common(conn, method, scores, ttl=30, **kw):
id = str(uuid.uuid4()) #A
execute = kw.pop('_execute', True) #J
pipeline = conn.pipeline(True) if execute else conn #B
for key in list(scores.keys()): #C
scores['idx:' + key] = scores.pop(key) #C
getattr(pipeline, method)('idx:' + id, scores, **kw) #D
pipeline.expire('idx:' + id, ttl) #E
if execute: #F
pipeline.execute() #F
return id #G
def zintersect(conn, items, ttl=30, **kw): #H
return _zset_common(conn, 'zinterstore', dict(items), ttl, **kw) #H
def zunion(conn, items, ttl=30, **kw): #I
return _zset_common(conn, 'zunionstore', dict(items), ttl, **kw) #I
# <end id="zset_helpers"/>
#A Create a new temporary identifier
#B Set up a transactional pipeline so that we have consistent results for each individual call
#C Add the 'idx:' prefix to our inputs
#D Set up the call for one of the operations
#E Instruct Redis to expire the ZSET in the future
#F Actually execute the operation, unless explicitly instructed not to by the caller
#G Return the id for the caller to process the results
#H Helper function to perform ZSET intersections
#I Helper function to perform ZSET unions
#J Allow the passing of an argument to determine whether we should defer pipeline execution
#END
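# --- Usage sketch (illustrative, not part of the original listing; the helper
# --- name below is hypothetical) ---------------------------------------------
def _demo_zset_helpers(conn):
    # The helpers return the id of a temporary 'idx:<uuid>' ZSET; passing
    # _execute=False would instead defer the store command so several calls
    # can share one pipeline, as target_ads() does further down.
    combined = zunion(conn, {'sort:update': 1, 'sort:votes': 10})
    return conn.zrevrange('idx:' + combined, 0, 9, withscores=True)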
# <start id="string-to-score"/>
def string_to_score(string, ignore_case=False):
if ignore_case: #A
string = string.lower() #A
pieces = list(map(ord, string[:6])) #B
while len(pieces) < 6: #C
pieces.append(-1) #C
score = 0
for piece in pieces: #D
score = score * 257 + piece + 1 #D
return score * 2 + (len(string) > 6) #E
# <end id="string-to-score"/>
#A We can handle optional case-insensitive indexes easily, so we will
#B Convert the first 6 characters of the string into their numeric values, null being 0, tab being 9, capital A being 65, etc.
#C For strings that aren't at least 6 characters long, we will add place-holder values to represent that the string was short
#D For each value in the converted string values, we add it to the score, taking into consideration that a null is different from a place holder
#E Because we have an extra bit, we can also signify whether the string is exactly 6 characters or more, allowing us to differentiate 'robber' and 'robbers', though not 'robbers' and 'robbery'
#END
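# --- Order-preservation sketch (illustrative, not part of the original
# --- listing; the helper name below is hypothetical) --------------------------
def _demo_string_to_score():
    # Only the first 6 characters contribute, so scores order strings by their
    # 6-character prefixes; the final bit breaks ties for longer strings.
    words = ['apple', 'applesauce', 'banana', 'bananas']
    scores = [string_to_score(word) for word in words]
    assert scores == sorted(scores)    # score order matches lexicographic order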
def to_char_map(set):
out = {}
for pos, val in enumerate(sorted(set)):
out[val] = pos-1
return out
LOWER = to_char_map(set([-1]) | set(range(ord('a'), ord('z')+1)))
ALPHA = to_char_map(set(LOWER) | set(range(ord('A'), ord('Z')+1)))
LOWER_NUMERIC = to_char_map(set(LOWER) | set(range(ord('0'), ord('9')+1)))
ALPHA_NUMERIC = to_char_map(set(LOWER_NUMERIC) | set(ALPHA))
def string_to_score_generic(string, mapping):
length = int(52 / math.log(len(mapping), 2)) #A
pieces = list(map(ord, string[:length])) #B
while len(pieces) < length: #C
pieces.append(-1) #C
score = 0
for piece in pieces: #D
value = mapping[piece] #D
score = score * len(mapping) + value + 1 #D
return score * 2 + (len(string) > length) #E
# <start id="zadd-string"/>
def zadd_string(conn, name, *args, **kwargs):
pieces = list(args) #A
for piece in kwargs.items(): #A
pieces.extend(piece) #A
a = {}
for i, v in enumerate(pieces):
if i & 1: #B
a[pieces[i-1]] = string_to_score(v) #B
return conn.zadd(name, a) #C
# <end id="zadd-string"/>
#A Combine both types of arguments passed for later modification
#B Convert string scores to integer scores
#C Call the existing ZADD method
#END
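# --- Usage sketch (illustrative, not part of the original listing) ----------
# Members and their string "scores" can be passed positionally or as keyword
# arguments; both pairs below are stored as prefix-encoded integer scores:
#   zadd_string(conn, 'idx:sorted-names', 'doc1', 'alice', doc2='bob')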
# Ads Targeting use case:
# <start id="ecpm_helpers"/>
def cpc_to_ecpm(views, clicks, cpc):
return 1000. * cpc * clicks / views
def cpa_to_ecpm(views, actions, cpa):
return 1000. * cpa * actions / views #A
# <end id="ecpm_helpers"/>
#A Because click through rate is (clicks/views), and action rate is (actions/clicks), when we multiply them together we get (actions/views)
#END
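# --- Worked numbers (illustrative, not part of the original listing; the
# --- helper name below is hypothetical) ---------------------------------------
def _demo_ecpm():
    # A $0.25 CPC ad with 10 clicks over 1000 views earns about $2.50 per
    # thousand views; a $1 CPA ad with 2 actions over 1000 views earns $2.00.
    assert cpc_to_ecpm(1000, 10, .25) == 2.5
    assert cpa_to_ecpm(1000, 2, 1.) == 2.0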
# <start id="index_ad"/>
TO_ECPM = {
b'cpc': cpc_to_ecpm,
b'cpa': cpa_to_ecpm,
b'cpm': lambda *args:args[-1],
}
def index_ad(conn, id, locations, content, type, value):
pipeline = conn.pipeline(True) #A
if not isinstance(type, bytes):
type = type.encode('latin-1')
for location in locations:
pipeline.sadd('idx:req:'+location, id) #B
words = tokenize(content)
for word in words: #H
pipeline.zadd('idx:' + word, {id: 0}) #H
rvalue = TO_ECPM[type]( #C
1000, AVERAGE_PER_1K.get(type, 1), value) #C
pipeline.hset('type:', id, type) #D
pipeline.zadd('idx:ad:value:', {id: rvalue}) #E
pipeline.zadd('ad:base_value:', {id: value}) #F
pipeline.sadd('terms:' + id, *list(words)) #G
pipeline.execute()
# <end id="index_ad"/>
#A Set up the pipeline so that we only need a single round-trip to perform the full index operation
#B Add the ad id to all of the relevant location SETs for targeting
#H Index the words for the ad
#C We will keep a dictionary that stores the average number of clicks or actions per 1000 views on our network, for estimating the performance of new ads
#D Record what type of ad this is
#E Add the ad's eCPM to a ZSET of all ads
#F Add the ad's base value to a ZSET of all ads
#G Keep a record of the words that could be targeted for the ad
#END
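# --- Usage sketch (illustrative, not part of the original listing) ----------
#   index_ad(conn, '1', ['USA', 'CA'], 'premium coffee beans', 'cpc', 0.25)
# indexes a $0.25-per-click ad targeted at two locations; its starting eCPM
# is estimated from AVERAGE_PER_1K (or 1 click per 1000 views with no history).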
# <start id="target_ad"/>
def target_ads(conn, locations, content):
pipeline = conn.pipeline(True)
matched_ads, base_ecpm = match_location(pipeline, locations) #A
words, targeted_ads = finish_scoring( #B
pipeline, matched_ads, base_ecpm, content) #B
pipeline.incr('ads:served:') #C
pipeline.zrevrange('idx:' + targeted_ads, 0, 0) #D
target_id, targeted_ad = pipeline.execute()[-2:]
if not targeted_ad: #E
return None, None #E
ad_id = targeted_ad[0]
record_targeting_result(conn, target_id, ad_id, words) #F
return target_id, ad_id #G
# <end id="target_ad"/>
#A Find all ads that fit the location targeting parameter, and their eCPMs
#B Finish any bonus scoring based on matching the content
#C Get an id that can be used for reporting and recording of this particular ad target
#D Fetch the top-eCPM ad id
#E If there were no ads that matched the location targeting, return nothing
#F Record the results of our targeting efforts as part of our learning process
#G Return the target id and the ad id to the caller
#END
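# --- Usage sketch (illustrative, not part of the original listing) ----------
#   target_id, ad_id = target_ads(conn, ['USA'], page_content)
#   if ad_id is not None:
#       ...  # serve the ad, then later call record_click(conn, target_id, ad_id)
# Both values are None when no ad matches the requested locations.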
# <start id="location_target"/>
def match_location(pipe, locations):
required = ['req:' + loc for loc in locations] #A
matched_ads = union(pipe, required, ttl=300, _execute=False) #B
return matched_ads, zintersect(pipe, #C
{matched_ads: 0, 'ad:value:': 1}, _execute=False) #C
# <end id="location_target"/>
#A Calculate the SET key names for all of the provided locations
#B Calculate the SET of matched ads that are valid for this location
#C Return the matched ads SET id, as well as the id of the ZSET that includes the base eCPM of all of the matched ads
#END
# <start id="finish_scoring"/>
def finish_scoring(pipe, matched, base, content):
bonus_ecpm = {}
words = tokenize(content) #A
for word in words:
word_bonus = zintersect( #B
pipe, {matched: 0, word: 1}, _execute=False) #B
bonus_ecpm[word_bonus] = 1 #B
if bonus_ecpm:
minimum = zunion( #C
pipe, bonus_ecpm, aggregate='MIN', _execute=False) #C
maximum = zunion( #C
pipe, bonus_ecpm, aggregate='MAX', _execute=False) #C
return words, zunion( #D
pipe, {base:1, minimum:.5, maximum:.5}, _execute=False) #D
return words, base #E
# <end id="finish_scoring"/>
#A Tokenize the content for matching against ads
#B Find the ads that are location-targeted, which also have one of the words in the content
#C Find the minimum and maximum eCPM bonuses for each ad
#D Compute the total of the base + half of the minimum eCPM bonus + half of the maximum eCPM bonus
#E If there were no words in the content to match against, return just the known eCPM
#END
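# --- Note on the weights (illustrative, not part of the original listing) ---
# Averaging the minimum and maximum per-word bonuses (weights .5 and .5 on top
# of the base eCPM, weight 1) gives a cheap estimate of the content-match
# bonus without unioning every word ZSET individually at full weight.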
# <start id="record_targeting"/>
def record_targeting_result(conn, target_id, ad_id, words):
pipeline = conn.pipeline(True)
terms = conn.smembers(b'terms:' + ad_id) #A
matched = list(words & terms) #A
if matched:
matched_key = 'terms:matched:%s' % target_id
pipeline.sadd(matched_key, *matched) #B
pipeline.expire(matched_key, 900) #B
type = conn.hget('type:', ad_id) #C
pipeline.incr('type:%s:views:' % type) #C
for word in matched: #D
pipeline.zincrby('views:%s' % ad_id, 1, word) #D
pipeline.zincrby('views:%s' % ad_id, 1, '') #D
if not pipeline.execute()[-1] % 100: #E
update_cpms(conn, ad_id) #E
# <end id="record_targeting"/>
#A Find the words in the content that matched with the words in the ad
#B If any words in the ad matched the content, record that information and keep it for 15 minutes
#C Keep a per-type count of the number of views that each ad received
#D Record view information for each word in the ad, as well as the ad itself
#E Every 100th time that the ad was shown, update the ad's eCPM
#END
# <start id="record_click"/>
def record_click(conn, target_id, ad_id, action=False):
pipeline = conn.pipeline(True)
click_key = 'clicks:%s'%ad_id
match_key = 'terms:matched:%s'%target_id
type = conn.hget('type:', ad_id)
if type == 'cpa': #A
pipeline.expire(match_key, 900) #A
if action:
click_key = 'actions:%s' % ad_id #B
if action and type == 'cpa':
pipeline.incr('type:%s:actions:' % type) #C
else:
pipeline.incr('type:%s:clicks:' % type) #C
matched = list(conn.smembers(match_key))#D
matched.append('') #D
for word in matched: #D
pipeline.zincrby(click_key, 1, word) #D
pipeline.execute()
update_cpms(conn, ad_id) #E
# <end id="record_click"/>
#A If the ad was a CPA ad, refresh the expiration time of the matched terms if it is still available
#B Record actions instead of clicks
#C Keep a global count of clicks/actions for ads based on the ad type
#D Record clicks (or actions) for the ad and for all words that had been targeted in the ad
#E Update the eCPM for all words that were seen in the ad
#END
# <start id="update_cpms"/>
def update_cpms(conn, ad_id):
pipeline = conn.pipeline(True)
pipeline.hget('type:', ad_id) #A
pipeline.zscore('ad:base_value:', ad_id) #A
pipeline.smembers(b'terms:' + ad_id) #A
type, base_value, words = pipeline.execute()#A
which = 'clicks' #B
if type == 'cpa': #B
which = 'actions' #B
pipeline.get('type:%s:views:' % type) #C
pipeline.get('type:%s:%s' % (type, which)) #C
type_views, type_clicks = pipeline.execute() #C
AVERAGE_PER_1K[type] = ( #D
1000. * int(type_clicks or '1') / int(type_views or '1')) #D
if type == 'cpm': #E
return #E
view_key = 'views:%s' % ad_id
click_key = '%s:%s' % (which, ad_id)
to_ecpm = TO_ECPM[type]
pipeline.zscore(view_key, '') #G
pipeline.zscore(click_key, '') #G
ad_views, ad_clicks = pipeline.execute() #G
if (ad_clicks or 0) < 1: #N
ad_ecpm = conn.zscore('idx:ad:value:', ad_id) #N
else:
ad_ecpm = to_ecpm(ad_views or 1, ad_clicks or 0, base_value)#H
pipeline.zadd('idx:ad:value:', {ad_id: ad_ecpm}) #H
for word in words:
pipeline.zscore(view_key, word) #I
pipeline.zscore(click_key, word) #I
views, clicks = pipeline.execute()[-2:] #I
if (clicks or 0) < 1: #J
continue #J
word_ecpm = to_ecpm(views or 1, clicks or 0, base_value) #K
bonus = word_ecpm - ad_ecpm #L
pipeline.zadd('idx:' + word, {ad_id: bonus}) #M
pipeline.execute()
# <end id="update_cpms"/>
#A Fetch the type and value of the ad, as well as all of the words in the ad
#B Determine whether the eCPM of the ad should be based on clicks or actions
#C Fetch the current number of views and clicks/actions for the given ad type
#D Write back to our global dictionary the click-through rate or action rate for the ad
#E If we are processing a CPM ad, then we don't update any of the eCPMs, as they are already updated
#N Use the existing eCPM if the ad hasn't received any clicks yet
#G Fetch the per-ad view and click/action scores
#H Calculate the ad's eCPM and update the ad's value
#I Fetch the view and click/action scores for the word
#J Don't update eCPMs when the ad has not received any clicks
#K Calculate the word's eCPM
#L Calculate the word's bonus
#M Write the word's bonus back to the per-word per-ad ZSET
#END
# Job Search use case:
# <start id="slow_job_search"/>
def add_job(conn, job_id, required_skills):
conn.sadd('job:' + job_id, *required_skills) #A
def is_qualified(conn, job_id, candidate_skills):
temp = str(uuid.uuid4())
pipeline = conn.pipeline(True)
pipeline.sadd(temp, *candidate_skills) #B
pipeline.expire(temp, 5) #B
pipeline.sdiff('job:' + job_id, temp) #C
return not pipeline.execute()[-1] #D
# <end id="slow_job_search"/>
#A Add all required job skills to the job's SET
#B Add the candidate's skills to a temporary SET with an expiration time
#C Calculate the SET of skills that the job requires that the user doesn't have
#D Return True if there are no skills that the candidate does not have
#END
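# --- Usage sketch (illustrative, not part of the original listing; the helper
# --- name below is hypothetical) ---------------------------------------------
def _demo_is_qualified(conn):
    add_job(conn, 'backend-dev', ['python', 'redis'])
    # No required skill is missing -> SDIFF is empty -> candidate qualifies.
    return is_qualified(conn, 'backend-dev', ['python', 'redis', 'sql'])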
# <start id="job_search_index"/>
def index_job(conn, job_id, skills):
pipeline = conn.pipeline(True)
for skill in skills:
pipeline.sadd('idx:skill:' + skill, job_id) #A
pipeline.zadd('idx:jobs:req', {job_id: len(set(skills))}) #B
pipeline.execute()
# <end id="job_search_index"/>
#A Add the job id to all appropriate skill SETs
#B Add the total required skill count to the required skills ZSET
#END
# <start id="job_search_results"/>
def find_jobs(conn, candidate_skills):
skills = {} #A
for skill in set(candidate_skills): #A
skills['skill:' + skill] = 1 #A
job_scores = zunion(conn, skills) #B
final_result = zintersect( #C
conn, {job_scores:-1, 'jobs:req':1}) #C
return conn.zrangebyscore('idx:' + final_result, 0, 0) #D
# <end id="job_search_results"/>
#A Set up the dictionary for scoring the jobs
#B Calculate the scores for each of the jobs
#C Calculate how many more skills the job requires than the candidate has
#D Return the jobs that the candidate has the skills for
#END
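# --- Note on the scoring (illustrative, not part of the original listing) ---
# The union scores each job by how many of the candidate's skills it requires;
# intersecting that with weight -1 against the required-skill counts (weight
# +1) leaves "skills still missing", so jobs scoring 0 are exactly the ones
# the candidate qualifies for.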
# 0 is beginner, 1 is intermediate, 2 is expert
SKILL_LEVEL_LIMIT = 2
def index_job_levels(conn, job_id, skill_levels):
total_skills = len(set(skill for skill, level in skill_levels))
pipeline = conn.pipeline(True)
for skill, level in skill_levels:
level = min(level, SKILL_LEVEL_LIMIT)
for wlevel in range(level, SKILL_LEVEL_LIMIT+1):
pipeline.sadd('idx:skill:%s:%s'%(skill,wlevel), job_id)
pipeline.zadd('idx:jobs:req', {job_id: total_skills})
pipeline.execute()
def search_job_levels(conn, skill_levels):
skills = {}
for skill, level in skill_levels:
level = min(level, SKILL_LEVEL_LIMIT)
skills['skill:%s:%s'%(skill,level)] = 1
job_scores = zunion(conn, skills)
final_result = zintersect(conn, {job_scores:-1, 'jobs:req':1})
return conn.zrangebyscore('idx:' + final_result, '-inf', 0)
def index_job_years(conn, job_id, skill_years):
total_skills = len(set(skill for skill, years in skill_years))
pipeline = conn.pipeline(True)
for skill, years in skill_years:
pipeline.zadd(
'idx:skill:%s:years'%skill, {job_id:max(years, 0)})
pipeline.sadd('idx:jobs:all', job_id)
pipeline.zadd('idx:jobs:req', {job_id:total_skills})
pipeline.execute()
def search_job_years(conn, skill_years):
skill_years = dict(skill_years)
pipeline = conn.pipeline(True)
union = []
for skill, years in skill_years.items():
sub_result = zintersect(pipeline,
{'jobs:all':-years, 'skill:%s:years'%skill:1}, _execute=False)
pipeline.zremrangebyscore('idx:' + sub_result, '(0', 'inf')
union.append(
zintersect(pipeline, {'jobs:all':1, sub_result:0}, _execute=False))
job_scores = zunion(pipeline, dict((key, 1) for key in union), _execute=False)
final_result = zintersect(pipeline, {job_scores:-1, 'jobs:req':1}, _execute=False)
pipeline.zrangebyscore('idx:' + final_result, '-inf', 0)
return pipeline.execute()[-1]
class TestCh07(unittest.TestCase):
content = 'this is some random content, look at how it is indexed.'
def setUp(self):
self.conn = redis.Redis(db=15)
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
def test_index_document(self):
print("We're tokenizing some content...")
tokens = tokenize(self.content)
print("Those tokens are:", tokens)
self.assertTrue(tokens)
print("And now we are indexing that content...")
r = index_document(self.conn, 'test', self.content)
self.assertEqual(r, len(tokens))
for t in tokens:
self.assertEqual(self.conn.smembers('idx:' + t), set([b'test']))
def test_set_operations(self):
index_document(self.conn, 'test', self.content)
r = intersect(self.conn, ['content', 'indexed'])
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = intersect(self.conn, ['content', 'ignored'])
self.assertEqual(self.conn.smembers('idx:' + r), set())
r = union(self.conn, ['content', 'ignored'])
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = difference(self.conn, ['content', 'ignored'])
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = difference(self.conn, ['content', 'indexed'])
self.assertEqual(self.conn.smembers('idx:' + r), set())
def test_parse_query(self):
query = 'test query without stopwords'
self.assertEqual(parse(query), ([[x] for x in query.split()], []))
query = 'test +query without -stopwords'
self.assertIn(parse(query), (([['test', 'query'], ['without']], ['stopwords'],),
([['query', 'test'], ['without']], ['stopwords'],)))
def test_parse_and_search(self):
print("And now we are testing search...")
index_document(self.conn, 'test', self.content)
r = parse_and_search(self.conn, 'content')
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = parse_and_search(self.conn, 'content indexed random')
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = parse_and_search(self.conn, 'content +indexed random')
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = parse_and_search(self.conn, 'content indexed +random')
self.assertEqual(self.conn.smembers('idx:' + r), set([b'test']))
r = parse_and_search(self.conn, 'content indexed -random')
self.assertEqual(self.conn.smembers('idx:' + r), set())
print("Which passed!")
def test_search_with_sort(self):
print("And now let's test searching with sorting...")
index_document(self.conn, 'test', self.content)
index_document(self.conn, 'test2', self.content)
self.conn.hmset('kb:doc:test', {'updated': 12345, 'id': 10})
self.conn.hmset('kb:doc:test2', {'updated': 54321, 'id': 1})
r = search_and_sort(self.conn, "content")
self.assertEqual(r[1], [b'test2', b'test'])
r = search_and_sort(self.conn, "content", sort='-id')
self.assertEqual(r[1], [b'test', b'test2'])
print("Which passed!")
def test_search_with_zsort(self):
print("And now let's test searching with sorting via zset...")
index_document(self.conn, 'test', self.content)
index_document(self.conn, 'test2', self.content)
self.conn.zadd('idx:sort:update', {'test': 12345, 'test2': 54321})
self.conn.zadd('idx:sort:votes', {'test': 10, 'test2': 1})
r = search_and_zsort(self.conn, "content", desc=False)
self.assertEqual(r[1], [b'test', b'test2'])
r = search_and_zsort(self.conn, "content", update=0, vote=1, desc=False)
self.assertEqual(r[1], [b'test2', b'test'])
print("Which passed!")
def test_string_to_score(self):
words = 'these are some words that will be sorted'.split()
pairs = [(word, string_to_score(word)) for word in words]
pairs2 = list(pairs)
pairs.sort()
pairs2.sort(key=lambda x:x[1])
self.assertEqual(pairs, pairs2)
words = 'these are some words that will be sorted'.split()
pairs = [(word, string_to_score_generic(word, LOWER)) for word in words]
pairs2 = list(pairs)
pairs.sort()
pairs2.sort(key=lambda x:x[1])
self.assertEqual(pairs, pairs2)
zadd_string(self.conn, 'key', 'test', 'value', test2='other')
self.assertEqual(self.conn.zscore('key', 'test'), string_to_score('value'))
self.assertEqual(self.conn.zscore('key', 'test2'), string_to_score('other'))
def test_index_and_target_ads(self):
index_ad(self.conn, '1', ['USA', 'CA'], self.content, 'cpc', .25)
index_ad(self.conn, '2', ['USA', 'VA'], self.content + ' wooooo', 'cpc', .125)
for i in range(100):
ro = target_ads(self.conn, ['USA'], self.content)
self.assertEqual(ro[1], b'1')
r = target_ads(self.conn, ['VA'], 'wooooo')
self.assertEqual(r[1], b'2')
self.assertEqual(self.conn.zrange('idx:ad:value:', 0, -1, withscores=True), [(b'2', 0.125), (b'1', 0.25)])
self.assertEqual(self.conn.zrange('ad:base_value:', 0, -1, withscores=True), [(b'2', 0.125), (b'1', 0.25)])
record_click(self.conn, ro[0], ro[1])
self.assertEqual(self.conn.zrange('idx:ad:value:', 0, -1, withscores=True), [(b'2', 0.125), (b'1', 2.5)])
self.assertEqual(self.conn.zrange('ad:base_value:', 0, -1, withscores=True), [(b'2', 0.125), (b'1', 0.25)])
def test_is_qualified_for_job(self):
add_job(self.conn, 'test', ['q1', 'q2', 'q3'])
self.assertTrue(is_qualified(self.conn, 'test', ['q1', 'q3', 'q2']))
self.assertFalse(is_qualified(self.conn, 'test', ['q1', 'q2']))
def test_index_and_find_jobs(self):
index_job(self.conn, 'test1', ['q1', 'q2', 'q3'])
index_job(self.conn, 'test2', ['q1', 'q3', 'q4'])
index_job(self.conn, 'test3', ['q1', 'q3', 'q5'])
self.assertEqual(find_jobs(self.conn, ['q1']), [])
self.assertEqual(find_jobs(self.conn, ['q1', 'q3', 'q4']), [b'test2'])
self.assertEqual(find_jobs(self.conn, ['q1', 'q3', 'q5']), [b'test3'])
self.assertEqual(find_jobs(self.conn, ['q1', 'q2', 'q3', 'q4', 'q5']), [b'test1', b'test2', b'test3'])
def test_index_and_find_jobs_levels(self):
print("now testing find jobs with levels ...")
index_job_levels(self.conn, "job1" ,[('q1', 1)])
index_job_levels(self.conn, "job2", [('q1', 0), ('q2', 2)])
self.assertEqual(search_job_levels(self.conn, [('q1', 0)]), [])
self.assertEqual(search_job_levels(self.conn, [('q1', 1)]), [b'job1'])
self.assertEqual(search_job_levels(self.conn, [('q1', 2)]), [b'job1'])
self.assertEqual(search_job_levels(self.conn, [('q2', 1)]), [])
self.assertEqual(search_job_levels(self.conn, [('q2', 2)]), [])
self.assertEqual(search_job_levels(self.conn, [('q1', 0), ('q2', 1)]), [])
self.assertEqual(search_job_levels(self.conn, [('q1', 0), ('q2', 2)]), [b'job2'])
self.assertEqual(search_job_levels(self.conn, [('q1', 1), ('q2', 1)]), [b'job1'])
self.assertEqual(search_job_levels(self.conn, [('q1', 1), ('q2', 2)]), [b'job1', b'job2'])
print("which passed")
def test_index_and_find_jobs_years(self):
print("now testing find jobs with years ...")
index_job_years(self.conn, "job1",[('q1',1)])
index_job_years(self.conn, "job2",[('q1',0),('q2',2)])
self.assertEqual(search_job_years(self.conn, [('q1',0)]), [])
self.assertEqual(search_job_years(self.conn, [('q1',1)]), [b'job1'])
self.assertEqual(search_job_years(self.conn, [('q1',2)]), [b'job1'])
self.assertEqual(search_job_years(self.conn, [('q2',1)]), [])
self.assertEqual(search_job_years(self.conn, [('q2',2)]), [])
self.assertEqual(search_job_years(self.conn, [('q1',0), ('q2', 1)]), [])
self.assertEqual(search_job_years(self.conn, [('q1',0), ('q2', 2)]), [b'job2'])
self.assertEqual(search_job_years(self.conn, [('q1',1), ('q2', 1)]), [b'job1'])
self.assertEqual(search_job_years(self.conn, [('q1',1), ('q2', 2)]), [b'job1',b'job2'])
print("which passed")
if __name__ == '__main__':
unittest.main()
| 44.994253
| 192
| 0.567557
|
98190be3cb4d7cd798c48d3d94ec4f5848355f07
| 2,072
|
py
|
Python
|
backend/pharmacy/api/views/medical/medicine.py
|
rahul007-bit/pharmaService
|
73191f64569eae7c7851f5b7bf9187f3f01b7a6e
|
[
"MIT"
] | 4
|
2022-01-28T13:05:07.000Z
|
2022-01-31T12:24:56.000Z
|
backend/pharmacy/api/views/medical/medicine.py
|
rahul007-bit/pharmaService
|
73191f64569eae7c7851f5b7bf9187f3f01b7a6e
|
[
"MIT"
] | 6
|
2022-01-30T11:53:31.000Z
|
2022-02-02T06:17:30.000Z
|
backend/pharmacy/api/views/medical/medicine.py
|
rahul007-bit/pharmaService
|
73191f64569eae7c7851f5b7bf9187f3f01b7a6e
|
[
"MIT"
] | 3
|
2022-01-28T13:41:03.000Z
|
2022-01-30T12:23:11.000Z
|
# pylint: disable=missing-module-docstring
#
# Copyright (C) 2022 by YadavGulshan@Github, < https://github.com/YadavGulshan >.
#
# This file is part of < https://github.com/Yadavgulshan/pharmaService > project,
# and is released under the "BSD 3-Clause License Agreement".
# Please see < https://github.com/YadavGulshan/pharmaService/blob/master/LICENCE >
#
# All rights reserved.
from django.http import Http404
from rest_framework.response import Response
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
from pharmacy.models import Medical, Medicine
from ...serializers import MedicalSerializer, MedicineSerializer
from rest_framework import generics
from rest_framework import status
@permission_classes([IsAuthenticated])
class MedicineViewList(generics.CreateAPIView):
"""This class will display or help medical owner view or update the medicines."""
def get(self, request):
medicine = Medicine.objects.filter(user=request.user)
serializer = MedicalSerializer(medicine, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
@permission_classes([IsAuthenticated])
class MedicineViewByID(generics.CreateAPIView):
""" "This class will display only the medicine owned by specific medical shop."""
def get(self, request, pk):
# First check if the medical shop exists
try:
medical = Medical.objects.get(pk=pk)
except Medical.DoesNotExist:
raise Http404
# Check if that medical shop is owned by the user
if medical.user != request.user:
return Response(status=status.HTTP_403_FORBIDDEN)
try:
# Get the medicine list of that medical shop
medicine = Medicine.objects.filter(medicalId=pk)
# medicine = Medicine.objects.get(pk=pk)
except Medicine.DoesNotExist:
raise Http404
serializer = MedicineSerializer(medicine, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
| 35.724138
| 85
| 0.726834
|
00345485045dc5f4f7fed4443a3b831e24371cee
| 4,740
|
py
|
Python
|
test/unit/test_cmdline.py
|
jsiverskog/pyOCD
|
8b75633482a2f1856a8ab6af9ebb5c1b2f9d8285
|
[
"Apache-2.0"
] | 1
|
2020-07-11T09:24:25.000Z
|
2020-07-11T09:24:25.000Z
|
test/unit/test_cmdline.py
|
ARMmbed/pyOCD-Samsung
|
03242b6eb57d2170a4b531d00f1a0577e2b0abde
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_cmdline.py
|
ARMmbed/pyOCD-Samsung
|
03242b6eb57d2170a4b531d00f1a0577e2b0abde
|
[
"Apache-2.0"
] | null | null | null |
# pyOCD debugger
# Copyright (c) 2015,2018-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyocd.utility.cmdline import (
split_command_line,
convert_vector_catch,
VECTOR_CATCH_CHAR_MAP,
convert_session_options,
)
from pyocd.core.target import Target
import pytest
import six
class TestSplitCommandLine(object):
def test_split(self):
assert split_command_line('foo') == ['foo']
assert split_command_line(['foo']) == ['foo']
assert split_command_line('foo bar') == ['foo', 'bar']
assert split_command_line(['foo bar']) == ['foo', 'bar']
def test_split_strings(self):
assert split_command_line('"foo"') == ['foo']
assert split_command_line('"foo bar"') == ['foo bar']
assert split_command_line(['"foo"']) == ['foo']
assert split_command_line('a "b c" d') == ['a', "b c", 'd']
assert split_command_line("'foo bar'") == ['foo bar']
def test_split_whitespace(self):
assert split_command_line('a b') == ['a', 'b']
assert split_command_line('a\tb') == ['a', 'b']
assert split_command_line('a\rb') == ['a', 'b']
assert split_command_line('a\nb') == ['a', 'b']
assert split_command_line('a \tb') == ['a', 'b']
class TestConvertVectorCatch(object):
def test_none_str(self):
assert convert_vector_catch('none') == 0
def test_all_str(self):
assert convert_vector_catch('all') == Target.CATCH_ALL
def test_none_b(self):
assert convert_vector_catch(b'none') == 0
def test_all_b(self):
assert convert_vector_catch(b'all') == Target.CATCH_ALL
@pytest.mark.parametrize(("vc", "msk"),
list(VECTOR_CATCH_CHAR_MAP.items()))
def test_vc_str(self, vc, msk):
assert convert_vector_catch(vc) == msk
@pytest.mark.parametrize(("vc", "msk"),
[(six.b(x), y) for x,y in VECTOR_CATCH_CHAR_MAP.items()])
def test_vc_b(self, vc, msk):
assert convert_vector_catch(vc) == msk
class TestConvertSessionOptions(object):
def test_empty(self):
assert convert_session_options([]) == {}
def test_unknown_option(self):
assert convert_session_options(['dumkopf']) == {}
def test_bool(self):
assert convert_session_options(['auto_unlock']) == {'auto_unlock': True}
assert convert_session_options(['no-auto_unlock']) == {'auto_unlock': False}
assert convert_session_options(['auto_unlock=1']) == {'auto_unlock': True}
assert convert_session_options(['auto_unlock=true']) == {'auto_unlock': True}
assert convert_session_options(['auto_unlock=yes']) == {'auto_unlock': True}
assert convert_session_options(['auto_unlock=on']) == {'auto_unlock': True}
assert convert_session_options(['auto_unlock=0']) == {'auto_unlock': False}
assert convert_session_options(['auto_unlock=false']) == {'auto_unlock': False}
assert convert_session_options(['auto_unlock=anything-goes-here']) == {'auto_unlock': False}
def test_noncasesense(self):
# Test separate paths for with and without a value.
assert convert_session_options(['AUTO_Unlock']) == {'auto_unlock': True}
assert convert_session_options(['AUTO_Unlock=0']) == {'auto_unlock': False}
def test_int(self):
# Non-bool with no value is ignored (and logged).
assert convert_session_options(['frequency']) == {}
# Invalid int value is ignored and logged
assert convert_session_options(['frequency=abc']) == {}
# Ignore with no- prefix
assert convert_session_options(['no-frequency']) == {}
# Valid int
assert convert_session_options(['frequency=1000']) == {'frequency': 1000}
# Valid hex int
assert convert_session_options(['frequency=0x40']) == {'frequency': 64}
def test_str(self):
# Ignore with no value
assert convert_session_options(['test_binary']) == {}
# Ignore with no- prefix
assert convert_session_options(['no-test_binary']) == {}
# Valid
assert convert_session_options(['test_binary=abc']) == {'test_binary': 'abc'}
| 41.217391
| 100
| 0.65211
|
662f721e6bf897cea9dd650362875a4f927de112
| 3,595
|
py
|
Python
|
tests/unit/test_protocol_errors.py
|
dcolligan/ga4gh-server
|
dd0b00a52de9684609b7f04a9d70946c36afa8a5
|
[
"Apache-2.0"
] | 83
|
2015-01-05T22:21:11.000Z
|
2017-02-20T01:25:28.000Z
|
tests/unit/test_protocol_errors.py
|
dcolligan/ga4gh-server
|
dd0b00a52de9684609b7f04a9d70946c36afa8a5
|
[
"Apache-2.0"
] | 1,508
|
2015-01-02T14:06:12.000Z
|
2017-03-08T19:49:18.000Z
|
tests/unit/test_protocol_errors.py
|
dcolligan/ga4gh-server
|
dd0b00a52de9684609b7f04a9d70946c36afa8a5
|
[
"Apache-2.0"
] | 99
|
2015-01-14T20:48:56.000Z
|
2017-03-08T18:35:06.000Z
|
"""
Unit tests for frontend error conditions.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import ga4gh.server.frontend as frontend
import ga4gh.server.exceptions as exceptions
import ga4gh.schemas.protocol as protocol
class TestFrontendErrors(unittest.TestCase):
"""
Tests the frontend for various errors that can occur and verify
that the correct exception was raised by the error code sent
back.
"""
@classmethod
def setUpClass(cls):
frontend.reset()
frontend.configure(baseConfig="TestConfig")
cls.app = frontend.app.test_client()
@classmethod
def tearDownClass(cls):
cls.app = None
def setUp(self):
# TODO replace this with ALL post methods once the rest of the
# end points have been implemented. This should also add an API
# to protocol.py to simplify and document the process of getting
# the correct API endpoints and classes. That is, we shouldn't
# use protocol.postMethods directly, but instead call a function.
supportedMethods = set([
protocol.SearchCallSetsRequest,
protocol.SearchVariantSetsRequest,
protocol.SearchVariantsRequest,
])
self.endPointMap = {}
for endPoint, requestClass, responseClass in protocol.postMethods:
if requestClass in supportedMethods:
self.endPointMap[endPoint] = requestClass
def assertRawRequestRaises(self, exceptionClass, url, requestString):
"""
Verifies that the specified request string returns a protocol
exception corresponding to the specified class when applied to
all POST endpoints.
"""
response = self.app.post(
url, headers={'Content-type': 'application/json'},
data=requestString)
self.assertEqual(response.status_code, exceptionClass.httpStatus)
error = protocol.fromJson(response.data, protocol.GAException)
self.assertEqual(
error.error_code, exceptionClass.getErrorCode())
self.assertGreater(len(error.message), 0)
def assertRequestRaises(self, exceptionClass, url, request):
"""
Verifies that the specified request returns a protocol exception
corresponding to the specified exception class.
"""
self.assertRawRequestRaises(
exceptionClass, url, protocol.toJson(request))
def testPageSize(self):
for url, requestClass in self.endPointMap.items():
for badSize in [-100, -1]:
request = requestClass()
request.page_size = badSize
self.assertRequestRaises(
exceptions.BadPageSizeException, url, request)
@unittest.skip("Gets caught by the protocol buffer checkers")
def testPageToken(self):
for url, requestClass in self.endPointMap.items():
for badType in [0, 0.0, 1e-3, {}, [], [None]]:
request = requestClass()
request.page_token = badType
self.assertRequestRaises(
exceptions.RequestValidationFailureException, url, request)
@unittest.skip("TODO: create invalid JSON to test validation")
def testInvalidFields(self):
for url, requestClass in self.endPointMap.items():
request = self._createInvalidInstance(requestClass)
self.assertRequestRaises(
exceptions.RequestValidationFailureException, url, request)
| 38.244681
| 79
| 0.664256
|
8639cebd5b3ff50ceacb82ea2b2485b775145b52
| 1,115
|
py
|
Python
|
pacific-factbook/flag.py
|
kaunta/pacific-factbook
|
ccf3f08c0d6121d852e5dd0319e21e0a9ec44e3d
|
[
"MIT"
] | 5
|
2020-01-23T04:08:46.000Z
|
2020-04-02T05:19:34.000Z
|
pacific-factbook/flag.py
|
kaunta/pacific-factbook
|
ccf3f08c0d6121d852e5dd0319e21e0a9ec44e3d
|
[
"MIT"
] | 23
|
2019-11-22T01:56:54.000Z
|
2020-02-08T23:45:10.000Z
|
pacific-factbook/flag.py
|
kaunta/pacific-factbook
|
ccf3f08c0d6121d852e5dd0319e21e0a9ec44e3d
|
[
"MIT"
] | null | null | null |
from fractions import Fraction
from random import choice
def generate() -> str:
"""
Generate a random flag. Outputs SVG blob.
"""
colors = set("red blue white green yellow black orange brown gray purple".split())
color_background = choice(list(colors))
color_shape = choice(list(colors - {color_background}))
aspect_ratio = choice([Fraction("2/3"), Fraction("1/2")])
height = 200
width = height / aspect_ratio
shape = choice(
[
f"""<circle cx="50" cy="50" r="40" stroke="{color_shape}" stroke-width="4" fill="{color_shape}" />""",
f"""<polygon points=" 50,5 20,99 95,39 5,39 80,99 " style="fill:{color_shape};stroke:{color_shape};stroke-width:4;fill-rule:nonzero;" />""",
]
)
return f"""
<svg width="{width}" height="{height}" style="border: 1px solid black">
<rect width="100%" height="100%" fill="{color_background}"/>
{shape}
</svg>
"""
if __name__ == "__main__":
print("<h1>Test Flag Report</h1>")
for _ in range(12):
print(generate())
print("<hr>")
| 31.857143
| 152
| 0.584753
|
37bfa76cc51a0d3dcd589ba75b34bc72511475c3
| 6,723
|
py
|
Python
|
lighttpd/datadog_checks/lighttpd/lighttpd.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | null | null | null |
lighttpd/datadog_checks/lighttpd/lighttpd.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | null | null | null |
lighttpd/datadog_checks/lighttpd/lighttpd.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
import requests
from six.moves.urllib.parse import urlparse
from datadog_checks.checks import AgentCheck
VERSION_REGEX = re.compile(r".*/(\d)")
class Lighttpd(AgentCheck):
"""Tracks basic connection/requests/workers metrics
See http://redmine.lighttpd.net/projects/1/wiki/Docs_ModStatus for Lighttpd details
See http://redmine.lighttpd.net/projects/lighttpd2/wiki/Mod_status for Lighttpd2 details
"""
SERVICE_CHECK_NAME = 'lighttpd.can_connect'
URL_SUFFIX_PER_VERSION = {1: '?auto', 2: '?format=plain', 'Unknown': '?auto'}
GAUGES = {
b'IdleServers': 'lighttpd.performance.idle_server',
b'BusyServers': 'lighttpd.performance.busy_servers',
b'Uptime': 'lighttpd.performance.uptime',
b'Total kBytes': 'lighttpd.net.bytes',
b'Total Accesses': 'lighttpd.net.hits',
b'memory_usage': 'lighttpd.performance.memory_usage',
b'requests_avg': 'lighttpd.net.requests_avg',
b'traffic_out_avg': 'lighttpd.net.bytes_out_avg',
b'traffic_in_avg': 'lighttpd.net.bytes_in_avg',
b'connections_avg': 'lighttpd.net.connections_avg',
b'connection_state_start': 'lighttpd.connections.state_start',
b'connection_state_read_header': 'lighttpd.connections.state_read_header',
b'connection_state_handle_request': 'lighttpd.connections.state_handle_request',
b'connection_state_write_response': 'lighttpd.connections.state_write_response',
b'connection_state_keep_alive': 'lighttpd.connections.state_keep_alive',
b'requests_avg_5sec': 'lighttpd.net.requests_avg_5sec',
b'traffic_out_avg_5sec': 'lighttpd.net.bytes_out_avg_5sec',
b'traffic_in_avg_5sec': 'lighttpd.net.bytes_in_avg_5sec',
b'connections_avg_5sec': 'lighttpd.net.connections_avg_5sec',
}
COUNTERS = {
b'requests_abs': 'lighttpd.net.requests_total',
b'traffic_out_abs': 'lighttpd.net.bytes_out',
b'traffic_in_abs': 'lighttpd.net.bytes_in',
b'connections_abs': 'lighttpd.net.connections_total',
b'status_1xx': 'lighttpd.response.status_1xx',
b'status_2xx': 'lighttpd.response.status_2xx',
b'status_3xx': 'lighttpd.response.status_3xx',
b'status_4xx': 'lighttpd.response.status_4xx',
b'status_5xx': 'lighttpd.response.status_5xx',
}
RATES = {b'Total kBytes': 'lighttpd.net.bytes_per_s', b'Total Accesses': 'lighttpd.net.request_per_s'}
HTTP_CONFIG_REMAPPER = {'user': {'name': 'username'}}
def __init__(self, name, init_config, instances):
super(Lighttpd, self).__init__(name, init_config, instances)
self.assumed_url = {}
if 'auth_type' in self.instance:
if self.instance['auth_type'] == 'digest':
auth = self.http.options['auth']
self.http.options['auth'] = requests.auth.HTTPDigestAuth(auth[0], auth[1])
def check(self, instance):
if 'lighttpd_status_url' not in instance:
raise Exception("Missing 'lighttpd_status_url' variable in Lighttpd config")
url = self.assumed_url.get(instance['lighttpd_status_url'], instance['lighttpd_status_url'])
tags = instance.get('tags', [])
auth_type = instance.get('auth_type', 'basic').lower()
if self.http.options['auth'] is None:
msg = "Unsupported value of 'auth_type' variable in Lighttpd config: {}".format(auth_type)
raise Exception(msg)
self.log.debug("Connecting to %s" % url)
# Submit a service check for status page availability.
parsed_url = urlparse(url)
lighttpd_url = parsed_url.hostname
lighttpd_port = parsed_url.port or 80
service_check_tags = ['host:%s' % lighttpd_url, 'port:%s' % lighttpd_port] + tags
try:
r = self.http.get(url)
r.raise_for_status()
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
raise
else:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
headers_resp = r.headers
server_version = self._get_server_version(headers_resp)
response = r.content
metric_count = 0
# Loop through and extract the numerical values
for line in response.split(b'\n'):
values = line.split(b': ')
if len(values) == 2: # match
metric, value = values
try:
value = float(value)
except ValueError:
continue
# Special case: kBytes => bytes
if metric == b'Total kBytes':
value = value * 1024
# Send metric as a gauge, if applicable
if metric in self.GAUGES:
metric_count += 1
metric_name = self.GAUGES[metric]
self.gauge(metric_name, value, tags=tags)
# Send metric as a rate, if applicable
if metric in self.RATES:
metric_count += 1
metric_name = self.RATES[metric]
self.rate(metric_name, value, tags=tags)
# Send metric as a counter, if applicable
if metric in self.COUNTERS:
metric_count += 1
metric_name = self.COUNTERS[metric]
self.increment(metric_name, value, tags=tags)
if metric_count == 0:
url_suffix = self.URL_SUFFIX_PER_VERSION[server_version]
if self.assumed_url.get(instance['lighttpd_status_url']) is None and url[-len(url_suffix) :] != url_suffix:
self.assumed_url[instance['lighttpd_status_url']] = '%s%s' % (url, url_suffix)
self.warning("Assuming url was not correct. Trying to add %s suffix to the url" % url_suffix)
self.check(instance)
else:
raise Exception(
"No metrics were fetched for this instance. Make sure "
"that %s is the proper url." % instance['lighttpd_status_url']
)
def _get_server_version(self, headers):
server_version = headers.get("server", "")
match = VERSION_REGEX.match(server_version)
if match is None:
self.log.debug("Lighttpd server version is Unknown")
return "Unknown"
version = int(match.group(1))
self.log.debug("Lighttpd server version is %s" % version)
return version
| 41.5
| 119
| 0.625614
|
233391949dea48d8da274bc5b7e7be2c1ffac7f7
| 3,568
|
py
|
Python
|
sdks/python/appcenter_sdk/models/BlobInfo.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | null | null | null |
sdks/python/appcenter_sdk/models/BlobInfo.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 6
|
2019-10-23T06:38:53.000Z
|
2022-01-22T07:57:58.000Z
|
sdks/python/appcenter_sdk/models/BlobInfo.py
|
Brantone/appcenter-sdks
|
eeb063ecf79908b6e341fb00196d2cd9dc8f3262
|
[
"MIT"
] | 2
|
2019-10-23T06:31:05.000Z
|
2021-08-21T17:32:47.000Z
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class BlobInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'size': 'number',
'url': 'string'
}
attribute_map = {
'size': 'size',
'url': 'url'
}
def __init__(self, size=None, url=None): # noqa: E501
"""BlobInfo - a model defined in Swagger""" # noqa: E501
self._size = None
self._url = None
self.discriminator = None
self.size = size
self.url = url
@property
def size(self):
"""Gets the size of this BlobInfo. # noqa: E501
:return: The size of this BlobInfo. # noqa: E501
:rtype: number
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this BlobInfo.
:param size: The size of this BlobInfo. # noqa: E501
:type: number
"""
if size is None:
raise ValueError("Invalid value for `size`, must not be `None`") # noqa: E501
self._size = size
@property
def url(self):
"""Gets the url of this BlobInfo. # noqa: E501
:return: The url of this BlobInfo. # noqa: E501
:rtype: string
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this BlobInfo.
:param url: The url of this BlobInfo. # noqa: E501
:type: string
"""
if url is None:
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BlobInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.043796
| 90
| 0.539238
|
2501b593dd91359dd470ec59d0b5a796aa1d77c7
| 13,195
|
py
|
Python
|
GeoHealthCheck/probe.py
|
SteveR1984/GeoHealthCheck
|
52a13c1bd42e79dec547fa29324d583aea5e3773
|
[
"MIT"
] | null | null | null |
GeoHealthCheck/probe.py
|
SteveR1984/GeoHealthCheck
|
52a13c1bd42e79dec547fa29324d583aea5e3773
|
[
"MIT"
] | null | null | null |
GeoHealthCheck/probe.py
|
SteveR1984/GeoHealthCheck
|
52a13c1bd42e79dec547fa29324d583aea5e3773
|
[
"MIT"
] | null | null | null |
import logging
import sys
import datetime
import requests
from factory import Factory
from init import App
from plugin import Plugin
from result import ProbeResult
LOGGER = logging.getLogger(__name__)
class Probe(Plugin):
"""
Base class for specific implementations to run a Probe with Checks.
Most Probes can be implemented using REQUEST_TEMPLATES parameterized
via actualized PARAM_DEFS but specialized Probes may implement
their own Requests and Checks, for example by "drilling down"
through OWS services on an OGC OWS endpoint starting at the
Capabilities level or for specific WWW:LINK-based REST APIs.
"""
    # Generic attributes, subclasses override
RESOURCE_TYPE = 'Not Applicable'
"""
Type of GHC Resource e.g. 'OGC:WMS', default not applicable.
"""
# Request attributes, defaults, subclasses override
REQUEST_METHOD = 'GET'
"""
HTTP request method capitalized, GET (default) or POST.
"""
REQUEST_HEADERS = {}
"""
`dict` of optional requests headers.
"""
REQUEST_TEMPLATE = ''
"""
Template in standard Python `str.format(*args)`. The variables
like {service} and {version} within a template are filled from
actual values for parameters defined in PARAM_DEFS and substituted
from values or constant values specified by user in GUI and stored
in DB.
"""
# Parameter definitions and possible Checks,
    # subclasses override
PARAM_DEFS = {}
"""
Parameter definitions mostly for `REQUEST_TEMPLATE` but potential other
uses in specific Probe implementations. Format is `dict` where each key
is a parameter name and the value a `dict` of: `type`, `description`,
`required`, `default`, `range` (value range) and optional `value` item.
If `value` specified, this value becomes fixed (non-editable) unless
overridden in subclass.
"""
CHECKS_AVAIL = {}
"""
Available Check (classes) for this Probe in `dict` format.
Key is a Check class (string), values are optional (default `{}`).
In the (constant) value 'parameters' and other attributes for
Check.PARAM_DEFS can be specified, including `default` if this Check
should be added to Probe on creation.
"""
METADATA_CACHE = {}
"""
Cache for metadata, like capabilities documents or OWSLib Service
instances. Saves doing multiple requests/responses. In particular for
endpoints with 50+ Layers.
"""
def __init__(self):
Plugin.__init__(self)
self._resource = None
#
# Lifecycle : optionally expand params from Resource metadata
def expand_params(self, resource):
"""
Called after creation. Use to expand PARAM_DEFS, e.g. from Resource
metadata like WMS Capabilities. See e.g. WmsGetMapV1 class.
:param resource:
:return: None
"""
pass
def get_metadata(self, resource, version='any'):
"""
Get metadata, specific per Resource type.
:param resource:
:param version:
:return: Metadata object
"""
return 'md'
def get_metadata_cached(self, resource, version='any'):
"""
Get metadata, specific per Resource type, get from cache
if cached.
:param resource:
:param version:
:return: Metadata object
"""
key = '%s_%s_%s' % (resource.url, resource.resource_type,
version)
metadata = None
if key in Probe.METADATA_CACHE:
entry = Probe.METADATA_CACHE[key]
delta = datetime.datetime.utcnow() - entry['time']
metadata = entry['metadata']
# Don't keep cache forever, refresh every N mins
if delta.seconds > App.get_config()['GHC_METADATA_CACHE_SECS']:
entry = Probe.METADATA_CACHE.pop(key)
del entry
metadata = None
if not metadata:
            # Get actual metadata, Resource-type specific
metadata = self.get_metadata(resource, version)
if metadata and App.get_config()['GHC_METADATA_CACHE_SECS'] > 0:
# Store entry with time, for expiry later
entry = {
"metadata": metadata,
"time": datetime.datetime.utcnow()
}
Probe.METADATA_CACHE[key] = entry
return metadata
# Lifecycle
def init(self, resource, probe_vars):
"""
Probe contains the actual Probe parameters (from Models/DB) for
requests and a list of response Checks with their
functions and parameters
:param resource:
:param probe_vars:
:return: None
"""
self._resource = resource
self._probe_vars = probe_vars
self._parameters = probe_vars.parameters
self._check_vars = probe_vars.check_vars
self.response = None
# Create ProbeResult object that gathers all results for single Probe
self.result = ProbeResult(self, self._probe_vars)
#
# Lifecycle
def exit(self):
pass
def get_var_names(self):
var_names = Plugin.get_var_names(self)
var_names.extend([
'RESOURCE_TYPE',
'REQUEST_METHOD',
'REQUEST_HEADERS',
'REQUEST_TEMPLATE',
'CHECKS_AVAIL'
])
return var_names
def expand_check_vars(self, checks_avail):
for check_class in checks_avail:
check_avail = checks_avail[check_class]
check = Factory.create_obj(check_class)
check_vars = Plugin.copy(check.get_plugin_vars())
# Check if Probe class overrides Check Params
# mainly "value" entries.
if 'set_params' in check_avail:
set_params = check_avail['set_params']
for set_param in set_params:
if set_param in check_vars['PARAM_DEFS']:
param_orig = check_vars['PARAM_DEFS'][set_param]
param_override = set_params[set_param]
param_def = Plugin.merge(param_orig, param_override)
check_vars['PARAM_DEFS'][set_param] = param_def
checks_avail[check_class] = check_vars
return checks_avail
def get_checks_info_defaults(self):
checks_avail = self.get_checks_info()
checks_avail_default = {}
for check_class in checks_avail:
check_avail = checks_avail[check_class]
# Only include default Checks if specified
if 'default' in check_avail and check_avail['default']:
checks_avail_default[check_class] = check_avail
return checks_avail_default
def get_checks_info(self):
return Plugin.copy(Plugin.get_plugin_vars(self))['CHECKS_AVAIL']
def get_plugin_vars(self):
probe_vars = Plugin.copy(Plugin.get_plugin_vars(self))
probe_vars['CHECKS_AVAIL'] = \
self.expand_check_vars(probe_vars['CHECKS_AVAIL'])
return probe_vars
def log(self, text):
LOGGER.info(text)
def before_request(self):
""" Before running actual request to service"""
pass
def after_request(self):
""" After running actual request to service"""
pass
def get_request_headers(self):
if not self._resource:
return dict()
headers = Plugin.copy(self.REQUEST_HEADERS)
return self._resource.add_auth_header(headers)
def perform_request(self):
""" Perform actual request to service"""
# Actualize request query string or POST body
# by substitution in template.
url_base = self._resource.url
request_string = None
if self.REQUEST_TEMPLATE:
request_string = self.REQUEST_TEMPLATE
if '?' in url_base and self.REQUEST_TEMPLATE[0] == '?':
self.REQUEST_TEMPLATE = '&' + self.REQUEST_TEMPLATE[1:]
if self._parameters:
request_parms = self._parameters
param_defs = self.get_param_defs()
# Expand string list array to comma separated string
for param in request_parms:
if param_defs[param]['type'] == 'stringlist':
request_parms[param] = ','.join(request_parms[param])
request_string = self.REQUEST_TEMPLATE.format(**request_parms)
self.log('Requesting: %s url=%s' % (self.REQUEST_METHOD, url_base))
try:
if self.REQUEST_METHOD == 'GET':
# Default is plain URL, e.g. for WWW:LINK
url = url_base
if request_string:
# Query String: mainly OWS:* resources
url = "%s%s" % (url, request_string)
self.response = self.perform_get_request(url)
elif self.REQUEST_METHOD == 'POST':
self.response = self.perform_post_request(
url_base, request_string)
except requests.exceptions.RequestException as e:
msg = "Request Err: %s %s" % (e.__class__.__name__, str(e))
self.result.set(False, msg)
if self.response:
self.log('response: status=%d' % self.response.status_code)
            if self.response.status_code // 100 in [4, 5]:
self.log('Error response: %s' % (str(self.response.text)))
def perform_get_request(self, url):
""" Perform actual HTTP GET request to service"""
return requests.get(
url,
timeout=App.get_config()['GHC_PROBE_HTTP_TIMEOUT_SECS'],
headers=self.get_request_headers())
def perform_post_request(self, url_base, request_string):
""" Perform actual HTTP POST request to service"""
return requests.post(
url_base,
timeout=App.get_config()['GHC_PROBE_HTTP_TIMEOUT_SECS'],
data=request_string,
headers=self.get_request_headers())
def run_request(self):
""" Run actual request to service"""
try:
self.before_request()
self.result.start()
try:
self.perform_request()
except Exception as e:
msg = "Perform_request Err: %s %s" % \
(e.__class__.__name__, str(e))
self.result.set(False, msg)
self.result.stop()
self.after_request()
except Exception as e:
            # We must never bail out because of an Exception
            # in a Probe.
msg = "Probe Err: %s %s" % (e.__class__.__name__, str(e))
LOGGER.error(msg)
self.result.set(False, msg)
def run_checks(self):
""" Do the checks on the response from request"""
# Do not run Checks if Probe already failed
if not self.result.success:
return
# Config also determines which actual checks are performed
# from possible Checks in Probe. Checks are performed
# by Check instances.
for check_var in self._check_vars:
check = None
check_class = ''
try:
check_class = check_var.check_class
check = Factory.create_obj(check_class)
except Exception:
LOGGER.error("Cannot create Check class: %s %s"
% (check_class, str(sys.exc_info())))
if not check:
continue
try:
check.init(self, check_var)
check.perform()
except Exception:
msg = "Check Err: %s" % str(sys.exc_info())
LOGGER.error(msg)
check.set_result(False, msg)
self.log('Check: fun=%s result=%s' % (check_class,
check._result.success))
self.result.add_result(check._result)
# Lifecycle
def calc_result(self):
""" Calculate overall result from the Result object"""
self.log("Result: %s" % str(self.result))
@staticmethod
def run(resource, probe_vars):
"""
Class method to create and run a single Probe
instance. Follows strict sequence of method calls.
Each method can be overridden in subclass.
"""
probe = None
try:
# Create Probe instance from module.class string
probe = Factory.create_obj(probe_vars.probe_class)
except Exception:
LOGGER.error("Cannot create Probe class: %s %s"
% (probe_vars.probe_class, str(sys.exc_info())))
if not probe:
return
# Initialize with actual parameters
probe.init(resource, probe_vars)
# Perform request
probe.run_request()
# Perform the Probe's checks
probe.run_checks()
# Determine result
probe.calc_result()
# Lifecycle
probe.exit()
# Return result
return probe.result
| 33.070175
| 78
| 0.59371
|
b20f5bb5243c099ecde48102e2c5b3241ad8c242
| 1,215
|
py
|
Python
|
blog/feeds.py
|
josephdubon/boilerplate_dubon_django_blog
|
1dbe470006be066b12dd6486eb26a41d304206f8
|
[
"Unlicense",
"MIT"
] | null | null | null |
blog/feeds.py
|
josephdubon/boilerplate_dubon_django_blog
|
1dbe470006be066b12dd6486eb26a41d304206f8
|
[
"Unlicense",
"MIT"
] | 2
|
2021-06-10T20:43:00.000Z
|
2021-09-22T19:55:41.000Z
|
blog/feeds.py
|
josephdubon/boilerplate_dubon_django_blog
|
1dbe470006be066b12dd6486eb26a41d304206f8
|
[
"Unlicense",
"MIT"
] | null | null | null |
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from django.urls import reverse_lazy
from .models import Post
# First subclass the Feed class of the syndication framework
class LatestPostFeed(Feed):
# The title, link, and description attributes correspond to the
# - <title>, <link>, and <description> RSS elements, respectively.
title = "My Blog"
link = reverse_lazy('blog:post_list') # reverse_lazy() to generate the URL for the link attribute
description = 'New posts of my blog.'
# The items() method retrieves the objects to be included in the feed. You are retrieving only the
# - last five published posts for this feed.
def items(self):
return Post.published.all()[:5]
# The item_title() and item_description() methods will receive each object returned by items()
# - and return the title and description for each item.
def item_title(self, item):
return item.title
# - Use the truncatewords built-in template filter to build the description of the blog post with the
# - first 30 words.
def item_description(self, item):
return truncatewords(item.body, 30)
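# --- Wiring sketch (illustrative, not part of the original file) ------------
# The feed is typically exposed through a URL pattern, for example in the
# blog app's urls.py:
#   from .feeds import LatestPostFeed
#   path('feed/', LatestPostFeed(), name='post_feed')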
| 40.5
| 105
| 0.721811
|
732af64a825fbb52eff7ca5348fcccf84ffaf7b6
| 6,857
|
py
|
Python
|
tests/unit/python/foglamp/services/core/api/test_plugin_discovery_api.py
|
christoofar/FogLAMP
|
3aaae302104038a8534c54ff8a3ed0fefd4f3201
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/services/core/api/test_plugin_discovery_api.py
|
christoofar/FogLAMP
|
3aaae302104038a8534c54ff8a3ed0fefd4f3201
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/services/core/api/test_plugin_discovery_api.py
|
christoofar/FogLAMP
|
3aaae302104038a8534c54ff8a3ed0fefd4f3201
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import json
from unittest.mock import patch
import pytest
from aiohttp import web
from foglamp.services.core import routes
from foglamp.common.plugin_discovery import PluginDiscovery
__author__ = "Ashish Jabble"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("api", "plugin-discovery")
class TestPluginDiscoveryApi:
@pytest.fixture
def client(self, loop, test_client):
app = web.Application(loop=loop)
# fill the routes table
routes.setup(app)
return loop.run_until_complete(test_client(app))
@pytest.mark.parametrize("method, result, is_config", [
("/foglamp/plugins/installed", {"name": "sinusoid", "version": "1.0", "type": "south", "description": "sinusoid plugin"}, False),
("/foglamp/plugins/installed?config=true",
{"name": "sinusoid", "version": "1.0", "type": "south", "description": "sinusoid plugin", "config": {
"plugin": {"description": "sinusoid plugin", "type": "string", "default": "sinusoid", "readonly": "true"}}}, True),
("/foglamp/plugins/installed?config=false", {"name": "sinusoid", "version": "1.0", "type": "south", "description": "sinusoid plugin"}, False)
])
async def test_get_plugins_installed(self, client, method, result, is_config):
with patch.object(PluginDiscovery, 'get_plugins_installed', return_value=result) as patch_get_plugin_installed:
resp = await client.get('{}'.format(method))
assert 200 == resp.status
r = await resp.text()
json_response = json.loads(r)
assert {'plugins': result} == json_response
patch_get_plugin_installed.assert_called_once_with(None, is_config)
@pytest.mark.parametrize("param", [
"north",
"south",
"North",
"South",
"NORTH",
"SOUTH",
"filter",
"Filter",
"FILTER",
"notify",
"NOTIFY"
])
async def test_get_plugins_installed_by_params(self, client, param):
with patch.object(PluginDiscovery, 'get_plugins_installed', return_value={}) as patch_get_plugin_installed:
resp = await client.get('/foglamp/plugins/installed?type={}'.format(param))
assert 200 == resp.status
r = await resp.text()
json_response = json.loads(r)
assert {'plugins': {}} == json_response
patch_get_plugin_installed.assert_called_once_with(param.lower(), False)
@pytest.mark.parametrize("param, direction, result, is_config", [
("?type=north&config=false", "north", {"name": "http", "version": "1.0.0", "type": "north", "description": "HTTP North-C plugin"}, False),
("?type=south&config=false", "south", {"name": "sinusoid", "version": "1.0", "type": "south", "description": "sinusoid plugin"}, False),
("?type=filter&config=false", "filter", {"name": "scale", "version": "1.0.0", "type": "filter", "description": "Filter Scale plugin"}, False),
("?type=notify&config=false", "notify", {"name": "email", "version": "1.0.0", "type": "notify", "description": "Email notification plugin"}, False),
("?type=north&config=true", "north", {"name": "http", "version": "1.0.0", "type": "north", "description": "HTTP North-C plugin",
"config": {"plugin": {"description": "HTTP North-C plugin", "type": "string", "default": "http-north"}}}, True),
("?type=south&config=true", "south", {"name": "sinusoid", "version": "1.0", "type": "south", "description": "sinusoid plugin",
"config": {"plugin": {"description": "sinusoid plugin", "type": "string", "default": "sinusoid", "readonly": "true"}}}, True),
("?type=filter&config=true", "filter", {"name": "scale", "version": "1.0.0", "type": "filter", "description": "Filter Scale plugin",
"config": {"offset": {"default": "0.0", "type": "float", "description": "A constant offset"}, "factor": {"default": "100.0", "type": "float", "description": "Scale factor for a reading."}, "plugin": {"default": "scale", "type": "string", "description": "Scale filter plugin"}, "enable": {"default": "false", "type": "boolean", "description": "A switch that can be used to enable or disable."}}}, True),
("?type=notify&config=true", "notify", {"name": "email", "version": "1.0.0", "type": "notify", "description": "Email notification plugin",
"config": {"plugin": {"type": "string", "description": "Email notification plugin", "default": "email"}}}, True)
])
async def test_get_plugins_installed_by_type_and_config(self, client, param, direction, result, is_config):
with patch.object(PluginDiscovery, 'get_plugins_installed', return_value=result) as patch_get_plugin_installed:
resp = await client.get('/foglamp/plugins/installed{}'.format(param))
assert 200 == resp.status
r = await resp.text()
json_response = json.loads(r)
assert {'plugins': result} == json_response
patch_get_plugin_installed.assert_called_once_with(direction, is_config)
@pytest.mark.parametrize("param, message", [
("?type=blah", "Invalid plugin type. Must be 'north' or 'south' or 'filter' or 'notify'."),
("?config=blah", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=False", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=True", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=f", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=t", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=1", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=Y", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=Yes", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=No&type=north", 'Only "true", "false", true, false are allowed for value of config.'),
("?config=TRUE&type=south", 'Only "true", "false", true, false are allowed for value of config.'),
("?type=south&config=0", 'Only "true", "false", true, false are allowed for value of config.')
])
async def test_bad_get_plugins_installed(self, client, param, message):
resp = await client.get('/foglamp/plugins/installed{}'.format(param))
assert 400 == resp.status
assert message == resp.reason
| 62.336364
| 450
| 0.612221
|
996b118d17d717560f4a0af453b7ab64ecd26aa2
| 14,016
|
py
|
Python
|
bcbio/variation/freebayes.py
|
SciLifeLab/bcbio-nextgen
|
370b3f316c423b41523accc5e212d51a5b7ecaa9
|
[
"MIT"
] | 3
|
2015-11-18T07:17:54.000Z
|
2021-04-28T13:58:37.000Z
|
bcbio/variation/freebayes.py
|
SciLifeLab/bcbio-nextgen
|
370b3f316c423b41523accc5e212d51a5b7ecaa9
|
[
"MIT"
] | null | null | null |
bcbio/variation/freebayes.py
|
SciLifeLab/bcbio-nextgen
|
370b3f316c423b41523accc5e212d51a5b7ecaa9
|
[
"MIT"
] | null | null | null |
"""Bayesian variant calling with FreeBayes.
https://github.com/ekg/freebayes
"""
import os
import sys
from bcbio import bam, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.pipeline.shared import subset_variant_regions
from bcbio.provenance import do
from bcbio.variation import annotation, bedutils, ploidy, vcfutils
from bcbio.variation.vcfutils import (get_paired_bams, is_paired_analysis,
move_vcf)
def region_to_freebayes(region):
if isinstance(region, (list, tuple)):
chrom, start, end = region
return "%s:%s..%s" % (chrom, start, end)
else:
return region
def _freebayes_options_from_config(items, config, out_file, region=None):
"""Prepare standard options from configuration input.
Input BED target files are merged to avoid overlapping regions which
cause FreeBayes to call multiple times.
"""
opts = []
opts += ["--ploidy", str(ploidy.get_ploidy(items, region))]
variant_regions = bedutils.merge_overlaps(utils.get_in(config, ("algorithm", "variant_regions")),
items[0])
target = subset_variant_regions(variant_regions, region, out_file, items)
if target:
if isinstance(target, basestring) and os.path.isfile(target):
opts += ["--targets", target]
else:
opts += ["--region", region_to_freebayes(target)]
resources = config_utils.get_resources("freebayes", config)
if resources.get("options"):
opts += resources["options"]
return opts
def _add_somatic_opts(opts, paired):
"""Add somatic options to current set. See _run_freebayes_paired for references.
"""
if "--min-alternate-fraction" not in opts and "-F" not in opts:
# add minimum reportable allele frequency
# FreeBayes defaults to 20%, but use 10% by default for the
# tumor case
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
opts += " --min-alternate-fraction %s" % min_af
# Recommended settings for cancer calling
opts += (" --pooled-discrete --pooled-continuous --genotype-qualities "
"--report-genotype-likelihood-max --allele-balance-priors-off")
return opts
def run_freebayes(align_bams, items, ref_file, assoc_files, region=None,
out_file=None):
"""Run FreeBayes variant calling, either paired tumor/normal or germline calling.
"""
if is_paired_analysis(align_bams, items):
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
call_file = _run_freebayes_caller(align_bams, items, ref_file,
assoc_files, region, out_file, somatic=paired)
else:
call_file = _run_freebayes_paired(align_bams, items, ref_file,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = _run_freebayes_caller(align_bams, items, ref_file,
assoc_files, region, out_file)
return call_file
def _run_freebayes_caller(align_bams, items, ref_file, assoc_files,
region=None, out_file=None, somatic=None):
"""Detect SNPs and indels with FreeBayes.
Performs post-filtering to remove very low quality variants which
can cause issues feeding into GATK. Breaks variants into individual
allelic primitives for analysis and evaluation.
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
for align_bam in align_bams:
bam.index(align_bam, config)
freebayes = config_utils.get_program("freebayes", config)
vcffilter = config_utils.get_program("vcffilter", config)
input_bams = " ".join("-b %s" % x for x in align_bams)
opts = " ".join(_freebayes_options_from_config(items, config, out_file, region))
# Recommended options from 1000 genomes low-complexity evaluation
# https://groups.google.com/d/msg/freebayes/GvxIzjcpbas/1G6e3ArxQ4cJ
opts += " --min-repeat-entropy 1 --experimental-gls"
if somatic:
opts = _add_somatic_opts(opts, somatic)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
cmd = ("{freebayes} -f {ref_file} {input_bams} {opts} | "
"{vcffilter} -f 'QUAL > 5' -s | {fix_ambig} | "
"vcfallelicprimitives --keep-info --keep-geno | vcffixup | vcfstreamsort | "
"vt normalize -r {ref_file} -q - 2> /dev/null | vcfuniqalleles "
"{compress_cmd} > {tx_out_file}")
do.run(cmd.format(**locals()), "Genotyping with FreeBayes", {})
ann_file = annotation.annotate_nongatk_vcf(out_file, align_bams,
assoc_files.get("dbsnp"),
ref_file, config)
return ann_file
def _run_freebayes_paired(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Detect SNPs and indels with FreeBayes for paired tumor/normal samples.
Sources of options for FreeBayes:
mailing list: https://groups.google.com/d/msg/freebayes/dTWBtLyM4Vs/HAK_ZhJHguMJ
mailing list: https://groups.google.com/forum/#!msg/freebayes/LLH7ZfZlVNs/63FdD31rrfEJ
speedseq: https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L916
sga/freebayes: https://github.com/jts/sga-extra/blob/7e28caf71e8107b697f9be7162050e4fa259694b/
sga_generate_varcall_makefile.pl#L299
"""
config = items[0]["config"]
if out_file is None:
out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0]
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
paired = get_paired_bams(align_bams, items)
assert paired.normal_bam, "Require normal BAM for FreeBayes paired calling and filtering"
freebayes = config_utils.get_program("freebayes", config)
opts = " ".join(_freebayes_options_from_config(items, config, out_file, region))
opts += " --min-repeat-entropy 1 --experimental-gls"
opts = _add_somatic_opts(opts, paired)
compress_cmd = "| bgzip -c" if out_file.endswith("gz") else ""
fix_ambig = vcfutils.fix_ambiguous_cl()
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
cl = ("{freebayes} -f {ref_file} {opts} "
"{paired.tumor_bam} {paired.normal_bam} "
"| vcffilter -f 'QUAL > 5' -s "
"| {py_cl} -x 'bcbio.variation.freebayes.call_somatic(x)' "
"| {fix_ambig} | "
"vcfallelicprimitives --keep-info --keep-geno | vcffixup | vcfstreamsort | "
"vt normalize -r {ref_file} -q - 2> /dev/null | vcfuniqalleles "
"{compress_cmd} > {tx_out_file}")
bam.index(paired.tumor_bam, config)
bam.index(paired.normal_bam, config)
do.run(cl.format(**locals()), "Genotyping paired variants with FreeBayes", {})
ann_file = annotation.annotate_nongatk_vcf(out_file, align_bams,
assoc_files.get("dbsnp"), ref_file,
config)
return ann_file
# ## Filtering
def _check_lods(parts, tumor_thresh, normal_thresh):
"""Ensure likelihoods for tumor and normal pass thresholds.
Skipped if no FreeBayes GL annotations available.
"""
try:
gl_index = parts[8].split(":").index("GL")
except ValueError:
return True
try:
tumor_gls = [float(x) for x in parts[9].split(":")[gl_index].split(",")]
tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
# No GL information, no tumor call (so fail it)
except IndexError:
tumor_lod = -1.0
try:
normal_gls = [float(x) for x in parts[10].split(":")[gl_index].split(",")]
normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
    # No GL information, no normal call (so pass it)
except IndexError:
normal_lod = normal_thresh
return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
def _check_freqs(parts):
"""Ensure frequency of tumor to normal passes a reasonable threshold.
Avoids calling low frequency tumors also present at low frequency in normals,
which indicates a contamination or persistent error.
"""
thresh_ratio = 2.7
try: # FreeBayes
ao_index = parts[8].split(":").index("AO")
ro_index = parts[8].split(":").index("RO")
except ValueError:
ao_index, ro_index = None, None
try: # VarDict
af_index = parts[8].split(":").index("AF")
except ValueError:
af_index = None
if af_index is None and ao_index is None:
raise NotImplementedError("Unexpected format annotations: %s" % parts[0])
def _calc_freq(item):
try:
if ao_index is not None and ro_index is not None:
ao = sum([int(x) for x in item.split(":")[ao_index].split(",")])
ro = int(item.split(":")[ro_index])
freq = ao / float(ao + ro)
elif af_index is not None:
freq = float(item.split(":")[af_index])
except (IndexError, ValueError, ZeroDivisionError):
freq = 0.0
return freq
tumor_freq, normal_freq = _calc_freq(parts[9]), _calc_freq(parts[10])
return normal_freq <= 0.001 or normal_freq <= tumor_freq / thresh_ratio
def call_somatic(line):
"""Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.
Assumes tumor/normal called with tumor first and normal second, as done in bcbio
implementation.
Uses MuTect like somatic filter based on implementation in speedseq:
https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62
Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
For tumors, we retrieve the best likelihood to not be reference (the first GL) and
    for normal, the best likelihood to be reference.
After calculating the likelihoods, we compare these to thresholds to pass variants
at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.
We also check that the frequency of the tumor exceeds the frequency of the normal by
a threshold to avoid calls that are low frequency in both tumor and normal. This supports
both FreeBayes and VarDict output frequencies.
"""
# Thresholds are like phred scores, so 3.5 = phred35
tumor_thresh, normal_thresh = 3.5, 3.5
if line.startswith("#CHROM"):
headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">',
('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
'or phred likelihoods: tumor: %s, normal %s.">')
% (int(tumor_thresh * 10), int(normal_thresh * 10))]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
if _check_lods(parts, tumor_thresh, normal_thresh) and _check_freqs(parts):
parts[7] = parts[7] + ";SOMATIC"
else:
if parts[6] in set([".", "PASS"]):
parts[6] = "REJECT"
else:
parts[6] += ";REJECT"
line = "\t".join(parts)
return line
def _clean_freebayes_output(line):
"""Clean FreeBayes output to make post-processing with GATK happy.
XXX Not applied on recent versions which fix issues to be more compatible
with bgzip output, but retained in case of need.
- Remove lines from FreeBayes outputs where REF/ALT are identical:
2 22816178 . G G 0.0339196
or there are multiple duplicate alleles:
4 60594753 . TGAAA T,T
- Remove Type=Int specifications which are not valid VCF and GATK chokes
on.
"""
if line.startswith("#"):
line = line.replace("Type=Int,D", "Type=Integer,D")
return line
else:
parts = line.split("\t")
alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
if len(alleles) == len(set(alleles)):
return line
return None
def clean_vcf_output(orig_file, clean_fn, config, name="clean"):
"""Provide framework to clean a file in-place, with the specified clean
function.
"""
base, ext = utils.splitext_plus(orig_file)
out_file = "{0}-{1}{2}".format(base, name, ext)
if not utils.file_exists(out_file):
with open(orig_file) as in_handle:
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
update_line = clean_fn(line)
if update_line:
out_handle.write(update_line)
move_vcf(orig_file, "{0}.orig".format(orig_file))
move_vcf(out_file, orig_file)
with open(out_file, "w") as out_handle:
out_handle.write("Moved to {0}".format(orig_file))
| 46.564784
| 111
| 0.625571
|
059477a5e9673a4c2633fe2f8f66cc7e6ad70934
| 722
|
py
|
Python
|
dataworkspace/dataworkspace/apps/core/migrations/0009_alter_newslettersubscription_user.py
|
uktrade/analysis-workspace
|
2de79c6172cf391c1954ca3789c5c0dc0030ec25
|
[
"MIT"
] | 1
|
2019-06-10T08:22:56.000Z
|
2019-06-10T08:22:56.000Z
|
dataworkspace/dataworkspace/apps/core/migrations/0009_alter_newslettersubscription_user.py
|
uktrade/analysis-workspace
|
2de79c6172cf391c1954ca3789c5c0dc0030ec25
|
[
"MIT"
] | 2
|
2019-05-17T13:10:42.000Z
|
2019-06-17T10:48:46.000Z
|
dataworkspace/dataworkspace/apps/core/migrations/0009_alter_newslettersubscription_user.py
|
uktrade/analysis-workspace
|
2de79c6172cf391c1954ca3789c5c0dc0030ec25
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.13 on 2022-05-30 16:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("core", "0008_newslettersubscription"),
]
operations = [
migrations.AlterField(
model_name="newslettersubscription",
name="user",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="newsletter_signups",
to=settings.AUTH_USER_MODEL,
unique=True,
),
),
]
| 26.740741
| 66
| 0.620499
|
92bef292d32f033bf6fc1a3129d3ad91496c15f1
| 735
|
py
|
Python
|
third_party/WebKit/LayoutTests/http/tests/websocket/count-received-bytes_wsh.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777
|
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
third_party/WebKit/LayoutTests/http/tests/websocket/count-received-bytes_wsh.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66
|
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
third_party/WebKit/LayoutTests/http/tests/websocket/count-received-bytes_wsh.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123
|
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import codecs
def web_socket_do_extra_handshake(request):
request.ws_extension_processors = []
request.received_bytes = 0
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
request.received_bytes += len(codecs.encode(line, 'utf-8'))
else:
request.received_bytes += len(line)
def web_socket_passive_closing_handshake(request):
return 1000, 'received %d bytes' % request.received_bytes
| 27.222222
| 72
| 0.69932
|
0dc7a0e33f43683e9d3893c734939f1b47d95db7
| 3,853
|
py
|
Python
|
Website/site/gravi_site.py
|
NPPC-UK/Gravimetrics
|
a4a4bada6da5e2c3dd6f58e7fa4ca226fc374d86
|
[
"MIT"
] | 4
|
2016-11-19T00:34:45.000Z
|
2021-12-30T14:27:01.000Z
|
Website/site/gravi_site.py
|
NPPC-UK/Gravimetrics
|
a4a4bada6da5e2c3dd6f58e7fa4ca226fc374d86
|
[
"MIT"
] | null | null | null |
Website/site/gravi_site.py
|
NPPC-UK/Gravimetrics
|
a4a4bada6da5e2c3dd6f58e7fa4ca226fc374d86
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from flask import Flask, render_template, request, redirect, url_for, session, flash, make_response
from functools import wraps
import pandas as pd
from login_manager import login_user
from data_manager import get_experiments, get_experiment_plants, get_all_water_data, get_all_balance_data, end_experiment, create_new_experiment, update_target_weights
app = Flask(__name__)
ALLOWED_EXTENSIONS = set(['csv'])
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def get_uploaded_file_as_df():
# Max said it was okay!
# We Trust people!
df = pd.DataFrame()
# check if the post request has the file part
    if 'file' not in request.files:
        flash('No file part')
        return (False, df)
    file = request.files['file']
    # if the user does not select a file, the browser may also
    # submit an empty part without a filename
    if file.filename == '':
        flash('No selected file')
        return (False, df)
if file and allowed_file(file.filename):
print(file.filename)
resp = True
df = pd.read_csv(request.files.get('file'))
else:
resp = 'Bad upload file'
return (resp, df)
def checkuser(func):
"""Checks whether user is logged in or passes to login page."""
@wraps(func)
def wrapper(*args, **kwargs):
if 'username' not in session:
return redirect(url_for('login'))
return func(*args, **kwargs)
return wrapper
@app.route("/", defaults={'path': ''}, methods=['GET', 'POST'])
@app.route('/<path:path>', methods=['GET', 'POST'])
@checkuser
def index(path):
exps = get_experiments()
return render_template('index.html', experiments=exps, path=path)
@app.route("/new_experiment", methods=['GET', 'POST'])
@checkuser
def new_experiment():
resp = None
if request.method == 'POST':
resp, df = get_uploaded_file_as_df()
if resp:
resp = create_new_experiment(df, owner=session['username'][:3])
return render_template('new_experiment.html',
resp=resp)
@app.route("/data")
@checkuser
def data():
exp = request.args.get("experiment")
data_type = request.args.get("type")
if 'end' in data_type.lower():
end_experiment(exp)
df = get_all_water_data(
exp) if 'water' in data_type.lower() else get_all_balance_data(exp)
resp = make_response(df.to_csv())
resp.headers["Content-Disposition"] = "attachment; filename=export.csv"
resp.headers["Content-Type"] = "text/csv"
return resp
@app.route("/experiment", methods=['GET', 'POST'])
@checkuser
def view_experiment():
resp = None
exp = request.args.get("experiment")
plants_df = get_experiment_plants(exp)
if request.method == 'POST':
resp, df = get_uploaded_file_as_df()
if resp:
resp = update_target_weights(df)
return render_template('experiment.html',
experiment=exp,
error=resp,
plants=plants_df)
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'GET':
# When this page is visited we want to log out the user
if 'username' in session:
session.pop('username', None)
if request.method == 'POST':
session['username'] = request.form['username']
pwd = request.form['password']
if login_user(app, session['username'], pwd):
return redirect(url_for('index'))
else:
error = 'Invalid Credentials. Please try again.'
return render_template('login.html', error=error)
if __name__ == "__main__":
app.secret_key = '8080'
app.config['SESSION_TYPE'] = 'filesystem'
app.run(host='0.0.0.0', port=9666, debug=False)
| 31.072581
| 167
| 0.634051
|
80c66543b75d2ff9a7750f02411603a70d48e236
| 3,171
|
py
|
Python
|
louvain_to_gephi_giraph.py
|
ErathosthemesAmmoro/track-communities
|
7afd60aaa62ed0b81c7f785974ea0a8687ea136e
|
[
"Apache-2.0"
] | 12
|
2015-02-02T13:13:52.000Z
|
2022-03-16T12:35:32.000Z
|
louvain_to_gephi_giraph.py
|
ErathosthemesAmmoro/track-communities
|
7afd60aaa62ed0b81c7f785974ea0a8687ea136e
|
[
"Apache-2.0"
] | null | null | null |
louvain_to_gephi_giraph.py
|
ErathosthemesAmmoro/track-communities
|
7afd60aaa62ed0b81c7f785974ea0a8687ea136e
|
[
"Apache-2.0"
] | 3
|
2015-10-05T00:27:38.000Z
|
2020-03-02T17:51:39.000Z
|
#
# Copyright 2016 Sotera Defense Solutions Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import sys
from subprocess import call
table = sys.argv[1]
garbage = open("garbage.out","w")
os.system("cat output/giraph/giraph_0/part-m* > output/giraph/giraph_0/output")
f = open('output/giraph/giraph_0/output','r')
o = open('louvain_to_gephi/giraph/community_itr_1.nodes','w')
for line in f:
vals = line.split('\t')
o.write(vals[0].strip() + '\t' + vals[1].strip() + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/giraph/comm_1", stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/giraph/community_itr_1.nodes /tmp/trackcomms/" + table + "/output/giraph/comm_1", stdout=garbage, shell=True)
f = open('edgelist.tsv','r')
o = open('louvain_to_gephi/giraph/graph_itr_0.edges','w')
for line in f:
if len(line.split('\t')) == 3:
source,weight,edgelist = line.split('\t')
edgelist = edgelist.strip().split(',')
for e in edgelist:
o.write('\t'.join((source,e.split(':')[0],e.split(':')[1])) + '\n')
o.close()
f.close()
# Here's the looping piece
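# Each Louvain iteration i pairs the mapreduce_i edge output with the
# giraph_(i+1) community output; loop until no further mapreduce_i directory exists.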
i = 1
pm = 'output/giraph/mapreduce_'+str(i)
pg = 'output/giraph/giraph_'+str(i+1)
while os.path.exists(pm):
os.system("cat " + pg + "/part* > " + pg + "/output")
os.system("cat " + pm + "/part* > " + pm + "/output")
level = str(i+1)
f = open(pg + '/output','r')
o = open('louvain_to_gephi/giraph/community_itr_' + level + '.nodes','w')
for line in f:
vals = line.split('\t')
o.write(vals[0].strip() + '\t' + vals[1].strip() + '\n')
f.close()
o.close()
call("hadoop fs -mkdir /tmp/trackcomms/" + table + "/output/giraph/comm_" + level, stdout=garbage, shell=True)
call("hadoop fs -put louvain_to_gephi/giraph/community_itr_" + level + ".nodes /tmp/trackcomms/" + table + "/output/giraph/comm_" + level, stdout=garbage, shell=True)
f = open(pm + '/output','r')
o = open('louvain_to_gephi/giraph/graph_itr_' + str(i) + '.edges','w')
for line in f:
if len(line.split('\t')) == 3:
source,weight,edgelist = line.split('\t')
edgelist = edgelist.strip().split(',')
for e in edgelist:
o.write('\t'.join((source,e.split(':')[0],e.split(':')[1])) + '\n')
if int(weight) != 0:
o.write('\t'.join((source,source,weight,'\n')))
elif len(line.split('\t')) == 2:
source, weight = line.split('\t')
weight = weight.strip()
if int(weight) != 0:
o.write('\t'.join((source,source,weight,'\n')))
o.close()
f.close()
i = i + 1
pm = 'output/giraph/mapreduce_'+str(i)
pg = 'output/giraph/giraph_'+str(i+1)
| 32.690722
| 168
| 0.639546
|
a911e61793ef8f41cbc108968858cbe169ca9be7
| 854
|
py
|
Python
|
maza/modules/creds/cameras/avigilon/ssh_default_creds.py
|
ArturSpirin/maza
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
[
"MIT"
] | 2
|
2020-02-06T20:24:31.000Z
|
2022-03-08T19:07:16.000Z
|
maza/modules/creds/cameras/avigilon/ssh_default_creds.py
|
ArturSpirin/maza
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
[
"MIT"
] | null | null | null |
maza/modules/creds/cameras/avigilon/ssh_default_creds.py
|
ArturSpirin/maza
|
56ae6325c08bcedd22c57b9fe11b58f1b38314ca
|
[
"MIT"
] | null | null | null |
from maza.core.exploit import *
from maza.modules.creds.generic.ssh_default import Exploit as SSHDefault
class Exploit(SSHDefault):
__info__ = {
"name": "Avigilon Camera Default SSH Creds",
"description": "Module performs dictionary attack against Avigilon Camera SSH service. "
"If valid credentials are found, they are displayed to the user.",
"authors": (
"Marcin Bury <marcin[at]threat9.com>", # routersploit module
),
"devices": (
"Avigilon Camera",
)
}
target = OptIP("", "Target IPv4, IPv6 address or file with ip:port (file://)")
port = OptPort(22, "Target SSH port")
threads = OptInteger(1, "Number of threads")
defaults = OptWordlist("admin:admin,Administrator:", "User:Pass or file with default credentials (file://)")
| 37.130435
| 112
| 0.633489
|
1a846b94245e2d6772d0bc9c4ad377131f0e9ab0
| 842
|
py
|
Python
|
molecool/io/xyz.py
|
aatishpr/molecool
|
73a52479b41ae2847b32707b2c32ca4e23ca83c4
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/io/xyz.py
|
aatishpr/molecool
|
73a52479b41ae2847b32707b2c32ca4e23ca83c4
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/io/xyz.py
|
aatishpr/molecool
|
73a52479b41ae2847b32707b2c32ca4e23ca83c4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
xyz.py
read and write xyz files
"""
import numpy as np
def open_xyz(file_location):
# Open an xyz file and return symbols and coordinates.
xyz_file = np.genfromtxt(fname=file_location, skip_header=2, dtype='unicode')
symbols = xyz_file[:,0]
coords = (xyz_file[:,1:])
coords = coords.astype(np.float)
return symbols, coords
def write_xyz(file_location, symbols, coordinates):
# Write an xyz file given a file location, symbols, and coordinates.
num_atoms = len(symbols)
with open(file_location, 'w+') as f:
f.write('{}\n'.format(num_atoms))
f.write('XYZ file\n')
for i in range(num_atoms):
f.write('{}\t{}\t{}\t{}\n'.format(symbols[i],
coordinates[i,0], coordinates[i,1], coordinates[i,2]))
| 27.16129
| 100
| 0.597387
|
e104fb385828ad6c784935c829f1b6493d240a92
| 4,142
|
py
|
Python
|
packages/core/minos-microservice-common/minos/common/exceptions.py
|
sorasful/minos-python
|
1189330eebf6444627a2af6b29f347670f95a4dd
|
[
"MIT"
] | 247
|
2022-01-24T14:55:30.000Z
|
2022-03-25T12:06:17.000Z
|
packages/core/minos-microservice-common/minos/common/exceptions.py
|
sorasful/minos-python
|
1189330eebf6444627a2af6b29f347670f95a4dd
|
[
"MIT"
] | 400
|
2021-04-03T08:51:40.000Z
|
2022-01-28T11:51:22.000Z
|
packages/core/minos-microservice-common/minos/common/exceptions.py
|
sorasful/minos-python
|
1189330eebf6444627a2af6b29f347670f95a4dd
|
[
"MIT"
] | 21
|
2022-02-06T17:25:58.000Z
|
2022-03-27T04:50:29.000Z
|
from __future__ import (
annotations,
)
from typing import (
Any,
Type,
)
class MinosException(Exception):
"""Exception class for import packages or modules"""
__slots__ = "_message"
def __init__(self, error_message: str):
self._message = error_message
def __repr__(self):
return f"{type(self).__name__}(message={repr(self._message)})"
def __str__(self) -> str:
"""represent in a string format the error message passed during the instantiation"""
return self._message
class NotProvidedException(MinosException):
"""Exception to be raised when a dependency is needed but not provided."""
class MinosImportException(MinosException):
pass
class MinosProtocolException(MinosException):
pass
class MinosMessageException(MinosException):
pass
class MinosConfigException(MinosException):
"""Base config exception."""
class MinosBrokerException(MinosException):
"""Base broker exception"""
class MinosHandlerException(MinosException):
"""Base handler exception"""
class MinosLockException(MinosException):
"""Base lock exception"""
class MinosModelException(MinosException):
"""Exception to be raised when some mandatory condition is not satisfied by a model."""
pass
class EmptyMinosModelSequenceException(MinosModelException):
"""Exception to be raised when a sequence must be not empty, but it is empty."""
pass
class MultiTypeMinosModelSequenceException(MinosModelException):
"""Exception to be raised when a sequence doesn't satisfy the condition to have the same type for each item."""
pass
class MinosModelAttributeException(MinosException):
"""Base model attributes exception."""
pass
class MinosReqAttributeException(MinosModelAttributeException):
"""Exception to be raised when some required attributes are not provided."""
pass
class MinosTypeAttributeException(MinosModelAttributeException):
"""Exception to be raised when there are any mismatching between the expected and observed attribute type."""
def __init__(self, name: str, target_type: Type, value: Any):
self.name = name
self.target_type = target_type
self.value = value
super().__init__(
f"The {target_type!r} expected type for {name!r} does not match with "
f"the given data type: {type(value)!r} ({value!r})"
)
class MinosMalformedAttributeException(MinosModelAttributeException):
"""Exception to be raised when there are any kind of problems with the type definition."""
pass
class MinosParseAttributeException(MinosModelAttributeException):
"""Exception to be raised when there are any kind of problems with the parsing logic."""
def __init__(self, name: str, value: Any, exception: Exception):
self.name = name
self.value = value
self.exception = exception
super().__init__(f"{repr(exception)} was raised while parsing {repr(name)} field with {repr(value)} value.")
class MinosAttributeValidationException(MinosModelAttributeException):
"""Exception to be raised when some fields are not valid."""
def __init__(self, name: str, value: Any):
self.name = name
self.value = value
super().__init__(f"{repr(value)} value does not pass the {repr(name)} field validation.")
class DataDecoderException(MinosModelException):
"""Base data decoder exception."""
class DataDecoderMalformedTypeException(DataDecoderException):
"""Exception to be raised when malformed types are provided."""
class DataDecoderRequiredValueException(DataDecoderException):
"""Exception to be raised when required values are not provided."""
class DataDecoderTypeException(DataDecoderException):
"""Exception to be raised when expected and provided types do not match."""
def __init__(self, target_type: Type, value: Any):
self.target_type = target_type
self.value = value
super().__init__(
f"The {target_type!r} expected type does not match the given data type: {type(value)!r} ({value!r})"
)
| 27.986486
| 116
| 0.714148
|
26dda6dadc85793b3ded36754e694ede7a1ba805
| 2,394
|
py
|
Python
|
tests/test_environment.py
|
mclaffey/dfx
|
29f223e4d2be924f25f8903bcbac10b91915d6fb
|
[
"MIT"
] | null | null | null |
tests/test_environment.py
|
mclaffey/dfx
|
29f223e4d2be924f25f8903bcbac10b91915d6fb
|
[
"MIT"
] | null | null | null |
tests/test_environment.py
|
mclaffey/dfx
|
29f223e4d2be924f25f8903bcbac10b91915d6fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import unittest
import unittest_dfx
import dfx.ui_curses
import dfx.datasets
class EnvTest(unittest_dfx.AbstractDfTestCase):
"""Test functionality of ui_curses.Environment
"""
def test_load_file(self):
"""Providing a path to a csv loads a new Dfview named
after the path
"""
env = dfx.ui_curses.Environment()
data_path='../sample_data/obesity.csv'
data_dir=os.path.dirname(data_path)
env.load_file(data_path)
self.assertIn(data_path, env.dfvs)
self.assertEqual(data_path, env.current_dfv_name)
self.assertEqual(data_dir, env.current_dir)
self.assertTrue(isinstance(env.dfv, dfx.ui_curses.DfView))
def test_next_file(self):
"""Specify a directory and calling next file loads new csvs
"""
env = dfx.ui_curses.Environment()
env.current_dir='../sample_data'
file_name = env.next_file()
self.assertEqual(file_name, '../sample_data/corona.csv')
self.assertEqual(env.current_dfv_name,
'../sample_data/corona.csv')
file_name = env.next_file()
self.assertEqual(file_name, '../sample_data/emissions.csv')
self.assertEqual(env.current_dfv_name,
'../sample_data/emissions.csv')
def test_new_dfv(self):
"""Test convenience method for creating a new DfView
"""
env=dfx.ui_curses.Environment()
df=dfx.datasets.checks
dfv_p=dfx.ui_curses.DfView(df)
env.new_dfv(df=df, name='child', parent_dfv=dfv_p)
self.assertIn('child', env.dfvs)
self.assertEqual('child', env.current_dfv_name)
self.assertTrue(isinstance(env.dfv, dfx.ui_curses.DfView))
self.assertEqual(env.dfv.parent_dfv, dfv_p)
def test_next(self):
"""Next goes to alphabetically next Dfview
"""
env=dfx.ui_curses.Environment()
df=dfx.datasets.checks
env.new_dfv(df=df, name='abc')
env.new_dfv(df=df, name='xyz')
self.assertEqual(env.current_dfv_name, 'xyz')
env.next()
self.assertEqual(env.current_dfv_name, 'abc')
env.next()
self.assertEqual(env.current_dfv_name, 'xyz')
if __name__=='__main__':
sys.exit(unittest_dfx.main(__file__))
| 32.351351
| 67
| 0.629073
|
52e16dff99c9662b47b99315d3e8de64bfb65f4e
| 374
|
py
|
Python
|
contests/20210123/abc189/b/main.py
|
yamap55/atcoder_python
|
eb000b8df3037a2bba3d3527014bc12770018cb6
|
[
"MIT"
] | null | null | null |
contests/20210123/abc189/b/main.py
|
yamap55/atcoder_python
|
eb000b8df3037a2bba3d3527014bc12770018cb6
|
[
"MIT"
] | 7
|
2021-01-23T06:51:03.000Z
|
2021-07-26T15:05:44.000Z
|
contests/20210123/abc189/b/main.py
|
yamap55/atcoder_python
|
eb000b8df3037a2bba3d3527014bc12770018cb6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
a = [int(_) for _ in input().split()]
N = a[0]
X = a[1]
l = [input().split() for _ in range(N)]
al = 0
result = -1
for i, r in enumerate(l):
v = int(r[0])
p = int(r[1])
# al = al + (v * p / 100)
# if X < al:
    # Multiply by 100 and compare to avoid floating-point error in the fractional part
al = al + (v * p)
if (X * 100) < al:
result = i + 1
break
print(result)
| 17
| 39
| 0.475936
|
7d75044ee36eb1c5bd509405675bedcbc9d547da
| 2,564
|
py
|
Python
|
library_samples/Python3/ocs_sample_library_preview/SDS/SdsStreamViewMap.py
|
osi-awoodall/OSI-Samples-OCS
|
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
|
[
"Apache-2.0"
] | null | null | null |
library_samples/Python3/ocs_sample_library_preview/SDS/SdsStreamViewMap.py
|
osi-awoodall/OSI-Samples-OCS
|
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
|
[
"Apache-2.0"
] | null | null | null |
library_samples/Python3/ocs_sample_library_preview/SDS/SdsStreamViewMap.py
|
osi-awoodall/OSI-Samples-OCS
|
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
|
[
"Apache-2.0"
] | null | null | null |
# SdsStreamViewMap.py
#
import json
from .SdsStreamViewProperty import SdsStreamViewProperty
class SdsStreamViewMap(object):
"""
SdsStreamViewMap definitions
"""
@property
def SourceTypeId(self):
"""
required
:return:
"""
return self.__sourceTypeId
@SourceTypeId.setter
def SourceTypeId(self, baseType):
"""
required
:param baseType:
:return:
"""
self.__sourceTypeId = baseType
@property
def TargetTypeId(self):
"""
required
:return:
"""
return self.__targetTypeId
@TargetTypeId.setter
def TargetTypeId(self, typeCode):
"""
required
:param typeCode:
:return:
"""
self.__targetTypeId = typeCode
@property
def Properties(self):
"""
list of SdsStreamViewMapProperty not required
:return:
"""
return self.__properties
@Properties.setter
def Properties(self, properties):
"""
list of SdsStreamViewMapProperty not required
:param properties:
:return:
"""
self.__properties = properties
def toJson(self):
return json.dumps(self.toDictionary())
def toDictionary(self):
# required properties
dictionary = {'SourceTypeId': self.SourceTypeId,
'TargetTypeId': self.TargetTypeId}
# optional properties
if hasattr(self, 'Properties'):
dictionary['Properties'] = []
for value in self.Properties:
dictionary['Properties'].append(value.toDictionary())
return dictionary
@staticmethod
def fromJson(jsonObj):
return SdsStreamViewMap.fromDictionary(jsonObj)
@staticmethod
def fromDictionary(content):
streamViewMap = SdsStreamViewMap()
if not content:
return streamViewMap
if 'TargetTypeId' in content:
streamViewMap.TargetTypeId = content['TargetTypeId']
if 'SourceTypeId' in content:
streamViewMap.SourceTypeId = content['SourceTypeId']
if 'Properties' in content:
properties = content['Properties']
if properties is not None and len(properties) > 0:
streamViewMap.Properties = []
for value in properties:
streamViewMap.Properties.append(
SdsStreamViewProperty.fromDictionary(value))
return streamViewMap
| 24.188679
| 69
| 0.583463
|
b57289300e28fa87dba472d7101a480343463701
| 7,606
|
py
|
Python
|
graphsaint/pytorch_version/models.py
|
sandl99/KGraph
|
bb1a9e90b785315ecb501593a0ac19e6fafc2f28
|
[
"MIT"
] | null | null | null |
graphsaint/pytorch_version/models.py
|
sandl99/KGraph
|
bb1a9e90b785315ecb501593a0ac19e6fafc2f28
|
[
"MIT"
] | 1
|
2021-04-14T15:22:45.000Z
|
2021-04-14T15:22:45.000Z
|
graphsaint/pytorch_version/models.py
|
sandl99/Simple-KGCN-GraphSAINT
|
bb1a9e90b785315ecb501593a0ac19e6fafc2f28
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from graphsaint.utils import *
import graphsaint.pytorch_version.layers as layers
class GraphSAINT(nn.Module):
def __init__(self, num_classes, arch_gcn, train_params, feat_full, label_full, cpu_eval=False):
"""
Build the multi-layer GNN architecture.
Inputs:
num_classes int, number of classes a node can belong to
arch_gcn dict, config for each GNN layer
train_params dict, training hyperparameters (e.g., learning rate)
feat_full np array of shape N x f, where N is the total num of
nodes and f is the dimension for input node feature
label_full np array, for single-class classification, the shape
is N x 1 and for multi-class classification, the
shape is N x c (where c = num_classes)
cpu_eval bool, if True, will put the model on CPU.
Outputs:
None
"""
super(GraphSAINT, self).__init__()
self.use_cuda = (args_global.gpu >= 0)
if cpu_eval:
self.use_cuda=False
if "attention" in arch_gcn:
if "gated_attention" in arch_gcn:
if arch_gcn['gated_attention']:
self.aggregator_cls = layers.GatedAttentionAggregator
self.mulhead = int(arch_gcn['attention'])
else:
self.aggregator_cls = layers.AttentionAggregator
self.mulhead = int(arch_gcn['attention'])
else:
self.aggregator_cls = layers.HighOrderAggregator
self.mulhead = 1
self.num_layers = len(arch_gcn['arch'].split('-'))
self.weight_decay = train_params['weight_decay']
self.dropout = train_params['dropout']
self.lr = train_params['lr']
self.arch_gcn = arch_gcn
self.sigmoid_loss = (arch_gcn['loss'] == 'sigmoid')
self.feat_full = torch.from_numpy(feat_full.astype(np.float32))
self.label_full = torch.from_numpy(label_full.astype(np.float32))
if self.use_cuda:
self.feat_full = self.feat_full.cuda()
self.label_full = self.label_full.cuda()
if not self.sigmoid_loss:
self.label_full_cat = torch.from_numpy(label_full.argmax(axis=1).astype(np.int64))
if self.use_cuda:
self.label_full_cat = self.label_full_cat.cuda()
self.num_classes = num_classes
_dims, self.order_layer, self.act_layer, self.bias_layer, self.aggr_layer \
= parse_layer_yml(arch_gcn, self.feat_full.shape[1])
# get layer index for each conv layer, useful for jk net last layer aggregation
self.set_idx_conv()
self.set_dims(_dims)
self.loss = 0
self.opt_op = None
# build the model below
self.num_params = 0
self.aggregators, num_param = self.get_aggregators()
self.num_params += num_param
self.conv_layers = nn.Sequential(*self.aggregators)
self.classifier = layers.HighOrderAggregator(self.dims_feat[-1], self.num_classes,\
act='I', order=0, dropout=self.dropout, bias='bias')
self.num_params += self.classifier.num_param
self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
def set_dims(self, dims):
"""
Set the feature dimension / weight dimension for each GNN or MLP layer.
We will use the dimensions set here to initialize PyTorch layers.
Inputs:
dims list, length of node feature for each hidden layer
Outputs:
None
"""
self.dims_feat = [dims[0]] + [
((self.aggr_layer[l]=='concat') * self.order_layer[l] + 1) * dims[l+1]
for l in range(len(dims) - 1)
]
self.dims_weight = [(self.dims_feat[l],dims[l+1]) for l in range(len(dims)-1)]
def set_idx_conv(self):
"""
Set the index of GNN layers for the full neural net. For example, if
        the full NN has a 1-0-1-0 arch (1-hop graph conv, followed by 0-hop
        MLP, ...), then the layer indices will be 0 and 2.
"""
idx_conv = np.where(np.array(self.order_layer) >= 1)[0]
idx_conv = list(idx_conv[1:] - 1)
idx_conv.append(len(self.order_layer) - 1)
_o_arr = np.array(self.order_layer)[idx_conv]
if np.prod(np.ediff1d(_o_arr)) == 0:
self.idx_conv = idx_conv
else:
self.idx_conv = list(np.where(np.array(self.order_layer) == 1)[0])
def forward(self, node_subgraph, adj_subgraph):
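        # Gather the sampled subgraph's features and labels, run the conv layers,
        # L2-normalize the node embeddings, then classify.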
feat_subg = self.feat_full[node_subgraph]
label_subg = self.label_full[node_subgraph]
label_subg_converted = label_subg if self.sigmoid_loss else self.label_full_cat[node_subgraph]
_, emb_subg = self.conv_layers((adj_subgraph, feat_subg))
emb_subg_norm = F.normalize(emb_subg, p=2, dim=1)
pred_subg = self.classifier((None, emb_subg_norm))[1]
return pred_subg, label_subg, label_subg_converted
def _loss(self, preds, labels, norm_loss):
"""
The predictor performs sigmoid (for multi-class) or softmax (for single-class)
"""
if self.sigmoid_loss:
norm_loss = norm_loss.unsqueeze(1)
return torch.nn.BCEWithLogitsLoss(weight=norm_loss,reduction='sum')(preds, labels)
else:
_ls = torch.nn.CrossEntropyLoss(reduction='none')(preds, labels)
return (norm_loss*_ls).sum()
def get_aggregators(self):
"""
Return a list of aggregator instances. to be used in self.build()
"""
num_param = 0
aggregators = []
for l in range(self.num_layers):
aggr = self.aggregator_cls(
*self.dims_weight[l],
dropout=self.dropout,
act=self.act_layer[l],
order=self.order_layer[l],
aggr=self.aggr_layer[l],
bias=self.bias_layer[l],
mulhead=self.mulhead,
)
num_param += aggr.num_param
aggregators.append(aggr)
return aggregators, num_param
def predict(self, preds):
return nn.Sigmoid()(preds) if self.sigmoid_loss else F.softmax(preds, dim=1)
def train_step(self, node_subgraph, adj_subgraph, norm_loss_subgraph):
"""
Forward and backward propagation
"""
self.train()
self.optimizer.zero_grad()
preds, labels, labels_converted = self(node_subgraph, adj_subgraph)
loss = self._loss(preds, labels_converted, norm_loss_subgraph) # labels.squeeze()?
loss.backward()
torch.nn.utils.clip_grad_norm(self.parameters(), 5)
self.optimizer.step()
return loss, self.predict(preds), labels
def eval_step(self, node_subgraph, adj_subgraph, norm_loss_subgraph):
"""
Forward propagation only
"""
self.eval()
with torch.no_grad():
preds, labels, labels_converted = self(node_subgraph, adj_subgraph)
loss = self._loss(preds, labels_converted, norm_loss_subgraph)
return loss, self.predict(preds), labels
| 42.49162
| 103
| 0.58822
|
093c4535c1ce61b59f9d327f2b10e216bb19b0d4
| 5,319
|
py
|
Python
|
test/sagemaker_tests/mxnet/training/resources/mnist/mnist.py
|
Elizaaaaa/deep-learning-containers
|
6274ecb264645070d11b27e5c7e60d2e4110537d
|
[
"Apache-2.0"
] | 383
|
2020-05-19T18:09:10.000Z
|
2022-03-29T22:41:05.000Z
|
test/sagemaker_tests/mxnet/training/resources/mnist/mnist.py
|
Elizaaaaa/deep-learning-containers
|
6274ecb264645070d11b27e5c7e60d2e4110537d
|
[
"Apache-2.0"
] | 551
|
2020-05-27T17:25:50.000Z
|
2022-03-31T18:00:35.000Z
|
test/sagemaker_tests/mxnet/training/resources/mnist/mnist.py
|
Elizaaaaa/deep-learning-containers
|
6274ecb264645070d11b27e5c7e60d2e4110537d
|
[
"Apache-2.0"
] | 263
|
2020-05-19T18:17:12.000Z
|
2022-03-29T22:41:10.000Z
|
# Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import argparse
import gzip
import json
import logging
import os
import struct
import sys
import mxnet as mx
import numpy as np
from sagemaker_mxnet_container.training_utils import scheduler_host
def load_data(path):
with gzip.open(find_file(path, 'labels.gz')) as flbl:
struct.unpack('>II', flbl.read(8))
labels = np.fromstring(flbl.read(), dtype=np.int8)
with gzip.open(find_file(path, 'images.gz')) as fimg:
_, _, rows, cols = struct.unpack('>IIII', fimg.read(16))
images = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(labels), rows, cols)
images = images.reshape(images.shape[0], 1, 28, 28).astype(np.float32) / 255
return labels, images
def find_file(root_path, file_name):
for root, dirs, files in os.walk(root_path):
if file_name in files:
return os.path.join(root, file_name)
def build_graph():
data = mx.sym.var('data')
data = mx.sym.flatten(data=data)
fc1 = mx.sym.FullyConnected(data=data, num_hidden=128)
act1 = mx.sym.Activation(data=fc1, act_type='relu')
fc2 = mx.sym.FullyConnected(data=act1, num_hidden=64)
act2 = mx.sym.Activation(data=fc2, act_type='relu')
fc3 = mx.sym.FullyConnected(data=act2, num_hidden=10)
return mx.sym.SoftmaxOutput(data=fc3, name='softmax')
def get_training_context(num_gpus):
if num_gpus:
return [mx.gpu(i) for i in range(num_gpus)]
else:
return mx.cpu()
def train(batch_size, epochs, learning_rate, num_gpus, training_channel, testing_channel,
hosts, current_host, model_dir):
(train_labels, train_images) = load_data(training_channel)
(test_labels, test_images) = load_data(testing_channel)
# Data parallel training - shard the data so each host
# only trains on a subset of the total data.
shard_size = len(train_images) // len(hosts)
for i, host in enumerate(hosts):
if host == current_host:
start = shard_size * i
end = start + shard_size
break
train_iter = mx.io.NDArrayIter(train_images[start:end], train_labels[start:end], batch_size,
shuffle=True)
val_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)
logging.getLogger().setLevel(logging.DEBUG)
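    # Use a distributed synchronous key-value store when training across multiple
    # hosts; a single host keeps gradient aggregation local.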
kvstore = 'local' if len(hosts) == 1 else 'dist_sync'
mlp_model = mx.mod.Module(symbol=build_graph(),
context=get_training_context(num_gpus))
mlp_model.fit(train_iter,
eval_data=val_iter,
kvstore=kvstore,
optimizer='sgd',
optimizer_params={'learning_rate': learning_rate},
eval_metric='acc',
batch_end_callback=mx.callback.Speedometer(batch_size, 100),
num_epoch=epochs)
if current_host == scheduler_host(hosts):
save(model_dir, mlp_model)
assert_can_track_sagemaker_experiments()
def assert_can_track_sagemaker_experiments():
in_sagemaker_training = 'TRAINING_JOB_ARN' in os.environ
in_python_three = sys.version_info[0] == 3
if in_sagemaker_training and in_python_three:
import smexperiments.tracker
with smexperiments.tracker.Tracker.load() as tracker:
tracker.log_parameter('param', 1)
tracker.log_metric('metric', 1.0)
def save(model_dir, model):
model.symbol.save(os.path.join(model_dir, 'model-symbol.json'))
model.save_params(os.path.join(model_dir, 'model-0000.params'))
signature = [{'name': data_desc.name, 'shape': [dim for dim in data_desc.shape]}
for data_desc in model.data_shapes]
with open(os.path.join(model_dir, 'model-shapes.json'), 'w') as f:
json.dump(signature, f)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size', type=int, default=100)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--learning-rate', type=float, default=0.1)
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
parser.add_argument('--test', type=str, default=os.environ['SM_CHANNEL_TEST'])
parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
num_gpus = int(os.environ['SM_NUM_GPUS'])
train(args.batch_size, args.epochs, args.learning_rate, num_gpus, args.train, args.test,
args.hosts, args.current_host, args.model_dir)
| 36.682759
| 96
| 0.679263
|
4aad679f22b6819115723b88865258ef950cad8a
| 10,061
|
py
|
Python
|
stage/test_google_bigquery_destination.py
|
streamsets/datacollector-tests
|
6c3e908768e1d4a586e9183e2141096921ecd5be
|
[
"Apache-2.0"
] | 14
|
2019-03-04T10:12:39.000Z
|
2021-11-24T16:17:09.000Z
|
stage/test_google_bigquery_destination.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 48
|
2019-03-08T14:59:06.000Z
|
2021-08-13T14:49:56.000Z
|
stage/test_google_bigquery_destination.py
|
Pragatibs/datacollector-tests
|
aac53b2f0e056009ef0e437c8430651e3cf4d502
|
[
"Apache-2.0"
] | 23
|
2018-09-24T20:49:17.000Z
|
2021-11-24T16:17:11.000Z
|
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from string import ascii_letters
import pytest
from google.cloud.bigquery import Dataset, SchemaField, Table
from streamsets.testframework.markers import gcp, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
DESTINATION_STAGE_NAME = 'com_streamsets_pipeline_stage_bigquery_destination_BigQueryDTarget'
pytestmark = [pytest.mark.category('nonstandard')]
ROWS_EXPECTED = [('Cristiano Ronaldo', 32),
('David Beckham', 32),
('Gerard Pique', 30),
('Lionel Messi', 30),
('Mario Gotze', 25),
('Neymar', 25),
('Pele', 76),
('Ronaldinho', 40),
('Ronaldo', 40),
('Zinedine Zidane', 42)]
CSV_DATA_TO_INSERT = ['full_name,age'] + [','.join(str(element) for element in row) for row in ROWS_EXPECTED]
@gcp
@sdc_min_version('2.7.2.0')
def test_google_bigquery_destination(sdc_builder, sdc_executor, gcp):
"""
Send data to Google BigQuery from Dev Raw Data Source and
    confirm that the Google BigQuery destination successfully receives them using the Google BigQuery client.
    This is achieved by using a deduplicator which ensures that there is only one ingest to Google BigQuery.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> google_bigquery
record_deduplicator >> trash
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(CSV_DATA_TO_INSERT))
dataset_name = get_random_string(ascii_letters, 5)
table_name = get_random_string(ascii_letters, 5)
google_bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME, type='destination')
google_bigquery.set_attributes(dataset=dataset_name,
table_name=table_name)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> google_bigquery
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='Google BigQuery Destination').configure_for_environment(gcp)
sdc_executor.add_pipeline(pipeline)
bigquery_client = gcp.bigquery_client
schema = [SchemaField('full_name', 'STRING', mode='required'),
SchemaField('age', 'INTEGER', mode='required')]
dataset_ref = Dataset(bigquery_client.dataset(dataset_name))
try:
logger.info('Creating dataset %s using Google BigQuery client ...', dataset_name)
bigquery_client.create_dataset(dataset_ref)
table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=schema))
logger.info('Starting BigQuery Destination pipeline and waiting for it to produce records ...')
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_batch_count(1)
logger.info('Stopping BigQuery Destination pipeline and getting the count of records produced in total ...')
sdc_executor.stop_pipeline(pipeline)
# Verify by reading records using Google BigQuery client
data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]
data_from_bigquery.sort()
logger.debug('read_data = {}'.format(data_from_bigquery))
assert ROWS_EXPECTED == data_from_bigquery
finally:
bigquery_client.delete_dataset(dataset_ref, delete_contents=True)
@gcp
@sdc_min_version('2.7.2.0')
def test_google_bigquery_destination_multiple_types(sdc_builder, sdc_executor, gcp):
"""Simple big query destination test with INSERT operation.
The pipeline inserts 1000 records of multiple types.
A type converter is included to transform decimal to float.
The pipeline should look like:
dev_data_generator >> field_type_converter >> [google_bigquery, wiretap.destination]
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [
{'field': 'field1', 'type': 'STRING'},
{'field': 'field2', 'type': 'DATETIME'},
{'field': 'field3', 'type': 'INTEGER'},
{'field': 'field4', 'precision': 10, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'field5', 'type': 'DOUBLE'}
]
batch_size = 1000
dev_data_generator.set_attributes(delay_between_batches=1000, batch_size=batch_size)
dataset_name = get_random_string(ascii_letters, 5)
table_name = get_random_string(ascii_letters, 5)
google_bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME, type='destination')
google_bigquery.set_attributes(dataset=dataset_name,
table_name=table_name,
stage_on_record_error='TO_ERROR')
# Create Field Type Converter
conversions = [{'fields': ['/field4'],
'targetType': 'FLOAT'}]
field_type_converter = pipeline_builder.add_stage('Field Type Converter')
field_type_converter.set_attributes(conversion_method='BY_FIELD',
field_type_converter_configs=conversions)
wiretap = pipeline_builder.add_wiretap()
dev_data_generator >> field_type_converter >> [google_bigquery, wiretap.destination]
pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(pipeline.configure_for_environment(gcp))
    # FLOAT64 is used because there is a bug with NUMERIC in the BigQuery client
bigquery_client = gcp.bigquery_client
schema = [SchemaField('field1', 'STRING', mode='required'),
SchemaField('field2', 'DATETIME', mode='required'),
SchemaField('field3', 'INTEGER', mode='required'),
SchemaField('field4', 'FLOAT64', mode='required'),
SchemaField('field5', 'FLOAT', mode='required')
]
dataset_ref = Dataset(bigquery_client.dataset(dataset_name))
try:
logger.info('Creating dataset %s using Google BigQuery client ...', dataset_name)
bigquery_client.create_dataset(dataset_ref)
table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=schema))
logger.info('Starting BigQuery Destination pipeline and waiting for it to produce records ...')
sdc_executor.start_pipeline(pipeline)
sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 1_000, timeout_sec=60)
sdc_executor.stop_pipeline(pipeline)
# Verify by reading records using Google BigQuery client
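        # The microsecond rescaling below is assumed to align BigQuery DATETIME precision with the
        # wiretap records so that the dict comparison further down holds (assumption, not verified
        # against the BigQuery client docs).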
data_from_bigquery = [{"field1" : row.values()[0],
"field2" : row.values()[1].replace(microsecond = row.values()[1].microsecond * 1000),
"field3" : row.values()[2],
"field4" : row.values()[3],
"field5" : row.values()[4]}
for row in bigquery_client.list_rows(table)]
data_from_wiretap = [rec.field for rec in wiretap.output_records]
assert len(data_from_bigquery) >= batch_size
assert len(wiretap.error_records) == 0
assert len(data_from_bigquery) == len(data_from_wiretap)
assert all([element in data_from_bigquery for element in data_from_wiretap])
finally:
        logger.info('Deleting dataset %s using Google BigQuery client ...', dataset_name)
bigquery_client.delete_dataset(dataset_ref, delete_contents=True)
@gcp
@sdc_min_version('3.11.0')
def test_google_bigquery_destination_empty_table_name_error(sdc_builder, sdc_executor, gcp):
"""Test that BigQuery API does not return a NullPointerException if asked for an empty table name
Pipeline:
dev_raw_data_source >> google_bigquery
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
json_data = {'table': ''}
# Dev Raw Data Source
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(
data_format='JSON',
raw_data=json.dumps(json_data),
stop_after_first_batch=True
)
# Google BigQuery Destination
dataset_name = 'dont_care'
table_name = '${record:value(\'/table\')}'
google_bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME, type='destination')
google_bigquery.set_attributes(dataset=dataset_name,
table_name=table_name,
stage_on_record_error='TO_ERROR')
wiretap = pipeline_builder.add_wiretap()
# Implement pipeline topology
dev_raw_data_source >> [google_bigquery, wiretap.destination]
pipeline = pipeline_builder.build()
sdc_executor.add_pipeline(pipeline.configure_for_environment(gcp))
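    # stop_after_first_batch=True lets the pipeline finish on its own, so wait_for_finished() is enough here.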
sdc_executor.start_pipeline(pipeline).wait_for_finished()
    # Verify that we have exactly one error record
assert len(wiretap.error_records) == 1
# Verify that the error is indeed a BIGQUERY_18 (table name is empty or expression evaluates to empty)
assert wiretap.error_records[0].header['errorCode'] == 'BIGQUERY_18'
| 43.180258
| 116
| 0.688898
|
f01bdf47ea08dc3c9aac49fe707dbd4b07f65f71
| 12,939
|
py
|
Python
|
sympy/core/symbol.py
|
tesseralis/sympy
|
0c2f7e06b1a43d25ba93bac65e93f8d5f323be7a
|
[
"BSD-3-Clause"
] | 1
|
2020-11-17T07:35:20.000Z
|
2020-11-17T07:35:20.000Z
|
sympy/core/symbol.py
|
tesseralis/sympy
|
0c2f7e06b1a43d25ba93bac65e93f8d5f323be7a
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/symbol.py
|
tesseralis/sympy
|
0c2f7e06b1a43d25ba93bac65e93f8d5f323be7a
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.core.assumptions import StdFactKB
from basic import Basic
from core import C
from sympify import sympify
from singleton import S
from expr import Expr, AtomicExpr
from cache import cacheit
from function import FunctionClass
from sympy.core.logic import fuzzy_bool
from sympy.logic.boolalg import Boolean
from sympy.utilities.exceptions import SymPyDeprecationWarning
import re
class Symbol(AtomicExpr, Boolean):
"""
Assumptions:
commutative = True
You can override the default assumptions in the constructor:
>>> from sympy import symbols
>>> A,B = symbols('A,B', commutative = False)
>>> bool(A*B != B*A)
True
>>> bool(A*B*2 == 2*A*B) == True # multiplication by scalars is commutative
True
"""
is_comparable = False
__slots__ = ['name']
is_Symbol = True
@property
def _diff_wrt(self):
"""Allow derivatives wrt Symbols.
Examples
========
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> x._diff_wrt
True
"""
return True
def __new__(cls, name, **assumptions):
"""Symbols are identified by name and assumptions::
>>> from sympy import Symbol
>>> Symbol("x") == Symbol("x")
True
>>> Symbol("x", real=True) == Symbol("x", real=False)
False
"""
if 'dummy' in assumptions:
SymPyDeprecationWarning(
feature="Symbol('x', dummy=True)",
useinstead="Dummy() or symbols(..., cls=Dummy)"
).warn()
if assumptions.pop('dummy'):
return Dummy(name, **assumptions)
if assumptions.get('zero', False):
return S.Zero
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
if is_commutative is None:
raise ValueError(
'''Symbol commutativity must be True or False.''')
assumptions['commutative'] = is_commutative
return Symbol.__xnew_cached_(cls, name, **assumptions)
def __new_stage2__(cls, name, **assumptions):
        assert isinstance(name, str), repr(type(name))
obj = Expr.__new__(cls)
obj.name = name
obj._assumptions = StdFactKB(assumptions)
return obj
__xnew__ = staticmethod(__new_stage2__) # never cached (e.g. dummy)
__xnew_cached_ = staticmethod(cacheit(__new_stage2__)) # symbols are always cached
def __getnewargs__(self):
return (self.name,)
def __getstate__(self):
return {'_assumptions': self._assumptions}
def _hashable_content(self):
return (self.name,) + tuple(sorted(self.assumptions0.iteritems()))
@property
def assumptions0(self):
return dict((key, value) for key, value
in self._assumptions.iteritems() if value is not None)
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def as_dummy(self):
return Dummy(self.name, **self.assumptions0)
def __call__(self, *args):
from function import Function
return Function(self.name)(*args)
def as_real_imag(self, deep=True, **hints):
if hints.get('ignore') == self:
return None
else:
return (C.re(self), C.im(self))
def _eval_expand_complex(self, deep=True, **hints):
re, im = self.as_real_imag()
return re + im*S.ImaginaryUnit
def _sage_(self):
import sage.all as sage
return sage.var(self.name)
def is_constant(self, *wrt, **flags):
if not wrt:
return False
return not self in wrt
@property
def is_number(self):
return False
@property
def free_symbols(self):
return set([self])
class Dummy(Symbol):
"""Dummy symbols are each unique, identified by an internal count index:
>>> from sympy import Dummy
>>> bool(Dummy("x") == Dummy("x")) == True
False
If a name is not supplied then a string value of the count index will be
used. This is useful when a temporary variable is needed and the name
of the variable used in the expression is not important.
>>> Dummy._count = 0 # /!\ this should generally not be changed; it is being
>>> Dummy() # used here to make sure that the doctest passes.
_0
"""
_count = 0
__slots__ = ['dummy_index']
is_Dummy = True
def __new__(cls, name=None, **assumptions):
if name is None:
name = str(Dummy._count)
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
if is_commutative is None:
raise ValueError(
'''Dummy's commutativity must be True or False.''')
assumptions['commutative'] = is_commutative
obj = Symbol.__xnew__(cls, name, **assumptions)
Dummy._count += 1
obj.dummy_index = Dummy._count
return obj
def __getstate__(self):
return {'_assumptions': self._assumptions, 'dummy_index': self.dummy_index}
def _hashable_content(self):
return Symbol._hashable_content(self) + (self.dummy_index,)
class Wild(Symbol):
"""
Wild() matches any expression but another Wild().
"""
__slots__ = ['exclude', 'properties']
is_Wild = True
def __new__(cls, name, exclude=(), properties=(), **assumptions):
exclude = tuple([sympify(x) for x in exclude])
properties = tuple(properties)
is_commutative = fuzzy_bool(assumptions.get('commutative', True))
if is_commutative is None:
raise ValueError(
'''Wild's commutativity must be True or False.''')
assumptions['commutative'] = is_commutative
return Wild.__xnew__(cls, name, exclude, properties, **assumptions)
def __getnewargs__(self):
return (self.name, self.exclude, self.properties)
@staticmethod
@cacheit
def __xnew__(cls, name, exclude, properties, **assumptions):
obj = Symbol.__xnew__(cls, name, **assumptions)
obj.exclude = exclude
obj.properties = properties
return obj
def _hashable_content(self):
return super(Wild, self)._hashable_content() + (self.exclude, self.properties)
# TODO add check against another Wild
def matches(self, expr, repl_dict={}):
if any(expr.has(x) for x in self.exclude):
return None
if any(not f(expr) for f in self.properties):
return None
repl_dict = repl_dict.copy()
repl_dict[self] = expr
return repl_dict
def __call__(self, *args, **kwargs):
raise TypeError("'%s' object is not callable" % type(self).__name__)
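# Regexes used by symbols(): numeric ranges like 'x5:10', lexicographic ranges like 'a:d',
# and splitting of name strings on commas/whitespace.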
_re_var_range = re.compile(r"^(.*?)(\d*):(\d+)$")
_re_var_scope = re.compile(r"^(.):(.)$")
_re_var_split = re.compile(r"\s*,\s*|\s+")
def symbols(names, **args):
"""
Transform strings into instances of :class:`Symbol` class.
:func:`symbols` function returns a sequence of symbols with names taken
from ``names`` argument, which can be a comma or whitespace delimited
string, or a sequence of strings::
>>> from sympy import symbols, Function
>>> x, y, z = symbols('x,y,z')
>>> a, b, c = symbols('a b c')
The type of output is dependent on the properties of input arguments::
>>> symbols('x')
x
>>> symbols('x,')
(x,)
>>> symbols('x,y')
(x, y)
>>> symbols(('a', 'b', 'c'))
(a, b, c)
>>> symbols(['a', 'b', 'c'])
[a, b, c]
>>> symbols(set(['a', 'b', 'c']))
set([a, b, c])
If an iterable container is needed for a single symbol, set the ``seq``
argument to ``True`` or terminate the symbol name with a comma::
>>> symbols('x', seq=True)
(x,)
To reduce typing, range syntax is supported to create indexed symbols::
>>> symbols('x:10')
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
>>> symbols('x5:10')
(x5, x6, x7, x8, x9)
>>> symbols('x5:10,y:5')
(x5, x6, x7, x8, x9, y0, y1, y2, y3, y4)
>>> symbols(('x5:10', 'y:5'))
((x5, x6, x7, x8, x9), (y0, y1, y2, y3, y4))
To reduce typing even more, lexicographic range syntax is supported::
>>> symbols('x:z')
(x, y, z)
>>> symbols('a:d,x:z')
(a, b, c, d, x, y, z)
>>> symbols(('a:d', 'x:z'))
((a, b, c, d), (x, y, z))
All newly created symbols have assumptions set accordingly to ``args``::
>>> a = symbols('a', integer=True)
>>> a.is_integer
True
>>> x, y, z = symbols('x,y,z', real=True)
>>> x.is_real and y.is_real and z.is_real
True
    Despite its name, :func:`symbols` can create symbol-like objects of
    other types, for example instances of Function or Wild classes. To
achieve this, set ``cls`` keyword argument to the desired type::
>>> symbols('f,g,h', cls=Function)
(f, g, h)
>>> type(_[0])
<class 'sympy.core.function.UndefinedFunction'>
"""
result = []
if 'each_char' in args:
SymPyDeprecationWarning(
feature="each_char in the options to symbols() and var()",
useinstead="spaces or commas between symbol names"
).warn()
if isinstance(names, basestring):
names = names.strip()
        as_seq = names.endswith(',')
if as_seq:
names = names[:-1].rstrip()
if not names:
raise ValueError('no symbols given')
names = _re_var_split.split(names)
if args.pop('each_char', False) and not as_seq and len(names) == 1:
return symbols(tuple(names[0]), **args)
cls = args.pop('cls', Symbol)
seq = args.pop('seq', as_seq)
for name in names:
if not name:
raise ValueError('missing symbol')
if ':' not in name:
symbol = cls(name, **args)
result.append(symbol)
continue
match = _re_var_range.match(name)
if match is not None:
name, start, end = match.groups()
if not start:
start = 0
else:
start = int(start)
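                # Numeric ranges are end-exclusive (like Python ranges): 'x5:10' gives x5..x9.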
for i in xrange(start, int(end)):
symbol = cls("%s%i" % (name, i), **args)
result.append(symbol)
seq = True
continue
match = _re_var_scope.match(name)
if match is not None:
start, end = match.groups()
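                # Lexicographic ranges are end-inclusive: 'x:z' gives x, y, z.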
for name in xrange(ord(start), ord(end)+1):
symbol = cls(chr(name), **args)
result.append(symbol)
seq = True
continue
raise ValueError("'%s' is not a valid symbol range specification" % name)
if not seq and len(result) <= 1:
if not result:
raise ValueError('missing symbol') # should never happen
return result[0]
return tuple(result)
else:
for name in names:
result.append(symbols(name, **args))
return type(names)(result)
def var(names, **args):
"""
Create symbols and inject them into the global namespace.
This calls :func:`symbols` with the same arguments and puts the results
into the *global* namespace. It's recommended not to use :func:`var` in
library code, where :func:`symbols` has to be used::
>>> from sympy import var
>>> var('x')
x
>>> x
x
>>> var('a,ab,abc')
(a, ab, abc)
>>> abc
abc
>>> var('x,y', real=True)
(x, y)
>>> x.is_real and y.is_real
True
    See :func:`symbols` documentation for more details on what kinds of
arguments can be passed to :func:`var`.
"""
def traverse(symbols, frame):
"""Recursively inject symbols to the global namespace. """
for symbol in symbols:
if isinstance(symbol, Basic):
frame.f_globals[symbol.name] = symbol
elif isinstance(symbol, FunctionClass):
frame.f_globals[symbol.__name__] = symbol
else:
traverse(symbol, frame)
from inspect import currentframe
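    # Grab the caller's frame so the created symbols are injected into the caller's global namespace.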
frame = currentframe().f_back
try:
syms = symbols(names, **args)
if syms is not None:
if isinstance(syms, Basic):
frame.f_globals[syms.name] = syms
elif isinstance(syms, FunctionClass):
frame.f_globals[syms.__name__] = syms
else:
traverse(syms, frame)
finally:
del frame # break cyclic dependencies as stated in inspect docs
return syms
| 29.141892
| 88
| 0.567277
|