blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce505ca0ceaa5e400375f9fc5ee87089d635e977 | 325fde42058b2b82f8a4020048ff910cfdf737d7 | /src/databox/azext_databox/vendored_sdks/databox/__init__.py | 1c85885ae27c33da1710d57cd105b2ea74f26605 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | ebencarek/azure-cli-extensions | 46b0d18fe536fe5884b00d7ffa30f54c7d6887d1 | 42491b284e38f8853712a5af01836f83b04a1aa8 | refs/heads/master | 2023-04-12T00:28:44.828652 | 2021-03-30T22:34:13 | 2021-03-30T22:34:13 | 261,621,934 | 2 | 5 | MIT | 2020-10-09T18:21:52 | 2020-05-06T01:25:58 | Python | UTF-8 | Python | false | false | 736 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# Re-export the AutoRest-generated client and its configuration as this
# package's public API.
from ._configuration import DataBoxManagementClientConfiguration
from ._data_box_management_client import DataBoxManagementClient
__all__ = ['DataBoxManagementClient', 'DataBoxManagementClientConfiguration']
# Package version is sourced from the generated version module.
from .version import VERSION
__version__ = VERSION
| [
"noreply@github.com"
] | noreply@github.com |
7790d6343f88dbb56c1797fe913f7c904cd7e3ac | bf5081d59b21af6b622df1d4cc39a1dd9eec18ce | /testchild.py | d7674eb5731d2f3b721ec9e6721e9aff9caa433d | [] | no_license | vc8885/test-repo | 3622d830080c98fb70a89ab36376516a4d1c8c93 | d1da6f19c49cc20118fc15d7968d8fa722f64e13 | refs/heads/master | 2022-12-15T02:55:19.299570 | 2020-08-31T19:55:43 | 2020-08-31T19:55:43 | 291,808,317 | 0 | 0 | null | 2020-08-31T19:55:44 | 2020-08-31T19:39:53 | Python | UTF-8 | Python | false | false | 63 | py | #adding a file in child branch
# Marker script used to verify that work on a Git child branch is picked up.
print('this is a child branch')
| [
"noreply@github.com"
] | noreply@github.com |
bd3f176a008a20a6e7855e996b871cf2c1445cbd | 5d0c93c0d8678357e5f657a54ed596a757e179a1 | /TubesScrap_1914311050_pg/TubesScrap_1914311050_pg/middlewares.py | e35fd969ab24db6c52de3ec7790ffef50707b9ea | [] | no_license | ubaidillah219/Tugas-Besar-Scrapy-NIM-Pagi-Ubhara-Surabaya | 45679474b098010785ddad32bdf1ac421a2c2fb3 | 5ff48ffc0a2350fc85b96d2e3dd67af1f5d6c298 | refs/heads/main | 2023-05-11T09:56:07.677568 | 2021-05-28T12:57:17 | 2021-05-28T12:57:17 | 371,698,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,680 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class Tubesscrap1914311050PgSpiderMiddleware:
    """Spider middleware for the TubesScrap crawler.

    Every hook below is a pure pass-through: responses, results, start
    requests and exceptions are forwarded unchanged.  Scrapy treats an
    undefined hook the same way, so this class exists as a customization
    point for later projects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy builds the middleware through this factory so the instance
        # can subscribe to crawler signals before it is used.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Accept every response flowing toward the spider; returning None
        # tells Scrapy to continue processing.
        return None

    def process_spider_output(self, response, result, spider):
        # Relay each request/item produced by the spider verbatim.
        for produced in result:
            yield produced

    def process_spider_exception(self, response, exception, spider):
        # No recovery logic: returning None defers to other middleware.
        pass

    def process_start_requests(self, start_requests, spider):
        # Seed requests pass through without modification.
        for request in start_requests:
            yield request

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class Tubesscrap1914311050PgDownloaderMiddleware:
    """Downloader middleware for the TubesScrap crawler.

    This is the stock pass-through implementation: requests continue down
    the chain, responses come back untouched and exceptions are left for
    other middleware to handle.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; hooks the instance up to crawler signals.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None -> keep processing this request through the remaining
        # middleware and finally the downloader itself.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Returning None continues exception processing elsewhere.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| [
"noreply@github.com"
] | noreply@github.com |
7d3e2f8fb51b8a57e36c7a414828eb752c9632b7 | a1f0c923cb50ed7bcf6443614277cb2197619d36 | /controller.py | caf1a2bc5daaa57a9b4081dd96c9c4890bfd170f | [] | no_license | michaelelcock/assignment001 | 5d43a45cdc320a2f949e7ef52575a7c535464198 | d2b358897dd673a1562f27113788e06e05e1a36f | refs/heads/master | 2020-04-17T15:16:29.224890 | 2016-08-28T10:33:59 | 2016-08-28T10:33:59 | 66,617,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py |
# BUG FIX: ``abstractmethod`` was used below without ever being imported,
# which made this module raise NameError at class-definition time.
from abc import abstractmethod


class Controller(object):
    """Abstract base for data-pack controllers.

    A concrete controller scrapes, displays and persists data for one
    "pack".  The base class deliberately keeps ``object`` (not ``ABC``)
    so existing instantiation behavior is unchanged; the decorators still
    mark the hooks that subclasses are expected to override.
    """

    @abstractmethod
    def get_pack_name(self):
        """Return the human-readable name of this controller's pack."""
        pass

    @abstractmethod
    def scrape_data(self, args):
        """Collect raw data (e.g. from the web) using *args*."""
        pass

    @abstractmethod
    def show_data(self, data):
        """Render *data* for the user."""
        pass

    @abstractmethod
    def pickle_data(self, data):
        """Persist *data* to disk."""
        pass
| [
"michaelelcock@hotmail.com"
] | michaelelcock@hotmail.com |
7df30939d83206061d734e50cdbc9ecdaa973fbb | a35840ee0c867fd7d2fa3ac84a8681ea20ace507 | /2022/python/day1.py | be5794cd8d551f3e568ea57239a4edd1a3e6eef2 | [] | no_license | javiermolinar/AdventOfCode | 8b82284b17bff4d7b5f9cd4b836a2708f46799c5 | 9f45eb27cdf5fcf3787d6dd00b64336c1a6d4ba4 | refs/heads/master | 2022-12-26T14:48:38.296963 | 2022-12-12T08:26:40 | 2022-12-12T08:26:40 | 160,176,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from utils import read_lines
def get_calories():
    """Yield each elf's total calories from the day-1 puzzle input.

    Lines are grouped by blank separators: every blank line closes one
    elf's inventory and emits the running sum accumulated so far.
    """
    total = 0
    for line in read_lines(1):
        if line != "\n":
            total += int(line)
        else:
            yield total
            total = 0
def day1_1():
    """Return the largest calorie total carried by any single elf."""
    return max(total for total in get_calories())
def day1_2():
    """Return the combined calories of the three best-stocked elves."""
    top_three = sorted(get_calories(), reverse=True)[:3]
    return sum(top_three)
| [
"javiermolinar@live.com"
] | javiermolinar@live.com |
ee5d2188e09cde889cc9c6b2cb128b64c151072c | 4b221a07d4d13b5ee42ca094e086d4a7dbd6e560 | /Deploy/plantilla.py | e98e662d79bba8a2c91282089741681b53835b6b | [] | no_license | Puppy-runner/web-app | 95e90f5850b932c622e0d6733a1a76194b167bb8 | 892cf634d58dc6a4ac3b3e3530d1bf222274b201 | refs/heads/master | 2021-01-01T04:00:39.289205 | 2016-05-16T05:31:23 | 2016-05-16T05:31:23 | 58,324,058 | 0 | 0 | null | 2016-10-19T02:56:50 | 2016-05-08T17:52:33 | Python | UTF-8 | Python | false | false | 187 | py | from flask import Flask, render_template
# Flask application serving the single template-rendered landing page.
app = Flask(__name__)
# Route the site root to the landing-page template.
@app.route('/')
def info_template():
    # Render templates/index.html for GET /.
    return render_template('index.html')
if __name__ == '__main__':
    # Development entry point: start Flask's built-in server.
    app.run()
| [
"kazevtrinid@gmail.com"
] | kazevtrinid@gmail.com |
9b403cc28a2704821212b20a387d129532f18987 | e6a3f71ef4963eda799f449be226ad6e2a2cbb55 | /wemarry/wemarry/wsgi.py | d32512602114a3bd86b31bdff792a1da9b9871a1 | [] | no_license | iameeo/wemarry.pythonanywhere.com | b2556f3c7ceef87ae190f41c1f7cc94f6b9753e3 | 832f19fe0786459e2320f742c19fe6c28c0c4063 | refs/heads/master | 2023-02-08T23:00:51.125461 | 2021-01-04T14:28:42 | 2021-01-04T14:28:42 | 208,023,836 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for wemarry project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the handler is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wemarry.settings')
# Module-level WSGI callable that servers import to run the site.
application = get_wsgi_application()
| [
"joowon1028@gmail.com"
] | joowon1028@gmail.com |
b2bffebe0d3afd8e30834ea253f25b0301693c7e | 61e9212b05b65d555e48328f66e50a3a91ababc3 | /CycleGAN/split_data.py | 8b8724d7fe495c88cea34a370db663c4f3dbfadc | [] | no_license | davidwajngot/CT-image-enhancement | eb31ee4d6c6fb038f8a452234b0a86267e5f2ccd | 9ac385dc2eb95de11cefe28b53f749b3b81a08d8 | refs/heads/master | 2023-03-04T12:06:45.108643 | 2019-01-22T16:37:08 | 2019-01-22T16:37:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | import random
import os
from shutil import copyfile
#print (os.getcwd())
#FROM_PATH = 'datasets/R/trainA'
#TO_PATH = 'datasets/R/testA'
#FROM_PATH = 'datasets/Quality/mostA'
#TO_PATH = 'datasets/Quality/testA'
FROM_PATH = 'C:/Users/davwa/Desktop/Observer Test/A14-A20 (test)'
#FROM_PATH = 'C:/Users/davwa/Desktop/Observer Test/R29-R39 (test)'
TO_PATH = 'C:/Users/davwa/Desktop/CT-image-enhancement/CycleGAN/datasets/artifacts/testA'
#FROM_PATH = 'E:/david/A1-A12'
#TO_PATH = 'E:/david/A1-A12_35%'

# List the source directory ONCE and keep only the PNG images.
# BUG FIX: the previous version called os.listdir() inside the copy loop
# (O(n^2) directory scans) and drew sample indices over the PNG-only count
# while indexing into the *full* listing, so positions occupied by non-PNG
# entries were silently skipped.
png_files = [f for f in os.listdir(FROM_PATH) if f.endswith(".png")]
N_FILES = len(png_files)

#N_TEST_FILES = int(round(N_FILES*0.3)) #Take 30% of the images to the validation set
N_TEST_FILES = 3000

# Sample the file names directly (raises ValueError, as before, when there
# are fewer than N_TEST_FILES images available).
test_files = random.sample(png_files, N_TEST_FILES)

# Copy the sampled images from FROM_PATH to TO_PATH.
for i, file in enumerate(test_files):
    print(i, file)
    copyfile(os.path.join(FROM_PATH, file), os.path.join(TO_PATH, file))

# Optionally remove the sampled images from FROM_PATH (disabled).
#for file in test_files:
#    os.remove(os.path.join(FROM_PATH, file))

print("Number of files:", N_FILES)
print("Number of test files:", N_TEST_FILES)
print("Number of training files:", N_FILES-N_TEST_FILES)
"davwa@cmiv.local"
] | davwa@cmiv.local |
c0b8870b43e999588ed4befc1e107dd7815911b0 | 5df035428c001027c15b0ae3d441ad81587101df | /libs/eyed3/utils/console.py | 7f847e12c1b4a21f8788712d50532bff4854e821 | [] | no_license | bbaldino/BGMM | 256a32c07cce2aa7fdf66f8905ced6c37597fe59 | c6694cfe988eecda3f308b3c2e505885ff1f9357 | refs/heads/master | 2021-01-10T19:55:49.119557 | 2015-12-06T23:32:05 | 2015-12-06T23:32:05 | 9,126,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,126 | py | # -*- coding: utf-8 -*-
################################################################################
# Copyright (C) 2013 Travis Shirk <travis@pobox.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
################################################################################
#
# ANSI codes abstraction borrowed from colorama and is covered by its own
# license. https://pypi.python.org/pypi/colorama
# Spinner and progress bar code modified from astropy and is covered by its own
# license. https://github.com/astropy/astropy
#
################################################################################
# Copyright (c) 2010 Jonathan Hartley <tartley@tartley.com>
# Copyright (c) 2011-2013, Astropy Developers
#
# Released under the New BSD license (reproduced below), or alternatively you may
# use this software under any OSI approved open source license such as those at
# http://opensource.org/licenses/alphabetical
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name(s) of the copyright holders, nor those of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
from __future__ import print_function
import sys
import time
import struct
from ..compat import PY2
from .. import LOCAL_ENCODING
from . import formatSize, formatTime
try:
import fcntl
import termios
import signal
_CAN_RESIZE_TERMINAL = True
except ImportError:
_CAN_RESIZE_TERMINAL = False
# Module-wide master switch: when False, the AnsiCodes accessor below
# resolves every escape sequence to '' so color markup vanishes entirely.
USE_ANSI = True
'''If set to False no ANSI codes are ever used. Otherwise, use ``enableColor``
to toggle ANSI on a per file stream basis.'''

# Control Sequence Introducer -- the prefix of every ANSI escape code.
CSI = '\033['
class AnsiCodes(object):
    """Exposes a namespace of integer SGR codes as printable escapes.

    Given a class whose public attributes are integer ANSI SGR codes
    (e.g. ``AnsiFore``), each attribute is re-exposed on the instance as
    the complete escape string (``CSI + code + 'm'``).  Attribute access
    honours the module-level ``USE_ANSI`` flag: while it is False every
    escape reads as the empty string, disabling color without touching
    callers.
    """

    def __init__(self, codes):
        # Translate every public integer code into its escape-sequence string.
        def code_to_chars(code):
            return CSI + str(code) + 'm'

        for name in dir(codes):
            if not name.startswith('_'):
                value = getattr(codes, name)
                setattr(self, name, code_to_chars(value))

    def __getattribute__(self, name):
        global USE_ANSI
        # Lookups are case-insensitive: normalize to the stored upper-case name.
        name = name.upper()
        attr = super(AnsiCodes, self).__getattribute__(name)
        # Suppress escape sequences entirely while ANSI output is disabled.
        if attr.startswith(CSI) and not USE_ANSI:
            return ''
        else:
            return attr

    def __getitem__(self, name):
        # Dict-style access (e.g. Fore['red']); same semantics as attributes.
        return getattr(self, name.upper())
class AnsiFore:
    # ANSI SGR foreground color codes (30-37) plus reset-to-default (39).
    # Consumed by AnsiCodes, which converts each into an escape string.
    GREY = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39
class AnsiBack:
    # ANSI SGR background color codes (40-47) plus reset-to-default (49).
    GREY = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49
class AnsiStyle:
    # ANSI SGR text-style codes, each paired with the code that resets it.
    RESET_ALL = 0
    BRIGHT = 1
    RESET_BRIGHT = 22
    DIM = 2
    # 22 resets both bright and dim per the SGR spec.
    RESET_DIM = RESET_BRIGHT
    ITALICS = 3
    RESET_ITALICS = 23
    UNDERLINE = 4
    RESET_UNDERLINE = 24
    BLINK_SLOW = 5
    RESET_BLINK_SLOW = 25
    BLINK_FAST = 6
    RESET_BLINK_FAST = 26
    INVERSE = 7
    RESET_INVERSE = 27
    STRIKE_THRU = 9
    RESET_STRIKE_THRU = 29
# Public accessor objects: e.g. ``Fore.RED`` yields the red-foreground
# escape sequence, or '' when USE_ANSI is False.
Fore = AnsiCodes(AnsiFore)
Back = AnsiCodes(AnsiBack)
Style = AnsiCodes(AnsiStyle)

# Semantic color helpers; functions (not constants) so each call observes
# the current USE_ANSI setting.
def ERROR_COLOR(): return Fore.RED
def WARNING_COLOR(): return Fore.YELLOW
def HEADER_COLOR(): return Fore.GREEN
class Spinner(object):
    """
    A class to display a spinner in the terminal.

    It is designed to be used with the `with` statement::

        with Spinner("Reticulating splines", "green") as s:
            for item in enumerate(items):
                s.next()

    When the output stream is not a TTY the spinner is silent: the message
    is written once, the animation and the [Done]/[Failed] tag are skipped.
    """
    _default_unicode_chars = u"◓◑◒◐"
    _default_ascii_chars = u"-/|\\"

    def __init__(self, msg, file=None, step=1,
                 chars=None, use_unicode=True, print_done=True):
        """
        :param msg: Text displayed to the left of the spinner.
        :param file: Output stream; defaults to ``sys.stdout``.
        :param step: Number of ``next()`` calls per animation frame.
        :param chars: Animation character cycle; defaults to a unicode or
                      ascii set depending on *use_unicode*.
        :param print_done: Whether to append a [Done]/[Failed] tag on exit.
        """
        self._msg = msg
        self._file = file or sys.stdout
        self._step = step
        if not chars:
            if use_unicode:
                chars = self._default_unicode_chars
            else:
                chars = self._default_ascii_chars
        self._chars = chars

        # Non-interactive streams (pipes, files) get the silent behavior.
        self._silent = not self._file.isatty()
        self._print_done = print_done

    def _iterator(self):
        # Generator driving the animation: each resume redraws the line,
        # advancing one character every ``step`` calls.
        chars = self._chars
        index = 0
        write = self._file.write
        flush = self._file.flush

        while True:
            write(u'\r')
            write(self._msg)
            write(u' ')
            write(chars[index])
            flush()
            yield

            # FIX: was ``xrange`` (Python 2 only), which broke this
            # module's intended Python 2/3 compatibility.
            for i in range(self._step):
                yield

            index += 1
            if index == len(chars):
                index = 0

    def __enter__(self):
        if self._silent:
            return self._silent_iterator()
        else:
            return self._iterator()

    def __exit__(self, exc_type, exc_value, traceback):
        write = self._file.write
        flush = self._file.flush

        if not self._silent:
            write(u'\r')
            write(self._msg)
            if self._print_done:
                # Color the completion tag by whether an exception escaped.
                if exc_type is None:
                    write(Fore.GREEN + u' [Done]\n')
                else:
                    write(Fore.RED + u' [Failed]\n')
            else:
                write("\n")
        flush()

    def _silent_iterator(self):
        # Emit the message once; every subsequent resume is a no-op.
        self._file.write(self._msg)
        self._file.flush()

        while True:
            yield
class ProgressBar(object):
    """
    A class to display a progress bar in the terminal.

    It is designed to be used either with the `with` statement::

        with ProgressBar(len(items)) as bar:
            for item in enumerate(items):
                bar.update()

    or as a generator::

        for item in ProgressBar(items):
            item.process()

    When the output stream is not a TTY, all drawing is suppressed.
    """
    def __init__(self, total_or_items, file=None):
        """
        Parameters
        ----------
        total_or_items : int or sequence
            If an int, the number of increments in the process being
            tracked. If a sequence, the items to iterate over.

        file : writable file-like object, optional
            The file to write the progress bar to. Defaults to
            `sys.stdout`. If `file` is not a tty (as determined by
            calling its `isatty` member, if any), the scrollbar will
            be completely silent.
        """
        self._file = file or sys.stdout

        if not self._file.isatty():
            # Non-interactive stream: replace update() with a no-op.
            self.update = self._silent_update
            self._silent = True
        else:
            self._silent = False

        try:
            self._items = iter(total_or_items)
            self._total = len(total_or_items)
        except TypeError:
            try:
                self._total = int(total_or_items)
                # FIX: was ``xrange`` (Python 2 only); ``range`` works on
                # both interpreters this module targets.
                self._items = iter(range(self._total))
            except TypeError:
                raise TypeError("First argument must be int or sequence")

        self._start_time = time.time()

        # Short-circuit on the cheap local check first (same result:
        # ``and`` is commutative here, both operands are side-effect free).
        self._should_handle_resize = (
            self._file.isatty() and _CAN_RESIZE_TERMINAL)
        self._handle_resize()
        # FIX: _bar_length used to be computed only inside update(), so a
        # silent bar (e.g. ProgressBar.map with a non-tty file) crashed
        # with AttributeError when map() read it.
        self._bar_length = self._terminal_width - 37

        if self._should_handle_resize:
            signal.signal(signal.SIGWINCH, self._handle_resize)
            self._signal_set = True
        else:
            self._signal_set = False

        self.update(0)

    def _handle_resize(self, signum=None, frame=None):
        # Recompute the drawable width, from the terminal itself when
        # possible, otherwise from the COLUMNS env var (default 78).
        if self._should_handle_resize:
            data = fcntl.ioctl(self._file, termios.TIOCGWINSZ, '\0' * 8)
            terminal_width = struct.unpack("HHHH", data)[1]
        else:
            # FIX: ``os`` was never imported at module scope, so this
            # fallback raised NameError; import it locally instead.
            import os
            try:
                terminal_width = int(os.environ.get('COLUMNS'))
            except (TypeError, ValueError):
                terminal_width = 78

        self._terminal_width = terminal_width

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._silent:
            if exc_type is None:
                # Snap the bar to 100% on clean completion.
                self.update(self._total)
            self._file.write('\n')
            self._file.flush()
        if self._signal_set:
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)

    def __iter__(self):
        return self

    def next(self):
        # Advance the wrapped iterator and redraw; finalizes the bar when
        # the items are exhausted.
        try:
            rv = next(self._items)
        except StopIteration:
            self.__exit__(None, None, None)
            raise
        else:
            self.update()
            return rv

    # FIX: Python 3's iteration protocol requires __next__; without this
    # alias ``for item in ProgressBar(...)`` only worked on Python 2.
    __next__ = next

    def update(self, value=None):
        """
        Update the progress bar to the given value (out of the total
        given to the constructor).
        """
        if value is None:
            value = self._current_value = self._current_value + 1
        else:
            self._current_value = value

        if self._total == 0:
            frac = 1.0
        else:
            frac = float(value) / float(self._total)

        file = self._file
        write = file.write

        suffix = self._formatSuffix(value, frac)

        # 37 columns are reserved for the suffix text.
        self._bar_length = self._terminal_width - 37

        bar_fill = int(float(self._bar_length) * frac)
        write(u'\r|')
        write(Fore.BLUE + u'=' * bar_fill + Fore.RESET)
        if bar_fill < self._bar_length:
            write(Fore.GREEN + u'>' + Fore.RESET)
            write(u'-' * (self._bar_length - bar_fill - 1))
        write(u'|')
        write(suffix)

        self._file.flush()

    def _formatSuffix(self, value, frac):
        # Build the trailing "done/total (percent) [ETA]" text.
        if value >= self._total:
            t = time.time() - self._start_time
            time_str = ' '
        elif value <= 0:
            t = None
            time_str = ''
        else:
            # Remaining time estimated from the elapsed/completed ratio.
            t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
            time_str = u' ETA '
        if t is not None:
            time_str += formatTime(t, short=True)

        suffix = ' {0:>4s}/{1:>4s}'.format(formatSize(value, short=True),
                                           formatSize(self._total, short=True))
        suffix += u' ({0:>6s}%)'.format(u'{0:.2f}'.format(frac * 100.0))
        suffix += time_str

        return suffix

    def _silent_update(self, value=None):
        # No-op used when the stream is not a terminal.
        pass

    @classmethod
    def map(cls, function, items, multiprocess=False, file=None):
        """
        Does a `map` operation while displaying a progress bar with
        percentage complete.
        ::

            def work(i):
                print(i)

            ProgressBar.map(work, range(50))

        Parameters
        ----------
        function : function
            Function to call for each step

        items : sequence
            Sequence where each element is a tuple of arguments to pass to
            *function*.

        multiprocess : bool, optional
            If `True`, use the `multiprocessing` module to distribute each
            task to a different processor core.

        file : writeable file-like object, optional
            The file to write the progress bar to. Defaults to
            `sys.stdout`. If `file` is not a tty (as determined by
            calling its `isatty` member, if any), the scrollbar will
            be completely silent.
        """
        results = []

        if file is None:
            # FIX: was ``stdio.stdout`` -- an undefined name.
            file = sys.stdout

        with cls(len(items), file=file) as bar:
            step_size = max(200, bar._bar_length)
            steps = max(int(float(len(items)) / step_size), 1)
            if not multiprocess:
                for i, item in enumerate(items):
                    function(item)
                    if (i % steps) == 0:
                        bar.update(i)
            else:
                import multiprocessing
                p = multiprocessing.Pool()
                for i, result in enumerate(
                        p.imap_unordered(function, items, steps)):
                    bar.update(i)
                    results.append(result)

        return results
def _encode(s):
    '''Prepare a text string for writing to a byte-oriented stream.

    Under Python 2, unicode text must be explicitly encoded with
    LOCAL_ENCODING (the implicit codec would be ascii); byte strings pass
    through untouched.  Under Python 3 ``sys.std*.write`` expects ``str``
    already, so the value is returned as-is after a sanity check.
    '''
    if not PY2:
        assert(isinstance(s, str))
        return s
    if isinstance(s, unicode):
        return s.encode(LOCAL_ENCODING)
    if isinstance(s, str):
        return s
    raise TypeError("Argument must be str or unicode")
def printMsg(s):
    """Write *s* plus a newline to stdout, encoding for Python 2, and flush."""
    fp = sys.stdout
    s = _encode(s)
    fp.write("%s\n" % s)
    fp.flush()
# Convenience wrappers that colorize a message by severity and send it to
# the conventional stream (errors/warnings -> stderr, headers -> stdout).
def printError(s):
    _printWithColor(s, ERROR_COLOR(), sys.stderr)
def printWarning(s):
    _printWithColor(s, WARNING_COLOR(), sys.stderr)
def printHeader(s):
    _printWithColor(s, HEADER_COLOR(), sys.stdout)
def boldText(s, fp=sys.stdout, c=None):
    # Return *s* wrapped in bright-style ANSI codes, optionally colored
    # with *c* (an escape string such as ``Fore.RED``).
    # NOTE(review): the *fp* parameter is accepted but never used here --
    # presumably kept for signature compatibility; confirm before removing.
    return (Style.BRIGHT + (c or '') +
            s +
            (Fore.RESET if c else '') + Style.RESET_BRIGHT)
def _printWithColor(s, color, file):
    # Encode for the platform (Python 2), wrap the text in the given ANSI
    # color, reset the foreground afterwards, and flush immediately.
    s = _encode(s)
    file.write(color + s + Fore.RESET + '\n')
    file.flush()
if __name__ == "__main__":
    # Visual self-test: prints a background/foreground/style matrix and
    # exercises the Spinner and ProgressBar widgets.  Run the module
    # directly to eyeball terminal behavior; importers never execute this.
    USE_ANSI = True

    def checkCode(c):
        # Skip private attributes and the RESET pseudo-codes when sampling.
        return c[0] != '_' and "RESET" not in c

    for bg_name, bg_code in ((c, getattr(Back, c))
                             for c in dir(Back) if checkCode(c)):
        sys.stdout.write('%s%-7s%s %s ' %
                         (bg_code, bg_name, Back.RESET, bg_code))
        for fg_name, fg_code in ((c, getattr(Fore, c))
                                 for c in dir(Fore) if checkCode(c)):
            sys.stdout.write(fg_code)
            for st_name, st_code in ((c, getattr(Style, c))
                                     for c in dir(Style) if checkCode(c)):
                sys.stdout.write('%s%s %s %s' %
                                 (st_code, st_name,
                                  getattr(Style, "RESET_%s" % st_name),
                                  bg_code))
        sys.stdout.write("%s\n" % Style.RESET_ALL)

    sys.stdout.write("\n")

    import time
    # Spinner demos: default tag, colored message, ascii chars, custom cycle.
    with Spinner(Fore.GREEN + u"Phase #1") as spinner:
        for i in range(50):
            time.sleep(.05)
            spinner.next()
    with Spinner(Fore.RED + u"Phase #2" + Fore.RESET,
                 print_done=False) as spinner:
        for i in range(50):
            time.sleep(.05)
            spinner.next()
    with Spinner(u"Phase #3", print_done=False, use_unicode=False) as spinner:
        for i in range(50):
            spinner.next()
            time.sleep(.05)
    with Spinner(u"Phase #4", print_done=False, chars='.oO°Oo.') as spinner:
        for i in range(50):
            spinner.next()
            time.sleep(.05)

    # ProgressBar demos: manual update, generator style, explicit values.
    items = range(200)
    with ProgressBar(len(items)) as bar:
        for item in enumerate(items):
            bar.update()
            time.sleep(.05)
    for item in ProgressBar(items):
        time.sleep(.05)
    progress = 0
    max = 320000000
    with ProgressBar(max) as bar:
        while progress < max:
            progress += 23400
            bar.update(progress)
            time.sleep(.001)
| [
"bbaldino@gmail.com"
] | bbaldino@gmail.com |
6e6b1da786d00ec3a90e3ce7a86f766394891fb3 | aa8356ddad5a92ac30dd5d68987725d81df093cc | /configs/retinanet_r50_416_fpn_1x_DIOR_cocosty.py | 27f2c1049c96435f71a2116b4af17394987eaa8e | [] | no_license | TangYuangao/Hello-_-word | 92b4632cab6c23ffb5d52daee86d5ab3db63dfad | 5f04f12387491d39e601c1707c23b142836e48da | refs/heads/master | 2023-08-30T15:47:10.146969 | 2023-08-07T14:52:14 | 2023-08-07T14:52:14 | 287,425,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,735 | py | _base_ = [
'../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
# model settings
# RetinaNet with a ResNet-50 backbone and FPN neck, configured for the
# 20-class DIOR remote-sensing dataset.
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        # Freeze stem + stage 1; only deeper stages are fine-tuned.
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=20,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/DIOR/'
# CLASSES = ('vehicle', 'ship', 'harbor', 'bridge', 'tennis court',
# 'airplane', 'basketball court', 'ground track field',
# 'baseball diamond', 'storage tank')
CLASSES = ('airplane', 'airport', 'baseballfield', 'basketballcourt',
'bridge', 'chimney', 'dam', 'Expressway-Service-area',
'Expressway-toll-station', 'golffield', 'groundtrackfield',
'harbor', 'overpass', 'ship', 'stadium', 'storagetank',
'tenniscourt', 'trainstation', 'vehicle', 'windmill')
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(416, 416), keep_ratio=True),
# dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', pad_to_square=True),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', pad_to_square=True),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        classes=CLASSES,
        ann_file=data_root + 'cocostyle_annotation/DIOR_train_coco.json',
        img_prefix=data_root + 'VOC2007/JPEGImages',
        pipeline=train_pipeline),
    # NOTE(review): val and test both point at the DIOR *test* split.
    val=dict(
        type=dataset_type,
        classes=CLASSES,
        ann_file=data_root + 'cocostyle_annotation/DIOR_test_coco.json',
        img_prefix=data_root + 'VOC2007/JPEGImages',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        classes=CLASSES,
        ann_file=data_root + 'cocostyle_annotation/DIOR_test_coco.json',
        img_prefix=data_root + 'VOC2007/JPEGImages',
        pipeline=test_pipeline))
# NOTE(review): 'mAP' is the VOC-style metric; CocoDataset evaluation
# normally expects metric='bbox' -- confirm this combination is supported.
evaluation = dict(interval=1, metric='mAP')
# learning policy
# lr_config = dict(
#     policy='step',
#     warmup='linear',
#     warmup_iters=500,
#     warmup_ratio=0.001,
#     step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=20)
# optimizer
# Translated from the original Chinese comment: "With the paper's lr of
# 0.01 the gradients exploded, so the lr was reduced to 0.001."
# NOTE(review): the line below still sets lr=0.01 -- confirm which value
# is intended.
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
| [
"noreply@github.com"
] | noreply@github.com |
7c0c42ad12b2c2286ad2646113febc7ab5cc2773 | 77a9250b7287d1d23ba3b6c4a6171cc3cdae8362 | /jieying_he_task2.py | 9bfac71e9124a880744606fbb296a9c4f28245ae | [] | no_license | ElvaHe/LSH-Recommendation-System | e59c79bc959d454e35694c42ca493f699e45f80e | d4f759c3325f1fc6a19113797853555f75c9859c | refs/heads/master | 2022-04-10T21:14:57.449206 | 2020-04-04T06:55:17 | 2020-04-04T06:55:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,974 | py | from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
from pyspark import SparkConf, SparkContext
import time
import csv
import collections
import random
import sys
start = time.time()
if __name__ == "__main__":
train_file_path = sys.argv[1]
test_file_path = sys.argv[2]
case_id = int(sys.argv[3])
output_file_path = sys.argv[4]
conf = SparkConf().setAppName("cvsApp").setMaster("local[*]")
sc = SparkContext(conf=conf)
#train_file_path = '../hw3dataset/yelp_train.csv'
#test_file_path = '../hw3dataset/yelp_val.csv'
#output_file_path = 'task2_1.csv'
#output_file_path1 = 'task2_2.csv'
#output_file_path2 = 'task2_3.csv'
#output_file_path3 = 'task2_4.csv'
#output_path1 = 'test.txt'
#output_path2 = 'test1.txt'
#output_path3 = 'test2.txt'
#output_path4 = 'test3.txt'
trainRDD = sc.textFile(train_file_path)
train_head = trainRDD.first()
testRDD = sc.textFile(test_file_path)
test_head = testRDD.first()
if case_id == 1:
users = trainRDD.filter(lambda row: row != train_head).map(lambda row: row.split(",")[0]).distinct().persist()
user_map = collections.defaultdict(int)
id_user_map = {}
for index, user in enumerate(users.collect()):
user_map[user] = index
id_user_map[index] = user
businesses = trainRDD.filter(lambda row: row != train_head).map(lambda row: row.split(",")[1]).distinct().persist()
bus_id_map = collections.defaultdict(int)
id_bus_map = {}
for index, business in enumerate(businesses.collect()):
bus_id_map[business] = index
id_bus_map[index] = business
#print(bus_id_map)
#print(id_bus_map)
#case1:
# Load and parse the data
train_ratings = trainRDD.filter(lambda row: row != train_head).map(lambda row: row.split(',')).map(lambda row: Rating(user_map[row[0]], bus_id_map[row[1]], float(row[2])))
# Build the recommendation model using Alternating Least Squares
rank = 10
numIterations = 10
model = ALS.train(train_ratings, rank, numIterations, lambda_=0.25)
# Evaluate the model on training data
#test_file_path = '../hw3dataset/yelp_val.csv'
test_ratings = testRDD.filter(lambda row: row != test_head).map(lambda row: row.split(',')).map(lambda row: Rating(user_map[row[0]], bus_id_map[row[1]], float(row[2])))
testdata = test_ratings.map(lambda p: (p[0], p[1]))
predictions = model.predictAll(testdata).map(lambda r: ((r[0], r[1]), r[2]))
#ratesAndPreds = test_ratings.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
#MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
#print("Mean Squared Error = " + str(MSE))
with open(output_file_path, "w") as csvFile:
out = csv.writer(csvFile)
out.writerow(['user_id', 'business_id', 'prediction'])
for pred in predictions.collect():
out.writerow([id_user_map[pred[0][0]], id_bus_map[pred[0][1]], pred[1]])
# Save and load model
#model.save(sc, "target/tmp/myCollaborativeFilter")
#sameModel = MatrixFactorizationModel.load(sc, "target/tmp/myCollaborativeFilter")
if case_id == 2:
#case2:
userCF_data = trainRDD.filter(lambda row: row != train_head).map(lambda row: (row.split(",")[0], (row.split(",")[1], row.split(",")[2]))).groupByKey().persist()
bus_dataset = trainRDD.filter(lambda row: row != train_head).map(lambda row: (row.split(",")[1], row.split(",")[0])).groupByKey().persist()
test_data = testRDD.filter(lambda row: row != test_head).map(lambda row: (row.split(",")[0], row.split(",")[1])).persist()
'''with open(output_path1, 'w') as file:
for data in userCF_data.collect():
file.write('(')
file.write(data[0])
file.write(',(')
for d in data[1]:
file.write(d[0])
file.write(':')
file.write(str(d[1]))
file.write(',')
file.write('),')
file.write(')\n')'''
avg_user = collections.defaultdict(float)
for user in userCF_data.collect():
item_count = len(user[1])
item = {}
for i in user[1]:
item[i[0]] = float(i[1])
rating_sum = sum(item.values())
avg_user[user[0]] = rating_sum / item_count
#print(avg_user)
bus_dict = {}
for bus in bus_dataset.collect():
bus_dict[bus[0]] = []
for user in bus[1]:
bus_dict[bus[0]].append(user)
user_item_dict = {}
for user in userCF_data.collect():
rating = {}
for item in user[1]:
rating[item[0]] = float(item[1])
user_item_dict[user[0]] = rating
'''with open(output_path2, 'w') as file:
for key, value in bus_dict.items():
file.write('(')
file.write(key)
file.write(',(')
for v in value:
file.write(v)
file.write(',')
file.write('),')
file.write(')\n')
with open(output_path3, 'w') as file:
for key, value in user_item_dict.items():
file.write('(')
file.write(key)
file.write(',(')
for k, v in user_item_dict[key].items():
file.write(k)
file.write(':')
file.write(str(v))
file.write(',')
file.write(')')
file.write(')\n')'''
P = {}
W = {}
for row in test_data.collect():
user = row[0]
bus = row[1]
avg = avg_user[user]
w = {}
if user in user_item_dict:
if bus in bus_dict:
u_set = set(user_item_dict[user].keys())
for u in bus_dict[bus]:
u_pair = []
u_pair.append(user)
u_pair.append(u)
k = frozenset(u_pair)
if k in W:
w[k] = W[k]
if k not in W:
v_set = set(user_item_dict[u].keys())
r = u_set & v_set
a = 0
for i in r:
a += (user_item_dict[user][i] - avg) * (user_item_dict[u][i] - avg_user[u])
b1 = 0
for i in r:
b1 += (user_item_dict[user][i] - avg) ** 2
b1 = b1 ** 0.5
b2 = 0
for i in r:
b2 += (user_item_dict[u][i] - avg_user[u]) ** 2
b2 = b2 ** 0.5
b = b1 * b2
if b == 0:
W[k] = 0
w[k] = 0
else:
W[k] = a / b
w[k] = a / b
m = 0
for u in bus_dict[bus]:
u_pair = []
u_pair.append(user)
u_pair.append(u)
k = frozenset(u_pair)
m += (user_item_dict[u][bus] - avg_user[u]) * w[k]
n = 0
for k, v in w.items():
n += abs(w[k])
if n == 0:
P[(user, bus)] = avg
else:
P[(user, bus)] = avg + (m / n)
elif bus not in bus_dict:
P[(user, bus)] = avg
elif user not in user_item_dict:
if bus in bus_dict:
res = 0
count = len(bus_dict[bus])
for u in bus_dict[bus]:
res += user_item_dict[u][bus]
res = res / count
P[(user, bus)] = res
elif bus not in bus_dict:
P[(user, item)] = 0
with open(output_file_path, "w") as csvFile:
out = csv.writer(csvFile)
out.writerow(['user_id', 'business_id', 'prediction'])
for pred in P:
out.writerow([pred[0], pred[1], P[pred]])
if case_id == 3:
#case3.1:
users = trainRDD.filter(lambda row: row != train_head).map(lambda row: row.split(",")[0]).distinct().persist()
user_map = collections.defaultdict(int)
for index, user in enumerate(users.collect()):
user_map[user] = index
businesses = trainRDD.filter(lambda row: row != train_head).map(
lambda row: row.split(",")[1]).distinct().persist()
bus_id_map = collections.defaultdict(int)
id_bus_map = {}
for index, business in enumerate(businesses.collect()):
bus_id_map[business] = index
id_bus_map[index] = business
data_set = trainRDD.filter(lambda row: row != train_head).map(
lambda row: (row.split(",")[1], row.split(",")[0])).groupByKey().persist()
# print(user_map)
business_num = data_set.count()
#print(business_num)
all_data = data_set.collect()
sig_matrix = [[0 for j in range(business_num)] for i in range(100)]
for i in range(100):
a, b = random.randint(1, 100000), random.randint(1, 100000)
for data in all_data:
each_business = []
for user in data[1]:
x = user_map[user]
hash_num = (a * x + b) % business_num
each_business.append(hash_num)
min_num = min(each_business)
sig_matrix[i][bus_id_map[data[0]]] = min_num
band = 50
row = 2
k = business_num * 100
buckets = {}
r = 0
for i in range(band):
b_k = {}
for b in range(business_num):
col = (sig_matrix[r][b] * 1 + sig_matrix[r + 1][b] * 2) % k
if col not in b_k:
b_k[col] = []
b_k[col].append(b)
buckets[i] = b_k
r = r + row
data_dict = {}
for data in all_data:
user_set = set(data[1])
data_dict[data[0]] = user_set
J_Sim = {}
for i in range(band):
bucket = buckets[i]
for key, value in bucket.items():
if len(bucket[key]) >= 2:
bin = bucket[key]
for a in range(len(bin) - 1):
for b in range(a + 1, len(bin)):
p = []
p.append(a)
p.append(b)
p = frozenset(p)
J_Sim[p] = 1
itemCF_data = trainRDD.filter(lambda row: row != train_head).map(
lambda row: (row.split(",")[1], (row.split(",")[0], row.split(",")[2]))).groupByKey().persist()
user_dataset = trainRDD.filter(lambda row: row != train_head).map(
lambda row: (row.split(",")[0], row.split(",")[1])).groupByKey().persist()
test_data = testRDD.filter(lambda row: row != test_head).map(
lambda row: (row.split(",")[0], row.split(",")[1])).persist()
avg_item = collections.defaultdict(float)
for item in itemCF_data.collect():
user_count = len(item[1])
user = {}
for u in item[1]:
user[u[0]] = float(u[1])
rating_sum = sum(user.values())
avg_item[item[0]] = rating_sum / user_count
#print(avg_item)
user_dict = {}
for user in user_dataset.collect():
user_dict[user[0]] = []
for item in user[1]:
user_dict[user[0]].append(item)
item_user_dict = {}
for item in itemCF_data.collect():
rating = collections.defaultdict(float)
for user in item[1]:
rating[user[0]] = float(user[1])
item_user_dict[item[0]] = rating
P = {}
W = {}
for row in test_data.collect():
user = row[0]
item = row[1]
avg = avg_item[item]
w = {}
if item in item_user_dict:
if user in user_dict:
u_set = set(item_user_dict[item].keys())
for i in user_dict[user]:
i_pair = []
i_pair.append(item)
i_pair.append(i)
k = frozenset(i_pair)
if k in J_Sim:
if k in W:
w[k] = W[k]
if k not in W:
v_set = set(item_user_dict[i].keys())
r = u_set & v_set
a = 0
for u in r:
a += (item_user_dict[item][u] - avg) * (item_user_dict[i][u] - avg_item[i])
b1 = 0
for u in r:
b1 += (item_user_dict[item][u] - avg) ** 2
b1 = b1 ** 0.5
b2 = 0
for u in r:
b2 += (item_user_dict[i][u] - avg_item[i]) ** 2
b2 = b2 ** 0.5
b = b1 * b2
if b == 0:
W[k] = 0
w[k] = 0
else:
W[k] = a / b
w[k] = a / b
m = 0
for i in user_dict[user]:
i_pair = []
i_pair.append(item)
i_pair.append(i)
k = frozenset(i_pair)
if k in J_Sim:
m += (item_user_dict[i][user] - avg_item[i]) * w[k]
n = 0
for k, v in w.items():
n += abs(w[k])
if n == 0:
P[(user, item)] = avg
else:
P[(user, item)] = avg + (m / n)
elif user not in user_dict:
P[(user, item)] = avg
elif item not in item_user_dict:
if user in user_dict:
res = 0
count = len(user_dict[user])
for i in user_dict[user]:
res += item_user_dict[i][user]
res = res / count
P[(user, item)] = res
elif user not in user_dict:
P[(user, item)] = 0
with open(output_file_path, "w") as csvFile:
out = csv.writer(csvFile)
out.writerow(['user_id', 'business_id', 'prediction'])
for pred in P:
out.writerow([pred[0], pred[1], P[pred]])
'''if case_id == 4:
itemCF_data = trainRDD.filter(lambda row: row != train_head).map(
lambda row: (row.split(",")[1], (row.split(",")[0], row.split(",")[2]))).groupByKey().persist()
user_dataset = trainRDD.filter(lambda row: row != train_head).map(
lambda row: (row.split(",")[0], row.split(",")[1])).groupByKey().persist()
test_data = testRDD.filter(lambda row: row != test_head).map(
lambda row: (row.split(",")[0], row.split(",")[1])).persist()
avg_item = collections.defaultdict(float)
for item in itemCF_data.collect():
user_count = len(item[1])
user = {}
for u in item[1]:
user[u[0]] = float(u[1])
rating_sum = sum(user.values())
avg_item[item[0]] = rating_sum / user_count
#print(avg_item)
user_dict = {}
for user in user_dataset.collect():
user_dict[user[0]] = []
for item in user[1]:
user_dict[user[0]].append(item)
item_user_dict = {}
for item in itemCF_data.collect():
rating = {}
for user in item[1]:
rating[user[0]] = float(user[1])
item_user_dict[item[0]] = rating
P = {}
W = {}
for row in test_data.collect():
user = row[0]
item = row[1]
avg = avg_item[item]
w = {}
if item in item_user_dict:
if user in user_dict:
u_set = set(item_user_dict[item].keys())
for i in user_dict[user]:
i_pair = []
i_pair.append(item)
i_pair.append(i)
k = frozenset(i_pair)
if k in W:
w[k] = W[k]
if k not in W:
v_set = set(item_user_dict[i].keys())
r = u_set & v_set
a = 0
for u in r:
a += (item_user_dict[item][u] - avg) * (item_user_dict[i][u] - avg_item[i])
b1 = 0
for u in r:
b1 += (item_user_dict[item][u] - avg) ** 2
b1 = b1 ** 0.5
b2 = 0
for u in r:
b2 += (item_user_dict[i][u] - avg_item[i]) ** 2
b2 = b2 ** 0.5
b = b1 * b2
if b == 0:
W[k] = 0
w[k] = 0
else:
W[k] = a / b
w[k] = a / b
m = 0
for i in user_dict[user]:
i_pair = []
i_pair.append(item)
i_pair.append(i)
k = frozenset(i_pair)
m += (item_user_dict[i][user] - avg_item[i]) * w[k]
n = 0
for k, v in w.items():
n += abs(w[k])
if n == 0:
P[(user, item)] = avg
else:
P[(user, item)] = avg + (m / n)
elif user not in user_dict:
P[(user, item)] = avg
elif item not in item_user_dict:
if user in user_dict:
res = 0
count = len(user_dict[user])
for i in user_dict[user]:
res += item_user_dict[i][user]
res = res / count
P[(user, item)] = res
elif user not in user_dict:
P[(user, item)] = 0
with open(output_file_path, "w") as csvFile:
out = csv.writer(csvFile)
out.writerow(['user_id', 'business_id', 'prediction'])
for pred in P:
out.writerow([pred[0], pred[1], P[pred]])'''
end = time.time()
time = end - start
print("Duration:" + str(time)) | [
"jieyingh@usc.edu"
] | jieyingh@usc.edu |
96dc9ae441e5fcda790e882d873f43dd970a3421 | 8318644cd5b2c26ea080975b7a12926f04bdb0d8 | /ybsong55/app.py | 2d5055c766baf97acc7f30d427ab0d733df333f8 | [] | no_license | kimhokyoung94/team6 | c7784c13650a512b58e68c24dde054f44f606bda | c896aadc129b864482db7474179528611145e624 | refs/heads/master | 2022-11-25T14:16:40.616928 | 2020-08-05T07:40:43 | 2020-08-05T07:40:43 | 285,166,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,346 | py | from flask import Flask ,render_template , flash , redirect , url_for, session, request, logging
from functools import wraps
import pymysql
from passlib.hash import pbkdf2_sha256
from bs4 import BeautifulSoup
app = Flask(__name__)
app.debug=True
db = pymysql.connect(host='localhost',
port=3306,
user='root',
passwd='1234',
db='myflaskapp')
# def data_for_monitor(f):
# @wraps(f)
# def wrap(*args , **kwargs):
# cursor = db.cursor()
# sql='SELECT * FROM solar_{};'.format(session['id'])
# cursor.execute(sql)
# data = cursor.fetchall()
# print(data)
# return data
# return wrap
@app.route('/')
def main_page():
return render_template('main_page.html')
@app.route('/topic')
def topic():
return render_template('topic.html')
@app.route('/login',methods=['POST','GET'])
def login():
if request.method == 'POST':
id = request.form['email']
pw = request.form.get('password')
sql='SELECT * FROM users WHERE email = %s'
cursor = db.cursor()
cursor.execute(sql, [id])
users = cursor.fetchone()
print(users)
if users ==None:
return redirect(url_for('login'))
else:
if pbkdf2_sha256.verify(pw,users[4] ):
session['is_logged'] = True
session['username'] = users[3]
session['id'] = users[0]
print(session)
return redirect(url_for('main_page'))
else:
return redirect(url_for('login'))
else:
return render_template('login.html')
@app.route('/logout')
def logout():
session.clear()
return redirect(url_for('login'))
@app.route('/register', methods=['POST','GET'])
def register():
if request.method == 'POST':
# data = request.body.get('author')
name = request.form.get('name')
email = request.form.get('email')
password = pbkdf2_sha256.hash(request.form.get('password'))
re_password = request.form.get('re_password')
username = request.form.get('username')
# name = form.name.data
cursor = db.cursor()
sql = "SELECT username FROM users WHERE username =%s"
cursor.execute(sql,[username])
exist = cursor.fetchone()
if exist :
return redirect(url_for('register'))
else:
if(pbkdf2_sha256.verify(re_password,password)):
sql = '''
INSERT INTO users (name , email , username , password)
VALUES (%s ,%s, %s, %s)
'''
cursor.execute(sql , (name,email,username,password))
db.commit()
return redirect(url_for('login'))
else:
return "Invalid Password"
db.close()
else:
return render_template('register.html')
@app.route('/monitor',methods=['POST','GET'])
# @data_for_monitor
def monitor():
# if request.method=='GET':
# select = request.form.get('date')
# session['date'] = select
# print(select)
if request.method=='POST':
cursor = db.cursor()
sql='SELECT * FROM monitoring_data where user_id =%s and DATE(Date) = %s ORDER BY Time;'
select = request.form.get('date')
session['date']=select
cursor.execute(sql,[session['id'],session['date']])
data = cursor.fetchall()
lux_data = []
for i in range(len(data)):
lux_data.append(data[i][0])
return render_template('monitor.html', m_data= data, lux_data=lux_data)
else:
return render_template('monitor.html')
@app.route('/monitor1',methods=['POST','GET'])
# @data_for_monitor
def monitor1():
if request.method=='POST':
cursor = db.cursor()
sql='SELECT * FROM monitoring_data where user_id =%s and DATE(Date) = %s ORDER BY Time;'
select = request.form.get('date')
session['date']=select
cursor.execute(sql,[session['id'],session['date']])
data = cursor.fetchall()
humid_data = []
for i in range(len(data)):
humid_data.append(data[i][1])
return render_template('monitor1.html', m_data= data,humid_data=humid_data)
else:
return render_template('monitor1.html')
@app.route('/monitor2',methods=['POST','GET'])
# @data_for_monitor
def monitor2():
if request.method=='POST':
cursor = db.cursor()
sql='SELECT * FROM monitoring_data where user_id =%s and DATE(Date) = %s ORDER BY Time;'
select = request.form.get('date')
session['date']=select
cursor.execute(sql,[session['id'],session['date']])
data = cursor.fetchall()
temp_data = []
for i in range(len(data)):
temp_data.append(data[i][2])
print(temp_data)
return render_template('monitor2.html',m_data= data, temp_data=temp_data)
else:
return render_template('monitor2.html')
@app.route('/analysis')
def analysis():
return render_template('analysis.html')
if __name__ =='__main__':
# ssession 실행시 필요한 설정
app.secret_key = 'secretKey123456789'
# 서버 실행
app.run(host='0.0.0.0', port='8000') | [
"ghrud24020@naver,com"
] | ghrud24020@naver,com |
ae3a9d742c5911496407a7502d31c30106ef2b0a | 7e5a50d6959ac6ec422ec95e6fb1f56fa7cdc93e | /mysite/settings.py | 9f421993da90ccffae98da6916ce8629a1b5c802 | [] | no_license | surmok/my-first-blog | d850b684b2bb4e3a8ba96e6737f629891e81d1f0 | e4ff5a0b0c2bd9a16b9df845db10efd3700f3dd1 | refs/heads/master | 2021-01-25T11:39:57.204336 | 2017-06-10T15:41:52 | 2017-06-10T15:41:52 | 93,939,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,207 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.10.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o7w76%-u45+frrc@wtr9%*1+kgkc+%s+*8f4^0v9^l(cu44nqo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'surmok.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Budapest'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"kata.suranyi@gmail.com"
] | kata.suranyi@gmail.com |
057b579c0756afea2706e055a5f783c34be7b264 | d411a921dd53af6531520ca5d74ea1f9ec6bbed7 | /src/python/live/buySymbol.py | 7e6de90c54c49ca0a3bc77ebae66c0af2431e5a3 | [] | no_license | fxmag/trading-bot | 628205d912f381fb16f6ab8d4fd85c3dd734ed94 | 714f3bfaa9c615e4eddd25870355f4589b5ca2ca | refs/heads/master | 2022-12-09T02:36:53.326497 | 2020-09-15T19:37:28 | 2020-09-15T19:37:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | import time
import MetaTrader5 as mt5
# display data on the MetaTrader 5 package
print("MetaTrader5 package author: ", mt5.__author__)
print("MetaTrader5 package version: ", mt5.__version__)
# establish connection to the MetaTrader 5 terminal
if not mt5.initialize():
print("initialize() failed, error code =",mt5.last_error())
quit()
# prepare the buy request structure
symbol = "USDJPY"
symbol_info = mt5.symbol_info(symbol)
if symbol_info is None:
print(symbol, "not found, can not call order_check()")
mt5.shutdown()
quit()
# if the symbol is unavailable in MarketWatch, add it
if not symbol_info.visible:
print(symbol, "is not visible, trying to switch on")
if not mt5.symbol_select(symbol,True):
print("symbol_select({}}) failed, exit",symbol)
mt5.shutdown()
quit()
lot = 0.1
point = mt5.symbol_info(symbol).point
price = mt5.symbol_info_tick(symbol).ask
deviation = 20
request = {
"action": mt5.TRADE_ACTION_DEAL,
"symbol": symbol,
"volume": lot,
"type": mt5.ORDER_TYPE_BUY,
"price": price,
"sl": price - 100 * point,
"tp": price + 100 * point,
"deviation": deviation,
"magic": 234000,
"comment": "python script open",
"type_time": mt5.ORDER_TIME_GTC,
"type_filling": mt5.ORDER_FILLING_RETURN,
}
# send a trading request
result = mt5.order_send(request)
# check the execution result
print("3. close position #{}: sell {} {} lots at {} with deviation={} points".format(position_id,symbol,lot,price,deviation))
if result.retcode != mt5.TRADE_RETCODE_DONE:
print("4. order_send failed, retcode={}".format(result.retcode))
print(" result",result)
else:
print("4. position #{} closed, {}".format(position_id,result))
# request the result as a dictionary and display it element by element
result_dict=result._asdict()
for field in result_dict.keys():
print(" {}={}".format(field,result_dict[field]))
# if this is a trading request structure, display it element by element as well
if field=="request":
traderequest_dict=result_dict[field]._asdict()
for tradereq_filed in traderequest_dict:
print(" traderequest: {}={}".format(tradereq_filed,traderequest_dict[tradereq_filed]))
# shut down connection to the MetaTrader 5 terminal
mt5.shutdown() | [
"fontelucas@yahoo.com.br"
] | fontelucas@yahoo.com.br |
ec0bfed2e04944f6a53b48dd4438719b1733cb75 | 699ff10c347dc9b6d5af7f531a1c941dbfecd558 | /leetcode/python/232-implement-queue-using-stacks.py | cfbd49aa1d50363b1d16e3ac48c0bcd623bf7032 | [] | no_license | iampkuhz/OnlineJudge_cpp | 71a7637c54d81be2aa066a6132aab31b798bbe6b | 737b9bac5a73c319e46cda8c3e9d729f734d7792 | refs/heads/master | 2021-01-10T10:16:37.589855 | 2017-03-06T12:45:20 | 2017-03-06T12:45:20 | 24,891,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | #!/usr/bin/env python
# encoding: utf-8
"""
Implement the following operations of a queue using stacks.
push(x) -- Push element x to the back of queue.
pop() -- Removes the element from in front of queue.
peek() -- Get the front element.
empty() -- Return whether the queue is empty.
Notes:
You must use only standard operations of a stack -- which means only push to top, peek/pop from top, size, and is empty operations are valid.
Depending on your language, stack may not be supported natively. You may simulate a stack by using a list or deque (double-ended queue), as long as you use only standard operations of a stack.
You may assume that all operations are valid (for example, no pop or peek operations will be called on an empty queue).
"""
# 2次过,速度差不多 40-44ms
class Queue(object):
def __init__(self):
self.ls = []
def push(self, x):
self.ls.append(x)
def pop(self):
return self.ls.pop()
def peek(self):
return self.ls[-1]
def empty(self):
return len(self.ls) == 0
# 3次过, 36-44ms
class Queue(object):
def __init__(self):
"""
initialize your data structure here.
"""
self.ins, self.out = [], []
def conv(self):
k = len(self.ins)
while k > 0:
k -= 1
self.out.append(self.ins.pop())
def push(self, x):
"""
:type x: int
:rtype: nothing
"""
self.ins.append(x)
def pop(self):
"""
:rtype: nothing
"""
if len(self.out) == 0:self.conv()
return self.out.pop()
def peek(self):
"""
:rtype: int
"""
if len(self.out) == 0: self.conv()
return self.out[-1]
def empty(self):
"""
:rtype: bool
"""
return len(self.out) == 0 and len(self.ins) == 0
| [
"iampkuhz@gmail.com"
] | iampkuhz@gmail.com |
fe247491ad250b60cfc470e63187bc32eacfeb9c | 8ed215ee731bc8c55eabdc66ee028a43771510bc | /tasks/nooisee/flask/ids_and_flags.py | ab033ca249b1daac30bde9c7af9c03d3eb3c21b4 | [
"MIT"
] | permissive | irdkwmnsb/lkshl-ctf | c6c0b0ae58653d3d7c427073221043d2adea212c | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | refs/heads/master | 2020-03-23T22:22:23.499985 | 2019-02-22T13:29:51 | 2019-02-22T13:29:51 | 142,172,055 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,415 | py | ids = ['i2yctSSMGde1TjBIor4T', 'DZrIgOypvPqMctJS06O9', 'lH4uWfzCg1kFeULgUIVC', 'N9mUMmG8maidbCD0s4mV', 'PuqvlpXgaa5s42mZKE4j', 'CoRVyJtEoNE87xUoRqw0', '8uKC7RXAOBlOXdP7KVGl', '2cVvsSZY3QLRmoAYRFkL', '22enKTGqQwxga5RFQdus', 'Sw1vkSnYDQdEtOjTnfH0', 'S7mUlJwhAJhCFgxCOoAW', 'JkTjzY6JUEgo9G7tHciV', 'xn8irINr3sDQLIAb2CkI', 'WPnNc5oYE4s3IPkL0fGg', 'dVqlZVLNgCCm1Sy4qfOW', 'l5cEE0hLMVCt2TjTit5c', '7BcmXXs7kbip5gfT853u', 'QZrfCklvGpdwt4vB6oh8', 'hAiHcE7oRy0zXqpoNeWh', 'LbSDhksl1vNujaAmhVhJ', 'oZZ1TyF3Ysg9KxtBGVs8', 'FKerevwgUjdoWUGyZ652', 'jvutz2HwlzcUGbSoZkgu', 'oV28KmKrYyvSdosdRck1', 'tuacfxYJA1CE54Ixao5F', 'q2B8TWtgwVD2rsGEeehx', '5bO4XLG9OyswG01jVleq', 'rZBIgEB002nWqVjMIBzg', '5ojT7jrimtbZP6mp7MAh', 'Z31bdvkrb3NtMIC3MenW', 'bbH9tpTiZz7V8a7i848m', '9xWkjKVCX91TzD933bMD', '3Jq5yRa0S0DKpIi96kjH', '2h3bhgxJ3ohZeNuG5Noq', 'o2YpZKg7619CB6yzN5SB', '1JZsemZRho77QrN0skkl', 'K9ySRqaGklft8OAY4l0G', 'r4wgNCzZbxtMhx8SHZlv', 'zLUWPrq0JEvsXn0yb2c5', 'fa87JWxShRCUK1xAV611', 'Kcr65dOqJVpTDU2cuUP1', 'aidLGpK1oiHQ2lv7gC12', 'Ttxgw2BqBD1jrm5l7hYc', 'YbQgYJDKArj2kOh95j86', 'tF82UdE6QhLaNfxbJ5PB', '3lN47CbPRR22TsFVdP6i', '2msPQ0ruOJ5rvOzvi0Gz', 'MUdVTNaq9i2k85AqIlVl', 'AiPFQJJBnJGTKtU6Ifrc', 'a7patLnLlBvWKjzfoPUy', 'sAsApUGUvINTq68IVRXX', 'Ne2J72wYP6LAnrckWZIh', '0D774XxxX56yC2WM7qkr', 'oggJm9bOxLAEEb1FswRZ', 'DXonEXUOBi61oAAj24MG', 'udpI7111q5lofsEmVkZN', 'lSoDYeb6RnTtOUZCG0y5', 'foE9zcv1E3n4VlnbnMGO', 'H69Fr2lKjto9PoNhbGdO', 'rKPI7ONeLkxt6p0bkCnC', '0gXXuhUYpAfF5TWuouUu', 'R4AMaHKEDnMx2fxdFR4v', 'p8HtmlwqQFbRvmlWj09t', 'Gv8gN89ZvU32kvteW3Kw', 'xOtLyoBFxgTskjoHCA74', 'zWesr92l1uJKm9RGGFwt', 'iCkYhQIGrM46gZyOeivp', 'R6BRGvq6uPZ7cRHdx5iq', 'UiIfbhnbY0J7SmnPOiHI', 
'tgzeloib826aSzWzJRqr', '2XNmQ1h3uOXxvBY5cf45', 'TronVfZ9KWuimzkX7o56', 'iN2I21k3xQ1x4slI1q81', 'Tg0LMsdZFyyuv5spDpcM', '5Tg0M84vOTpM5jee3xCj', 'xS5dAwdL8ZLCUBCRZnEf', 'r9lyMRAjrWp3BxpT9NB5', 'hhsJYamIQskGD4BkLPUK', 'Du0ryLhVJFhfoDDOquON', 'yJkQ3y6kv93edSSoPcXl', 'ax6V8yGruNZMWmQJW8s8', 'U7GWGaCUjYSJQROeJNPl', '0Kr0BkEkkbj0fHi0HVt9', 'TvX8REHZmZGFWchiFFff', 'unBi2qvY43oHIf67DpJk', 'f70Yf2D6U2cokFrE4M89', 'UHR1IIsj0RqR0tCMWjaY', 'k7noV2tQkX0mXSwbRTeA', 'G2RCr57Ur0r8Bd0msCnh', 'uEs94rT8dvyMSK3tgUmb', 'Cn86UI8t4tEU5LslZLhT', 'GasDs9W7O7zE5UOLlUyU', '8yBxaH7rrliGXrE9iN6l', '2ib88yqt6IMAhZLbP5eR', 'OqZnGIsjGmw2MGsS4Y7F', 'JRj9gpEn7JmdErJthkDN', 'OHoTjcphFL1tWxfdmwfb', 'CmLSRN8HL8CpH7Je56lw', 'BQPKERa1jaW7lv99ETYL', 'tshNpN967rtSld8NYCgt', 'MWVDlAwG0z6KSFKDVdxy', 'Aj6Wbseon4e6CM2a8N1s', 'ZtDlWQnQaURZqkWVxPSc', 'KdtXIlqS604uHDKoqenD', 'QO0sLYHHSnzPUKHjZgiy', 'C1Tn5OZCJfSewPe9qxnP', 'plyCyxg03GwFY0kc9Mh7', 'BV5FLHzyksnrHn4qgb7Z', 'uz2OONdBpGpWREOFn2Dp', 'nRafhWiq9ady7YMjMfmD', 'jbKC4rnCJqnSWNybL0ho', 'v15v2YSAdmmwFsbJzv17', 'wrA9JCI3vQ17wKRznfmT', 'Wrppq31UosvJyjTECnOB', 'MxloukUZxGkyVaDtGyG1', 'CoeFnlaucvloF9dGCV1K', 'q0z4Vqf8uVOJjXMooAd0', 'tEu9sRK1yGBC8kH7sDUb', 'lgMRRPCNwMKSjb82hJN0', 'UhYGSPsJx6dNUCzuVaEw', '0lzOfTipjEFqzJsLiyYI', 'hbTF34f4iv7a981bHuV7', 'DYvTEJqsIk8pPuQS5Hai', 'mG9i1Q9oOtimP7Xpge4R', 'fI4oQBFO67nD6GeFxPjb', 'TkZRLa98jQEvEk5xiBi2', 'gJdNhYVuNHsXemBEU0XX', 'TxyeoCCGL80nX6hv7OcE', '5SayX4207FIvIIjqBANn', 'Kt1CH39vMZQO8mVVbPVG', 'sZInNDG5pKcfzsL1GeJk', 'lSoiTyLc5Pm2g8bS25fO', '8RygrlGoU9tVd20nB6pU', 'QjpdFah2GgN73iMOcKgu', '5uqRz6u3P0kcLddR0X1Q', 'JlYTWI9NtbINkqA9Gcx8', 'O48qYJt8hOoOeIRFZiZ5', 'HKhGZkv4v4SCwVNgowvF', 'fV4BZGFvUS2PUNCHQJoQ', 'kqSSIrlgFzfrYzW7LeC6', 'Yf2waPjPfokxDnTkbKR0', '41fFikB0JwoZ2d3bbT2N', 'XzITKmVvLkUGANuolqjs', 'WeL0qrrg3VUSvce4eOqH', 'bnC1DabRzDnbLOHlVs95', 'iNVSQXEm7uamgR2uM8ub', 'Us0NNywPFasbh18rhKJL', '0UJ9ZTXiVSsXjpJCXYw5', 'MAZtrsxoVBMmh1KxOXIC', 'XMHVLcPDSFEL4PBgO5Uw', '57aBZrcavR7OsV8mFfdL', 'SDYrvSy5HNxZvCHpdPBa', 
'qZjSvBWwtiV16tprHaJZ', 'cwYVhgQzRGa1UZa6wVXJ', 'ZurZjCN8DEZ87clrXlr0', 'sLTnm7u7ZtN9JfT29Vxg', 'sYdSmELU7dTZzi9tPmkG', 's0JdJwu3TsxmUDccEpKQ', 'jZrz2x2pItkIAFD2oyWF', 'ooOzVV9Dd1POoGwBiWsd', 'MuFsWSaRIWDWxUtMtHdH', 'mYEILDLnIaMBAg4LJZYA', 'w402UedR0qSN03uxFKro', 'Xh4tAImtQ11tnq25JIwt', 'Q6YOeR6OYFXpRc2vaqp4', '6EaLzsqaq7s0qRavwFOR', 'KipsQDR4RI6fgYkiYQeu', '5TByalXqgofiGuUFQ8ga', '6WSjCzum30MOuHJHI25r', '8o9mxMvUtlKMAIFxuQkY', 'x2N5Jp1uJPsIeLAknqrk', 'fn1DYGLRxayGv91i3ico', '1vpNXERfEuuvKG1yt6Es', 'YDgM6cyCeZ3WMbKtnZRA', 'VXcpNpWmcOD4ZuH0vvqE', '67bfDWVAqymhAV8xoow2', 'shuxt8SQoWuiSjmNCrq6', 'SUAwETaPiK5yZWwWgzLe', 'v8c6KsbsY0O7r20NcTc9', 'Z3I5tZoUE9Sl80IPDio7', 'erz0CZLp38LLQtw5CEyE', 'qiV6CQW3Np8fLUi4aUx1', 'UMxKLOtyDTZsD89IVXn5', '6Ue63hlYvUd2vHbNQTSZ', 'zLwT4gUVggNYF1Qz3eJK', 'EcfEf5UUER30630SJtcM', 'd1GbTz3UiUdCZAtOiSfH', '8I6JNrQL7zXkoMLQ14AI', '9oylE0h4WnWRlJJJ81RO', 'nSMZmbS7vIdnKGym2NOB', 'CJCVx5gq2zEVFZSsHlUi', '1okbUDCHJuIZJ4c4r0cN', 'rc8HONSCGpF0WTct384T', 'EcrLmnCC47uM5uNzapU7', 'BKcxCqu6kH2eB5tvqbp8', 'zxcpVWFMGRo96KdhAWC4', 'pNAbg6kLWHvgWU18GSDR', 'rXsOIcfQbrObgjhKFD1y', 'gng3koJU2ngLBOMBkn09', '6eDv9WvCunSJ3rbR7P41']
flags = ['LKL{g00D_N0isss3_M0VwcT}', 'LKL{g00D_N0isss3_fIh2JH}', 'LKL{g00D_N0isss3_oD1gJ7}', 'LKL{g00D_N0isss3_SfipqG}', 'LKL{g00D_N0isss3_oS5Nnz}', 'LKL{g00D_N0isss3_Btipdn}', 'LKL{g00D_N0isss3_Mo2isN}', 'LKL{g00D_N0isss3_gfjVax}', 'LKL{g00D_N0isss3_89DjDR}', 'LKL{g00D_N0isss3_U9rTxu}', 'LKL{g00D_N0isss3_zkT5Ks}', 'LKL{g00D_N0isss3_vVa7nj}', 'LKL{g00D_N0isss3_6PTYIO}', 'LKL{g00D_N0isss3_yXAKpI}', 'LKL{g00D_N0isss3_UXYisz}', 'LKL{g00D_N0isss3_485o6m}', 'LKL{g00D_N0isss3_IAfQoF}', 'LKL{g00D_N0isss3_u7jwOR}', 'LKL{g00D_N0isss3_0eVf9D}', 'LKL{g00D_N0isss3_cJEXvX}', 'LKL{g00D_N0isss3_r8yGte}', 'LKL{g00D_N0isss3_0Wg6vG}', 'LKL{g00D_N0isss3_2yxorP}', 'LKL{g00D_N0isss3_4F6Syl}', 'LKL{g00D_N0isss3_Sfy6NZ}', 'LKL{g00D_N0isss3_MHIZ0f}', 'LKL{g00D_N0isss3_besNuI}', 'LKL{g00D_N0isss3_3Ofy6n}', 'LKL{g00D_N0isss3_bU4Enb}', 'LKL{g00D_N0isss3_jTy3F5}', 'LKL{g00D_N0isss3_ZeCN3f}', 'LKL{g00D_N0isss3_qJE6fK}', 'LKL{g00D_N0isss3_86VxMN}', 'LKL{g00D_N0isss3_VXRzes}', 'LKL{g00D_N0isss3_JyPPq5}', 'LKL{g00D_N0isss3_JGYTE9}', 'LKL{g00D_N0isss3_NcaQzt}', 'LKL{g00D_N0isss3_Py2Jbl}', 'LKL{g00D_N0isss3_yepRkv}', 'LKL{g00D_N0isss3_2SsIXv}', 'LKL{g00D_N0isss3_O1Hz6r}', 'LKL{g00D_N0isss3_H6n4Z9}', 'LKL{g00D_N0isss3_Ncw3Z8}', 'LKL{g00D_N0isss3_KUcuzK}', 'LKL{g00D_N0isss3_qIY0i2}', 'LKL{g00D_N0isss3_084rcz}', 'LKL{g00D_N0isss3_CSOVie}', 'LKL{g00D_N0isss3_Tx304O}', 'LKL{g00D_N0isss3_NQHYem}', 'LKL{g00D_N0isss3_j2yrJp}', 'LKL{g00D_N0isss3_fYETyb}', 'LKL{g00D_N0isss3_KFKGph}', 'LKL{g00D_N0isss3_Y67kzX}', 'LKL{g00D_N0isss3_DFaPLi}', 'LKL{g00D_N0isss3_pH9R0C}', 'LKL{g00D_N0isss3_Jz9TY7}', 'LKL{g00D_N0isss3_JGxdKo}', 'LKL{g00D_N0isss3_EEUsf3}', 'LKL{g00D_N0isss3_tffJEU}', 'LKL{g00D_N0isss3_mCsaLE}', 'LKL{g00D_N0isss3_F8J0OW}', 'LKL{g00D_N0isss3_9l20a6}', 'LKL{g00D_N0isss3_bZHXxr}', 'LKL{g00D_N0isss3_WXInmT}', 'LKL{g00D_N0isss3_giBP9c}', 'LKL{g00D_N0isss3_S3Oxlh}', 'LKL{g00D_N0isss3_fVRZxk}', 'LKL{g00D_N0isss3_OePWlp}', 'LKL{g00D_N0isss3_VrqnRw}', 'LKL{g00D_N0isss3_IoLWv0}', 'LKL{g00D_N0isss3_IyM6fA}', 
'LKL{g00D_N0isss3_auHrW6}', 'LKL{g00D_N0isss3_oK579V}', 'LKL{g00D_N0isss3_RVElQC}', 'LKL{g00D_N0isss3_oR9Aqc}', 'LKL{g00D_N0isss3_zPD9Za}', 'LKL{g00D_N0isss3_5khQWk}', 'LKL{g00D_N0isss3_wydJs2}', 'LKL{g00D_N0isss3_ttNaud}', 'LKL{g00D_N0isss3_kIMIU7}', 'LKL{g00D_N0isss3_SNahdB}', 'LKL{g00D_N0isss3_kBCPmL}', 'LKL{g00D_N0isss3_BpNCv3}', 'LKL{g00D_N0isss3_IZPzC4}', 'LKL{g00D_N0isss3_s6kihA}', 'LKL{g00D_N0isss3_KX4A5L}', 'LKL{g00D_N0isss3_uQUZzA}', 'LKL{g00D_N0isss3_632Y2A}', 'LKL{g00D_N0isss3_W135ft}', 'LKL{g00D_N0isss3_LE6N7W}', 'LKL{g00D_N0isss3_KyICZe}', 'LKL{g00D_N0isss3_zkD0rf}', 'LKL{g00D_N0isss3_9buyIv}', 'LKL{g00D_N0isss3_kGEOoy}', 'LKL{g00D_N0isss3_ZfBib1}', 'LKL{g00D_N0isss3_z0slZ2}', 'LKL{g00D_N0isss3_88A01U}', 'LKL{g00D_N0isss3_oUNEDP}', 'LKL{g00D_N0isss3_Cnyscg}', 'LKL{g00D_N0isss3_7IkYG0}', 'LKL{g00D_N0isss3_gF0wmI}', 'LKL{g00D_N0isss3_yMF2cR}', 'LKL{g00D_N0isss3_TXzhcc}', 'LKL{g00D_N0isss3_3vUVPT}', 'LKL{g00D_N0isss3_75g5Wu}', 'LKL{g00D_N0isss3_ZGkNWN}', 'LKL{g00D_N0isss3_9baV51}', 'LKL{g00D_N0isss3_emoXAO}', 'LKL{g00D_N0isss3_pVghGT}', 'LKL{g00D_N0isss3_tQFOWQ}', 'LKL{g00D_N0isss3_jd4Zue}', 'LKL{g00D_N0isss3_kcVj6F}', 'LKL{g00D_N0isss3_XBIDjP}', 'LKL{g00D_N0isss3_hCVw6C}', 'LKL{g00D_N0isss3_tkYVgw}', 'LKL{g00D_N0isss3_t7tZkx}', 'LKL{g00D_N0isss3_6xlFZ6}', 'LKL{g00D_N0isss3_HSWb9c}', 'LKL{g00D_N0isss3_sLOi9l}', 'LKL{g00D_N0isss3_YXkZdr}', 'LKL{g00D_N0isss3_K5w8aU}', 'LKL{g00D_N0isss3_mv8ziu}', 'LKL{g00D_N0isss3_vxVAEt}', 'LKL{g00D_N0isss3_azgJlU}', 'LKL{g00D_N0isss3_Z2NJdp}', 'LKL{g00D_N0isss3_JaF5vV}', 'LKL{g00D_N0isss3_KxSi7R}', 'LKL{g00D_N0isss3_OI6SRb}', 'LKL{g00D_N0isss3_4R6m2i}', 'LKL{g00D_N0isss3_xtOTsi}', 'LKL{g00D_N0isss3_8ulVa0}', 'LKL{g00D_N0isss3_HkjTle}', 'LKL{g00D_N0isss3_FcrnrL}', 'LKL{g00D_N0isss3_zIDDbw}', 'LKL{g00D_N0isss3_wh2Fh6}', 'LKL{g00D_N0isss3_pkrF9v}', 'LKL{g00D_N0isss3_1Lq22A}', 'LKL{g00D_N0isss3_Vyf8vW}', 'LKL{g00D_N0isss3_VZ9rR0}', 'LKL{g00D_N0isss3_aeVraB}', 'LKL{g00D_N0isss3_hSoDcd}', 'LKL{g00D_N0isss3_RkTNkY}', 
'LKL{g00D_N0isss3_2jRJ44}', 'LKL{g00D_N0isss3_p6PYM7}', 'LKL{g00D_N0isss3_nODrjr}', 'LKL{g00D_N0isss3_Btlsll}', 'LKL{g00D_N0isss3_48wYnO}', 'LKL{g00D_N0isss3_TBcmal}', 'LKL{g00D_N0isss3_lErmPs}', 'LKL{g00D_N0isss3_fEHtQe}', 'LKL{g00D_N0isss3_gjShxr}', 'LKL{g00D_N0isss3_Daj3S7}', 'LKL{g00D_N0isss3_CfIRqC}', 'LKL{g00D_N0isss3_pXUtMd}', 'LKL{g00D_N0isss3_rhVZVx}', 'LKL{g00D_N0isss3_CqsRWp}', 'LKL{g00D_N0isss3_yNBCA6}', 'LKL{g00D_N0isss3_vw6ySl}', 'LKL{g00D_N0isss3_JzxHxq}', 'LKL{g00D_N0isss3_Wcjjdr}', 'LKL{g00D_N0isss3_AKedWk}', 'LKL{g00D_N0isss3_hs10Sa}', 'LKL{g00D_N0isss3_5WBLqq}', 'LKL{g00D_N0isss3_1riPbD}', 'LKL{g00D_N0isss3_dV1wxO}', 'LKL{g00D_N0isss3_or6wJE}', 'LKL{g00D_N0isss3_bfr8E6}', 'LKL{g00D_N0isss3_Jlgc1D}', 'LKL{g00D_N0isss3_t1J8ZG}', 'LKL{g00D_N0isss3_8m9ery}', 'LKL{g00D_N0isss3_hiVkBd}', 'LKL{g00D_N0isss3_vIrWAD}', 'LKL{g00D_N0isss3_Mn9K3B}', 'LKL{g00D_N0isss3_pgjdiB}', 'LKL{g00D_N0isss3_azAstf}', 'LKL{g00D_N0isss3_wwURNX}', 'LKL{g00D_N0isss3_dtXquC}', 'LKL{g00D_N0isss3_qYuvXY}', 'LKL{g00D_N0isss3_rIkruu}', 'LKL{g00D_N0isss3_ATULAI}', 'LKL{g00D_N0isss3_wernRd}', 'LKL{g00D_N0isss3_pvziV6}', 'LKL{g00D_N0isss3_WPIIJQ}', 'LKL{g00D_N0isss3_yJPisd}', 'LKL{g00D_N0isss3_xrXPrQ}', 'LKL{g00D_N0isss3_j0IkqH}', 'LKL{g00D_N0isss3_wXBlZx}', 'LKL{g00D_N0isss3_DKBsw5}', 'LKL{g00D_N0isss3_l9JeSM}', 'LKL{g00D_N0isss3_jPVEqw}', 'LKL{g00D_N0isss3_BuGWtj}', 'LKL{g00D_N0isss3_mJWPmx}', 'LKL{g00D_N0isss3_2zAryd}', 'LKL{g00D_N0isss3_rP5bah}', 'LKL{g00D_N0isss3_Z86HGm}', 'LKL{g00D_N0isss3_m08J5V}', 'LKL{g00D_N0isss3_hukANs}', 'LKL{g00D_N0isss3_P2KSOO}', 'LKL{g00D_N0isss3_aauXbW}', 'LKL{g00D_N0isss3_kZ6TBv}'] | [
"supermax74.02@gmail.com"
] | supermax74.02@gmail.com |
1aadf32219df1a0f837c2565c18f3ee4644543aa | da4f58a4a839301385816c9b31aee12fda478587 | /3/dsa/heaps/minheap.py | 4ef194e9f6e759716f4fc0ac01d34908c2505d06 | [] | no_license | san39320/lab | 047f0531ab6496391aedf274a4f7710c7c0b2d31 | 69867bb36db15c23f2ca2cd6514a0de432acbaff | refs/heads/master | 2021-05-14T08:05:30.047940 | 2019-10-21T08:39:56 | 2019-10-21T08:39:56 | 116,284,508 | 0 | 1 | null | 2019-10-21T08:38:37 | 2018-01-04T16:49:37 | C | UTF-8 | Python | false | false | 1,813 | py | class maxheap:
def __init__(self):
self.a = [None] * 50
self.count = 0
### heapify
    def arrayheap(self,a):
        """Bulk-load list `a` into the heap and restore heap order bottom-up
        (classic build-heap), overwriting any previous contents."""
        # Copy into slots 1..len(a); slot 0 stays unused.
        for i in range(0, len(a)):
            self.a[i + 1] = a[i]
        self.count = len(a)
        # Sift down every internal node, from the last parent up to the root.
        for i in range(int(self.count / 2), 0, -1):
            self.heapify(i)
    def heapify(self,i):
        """Sift the element at index i down until no child is smaller.

        Children of i live at 2*i and 2*i+1.  The backing array keeps None
        in unused slots, so the `!= None` tests double as "child exists"
        checks.
        """
        # Only sift while i is a live node with at least a left child.
        if (i <= self.count) and ((2*i) <= self.count):
            # Right child exists and is strictly smaller than the left one:
            # compare the parent against the right child.
            if (self.a[2 * i + 1] != None) and (self.a[2 * i] > self.a[2 * i + 1]):
                if self.a[i] > self.a[2 * i + 1]:
                    self.a[i],self.a[2*i+1] = self.a[2 * i+1],self.a[i]
                    self.heapify(2*i+1)
            # Otherwise compare against the left child (if it is occupied).
            elif self.a[2*i] != None:
                if self.a[i] > self.a[2 * i]:
                    self.a[i],self.a[2*i] = self.a[2 * i],self.a[i]
                    self.heapify(2*i)
        # NOTE(review): reading self.a[2 * i + 1] assumes index 2*i+1 is inside
        # the fixed 50-slot array; that holds only while count stays below the
        # capacity -- confirm intended maximum heap size.
def insertheap(self,k):
self.count=self.count+1
self.a[self.count]=k
self.bottomtopbalance(self.count)
def bottomtopbalance(self,i):
if (self.a[i//2] != None) and (self.a[i//2] > self.a[i] ):
self.a[i],self.a[i//2]=self.a[i//2],self.a[i]
self.bottomtopbalance(i//2)
else:
return
    def extractmax(self):
        """Remove and return the root element.

        NOTE(review): despite the name, this pops the *minimum* (the class
        implements a min-heap); name kept for caller compatibility.  Assumes
        the heap is non-empty (count >= 1) -- confirm callers guarantee that.
        """
        temp=self.a[1]
        # Move the last element to the root, clear its old slot so the None
        # sentinel logic in heapify keeps working, then sift down.
        self.a[1]=self.a[self.count]
        self.a[self.count ] = None
        self.count=self.count-1
        self.heapify(1)
        return temp
    def display(self):
        """Print the raw backing array, including the unused None slots."""
        print("heap contains",self.a)
    def maximum(self):
        """Print the root value.

        NOTE(review): named `maximum` but prints the minimum (this class is a
        min-heap, as its own message says); name kept for compatibility.
        """
        print("the minimum is",self.a[1])
def main():
    """Demo driver: build a heap from a sample list, insert one value, then
    extract the two smallest elements and show the remaining state."""
    values = [1, 2, 7, 8, 3, 4, 9, 70, 13, 15, 20]
    heap = maxheap()
    heap.arrayheap(values)
    heap.insertheap(5)
    print("the extracted min element ", heap.extractmax())
    print("the extracted min element ", heap.extractmax())
    heap.display()
    heap.maximum()
if __name__ == '__main__':
main()
| [
"sanjayshetty2015@gmail.com"
] | sanjayshetty2015@gmail.com |
aded8e6f34c43d6a0acd9789f1172361e389c74e | 9bef44307cf379a005f695ca65fd4cef2f6d2dda | /unsorted/make_symm_axes.py | 6bbf0b19f014ca27cfbca841dd13a0dbfa873952 | [] | no_license | willsheffler/lib | a2e691cd1bccfc89989b161820616b57f9097c4d | b3e2781468a8fc25528a9f03c31e45af1cddd75a | refs/heads/master | 2020-03-30T04:11:51.663557 | 2013-05-26T03:53:40 | 2013-05-26T03:53:40 | 1,439,853 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import sys,os,math
def sub(X, Y):
    """Component-wise difference X - Y of two 3-vectors, as a tuple."""
    return tuple(X[k] - Y[k] for k in range(3))
def cross(X, Y):
    """Cross product X x Y of two 3-vectors, returned as a tuple."""
    i = X[1] * Y[2] - X[2] * Y[1]
    j = X[2] * Y[0] - X[0] * Y[2]
    k = X[0] * Y[1] - X[1] * Y[0]
    return i, j, k
def norm(X):
    """Return X scaled to unit length, as a 3-tuple.

    Raises ZeroDivisionError for the zero vector (same as the original).
    """
    length = math.sqrt(sum(c * c for c in X[:3]))
    return X[0] / length, X[1] / length, X[2] / length
# Read the first three ATOM records from the PDB file named on the command
# line and derive an axis pair plus an origin from their coordinates.
a = None
b = None
c = None
for l in open(sys.argv[1]).xreadlines():
    if not l.startswith("ATOM "): continue
    # print "'"+l[30:38]+"'", "'"+l[38:46]+"'", "'"+l[46:54]+"'"
    # Fixed PDB columns 31-54 hold the x, y, z coordinates of the atom.
    if a is None: a = float(l[30:38]),float(l[38:46]),float(l[46:54])
    elif b is None: b = float(l[30:38]),float(l[38:46]),float(l[46:54])
    elif c is None: c = float(l[30:38]),float(l[38:46]),float(l[46:54])
    else: break
# X: unit vector from the first atom towards the second.
X = norm(sub(b,a))
# Y: unit vector perpendicular to the plane spanned by the three atoms.
Y = norm(cross(X,sub(c,b)))
# C: the first atom serves as the center/origin.
C = a
# NOTE(review): Python 2 syntax (xreadlines, print statement); crashes if the
# file holds fewer than three ATOM records (b or c stays None) -- confirm input.
print X[0],X[1],X[2], " ", Y[0],Y[1],Y[2], " ", C[0],C[1],C[2]
| [
"will@sheffler.me"
] | will@sheffler.me |
a6801fd0c116eef78700629bacc8ba15f81a8b43 | 33f0f22e6449d24dbdb314e2912b6ffad7954ddc | /multiprocessing/maml_rl/samplers/multi_task_sampler.py | f89b9b44f4ed770a4750c0a8e931aef906ddd6f8 | [
"MIT"
] | permissive | imhgchoi/MAML-RL | 55d3d9489c5b58c4f656755f13bad2a4d10bf5df | b90e6fa75b5c320b98b58cf0242fd55b47128dc6 | refs/heads/master | 2023-02-20T23:57:21.946406 | 2021-01-14T12:49:51 | 2021-01-14T12:49:51 | 313,572,570 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,789 | py | import torch
import torch.multiprocessing as mp
import asyncio
import threading
import time
import pdb
from datetime import datetime, timezone
from copy import deepcopy
from maml_rl.samplers.sampler import Sampler, make_env
from maml_rl.envs.utils.sync_vector_env import SyncVectorEnv
from maml_rl.episode import BatchEpisodes
from maml_rl.utils.reinforcement_learning import reinforce_loss
def _create_consumer(queue, futures, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
while True:
data = queue.get()
if data is None:
break
index, step, episodes = data
future = futures if (step is None) else futures[step]
if not future[index].cancelled():
loop.call_soon_threadsafe(future[index].set_result, episodes)
class MultiTaskSampler(Sampler):
    """Vectorized sampler to sample trajectories from multiple environments.

    Parameters
    ----------
    env_name : str
        Name of the environment. This environment should be an environment
        registered through `gym`. See `maml.envs`.

    env_kwargs : dict
        Additional keyword arguments to be added when creating the environment.

    batch_size : int
        Number of trajectories to sample from each task (ie. `fast_batch_size`).

    policy : `maml_rl.policies.Policy` instance
        The policy network for sampling. Note that the policy network is an
        instance of `torch.nn.Module` that takes observations as input and
        returns a distribution (typically `Normal` or `Categorical`).

    baseline : `maml_rl.baseline.LinearFeatureBaseline` instance
        The baseline. This baseline is an instance of `nn.Module`, with an
        additional `fit` method to fit the parameters of the model.

    env : `gym.Env` instance (optional)
        An instance of the environment given by `env_name`. This is used to
        sample tasks from. If not provided, an instance is created from `env_name`.

    seed : int (optional)
        Random seed for the different environments. Note that each task and each
        environment inside every process use different random seed derived from
        this value if provided.

    num_workers : int
        Number of processes to launch. Note that the number of processes does
        not have to be equal to the number of tasks in a batch (ie. `meta_batch_size`),
        and can scale with the amount of CPUs available instead.
    """
    def __init__(self, env_name, env_kwargs, batch_size, policy, baseline,
                 env=None, seed=None, num_workers=1):
        super(MultiTaskSampler, self).__init__(env_name, env_kwargs, batch_size,
                                               policy, seed=seed, env=env)

        self.num_workers = num_workers

        # Tasks are pushed onto task_queue; workers push sampled episodes back
        # on the two result queues (pre-update "train" / post-update "valid").
        self.task_queue = mp.JoinableQueue()
        self.train_episodes_queue = mp.Queue()
        self.valid_episodes_queue = mp.Queue()
        # Serializes access to the shared policy parameters across processes.
        policy_lock = mp.Lock()

        self.workers = [SamplerWorker(index, env_name, env_kwargs, batch_size,
                                      self.env.observation_space, self.env.action_space,
                                      self.policy, deepcopy(baseline), self.seed,
                                      self.task_queue, self.train_episodes_queue,
                                      self.valid_episodes_queue, policy_lock)
                        for index in range(num_workers)]

        for worker in self.workers:
            worker.daemon = True  # this makes all the threads stop when main process ends
            worker.start()

        self._waiting_sample = False
        self._event_loop = asyncio.get_event_loop()
        self._train_consumer_thread = None
        self._valid_consumer_thread = None

    def sample_tasks(self, num_tasks):
        """Sample `num_tasks` tasks from the underlying (unwrapped) environment."""
        return self.env.unwrapped.sample_tasks(num_tasks)

    def sample_async(self, tasks, **kwargs):
        """Enqueue `tasks` for the worker processes and return the futures
        `(train_episodes_futures, valid_episodes_futures)` immediately."""
        if self._waiting_sample:
            raise RuntimeError('Calling `sample_async` while waiting '
                               'for a pending call to `sample_async` '
                               'to complete. Please call `sample_wait` '
                               'before calling `sample_async` again.')

        for index, task in enumerate(tasks):
            self.task_queue.put((index, task, kwargs))

        num_steps = kwargs.get('num_steps', 1)
        futures = self._start_consumer_threads(tasks,
                                               num_steps=num_steps)
        self._waiting_sample = True
        return futures

    def sample_wait(self, episodes_futures):
        """Block until every future from `sample_async` resolves.

        Returns `(train_episodes, valid_episodes)` where `train_episodes`
        is indexed as [step][task].
        """
        if not self._waiting_sample:
            raise RuntimeError('Calling `sample_wait` without any '
                               'prior call to `sample_async`.')

        async def _wait(train_futures, valid_futures):
            # Gather the train and valid episodes
            train_episodes = await asyncio.gather(*[asyncio.gather(*futures)
                                                    for futures in train_futures])
            valid_episodes = await asyncio.gather(*valid_futures)
            # Fix: removed leftover debug `print(...)` statements and the
            # `exit()` call that killed the whole process here before any
            # samples could ever be returned to the caller.
            return (train_episodes, valid_episodes)

        samples = self._event_loop.run_until_complete(_wait(*episodes_futures))
        self._join_consumer_threads()
        self._waiting_sample = False
        return samples

    def sample(self, tasks, **kwargs):
        """Synchronous convenience wrapper: `sample_async` then `sample_wait`."""
        futures = self.sample_async(tasks, **kwargs)
        return self.sample_wait(futures)

    @property
    def train_consumer_thread(self):
        if self._train_consumer_thread is None:
            raise ValueError()
        return self._train_consumer_thread

    @property
    def valid_consumer_thread(self):
        if self._valid_consumer_thread is None:
            raise ValueError()
        return self._valid_consumer_thread

    def _start_consumer_threads(self, tasks, num_steps=1):
        """Spawn the two daemon threads that move episodes from the worker
        result queues into asyncio futures; returns those futures."""
        # Start train episodes consumer thread
        train_episodes_futures = [[self._event_loop.create_future() for _ in tasks]
                                  for _ in range(num_steps)]
        self._train_consumer_thread = threading.Thread(target=_create_consumer,
                                                       args=(self.train_episodes_queue, train_episodes_futures),
                                                       kwargs={'loop': self._event_loop},
                                                       name='train-consumer')
        self._train_consumer_thread.daemon = True
        self._train_consumer_thread.start()

        # Start valid episodes consumer thread
        valid_episodes_futures = [self._event_loop.create_future() for _ in tasks]
        self._valid_consumer_thread = threading.Thread(target=_create_consumer,
                                                       args=(self.valid_episodes_queue, valid_episodes_futures),
                                                       kwargs={'loop': self._event_loop},
                                                       name='valid-consumer')
        self._valid_consumer_thread.daemon = True
        self._valid_consumer_thread.start()

        return (train_episodes_futures, valid_episodes_futures)

    def _join_consumer_threads(self):
        """Send the None sentinel to both consumer threads and join them."""
        if self._train_consumer_thread is not None:
            self.train_episodes_queue.put(None)
            self.train_consumer_thread.join()

        if self._valid_consumer_thread is not None:
            self.valid_episodes_queue.put(None)
            self.valid_consumer_thread.join()

        self._train_consumer_thread = None
        self._valid_consumer_thread = None

    def close(self):
        """Shut down every worker process (one None sentinel each) and the
        consumer threads; idempotent once `closed` is set."""
        if self.closed:
            return

        for _ in range(self.num_workers):
            self.task_queue.put(None)
        self.task_queue.join()
        self._join_consumer_threads()

        self.closed = True
class SamplerWorker(mp.Process):
    """Worker process that owns a batch of vectorized environments.

    It pulls `(index, task, kwargs)` items from `task_queue`, samples
    pre-adaptation ("train") and post-adaptation ("valid") episodes for
    each task, and pushes them on `train_queue` / `valid_queue`.  A None
    item on `task_queue` shuts the worker down.
    """
    def __init__(self,
                 index,
                 env_name,
                 env_kwargs,
                 batch_size,
                 observation_space,
                 action_space,
                 policy,
                 baseline,
                 seed,
                 task_queue,
                 train_queue,
                 valid_queue,
                 policy_lock):
        super(SamplerWorker, self).__init__()

        # One environment instance per trajectory in the batch.
        env_fns = [make_env(env_name, env_kwargs=env_kwargs)
                   for _ in range(batch_size)]
        self.envs = SyncVectorEnv(env_fns,
                                  observation_space=observation_space,
                                  action_space=action_space)
        # Derive a distinct seed per worker from the base seed.
        self.envs.seed(None if (seed is None) else seed + index * batch_size)
        self.batch_size = batch_size
        self.policy = policy
        self.baseline = baseline

        self.task_queue = task_queue
        self.train_queue = train_queue
        self.valid_queue = valid_queue
        self.policy_lock = policy_lock

    def sample(self,index, num_steps=1, fast_lr=0.5, gamma=0.95, gae_lambda=1.0, device='cpu'):
        """Run the MAML inner loop for one task and enqueue the episodes."""
        # Sample the training trajectories with the initial policy and adapt the
        # policy to the task, based on the REINFORCE loss computed on the
        # training trajectories. The gradient update in the fast adaptation uses
        # `first_order=True` no matter if the second order version of MAML is
        # applied since this is only used for sampling trajectories, and not
        # for optimization.
        params = None
        for step in range(num_steps):
            train_episodes = self.create_episodes(params=params,
                                                  gamma=gamma,
                                                  gae_lambda=gae_lambda,
                                                  device=device)
            train_episodes.log('_enqueueAt', datetime.now(timezone.utc))
            # QKFIX: Deep copy the episodes before sending them to their
            # respective queues, to avoid a race condition. This issue would
            # cause the policy pi = policy(observations) to be miscomputed for
            # some timesteps, which in turns makes the loss explode.
            self.train_queue.put((index, step, deepcopy(train_episodes)))
            with self.policy_lock:
                loss = reinforce_loss(self.policy, train_episodes, params=params)
                params = self.policy.update_params(loss,
                                                   params=params,
                                                   step_size=fast_lr,
                                                   first_order=True)

        # Sample the validation trajectories with the adapted policy
        valid_episodes = self.create_episodes(params=params,
                                              gamma=gamma,
                                              gae_lambda=gae_lambda,
                                              device=device)
        valid_episodes.log('_enqueueAt', datetime.now(timezone.utc))
        self.valid_queue.put((index, None, deepcopy(valid_episodes)))

    def create_episodes(self,
                        params=None,
                        gamma=0.95,
                        gae_lambda=1.0,
                        device='cpu'):
        """Roll out one batch of trajectories, fit the baseline on them, and
        return the episodes with advantages already computed."""
        episodes = BatchEpisodes(batch_size=self.batch_size,
                                 gamma=gamma,
                                 device=device)
        episodes.log('_createdAt', datetime.now(timezone.utc))
        episodes.log('process_name', self.name)

        t0 = time.time()
        for item in self.sample_trajectories(params=params):
            episodes.append(*item)
        episodes.log('duration', time.time() - t0)

        self.baseline.fit(episodes)
        episodes.compute_advantages(self.baseline,
                                    gae_lambda=gae_lambda,
                                    normalize=True)
        return episodes

    def sample_trajectories(self, params=None):
        """Generator yielding (observations, actions, rewards, batch_ids)
        per env step until every environment in the batch is done.

        Runs under torch.no_grad(): sampling never needs gradients.
        """
        observations = self.envs.reset()
        with torch.no_grad():
            while not self.envs.dones.all():
                observations_tensor = torch.from_numpy(observations)
                pi = self.policy(observations_tensor, params=params)
                actions_tensor = pi.sample()
                actions = actions_tensor.cpu().numpy()

                new_observations, rewards, _, infos = self.envs.step(actions)
                batch_ids = infos['batch_ids']
                yield (observations, actions, rewards, batch_ids)
                observations = new_observations

    def run(self):
        """Process main loop: consume tasks until the None sentinel arrives."""
        while True:
            data = self.task_queue.get()

            if data is None:
                self.envs.close()
                self.task_queue.task_done()
                break

            index, task, kwargs = data
            self.envs.reset_task(task)
            self.sample(index, **kwargs)
            self.task_queue.task_done()
| [
"imhgchoi@korea.ac.kr"
] | imhgchoi@korea.ac.kr |
a8c6f1dbf4ed1dd8116a7bd5ca487fe8c769489f | d1abf524ba77816c50ee9ddbdb14e1a69744c6d4 | /scrollbartest.py | e9ba4222ab6c5a1a0bf977d512c008e04e91b823 | [] | no_license | M1nlex/Spotif-Air | 357bb11304b7741c23a4f3ac4fdf5f0e3b71a6ef | 8448547ade9b8594547105192a7e5e5a3cd0da6c | refs/heads/master | 2020-08-04T07:04:21.369704 | 2020-01-13T01:21:27 | 2020-01-13T01:21:27 | 212,048,454 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | import tkinter as tk
# ************************
# Scrollable Frame Class
# ************************
class ScrollFrame(tk.Frame):
    """A tk.Frame with a vertical scrollbar.

    Child widgets must be added to `self.viewPort` (a frame embedded in an
    internal canvas window), not to the ScrollFrame itself.
    """
    def __init__(self, parent):
        super().__init__(parent) # create a frame (self)

        self.canvas = tk.Canvas(self, borderwidth=0, background="#ffffff", width=200, height=200) # place canvas on self
        self.viewPort = tk.Frame(self.canvas,
                                 background="#ffffff") # place a frame on the canvas, this frame will hold the child widgets
        self.vsb = tk.Scrollbar(self, orient="vertical", command=self.canvas.yview) # place a scrollbar on self
        self.canvas.configure(yscrollcommand=self.vsb.set) # attach scrollbar action to scroll of canvas

        self.vsb.pack(side="right", fill="y") # pack scrollbar to right of self
        self.canvas.pack(side="left", fill="both", expand=True) # pack canvas to left of self and expand to fil
        self.canvas_window = self.canvas.create_window((4, 4), window=self.viewPort, anchor="nw",
                                                       # add view port frame to canvas
                                                       tags="self.viewPort")

        self.viewPort.bind("<Configure>",
                           self.onFrameConfigure) # bind an event whenever the size of the viewPort frame changes.
        self.canvas.bind("<Configure>",
                         self.onCanvasConfigure) # bind an event whenever the size of the viewPort frame changes.

        self.onFrameConfigure(
            None) # perform an initial stretch on render, otherwise the scroll region has a tiny border until the first resize

    def onFrameConfigure(self, event):
        '''Reset the scroll region to encompass the inner frame'''
        # `event` is unused; passed as None once from __init__ for the
        # initial stretch.
        self.canvas.configure(scrollregion=self.canvas.bbox(
            "all")) # whenever the size of the frame changes, alter the scroll region respectively.

    def onCanvasConfigure(self, event):
        '''Reset the canvas window to encompass inner frame when required'''
        canvas_width = event.width
        self.canvas.itemconfig(self.canvas_window,
                               width=canvas_width) # whenever the size of the canvas changes alter the window region respectively.
# ********************************
# Example usage of the above class
# ********************************
class Example(tk.Frame):
    """Demo frame: fills a ScrollFrame with 100 label/button rows."""

    def __init__(self, root):
        """Build the scrollable demo content inside `root`."""
        tk.Frame.__init__(self, root)
        self.scrollFrame = ScrollFrame(self)

        # NOTE: children are added to the view port (scrollFrame.viewPort),
        # NOT to the scrollFrame itself.
        container = self.scrollFrame.viewPort
        for row in range(100):
            label = tk.Label(container, text="%s" % row, width=3,
                             borderwidth="1", relief="solid")
            label.grid(row=row, column=0)
            button = tk.Button(
                container,
                text="this is the second column for row %s" % row,
                # Default-arg binding captures the current row value.
                command=lambda x=row: self.printMsg("Hello " + str(x)))
            button.grid(row=row, column=1)

        # When packing, pack the scrollFrame itself (NOT the viewPort).
        self.scrollFrame.pack(side="top", fill="both", expand=True)

    def printMsg(self, msg):
        """Write `msg` to stdout (button callback target)."""
        print(msg)
if __name__ == "__main__":
root = tk.Tk()
Example(root).pack(side="top", fill="both", expand=True)
root.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
843d7839c9bb41dd49d27d923e3380fa9ccc816a | 2e7430a6d54a78e47ba6bf30ad327337e1bc9d1c | /calendrier_perpetuel.py | 3f4dc7f7873f8eb2b2282ce8a2b4f61706a7572c | [] | no_license | Saihttamu/calendrier-python | a884693ce755a6700081729655def6127dfde017 | e114c27973d419998e761835f5c7388fc95af975 | refs/heads/master | 2021-05-24T14:10:02.239151 | 2020-04-07T23:21:40 | 2020-04-07T23:21:40 | 253,599,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,391 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 14:48:16 2014
@author: Matthias
"""
def date(j, m, a):
    """Return the French weekday name for the date j/m/a (day, month, year).

    The original implementation counted day offsets by hand from a fixed
    reference date (Saturday 23/02/2013) with separate forward/backward
    branches for years, months and days -- an approach that is very easy to
    get wrong.  This version delegates to the standard library's proleptic
    Gregorian calendar, which handles leap years uniformly for years
    1..9999.

    Returns one of: 'lundi', 'mardi', 'mercredi', 'jeudi', 'vendredi',
    'samedi', 'dimanche'.
    """
    import datetime
    jours = ('lundi', 'mardi', 'mercredi', 'jeudi',
             'vendredi', 'samedi', 'dimanche')
    # datetime.date.weekday(): Monday == 0 ... Sunday == 6.
    return jours[datetime.date(a, m, j).weekday()]
"61355238+Saihttamu@users.noreply.github.com"
] | 61355238+Saihttamu@users.noreply.github.com |
96988ea396c1c27e0da004f7c40133aa1825de51 | 42c2100d878476e5636080398bfdc09943e298f4 | /GET_Request/CreatingNewResource.py | be5c4141151525b47dd46cccc77f603e18b3b16d | [] | no_license | Nbash001/Automated_API_Test_Sample | 1c870b80315594948085839c90b09e47dd180952 | 2f11aab29ab39bcac0eb92c15e014afe22e1ad22 | refs/heads/master | 2022-11-16T20:34:34.431591 | 2020-06-28T15:46:45 | 2020-06-28T15:46:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | import requests
import json
import jsonpath

# Demo: POST a JSON body to the reqres.in sample REST API and inspect the
# response (status code, headers, and the generated id).

#API URL
url = "https://reqres.in/api/users"

#Read input json file
# NOTE(review): absolute user-specific path and the handle is never closed;
# consider a relative path and a `with open(...)` block.
file = open('/Users/Naveed/Downloads/TestFiles/postreq.json', 'r')

#below will read it, but look like a string
json_input = file.read()

#parse the file into json format
request_json = json.loads(json_input)
print(request_json)

#Make POST request with Json Input Body
response = requests.post(url,request_json)
print(response.content)

#make an assertion to verify
# 201 Created is the expected status for a successful resource creation.
assert response.status_code == 201

#Fetch Header from response
print(response.headers)
print(response.headers.get('Content-Length'))

#Parse response into Json format
response_json = json.loads(response.text)

#Pick ID using Json path
# NOTE(review): `id` shadows the builtin; jsonpath returns a list of matches.
id = jsonpath.jsonpath(response_json,'id')
print(id[0])
"naveed.bashir001@gmail.com"
] | naveed.bashir001@gmail.com |
4e0fe6547a110182f76e9ab8ad0eb89cb972a754 | e9ffc75f2de77f76bcd10c28195e7b8dcc01db4b | /config/settings/test.py | 95f766b07155f793e1fc376098106f9cf3549397 | [
"MIT"
] | permissive | Parkyes90/pystagram | c2d4b9e38a2065e5d1a8cb4eaa330640efe60a4e | 54a497f4acb70eb4d4288816d9ae460ec5722640 | refs/heads/master | 2023-08-04T06:20:40.536836 | 2023-07-27T10:12:01 | 2023-07-27T10:12:01 | 133,020,662 | 0 | 0 | MIT | 2023-07-27T10:12:02 | 2018-05-11T09:38:24 | JavaScript | UTF-8 | Python | false | false | 2,024 | py | """
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY", default="NxsCski0lpOnFecEQq1YyJgwECIifBOjpuA0ftEf6UOKcGY4z88okutqp0T5rRZF")
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": ""
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
| [
"parkyes90@gmail.com"
] | parkyes90@gmail.com |
ee5617465b61bb3ffc83e4e4ee8df99315b4bd11 | f9f8f723bcdd89970c1a184628e4ad8de97f1c0d | /env/bin/pip-3.8 | 4cc4993bdacaf606c88e4b30422c94f023010632 | [] | no_license | markimfeld/mini-pexels | 80ddc9253b9b64146a75d0e9cf6e96e3789464db | 1e6eb50ff67b66bbf8107218f3ae5a4801ec5223 | refs/heads/master | 2023-01-01T03:09:59.575208 | 2020-10-17T17:45:24 | 2020-10-17T17:45:24 | 304,934,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | 8 | #!/home/marcos/Documents/programming/python/projects/getting_photos_pexels/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"sebastianimfeld@gmail.com"
] | sebastianimfeld@gmail.com |
1f41a398f7b1bbc063c06d139e7b94f847a1fe29 | df306c7a9f1e437f2c4466bff811f61b589431bf | /CiscoASARESTAPI-BlogPost.py | 2990b6bc67fe3adceef8c5d4dd582014adb53114 | [
"MIT"
] | permissive | natecrisler/ciscoasarestapi | be4bafabe37953cc37255a44dc82f024555fd915 | c8816b07935919497e3c1684f66f170f80621c5e | refs/heads/main | 2023-03-23T18:17:23.507861 | 2021-03-11T20:24:05 | 2021-03-11T20:24:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,901 | py | #https://learninglabs.cisco.com
#https://sandboxapicdc.cisco.com/
#https://github.com/CiscoDevNet
import random
import string
import requests
import urllib3
from pprint import pprint
import json
import getpass
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
log_file="fwbackuplog.log"
link_log_file="lnk.log"
user_log="user.log"
headers = {'Content-Type': 'application/json', 'User-Agent':'REST API Agent'}
cli_path = "/api/cli/"
api_path = "/api/mgmtaccess/hosts/"
user_path="/api/objects/localusers/"
openlog=open(log_file, "w")
payload={"commands":["show run user"]}
backup_payload={"commands": ["show run"]}
fw_user=input("Username: ")
try:
fw_pwd=getpass.getpass(prompt='Password: ', stream=None)
#en_pwd=getpass.getpass(prompt='Enable Password: ', stream=None)
except Exception as error:
print ('ERROR',error)
def get_backups():
    """Fetch `show run` from every firewall listed in hosts.txt via the ASA
    REST API and append each raw JSON response to the backup log file.

    Relies on module-level globals: log_file, cli_path, backup_payload,
    headers, fw_user, fw_pwd.
    """
    openlog=open(log_file, "a")
    # NOTE(review): this first hosts.txt handle is immediately shadowed by the
    # `with` below and is never closed -- looks like leftover code; confirm.
    hosts_file = open('hosts.txt', 'r+')
    with open('hosts.txt') as hosts_file:
        hosts_array = hosts_file.read().splitlines()
        for host in hosts_array:
            url = "https://"+ host + cli_path
            print(" ")
            # verify=False: the ASA typically serves a self-signed certificate.
            backupresponse=requests.post(url,auth=(fw_user, fw_pwd),data=json.dumps(backup_payload),headers=headers,verify=False)
            backup_data=json.loads(backupresponse.text)
            pprint(backup_data)
            openlog.write(backupresponse.text)
            openlog.write("\n\n")
    openlog.close()
    print(" ")
    pass
def get_random_password_string(length):
    """Return a random password of `length` characters drawn from ASCII
    letters, digits and punctuation.

    Security fix: uses the `secrets` module (a CSPRNG) instead of `random`,
    whose Mersenne-Twister output is predictable and unsuitable for
    generating device credentials.
    """
    import secrets
    password_characters = string.ascii_letters + string.digits + string.punctuation
    return ''.join(secrets.choice(password_characters) for _ in range(length))
def get_mgmtdata():
    """Query the management-access host entries of every firewall in
    hosts.txt and log type/IP/netmask/interface for each entry.

    Relies on module-level globals: link_log_file, api_path, fw_user, fw_pwd.
    """
    openlog=open(link_log_file, "a")
    # NOTE(review): this handle is shadowed by the `with` below and never
    # closed -- same leftover pattern as in get_backups; confirm and remove.
    hosts_file = open('hosts.txt', 'r+')
    with open('hosts.txt') as hosts_file:
        hosts_array = hosts_file.read().splitlines()
        for host in hosts_array:
            url = "https://"+ host + api_path
            print(" ")
            mgmtresponse=requests.get(url,auth=(fw_user, fw_pwd),verify=False)
            data=json.loads(mgmtresponse.text)
            print(data['selfLink'])
            # Each item describes one permitted management-access host entry.
            for i in data['items']:
                print("type : " + i["type"],i["ip"]["value"],i["netmask"]["value"],i["interface"]["name"])
                strType=i["type"]
                strIP=i["ip"]["value"]
                strNM=i["netmask"]["value"]
                strInt=i["interface"]["name"]
                openlog.write("Type: %s\tIP: %s\tNetmask: %s\tInterface: %s \n" % (strType,strIP,strNM,strInt))
    openlog.write("\n")
    openlog.close()
    print(" ")
def update_passwords():
    """Rotate every local user password on every firewall in hosts.txt.

    First logs the current local users per device, then, for each user,
    generates a fresh 16-char password and pushes a `username ...` CLI
    command to the devices via the REST API.

    NOTE(review): the new plaintext passwords are written to the user log
    file -- confirm that is intended and that the log is protected.
    """
    openlog=open(user_log, "a")
    # NOTE(review): shadowed, never-closed handle (same pattern as the other
    # functions in this module); confirm and remove.
    hosts_file = open('hosts.txt', 'r+')
    with open('hosts.txt') as hosts_file:
        hosts_array = hosts_file.read().splitlines()
        for host in hosts_array:
            url = "https://"+ host + user_path
            print("")
            print(url)
            userreq=requests.get(url,auth=(fw_user, fw_pwd),headers=headers,verify=False)
            usernameres = json.loads(userreq.text)
            # First pass: record the current users and privilege levels.
            for i in usernameres['items']:
                print("Username : " + i["name"],",Privilege Level : ",i["privilegeLevel"])
                str_username=i["name"]
                str_privilegeLevel=i["privilegeLevel"]
                openlog.write("Url: %s \tUsername: %s\tPrivilege Level: %s \n" % (url,str_username,str_privilegeLevel))
            print("")
            # Second pass: push a new password for each user to the devices.
            for j in usernameres['items']:
                username=j["name"]
                privilege=j["privilegeLevel"]
                password=get_random_password_string(16)
                cmdline=f"username {username} password {password} privilege {privilege}"
                # Build the {"commands": ["..."]} JSON payload by hand.
                newcli='"'+ cmdline + '"'
                _jsonpayload="{"+ '"'+"commands"+'"'+':'+"[" + newcli +"]}"
                print(_jsonpayload)
                openlog.write(_jsonpayload)
                # NOTE(review): this inner loop shadows the outer `host` and
                # re-posts the same payload to *every* device in hosts.txt,
                # not just the one whose users were fetched -- confirm intent.
                for host in hosts_array:
                    pwdurl = "https://"+ host + cli_path
                    print(pwdurl)
                    requests.post(pwdurl,auth=(fw_user, fw_pwd),data=_jsonpayload,headers=headers,verify=False)
                openlog.write("\n")
            print("")
    openlog.close()
    print(" ")
if __name__ == "__main__":
#get_creds()#Get credentials for Login and access to your script to run
get_backups()# Back it up before you start.
get_mgmtdata()#Get Management Access Information
update_passwords()#This will change all passwords returned for any local accounts on the ASA!
| [
"noreply@github.com"
] | noreply@github.com |
04b96bb0dd34247639feafded878d9b8e975dada | b4829fd1b79f7a2563a7167bf132f93f1c00fb76 | /publish.py | 1be770e7681835beadc0d47af1c3e0bee8dd2eb7 | [] | no_license | stakodiak/sbs | 0be6d1b8367c31b3dfe92e527d9c072adc278583 | 3272214081715da1a63209ded5c3bc2b732ccfef | refs/heads/master | 2016-08-09T05:29:04.618844 | 2016-01-25T02:18:12 | 2016-01-25T02:18:12 | 50,318,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,958 | py | #!/usr/bin/python
import os
import re
import sys
# Make sure user has S3 credentials.
AWS_ACCESS_KEY = os.environ.get('S3_ACCESS_KEY')
AWS_SECRET_KEY = os.environ.get('S3_SECRET_KEY')
if not (AWS_SECRET_KEY and AWS_ACCESS_KEY):
raise Exception("Could not find AWS credentials.")
# Create directory if it doesn't already exist.
TARGET_DIR = 'articles/'
if not os.path.exists(TARGET_DIR):
os.makedirs(TARGET_DIR)
def publish(bucket_name, files):
    """Publish files to target S3 bucket.

    `bucket_name` - name of S3 bucket for which credentials can update.
    `files` - a list of filenames.

    Returns public-facing endpoint to URL.
    """
    from boto.s3.connection import S3Connection
    connection = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)
    bucket = connection.get_bucket(bucket_name)

    # Update files on target S3 bucket.
    # NOTE(review): updated_files is accumulated but never used or returned.
    updated_files = []
    for filename in files:
        _publish_file(bucket, 'articles/' + filename)
        updated_files.append(filename)

    # Publish homepage.
    _publish_file(bucket, 'index.html')
    return bucket.get_website_endpoint()
def _publish_file(bucket, filename, target=None):
    """Upload local file `filename` to `bucket` with Content-type text/html.

    The S3 key defaults to the local path; pass `target` to store the
    object under a different key.
    """
    from boto.s3.key import Key
    key = Key(bucket)
    key.set_metadata('Content-type', 'text/html')
    key.key = target or filename
    key.set_contents_from_filename(filename)
def main():
    """Get files in current directory and publish them to S3."""
    # Filter out in-progress articles (names starting with a dot).
    # NOTE(review): Python 2 code throughout (print statements; len() on the
    # result of filter() below would fail under Python 3's lazy filter).
    targets = filter(lambda f: not re.search(r'^\.', f),
                     os.listdir(TARGET_DIR))

    # Make sure user supplied bucket.
    try:
        bucket_name = sys.argv[1]
    except IndexError:
        print "Please supply a bucket name."
        sys.exit(1)

    # Push files to S3.
    print "Connecting..."
    published_endpoint = publish(bucket_name, files=targets)
    print "Updated", len(targets), "files at:"
    print published_endpoint
if __name__ == '__main__':
main()
| [
"astac1@umbc.edu"
] | astac1@umbc.edu |
c718484b85a780242417598a63d619f3e8ddf529 | 1aeee32fd5ab3f9b98264f59478445fd6f54b38a | /CommonUtil/PyTest.py | 7cfde4125d0b216217b0f0f42ba7c568e9d17674 | [] | no_license | li-zheng-hao/OnmyojiScript | dee9618ae5eb9e233b616cfbc413ceefcf11358c | f22dfbf8a54ba80ca419e1905a27d874a5460ad6 | refs/heads/master | 2022-12-14T22:50:44.361578 | 2020-02-10T13:09:39 | 2020-02-10T13:09:39 | 222,662,528 | 6 | 2 | null | 2022-12-08T07:03:55 | 2019-11-19T09:49:48 | Python | UTF-8 | Python | false | false | 274 | py | # import pytest
import win32gui
from ImageProcessModule.GameControl import GameControl
from YuHunModule.State import State
def test_window_full_shot():
    """Smoke test: capture a full screenshot of one game window to disk.

    NOTE(review): hwnd is a hard-coded window handle from a live session and
    will not exist on other machines or after a restart -- this only works
    while that exact window is open; consider win32gui.FindWindow instead.
    """
    hwnd = 0x0004084E
    game_control=GameControl(hwnd,State())
    game_control.window_full_shot('test_img.png')
| [
"1263212577@qq.com"
] | 1263212577@qq.com |
3adbab7f1457ee5c0f126f540350eff00b557ef3 | 61d4ae3aeed5375467bfa3b00f0ed43d1c44d122 | /sessions/models.py | 0c4a457ead316324340fb0284936a0eb2ff92ba7 | [] | no_license | beratakuzum/django-rest-crud | ed8e46d7a6157737b48adaca68babdb16920564d | 1cdf311cf803747d8ffdd29a251e3b850487de5f | refs/heads/master | 2023-02-15T22:18:31.351802 | 2021-01-12T08:28:44 | 2021-01-12T08:28:44 | 322,912,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django.db import models
from events.models import Event
class Session(models.Model):
    """A talk/session scheduled within an Event.

    Sessions are ordered chronologically by start time (see Meta.ordering).
    """
    # Human-readable session title.
    name = models.CharField(max_length=100, null=False, blank=False)
    # Scheduled start and end of the session.
    start_date = models.DateTimeField(null=False, blank=False)
    end_date = models.DateTimeField(null=False, blank=False)
    # Name of the presenter.
    speaker = models.CharField(max_length=100, null=False, blank=False)
    # Owning event; deleting the event cascades to its sessions.
    event = models.ForeignKey(Event, on_delete=models.CASCADE)

    class Meta:
        ordering = ['start_date']

    def __str__(self):
        # Shown in the admin and shell output.
        return "%s" % self.name
"beratakuzum34@gmail.com"
] | beratakuzum34@gmail.com |
ae1990319995c77926580b4e072215f089eee820 | bc0f6da4e5378893d06dc93e98a56bfcf702e462 | /p5.py | 54907907953fb7d9683b56d631d8ed528a6e461b | [] | no_license | pcoving/euler | 23c17ac3e9fa22db49ae8cc060383de6289706cd | 3beb9436a3cfd7a5a77cfc4e7c3f104c6d0dbee5 | refs/heads/master | 2020-03-31T09:06:31.093932 | 2014-06-30T16:19:05 | 2014-06-30T16:19:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | '''
2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
'''
N = 20
n = N
done = False
while not done:
done = True
for m in range(2,N+1):
if n/m != float(n)/m:
done = False
if not done:
n += N
print n
| [
"pmcovington@gmail.com"
] | pmcovington@gmail.com |
a0c5f212fd43051e67683762fd380806b4dfcd78 | 0116e224af093d4356533998ed466ac7930654ea | /python_code.py | c6527aff98ab0e295965b402228259737cf6befc | [] | no_license | zyad9216/Test | 069ddd4acdb8f5834357c2af18a96911836886e4 | 00cb9f2b49f7e045f6ccb7e3b1c9594bba0ee203 | refs/heads/master | 2022-11-30T05:10:53.240926 | 2020-07-04T14:11:10 | 2020-07-04T14:11:10 | 277,113,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | i
# Emit the two greeting lines on stdout.
for greeting in ("Hello Github!", "Hello World!"):
    print(greeting)
| [
"zyadabuharaz@gmail.com"
] | zyadabuharaz@gmail.com |
44f8d9c69390d7370bb31cd2234c246a450a52fd | 032a59902e47f6843ac9c76f6e27eb1d4a78c27d | /scripts/python/ep_fec_rc_no_tmt.py | f6834bb4c0dc8f9a15b64cdf8e6a41eda5f09f3e | [
"Apache-2.0"
] | permissive | OSADP/Pikalert-Vehicle-Data-Translator- | 17411c602879eb4fb080201973b4a966f9405a4b | 295da604408f6f13af0301b55476a81311459386 | refs/heads/master | 2021-03-27T12:02:18.535636 | 2017-04-03T16:09:38 | 2017-04-03T16:09:38 | 25,056,408 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,345 | py | #!/usr/bin/env python
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2015
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Laboratory(RAL)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** See LICENCE.TXT if applicable for licence details
# ** 2015/02/02 20:17:38
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
import sys,os
import sys_path
import rctm_path
import obs
import time
import tim
proc_script = "run_proc.py"
exec_cmd = "fec"
Site_list_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "static_data/site_list")
Cdl_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "static_data/cdl")
Params_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "static_data/params")
input_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "rc_no_tmt")
site_list = "%s/%s" % (Site_list_dir, "road_cond_sites.asc")
cdl_file = "%s/%s" % (Cdl_dir, "road_cond.cdl")
nbr_file = "%s/%s" % (Site_list_dir, "pp_nbr.nc")
age = "86400 86400"
obs_dir = "None"
concat_meso_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "concat_meso")
output_dir = "%s/%s" % (rctm_path.Rctm_root_dir, "fec_rc_no_tmt")
log_base = "%s/%s" % (rctm_path.Log_dir, "fec_rc_no_tmt")
params_file = "%s/%s" % (Params_dir, "fec.rc.params")
try:
date_time = os.environ['TEST_MODE_TIME']
date_tup = time.strptime("%s" % (date_time),"%Y%m%d.%H%M%S")
fcst_time = tim.mkgmtime(date_tup)
date_str = "-d %s" % (date_time)
except:
fcst_time = time.time()
date_tup = time.gmtime(fcst_time)
date_time = time.strftime("%Y%m%d.%H%M%S", date_tup)
date_str = ''
fcst_time = (int(fcst_time)/3600) * 3600
static_files = "%s %s %s" % (site_list, nbr_file, cdl_file)
concat_meso_file = obs.get_concat_meso_file(concat_meso_dir, date_time)
if(os.path.exists(concat_meso_file)):
concat_meso_str = "-f -c %s" % concat_meso_file
else:
concat_meso_str = ""
command = "%s %s -e %s -a %s -u %s %s -s %s -i %s %s -o %s -p %s -l %s %s" % (proc_script, date_str, exec_cmd, age, fcst_time, fcst_time, static_files, obs_dir, input_dir, output_dir, params_file, log_base, concat_meso_str)
#print "command = ", command
ret = os.system(command)
#ret = 0
if (ret != 0):
sys.exit(1)
else:
sys.exit(0)
| [
"bpetzke@ucar.edu"
] | bpetzke@ucar.edu |
702ca7b7a24ac98f70eb188c97ab812a717001c7 | 11b9d0e2d99d135366d942e0b8577eec6835bb6e | /Screens.spec | 2ae3876072632550acac2ab7b9dcd6cc73f7e4fd | [] | no_license | rhendre/TheOtherSide | ba110a82b9524c8c67678bfff8726435471fe7c9 | 41f06bc77aed2c4ea46635a4703e628883fe1e37 | refs/heads/master | 2021-04-05T16:20:48.540568 | 2020-04-18T13:56:13 | 2020-04-18T13:56:13 | 248,576,837 | 0 | 0 | null | 2020-03-19T18:30:39 | 2020-03-19T18:30:38 | null | UTF-8 | Python | false | false | 996 | spec | # -*- mode: python ; coding: utf-8 -*-
# PyInstaller build specification.  This file is executed by PyInstaller,
# which injects Analysis/PYZ/EXE into the namespace -- it is not a
# standalone Python script.
block_cipher = None
# Collect the application's modules, binaries and bundled data files.
a = Analysis(['C:\\Users\\rhendre\\PycharmProjects\\TheOtherSide\\Editor\\Screens.py'],
             pathex=['C:\\Users\\rhendre\\PycharmProjects\\TheOtherSide'],
             binaries=[],
             datas=[ ('src/README.txt', '.') ],
             hiddenimports=[],
             hookspath=[],
             runtime_hooks=[],
             excludes=[],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
# Archive of the collected pure-Python modules.
pyz = PYZ(a.pure, a.zipped_data,
             cipher=block_cipher)
# Single-file executable; console=False builds a windowed (no-console) app.
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [],
          name='Screens',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=False )
| [
"noreply@github.com"
] | noreply@github.com |
2718b4a553b211d4a9237d21b069590a78c1b9fc | df5d82456b26461643fe0f3c0d7f4b34a521afae | /volt/controllers.py | c8331417109714ac41cc8880e0b87eeefd6562ad | [] | no_license | ali96343/ombott-test | b5bfbc4e079ca3a50b40b210438405acdba65765 | 7d7c46d5cd5c73a92fae08247917ac988b83b9c7 | refs/heads/master | 2023-08-21T15:02:41.614957 | 2021-10-07T06:28:59 | 2021-10-07T06:28:59 | 380,330,616 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,870 | py | #
# py4web app, AI-biorex ported 26.04.2021 14:45:45 UTC+3, src: https://github.com/themesberg/volt-bootstrap-5-dashboard
# https://github.com/ali96343/facep4w
#
import os, json, uuid
import ombott as bottle
from py4web import action, request, response, abort, redirect, URL, Field
from py4web.utils.form import Form, FormStyleBulma
from py4web.utils.grid import Grid
from py4web.utils.publisher import Publisher, ALLOW_ALL_POLICY
from pydal.validators import IS_NOT_EMPTY, IS_INT_IN_RANGE, IS_IN_SET, IS_IN_DB
from py4web.core import Template, Reloader
from py4web.utils.dbstore import DBStore
from py4web import Session, Cache, Translator, Flash, DAL
from py4web.utils.url_signer import URLSigner
from yatl.helpers import INPUT, H1, HTML, BODY, A, DIV, SPAN, P
from .common import db, session, T, cache, authenticated, unauthenticated, auth
from .settings import APP_NAME
# ---------------------- Global -----------------------------------------------------
# exposes services necessary to access the db.thing via ajax
publisher = Publisher(db, policy=ALLOW_ALL_POLICY)
# signs URLs so AJAX callbacks can be verified against the current session
url_signer = URLSigner(session)
# app-wide settings: debug tracing switch, app name, and static asset path prefix
Glb= {'debug': True , 'my_app_name': APP_NAME, 'tte_path': '/static/tte' }
# ---------------------- Utils -------------------------------------------------------
def insert_form_vars(myform, mytable):
    """Insert an accepted form's field values into *mytable* (pydal table).

    The form's ``f0`` field is treated as required: nothing is inserted
    when it is missing or empty.  Returns the new row id, or None when no
    row was inserted (or the inserted row could not be read back).
    Debug tracing is controlled by ``Glb['debug']``.
    """
    row_id, table_row = None, None
    if Glb['debug']:
        print("app:", Glb['my_any' and 'my_app_name' or 'my_app_name'])
        # dump the submitted fields (skip the py4web CSRF form key)
        for k, v in myform.vars.items():
            if k != '_formkey':
                print(f' {k}: {v}')
    f0_fld = myform.vars.get('f0', None)
    if f0_fld is not None and len(f0_fld):
        row_id = mytable.insert(**mytable._filter_fields(myform.vars))
        db.commit()
    if row_id is not None:
        # read the row back to verify the insert really happened
        table_row = mytable(row_id)
    if table_row is not None:
        if Glb['debug']:
            print(f' inserted: \"{f0_fld}\" into {mytable.f0}, id = {row_id}')
            print(f" select : \"{table_row.f0}\" from {mytable.f0}, id = {row_id}")
            print()
    else:
        if Glb['debug']:
            print(f" no entry inserted: (f0_fld is None) or (len(f0_fld) == 0)")
            print()
    return row_id
@action('callback', method="GET")
# Note that we do not use a template. This is a JSON API, not a "web page".
@action.uses(url_signer.verify())
def callback():
    """Signed-URL JSON endpoint: log the query params and echo back 'echo'."""
    print("Called with:", dict(request.params))
    return dict(messages=request.params.echo)
#
def json2user(mess='mymess', icon_type='warning', js_alert='sweet2'):
    """Serialize a user-facing message as a JSON response body.

    Sets the JSON content type on the response and returns the encoded
    payload (message text, icon type, and the client-side alert library).
    """
    response.headers["Content-Type"] = "application/json"
    payload = {'messages': f'{mess}', 'icon_type': icon_type, 'js_alert': js_alert}
    return json.dumps(payload)
# ---------------------- Controllers ------------------------------------------------
@action('X404', method=["GET", "POST"] )
@action.uses(db, session, T, Template('404.html', delimiters='[%[ ]]',))
def X404():
    """Serve the static 404 page.  All locals go to the template via locals()."""
    ctrl_info= { 'c':'X404', 'v':'404.html' }
    messages = ['X404', '404.html']
    #
    ctrl_template_url = "\'" + URL('X404' ) + "\'"
    return locals()
@action('X500', method=["GET", "POST"] )
@action.uses(db, session, T, Template('500.html', delimiters='[%[ ]]',))
def X500():
    """Serve the static 500 page.  All locals go to the template via locals()."""
    ctrl_info= { 'c':'X500', 'v':'500.html' }
    messages = ['X500', '500.html']
    #
    ctrl_template_url = "\'" + URL('X500' ) + "\'"
    return locals()
@action('lock', method=["GET", "POST"] )
@action.uses(db, session, T, Template('lock.html', delimiters='[%[ ]]',))
def lock():
    """Render lock.html and handle its form (db.dflock0).

    NOTE: ``return locals()`` exposes every local name to the template,
    so the variable names here are part of the template contract.
    """
    ctrl_info= { 'c':'lock', 'v':'lock.html' }
    messages = ['lock', 'lock.html']
    #
    ctrl_template_url = "\'" + URL('lock' ) + "\'"
    #
    # dbio=False: the insert is done manually via insert_form_vars
    flock0= Form(db.dflock0, dbio=False, formstyle=FormStyleBulma)
    if flock0.accepted:
        # 'success' when a row was really inserted, 'info' otherwise
        icon_type ='success' if insert_form_vars(flock0, db.dflock0) else 'info'
        return json2user(mess = str( flock0.form_name ), icon_type=icon_type )
    elif flock0.errors:
        print("flock0 has errors: %s" % (flock0.errors))
        return json2user(mess = str(flock0.form_name), icon_type = 'error')
    return locals()
@action('index', method=["GET", "POST"] )
@action.uses(db, session, T, Template('index.html', delimiters='[%[ ]]',))
def index():
    """Render index.html and handle its form (db.dfindex0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'index', 'v':'index.html' }
    messages = ['index', 'index.html']
    #
    ctrl_template_url = "\'" + URL('index' ) + "\'"
    #
    findex0= Form(db.dfindex0, dbio=False, formstyle=FormStyleBulma)
    if findex0.accepted:
        icon_type ='success' if insert_form_vars(findex0, db.dfindex0) else 'info'
        return json2user(mess = str( findex0.form_name ), icon_type=icon_type )
    elif findex0.errors:
        print("findex0 has errors: %s" % (findex0.errors))
        return json2user(mess = str(findex0.form_name), icon_type = 'error')
    return locals()
@action('forms', method=["GET", "POST"] )
@action.uses(db, session, T, Template('forms.html', delimiters='[%[ ]]',))
def forms():
    """Render forms.html and handle its form (db.dfforms0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'forms', 'v':'forms.html' }
    messages = ['forms', 'forms.html']
    #
    ctrl_template_url = "\'" + URL('forms' ) + "\'"
    #
    fforms0= Form(db.dfforms0, dbio=False, formstyle=FormStyleBulma)
    if fforms0.accepted:
        icon_type ='success' if insert_form_vars(fforms0, db.dfforms0) else 'info'
        return json2user(mess = str( fforms0.form_name ), icon_type=icon_type )
    elif fforms0.errors:
        print("fforms0 has errors: %s" % (fforms0.errors))
        return json2user(mess = str(fforms0.form_name), icon_type = 'error')
    return locals()
@action('modals', method=["GET", "POST"] )
@action.uses(db, session, T, Template('modals.html', delimiters='[%[ ]]',))
def modals():
    """Render modals.html and handle its three forms (db.dfmodals0..2).

    Whichever form was submitted returns a JSON status; a plain GET falls
    through to ``return locals()`` (names are the template contract).
    """
    ctrl_info= { 'c':'modals', 'v':'modals.html' }
    messages = ['modals', 'modals.html']
    #
    ctrl_template_url = "\'" + URL('modals' ) + "\'"
    #
    fmodals0= Form(db.dfmodals0, dbio=False, formstyle=FormStyleBulma)
    if fmodals0.accepted:
        icon_type ='success' if insert_form_vars(fmodals0, db.dfmodals0) else 'info'
        return json2user(mess = str( fmodals0.form_name ), icon_type=icon_type )
    elif fmodals0.errors:
        print("fmodals0 has errors: %s" % (fmodals0.errors))
        return json2user(mess = str(fmodals0.form_name), icon_type = 'error')
    #
    fmodals1= Form(db.dfmodals1, dbio=False, formstyle=FormStyleBulma)
    if fmodals1.accepted:
        icon_type ='success' if insert_form_vars(fmodals1, db.dfmodals1) else 'info'
        return json2user(mess = str( fmodals1.form_name ), icon_type=icon_type )
    elif fmodals1.errors:
        print("fmodals1 has errors: %s" % (fmodals1.errors))
        return json2user(mess = str(fmodals1.form_name), icon_type = 'error')
    #
    fmodals2= Form(db.dfmodals2, dbio=False, formstyle=FormStyleBulma)
    if fmodals2.accepted:
        icon_type ='success' if insert_form_vars(fmodals2, db.dfmodals2) else 'info'
        return json2user(mess = str( fmodals2.form_name ), icon_type=icon_type )
    elif fmodals2.errors:
        print("fmodals2 has errors: %s" % (fmodals2.errors))
        return json2user(mess = str(fmodals2.form_name), icon_type = 'error')
    return locals()
@action('buttons', method=["GET", "POST"] )
@action.uses(db, session, T, Template('buttons.html', delimiters='[%[ ]]',))
def buttons():
    """Render buttons.html and handle its form (db.dfbuttons0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'buttons', 'v':'buttons.html' }
    messages = ['buttons', 'buttons.html']
    #
    ctrl_template_url = "\'" + URL('buttons' ) + "\'"
    #
    fbuttons0= Form(db.dfbuttons0, dbio=False, formstyle=FormStyleBulma)
    if fbuttons0.accepted:
        icon_type ='success' if insert_form_vars(fbuttons0, db.dfbuttons0) else 'info'
        return json2user(mess = str( fbuttons0.form_name ), icon_type=icon_type )
    elif fbuttons0.errors:
        print("fbuttons0 has errors: %s" % (fbuttons0.errors))
        return json2user(mess = str(fbuttons0.form_name), icon_type = 'error')
    return locals()
@action('signXin', method=["GET", "POST"] )
@action.uses(db, session, T, Template('sign-in.html', delimiters='[%[ ]]',))
def signXin():
    """Render sign-in.html and handle its form (db.dfsignXin0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'signXin', 'v':'sign-in.html' }
    messages = ['signXin', 'sign-in.html']
    #
    ctrl_template_url = "\'" + URL('signXin' ) + "\'"
    #
    fsignXin0= Form(db.dfsignXin0, dbio=False, formstyle=FormStyleBulma)
    if fsignXin0.accepted:
        icon_type ='success' if insert_form_vars(fsignXin0, db.dfsignXin0) else 'info'
        return json2user(mess = str( fsignXin0.form_name ), icon_type=icon_type )
    elif fsignXin0.errors:
        print("fsignXin0 has errors: %s" % (fsignXin0.errors))
        return json2user(mess = str(fsignXin0.form_name), icon_type = 'error')
    return locals()
@action('signXup', method=["GET", "POST"] )
@action.uses(db, session, T, Template('sign-up.html', delimiters='[%[ ]]',))
def signXup():
    """Render sign-up.html and handle its form (db.dfsignXup0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'signXup', 'v':'sign-up.html' }
    messages = ['signXup', 'sign-up.html']
    #
    ctrl_template_url = "\'" + URL('signXup' ) + "\'"
    #
    fsignXup0= Form(db.dfsignXup0, dbio=False, formstyle=FormStyleBulma)
    if fsignXup0.accepted:
        icon_type ='success' if insert_form_vars(fsignXup0, db.dfsignXup0) else 'info'
        return json2user(mess = str( fsignXup0.form_name ), icon_type=icon_type )
    elif fsignXup0.errors:
        print("fsignXup0 has errors: %s" % (fsignXup0.errors))
        return json2user(mess = str(fsignXup0.form_name), icon_type = 'error')
    return locals()
@action('settings', method=["GET", "POST"] )
@action.uses(db, session, T, Template('settings.html', delimiters='[%[ ]]',))
def settings():
    """Render settings.html and handle its two forms (db.dfsettings0/1).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'settings', 'v':'settings.html' }
    messages = ['settings', 'settings.html']
    #
    ctrl_template_url = "\'" + URL('settings' ) + "\'"
    #
    fsettings0= Form(db.dfsettings0, dbio=False, formstyle=FormStyleBulma)
    if fsettings0.accepted:
        icon_type ='success' if insert_form_vars(fsettings0, db.dfsettings0) else 'info'
        return json2user(mess = str( fsettings0.form_name ), icon_type=icon_type )
    elif fsettings0.errors:
        print("fsettings0 has errors: %s" % (fsettings0.errors))
        return json2user(mess = str(fsettings0.form_name), icon_type = 'error')
    #
    fsettings1= Form(db.dfsettings1, dbio=False, formstyle=FormStyleBulma)
    if fsettings1.accepted:
        icon_type ='success' if insert_form_vars(fsettings1, db.dfsettings1) else 'info'
        return json2user(mess = str( fsettings1.form_name ), icon_type=icon_type )
    elif fsettings1.errors:
        print("fsettings1 has errors: %s" % (fsettings1.errors))
        return json2user(mess = str(fsettings1.form_name), icon_type = 'error')
    return locals()
@action('dashboard', method=["GET", "POST"] )
@action.uses(db, session, T, Template('dashboard.html', delimiters='[%[ ]]',))
def dashboard():
    """Render dashboard.html with all db.tdashboard0 rows and handle its form.

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'dashboard', 'v':'dashboard.html' }
    messages = ['dashboard', 'dashboard.html']
    #
    ctrl_template_url = "\'" + URL('dashboard' ) + "\'"
    # rows rendered by the template's dashboard table
    rows_tdashboard0= db(db.tdashboard0).select()
    #
    fdashboard0= Form(db.dfdashboard0, dbio=False, formstyle=FormStyleBulma)
    if fdashboard0.accepted:
        icon_type ='success' if insert_form_vars(fdashboard0, db.dfdashboard0) else 'info'
        return json2user(mess = str( fdashboard0.form_name ), icon_type=icon_type )
    elif fdashboard0.errors:
        print("fdashboard0 has errors: %s" % (fdashboard0.errors))
        return json2user(mess = str(fdashboard0.form_name), icon_type = 'error')
    return locals()
@action('typography', method=["GET", "POST"] )
@action.uses(db, session, T, Template('typography.html', delimiters='[%[ ]]',))
def typography():
    """Render typography.html and handle its form (db.dftypography0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'typography', 'v':'typography.html' }
    messages = ['typography', 'typography.html']
    #
    ctrl_template_url = "\'" + URL('typography' ) + "\'"
    #
    ftypography0= Form(db.dftypography0, dbio=False, formstyle=FormStyleBulma)
    if ftypography0.accepted:
        icon_type ='success' if insert_form_vars(ftypography0, db.dftypography0) else 'info'
        return json2user(mess = str( ftypography0.form_name ), icon_type=icon_type )
    elif ftypography0.errors:
        print("ftypography0 has errors: %s" % (ftypography0.errors))
        return json2user(mess = str(ftypography0.form_name), icon_type = 'error')
    return locals()
@action('transactions', method=["GET", "POST"] )
@action.uses(db, session, T, Template('transactions.html', delimiters='[%[ ]]',))
def transactions():
    """Render transactions.html with all db.ttransactions0 rows and handle its form.

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'transactions', 'v':'transactions.html' }
    messages = ['transactions', 'transactions.html']
    #
    ctrl_template_url = "\'" + URL('transactions' ) + "\'"
    # rows rendered by the template's transactions table
    rows_ttransactions0= db(db.ttransactions0).select()
    #
    ftransactions0= Form(db.dftransactions0, dbio=False, formstyle=FormStyleBulma)
    if ftransactions0.accepted:
        icon_type ='success' if insert_form_vars(ftransactions0, db.dftransactions0) else 'info'
        return json2user(mess = str( ftransactions0.form_name ), icon_type=icon_type )
    elif ftransactions0.errors:
        print("ftransactions0 has errors: %s" % (ftransactions0.errors))
        return json2user(mess = str(ftransactions0.form_name), icon_type = 'error')
    return locals()
@action('notifications', method=["GET", "POST"] )
@action.uses(db, session, T, Template('notifications.html', delimiters='[%[ ]]',))
def notifications():
    """Render notifications.html and handle its form (db.dfnotifications0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'notifications', 'v':'notifications.html' }
    messages = ['notifications', 'notifications.html']
    #
    ctrl_template_url = "\'" + URL('notifications' ) + "\'"
    #
    fnotifications0= Form(db.dfnotifications0, dbio=False, formstyle=FormStyleBulma)
    if fnotifications0.accepted:
        icon_type ='success' if insert_form_vars(fnotifications0, db.dfnotifications0) else 'info'
        return json2user(mess = str( fnotifications0.form_name ), icon_type=icon_type )
    elif fnotifications0.errors:
        print("fnotifications0 has errors: %s" % (fnotifications0.errors))
        return json2user(mess = str(fnotifications0.form_name), icon_type = 'error')
    return locals()
@action('upgradeXtoXpro', method=["GET", "POST"] )
@action.uses(db, session, T, Template('upgrade-to-pro.html', delimiters='[%[ ]]',))
def upgradeXtoXpro():
    """Render upgrade-to-pro.html with all db.tupgradeXtoXpro0 rows (no form)."""
    ctrl_info= { 'c':'upgradeXtoXpro', 'v':'upgrade-to-pro.html' }
    messages = ['upgradeXtoXpro', 'upgrade-to-pro.html']
    #
    ctrl_template_url = "\'" + URL('upgradeXtoXpro' ) + "\'"
    rows_tupgradeXtoXpro0= db(db.tupgradeXtoXpro0).select()
    return locals()
@action('resetXpassword', method=["GET", "POST"] )
@action.uses(db, session, T, Template('reset-password.html', delimiters='[%[ ]]',))
def resetXpassword():
    """Render reset-password.html and handle its form (db.dfresetXpassword0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'resetXpassword', 'v':'reset-password.html' }
    messages = ['resetXpassword', 'reset-password.html']
    #
    ctrl_template_url = "\'" + URL('resetXpassword' ) + "\'"
    #
    fresetXpassword0= Form(db.dfresetXpassword0, dbio=False, formstyle=FormStyleBulma)
    if fresetXpassword0.accepted:
        icon_type ='success' if insert_form_vars(fresetXpassword0, db.dfresetXpassword0) else 'info'
        return json2user(mess = str( fresetXpassword0.form_name ), icon_type=icon_type )
    elif fresetXpassword0.errors:
        print("fresetXpassword0 has errors: %s" % (fresetXpassword0.errors))
        return json2user(mess = str(fresetXpassword0.form_name), icon_type = 'error')
    return locals()
@action('forgotXpassword', method=["GET", "POST"] )
@action.uses(db, session, T, Template('forgot-password.html', delimiters='[%[ ]]',))
def forgotXpassword():
    """Render forgot-password.html and handle its form (db.dfforgotXpassword0).

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'forgotXpassword', 'v':'forgot-password.html' }
    messages = ['forgotXpassword', 'forgot-password.html']
    #
    ctrl_template_url = "\'" + URL('forgotXpassword' ) + "\'"
    #
    fforgotXpassword0= Form(db.dfforgotXpassword0, dbio=False, formstyle=FormStyleBulma)
    if fforgotXpassword0.accepted:
        icon_type ='success' if insert_form_vars(fforgotXpassword0, db.dfforgotXpassword0) else 'info'
        return json2user(mess = str( fforgotXpassword0.form_name ), icon_type=icon_type )
    elif fforgotXpassword0.errors:
        print("fforgotXpassword0 has errors: %s" % (fforgotXpassword0.errors))
        return json2user(mess = str(fforgotXpassword0.form_name), icon_type = 'error')
    return locals()
@action('bootstrapXtables', method=["GET", "POST"] )
@action.uses(db, session, T, Template('bootstrap-tables.html', delimiters='[%[ ]]',))
def bootstrapXtables():
    """Render bootstrap-tables.html with rows from two tables and handle its form.

    Local variable names are part of the template contract (``return locals()``).
    """
    ctrl_info= { 'c':'bootstrapXtables', 'v':'bootstrap-tables.html' }
    messages = ['bootstrapXtables', 'bootstrap-tables.html']
    #
    ctrl_template_url = "\'" + URL('bootstrapXtables' ) + "\'"
    # rows for the two tables rendered by the template
    rows_tbootstrapXtables0= db(db.tbootstrapXtables0).select()
    rows_tbootstrapXtables1= db(db.tbootstrapXtables1).select()
    #
    fbootstrapXtables0= Form(db.dfbootstrapXtables0, dbio=False, formstyle=FormStyleBulma)
    if fbootstrapXtables0.accepted:
        icon_type ='success' if insert_form_vars(fbootstrapXtables0, db.dfbootstrapXtables0) else 'info'
        return json2user(mess = str( fbootstrapXtables0.form_name ), icon_type=icon_type )
    elif fbootstrapXtables0.errors:
        print("fbootstrapXtables0 has errors: %s" % (fbootstrapXtables0.errors))
        return json2user(mess = str(fbootstrapXtables0.form_name), icon_type = 'error')
    return locals()
from pydal.restapi import RestAPI, Policy
# REST access policy for the /api endpoint: every table, every method.
# NOTE(review): all verbs are authorized for anyone with no auth check --
# confirm this wide-open policy is intended outside of a demo/test app.
policy = Policy()
policy.set('*', 'GET', authorize=True, allowed_patterns=['*'])
policy.set('*', 'PUT', authorize=True)
policy.set('*', 'POST', authorize=True)
policy.set('*', 'DELETE', authorize=True)
@action('api/<tablename>/', method=["GET", "POST", "PUT", "DELETE"])
@action('api/<tablename>/<rec_id>', method=["GET", "POST", "PUT", "DELETE"])
def api(tablename, rec_id=None):
    """Generic REST endpoint: delegate CRUD on *tablename* to pydal's RestAPI.

    rec_id selects a single record; query string and POST body are passed
    through unchanged (see the curl/httpie examples below).
    """
    return RestAPI(db, policy)(request.method,
                               tablename,
                               rec_id,
                               request.GET,
                               request.POST
                               )
#
# curl -X GET http://localhost:8000/volt/api/test_table/
# curl -X GET http://localhost:8000/volt/api/test_table/9
# curl -X DELETE http://localhost:8000/volt/api/test_table/2
# curl -X POST -d 'f0=1111111&f1=2222222222&f2=33333333333' http://localhost:8000/volt/api/test_table/
# curl -X PUT -d 'f0=1111111&f1=2222222222&f2=33333333333' http://localhost:8000/volt/api/test_table/9
# curl -X POST -d f0=1111111 -d f1=2222222222 -d f2=8888888888 http://localhost:8000/volt/api/test_table/
#
# pip install httpie
# http localhost:8000/volt/api/test_table/
# http localhost:8000/volt/api/test_table/9
# http -f POST localhost:8000/volt/api/test_table/ f0=111111 f1=2222222 f2=333333
# http -f DELETE localhost:8000/volt/api/test_table/2
# http -f PUT localhost:8000/volt/api/test_table/2 f0=111111 f1=2222222 f2=333333
#------------------------------------------------------------------------------------
#curl -i -X POST -H 'Content-Type: application/json' -d '{"name": "New item", "year": "2009"}' http://rest-api.io/items
#curl -i -X PUT -H 'Content-Type: application/json' -d '{"name": "Updated item", "year": "2010"}' http://rest-api.io/items/5069b47aa892630aae059584
@bottle.error(404)
def error404(error):
    """Bottle/ombott 404 handler: redirect (303) to an app root.

    If the first path segment matches a registered route prefix, redirect
    there; otherwise fall back to this app's own root.  Debug tracing is
    collected in func_mess and printed when Glb["debug"] is on.
    """
    func_mess = []
    def check_rule(maybe_app_root):
        # True when some registered route's rule equals the candidate root
        for e in Reloader.ROUTES:
            if ('rule' in e ) and ( e["rule"] == maybe_app_root) :
                Glb["debug"] and func_mess.append(f"    found_rule: {e['rule']}")
                return True
        return False
    location = "/" + Glb["my_app_name"]
    lx = bottle.request.path.split("/", 2)
    if len(lx) >= 2 and check_rule("/" + lx[1]):
        location = "/" + lx[1]
    # this code is not necessary for modern py4web
    #
    # files_prefix = location + Glb["tte_path"]
    #
    # location_2x = location + location + "/"
    # files_prefix_2x = files_prefix + files_prefix + "/"
    #
    # def rm_bad_prefix(bad_prefix):
    #    new_location = bottle.request.path.replace(bad_prefix, "", 1)
    #    Glb["debug"] and func_mess.append(f"    rm_bad_prefix: {bad_prefix}")
    #    return new_location
    #
    # if bottle.request.path.startswith(files_prefix_2x):
    #    if len(bottle.request.path) > len(files_prefix_2x):
    #        location = rm_bad_prefix(files_prefix)
    #
    # elif bottle.request.path.startswith(location_2x):
    #    if len(bottle.request.path) > len(location_2x):
    #        location = rm_bad_prefix(location)
    if Glb["debug"]:
        debug_mess = [ f"404 app=/{Glb['my_app_name']}, err_path={bottle.request.path}",
                       f"    info: {error}", ]
        if len(func_mess):
            debug_mess += func_mess
        debug_mess.append(f"    goto_new_path: {location}\n")
        print("\n".join(debug_mess))
    # 303 See Other: browser retries the corrected location with GET
    bottle.response.status = 303
    bottle.response.set_header("Location", location)
# -------------------- tabinfo: my backend ------------------------------------
#
#from .atab_utils import mytab_grid
#from .images_utils import ima_grid
#from .upload_utils import p4wupload_file
#from .tlist_utils import tlist
#
#@unauthenticated("tabinfo", "tabinfo.html")
#def tabinfo():
# user = auth.get_user()
# message = T("Hello {first_name}".format(**user) if user else "Hello")
# menu = DIV(
# P( "test-demo for sql2table ( SQLTABLE from web2py)"),
# A( "sql2table", _role="button", _href=URL('mytab_grid', ),) ,
# A( "p4wupload_file", _role="button", _href=URL('p4wupload_file', ),) ,
# A( "tlist", _role="button", _href=URL('tlist', ),) ,
# A( "app_images", _role="button", _href=URL('ima_grid', ),) ,
# )
# return dict(message=message, menu=menu)
#
| [
"ab96343@gmail.com"
] | ab96343@gmail.com |
168a729a213cb05a64c5b3b4dc1ab8aa2155d254 | ac9e892c02af18cea990bb0a3f60071b34a03194 | /pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py | fc2db995b809735e7cefe6fc0d8df2ffd185d4ee | [
"MIT"
] | permissive | limsijie93/pytorch-pfn-extras | 1323e796d59fe113ee86f631cc65ad44c7914a77 | 4b675fce8f4a420d87f1685423a9e62dbe598700 | refs/heads/master | 2022-09-18T09:18:25.459126 | 2020-06-04T04:43:47 | 2020-06-04T04:43:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,131 | py | class ManualScheduleTrigger:
"""Trigger invoked at specified point(s) of iterations or epochs.
This trigger accepts iterations or epochs indicated by given point(s).
There are two ways to specify the point(s): iteration and epoch.
``iteration`` means the number of updates, while ``epoch`` means the number
of sweeps over the training dataset. Fractional values are allowed
if the point is a number of epochs; the trigger uses the ``iteration``
and ``epoch_detail`` attributes defined by the updater.
Args:
points (int, float, or list of int or float): time of the trigger.
Must be an integer or list of integer if unit is ``'iteration'``.
unit (str): Unit of the time specified by ``points``. It must be
either ``'iteration'`` or ``'epoch'``.
Attributes:
finished (bool): Flag that indicates whether or not this trigger will
fire in the future. This flag is used to determine if the extension
should be initialized after resume.
"""
def __init__(self, points, unit):
if unit not in ('epoch', 'iteration'):
raise ValueError(
'Trigger unit must be either \'epoch\' or \'iteration\'.')
self.points = (points if isinstance(points, list) else [points])
self.unit = unit
self.finished = False
self._previous_iteration = 0
self._previous_epoch_detail = 0.
def __call__(self, manager):
"""Decides whether the extension should be called on this iteration.
Args:
manager (~pytorch_pfn_extras.training.ExtensionsManager):
Manager object that this trigger is associated with.
The updater associated with this manager is used to
determine if the trigger should fire.
Returns:
bool: True if the corresponding extension should be invoked in this
iteration.
"""
updater = manager.updater
if self.unit == 'epoch':
epoch_detail = updater.epoch_detail
previous_epoch_detail = self._previous_epoch_detail
# if previous_epoch_detail is invalid value,
# use the value of updater.
if previous_epoch_detail < 0:
previous_epoch_detail = updater.previous_epoch_detail
fire = any(
previous_epoch_detail < p <= epoch_detail
for p in self.points)
if hasattr(self, '_finished_is_tmp'):
del self._finished_is_tmp
if epoch_detail >= max(self.points):
self.finished = True
if fire and epoch_detail >= max(self.points):
self.finished = True
else:
iteration = updater.iteration
previous_iteration = self._previous_iteration
# if previous_iteration is invalid value,
# guess it from current iteration.
if previous_iteration < 0:
previous_iteration = iteration - 1
fire = any(
previous_iteration < p <= iteration
for p in self.points)
if hasattr(self, '_finished_is_tmp'):
del self._finished_is_tmp
if iteration >= max(self.points):
self.finished = True
if fire and iteration >= max(self.points):
self.finished = True
# save current values
self._previous_iteration = updater.iteration
if hasattr(updater, 'epoch_detail'):
self._previous_epoch_detail = updater.epoch_detail
return fire
def state_dict(self):
state = {}
state['_previous_iteration'] = self._previous_iteration
state['_previous_epoch_detail'] = self._previous_epoch_detail
state['finished'] = self.finished
return state
def load_state_dict(self, to_load):
self._previous_iteration = to_load['_previous_iteration']
self._previous_epoch_detail = to_load['_previous_epoch_detail']
self.finished = to_load['finished']
| [
"webmaster@kenichimaehashi.com"
] | webmaster@kenichimaehashi.com |
640acd474ccc2667449fec3953056cfc3acb5173 | 3e74c0b272bfd7981454953aeef96ab2f5c59c69 | /benchmarking/timeIt.py | 8d8650898c5cef602fc4840308c61e368cda7614 | [] | no_license | LokeshKD/DSPython | 09e2e086182d1d0e73f85cc88611b7aa446d1253 | f657678ac2cc1855c4d13bdc66d790a1022b6640 | refs/heads/master | 2023-04-16T13:58:02.500681 | 2021-04-17T17:04:51 | 2021-04-17T17:04:51 | 357,611,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | #
def my_function():
try:
1 / 0
except ZeroDivisionError:
pass
if __name__ == "__main__":
import timeit
setup = "from __main__ import my_function"
print(timeit.timeit("my_function()", setup=setup))
| [
"i.lokesh@gmail.com"
] | i.lokesh@gmail.com |
c5966d474a2ab28ae3d633cd80d29129b5f0a498 | c6c74c3fdf54b01254caf42728aefdfb981d211b | /modules/Summary.py | 2a8079ee5b6acee405a74bf22eb914bbd84d4168 | [
"BSD-3-Clause",
"MIT"
] | permissive | serin113/orgdb | decdbf6329a76a1590ebf16450ad580c7ca4b757 | 25fb9e3b430ab88e43b7fdae4665b862881e41fc | refs/heads/master | 2020-04-18T01:42:47.548496 | 2019-05-22T07:36:21 | 2019-05-22T07:36:21 | 167,130,289 | 1 | 0 | MIT | 2019-05-22T07:36:23 | 2019-01-23T06:31:17 | Python | UTF-8 | Python | false | false | 4,361 | py | # Created in 2019-03-22 for PSYSC as part of a system for managing science club affiliations.
# Copyright (c) 2019 Nathankissam Roy Tubis & Elfren Simon Clemente.
# Licensed under the MIT License, refer to https://opensource.org/licenses/MIT for details.
# Code History:
# 2019/03/22 (Simon) - Moved class to this file
# - Uses a dictionary for passing data to the template
# 2019/03/23 (Simon) - Added (int) overallTotal to data sent to template
# 2019/03/26 (Simon) - Login.getUserType values passed to ContentRenderer.render
# - Added Login.accessible_by decorators to limit page access to specific users
# 2019/03/27 (Simon) - Added filtering functionality
# 2019/03/29 (Simon) - "DBC" argument now indicates the database configuration settings
# instead of a DBConnection class
# - Database connection now handled using a with statement
# 2019/04/02 (Simon) - Changed "back" URL
# 2019/04/24 (Simon) - Added gzip compression to page handler
# 2019/05/15 (Simon) - Added **kwargs to CherryPy-exposed methods to catch unexpected parameters w/o an error
from ._helpers import *
from .Login import *
# class used by CherryPy for handling /summary
# class used by CherryPy for handling /summary
class Summary(object):
    """CherryPy handler for /summary: per-school-year affiliation statistics."""

    def __init__(self, DBC=None, Renderer=None):
        """DBC: database configuration (defaults to "db.conf");
        Renderer: page renderer (defaults to a fresh ContentRenderer)."""
        if DBC is not None:
            self.DBC = DBConnection(DBC)
        else:
            self.DBC = DBConnection("db.conf")
        if Renderer is not None:
            self.renderer = Renderer
        else:
            self.renderer = ContentRenderer()

    @cherrypy.expose
    @cherrypy.tools.gzip()
    @accessible_by("admin")
    def index(self, q="", **kwargs):
        """Render affiliation totals (by region/level/type) per school year.

        q: optional single school year (2007-2050) to restrict the summary;
        empty summarizes every year on record.  **kwargs absorbs unexpected
        query parameters without erroring.  Renders an error dialog when no
        data could be assembled.
        """
        data = None
        with self.DBC as sqlcnx:
            cur = sqlcnx.cursor(buffered=True)  # create SQL database cursor
            # (firstYear, lastYear) range(s) to summarize; default empty
            # (the original left `res` unbound for non-numeric q -> crash)
            res = []
            if len(q) == 0:
                # no filter: derive the full school-year range on record
                cur.execute(
                    "SELECT MIN(schoolYear), MAX(schoolYear-1+yearsAffiliated) FROM AffiliationTable ORDER BY schoolYear"
                )
                res = cur.fetchall()
            else:
                # create year range (q to q+1); ignore non-numeric or
                # out-of-range values of q
                qi = toInt(q)
                if qi and qi in range(2007, 2051):
                    res = [(qi, qi + 1)]
            if len(res) > 0:
                # MIN/MAX are both NULL when the table is empty
                if res[0][0] is not None or res[0][1] is not None:
                    data = {}
                    # for every year within the range
                    for year in range(res[0][0], res[0][1] + 1):
                        region_total = defaultdict(int)
                        level_total = defaultdict(int)
                        type_total = defaultdict(int)
                        # fetch all affiliated clubs for a specific year
                        cur.execute(
                            "SELECT region, level, type "
                            "FROM (AffiliationRecordsTable INNER JOIN AffiliationTable ON AffiliationRecordsTable.clubID = AffiliationTable.AffiliationRecordsTable_clubID)"
                            "WHERE %(schoolYear)s BETWEEN schoolYear AND schoolYear-1+yearsAffiliated "
                            "AND affiliated = 1 ", {"schoolYear": year})
                        rows = cur.fetchall()
                        # count totals per region/level/type
                        for record in rows:
                            region_total[record[0]] += 1
                            level_total[record[1]] += 1
                            type_total[record[2]] += 1
                        # save data for specific year
                        data[year] = (region_total, level_total, type_total,
                                      len(rows))
            cur.close()
        if data is not None:
            # display summary data
            return self.renderer.render("summary.mako", {
                "data": data,
                'user': getUserType(self.DBC),
                'q': q
            })
        # Fallback error dialog.  In the original this return was dead code
        # (the summary render above returned unconditionally, even with
        # data=None); it is now reachable for bad queries / empty results.
        return self.renderer.render(
            "dialog.mako", {
                'title': "Error!",
                'message': "A database error occured.",
                'linkaddr': "#back",
                'linktext': "< Back",
                'user': getUserType(self.DBC)
            })
| [
"simon_clemente@yahoo.com"
] | simon_clemente@yahoo.com |
3e060088bec0fd6cb0b2baf58eb5c225f1f27759 | 95315ecd3068cf98dd10267f98055ace5f6cafc9 | /json_lesson/json_lesson.py | 6f226d2400322c65001916e232183260b587e58c | [] | no_license | austin72905/python-lesson | 22d7dc3b70e524041711055f26ef0e39b99936cd | e51954e0516a60259d6794fb4ec52ae2f6722b4b | refs/heads/master | 2023-04-18T10:16:11.415602 | 2021-04-24T04:07:10 | 2021-04-24T04:07:10 | 332,389,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | import json
from pprint import pprint
## JSON
# 1. deserialize: json.loads
# 2. serialize:   json.dumps
#### code region ####
my_dic = dict(name='BOb', age=20, score=93)
json_str = json.dumps(my_dic)  # dict -> JSON text (serialize)
pprint(json_str)  # '{"name": "BOb", "age": 20, "score": 93}'
json_dic = json.loads(json_str)  # JSON text -> dict (deserialize)
pprint(json_dic)  # {'age': 20, 'name': 'BOb', 'score': 93}
class Student:
    """Plain record type holding a student's name, age, and score."""

    def __init__(self, name, age, score):
        # Keep the constructor arguments as public attributes, unchanged.
        self.name, self.age, self.score = name, age, score
# dic 轉 class
def dicToClass(dic: dict, className):
    """Build an instance of *className* from *dic* (keys become kwargs)."""
    constructor = className
    # Expand the mapping so each entry is passed as a keyword argument.
    return constructor(**dic)
#what =dicToClass(json_dic,Student)
#print(what._name)
def dicToClass2(dic):
    """json.loads object_hook: turn a decoded JSON object into a Student."""
    return dicToClass(dic, Student)
# dict -> class instance
stud1 = dicToClass(json_dic, Student)
# Serialize the instance to a JSON string; `default` falls back to the
# object's __dict__ for anything json doesn't know how to encode.
stud_json_str = json.dumps(
    stud1,
    default=lambda obj: obj.__dict__) #{"name": "BOb", "age": 20, "score": 93}
print(stud_json_str)
# Deserialize the string straight into a class instance via object_hook...
# ...but hard-coding the class inside the hook isn't great.
json_str_tostud = json.loads(stud_json_str, object_hook=dicToClass2)
print(json_str_tostud)
# Without hard-coding the class:
# first decode to a dict,
# then convert the dict to a class instance.
jstr_dic = json.loads(stud_json_str)
jstr_stud = dicToClass(jstr_dic,Student)
print(jstr_stud)
| [
"Linponggood@gmail.com"
] | Linponggood@gmail.com |
466502916f65ec970df5c90a6f2d448e9050d8b0 | 09efb7c148e82c22ce6cc7a17b5140aa03aa6e55 | /env/lib/python3.6/site-packages/plotly/graph_objs/ohlc/__init__.py | 6045b7202af831a93026f1550f8e714430892557 | [
"MIT"
] | permissive | harryturr/harryturr_garmin_dashboard | 53071a23b267116e1945ae93d36e2a978c411261 | 734e04f8257f9f84f2553efeb7e73920e35aadc9 | refs/heads/master | 2023-01-19T22:10:57.374029 | 2020-01-29T10:47:56 | 2020-01-29T10:47:56 | 235,609,069 | 4 | 0 | MIT | 2023-01-05T05:51:27 | 2020-01-22T16:00:13 | Python | UTF-8 | Python | false | false | 34,074 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "ohlc"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.ohlc.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.ohlc.Stream
constructor must be a dict or
an instance of plotly.graph_objs.ohlc.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.ohlc import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Line styling shared by both directions of an "ohlc" trace
    (auto-generated schema node)."""

    @property
    def dash(self):
        """Dash style of lines: a named style ('solid', 'dot', 'dash',
        'longdash', 'dashdot', 'longdashdot') or a dash-length list in px
        (e.g. '5px,10px,2px,2px').  Can also be set per direction via
        `increasing.line.dash` / `decreasing.line.dash`."""
        return self["dash"]

    @dash.setter
    def dash(self, val):
        self["dash"] = val

    @property
    def width(self):
        """Line width in px (number >= 0).  Can also be set per direction
        via `increasing.line.width` / `decreasing.line.width`."""
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of this node's parent in the figure schema.
        return "ohlc"

    @property
    def _prop_descriptions(self):
        return """\
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px"). Note that this style setting can
            also be set per direction via `increasing.line.dash`
            and `decreasing.line.dash`.
        width
            [object Object] Note that this style setting can also
            be set per direction via `increasing.line.width` and
            `decreasing.line.width`.
        """

    def __init__(self, arg=None, dash=None, width=None, **kwargs):
        """Construct a new Line object.

        `arg` may be a dict of property values or another Line
        instance; explicit `dash` / `width` keywords override entries
        carried in `arg`.
        """
        super(Line, self).__init__("line")

        # Normalize `arg` into a plain dict we own and may pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.Line
constructor must be a dict or
an instance of plotly.graph_objs.ohlc.Line"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)

        from plotly.validators.ohlc import line as v_line

        _props = (("dash", dash), ("width", width))

        # Validators are resolved by naming convention, e.g.
        # "dash" -> v_line.DashValidator.
        for _name, _ in _props:
            self._validators[_name] = getattr(
                v_line, _name.capitalize() + "Validator"
            )()

        # Explicit keyword wins over the entry carried in `arg`
        # (which is popped either way).
        for _name, _explicit in _props:
            _popped = arg.pop(_name, None)
            self[_name] = _explicit if _explicit is not None else _popped

        # Remaining entries in `arg` plus any extra keyword arguments.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Increasing(_BaseTraceHierarchyType):
    """Styling applied to increasing OHLC bars (auto-generated schema node)."""

    @property
    def line(self):
        """Line style for increasing bars: a
        plotly.graph_objs.ohlc.increasing.Line instance or a compatible
        dict (keys: color, dash, width)."""
        return self["line"]

    @line.setter
    def line(self, val):
        self["line"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of this node's parent in the figure schema.
        return "ohlc"

    @property
    def _prop_descriptions(self):
        return """\
        line
            plotly.graph_objects.ohlc.increasing.Line instance or
            dict with compatible properties
        """

    def __init__(self, arg=None, line=None, **kwargs):
        """Construct a new Increasing object.

        `arg` may be a dict of property values or another Increasing
        instance; the explicit `line` keyword overrides any entry in
        `arg`.
        """
        super(Increasing, self).__init__("increasing")

        # Normalize `arg` into a plain dict we own and may pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.Increasing
constructor must be a dict or
an instance of plotly.graph_objs.ohlc.Increasing"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)

        from plotly.validators.ohlc import increasing as v_increasing

        self._validators["line"] = v_increasing.LineValidator()

        # Explicit keyword wins over the (always-popped) `arg` entry.
        _popped = arg.pop("line", None)
        self["line"] = line if line is not None else _popped

        # Remaining entries in `arg` plus any extra keyword arguments.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Hover-label styling for an "ohlc" trace (auto-generated schema node)."""

    @property
    def align(self):
        """Horizontal alignment of the text inside the hover label box;
        one of 'left', 'right', 'auto' (scalar or array)."""
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val

    @property
    def alignsrc(self):
        """plot.ly grid source reference for `align`."""
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val

    @property
    def bgcolor(self):
        """Background color of the hover labels (CSS color, scalar or array)."""
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    @property
    def bgcolorsrc(self):
        """plot.ly grid source reference for `bgcolor`."""
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    @property
    def bordercolor(self):
        """Border color of the hover labels (CSS color, scalar or array)."""
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    @property
    def bordercolorsrc(self):
        """plot.ly grid source reference for `bordercolor`."""
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val

    @property
    def font(self):
        """Font used in hover labels: a
        plotly.graph_objs.ohlc.hoverlabel.Font instance or a compatible
        dict (keys: color, colorsrc, family, familysrc, size, sizesrc)."""
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    @property
    def namelength(self):
        """Default trace-name length in hover labels: -1 shows the whole
        name, 0-3 show that many characters, >3 truncates longer names to
        `namelength - 3` characters plus an ellipsis (int >= -1, scalar
        or array)."""
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val

    @property
    def namelengthsrc(self):
        """plot.ly grid source reference for `namelength`."""
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val

    @property
    def split(self):
        """Whether to show open/close/high/low in separate labels (bool)."""
        return self["split"]

    @split.setter
    def split(self, val):
        self["split"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of this node's parent in the figure schema.
        return "ohlc"

    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .
        split
            Show hover information (open, close, high, low) in
            separate labels.
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        split=None,
        **kwargs
    ):
        """Construct a new Hoverlabel object.

        `arg` may be a dict of property values or another Hoverlabel
        instance; explicit keyword arguments override entries carried
        in `arg`.  See the matching property docstrings for the meaning
        of each keyword.
        """
        super(Hoverlabel, self).__init__("hoverlabel")

        # Normalize `arg` into a plain dict we own and may pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.ohlc.Hoverlabel"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)

        from plotly.validators.ohlc import hoverlabel as v_hoverlabel

        # Property order matches the generated original.
        _props = (
            ("align", align),
            ("alignsrc", alignsrc),
            ("bgcolor", bgcolor),
            ("bgcolorsrc", bgcolorsrc),
            ("bordercolor", bordercolor),
            ("bordercolorsrc", bordercolorsrc),
            ("font", font),
            ("namelength", namelength),
            ("namelengthsrc", namelengthsrc),
            ("split", split),
        )

        # Validators are resolved by naming convention, e.g.
        # "align" -> v_hoverlabel.AlignValidator.
        for _name, _ in _props:
            self._validators[_name] = getattr(
                v_hoverlabel, _name.capitalize() + "Validator"
            )()

        # Explicit keyword wins over the entry carried in `arg`
        # (which is popped either way).
        for _name, _explicit in _props:
            _popped = arg.pop(_name, None)
            self[_name] = _explicit if _explicit is not None else _popped

        # Remaining entries in `arg` plus any extra keyword arguments.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Decreasing(_BaseTraceHierarchyType):
    """Styling applied to decreasing OHLC bars (auto-generated schema node)."""

    @property
    def line(self):
        """Line style for decreasing bars: a
        plotly.graph_objs.ohlc.decreasing.Line instance or a compatible
        dict (keys: color, dash, width)."""
        return self["line"]

    @line.setter
    def line(self, val):
        self["line"] = val

    @property
    def _parent_path_str(self):
        # Dotted path of this node's parent in the figure schema.
        return "ohlc"

    @property
    def _prop_descriptions(self):
        return """\
        line
            plotly.graph_objects.ohlc.decreasing.Line instance or
            dict with compatible properties
        """

    def __init__(self, arg=None, line=None, **kwargs):
        """Construct a new Decreasing object.

        `arg` may be a dict of property values or another Decreasing
        instance; the explicit `line` keyword overrides any entry in
        `arg`.
        """
        super(Decreasing, self).__init__("decreasing")

        # Normalize `arg` into a plain dict we own and may pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.ohlc.Decreasing
constructor must be a dict or
an instance of plotly.graph_objs.ohlc.Decreasing"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)

        from plotly.validators.ohlc import decreasing as v_decreasing

        self._validators["line"] = v_decreasing.LineValidator()

        # Explicit keyword wins over the (always-popped) `arg` entry.
        _popped = arg.pop("line", None)
        self["line"] = line if line is not None else _popped

        # Remaining entries in `arg` plus any extra keyword arguments.
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
# Names exported by `from plotly.graph_objs.ohlc import *`: the five
# schema classes above plus the three child subpackages.
__all__ = [
    "Decreasing",
    "Hoverlabel",
    "Increasing",
    "Line",
    "Stream",
    "decreasing",
    "hoverlabel",
    "increasing",
]
from plotly.graph_objs.ohlc import increasing
from plotly.graph_objs.ohlc import hoverlabel
from plotly.graph_objs.ohlc import decreasing
| [
"griffin.harrisonn@gmail.com"
] | griffin.harrisonn@gmail.com |
4c14a3408c293c6b851fea2d1d703a3df72cb616 | 72862456de71e97613b14d297f775aec5ec1671d | /migrations/versions/526c6a80a57c_.py | b6a67b02429f50566b6f5adb3ae5dd550773f3c7 | [] | no_license | twistedladder/Google-Calendar-Phishing | 45b530f7698c1a5680b892be5413d1a5fc65ba36 | 35ccc54eeeb4daa2008cf985097c0c81e410f316 | refs/heads/master | 2022-12-13T04:00:48.848541 | 2020-10-01T17:56:30 | 2020-10-01T17:56:30 | 130,408,246 | 0 | 2 | null | 2022-12-08T02:03:11 | 2018-04-20T19:58:56 | Python | UTF-8 | Python | false | false | 680 | py | """empty message
Revision ID: 526c6a80a57c
Revises: c3395afa9869
Create Date: 2018-04-23 14:22:45.772623
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '526c6a80a57c'        # this migration's id
down_revision = 'c3395afa9869'   # parent migration in the chain
branch_labels = None             # no named branch
depends_on = None                # no cross-branch dependency
def upgrade():
    """Apply this revision: add the NOT NULL ``recipient_email`` column
    to the ``emails`` table.

    NOTE(review): a NOT NULL column with no server_default fails on a
    table that already contains rows -- confirm ``emails`` is empty when
    this migration runs, or add a server_default.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('emails', sa.Column('recipient_email', sa.String(length=120), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop ``recipient_email`` from ``emails``
    (the column's data is lost)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('emails', 'recipient_email')
    # ### end Alembic commands ###
| [
"gene.hsu@utexas.edu"
] | gene.hsu@utexas.edu |
e760becc3c1eb5c190c95e6eb021d1db26b75b93 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/dcx.py | 6b975b5f2f2c98bbfca63125607d2e2c1d79986e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
    """Run the dCX interpreter over *fileName*.

    Every non-blank line must start with the keyword 'dCX'; its remaining
    tokens are handed to printFunction.  The first line that starts with
    anything else prints ERROR and stops the interpreter.

    (Converted from Python 2 print statements, which are syntax errors on
    Python 3.)
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if not data:
                # Robustness fix: a blank line used to raise IndexError.
                continue
            if data[0] == 'dCX':
                printFunction(data[1:])
            else:
                print('ERROR')
                return


if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
df665b32b2dfe47d8e32cbfc847bc28d6973111c | 9d2154ad9a3c6c4bc4d8aae47c2cf5dd30741a2d | /usr/lib/enigma2/python/Plugins/Extensions/iSkin/x.py | b55b465cb6ddbdb3f570bbaedbe4de4fd2812a9e | [] | no_license | XtrendAlliance/xta | b0b161a5b7b9e605f9bd7d6985b5e4171cdc7d28 | 4191835caf9f25edf6a31ab6214759ed4e0f35d2 | refs/heads/master | 2020-06-07T04:03:58.421352 | 2014-11-30T10:50:18 | 2014-11-30T10:50:18 | 27,283,822 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | import os
# Print the host, port and refresh value stored by the iSkin connection
# screen, one value per line.  Rewritten for Python 3: the original used
# Python 2 print statements (a syntax error on Python 3) and shadowed the
# builtin name "type" with a local variable.
if os.path.exists('/usr/lib/enigma2/python/Plugins/Extensions/iSkin/Connection/Config/ConnectionSave'):
    saved = open('/usr/lib/enigma2/python/Plugins/Extensions/iSkin/Connection/Config/ConnectionSave').read()
    if len(saved):
        # Drop a leading "scheme://" prefix if one is present.
        p = saved.find('://')
        if p != -1:
            saved = saved[(p + 3):]
        parts = saved.split(':')
        if len(parts[0]) > 0:
            # First field: host name / address.
            print(parts[0])
        if len(parts) > 1 and parts[1].isdigit():
            # Second field: numeric port.
            print(int(parts[1]))
        if len(parts) > 2 and parts[2].isdigit():
            # Third field: numeric value clamped to at least 1.
            print(max(1, int(parts[2])))
"xtrendboss@et-view-support.com"
] | xtrendboss@et-view-support.com |
de575d64908dac2ae371562a98245e061498181d | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/X733GROUP-MIB.py | 2e3597342dc315db7cc0b26e65652659f42fc2f8 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 18,926 | py | #
# PySNMP MIB module X733GROUP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/X733GROUP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:36:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
enterprises, Counter32, IpAddress, ModuleIdentity, Integer32, NotificationType, Bits, MibIdentifier, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Gauge32, ObjectIdentity, Unsigned32, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "Counter32", "IpAddress", "ModuleIdentity", "Integer32", "NotificationType", "Bits", "MibIdentifier", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Gauge32", "ObjectIdentity", "Unsigned32", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
sni = MibIdentifier((1, 3, 6, 1, 4, 1, 231))
siemensUnits = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7))
oenProductMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1))
nms = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3))
ncProxy = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1))
ewsdAlarms = ModuleIdentity((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1))
if mibBuilder.loadTexts: ewsdAlarms.setLastUpdated('200110150000Z')
if mibBuilder.loadTexts: ewsdAlarms.setOrganization('Siemens AG Osterreich')
commonGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 1))
controlGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2))
summaryGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 3))
miscGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 4))
x733Group = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5))
q3Group = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6))
osGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 7))
neName = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 1, 1), DisplayString())
if mibBuilder.loadTexts: neName.setStatus('current')
managedObjectClass = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 1, 2), DisplayString())
if mibBuilder.loadTexts: managedObjectClass.setStatus('current')
notificationId = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 1, 3), DisplayString())
if mibBuilder.loadTexts: notificationId.setStatus('current')
globalAlarmIds = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8192)))
if mibBuilder.loadTexts: globalAlarmIds.setStatus('current')
setPeriod = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: setPeriod.setStatus('current')
sendSummary = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sendSummary.setStatus('current')
resendAlarm = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: resendAlarm.setStatus('current')
sendAllAlarms = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: sendAllAlarms.setStatus('current')
alarmSpontan = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("yes", 1), ("no", 2))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: alarmSpontan.setStatus('current')
countAlarmPeriod = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: countAlarmPeriod.setStatus('current')
countAlarmSpontan = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 2, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("yes", 1), ("no", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: countAlarmSpontan.setStatus('current')
numberOfAlarms = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 3, 1), Integer32())
if mibBuilder.loadTexts: numberOfAlarms.setStatus('current')
connectionReliable = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 3, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("yes", 1), ("no", 2))))
if mibBuilder.loadTexts: connectionReliable.setStatus('current')
critical = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 3, 3), Integer32())
if mibBuilder.loadTexts: critical.setStatus('current')
major = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 3, 4), Integer32())
if mibBuilder.loadTexts: major.setStatus('current')
minor = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 3, 5), Integer32())
if mibBuilder.loadTexts: minor.setStatus('current')
timePeriod = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 4, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 60)).clone(1))
if mibBuilder.loadTexts: timePeriod.setStatus('current')
q3AlarmNumber = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 4, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: q3AlarmNumber.setStatus('current')
eventType = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 3, 4, 5, 8, 9, 10, 11, 13, 15))).clone(namedValues=NamedValues(("indeterminate", 0), ("communicationsAlarm", 2), ("enviromentalAlarm", 3), ("equipmentAlarm", 4), ("integrityViolation", 5), ("operationalViolation", 8), ("physicalViolation", 9), ("processingErrorAlarm", 10), ("qualityOfServiceAlarm", 11), ("securityServiceOrMechanismViolation", 13), ("timeDomainViolation", 15))))
if mibBuilder.loadTexts: eventType.setStatus('current')
severity = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("indeterminate", 0), ("critical", 1), ("major", 2), ("minor", 3), ("warning", 4), ("cleared", 5))))
if mibBuilder.loadTexts: severity.setStatus('current')
probableCause = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 3), DisplayString())
if mibBuilder.loadTexts: probableCause.setStatus('current')
originalAlarm = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8192)))
if mibBuilder.loadTexts: originalAlarm.setStatus('current')
processingStatus = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("not-processed", 0), ("in-process", 1), ("under-repair", 2), ("deferred", 3), ("cleared", 4))))
if mibBuilder.loadTexts: processingStatus.setStatus('current')
alarmClass = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 6), DisplayString())
if mibBuilder.loadTexts: alarmClass.setStatus('current')
managedObjectInstance = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 7), DisplayString())
if mibBuilder.loadTexts: managedObjectInstance.setStatus('current')
rack = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: rack.setStatus('current')
shelf = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: shelf.setStatus('current')
fromCard = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: fromCard.setStatus('current')
toCard = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: toCard.setStatus('current')
fromPort = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: fromPort.setStatus('current')
toPort = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)))
if mibBuilder.loadTexts: toPort.setStatus('current')
eventTime = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 14), DisplayString())
if mibBuilder.loadTexts: eventTime.setStatus('current')
ipAddress = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 15), DisplayString())
if mibBuilder.loadTexts: ipAddress.setStatus('current')
trapName = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 16), DisplayString())
if mibBuilder.loadTexts: trapName.setStatus('current')
specificProblems = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 17), DisplayString())
if mibBuilder.loadTexts: specificProblems.setStatus('current')
additionalText = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 18), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 2048)))
if mibBuilder.loadTexts: additionalText.setStatus('current')
additionalInformation = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 19), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8192)))
if mibBuilder.loadTexts: additionalInformation.setStatus('current')
backupStatus = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("yes", 1), ("no", 2))))
if mibBuilder.loadTexts: backupStatus.setStatus('current')
backupObject = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 2), DisplayString())
if mibBuilder.loadTexts: backupObject.setStatus('current')
trendIndication = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("moresevere", 1), ("nochange", 2), ("lesssevere", 3))))
if mibBuilder.loadTexts: trendIndication.setStatus('current')
thresholdInformation = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 4), DisplayString())
if mibBuilder.loadTexts: thresholdInformation.setStatus('current')
correlatedEvents = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8192)))
if mibBuilder.loadTexts: correlatedEvents.setStatus('current')
stateChanges = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 6), DisplayString())
if mibBuilder.loadTexts: stateChanges.setStatus('current')
monitoredAttributes = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8192)))
if mibBuilder.loadTexts: monitoredAttributes.setStatus('current')
securityAlarmDetector = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 8), DisplayString())
if mibBuilder.loadTexts: securityAlarmDetector.setStatus('current')
serviceUser = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 9), DisplayString())
if mibBuilder.loadTexts: serviceUser.setStatus('current')
serviceProvider = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 10), DisplayString())
if mibBuilder.loadTexts: serviceProvider.setStatus('current')
listOfFaultyBoards = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 6, 11), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 8192)))
if mibBuilder.loadTexts: listOfFaultyBoards.setStatus('current')
mmnKey = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 7, 1), DisplayString())
if mibBuilder.loadTexts: mmnKey.setStatus('current')
thresholdValue = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 7, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)))
if mibBuilder.loadTexts: thresholdValue.setStatus('current')
currentValue = MibScalar((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 7, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)))
if mibBuilder.loadTexts: currentValue.setStatus('current')
summaryAlarms = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 201)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "numberOfAlarms"), ("X733GROUP-MIB", "connectionReliable"), ("X733GROUP-MIB", "globalAlarmIds"))
if mibBuilder.loadTexts: summaryAlarms.setStatus('current')
spontaneousAlarms = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 202)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "managedObjectClass"), ("X733GROUP-MIB", "notificationId"), ("X733GROUP-MIB", "severity"), ("X733GROUP-MIB", "eventType"), ("X733GROUP-MIB", "eventTime"), ("X733GROUP-MIB", "probableCause"), ("X733GROUP-MIB", "processingStatus"), ("X733GROUP-MIB", "alarmClass"), ("X733GROUP-MIB", "managedObjectInstance"), ("X733GROUP-MIB", "rack"), ("X733GROUP-MIB", "shelf"), ("X733GROUP-MIB", "fromCard"), ("X733GROUP-MIB", "toCard"), ("X733GROUP-MIB", "fromPort"), ("X733GROUP-MIB", "toPort"), ("X733GROUP-MIB", "originalAlarm"))
if mibBuilder.loadTexts: spontaneousAlarms.setStatus('current')
snmpAlarm = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 203)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "notificationId"), ("X733GROUP-MIB", "severity"), ("X733GROUP-MIB", "eventType"), ("X733GROUP-MIB", "eventTime"), ("X733GROUP-MIB", "probableCause"), ("X733GROUP-MIB", "specificProblems"), ("X733GROUP-MIB", "managedObjectClass"), ("X733GROUP-MIB", "managedObjectInstance"), ("X733GROUP-MIB", "ipAddress"), ("X733GROUP-MIB", "trapName"), ("X733GROUP-MIB", "originalAlarm"))
if mibBuilder.loadTexts: snmpAlarm.setStatus('current')
q3Alarm = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 204)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "notificationId"), ("X733GROUP-MIB", "q3AlarmNumber"), ("X733GROUP-MIB", "severity"), ("X733GROUP-MIB", "eventType"), ("X733GROUP-MIB", "eventTime"), ("X733GROUP-MIB", "probableCause"), ("X733GROUP-MIB", "specificProblems"), ("X733GROUP-MIB", "managedObjectClass"), ("X733GROUP-MIB", "managedObjectInstance"), ("X733GROUP-MIB", "additionalText"), ("X733GROUP-MIB", "additionalInformation"), ("X733GROUP-MIB", "backupStatus"), ("X733GROUP-MIB", "backupObject"), ("X733GROUP-MIB", "trendIndication"), ("X733GROUP-MIB", "thresholdInformation"), ("X733GROUP-MIB", "correlatedEvents"), ("X733GROUP-MIB", "stateChanges"), ("X733GROUP-MIB", "monitoredAttributes"), ("X733GROUP-MIB", "securityAlarmDetector"), ("X733GROUP-MIB", "serviceUser"), ("X733GROUP-MIB", "serviceProvider"), ("X733GROUP-MIB", "listOfFaultyBoards"), ("X733GROUP-MIB", "originalAlarm"))
if mibBuilder.loadTexts: q3Alarm.setStatus('current')
q3contAlarm = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 205)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "notificationId"), ("X733GROUP-MIB", "q3AlarmNumber"), ("X733GROUP-MIB", "correlatedEvents"), ("X733GROUP-MIB", "monitoredAttributes"), ("X733GROUP-MIB", "listOfFaultyBoards"), ("X733GROUP-MIB", "originalAlarm"))
if mibBuilder.loadTexts: q3contAlarm.setStatus('current')
timeAckAlarms = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 206)).setObjects(("X733GROUP-MIB", "timePeriod"))
if mibBuilder.loadTexts: timeAckAlarms.setStatus('current')
proxyStartUp = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 207))
if mibBuilder.loadTexts: proxyStartUp.setStatus('current')
countAlarm = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 208)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "critical"), ("X733GROUP-MIB", "major"), ("X733GROUP-MIB", "minor"))
if mibBuilder.loadTexts: countAlarm.setStatus('current')
osAlarm = NotificationType((1, 3, 6, 1, 4, 1, 231, 7, 1, 3, 1, 1, 5, 209)).setObjects(("X733GROUP-MIB", "neName"), ("X733GROUP-MIB", "notificationId"), ("X733GROUP-MIB", "severity"), ("X733GROUP-MIB", "eventType"), ("X733GROUP-MIB", "eventTime"), ("X733GROUP-MIB", "probableCause"), ("X733GROUP-MIB", "managedObjectClass"), ("X733GROUP-MIB", "managedObjectInstance"), ("X733GROUP-MIB", "mmnKey"), ("X733GROUP-MIB", "additionalText"), ("X733GROUP-MIB", "thresholdValue"), ("X733GROUP-MIB", "currentValue"), ("X733GROUP-MIB", "securityAlarmDetector"), ("X733GROUP-MIB", "serviceUser"), ("X733GROUP-MIB", "serviceProvider"))
if mibBuilder.loadTexts: osAlarm.setStatus('current')
mibBuilder.exportSymbols("X733GROUP-MIB", fromPort=fromPort, listOfFaultyBoards=listOfFaultyBoards, major=major, eventTime=eventTime, alarmClass=alarmClass, securityAlarmDetector=securityAlarmDetector, resendAlarm=resendAlarm, spontaneousAlarms=spontaneousAlarms, timePeriod=timePeriod, snmpAlarm=snmpAlarm, rack=rack, correlatedEvents=correlatedEvents, currentValue=currentValue, trendIndication=trendIndication, x733Group=x733Group, eventType=eventType, sni=sni, monitoredAttributes=monitoredAttributes, severity=severity, serviceProvider=serviceProvider, PYSNMP_MODULE_ID=ewsdAlarms, timeAckAlarms=timeAckAlarms, critical=critical, backupObject=backupObject, serviceUser=serviceUser, q3contAlarm=q3contAlarm, shelf=shelf, fromCard=fromCard, siemensUnits=siemensUnits, probableCause=probableCause, ewsdAlarms=ewsdAlarms, countAlarmSpontan=countAlarmSpontan, q3AlarmNumber=q3AlarmNumber, miscGroup=miscGroup, trapName=trapName, summaryAlarms=summaryAlarms, globalAlarmIds=globalAlarmIds, minor=minor, neName=neName, originalAlarm=originalAlarm, additionalInformation=additionalInformation, sendAllAlarms=sendAllAlarms, oenProductMibs=oenProductMibs, processingStatus=processingStatus, backupStatus=backupStatus, proxyStartUp=proxyStartUp, commonGroup=commonGroup, controlGroup=controlGroup, alarmSpontan=alarmSpontan, notificationId=notificationId, toCard=toCard, osAlarm=osAlarm, q3Group=q3Group, thresholdValue=thresholdValue, ipAddress=ipAddress, countAlarmPeriod=countAlarmPeriod, stateChanges=stateChanges, numberOfAlarms=numberOfAlarms, additionalText=additionalText, countAlarm=countAlarm, nms=nms, sendSummary=sendSummary, managedObjectInstance=managedObjectInstance, q3Alarm=q3Alarm, summaryGroup=summaryGroup, thresholdInformation=thresholdInformation, specificProblems=specificProblems, mmnKey=mmnKey, managedObjectClass=managedObjectClass, ncProxy=ncProxy, setPeriod=setPeriod, toPort=toPort, osGroup=osGroup, connectionReliable=connectionReliable)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
ce4e62c243186cbb7a7e617fb41be98fdef66236 | 35c54f14769f86afd1a1014e184bb1294c3d7cea | /tpot_pipeline.py | f002d98019be9c0e6019dd1bed6950a17110eef8 | [] | no_license | samsoohoonseo/heart_rate_monior | e5d2ff7516dc2e0ca41b3b9a672f5a5f2c9badf7 | 959bcd4b3017bc662980648609f3e1c8e343083b | refs/heads/master | 2020-09-04T10:10:36.656065 | 2019-11-26T07:22:02 | 2019-11-26T07:22:02 | 219,708,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator, ZeroCount
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the class is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
# Random train/test split; random_state=None gives a different split each run.
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data['target'].values, random_state=None)
# Average CV score on the training set was:0.9999390720675893
# Pipeline exported by TPOT: duplicate the feature matrix side by side,
# append per-sample non-zero counts (ZeroCount), then classify with
# an extra-trees ensemble.
exported_pipeline = make_pipeline(
    make_union(
        FunctionTransformer(copy),
        FunctionTransformer(copy)
    ),
    ZeroCount(),
    ExtraTreesClassifier(bootstrap=False, criterion="entropy", max_features=0.6500000000000001, min_samples_leaf=4, min_samples_split=11, n_estimators=100)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"noreply@github.com"
] | noreply@github.com |
7436c12c9b17ab4e53a8e623b20b1a24fc082352 | dfaf6f7ac83185c361c81e2e1efc09081bd9c891 | /k8sdeployment/k8sstat/python/kubernetes/test/test_v1beta1_subject_access_review_status.py | 86ad671f95cfc1388e0b498d3971b2a7c14d6e90 | [
"Apache-2.0",
"MIT"
] | permissive | JeffYFHuang/gpuaccounting | d754efac2dffe108b591ea8722c831d979b68cda | 2c63a63c571240561725847daf1a7f23f67e2088 | refs/heads/master | 2022-08-09T03:10:28.185083 | 2022-07-20T00:50:06 | 2022-07-20T00:50:06 | 245,053,008 | 0 | 0 | MIT | 2021-03-25T23:44:50 | 2020-03-05T02:44:15 | JavaScript | UTF-8 | Python | false | false | 1,076 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1beta1_subject_access_review_status import V1beta1SubjectAccessReviewStatus # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1SubjectAccessReviewStatus(unittest.TestCase):
    """V1beta1SubjectAccessReviewStatus unit test stubs"""
    def setUp(self):
        # Generated stub: no shared fixtures are needed.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testV1beta1SubjectAccessReviewStatus(self):
        """Test V1beta1SubjectAccessReviewStatus"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1beta1_subject_access_review_status.V1beta1SubjectAccessReviewStatus() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"JeffYFHuang@github.com"
] | JeffYFHuang@github.com |
08a7aaa10003d220c767b52f5a87d32182e08dbd | 6377b3fcce305fb11f7cce5d6162ba2cbb0c4b7c | /modules/kraken_services/kraken_private_trades_service.py | 23962da5f249634995a0e70972ee9f6567c4d5b8 | [] | no_license | KDamsgaard/CryPtoD_public | 3aa4e99cded80ab82b0b3a671ba5e67988da0423 | a32c4788727f2c6793ed435f8ef949ef0916ccb6 | refs/heads/master | 2023-07-09T15:08:13.003402 | 2021-08-16T16:28:57 | 2021-08-16T16:28:57 | 396,728,215 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | import logging
class KrakenPrivateTradesService:
    """Accumulates the account's own trades from Kraken websocket messages.

    Feed every decoded websocket payload to :meth:`update`; "ownTrades"
    channel messages are unpacked and their trades stored grouped by
    currency pair name.
    """

    def __init__(self):
        self._log = logging.getLogger(self.__class__.__name__)
        # Maps pair name (e.g. "XBT/USD") -> list of trade dicts.
        self._trades = {}

    def update(self, message):
        """Inspect one websocket message; harvest it if it is an ownTrades event.

        Kraken list messages have the shape [payload, channel_name, ...];
        dict messages (heartbeats, status events) and too-short lists are
        ignored.
        """
        # Robustness fix: length guard so a one-element list no longer raises
        # IndexError; isinstance replaces the type(...) == list anti-pattern.
        if isinstance(message, list) and len(message) > 1 and 'ownTrades' in message[1]:
            self._log.debug(f'Found {message[1]} with {len(message[0])} entries')
            self._update_trades(message)

    def _update_trades(self, message):
        """Store every trade of an ownTrades payload under its pair name."""
        for entry in message[0]:
            # Each entry is {txid: {...trade fields...}} with a single key.
            txid = next(iter(entry))
            trade = entry[txid]
            # Bug fix: keep the transaction id string on the trade instead of
            # the one-element list of dict keys the original stored.
            trade['txid'] = txid
            self._trades.setdefault(trade['pair'], []).append(trade)
        self._log.debug(f'Trades: {self._trades}')

    def trades(self, ws_name=None):
        """Return the trades recorded for *ws_name* (None when the pair is
        unknown), or the whole pair->trades mapping when no name is given."""
        if ws_name:
            return self._trades.get(ws_name)
        return self._trades
"kristiandamsgaard@gmail.com"
] | kristiandamsgaard@gmail.com |
f85ebc5a5151f2cbfa44804b7e62295bde977f91 | 5f3e8c065d267314070e600f8b3a85b1f451a625 | /main.py | 8435e10f49dc62dc1f2590780a727f59182d706d | [] | no_license | MrSnowZ/calculator | e08be631f148db4e3d020266898eb6ca8c97d511 | 4fe1b3c7eb9e52c3c8d1f161c93095a06f46fdb1 | refs/heads/main | 2023-05-10T06:32:28.446106 | 2021-06-12T00:40:48 | 2021-06-12T00:40:48 | 376,162,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,159 | py | from tkinter import *
# Build the main window and the entry widget that serves as the display.
root = Tk()
root.title("Snow Calculator")
e = Entry(root, width=35, borderwidth=5)
e.grid(row=0, column=0, columnspan=3, padx=10, pady=10)
def button_click(number):
    """Append the pressed digit to the display entry."""
    current = e.get()
    e.delete(0, END)
    # Re-insert the previous contents with the new digit appended.
    e.insert(0, str(current) + str(number))
def button_clear():
    """Erase everything currently shown in the display entry."""
    e.delete(0, END)
def button_add():
    """Store the current entry as the first operand and clear the display."""
    first_number = e.get()
    # Kept in a module-level global so button_equal can retrieve it later.
    global f_num
    f_num = int(first_number)
    e.delete(0, END)
def button_equal():
    """Add the stored first operand to the current entry and show the sum."""
    # NOTE(review): pressing "=" before "+" raises NameError (f_num unset)
    # and a non-numeric entry raises ValueError -- confirm intended.
    s_number = e.get()
    e.delete(0, END)
    e.insert(0, f_num + int(s_number))
# Define buttons
button_1 = Button(root, text="1", padx=40, pady=20, command=lambda: button_click(1))
button_2 = Button(root, text="2", padx=40, pady=20, command=lambda: button_click(2))
button_3 = Button(root, text="3", padx=40, pady=20, command=lambda: button_click(3))
button_4 = Button(root, text="4", padx=40, pady=20, command=lambda: button_click(4))
button_5 = Button(root, text="5", padx=40, pady=20, command=lambda: button_click(5))
button_6 = Button(root, text="6", padx=40, pady=20, command=lambda: button_click(6))
button_7 = Button(root, text="7", padx=40, pady=20, command=lambda: button_click(7))
button_8 = Button(root, text="8", padx=40, pady=20, command=lambda: button_click(8))
button_9 = Button(root, text="9", padx=40, pady=20, command=lambda: button_click(9))
button_0 = Button(root, text="0", padx=40, pady=20, command=lambda: button_click(0))
button_addSign = Button(root, text="+", padx=39, pady=20, command=button_add)
button_equal = Button(root, text="=", padx=91, pady=20, command=button_equal)
button_clear = Button(root, text="Clear", padx=79, pady=20, command=button_clear)
# Put buttons on the screen
button_1.grid(row=3, column=0)
button_2.grid(row=3, column=1)
button_3.grid(row=3, column=2)
button_4.grid(row=2, column=0)
button_5.grid(row=2, column=1)
button_6.grid(row=2, column=2)
button_7.grid(row=1, column=0)
button_8.grid(row=1, column=1)
button_9.grid(row=1, column=2)
button_0.grid(row=4, column=0)
button_clear.grid(row=4, column=1, columnspan=2)
button_addSign.grid(row=5, column=0)
button_equal.grid(row=5, column=1, columnspan=2)
root.mainloop() | [
"nkasten@gmail.com"
] | nkasten@gmail.com |
2500703afbfbf68c508d1c88738f2728e58cc730 | 19400a06c1632b44bbc291a284e82ca9e6549ba7 | /main.py | d2e8889465b5fcad936f3c638012b3f5d68181d5 | [
"MIT"
] | permissive | codacy-badger/minecarft-server-controller | ef4c035ac8f3223300dfbe80f1a4e35f399c691b | eb923cde805b8f3419384c7ed4d464351b825af2 | refs/heads/master | 2023-02-19T16:50:38.082397 | 2021-01-18T09:35:48 | 2021-01-18T09:35:48 | 330,618,237 | 0 | 0 | MIT | 2021-01-18T11:49:48 | 2021-01-18T09:37:06 | null | UTF-8 | Python | false | false | 5,629 | py | from mcrcon import MCRcon
# Gather the connection details from the user.
def start():
    """Show the ASCII banner, then prompt for the server's IP, RCON port
    and RCON password and hand them to connect()."""
    print('███╗░░░███╗██╗███╗░░██╗███████╗░█████╗░██████╗░░█████╗░███████╗████████╗\n'
          '████╗░████║██║████╗░██║██╔════╝██╔══██╗██╔══██╗██╔══██╗██╔════╝╚══██╔══╝\n'
          '██╔████╔██║██║██╔██╗██║█████╗░░██║░░╚═╝██████╔╝███████║█████╗░░░░░██║░░░\n'
          '██║╚██╔╝██║██║██║╚████║██╔══╝░░██║░░██╗██╔══██╗██╔══██║██╔══╝░░░░░██║░░░\n'
          '██║░╚═╝░██║██║██║░╚███║███████╗╚█████╔╝██║░░██║██║░░██║██║░░░░░░░░██║░░░\n'
          '╚═╝░░░░░╚═╝╚═╝╚═╝░░╚══╝╚══════╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░░░░░░░╚═╝░░░\n\n'
          '░██████╗███████╗██████╗░██╗░░░██╗███████╗██████╗░\n'
          '██╔════╝██╔════╝██╔══██╗██║░░░██║██╔════╝██╔══██╗\n'
          '╚█████╗░█████╗░░██████╔╝╚██╗░██╔╝█████╗░░██████╔╝\n'
          '░╚═══██╗██╔══╝░░██╔══██╗░╚████╔╝░██╔══╝░░██╔══██╗\n'
          '██████╔╝███████╗██║░░██║░░╚██╔╝░░███████╗██║░░██║\n'
          '╚═════╝░╚══════╝╚═╝░░╚═╝░░░╚═╝░░░╚══════╝╚═╝░░╚═╝\n\n'
          '░█████╗░░█████╗░███╗░░██╗████████╗██████╗░░█████╗░██╗░░░░░██╗░░░░░███████╗██████╗░\n'
          '██╔══██╗██╔══██╗████╗░██║╚══██╔══╝██╔══██╗██╔══██╗██║░░░░░██║░░░░░██╔════╝██╔══██╗\n'
          '██║░░╚═╝██║░░██║██╔██╗██║░░░██║░░░██████╔╝██║░░██║██║░░░░░██║░░░░░█████╗░░██████╔╝\n'
          '██║░░██╗██║░░██║██║╚████║░░░██║░░░██╔══██╗██║░░██║██║░░░░░██║░░░░░██╔══╝░░██╔══██╗\n'
          '╚█████╔╝╚█████╔╝██║░╚███║░░░██║░░░██║░░██║╚█████╔╝███████╗███████╗███████╗██║░░██║\n'
          '╚════╝░░╚════╝░╚═╝░░╚══╝░░░╚═╝░░░╚═╝░░╚═╝░╚════╝░╚══════╝╚══════╝╚══════╝╚═╝░░╚═╝\n\n')
    ip = input('Введите IP-адресс сервера: ')
    rcon_port = input('Введите RCON-порт сервера: ')
    rcon_pas = input('Введите RCON-пароль сервера: ')
    connect(ip, rcon_port, rcon_pas)
# Connect to the server and run an interactive command loop.
def connect(ip, port, pas):
    """Open an RCON session to the Minecraft server and relay typed commands.

    ip/port/pas are the strings collected by start(); the port is converted
    to int here, so a non-numeric port surfaces as the ValueError handled
    below.
    """
    print('[LOG] Подключение.')
    # Try to establish the RCON connection.
    try:
        mcr = MCRcon(ip, pas, int(port))
        mcr.connect()
    # Wrong data / unreachable server / RCON disabled -> report the error.
    except OSError:
        print('Произошла ошибка. '
              'Удостоверьтесь в правильностим введёных Вами данными и повторите попытку. '
              'Если данные верны, то проблема в выключенном RCON.')
    # Letters in the port number -> report the error.
    except ValueError:
        print('Произошла ошибка. RCON-порт не может принимать буквенные значения.')
    # Connected: forward every typed command to the server forever.
    else:
        print('[LOG] Подключено.')
        print('[LOG] Ввод команд производится без знака "/".')
        while True:
            command = input()
            resp = mcr.command(command)
            print('[Сервер] ' + resp)
if __name__ == '__main__':
start()
| [
"noreply@github.com"
] | noreply@github.com |
28faa7f92cba44f87759c8d75b0649dcd9d4cc72 | 2a22b6e979c00faa87a58d926d09201bf7a97c71 | /src/mtgdeck/wsgi.py | 092ae564c09a28218a93acb674aa321b12a319df | [] | no_license | mattdeboard/mtgdeck | 5d244f856312aecd1ba1318e67fd6bac16790395 | e03dbe737a3919bc118d5a8967cfa523592f81db | refs/heads/main | 2023-04-05T17:56:39.811386 | 2021-04-24T03:30:42 | 2021-04-24T03:30:42 | 361,061,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for mtgdeck project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mtgdeck.settings')
# Module-level WSGI callable that application servers import and call.
application = get_wsgi_application()
| [
"matt.deboard@gmail.com"
] | matt.deboard@gmail.com |
a140d89b7aff1f78dd5c192b636f74d6b5203b5f | 42b95abbad6b8c46215dadfb1f53b0514c16c0ea | /aula08/anotacoes_aula08.py | 39bfb30506db5a0b508a0d810bd9d1fefb681917 | [] | no_license | ricardoapalhares/python_aulas_era_conectada | 7667776402d57215cef3fcb07d9f47c524ceaf91 | cd75e80f51755ea51615c21e28e0df3f0f63b110 | refs/heads/master | 2022-12-15T20:47:47.420000 | 2019-05-10T00:59:42 | 2019-05-10T00:59:42 | 156,447,934 | 1 | 0 | null | 2022-12-08T05:03:34 | 2018-11-06T21:07:51 | Python | UTF-8 | Python | false | false | 285 | py | lista1 = [1,2,3]
lista2 = list(lista1)
# == compares contents: the two lists hold equal elements -> True.
print(lista1 == lista2)
# "is" compares identity: list() created a new object -> False.
print(lista1 is lista2)
#-------------------
def soma(num1,num2):
    """Return the sum of the two numbers."""
    total = num1 + num2
    return total
class Calculadora():
    """Tiny calculator exposing addition as a method."""

    def soma(self, num1, num2):
        """Return num1 + num2."""
        return num1 + num2
print(soma(1,2))  # plain function call -> 3
calc = Calculadora()
print(calc.soma(2,3))  # same addition through the class -> 5
"jrcjuniorcesar@gmail.com"
] | jrcjuniorcesar@gmail.com |
8848eede4ef7c7f72488ecaa4396d81926ddcdcd | cc7bff51fc29ed2f349d1130e89813c2595ecebf | /Walker.py | a441f2ae10ed46304f27ffb119961ecbf2829b3c | [
"MIT"
] | permissive | tigerw/EfsTools | 6101bd58dd4cedf2648ca6034c69606a486bc39b | 5c41f7c4e8fa0555ab8169acd0571853f47ebb59 | refs/heads/master | 2022-09-21T05:38:23.323210 | 2020-05-31T17:23:06 | 2020-05-31T17:23:06 | 268,323,470 | 0 | 0 | null | 2020-05-31T16:53:44 | 2020-05-31T16:53:43 | null | UTF-8 | Python | false | false | 1,539 | py | import os
import os.path as path
root_dir = 'MBNs'  # input tree: NV items at the top level, EFS items in subfolders
out_name = "MBNs.nvi"  # JSON-RPC output file; the process_* helpers append to it
def format_efs_rpc(dir_name, name, ascii):
    """Build one WriteEFSData JSON-RPC line for the file dir_name/name.

    The first path component (the root folder) is stripped so the EFS path
    starts at the first subfolder. *ascii* is a list of decimal byte strings.
    """
    template = r'{"jsonrpc": "2.0", "method": "WriteEFSData", "params": {"MessageVersion": 0, "FilePath": "/%s", "Data": [%s], "ItemType": "Item"}, "id": 0}'
    parts = path.normpath(path.join(dir_name, name)).split(os.sep)
    efs_path = '/'.join(parts[1:])
    return template % (efs_path, ", ".join(ascii)) + '\n'
def format_nv_rpc(id, ascii):
    """Build one WriteNVData JSON-RPC line for NV item *id*."""
    payload = ", ".join(ascii)
    template = r'{"jsonrpc": "2.0", "method": "WriteNVData", "params": {"MessageVersion": 0, "ID": %d, "SubscriptionId": 0, "NVData": [%s]}, "id": 0}'
    return template % (id, payload) + '\n'
def file_as_ascii_bytes(dir_name, item):
    """Read dir_name/item in binary mode and return its bytes as decimal strings."""
    filepath = path.join(dir_name, item)
    with open(filepath, mode="rb") as handle:
        raw = handle.read()
        return [str(value) for value in raw]
def process_nv_items(dir_name, file_list):
    """Append one WriteNVData RPC line to out_name for each NV item file."""
    for filename in file_list:
        # file names look like "<name>__<nv id>__..."; field 1 is the numeric ID
        nv_id = int(filename.split("__")[1])
        byte_strings = file_as_ascii_bytes(dir_name, filename)
        with open(out_name, 'a') as nvi:
            nvi.write(format_nv_rpc(nv_id, byte_strings))
def process_efs_items(dir_name, file_list):
    """Append one WriteEFSData RPC line to out_name for each EFS item file."""
    for filename in file_list:
        # field 0 of "<name>__..." is the EFS file name
        efs_name = filename.split("__")[0]
        byte_strings = file_as_ascii_bytes(dir_name, filename)
        with open(out_name, 'a') as nvi:
            nvi.write(format_efs_rpc(dir_name, efs_name, byte_strings))
# Start from a clean slate: drop any output file left by a previous run.
try:
    os.unlink(out_name)
except OSError:  # BUG FIX: was a bare `except:`; only "file missing"-style errors are expected here
    pass

# Files directly under root_dir are NV items; files in subfolders are EFS items.
for dir_name, subdir_list, file_list in os.walk(root_dir):
    if dir_name == root_dir:
        process_nv_items(dir_name, file_list)
        continue
    process_efs_items(dir_name, file_list)
| [
"ziwei.tiger@outlook.com"
] | ziwei.tiger@outlook.com |
6be37cc0ac42477690b61af9c479b69f340ab7b4 | 97a2a0c901d82ebd79c7d77d4264de33718d1a1b | /users/migrations/0002_auto_20181127_0228.py | c7dd09481c573645f5566c29d8b0c32b14b7bedc | [] | no_license | gaojiaxuan1998/mysite-master | 5fd9fff8967fa3aa171cb838372aca25e4e82215 | d1e6b87cc07250a64ca5749ee71f2c7f19d52613 | refs/heads/master | 2020-04-11T08:37:02.854166 | 2018-12-13T14:41:32 | 2018-12-13T14:41:32 | 161,649,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # Generated by Django 2.1.2 on 2018-11-27 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter UserProfile.password to CharField(max_length=18)."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='password',
            # NOTE(review): an 18-char plain CharField named "password" suggests
            # plaintext storage (Django's hashed passwords need far more room)
            # -- verify this is intentional. verbose_name is the user-facing
            # label (Chinese for "password").
            field=models.CharField(max_length=18, verbose_name='密码'),
        ),
    ]
| [
"43905475+gaojiaxuan1998@users.noreply.github.com"
] | 43905475+gaojiaxuan1998@users.noreply.github.com |
c40f6b94961010096fa1e43f69e3c26d32368c2c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_229/ch57_2020_04_10_21_47_51_592464.py | 1b83ba556f170dae510b6bab0604f9e0d9a59eca | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,127 | py | def verifica_progressao(lista):
r = lista[1] - lista[0]
rn = lista[2] - lista[1]
continua = True
continua2 = True
i = 0
if lista[0] != 0 and lista[1] != 0:
q = lista[1]/lista[0]
qn = lista[2]/lista[1]
if qn == q:
while continua == True:
while i < len(lista):
if lista[i] != 0:
qn = lista[i+1]/lista[i]
if qn != q:
continua = False
break
else:
i += 1
break
else:
continua = False
break
if rn == r:
i = 0
while continua2 == True:
while i < len(lista):
rn = lista[i+1] - lista[i]
if rn != r:
continua2 = False
break
else:
i += 1
break
return "AG"
return "PG"
else:
return "PG"
elif rn == r:
i = 0
while continua2 == True:
while i < len(lista):
rn = lista[i+1] - lista[i]
if rn != r:
continua2 = False
break
else:
i += 1
break
return "PA"
else:
return "NA"
else:
if rn == r:
while continua2 == True:
while i < len(lista):
rn = lista[i+1] - lista[i]
if rn != r:
continua = False
break
else:
i += 1
break
return "PA"
else:
return "NA" | [
"you@example.com"
] | you@example.com |
e8f5af39b518fe0ceaa644b3df0e7147f104418d | 0462732ae54ede10362e0b8d894698f4839e2ba4 | /HFS_SF/CMO/NetypesfromGraph_v0.py | fc85b4b2d88756c85d0bd99f006139bd0f48bb37 | [] | no_license | PeterWanng/hfs | 7335a563cee90b6bd2ce470251936f2fd038e8fb | 10737634047f899a210f84397b1bce801656518e | refs/heads/master | 2021-07-10T06:13:03.429313 | 2017-10-02T16:08:53 | 2017-10-02T16:08:53 | 105,551,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50,600 | py | #encoding=utf8
import sys
sys.path.append("..\..")
from tools import commontools
from matplotlib import pyplot as plt
import numpy as np
import igraph as ig
from igraph import clustering as clus
import re
import time
import os
from tools import commontools as gtf
import csv
import scipy as sp
from scipy import stats
gt=gtf()
class NetypesfromGraph2():
    # Scratch/demo variant: the statements below execute once, at import
    # time, while the class body is being evaluated (Python 2 print syntax).
    def geomean(nums):
        # NOTE(review): no `self` parameter -- only usable as a plain
        # function from within the class body (as below), not on instances.
        return sp.stats.mstats.gmean(a=nums)
        # previous hand-rolled version:
        # return (reduce(lambda x, y: x*y, nums))**(1.0/len(nums))
    nums = (1,2,3,4,5)
    print geomean(nums)
class NetypesfromGraph():
    def geomean(self,nums):
        """Geometric mean: the len(nums)-th root of the product of nums."""
        product = 1
        for value in nums:
            product = product * value
        return product ** (1.0 / len(nums))
def mode(self,numbers):
'''Return the mode of the list of numbers.'''
#Find the value that occurs the most frequently in a data set
freq={}
for i in range(len(numbers)):
try:
freq[numbers[i]] += 1
except KeyError:
freq[numbers[i]] = 1
max = 0
mode = None
for k, v in freq.iteritems():
if v > max:
max = v
mode = k
return mode
def kmeans(self,x,k=4,show=False,runtimes=100):
import numpy as np
import pylab as pl
from sklearn.cluster import KMeans,MiniBatchKMeans
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets.samples_generator import make_blobs
# np.random.seed(0)
# centers = [[1,1], [-1,-1], [1, -1], [-1, 1]]
# k = len(centers)
# x , labels = make_blobs(n_samples=3000, centers=centers, cluster_std=.7)
# print x
kmeans = KMeans(init='k-means++', n_clusters=k, n_init = runtimes)
t0 = time.time()
kmeans.fit(x)
t_end = time.time() - t0
# print kmeans.labels_
if show:
colors = ['r', 'b', 'g','y','c','m','k','r', 'b', 'g','y','c','m','k']
for k , col, label in zip( range(k) , colors,kmeans.labels_):
members = (kmeans.labels_ == k )
# pl.plot( x[members, 0] , x[members,1] , 'w', markerfacecolor=col, marker='.')
# pl.plot( x[members, 0] , x[members,1] , 'w', markerfacecolor=col, marker='.')
pl.plot(kmeans.cluster_centers_[k,0], kmeans.cluster_centers_[k,1], 'o', markerfacecolor=col,\
markeredgecolor='k', markersize=10)
xy = zip(*x)
# pl.plot(xy[2],xy[1])
# print xy[0],xy[1]
pl.plot(xy[0],xy[1],linestyle='o',markerfacecolor=colors[label], markeredgecolor=colors[label], markersize=4)
pl.show()
return kmeans.labels_,kmeans.cluster_centers_,kmeans.fit
def insertFig(self,figpath,figInserto):
import PIL.Image as Image
im = Image.open(figpath)#r'G:\HFS\WeiboData\HFSWeiboGMLNew\shape\3455798066008083.coc.gml.png')
orginSize = im.size
height = im.size[1]
# im.resize([orginSize[0]/1,orginSize[1]/2])
im.thumbnail([orginSize[0]/1,orginSize[1]/2])
# im.show()
# We need a float array between 0-1, rather than
# a uint8 array between 0-255
# With newer (1.0) versions of matplotlib, you can
# use the "zorder" kwarg to make the image overlay
# the plot, rather than hide behind it... (e.g. zorder=10)
figInserto.figimage(im, 10, 80)#fig.bbox.ymax - height)
return figInserto
def openfig(self,figpath,label):
import PIL.Image as Image
im = Image.open(figpath)#r'G:\HFS\WeiboData\HFSWeiboGMLNew\shape\3455798066008083.coc.gml.png')
im.show()
time.sleep(1)
def indexofavg(self,lista,sorted=True,avg=None,geomean=0,harmean=0):
"input: number list; need sorted or not; avg value have provided or not"
"output:avg value index; the distance between avg"
indexnum,dis = 0,0
if sorted:
lista.sort()
if not avg:
if not geomean and not harmean:
avg = np.average(lista)
if geomean:
avg = geomean(lista)
if harmean:
avg = stats.hmean(lista)
for i in range(len(lista)):
if lista[i]>=avg:
indexnum = i
if i<len(lista)-1 and i>0:
dis = lista[i]-lista[i-1]
break
return indexnum,dis
def getCorePart_deg(self,g,condition):
g.delete_vertices(g.vs.select(_degree_lt=condition))
return g
    def getCorePart_inoutdeg(self,g,condition):
        """Intended: restrict g to vertices with both in- and out-degree > 0."""
        # g.vs["inoutdeg"] = g.indegree()*g.outdegree()
        # g.delete_vertices(g.vs.select(_inoutdeg_lt=condition))
        # NOTE(review): the select() result below is discarded and `condition`
        # is unused, so this method currently returns g unchanged -- confirm
        # whether the selected vertices were meant to be kept or deleted.
        g.vs.select(_indegree_gt=0,_outdegree_gt=0)
        return g
def netC(self,graphme):
"connectivity"
print gg.reciprocity(ignore_loops=True, mode="ratio")#one value;mode-default,ratio; Reciprocity defines the proportion of mutual connections in a directed graph.
    def netCentrity(self,graphme):
        """Per-vertex centrality vectors for *graphme*.

        Returns a list of six per-vertex metric lists, in this order: degree,
        betweenness, coreness, closeness, eccentricity, pagerank. Consumers
        (featuresofgraph) average across axis=1, so the order matters.
        """
        result = []
        gg = graphme.simplify(multiple=True, loops=True, combine_edges=None)#Simplifies a graph by removing self-loops and/or multiple edges.
        "centrility"
        result.append(gg.degree())
        result.append(gg.betweenness(vertices=None, directed=True, cutoff =None, weights=None,nobigint=True))
        result.append(gg.coreness(mode='ALL'))#from gg.k_core();same as result.append(gg.shell_index(mode='ALL')---Reference: Vladimir Batagelj, Matjaz Zaversnik: An O(m) Algorithm for Core Decomposition of Networks.
        result.append(gg.closeness(vertices=None, mode='ALL', cutoff =None, weights=None))#cutoff
        # result.append(gg.eigenvector_centrality(directed=True, scale=True, weights=None,return_eigenvalue=False))#only suits undirected graphs; pagerank and hits for directed.
        result.append(gg.eccentricity(vertices=None, mode='ALL'))
        # result.append(gg.radius(mode="ALL"))#one value
        result.append(gg.pagerank(vertices=None, directed=True, damping=0.85, weights=None))#,arpack_options=None
        return result
def net_tree_star_line(self,graphme):
result = []
gg = graphme.simplify(multiple=True, loops=True, combine_edges=None)#Simplifies a graph by removing self-loops and/or multiple edges.
indeg = gg.indegree()
outdeg = gg.outdegree()
indegavg = np.average(indeg)
outdegavg = np.average(outdeg)
a,b1,b2,c1,c2 = 0,0,0
"new metrics"
'(Ind-outd)**2=0;N for tree;N**2'
'Degree vs average avgdegree; Indegree Sum (D-d)**2 = 0 ignore indegree <=1; This is N for star; -N for Line'
for ind,outd in zip(*(indeg,outdeg)):
a += (ind-outd)**2
b1 +=((ind-1)-indegavg)**2
b2 +=((outd-1)-outdegavg)**2
"For pairs (a,b), sum(indega-indegb)**2=N**3;sum(outdega-outdegb)**2=N;1 for line and ** for tree??"
edgelists = gg.get_edgelist()
for pair in edgelists:
# c+=(g.indegree(pair[0])-g.indegree(pair[1]))**2
c1+=(indeg[pair[0]]-indeg[pair[1]])**2
c2+=(outdeg[pair[0]]-outdeg[pair[1]])**2
print c1,c2
result.append(a,b1,b2,c1,c2)
return result
    def analysisNet(graph):
        """Summary statistics for a graph, returned as a list of strings.

        NOTE(review): defined without `self`, so calling it as a method would
        pass the instance as `graph`. `stcs`, `ALL` and `OUT` appear undefined
        in this module, so execution seems to always raise inside the try block
        and the bare `except` returns [] -- effectively dead code as written.
        """
        try:
            g=graph
            # giant weakly-connected component
            gg=clus.VertexClustering.giant(g.clusters(mode='weak'))
            vcount=g.vcount()
            ecount=g.ecount()
            degree=gg.degree()
            indegree=gg.indegree()
            outdegree=gg.outdegree()
            # power-law fits of the three degree sequences (stcs: undefined name)
            degreePowerLawFit=stcs.power_law_fit(degree,method='auto',return_alpha_only=False)
            indegreePowerLawFit=stcs.power_law_fit(indegree, method='auto',return_alpha_only=False)
            outdegreePowerLawFit=stcs.power_law_fit(outdegree,method='auto',return_alpha_only=False)
            assorDeg=gg.assortativity(degree,directed= False)
            assorDegD=gg.assortativity(degree,directed= True)
            assorInDeg=gg.assortativity(indegree,directed= True)
            assorOutDeg=gg.assortativity(outdegree,directed= True)
            # sign flags ('1'/'-1') for the assortativity values
            assorDegF='1' if assorDeg>0 else '-1'
            assorInDegF='1' if assorInDeg>0 else '-1'
            assorOutDegF= '1' if assorOutDeg>0 else '-1'
            "centrility"
            # centrality calls whose results are computed and discarded
            gg.degree()
            gg.betweenness(vertices=None, directed=True, cutoff =None, weights=None,nobigint=True)
            gg.kcore()
            gg.shell_index(mode=ALL)
            gg.coreness(mode=ALL)#Reference: Vladimir Batagelj, Matjaz Zaversnik: An O(m) Algorithm for Core Decomposition of Networks.
            gg.closeness(vertices=None, mode=ALL, cutoff =None, weights=None)#cutoff
            gg.eigenvector_centrality(directed=True, scale=True, weights=None,return_eigenvalue=False, arpack_options=None)
            gg.eccentricity(vertices=None, mode=ALL)
            gg.radius(mode=OUT)
            gg.pagerank(vertices=None, directed=True, damping=0.85, weights=None,arpack_options=None)
            # stringified summary row: sizes, density, component counts,
            # assortativity values/flags, then the three power-law fits
            return [str(vcount),\
                    str(ecount),\
                    str(g.density()),\
                    str(len(g.clusters(mode='weak'))),\
                    str(len(g.clusters(mode='strong'))),\
                    str(gg.vcount()),\
                    str(gg.ecount()),\
                    str((ecount*2)/float(vcount)),\
                    str(gg.transitivity_undirected(mode='0')) ,\
                    str(gg.average_path_length()),\
                    str(gg.diameter()),\
                    str(assorDeg),\
                    str(assorDegD),\
                    str(assorInDeg),\
                    str(assorOutDeg),\
                    str(assorDegF),\
                    str(assorInDegF),\
                    str(assorOutDegF),\
                    str(degreePowerLawFit.alpha),\
                    str(degreePowerLawFit.xmin),\
                    str(degreePowerLawFit.p),\
                    str(degreePowerLawFit.L),\
                    str(degreePowerLawFit.D),\
                    str(indegreePowerLawFit.alpha),\
                    str(indegreePowerLawFit.xmin),\
                    str(indegreePowerLawFit.p),\
                    str(indegreePowerLawFit.L),\
                    str(indegreePowerLawFit.D),\
                    str(outdegreePowerLawFit.alpha),\
                    str(outdegreePowerLawFit.xmin),\
                    str(outdegreePowerLawFit.p),\
                    str(outdegreePowerLawFit.L),\
                    str(outdegreePowerLawFit.D)]
        except:
            # bare except: every failure is swallowed and reported as "no stats"
            return []
    def featuresofgraph(self,graphinstance):
        """Feature vector for one graph.

        First six entries: degree-distribution statistics of the giant weak
        component (count above average, its fraction, normalized mean/std of
        the above-average part, normalized core path length, assortativity),
        NaN/inf zeroed via sp.nan_to_num; followed by per-graph averages of
        the netCentrity vectors.
        """
        result = []
        "metrics vectors"
        degree_about = []
        nodedis = []
        g = graphinstance
        # earlier variants loaded the graph here:
        # g = ig.Graph.Read_GML(gmlfolder+filen+'.coc.gml')
        # g = ig.Graph.Read_GML(gmlfolder+filen)
        # print analysisNet(g)
        # restrict the analysis to the giant weakly-connected component
        gg = clus.VertexClustering.giant(g.clusters(mode='weak'))
        # ggcore = getCorePart_indeg(gg,1)
        # ggcore2 = getCorePart_inoutdeg(gg,1)
        # gsp=ig.Graph.spanning_tree(gg)
        "Degree order list"
        deg = gg.degree()
        deg.sort()
        deglen = float(len(deg))
        # geomeanv is computed but unused below -- kept for parity with notes
        geomeanv = round(sp.stats.mstats.gmean(deg),2)#round(self.geomean(deg),2)
        avgv = round(np.average(deg),2)#,np.amax(deg),np.amin(deg)
        # (plotting experiments, previously commented out, omitted here)
        "degree sequence avg,std,nodes cnt"
        # index of the first degree >= the precomputed average
        avgdegindex = self.indexofavg(deg,sorted=False,avg = avgv,geomean=1)
        avgdegindex_above,dist_avg_above = avgdegindex[0],avgdegindex[1]
        deg_abovepart = deg[avgdegindex_above:]
        lendegabove = float(len(deg_abovepart))
        # earlier degree_about.append(...) experiments removed (see history)
        "kcore"
        # print gg.k_core()
        "big degree nodes distance"
        # core = vertices whose degree is at least the average degree
        ggcore = self.getCorePart_deg(gg,avgv)
        # gt.drawgraph(ggcore,giantornot=False)
        nodedis = ggcore.average_path_length()
        # nodedis = 0 if np.isnan(nodedis) else nodedis
        # nodedis = -1 if np.isinf(nodedis) else nodedis
        assor = ggcore.assortativity(ggcore.degree(),directed= False)
        # assor = 2 if np.isnan(assor) else assor
        # assor = -2 if np.isinf(assor) else assor
        result1 = [len(deg[avgdegindex_above:]),lendegabove/deglen,np.average(deg_abovepart)/lendegabove,np.std(deg_abovepart)/lendegabove,nodedis/lendegabove,assor]
        # replace NaN/inf (empty core, disconnected core, etc.) with finite values
        result1 = sp.nan_to_num(result1)
        netcentri = self.netCentrity(gg)
        result2 = sp.average(netcentri,axis=1)#sp.stats.mstats.gmean(netcentri,axis=1)#
        result.extend(result1)
        result.extend(result2)
        return result
def define_metrics(self,gmlfolder,pngfolder):
result = []
for filen in os.listdir(pngfolder):
filen = str(filen).replace('.png','')
if os.path.isfile(gmlfolder+filen) and os.path.splitext(filen)[-1]=='.gml' and os.stat(gmlfolder+filen).st_size<500000:
resultone = [filen]
filesize = os.path.getsize(gmlfolder+filen)
print filen,filesize
g = ig.Graph.Read_GML(gmlfolder+filen)
resultone.extend(sp.nan_to_num(self.featuresofgraph(g)))
result.append(resultone)
return result
'----------------------------------------------------------------------------------------------------------------------------------'
def classfig(self,labels,x,z):
# datafig = zip(*(xy,labels))
# datafig.sort(key=lambda x:x[1])
# for d,l in datafig:
# print d,l
# print xyz,labels
# labelsdis = np.histogram(labels, range=None, normed=True, weights=None, density=None)#,bins=(numpy.max(lista)+1)/binsdivide)
# print labelsdis
lenlabel = len(labels)
labelfig = zip(*(z,labels))
labelfig.sort(key=lambda x:x[1])
gt.createFolder(folderName='G:\\HFS\\WeiboData\\HFSWeiboGMLNew\\shape\\tesTypes',keepold=False)
# figs = plt.figure()
i = 0
for fig,label in labelfig:
i+=1
# print fig,label
# ax = figs.add_subplot(3,lenlabel/3,i)
# plt.subplot(3,lenlabel/3,i)
figpath = 'G:\\HFS\\WeiboData\\HFSWeiboGMLNew\\test\\'+ fig[0] +'.png'
gt.copyfile(figpath,'G:\\HFS\\WeiboData\\HFSWeiboGMLNew\\shape\\tesTypes\\'+ str(label)+'_'+fig[0] +'.png')
# openfig(figpath,label)
# insertFig(figpath,figs)
os.startfile('G:\\HFS\\WeiboData\\HFSWeiboGMLNew\\shape\\tesTypes\\')
# plt.show()
def start(self,graphinstance):
resultone = ['filen']
g = graphinstance#ig.Graph.Read_GML(gmlfolder+filen)
resultone.extend(self.featuresofgraph(g))
return resultone
if __name__=='__main__':
    # Entry point: compute metrics for every thumbnailed graph, then (below)
    # normalise, cluster with k-means, and sort the thumbnails by cluster.
    # (Several large commented-out sample-data literals that used to live
    # here have been removed; the pipeline recomputes them from disk.)
    gmlfolder = 'G:\\HFS\\WeiboData\\HFSWeiboGMLNew\\'#shape\\test\\
    pngfolder = 'G:\\HFS\\WeiboData\\HFSWeiboGMLNew\\test\\'
    netky = NetypesfromGraph()
    # x: one row per graph -> [filename, metric_1, metric_2, ...]
    x = netky.define_metrics(gmlfolder,pngfolder)
    # print len(x),x
# x = [['3342670838100183.coc.gml', 0.05263157894736842, 18.0, 0.0, 0], ['3343740313561521.coc.gml', 0.07865168539325842, 1.9183673469387756, 3.5487912308747034, 0.17857142857142858], ['3343744527348953.coc.gml', 0.0036363636363636364, 267.0, 0.0, 0], ['3343901805640480.coc.gml', 0.10619469026548672, 1.0138888888888888, 1.6859417817226146, 0.13333333333333333], ['3344178035788881.coc.gml', 0.04395604395604396, 5.9375, 9.1299216179548885, 0.3125], ['3344204856189380.coc.gml', 0.10855263157894737, 0.54545454545454541, 0.67066170866769925, 0.07248484848484849], ['3344605319892924.coc.gml', 0.04878048780487805, 2.890625, 4.5789248448052735, 0.17307692307692307], ['3344617189676598.coc.gml', 0.0364963503649635, 5.6399999999999997, 9.8804048500048829, 0.24], ['3344631446304834.coc.gml', 0.17307692307692307, 0.43072702331961593, 0.32572449008846815, 0.06951566951566951], ['3345283975088597.coc.gml', 0.0673076923076923, 2.1224489795918369, 3.7427703459546207, 0.21428571428571427], ['3345341063735706.coc.gml', 0.04918032786885246, 3.5833333333333335, 6.672046440463653, 0.16666666666666666], ['3345672913760585.coc.gml', 0.025477707006369428, 9.6875, 15.48020409264684, 0.3125], ['3346041476969222.coc.gml', 0.03064066852367688, 2.9256198347107438, 8.0741137017537579, 0.09090909090909091], ['3346361808667289.coc.gml', 0.19889502762430938, 0.26774691358024694, 0.19368820951258611, 0.07207854406130268], ['3346671720159783.coc.gml', 0.031578947368421054, 10.666666666666666, 14.142135623730951, 0.3333333333333333], ['3346786119768056.coc.gml', 0.07692307692307693, 12.0, 0.0, 0], ['3347020320429724.coc.gml', 0.00823045267489712, 61.25, 31.25, 0], ['3347114865931646.coc.gml', 0.009615384615384616, 33.666666666666664, 46.197643037521111, 0.3333333333333333], ['3347122272192199.coc.gml', 0.04966887417218543, 1.5911111111111111, 5.0808495449841296, 0.07111111111111111], ['3348202183182981.coc.gml', 0.152317880794702, 0.40831758034026466, 0.43699609154782276, 0.10062111801242236], 
['3356800646950624.coc.gml', 0.037037037037037035, 10.0, 12.492960981051453, 0.3333333333333333], ['3356881155164816.coc.gml', 0.015873015873015872, 12.68, 17.832375052134811, 0.2], ['3358716283896811.coc.gml', 0.1864406779661017, 1.2727272727272727, 1.0466216984729966, 0.14545454545454548], ['3363356413828548.coc.gml', 0.025210084033613446, 6.4722222222222223, 12.39863219258131, 0.16666666666666666], ['3367472590570390.coc.gml', 0.10126582278481013, 0.85546875, 1.5339133008903851, 0.11554621848739496], ['3368776344558652.coc.gml', 0.038461538461538464, 6.6875, 9.5759709037778507, 0.25], ['3369168951009868.coc.gml', 0.16304347826086957, 1.1822222222222223, 0.78333254531087237, 0.23055555555555557], ['3369278306978444.coc.gml', 0.10666666666666667, 1.296875, 2.2646549647518053, 0.125], ['3369886157847997.coc.gml', 0.04827586206896552, 3.3469387755102038, 5.0920718934696145, 0.16071428571428573], ['3370126999415642.coc.gml', 0.10232558139534884, 0.62809917355371903, 1.1553485496404718, 0.08645276292335115], ['3370187475368354.coc.gml', 0.20952380952380953, 0.3264462809917355, 0.39971730328605265, 0.06304985337243402], ['3370242220657016.coc.gml', 0.16153846153846155, 0.21712018140589567, 0.63028785398679887, 0.03552532123960696], ['3370848283881337.coc.gml', 0.23958333333333334, 0.55576559546313797, 0.3573728882194952, 0.13000852514919012], ['3371095383919407.coc.gml', 0.0995850622406639, 0.5, 1.4114999737605121, 0.06140350877192982], ['3371320634873316.coc.gml', 0.13756613756613756, 0.34319526627218938, 0.81099094495818724, 0.057692307692307696], ['3371353334212131.coc.gml', 0.03153988868274583, 1.9550173010380623, 6.8644379678601455, 0.07058823529411765], ['3371452671936848.coc.gml', 0.20833333333333334, 0.48749999999999999, 0.53639421137816168, 0.0891891891891892], ['3372030013618565.coc.gml', 0.08776595744680851, 0.39302112029384756, 1.1388429603981345, 0.0361952861952862], ['3372087507207341.coc.gml', 0.09523809523809523, 2.0277777777777777, 3.416214965624679, 
0.16666666666666666], ['3372437473180477.coc.gml', 0.014925373134328358, 66.0, 0.0, 0], ['3373230205208545.coc.gml', 0.05555555555555555, 3.6399999999999997, 5.3056950534307941, 0.24], ['3373235549224874.coc.gml', 0.15841584158415842, 0.84765625, 0.80464009265692038, 0.1568627450980392], ['3374640441380743.coc.gml', 0.1619047619047619, 0.90311418685121114, 0.97389939058828379, 0.12802768166089964], ['3376476919830770.coc.gml', 0.04477611940298507, 7.666666666666667, 9.428090415820634, 0.3333333333333333], ['3379543500467226.coc.gml', 0.050359712230215826, 3.1836734693877551, 4.2691614253037979, 0.14285714285714285], ['3380021664523673.coc.gml', 0.1111111111111111, 1.6111111111111109, 2.8571979712497311, 0.19444444444444445], ['3381430950351222.coc.gml', 0.058823529411764705, 2.5306122448979593, 5.4988545246152967, 0.16326530612244897], ['3381830646827771.coc.gml', 0.16666666666666666, 1.6265432098765433, 1.1893453542136936, 0.2148846960167715], ['3384338781194861.coc.gml', 0.1864951768488746, 0.18430439952437574, 0.32827711883233557, 0.03694581280788177], ['3389552931061050.coc.gml', 0.18543046357615894, 0.84693877551020413, 0.96809796042270124, 0.11401743796109994], ['3390492127727272.coc.gml', 0.18478260869565216, 0.52249134948096887, 0.57595568874735348, 0.08144796380090498], ['3391344838326450.coc.gml', 0.12686567164179105, 1.4982698961937715, 1.7941168128716447, 0.1718266253869969], ['3393543211128758.coc.gml', 0.05185185185185185, 1.3979591836734695, 3.0183735563126923, 0.10902255639097745], ['3395625792060467.coc.gml', 0.06726457399103139, 1.0933333333333333, 2.9701527582613334, 0.10303030303030303], ['3397758248007683.coc.gml', 0.17277486910994763, 0.37373737373737376, 0.39557241991530395, 0.057323232323232325], ['3397769022933580.coc.gml', 0.07100591715976332, 1.3611111111111109, 2.6094853094522135, 0.09895833333333333], ['3398499611337996.coc.gml', 0.050314465408805034, 3.21875, 5.4632114811253647, 0.2533783783783784], ['3399163027809032.coc.gml', 
0.2185430463576159, 0.38016528925619836, 0.29803886442871774, 0.09672887818583183], ['3400632889381253.coc.gml', 0.22972972972972974, 0.42906574394463665, 0.29585280960486099, 0.11250713877784124], ['3400751558944628.coc.gml', 0.1881720430107527, 0.46693877551020407, 0.41952036237110313, 0.064010989010989], ['3402776098684661.coc.gml', 0.11650485436893204, 2.8194444444444446, 3.7519799299918604, 0.19927536231884058], ['3403267493072603.coc.gml', 0.15151515151515152, 0.66749999999999998, 0.65273941967679561, 0.15459770114942528], ['3403863023733701.coc.gml', 0.07407407407407407, 3.625, 4.4034787384521339, 0.3], ['3404274883505106.coc.gml', 0.20967741935483872, 0.53698224852071008, 0.47996746983708927, 0.08361204013377926], ['3425350770267160.coc.gml', 0.10714285714285714, 0.75111111111111117, 1.6281717384466983, 0.08070175438596491], ['3428308005526158.coc.gml', 0.1728395061728395, 0.36734693877551022, 0.65596452855612941, 0.06689342403628118], ['3428528739801892.coc.gml', 0.08849557522123894, 1.6100000000000001, 2.0265487904316539, 0.1588235294117647], ['3429334025391583.coc.gml', 0.09090909090909091, 1.7959183673469388, 3.0609523688575799, 0.21428571428571427], ['3429588258743940.coc.gml', 0.08379888268156424, 1.1422222222222222, 1.8763031685329836, 0.11891891891891891], ['3430283762648349.coc.gml', 0.0830860534124629, 0.56122448979591832, 1.7855575732984685, 0.06373626373626375], ['3430335716843081.coc.gml', 0.09482758620689655, 1.2892561983471074, 2.2445587204312196, 0.21212121212121213], ['3430668065297419.coc.gml', 0.1276595744680851, 0.83024691358024694, 1.1116468187316051, 0.1111111111111111], ['3431023705030524.coc.gml', 0.18686868686868688, 0.35938641344046746, 0.32131946600325445, 0.07380457380457381], ['3431094353698885.coc.gml', 0.15018315018315018, 0.23795359904818561, 0.73976880788105981, 0.03864428254672157], ['3431737105763161.coc.gml', 0.1597222222222222, 0.21455576559546313, 0.48105833965723965, 0.03595317725752508], ['3432170243211946.coc.gml', 
0.10152284263959391, 0.59250000000000003, 1.5129668700933276, 0.07037037037037037], ['3432525189989140.coc.gml', 0.040268456375838924, 4.416666666666667, 5.4863044972947899, 0.22916666666666666], ['3433746454019952.coc.gml', 0.24603174603174602, 0.56919875130072839, 0.47533563803802376, 0.1032258064516129], ['3434292485521437.coc.gml', 0.21, 0.47619047619047616, 0.35966606787419986, 0.1021505376344086], ['3435948283388972.coc.gml', 0.042735042735042736, 5.4399999999999995, 6.7709969723815409, 0.2857142857142857], ['3439812252362083.coc.gml', 0.2222222222222222, 0.27040816326530609, 0.47220759029251375, 0.06578947368421052], ['3440602006168810.coc.gml', 0.05113636363636364, 2.4197530864197532, 4.0095146707722549, 0.1515151515151515], ['3441634194910767.coc.gml', 0.17647058823529413, 0.51700680272108845, 0.51103484895039009, 0.09523809523809523], ['3442111980741300.coc.gml', 0.26229508196721313, 0.2197265625, 0.30623449169518541, 0.052403846153846155], ['3443073797743047.coc.gml', 0.12912087912087913, 0.26708918062471704, 0.74464603825551456, 0.040665751544269046], ['3443355667223867.coc.gml', 0.13584905660377358, 0.27314814814814814, 0.787050653476983, 0.04656862745098039], ['3443416153317033.coc.gml', 0.21296296296296297, 0.35916824196597352, 0.36268303112255912, 0.07275953859804792], ['3443492326385825.coc.gml', 0.19834710743801653, 0.4045138888888889, 0.63184666139088097, 0.08333333333333333], ['3443673498955248.coc.gml', 0.1125, 1.2345679012345678, 2.1586235290837483, 0.1388888888888889], ['3443744265281796.coc.gml', 0.1, 1.0, 2.3610920826366995, 0.09848484848484848], ['3443805103639827.coc.gml', 0.06896551724137931, 2.625, 3.3042349719715758, 0.15], ['3443829309398044.coc.gml', 0.09278350515463918, 1.0462962962962963, 1.3282116136124467, 0.10897435897435898], ['3444219576564892.coc.gml', 0.10077519379844961, 0.80473372781065089, 1.2884471021412873, 0.10526315789473685], ['3445044281384443.coc.gml', 0.1956521739130435, 0.49108367626886146, 0.37805371893086698, 
0.07407407407407407], ['3445163550624397.coc.gml', 0.10638297872340426, 0.72444444444444445, 1.6333318216168362, 0.09696969696969697], ['3445566971379762.coc.gml', 0.09803921568627451, 3.5100000000000002, 3.4607658112042197, 0.2103448275862069], ['3447345331395476.coc.gml', 0.0379746835443038, 2.9753086419753085, 6.0776362133462021, 0.13333333333333333], ['3447401585338269.coc.gml', 0.16498316498316498, 0.2099125364431487, 0.38021785158953014, 0.05604850635906537], ['3447504811582619.coc.gml', 0.18023255813953487, 0.38709677419354838, 0.3608887317642987, 0.0565684899485741], ['3447758196911455.coc.gml', 0.13220338983050847, 0.34319526627218938, 0.64318152615344137, 0.042429792429792425], ['3448178085407204.coc.gml', 0.1409090909090909, 0.64412070759625395, 0.91663466982181852, 0.07949308755760369], ['3448186452366671.coc.gml', 0.1165644171779141, 0.32132963988919672, 0.92064015090556806, 0.049877600979192166], ['3448222901573576.coc.gml', 0.17073170731707318, 0.18845663265306123, 0.50335879460640343, 0.032542293233082706], ['3448524715110674.coc.gml', 0.05993690851735016, 0.94182825484764543, 1.5443952381603805, 0.08204334365325078], ['3448580884997872.coc.gml', 0.04639175257731959, 3.1481481481481479, 4.0071951747730337, 0.15873015873015872], ['3449158323300895.coc.gml', 0.22900763358778625, 0.3611111111111111, 0.34438171471798379, 0.06467661691542288], ['3451171018291015.coc.gml', 0.08, 1.3500000000000001, 1.9085334683992314, 0.12727272727272726], ['3451840329079188.coc.gml', 0.05, 3.4166666666666665, 6.2255075652065868, 0.19444444444444445], ['3452192143211197.coc.gml', 0.07975460122699386, 1.0828402366863905, 2.5330696795253158, 0.09615384615384616], ['3452512994803254.coc.gml', 0.1282051282051282, 0.83111111111111113, 1.2328507564398543, 0.09393939393939395], ['3452821297309374.coc.gml', 0.03773584905660377, 9.0625, 8.9989148651379072, 0.35], ['3452834043619851.coc.gml', 0.08737864077669903, 0.72222222222222221, 1.6450240886991743, 0.07333333333333333], 
['3455798066008083.coc.gml', 0.2972972972972973, 0.60330578512396693, 0.14644665410470536, 0.2830578512396694], ['3455801434063025.coc.gml', 0.10734463276836158, 0.57340720221606656, 0.97091313951921798, 0.0953058321479374], ['3456392600040737.coc.gml', 0.22, 2.4049586776859502, 0.74801378505684368, 0.21212121212121213], ['3457216382606769.coc.gml', 0.06756756756756757, 1.8199999999999998, 2.8180844557961704, 0.13076923076923078], ['3457458590948980.coc.gml', 0.1532258064516129, 0.51800554016620504, 0.65453728507845621, 0.10141206675224647], ['3458260155974734.coc.gml', 0.10810810810810811, 0.37244897959183676, 1.0356011801114511, 0.047619047619047616], ['3458382231009304.coc.gml', 0.18125, 0.36860879904875149, 0.34638216144788825, 0.05757389162561576], ['3459709472618541.coc.gml', 0.1282051282051282, 1.1699999999999999, 1.5943964375273798, 0.14666666666666667], ['3461761758135160.coc.gml', 0.16891891891891891, 0.61280000000000001, 0.76220480187414197, 0.09055555555555556], ['3462527801769811.coc.gml', 0.08737864077669903, 1.4567901234567902, 3.0222968602096394, 0.1234567901234568], ['3464345767606270.coc.gml', 0.08284023668639054, 1.1836734693877553, 3.1049093879486933, 0.07619047619047618], ['3464705244725034.coc.gml', 0.016286644951140065, 12.640000000000001, 20.083585337284774, 0.24], ['3464854910351381.coc.gml', 0.09900990099009901, 1.29, 2.1252999788265186, 0.17142857142857143], ['3466348208670198.coc.gml', 0.07971014492753623, 2.4628099173553717, 1.9356494028492433, 0.21867321867321865], ['3466382715287291.coc.gml', 0.07534246575342465, 1.3719008264462811, 2.5167969296420534, 0.14285714285714285], ['3466382765276535.coc.gml', 0.08928571428571429, 0.8355555555555555, 1.5370711285822096, 0.07916666666666666], ['3466578031231644.coc.gml', 0.1987179487179487, 0.2351716961498439, 0.51399098970413459, 0.04996837444655282], ['3466699703809713.coc.gml', 0.1258741258741259, 0.71604938271604945, 0.90644107443874156, 0.10069444444444445], ['3466705462858968.coc.gml', 
0.17123287671232876, 0.33119999999999999, 0.48325827463169219, 0.058536585365853655], ['3466749821485858.coc.gml', 0.20833333333333334, 0.19506172839506175, 0.33066882014727184, 0.03950617283950617], ['3469573347006484.coc.gml', 0.041666666666666664, 6.125, 9.1660037639093304, 0.25], ['3472429655943907.coc.gml', 0.12598425196850394, 1.796875, 1.9160225933232102, 0.14760638297872342], ['3475083715093089.coc.gml', 0.07407407407407407, 1.074074074074074, 1.819155805961898, 0.08444444444444445], ['3476098195491864.coc.gml', 0.09259259259259259, 2.7600000000000002, 2.7521627858831317, 0.24], ['3476515322550681.coc.gml', 0.14285714285714285, 0.89619377162629754, 1.0113400194564059, 0.10561497326203208], ['3476909163641066.coc.gml', 0.19246861924686193, 0.33790170132325142, 0.34323848806463236, 0.04762945160585537], ['3477038419958946.coc.gml', 0.09734513274336283, 1.0909090909090908, 1.7335683440828951, 0.12987012987012989], ['3478679923748497.coc.gml', 0.14893617021276595, 0.72576530612244905, 1.338215680415326, 0.08048289738430583], ['3479445619333773.coc.gml', 0.04504504504504504, 4.6399999999999997, 6.0191693779125366, 0.2], ['3479624388879132.coc.gml', 0.0755813953488372, 1.2011834319526626, 3.1188764800055164, 0.09615384615384616], ['3481902797536816.coc.gml', 0.11538461538461539, 1.1405895691609977, 1.2421420163518202, 0.14107142857142857], ['3481923777474913.coc.gml', 0.10714285714285714, 0.76000000000000001, 1.4625801655775112, 0.09206349206349206], ['3481928609200009.coc.gml', 0.1792452830188679, 0.445983379501385, 0.62048624839279376, 0.0756578947368421], ['3482159342150289.coc.gml', 0.1834862385321101, 0.67500000000000004, 0.50830601019464638, 0.10500000000000001], ['3483267271112598.coc.gml', 0.0958904109589041, 1.1428571428571428, 1.7240368925858529, 0.10119047619047619], ['3484321828620387.coc.gml', 0.09950248756218906, 0.78000000000000003, 1.0575206853768866, 0.07857142857142857], ['3486178575789465.coc.gml', 0.0880952380952381, 0.49817384952520083, 
1.04649837414053, 0.05192833282720923], ['3487349066960837.coc.gml', 0.09615384615384616, 2.6399999999999997, 2.940476151918257, 0.3142857142857143], ['3488143468769060.coc.gml', 0.16721311475409836, 0.23414071510957324, 0.46849160913007198, 0.033655253146034535], ['3489084565802342.coc.gml', 0.16621253405994552, 0.19940876108572964, 0.34078535375129682, 0.02275211127670144], ['3489089556938378.coc.gml', 0.15056818181818182, 0.24136703453186187, 0.49170607858474752, 0.03787158952083616], ['3489092799395971.coc.gml', 0.05660377358490566, 3.9722222222222219, 3.5097966948964685, 0.2142857142857143], ['3489137279411789.coc.gml', 0.18357487922705315, 0.18698060941828257, 0.43951015298468638, 0.03475670307845084], ['3489148290157520.coc.gml', 0.21052631578947367, 0.24691358024691359, 0.49667868844449725, 0.0595679012345679], ['3489586389438248.coc.gml', 0.2032967032967033, 0.30241051862673485, 0.19878987916836433, 0.06187766714082503], ['3489804664943100.coc.gml', 0.12844036697247707, 0.6428571428571429, 1.3681082254770913, 0.08843537414965986], ['3491356209051281.coc.gml', 0.1322314049586777, 1.5859375, 1.8462476461985673, 0.1340909090909091], ['3491608849042650.coc.gml', 0.14457831325301204, 0.71354166666666663, 0.69527511083410021, 0.10101010101010101], ['3492678328811636.coc.gml', 0.16455696202531644, 0.27071005917159763, 0.37222738773266684, 0.05257242757242757], ['3492682624290826.coc.gml', 0.18421052631578946, 0.91581632653061218, 0.95646937382075115, 0.0967741935483871], ['3492684352482154.coc.gml', 0.1388888888888889, 0.36333333333333334, 0.79704779353887245, 0.06519607843137255], ['3492805542177005.coc.gml', 0.23741007194244604, 0.35537190082644626, 0.24142041102156081, 0.09987770077456176], ['3493173752439900.coc.gml', 0.08928571428571429, 1.26, 1.7356266879718114, 0.14375], ['3494236496496836.coc.gml', 0.23214285714285715, 1.4378698224852071, 1.3665436751856521, 0.14219114219114218], ['3495157817228723.coc.gml', 0.16042780748663102, 0.79555555555555557, 
0.69605945538588776, 0.06930693069306931], ['3495425338709660.coc.gml', 0.18452380952380953, 0.29448491155046824, 0.28557124132440809, 0.04985337243401759], ['3495908950319195.coc.gml', 0.6666666666666666, 1.25, 0.25, 0.5], ['3497476986739290.coc.gml', 0.14545454545454545, 0.5849609375, 0.73267899132805503, 0.0756340579710145], ['3498227716765728.coc.gml', 0.25225225225225223, 0.5982142857142857, 0.50765145882220697, 0.0888704318936877], ['3501198583561829.coc.gml', 0.3, 8.6666666666666661, 3.6616126785075735, 0.4166666666666667], ['3504355070314763.coc.gml', 0.058823529411764705, 2.7551020408163263, 5.00070798819264, 0.21428571428571427], ['3505165031934094.coc.gml', 0.22580645161290322, 0.17233560090702948, 0.32727774187597236, 0.039047619047619046], ['3506858382217257.coc.gml', 0.18518518518518517, 0.36320000000000002, 0.411501834746821, 0.10400000000000001], ['3507543877973721.coc.gml', 0.125, 0.5855555555555555, 1.0501034634504436, 0.06956521739130435], ['3507607539020444.coc.gml', 0.17054263565891473, 0.60330578512396693, 0.60553759050346179, 0.10265924551638837], ['3507662178094930.coc.gml', 0.14583333333333334, 1.653061224489796, 1.1826174036286059, 0.2380952380952381], ['3507671015760502.coc.gml', 0.09473684210526316, 1.5555555555555556, 2.9574021892855007, 0.1234567901234568], ['3508035156247306.coc.gml', 0.14919354838709678, 0.56756756756756754, 0.71246796804647361, 0.06704494549798418], ['3508278808120380.coc.gml', 0.12962962962962962, 1.1428571428571428, 1.2936264483053452, 0.17857142857142858], ['3509438231211989.coc.gml', 0.11965811965811966, 0.72959183673469385, 1.2712275040017211, 0.09999999999999999], ['3509885691781722.coc.gml', 0.15267175572519084, 0.54749999999999999, 0.72482325431790606, 0.08372093023255814], ['3510108007340190.coc.gml', 0.2421875, 0.71488033298647236, 0.62158249090862083, 0.08770161290322581], ['3510150776647546.coc.gml', 0.10638297872340426, 2.9699999999999998, 2.9254230463302227, 0.2], ['3510947052234805.coc.gml', 
0.07462686567164178, 4.6399999999999997, 3.522271994040211, 0.24], ['3511918478199744.coc.gml', 0.11949685534591195, 0.80332409972299168, 1.1417716690211632, 0.09424724602203183], ['3511950958712857.coc.gml', 0.15463917525773196, 0.16194444444444445, 0.28251666071268594, 0.027864583333333335], ['3511983850692431.coc.gml', 0.22813688212927757, 0.2175, 0.33809686591380989, 0.0526071842410197], ['3512192651209611.coc.gml', 0.11666666666666667, 1.6122448979591837, 1.7772686577205914, 0.2619047619047619], ['3512225370844164.coc.gml', 0.09826589595375723, 0.74740484429065734, 1.5672410590914201, 0.08088235294117647], ['3512260125547589.coc.gml', 0.13872832369942195, 0.43055555555555558, 0.96919170535472243, 0.06912878787878789], ['3512261920248384.coc.gml', 0.17142857142857143, 0.63580246913580252, 0.39249746498714894, 0.12014453477868112], ['3512288331952105.coc.gml', 0.08, 1.2013888888888888, 2.9310080614308855, 0.10119047619047618], ['3512343789230980.coc.gml', 0.06060606060606061, 1.2040816326530612, 3.4310001116191851, 0.07653061224489796], ['3512367365458914.coc.gml', 0.09691629955947137, 1.0289256198347108, 1.695031047109739, 0.08070500927643785], ['3512387527089684.coc.gml', 0.1023391812865497, 0.36979591836734693, 0.86537389029973322, 0.04074074074074074], ['3512557425820703.coc.gml', 0.16517857142857142, 0.254200146092038, 0.58138115341278407, 0.03620601733809281], ['3512564992138128.coc.gml', 0.1891891891891892, 1.6377551020408163, 1.6402962039881677, 0.1533613445378151], ['3512586882317341.coc.gml', 0.14012738853503184, 0.47727272727272729, 0.93473641005016939, 0.06598240469208211], ['3512598488183675.coc.gml', 0.11650485436893204, 0.78472222222222221, 1.8510689896168071, 0.09027777777777778], ['3512631170591260.coc.gml', 0.08123791102514506, 0.36054421768707484, 1.469864025862113, 0.029047619047619048], ['3512638635619787.coc.gml', 0.16666666666666666, 2.46875, 2.2841377270755805, 0.1865530303030303], ['3512649558390590.coc.gml', 0.13043478260869565, 
0.88888888888888884, 1.4028465329684954, 0.1111111111111111], ['3512661431797880.coc.gml', 0.13157894736842105, 0.84000000000000008, 1.7884071124886529, 0.11000000000000001], ['3512665513423714.coc.gml', 0.09615384615384616, 1.3699999999999999, 2.7155294143131647, 0.13076923076923078], ['3512673221800564.coc.gml', 0.14563106796116504, 0.31444444444444447, 0.67919796565008184, 0.04390243902439025], ['3512681942436390.coc.gml', 0.16393442622950818, 1.1499999999999999, 0.94472218138455921, 0.16842105263157894], ['3512703459106709.coc.gml', 0.1087866108786611, 0.45266272189349116, 1.1573569710477523, 0.05673076923076924], ['3512704620407286.coc.gml', 0.2062780269058296, 0.27173913043478259, 0.21182621655007042, 0.049217391304347824], ['3512722819965166.coc.gml', 0.09803921568627451, 3.7999999999999998, 4.3432706570049264, 0.275], ['3512723826282658.coc.gml', 0.2235294117647059, 0.43490304709141275, 0.40367249267472288, 0.08819345661450924], ['3512731699677616.coc.gml', 0.07718120805369127, 0.63516068052930053, 1.9814517206876603, 0.06649616368286444], ['3512751521888667.coc.gml', 0.08522727272727272, 0.43666666666666665, 1.5662966466111026, 0.045000000000000005], ['3512753392115009.coc.gml', 0.1278772378516624, 0.26800000000000002, 0.74071316985726665, 0.0305], ['3512755191392034.coc.gml', 0.078125, 0.9555555555555556, 2.4897617517037474, 0.09206349206349206], ['3512764419413627.coc.gml', 0.23076923076923078, 0.46296296296296302, 0.37679611017362602, 0.08602150537634408], ['3512767539668338.coc.gml', 0.09090909090909091, 0.80864197530864201, 2.0486388654510161, 0.07555555555555556], ['3512956526933221.coc.gml', 0.17857142857142858, 0.8355555555555555, 0.83616998685290078, 0.184], ['3512965133226559.coc.gml', 0.175, 0.36607142857142855, 0.61856195790930657, 0.08421052631578947], ['3513008209201946.coc.gml', 0.11805555555555555, 0.60553633217993086, 1.4384395600633919, 0.07754010695187165], ['3513054618763335.coc.gml', 0.17894736842105263, 0.72664359861591699, 
0.96043047384111757, 0.09803921568627451], ['3513299721572710.coc.gml', 0.1724137931034483, 0.64222222222222214, 0.80764556460410997, 0.10112994350282485], ['3513353957580369.coc.gml', 0.0915032679738562, 0.49107142857142855, 1.3366001986786742, 0.052884615384615384], ['3513457897170153.coc.gml', 0.148, 0.32651570489408327, 0.92374828543733423, 0.05297297297297297], ['3513472585606907.coc.gml', 0.14084507042253522, 2.27, 3.6452846253756368, 0.18641975308641975], ['3513477123020136.coc.gml', 0.1794871794871795, 0.59183673469387765, 0.7425178637643427, 0.12442396313364056], ['3513651849587189.coc.gml', 0.13138686131386862, 0.97530864197530875, 1.1319378697411457, 0.0925925925925926], ['3513665519425522.coc.gml', 0.1, 1.1074380165289257, 1.7817921156038159, 0.12337662337662338], ['3513732346670068.coc.gml', 0.09059233449477352, 0.51331360946745563, 1.2600954326090696, 0.0554561717352415], ['3513732681977292.coc.gml', 0.20833333333333334, 0.47499999999999998, 0.58105507484230789, 0.09268292682926829], ['3513733382475662.coc.gml', 0.056179775280898875, 1.8899999999999999, 4.5704376158087978, 0.10909090909090909], ['3513738009766461.coc.gml', 0.205, 0.34681737061273049, 0.32963767128527172, 0.0917313215311415], ['3513747752676493.coc.gml', 0.12345679012345678, 0.47249999999999998, 1.0090187064668326, 0.06086956521739131], ['3513797614859184.coc.gml', 0.15151515151515152, 1.1040000000000001, 1.5092673719391141, 0.1075438596491228], ['3513821030864621.coc.gml', 0.09443099273607748, 0.31821170282708744, 1.1852860047347402, 0.03780964797913951], ['3514047335033747.coc.gml', 0.07906976744186046, 0.79238754325259519, 2.3024971308731628, 0.07219251336898395], ['3514054737834781.coc.gml', 0.10526315789473684, 1.1599999999999999, 1.8789358690492872, 0.1588235294117647], ['3514061079292261.coc.gml', 0.07647058823529412, 0.6375739644970414, 1.5793808627202552, 0.054945054945054944], ['3514083762166772.coc.gml', 0.06937799043062201, 0.53983353151010705, 2.1079224916700401, 
0.041379310344827586], ['3514112790871598.coc.gml', 0.1267605633802817, 1.691358024691358, 3.0696209503031242, 0.06666666666666667], ['3514202721379789.coc.gml', 0.16296296296296298, 0.57644628099173556, 0.54655301987822713, 0.07792207792207792], ['3514207033581502.coc.gml', 0.10655737704918032, 0.50591715976331364, 1.2797080695421461, 0.05098389982110912], ['3514216143529145.coc.gml', 0.22466960352422907, 0.25297962322183776, 0.28092291608425252, 0.05202312138728324], ['3514229367981237.coc.gml', 0.2427536231884058, 0.12296725328580976, 0.2169199292164827, 0.029850746268656716], ['3514287944897392.coc.gml', 0.16071428571428573, 0.49108367626886146, 0.4301062339859768, 0.09144947416552354], ['3514415834044554.coc.gml', 0.17829457364341086, 0.46502835538752363, 0.44808291543593087, 0.08779264214046822], ['3514416295764354.coc.gml', 0.15730337078651685, 0.97448979591836726, 0.89239054863624234, 0.1406926406926407], ['3514448201592653.coc.gml', 0.12015503875968993, 0.18418314255983351, 0.78146810143963086, 0.025466893039049237], ['3514574454119360.coc.gml', 0.06321839080459771, 1.4628099173553719, 3.6773423555717462, 0.09917355371900825], ['3514712136139677.coc.gml', 0.14942528735632185, 0.61538461538461542, 0.50374112460571052, 0.09419152276295134], ['3514721241880789.coc.gml', 0.06153846153846154, 1.5416666666666667, 3.8106970539570626, 0.09027777777777778], ['3516669001647040.coc.gml', 0.205761316872428, 0.33799999999999997, 0.64420183172667256, 0.028415841584158413], ['3517122988859030.coc.gml', 0.17647058823529413, 1.1111111111111112, 2.0563061692579723, 0.13383838383838384], ['3517807143042530.coc.gml', 0.15577889447236182, 0.67741935483870963, 1.2851091347565395, 0.053763440860215055], ['3518018271478014.coc.gml', 0.17261904761904762, 0.30915576694411412, 0.46822726029357581, 0.04490777866880514], ['3518037380208073.coc.gml', 0.20306513409961685, 0.17337130651477395, 0.24065398412732075, 0.06363011006289308], ['3518192234385654.coc.gml', 0.14383561643835616, 
0.2284580498866213, 0.68079179884738772, 0.03755868544600939], ['3518864421482109.coc.gml', 0.11185682326621924, 0.27839999999999998, 0.73075128463794026, 0.0316504854368932], ['3518889973774430.coc.gml', 0.21333333333333335, 0.263671875, 0.38913289241464077, 0.05296610169491525], ['3518897993492899.coc.gml', 0.24120603015075376, 0.15755208333333334, 0.27328968392212011, 0.037747524752475246], ['3518902011319515.coc.gml', 0.15725806451612903, 0.24786324786324784, 0.58485909465493868, 0.053946053946053944], ['3519083113115752.coc.gml', 0.12442396313364056, 0.41426611796982166, 0.90092655185386394, 0.053402239448751075], ['3519104033490770.coc.gml', 0.13253012048192772, 1.2954545454545454, 1.8209044697358046, 0.1242807825086306], ['3519173332815242.coc.gml', 0.1956521739130435, 0.61179698216735257, 0.47594904966646356, 0.09579504316346421], ['3519233508278922.coc.gml', 0.16666666666666666, 0.45706371191135736, 0.75057496191878204, 0.10941828254847645], ['3521836014420909.coc.gml', 0.2028985507246377, 0.23696145124716553, 0.16827474796963177, 0.0412008281573499], ['3522707796773995.coc.gml', 0.18045112781954886, 0.40104166666666669, 0.80971857429614724, 0.06060606060606061], ['3523242033543909.coc.gml', 0.2186046511627907, 0.34721593481213214, 0.31579973087493873, 0.0661694816114645], ['3526708160065364.coc.gml', 0.23333333333333334, 1.1173469387755102, 1.011453051868916, 0.1134453781512605], ['3527143084709163.coc.gml', 0.16736401673640167, 0.33875, 0.37951243392015505, 0.048], ['3527449691832220.coc.gml', 0.20238095238095238, 0.27422145328719727, 0.30135731589905534, 0.04555236728837877], ['3558226899367894.coc.gml', 0.19689119170984457, 0.20567867036011081, 0.50966941476530092, 0.04032809295967191], ['3558246365665871.coc.gml', 0.08994708994708994, 1.4221453287197232, 1.5708046834663099, 0.09313725490196079], ['3560576217397500.coc.gml', 0.14, 0.18909438775510204, 0.36129248696130328, 0.027649769585253454], ['3581830525479047.coc.gml', 0.1282051282051282, 
0.21454545454545457, 0.77492948226231451, 0.03353808353808354], ['3581866814344587.coc.gml', 0.13657407407407407, 0.22120080436656134, 0.51081929686148952, 0.038528743665909485], ['3582187498347368.coc.gml', 0.21052631578947367, 0.42750000000000005, 0.68565206190895389, 0.0696969696969697]]
    # Transpose rows -> columns: xyz[0] is the filename column, xyz[1:] the
    # metric columns (py2 zip returns a list of tuples).
    xyz = zip(*x)
    # Presumably normalises each metric column by its max (gt helper,
    # sumormax='max') — TODO confirm against gt.normlistlist.
    xyzz = gt.normlistlist(listlista=xyz[1:],metacolcount=0,sumormax='max')#xyz[1:]#
    # Transpose back: each row is one graph's normalised feature vector.
    xy = zip(*xyzz)#[1:]
    # z: list of 1-tuples, each holding one graph's filename (id column only).
    z = zip(*xyz[:1])
    print xy
    # Cluster the feature vectors into k=6 groups, then sort the thumbnail
    # images into per-cluster copies.
    labels,kmcenter,kmfit = netky.kmeans(xy,k=6)
    netky.classfig(labels,x,z)
    # if __name2__=='__main__':
    #     resultone = ['filen']
    #     g = ig.Graph.Read_GML(gmlfolder+filen)
    #     resultone.extend(featuresofgraph(g))
    #     result.append(resultone)
| [
"peterwannng@gmail.com"
] | peterwannng@gmail.com |
34b07eb971fcf1b17bd38070f6938a5bc9f06ddf | 8645f5bb6321cf1097c0ea173e1c790618e65ddb | /skipper-lib/skipper_lib/workflow/workflow_helper.py | 70061de591b13506246b6a53e853fb629e28f855 | [
"Apache-2.0"
] | permissive | gov-ai/thinking | 8102130c781d6dad0ed457fd9b27e901d1e9fca1 | 9557ee10fe62f6c91f7daf2b33c9577303bea2e5 | refs/heads/master | 2023-09-01T07:48:56.100197 | 2021-10-13T01:18:22 | 2021-10-13T01:18:22 | 415,279,387 | 2 | 0 | Apache-2.0 | 2021-10-13T01:18:23 | 2021-10-09T10:42:30 | Python | UTF-8 | Python | false | false | 284 | py | import requests
def call(task_type, url, mode):
valid = {'_sync', '_async'}
if mode not in valid:
raise ValueError("call: status must be one of %r." % valid)
r = requests.get(url + task_type + mode)
queue_name = r.json()['queue_name']
return queue_name
| [
"andrejus.baranovskis@gmail.com"
] | andrejus.baranovskis@gmail.com |
bd101d2f6fb5381a070656f43ff6aca8685ee7b2 | 632ffbde1f909b18f02890efe58d7d041c6ed5f3 | /Tools/training_logFilesCretaor.py | 114237ecda7bd82a96c68846282f44190d101f08 | [] | no_license | ankit-world/aws_deployed_premium_prediction | 56d5ba032512a7a1975a5d037ee99de9f9828d8d | e46bcc8f2228ee1f10b2a3872a92fc0a8de915ca | refs/heads/main | 2023-08-26T04:24:23.725349 | 2021-11-14T14:19:01 | 2021-11-14T14:19:01 | 422,933,098 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,276 | py | import os
# this file used to setup the logging infrastructure (dirs/files)
training_log_files = ["valuesfromSchemaValidationLog", "GeneralLog", "nameValidationLog",
"columnValidationLog", "missingValuesInColumn", "DataBaseConnectionLog", "DBInsertLog",
"TrainingMainLog", "DataTransformLog", "ModelTrainingLog", "DataTransform"]
# prediction_log_files = ['DataBaseConnectionLog' ,"valuesfromSchemaValidationLog","nameValidationLog" ,"ExportTOCSV" ,
# "PredictionLog" , 'DBtableCreateLog' , "missingValuesInColumn","DataTransform","DBInsertLog"]
prediction_log_files = ["PredictionLog"]
class LogsDirFileGenerator:
def __init__(self, training_log_files, prediction_log_files):
self.training_log_files = training_log_files
self.prediction_log_files = prediction_log_files
self.training_logs_dir = 'Training_Logs'
self.prediction_logs_dir = 'Prediction_Logs'
def create_log_dirs(self):
"""
:Task : Create the Training and prediction Logs dir
"""
try:
os.makedirs(self.training_logs_dir, exist_ok=True)
os.makedirs(self.prediction_logs_dir, exist_ok=True)
except Exception as e:
return e
def create_Training_LogsFiles(self):
"""
task : create the LogFiles to store training Info.
"""
for file_ in self.training_log_files:
try:
with open(os.path.join(self.training_logs_dir, file_ + '.txt'), mode='w') as f:
f.close()
except Exception as e:
return e
def create_Prediction_LogsFiles(self):
"""
:Task : Create the PredictionLogs files
"""
for file_ in self.prediction_log_files:
try:
with open(os.path.join(self.prediction_logs_dir, file_ + '.txt'), mode='w') as f:
f.close()
except Exception as e:
return e
if __name__ == '__main__':
a = LogsDirFileGenerator(training_log_files=training_log_files, prediction_log_files=prediction_log_files)
a.create_log_dirs()
a.create_Training_LogsFiles()
a.create_Prediction_LogsFiles()
| [
"ankitmarwahaa7@gmail.com"
] | ankitmarwahaa7@gmail.com |
60b30866749221d40116e6c03faf481a0873ce7f | 4f8dcdbda297e034ffe70636bde9ee8888ce349c | /Archive/CAD_to_GDB.py | 1dffc4a47c97cb4e9a663235dd7b26a1e2d00df7 | [] | no_license | Dewakarsonusingh/CAD-GIS-Conversion | 1b28f78788bba8d361a25a0e400051d2d803b7db | c953816a615888cc75998c49fad29e2e9201688d | refs/heads/master | 2022-12-24T01:42:04.852324 | 2018-08-17T01:33:46 | 2018-08-17T01:33:46 | 300,406,235 | 0 | 0 | null | 2020-10-01T19:58:05 | 2020-10-01T19:50:33 | null | UTF-8 | Python | false | false | 13,044 | py | # Takes a selected clean, georefrenced CAD File, places it in selected geodatabase (by floor level)
# and saves each building feature types(ie doors, window, walls, etc), ie.. polygons and polylines,
# as geodatabase feature classes. Also add class-specific fields to created GDB feature classes
#
# Sam Mix 2018
#
# Import needed libs
import arcpy as ar
import numpy as np
from Tkinter import *
import Tkinter, Tkconstants, tkFileDialog
import os, getpass
import datetime, time
import matplotlib.pyplot as plt
print "Getting started..."
# fucton to get floor level/ GDB datset
def getfloor():
floordef ={ 'FirstFloor':'F1', 'SecondFloor' :'F2', 'GroundFloor': 'F0'}
while True:
flrlvl = str(raw_input("What floor? (Ground, First, or Second)\n")).capitalize() +"Floor"
if flrlvl.strip() == 'FirstFloor'or flrlvl.strip() == 'SecondFloor' or flrlvl.strip() == 'GroundFloor':
return flrlvl.strip(), floordef[flrlvl.strip()]
break
else:
print "try again, bud."
#fuction
def CADtoGDB(CADpath, GDBpath ):
arcpy.ClearWorkspaceCache_management()
reference_scale = "1000"
CAD_name = CADpath.split("/")[-1][:-4]
print "Converting", CAD_name,"FROM", CADpath, "to be PLACED in ", out_gdb_path
try:
ar.CADToGeodatabase_conversion(CADpath, GDBpath, CAD_name, reference_scale)
except:
print "File may already be there. Attempting to replace/update"
arcpy.Delete_management(str(GDBpath)+ "/"+str(CAD_name),"DEFeatureDataset")
ar.CADToGeodatabase_conversion(CADpath, GDBpath, CAD_name, reference_scale)
#fuction for getting path of converted CAD file's polygonn
def GDB_polyGfile_loc(datasate_name):
for fc in arcpy.ListFeatureClasses(feature_dataset=datasate_name, feature_type= 'Polygon'):
path = os.path.join(arcpy.env.workspace, datasate_name, fc)
return(path)
#fuction for getting path of converted CAD file's polyLine
def GDB_polyLfile_loc(datasate_name):
for fc in arcpy.ListFeatureClasses(feature_dataset=datasate_name, feature_type= 'Polyline'):
path = os.path.join(arcpy.env.workspace, datasate_name, fc)
return(path)
#fuction for removing FID_polylineXX/polygonXX field
def FID_poly_feild_delate(fc):
fieldlist =[("{0} ".format(field.name)) for field in ar.ListFields(fc)]
for field in fieldlist:
if field[:8]== "FID_Poly":
print "FIELD DELETED:", field
ar.DeleteField_management(fc, field)
#fuction for matching and adding fields (from dictionary)
def field_generator(inputclass):
#dictionary of feature type class types and their respected feilds
classfield ={
"room" : [],#["RoomName", "TEXT"],["RoomNumber", "TEXT"],["Department", "TEXT"], [ "PaintCode", "TEXT"], ["FlooringMaterial", "TEXT"],["FloorInstallDate", "TEXT"],["DocumentPathway", "TEXT"]],
"window" :[["WindowType", "TEXT"], ["DoesOpen", "TEXT"]],
"plumbing":[["TypeOfFixture", "TEXT"]],
"furniture":[["FurnitureType", "TEXT"], ["Use", "TEXT"]],
"wall": [],
"railing":[[ "Material", "TEXT"]],
"partition":[["Material", "TEXT"]],
"door":[["Swing", "TEXT"], [ "Size", "TEXT"], [ "DoorNumber", "TEXT"], [ "KeyType", "TEXT"],["Material", "TEXT"], [ "Alarmed", "TEXT"],[ "ExteriorDoor", "TEXT"]],
"incline":[],
"doorswing": [],
"stair":[],
"hvac":[],
"fence":[],
"unknown":[],
"department":[["FacilityID", "TEXT"], ["Department", "TEXT"]]
}
for key in classfield.keys():
if key == inputclass:
for field in classfield[inputclass]:
try:
ar.AddField_management(output, field[0], field[1])
print "FIELD ADDED: %s as %s datatype" % (field[0], field[1])
except:
print "FEILD SKIPPED: ", field[0]
# cal/ populate filed fo Create date and createby
exp = '''def Add_date():
import time
return time.strftime('%d/%m/%Y')'''
ar.CalculateField_management(output, "CreateDate", 'Add_date()', "PYTHON", exp)
exp = str("\""+ str(getpass.getuser())+"\"")
ar.CalculateField_management(output, "CreateBy", exp, "PYTHON")
# fuction to ensure a GDB is selected
def getGDBPath():
while True:
GDB_path = tkFileDialog.askdirectory(initialdir = "/",title = "Select Geodatabase (.gdb)",)
if GDB_path.lower()[-3:] == "gdb":
return GDB_path
break
else:
print "Not a Geodatabase. Please try again, bud."
#### BEGINING
print "------------------------------------------------------------------------"
# clearing workspace from priovous runs
arcpy.ClearWorkspaceCache_management()
# get locations of CAD file and GDB
root = Tk()
CADpath =str(tkFileDialog.askopenfilename(initialdir = "//vsrvgisprod01/gisintern/",title = "Select Drawing file (.DWG)",filetypes = (("DWG files","*.dwg"),("all files","*.*"))))
#name CAD file
out_dataset_name = CADpath.split("/")[-1][:-4]
# get locaiton of GDB
out_gdb_path = getGDBPath()
root.destroy()
#bring cad to Geodatbase
CADtoGDB(CADpath, out_gdb_path)
#get floor type
flrlvl =getfloor()
#Create name var
flpath = out_gdb_path+'/'+flrlvl[0]
# creater list of feature cout for final report
strbdf= []
finbdf= []
labl = []
# set workplace
ar.env.workspace =out_gdb_path
ar.env.overwriteOutput== True
# used fuction at top to selct path of input polygon feature class
print "------------------------------------------------------------------------"
print GDB_polyGfile_loc(out_dataset_name)
polygon = GDB_polyGfile_loc(out_dataset_name)
#line feature colected
line = GDB_polyLfile_loc(out_dataset_name)
# Build a list of types of building features /GDB feature classes for polygon and polyline
poly_lyrtypes = np.unique(ar.da.TableToNumPyArray(polygon,"layer"))
line_lyrtypes = np.unique(ar.da.TableToNumPyArray(line,"layer"))
#remove building features in polygon list fromthe polyline list
line_lyrtypes = [x for x in line_lyrtypes if x not in poly_lyrtypes]
# set has GBD FCs as features layers
ar.MakeFeatureLayer_management(polygon, "polygon")
ar.MakeFeatureLayer_management(line, "line")
#list of commnon fields. (aka fields that all building types have )
comfields= [["AssetID", "TEXT"],["RoomID", "TEXT"],["CreateDate", "DATE"], ["CreateBy", "TEXT"],["UpdateDate", "DATE"], ["UpdateBy", "TEXT"], ["Notes", "TEXT"]]
# list of fields not used
delfields =["Entity", "Handle", "LyrFrzn", "LyrLock", "LyrOn", "LyrVPFrzn", "LyrHandle",
"Color", "EntColor", "LyrColor", "BlkColor", "Linetype","EntLinetype", "LyrLnType", "BlkLinetype", "Elevation",
"Thickness", "LineWt", "EntLineWt", "LyrLineWt", "BlkLineWt",
"RefName", "LTScale", "ExtX", "ExtY", "ExtZ", "DocName",
"DocPath", "DocType", "DocVer"]
for field in delfields:
try:
ar.DeleteField_management("polygon", field)
print "FIELD DELETED:", field
except:
print "FEILD SKIPPED: ", field
for field in comfields:
ar.AddField_management("polygon", field[0], field[1])
print "FIELD ADDED: %s as %s datatype" % (field[0], field[1])
print "------------------------------------------------------------------------"
print line
for field in delfields:
try:
ar.DeleteField_management("line", field)
print "FIELD DELETED:", field
except:
print "FEILD SKIPPED: ", field
for field in comfields:
ar.AddField_management("line", field[0], field[1])
print "FIELD ADDED: %s as %s datatype" % (field[0], field[1])
print "------------------------------------------------------------------------"
# for loop to created a GDB FC for each polygon building feature(i.e. doors, walls, etc.)
for lyrtype in poly_lyrtypes:
# Append to lable list
labl.append(lyrtype)
lyrtype= str(lyrtype).replace(" ","_")
#output
output = flpath +"/"+out_dataset_name+"_"+str(lyrtype)[3:-3]
#removing old Feature Class if exists
if ar.Exists(output):
print "Replaced old Feature Class located at:", output
ar.Delete_management(output)
SQL_query = "Layer= "+ str(lyrtype)[2:-2]
# Select building feature type
selected = ar.SelectLayerByAttribute_management("polygon", "NEW_SELECTION", SQL_query)
print str(lyrtype)[3:-3], " \t \t Starting Number: \t" + str(ar.GetCount_management(selected))
# append to starlist
strbdf.append(int(ar.GetCount_management(selected).getOutput(0)))
# Feature To Polygon or Line
ar.FeatureToPolygon_management(selected, output)
# Add Location Field to created GDB feature class
ar.AddField_management(output, "Location", "TEXT")
# Calculate Location Field created GDB feature class (based off of )
calc = str("\""+ out_dataset_name +"\"")
ar.CalculateField_management(output, "Location", calc, "PYTHON")
print str(lyrtype)[3:-3], " \t \t Final Number: \t" + str(ar.GetCount_management(output))
#append to final list
finbdf.append(int(ar.GetCount_management(output).getOutput(0)))
#remove FID_Polyline/gon field and layer field
FID_poly_feild_delate(output)
ar.DeleteField_management(output, "layer")
#Add class-specific Fields to created GDB feature class
try:
field_generator(str(lyrtype)[3:-3])
except:
print "building type not defined"
print "Completed Feature Class located at:", output
print "------------------------------------------------------------------------"
## to created a GDB FC for each polyline building feature(i.e. doorswing, roomlines, etc.)
print line
print "------------------------------------------------------------------------"
for lyrtype in line_lyrtypes:# Append to lable list
labl.append(lyrtype)
lyrtype= str(lyrtype).replace(" ","_")
#output
output = flpath +"/"+out_dataset_name+"_"+str(lyrtype)[3:-3]
#removing old Feature Class if exists
if ar.Exists(output):
print "Replaced old Feature Class located at:", output
ar.Delete_management(output)
SQL_query = "Layer= "+ str(lyrtype)[2:-2]
# Select building feature type
selected = ar.SelectLayerByAttribute_management("line", "NEW_SELECTION", SQL_query)
print str(lyrtype)[3:-3], " \t \t Starting Number: \t" + str(ar.GetCount_management(selected))
# append to starlist
strbdf.append(int(ar.GetCount_management(selected).getOutput(0)))
# Feature To Polygon or Line
ar.FeatureToLine_management(selected, output)
# Add Location Field to created GDB feature class
ar.AddField_management(output, "Location", "TEXT")
# Calculate Location Field created GDB feature class (based off of )
calc = str("\""+ out_dataset_name +"\"")
ar.CalculateField_management(output, "Location", calc, "PYTHON")
print str(lyrtype)[3:-3], " \t \t Final Number: \t" + str(ar.GetCount_management(output))
#append to final list
finbdf.append(int(ar.GetCount_management(output).getOutput(0)))
#remove FID_Polyline/gon field and layer field
FID_poly_feild_delate(output)
ar.DeleteField_management(output, "layer")
# Add class-specific Fields to created GDB feature class
try:
field_generator(str(lyrtype)[3:-3])
except:
print "building type not defined"
print "Completed Feature Class located at:", output
print "------------------------------------------------------------------------"
# by this point all should be done
print "ALL DONE"
#print strbdf
#print finbdf
#print labl
def reportbuilder(strl,finl,labl):
n_groups = len(labl)
index = np.arange(n_groups)
fig, ax = plt.subplots()
labl = [str(x)[3:-3] for x in labl]
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = ax.bar(index, strl, bar_width, alpha=opacity, color='b', label='Start')
rects2 = ax.bar(index+ bar_width , finl, bar_width, alpha=opacity, color='G', label='Final')
ax.set_ylabel('Count of Features')
ax.set_xlabel('Building Feature Type')
ax.set_title('CAD-GDB Report for: ' +out_dataset_name)
ax.set_xticks(index)
ax.set_xticklabels(labl,ha = 'center', rotation= 40)
ax.legend()
plt.show(block=True)
while True:
Report = str(raw_input("Want a Report Graphic? (yes or no) "))
if Report in["y", "Y", "yes", "Yes"]:
reportbuilder(strbdf,finbdf,labl)
break
elif Report.lower() in ["no", "N", "No", "n"]:
break
else:
print "Not an answer. Try again."
print "------------------------------------------------------------------------"
close = str(raw_input("Hit Enter to Close"))
| [
"33844825+Mixsa12@users.noreply.github.com"
] | 33844825+Mixsa12@users.noreply.github.com |
4e1fa727fab5fb543e9932c0bd692b63c5f39388 | 73c5e85d58b1d42e60822b4bbe4447097c908375 | /movie/serializers.py | 4b15cc1e7aac55c016d8cfac8c6813ec0a54f46a | [] | no_license | ishantgit/twitterBackend | 9865ee2a8bdb3b6b7046f8cca799a272f4174387 | d4122b66812944cbf37d05ea872423b0429c4490 | refs/heads/master | 2021-01-10T10:25:16.555978 | 2015-11-29T17:08:21 | 2015-11-29T17:08:21 | 45,641,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from rest_framework import serializers
from movie.models import *
class MovieCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Movie
class MovieReadSerializer(serializers.ModelSerializer):
class Meta:
model = Movie
depth = 2
| [
"ishant@atcouch.com"
] | ishant@atcouch.com |
b08f3840e780e082aad97256d99c215839e1e058 | 1012f61f46ff7aaf37cd3ce0ead64e035ec201dc | /coding-challange/codewars/8kyu/~2021-07-25/capitalization-and-mutability/capitalization-and-mutability.py | 70ab2ba79b4d13199ed131fb83a863ae49274dcb | [] | no_license | polyglotm/coding-dojo | 89efe22f5a34088e94c9e3a4e25cad510b04172a | 43da9c75e3125f5cb1ac317d275475f1c0ea6727 | refs/heads/develop | 2023-08-17T11:59:30.945061 | 2023-08-16T14:13:45 | 2023-08-16T14:13:45 | 188,733,115 | 2 | 0 | null | 2023-03-04T05:49:21 | 2019-05-26T21:26:25 | JavaScript | UTF-8 | Python | false | false | 386 | py | """
capitalization-and-mutability
codewars/8kyu/Capitalization and Mutability
Difficulty: 8kyu
URL: https://www.codewars.com/kata/595970246c9b8fa0a8000086/
"""
def capitalize_word(word):
return word.capitalize()
def test_capitalize_word():
assert capitalize_word('word') == 'Word'
assert capitalize_word('i') == 'I'
assert capitalize_word('glasswear') == 'Glasswear'
| [
"polyglot.m@gmail.com"
] | polyglot.m@gmail.com |
3883fde7e7b92875e069f83e4b65ddf1c4b2a542 | f6328ac298ac1b99920be0cccc2b19f0efe61794 | /docs/conf.py | 57efdd93314c7b0344e984fe287f3d16df49fe3d | [
"BSD-3-Clause"
] | permissive | Krytic/riroriro | f1b2f3a54e9e3c9fbf5c169edf72e124645e01cb | 64427ecba5974d5c675884a5d7b55084e3184b87 | refs/heads/main | 2023-05-09T13:40:32.788205 | 2021-05-05T03:53:37 | 2021-05-05T03:53:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'riroriro'
copyright = '2021, Wouter van Zeist'
author = 'Wouter van Zeist'
# The full version, including alpha/beta/rc tags
release = '1.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'classic'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | [
"woutervanzeist7@gmail.com"
] | woutervanzeist7@gmail.com |
49ddfd050e02c9a29ad478cd2401367cf761db46 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/murtaza/pusher/demo_state_td3.py | 1bb88eaceb172d7677d3cb4f22eca88400bb1641 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,221 | py | from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from rlkit.launchers.launcher_util import run_experiment
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.experiments.murtaza.rfeatures_rl import state_td3bc_experiment
from rlkit.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
env_id='SawyerPushNIPSEasy-v0',
algo_kwargs=dict(
batch_size=1024,
num_epochs=300,
num_eval_steps_per_epoch=500,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=1000,
min_num_steps_before_training=10000,
max_path_length=50,
),
td3_trainer_kwargs=dict(
discount=0.99,
),
td3_bc_trainer_kwargs=dict(
discount=0.99,
demo_path=["demos/icml2020/pusher/demos_action_noise_1000.npy"],
demo_off_policy_path=None,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=10000,
rl_weight=1.0,
bc_weight=0,
reward_scale=1.0,
target_update_period=2,
policy_update_period=2,
obs_key='state_observation',
env_info_key='puck_distance',
max_path_length=50,
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
exploration_noise=.8,
load_demos=True,
pretrain_rl=False,
pretrain_policy=False,
es='ou',
td3_bc=True,
save_video=True,
image_env_kwargs=dict(
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
transpose=True,
normalize=True,
),
num_exps_per_instance=1,
region='us-west-2',
logger_variant=dict(
tensorboard=True,
),
)
search_space = {
'td3_bc_trainer_kwargs.use_awr': [False],
# 'td3_bc_trainer_kwargs.demo_beta':[1, 10],
'td3_bc_trainer_kwargs.bc_weight': [1, 0],
'td3_bc_trainer_kwargs.rl_weight': [1],
'algo_kwargs.num_epochs': [1000],
'algo_kwargs.num_eval_steps_per_epoch': [100],
'algo_kwargs.num_expl_steps_per_train_loop': [100],
'algo_kwargs.min_num_steps_before_training': [0],
# 'td3_bc_trainer_kwargs.add_demos_to_replay_buffer':[True, False],
# 'td3_bc_trainer_kwargs.num_trains_per_train_loop':[1000, 2000, 4000, 10000, 16000],
# 'exploration_noise':[0.1, .3, .5],
# 'pretrain_rl':[True],
# 'pretrain_policy':[False],
'pretrain_rl': [False],
'pretrain_policy': [False],
'seedid': range(5),
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(state_td3bc_experiment, variants, run_id=0)
| [
"alexanderkhazatsky@gmail.com"
] | alexanderkhazatsky@gmail.com |
f7108676fd592dd4627eb37e71d5d30a122a3ac8 | 9beeae9b19902ca626106bd392dcb18178ad3fc9 | /staphopia/signals.py | 5d0d58cd2ed01b51cfed315b461d63ef586f53c1 | [] | no_license | staphopia/staphopia-web | e27f0d01d5a95c1aad9a5802bbdcc1161f6af1d3 | 2c35ee47e131a74642e60fae6f1cc23561d8b1a6 | refs/heads/master | 2023-08-09T04:07:47.279139 | 2022-02-10T16:09:18 | 2022-02-10T16:09:18 | 31,989,879 | 5 | 1 | null | 2023-08-01T21:08:34 | 2015-03-11T00:39:13 | Python | UTF-8 | Python | false | false | 451 | py | from django.contrib.auth.models import Group
from staphopia.forms import RegistrationFormWithName
def user_created(sender, user, request, **kwargs):
form = RegistrationFormWithName(request.POST)
user.first_name = form.data['first_name']
user.last_name = form.data['last_name']
user.save()
Group.objects.get(name='public').user_set.add(user)
from registration.signals import user_registered
user_registered.connect(user_created)
| [
"robert.petit@emory.edu"
] | robert.petit@emory.edu |
27b8bb8a1aa06eea5daf0e65ae35336ce49cec83 | be5f6490e47347dc3d28faeb46484144e9e3e5ba | /wsgi.py | 860302d633efaa61db75cf173e03b0e301c94686 | [] | no_license | ipcrm/flask_puppet | 4d9e0706bc843942047ffaa1db5fe1f01f67dc24 | d8a5400a2d31c9a0a19bb5d1761143bdca502040 | refs/heads/master | 2021-01-01T05:05:33.990321 | 2017-05-30T03:40:48 | 2017-05-30T03:40:48 | 58,177,844 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | #!env/bin/python
from webui import webui
if __name__ == "__main__":
webui.run()
| [
"mcadorette@puppetlabs.com"
] | mcadorette@puppetlabs.com |
810a5acd8ed978b4868643ba13f4e8ac63666fd7 | 7ec3900ab140907e93858f5476f34dfa7d5baae9 | /Dictionary/urls.py | f22d45c3a3e1c8526e577b11fc47d823275a1383 | [] | no_license | D10/Dictionary | 650648e5a2f2fade3a83789d7fcc301158e67f0b | e0377cdf0b2cbe5113c6fbfd7852de44a7f97a26 | refs/heads/main | 2023-05-01T21:45:07.770894 | 2021-05-19T22:46:11 | 2021-05-19T22:46:11 | 369,018,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('dictsite.urls'))
]
| [
"rgadamurov@bk.ru"
] | rgadamurov@bk.ru |
d9bef3ef39bbcccbdcadc4e05a37250ec144be34 | 35aa4fe6d3690ca7099fe1497fd0903fff9d420d | /B103.py | 5ab0752b9ee8323e609f08875299e43df2c4138d | [] | no_license | SATHANASELLAMUTHU/MYPROJECT | 07e1b9945d51f1ed0c7d82526dcbccd1fce86453 | 67325a9f2e8ac553299d89b4248465ea2fca56a8 | refs/heads/master | 2020-05-05T03:02:17.340845 | 2019-06-08T09:53:27 | 2019-06-08T09:53:27 | 179,659,079 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | n=raw_input("Enter the string:")
print(n.title())
| [
"noreply@github.com"
] | noreply@github.com |
7cf435eb848c7d03f3f9aad53f457dca59045ba8 | fb91c53b311cf191bc0f3a4efe5b1a0ba197944e | /play/tmp/0003_auto__add_field_coupon_coupons_released.py | e83fcdb8627bc6416c6a929aaadb00a6125eb43e | [
"MIT"
] | permissive | fraferra/PlayCity | e0ba878c52a321afbdbba68d25717b29a5dd3109 | 2bf97c30599b686ca0e642d1ebaf73fc99705291 | refs/heads/master | 2021-01-19T16:26:24.955470 | 2014-08-08T21:56:08 | 2014-08-08T21:56:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,807 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Coupon.coupons_released'
db.add_column(u'play_coupon', 'coupons_released',
self.gf('django.db.models.fields.DecimalField')(default=10, max_digits=4, decimal_places=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Coupon.coupons_released'
db.delete_column(u'play_coupon', 'coupons_released')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'play.challenge': {
'Meta': {'object_name': 'Challenge'},
'challenge_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['play.Player']", 'symmetrical': 'False'}),
'points': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
u'play.coupon': {
'Meta': {'object_name': 'Coupon'},
'buyers': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': u"orm['play.Player']", 'null': 'True', 'symmetrical': 'False'}),
'coupons_released': ('django.db.models.fields.DecimalField', [], {'default': '10', 'max_digits': '4', 'decimal_places': '0'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['play.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'})
},
u'play.couponhistory': {
'Meta': {'object_name': 'CouponHistory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['play.Player']"}),
'shop': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'play.event': {
'Meta': {'object_name': 'Event'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50'}),
'experience': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '5', 'decimal_places': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organizer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['play.Organization']"}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': u"orm['play.Player']", 'null': 'True', 'symmetrical': 'False'}),
'points': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'play.eventhistory': {
'Meta': {'object_name': 'EventHistory'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['play.Player']"}),
'points': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'play.idea': {
'Meta': {'object_name': 'Idea'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True'}),
'experience': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '5', 'decimal_places': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
u'play.organization': {
'Meta': {'object_name': 'Organization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Super Duper!'", 'max_length': '100', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'play.player': {
'Meta': {'object_name': 'Player'},
'experience': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '5', 'decimal_places': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '4', 'decimal_places': '0'}),
'picture_url': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}),
'score': ('django.db.models.fields.DecimalField', [], {'default': '20', 'null': 'True', 'max_digits': '4', 'decimal_places': '0'}),
'token': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'play.shop': {
'Meta': {'object_name': 'Shop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Super shop!'", 'max_length': '100', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['play'] | [
"fraferra@cisco.com"
] | fraferra@cisco.com |
014dadd6751fe3bf44a387e8ed90580c24168a91 | 72c8c39a930b910d2ff406c8cf831d56a489ea3d | /MergeSort/mergeSort.py | 45e0aee4648f5dee030ca756554c24c1895a6986 | [
"MIT"
def merge_sort_MIT(A):
    """
    Sort list A into ascending order with merge sort, and return the result.

    Returns a new list; runs in O(n log n).
    """
    n = len(A)
    # Base case: lists of length 0 or 1 are already sorted.  Using n <= 1
    # (rather than the original n == 1) also terminates for an empty input,
    # which would otherwise recurse forever on A[:0] / A[0:].
    if n <= 1:
        return A
    mid = n // 2  # floor division
    L = merge_sort_MIT(A[:mid])
    R = merge_sort_MIT(A[mid:])
    return merge(L, R)

def merge(L, R):
    """
    Given two sorted sequences L and R, return their sorted merge.
    """
    i = 0
    j = 0
    answer = []
    # Repeatedly take the smaller head element until one side is exhausted.
    while i < len(L) and j < len(R):
        if L[i] < R[j]:
            answer.append(L[i])
            i += 1
        else:
            answer.append(R[j])
            j += 1
    # Append whatever remains of the non-exhausted side (already sorted).
    if i < len(L):
        answer.extend(L[i:])
    if j < len(R):
        answer.extend(R[j:])
    return answer
# Demo driver: sorts a fixed sample array and prints input and output.
# NOTE: uses Python 2 print statements; this script is Python 2 only.
def main():
    array = [54,26,93,17,77,31,44,55,20]
    print;print "inputArray: " + str(array)
    answer = merge_sort_MIT( array )
    print;print "sortedArray: " + str(answer)

if __name__ == "__main__":
    main() | [
"santhoshvai@gmail.com"
] | santhoshvai@gmail.com |
3b7a89aed9eab0f5e558d9f4ec7f2fcf3604a239 | 4dd4edb7df160a151aa541c6aca51ac0c7912a46 | /WeLinkAPI/WeLinkAPI/__init__.py | f0ace5136acb89fd39c45c7f1480f2f45a456663 | [] | no_license | tshawn2014/WeLinkBackend | 9e05368b09b79f1612542a310f5c60a503bfee72 | 2542697b6f368bffcf1efd641230eaa7c22da493 | refs/heads/master | 2023-02-02T23:31:43.276350 | 2020-12-16T06:38:49 | 2020-12-16T06:38:49 | 304,161,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | # import pymysql
# <!-- tsh local run --
# pymysql.version_info = (1, 4, 13, "final", 0)
# -- tsh local run -->
# pymysql.install_as_MySQLdb() | [
"tshawn2014@gmail.com"
] | tshawn2014@gmail.com |
010ccca8e5866021ae689a6b1229dd26275293f7 | ba9455bf842f3a118718f6c42623f5407d104375 | /Data_Reduction.py | 47159a5c5d8c8a5179b8a559442e41fae23b23e8 | [] | no_license | WKroese/ALOP | 9ca3dcbfd91a0dd1ec919b02e6fc027235d2c039 | 3a03f18f27c49495fa04f6a278dc0d71602d633f | refs/heads/master | 2021-05-07T04:02:12.436822 | 2018-04-29T02:43:45 | 2018-04-29T02:43:45 | 111,083,428 | 0 | 1 | null | 2017-12-01T10:19:08 | 2017-11-17T09:16:45 | null | UTF-8 | Python | false | false | 3,746 | py | # Last update: 17-04-2018
# ALOP La Palma project 'Transients' Data calibration
# Authors: Daphne Abbink, Willem Kroese, Sacha van Ruiten, Yke Rusticus, Kira Strelow
import numpy as np
import astropy.io as ast
from astropy.io import fits
import matplotlib.pyplot as plt
import sp
# Pixel bounds of the region of interest used to crop all frames.
XMIN = 1600
XMAX = 2601
YMIN = 500
YMAX = 1501
# Function to unpack the data
# 'a' is the filter colour.
def import_data(a):
    """Prompt for an image file number, open the FITS file and return the
    science frame cropped to the region of interest plus its exposure time.

    a -- filter colour label, only used in the prompt (e.g. "R" or "V").
    """
    filename = raw_input("Enter "+a+"-file number\n")
    hdul = fits.open("r"+filename+".fit")
    im = hdul[4].data
    hdr0 = hdul[0].header
    hdul.close()
    exp_t = hdr0['EXPTIME']
    # Crop with the module-level region constants instead of repeating the
    # literals 1600:2601 / 500:1501, so the region stays consistent with
    # make_cal_im (which already slices with XMIN:XMAX, YMIN:YMAX).
    return(im[XMIN:XMAX, YMIN:YMAX], exp_t)
# Function to give the mean bias image or mean normalized flat field image (calibration image).
# parameters: a is either 'bias' or 'flat', depending on the file type.
# b is 'True' for flats, and 'False' for biases.
def make_cal_im(a,b):
    """Average a numbered sequence of calibration frames.

    a -- label used only in the prompts ('bias', 'flat_R', ...).
    b -- True for flat fields (each frame is divided by its exposure time,
         and the averaged image is normalized); False for biases.
    Returns the mean calibration image cropped to the region of interest.
    """
    number1 = int(raw_input("first file number "+a+"\n"))
    number2 = int(raw_input("last file number "+a+"\n"))
    cal_im_total = np.zeros([4200,2154]) # for us this will later be ([1000,1000])
    for i in range(number1,number2+1,1):
        filename = "r"+str(i)+".fit"
        hdul = fits.open(filename)
        im = hdul[4].data
        hdr0 = hdul[0].header
        hdul.close()
        if b:
            # Flats: convert to counts per second before accumulating.
            im = np.true_divide((im),hdr0['EXPTIME'])
        cal_im_total += im
    cal_im = cal_im_total/(float(number2-number1+1))
    if b:
        # NOTE(review): this normalizes by the mean of the *last* frame read
        # (`im`), not of the averaged image.  A normalized master flat is
        # usually divided by np.mean(cal_im) -- confirm intent.
        cal_im /= np.mean(im)
    return cal_im[XMIN:XMAX, YMIN:YMAX]
# This function calibrates the image using the model: Image = (source + sky)*flat + bias
# The bias is needed, as well as the exposure time of the image, and the normalized flat field in
# the right filter that has already been divided by the exposure time of the flat field.
def calibrate(im, im_t, bias, flat):
    """Bias-subtract and flat-field a science frame, then run source
    detection and aperture photometry on it.

    im   -- raw science image.
    im_t -- exposure time of the science image.
    bias -- master bias frame.
    flat -- normalized master flat (already divided by its exposure time).
    Returns (flux, fluxerr) for every detected source.
    """
    # Undo the detector model: subtract the bias, then divide out the flat
    # response scaled by the exposure time.
    reduced = (im - bias) / (flat * im_t)
    # Minimal detection threshold; should be 3-4 sigma above background RMS.
    hmin = 10000
    x, y, flux, sharpness, roundness = sp.find(reduced, hmin, 5.)
    # skyrad gives the inner and outer radii of the sky annulus.
    flux, fluxerr, sky, skyerr = sp.aper(image=reduced, xc=x, yc=y, phpadu=5.,
                                         apr=[5], skyrad=[10, 20],
                                         badpix=[0, 0], flux=True, nan=True)
    return flux, fluxerr
#1295045
''' !!!At this point we should actually check which stars are usable and which
indices they correspond to. We also need the index of the transient, plus the
magnitudes of the reference stars.!!! '''
# PHOTOMETRIC CALIBRATION
# For the indices I assumed for now that the transient is at index 0, so the rest are reference stars.
def magnitude(flux, err_flux, m_ref, err_m_ref):
    """Calibrated magnitude of the target (flux[0]) from reference stars.

    flux      -- array; flux[0] is the target, flux[1:] the reference stars.
    err_flux  -- flux uncertainties, same layout as `flux`.
    m_ref     -- known magnitudes of the reference stars (matching flux[1:]).
    err_m_ref -- uncertainties on those reference magnitudes.
    Returns (m, err_m): inverse-variance weighted magnitude and its error.
    """
    # Pogson's relation relative to each reference star.
    m_arr = -2.5 * np.log10(flux[0] / flux[1:]) + m_ref
    # Propagate the flux and reference-magnitude errors in quadrature.
    # (The original applied sqrt to only the first term and then squared it
    # back, and the line below it referenced an undefined name `err_m_ar`.)
    err_m_arr = np.sqrt(
        (2.5 / (flux[0] * np.log(10)) * err_flux[0]) ** 2
        + (2.5 / (flux[1:] * np.log(10)) * err_flux[1:]) ** 2
        + err_m_ref ** 2
    )
    # Inverse-variance weighted mean over all reference stars.  The original
    # summed m/sigma^2 without dividing by the total weight, and returned the
    # raw weight sum instead of the error of the weighted mean.
    weights = 1.0 / err_m_arr ** 2
    m = np.sum(m_arr * weights) / np.sum(weights)
    err_m = 1.0 / np.sqrt(np.sum(weights))
    return (m, err_m)
# Main reduction sequence (interactive: every call below prompts for file
# numbers on stdin).  Builds the calibration images, reduces both filters,
# then converts fluxes to magnitudes.
raw_R, t_R = import_data("R")
raw_V, t_V = import_data("V")
bias = make_cal_im("bias",False)
flat_R = make_cal_im("flat_R",True)
flat_V = make_cal_im("flat_V",True)
print flat_R
print flat_V
science_R, err_R = calibrate(raw_R, t_R, bias, flat_R)
science_V, err_V = calibrate(raw_V, t_V, bias, flat_V)
# NOTE(review): m_R_ref / err_m_R_ref / m_V_ref / err_m_V_ref are never
# defined in this file, so the next two lines raise NameError as written --
# the reference-star magnitudes still have to be supplied.
m_R, err_m_R = magnitude(science_R, err_R, m_R_ref, err_m_R_ref)
m_V, err_m_V = magnitude(science_V, err_V, m_V_ref, err_m_V_ref)
# OH YES, we also need the Julian date of course
# We can use this to add new lines to an existing text file (adding new data points)
#with open('datapoints.txt', 'a') as file:
#    file.writelines('{}\t{}\t{}\t{}\t{}\n'.format(t, m_V, err_m_V, m_R, err_m_R))
| [
"noreply@github.com"
] | noreply@github.com |
4b10fa53b97294463e20ad06343f2dd982acc650 | afebbb07b2b4eada17a5853c1ce63b4075d280df | /marketsim/gen/_intrinsic/orderbook/of_trader.py | 804ce5709645171b35783b2eb31d41c8a145e2c1 | [] | no_license | peter1000/marketsimulator | 8c0a55fc6408b880311d3ad49defc55e9af57824 | 1b677200a9d5323f2970c83f076c2b83d39d4fe6 | refs/heads/master | 2021-01-18T01:39:04.869755 | 2015-03-29T17:47:24 | 2015-03-29T17:47:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,224 | py | from marketsim import types
from marketsim.gen._out.trader._singleproxy import SingleProxy
from marketsim import getLabel
from marketsim.gen._out._intrinsic_base.orderbook.of_trader import OfTrader_Base, Proxy_Base
class Base(object):
    """Delegating wrapper base: unknown attribute reads are forwarded to the
    wrapped implementation stored in ``self._impl``."""

    _properties = {}

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails.  Dunder names are
        # never forwarded, and nothing is forwarded while no impl is bound.
        if not name.startswith('__') and self._impl:
            return getattr(self._impl, name)
        raise AttributeError

    def __str__(self):
        if self._impl:
            return getLabel(self._impl)
        return ''

    def __repr__(self):
        return self.__str__()
class OfTrader_Impl(Base, OfTrader_Base):
    """Order book looked up lazily from the associated trader."""

    def __init__(self):
        # The display alias depends on whether the trader reference is a
        # single proxy or a concrete trader.
        if type(self.Trader) == SingleProxy:
            self._alias = ["$(TraderAsset)"]
        else:
            self._alias = ['OfTrader']
        Base.__init__(self)

    @property
    def _impl(self):
        # The trader (or its order book) may not be bound yet; report that
        # as "no implementation" rather than raising.
        try:
            return self.Trader.orderBook
        except AttributeError:
            return None
class Proxy_Impl(Base, Proxy_Base):
    """Order-book proxy that is bound to a concrete book at context time."""

    def __init__(self):
        self._impl = None
        Base.__init__(self)

    @property
    def label(self):
        if self._impl:
            return self._impl.label
        return '$(OrderBook)'

    def bind_impl(self, ctx):
        # Bind only once: keep an already-bound order book.
        if self._impl is None:
            self._impl = ctx.orderbook
| [
"anton.kolotaev@gmail.com"
] | anton.kolotaev@gmail.com |
b7345219fb5ba716b3fed095337bf4ff6b1df307 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/nwxtho001/question2.py | e5b2fd67ffe16d5f456ab603de434f28d2291d9f | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,398 | py | print ("Welcome to the 30 Second Rule Expert\n------------------------------------\nAnswer the following questions by selecting from among the options.")
# Interactive "30 second rule" decision tree: each yes/no answer narrows
# down whether the dropped food should be eaten.  Answers are compared
# against the exact lowercase strings 'yes' / 'no'.
seen = input ('Did anyone see you? (yes/no)\n')
if seen == 'yes' :
    # Someone saw it fall -- what matters is who, and how valuable it is.
    seen_type = input ('Was it a boss/lover/parent? (yes/no)\n')
    if seen_type == 'no' :
        print ('Decision: Eat it.')
    else :
        exp = input ('Was it expensive? (yes/no)\n')
        if exp == 'yes' :
            cut = input ('Can you cut off the part that touched the floor? (yes/no)\n')
            if cut == 'yes' :
                print ('Decision: Eat it.')
            else :
                print ('Decision: Your call.')
        else :
            choc = input ('Is it chocolate? (yes/no)\n')
            if choc == 'yes' :
                print ('Decision: Eat it.')
            else :
                print ('Decision: Don\'t eat it.')
else :
    # Nobody saw it fall -- decide based on the food itself.
    sticky = input ('Was it sticky? (yes/no)\n')
    if sticky == 'yes' :
        steak = input ('Is it a raw steak? (yes/no)\n')
        if steak == 'yes' :
            puma = input ('Are you a puma? (yes/no)\n')
            if puma == 'yes' :
                print ('Decision: Eat it.')
            else :
                print ('Decision: Don\'t eat it.')
        else :
            cat = input ('Did the cat lick it? (yes/no)\n')
            if cat == 'yes' :
                health = input ('Is your cat healthy? (yes/no)\n')
                if health == 'yes' :
                    print ('Decision: Eat it.')
                else :
                    print ('Decision: Your call.')
            else :
                print ('Decision: Eat it.')
    else :
        emau = input ('Is it an Emausaurus? (yes/no)\n')
        if emau == 'yes':
            mega = input ('Are you a Megalosaurus? (yes/no)\n')
            if mega == 'yes' :
                print ('Decision: Eat it.')
            else :
                print ('Decision: Don\'t eat it.')
        else :
            cat = input ('Did the cat lick it? (yes/no)\n')
            if cat == 'yes' :
                health = input ('Is your cat healthy? (yes/no)\n')
                if health == 'yes' :
                    print ('Decision: Eat it.')
                else :
                    print ('Decision: Your call.')
            else :
                print ('Decision: Eat it.') | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
10a1d871ef9ee3fa46f37d5c885fcd50bfa4cf6a | a7e7ed714ab21e64d137ea379e6f1433ac0ee67b | /Building/urls.py | 7ae1e0e0d0f64f82949166931bb46c326982686e | [
"MIT"
] | permissive | LukaszHoszowski/Django_ProEstate | 2b234b53ec68dd46d917eda60f44fb4481263085 | 36c5cc25842f4e5afebd9ff6eaa83c9457fb7a3a | refs/heads/main | 2023-08-11T08:27:16.270730 | 2021-10-01T20:24:10 | 2021-10-01T20:24:10 | 403,704,922 | 1 | 0 | MIT | 2021-10-01T20:17:56 | 2021-09-06T17:19:37 | HTML | UTF-8 | Python | false | false | 1,646 | py | from django.urls import path
from Building.views import BuildingListView, BuildingDetailView, \
BuildingCartographyView, BuildingCoopView, BuildingPhotosView, BuildingDocsView, BuildingPhotosCreate, \
BuildingDocsCreate, BuildingFlatsView, FlatDetailView, FlatUpdateView, FlatAddUserUpdate, FlatDeleteUserUpdate, \
MeasureUpdateView
app_name = 'Building'
urlpatterns = [
# Building urls
path('', BuildingListView.as_view(), name='buildings'),
path('<slug:slug>/', BuildingDetailView.as_view(), name='building_details'),
path('<slug:slug>/flats/', BuildingFlatsView.as_view(), name='building_flats'),
path('<slug:slug>/cartography/', BuildingCartographyView.as_view(), name='building_cartography'),
path('<slug:slug>/coop/', BuildingCoopView.as_view(), name='building_coop'),
path('<slug:slug>/photos/', BuildingPhotosView.as_view(), name='building_photos'),
path('<slug:slug>/add_photos/', BuildingPhotosCreate.as_view(), name='building_photos_add'),
path('<slug:slug>/documents/', BuildingDocsView.as_view(), name='building_documents'),
path('<slug:slug>/add_docs/', BuildingDocsCreate.as_view(), name='building_docs_add'),
# Flat urls
path('<slug:slug>/<int:pk>', FlatDetailView.as_view(), name='flat_details'),
path('<slug:slug>/<int:pk>/update', FlatUpdateView.as_view(), name='flat_update'),
path('add/user_to_flat/<int:pk>/', FlatAddUserUpdate.as_view(), name='flat_add_user'),
path('del/user_from_flat/<int:pk>/', FlatDeleteUserUpdate.as_view(), name='flat_delete_user'),
path('update/measure/<int:pk>/', MeasureUpdateView.as_view(), name='measure_update'),
]
| [
"hoszowski@hpe.com"
] | hoszowski@hpe.com |
e3a4187c4bb177fb87c1bbaf83b0ba2474e6fb35 | fd11fd3a63ab711612bc914e383b504125c1eed6 | /nova/api/openstack/compute/contrib/server_sort_keys.py | 01ce14b70536245876fbf4395aa4d805eb3f7eb7 | [
"Apache-2.0"
] | permissive | projectcalico/calico-nova | 7692a2d391d41ca23d9282c13a4ed998d9e081e5 | d01a4e54df558092702ffeae3cb4551bfb2d7707 | refs/heads/calico-readme | 2023-06-12T04:10:20.471307 | 2015-09-07T22:14:51 | 2015-09-07T22:14:51 | 21,241,973 | 7 | 4 | Apache-2.0 | 2020-08-12T08:58:02 | 2014-06-26T13:41:49 | Python | UTF-8 | Python | false | false | 969 | py | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Server_sort_keys(extensions.ExtensionDescriptor):
    """Add sort keys and directions to the Server GET v2 API."""

    # Extension metadata consumed by the Nova API extension loader.
    name = "ServerSortKeys"
    alias = "os-server-sort-keys"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "server_sort_keys/api/v2")
    updated = "2014-05-22T00:00:00Z"
| [
"kaufer@us.ibm.com"
] | kaufer@us.ibm.com |
075d717759921834a2a8c9622dbb53790cf0228a | b198ab1d3faf79d34b1745236daa5eb02a37e18e | /yggdrasil/metaschema/properties/tests/test_JSONArrayMetaschemaProperties.py | ed812677d1d5d9df256fbc5b8f6903ae12c185fa | [
"BSD-3-Clause"
] | permissive | leighmatth/yggdrasil | 688f13aa0d274217daec9f412269fbbaf5f10aef | dcc4d75a4d2c6aaa7e50e75095a16df1df6b2b0a | refs/heads/master | 2021-07-09T10:39:25.422978 | 2021-04-14T16:40:04 | 2021-04-14T16:40:04 | 245,011,886 | 0 | 0 | NOASSERTION | 2020-03-04T21:54:25 | 2020-03-04T21:54:24 | null | UTF-8 | Python | false | false | 1,645 | py | from yggdrasil.metaschema.properties.tests import (
test_MetaschemaProperty as parent)
class TestItemsMetaschemaProperty(parent.TestMetaschemaProperty):
    r"""Test class for ItemsMetaschemaProperty class."""

    _mod = 'JSONArrayMetaschemaProperties'
    _cls = 'ItemsMetaschemaProperty'

    def __init__(self, *args, **kwargs):
        super(TestItemsMetaschemaProperty, self).__init__(*args, **kwargs)
        n = 3
        full_value = [int(i) for i in range(n)]
        short_value = [int(i) for i in range(n - 1)]
        int_single = {'type': 'int'}
        int_multi = [{'type': 'int'} for _ in range(n)]
        float_single = {'type': 'float'}
        float_multi = [{'type': 'float'} for _ in range(n)]
        # (instance, items-schema) pairs that should validate.
        self._valid = [(full_value, int_single),
                       (full_value, int_multi),
                       (short_value, int_single)]
        # (instance, items-schema) pairs that should fail validation.
        self._invalid = [([float(i) for i in range(n)], int_single),
                         ([float(i) for i in range(n)], int_multi)]
        # ([int(i) for i in range(n - 1)], int_multi)]
        # Schema pairs the comparison logic should accept.
        self._valid_compare = [(int_single, int_single),
                               (int_single, int_multi),
                               (int_multi, int_single),
                               (int_multi, int_multi)]
        # Schema pairs the comparison logic should reject.
        self._invalid_compare = [(int_single, float_single),
                                 (int_single, float_multi),
                                 (int_multi, float_single),
                                 (int_multi, float_multi),
                                 (1, 1),
                                 (int_multi, int_multi[:-1])]
| [
"langmm.astro@gmail.com"
] | langmm.astro@gmail.com |
ff2fdf9485bb9ed0503010a3bc8eee00abd92789 | 6fd51fb841fb687d66f2f9e83839663d5158884f | /ee/clickhouse/views/test/test_clickhouse_funnel_correlation.py | 5e22a4aa46fb0ac9739f31f8cfaf597cc11058a1 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | sean810720/posthog | 1b846b57d62877bff9988577fca3c9bf8720acab | 1f951f7c39bbe55c7bcfd92a57bea15a401ea046 | refs/heads/master | 2023-08-07T01:09:33.008320 | 2021-10-07T08:41:07 | 2021-10-07T08:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,485 | py | import dataclasses
import json
from datetime import datetime
from typing import Any, Dict, Optional, TypedDict
from uuid import uuid4
import pytest
from django.core.cache import cache
from django.test import Client
from freezegun import freeze_time
from ee.clickhouse.models.event import create_event
from posthog.models.person import Person
from posthog.models.team import Team
from posthog.test.base import BaseTest
@pytest.mark.clickhouse_only
class FunnelCorrelationTest(BaseTest):
    """
    Tests for /api/projects/:project_id/funnel/correlation/
    """

    CLASS_DATA_LEVEL_SETUP = False

    def test_requires_authn(self):
        # An unauthenticated client must receive a 401.
        response = get_funnel_correlation(
            client=self.client,
            team_id=self.team.pk,
            request=FunnelCorrelationRequest(date_to="2020-04-04", events=json.dumps([])),
        )
        assert response.status_code == 401

    def test_event_correlation_endpoint_picks_up_events_for_odds_ratios(self):
        with freeze_time("2020-01-01"):
            self.client.force_login(self.user)

            # Add in two people:
            #
            # Person 1 - a single signup event
            # Person 2 - a signup event and a view insights event
            #
            # Both of them have a "watched video" event
            #
            # We then create Person 3, one successful, the other
            # not. Both have not watched the video.
            #
            # So our contingency table for "watched video" should be
            #
            # |                 | success  | failure  | total    |
            # | --------------- | -------- | -------- | -------- |
            # | watched         | 1        | 1        | 2        |
            # | did not watched | 1        | 0        | 1        |
            # | total           | 2        | 1        | 3        |
            #
            events = {
                "Person 1": [
                    # Failure / watched
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                    {"event": "watched video", "timestamp": datetime(2020, 1, 2)},
                ],
                "Person 2": [
                    # Success / watched
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                    {"event": "watched video", "timestamp": datetime(2020, 1, 2)},
                    {"event": "view insights", "timestamp": datetime(2020, 1, 3)},
                ],
                "Person 3": [
                    # Success / did not watched
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                    {"event": "view insights", "timestamp": datetime(2020, 1, 3)},
                ],
            }

            create_events(events_by_person=events, team=self.team)

            odds = get_funnel_correlation_ok(
                client=self.client,
                team_id=self.team.pk,
                request=FunnelCorrelationRequest(
                    events=json.dumps([EventPattern(id="signup"), EventPattern(id="view insights")]),
                    date_to="2020-04-04",
                ),
            )

            assert odds == {
                "is_cached": False,
                "last_refresh": "2020-01-01T00:00:00Z",
                "result": {
                    "events": [
                        {
                            "event": "watched video",
                            "success_count": 1,
                            "failure_count": 1,
                            "odds_ratio": 0.5,
                            "correlation_type": "failure",
                        },
                    ]
                },
            }

    def test_event_correlation_is_partitioned_by_team(self):
        """
        Ensure there's no crosstalk between teams

        We check this by:

        1. loading events into team 1
        2. checking correlation for team 1
        3. loading events into team 2
        4. checking correlation for team 1 again, they should be the same
        """
        with freeze_time("2020-01-01"):
            self.client.force_login(self.user)

            events = {
                "Person 1": [
                    {"event": "watched video", "timestamp": datetime(2019, 1, 2)},
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                ],
                "Person 2": [
                    {"event": "watched video", "timestamp": datetime(2019, 1, 2)},
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                    {"event": "view insights", "timestamp": datetime(2020, 1, 3)},
                ],
            }

            create_events(events_by_person=events, team=self.team)

            odds_before = get_funnel_correlation_ok(
                client=self.client,
                team_id=self.team.pk,
                request=FunnelCorrelationRequest(
                    events=json.dumps([EventPattern(id="signup"), EventPattern(id="view insights")]),
                    date_to="2020-04-04",
                ),
            )

            other_team = create_team(organization=self.organization)
            create_events(events_by_person=events, team=other_team)

            # We need to make sure we clear the cache so we get the same results again
            cache.clear()

            odds_after = get_funnel_correlation_ok(
                client=self.client,
                team_id=self.team.pk,
                request=FunnelCorrelationRequest(
                    events=json.dumps([EventPattern(id="signup"), EventPattern(id="view insights")]),
                    date_to="2020-04-04",
                ),
            )

            assert odds_before == odds_after

    def test_event_correlation_endpoint_does_not_include_historical_events(self):
        with freeze_time("2020-01-01"):
            self.client.force_login(self.user)

            # Add in two people:
            #
            # Person 1 - a single signup event
            # Person 2 - a signup event and a view insights event
            #
            # Both of them have a "watched video" event but they are before the
            # signup event, so they must not show up in the correlation.
            events = {
                "Person 1": [
                    {"event": "watched video", "timestamp": datetime(2019, 1, 2)},
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                ],
                "Person 2": [
                    {"event": "watched video", "timestamp": datetime(2019, 1, 2)},
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                    {"event": "view insights", "timestamp": datetime(2020, 1, 3)},
                ],
            }

            create_events(events_by_person=events, team=self.team)

            # Clear the cache so tests that have already run don't
            # interfere with this test.
            cache.clear()

            odds = get_funnel_correlation_ok(
                client=self.client,
                team_id=self.team.pk,
                request=FunnelCorrelationRequest(
                    events=json.dumps([EventPattern(id="signup"), EventPattern(id="view insights")]),
                    date_to="2020-04-04",
                ),
            )

            assert odds == {
                "is_cached": False,
                "last_refresh": "2020-01-01T00:00:00Z",
                "result": {"events": []},
            }

    def test_event_correlation_endpoint_does_not_funnel_steps(self):
        with freeze_time("2020-01-01"):
            self.client.force_login(self.user)

            # Add Person1 with only the funnel steps involved
            # NOTE(review): the third event has an empty name -- possibly
            # lost content; confirm against the upstream source.
            events = {
                "Person 1": [
                    {"event": "signup", "timestamp": datetime(2020, 1, 1)},
                    {"event": "some waypoint", "timestamp": datetime(2020, 1, 2)},
                    {"event": "", "timestamp": datetime(2020, 1, 3)},
                ],
            }

            create_events(events_by_person=events, team=self.team)

            # Clear the cache so tests that have already run don't
            # interfere with this test.
            cache.clear()

            odds = get_funnel_correlation_ok(
                client=self.client,
                team_id=self.team.pk,
                request=FunnelCorrelationRequest(
                    events=json.dumps(
                        [EventPattern(id="signup"), EventPattern(id="some waypoint"), EventPattern(id="view insights")]
                    ),
                    date_to="2020-04-04",
                ),
            )

            assert odds == {
                "is_cached": False,
                "last_refresh": "2020-01-01T00:00:00Z",
                "result": {"events": []},
            }
@pytest.fixture(autouse=True)
def clear_django_cache():
    # Insight responses are cached; clear between tests so every test sees
    # fresh (non-cached) results.
    cache.clear()
def create_team(organization):
    # Minimal Team factory used by the cross-team isolation test.
    return Team.objects.create(name="Test Team", organization=organization)
def create_events(events_by_person, team: Team):
    """Create one person per distinct_id and insert all of that person's
    events for the given team."""
    for distinct_id, person_events in events_by_person.items():
        create_person(distinct_ids=[distinct_id], team=team)
        for person_event in person_events:
            _create_event(
                team=team,
                distinct_id=distinct_id,
                event=person_event["event"],
                timestamp=person_event["timestamp"],
                properties=person_event.get("properties", {}),
            )
class EventPattern(TypedDict):
    # Shape of the {"id": ...} event descriptors the endpoint accepts.
    id: str
@dataclasses.dataclass
class FunnelCorrelationRequest:
    """Query parameters for the funnel correlation endpoint."""

    # Needs to be json encoded list of `EventPattern`s
    events: str
    date_to: str
    funnel_step: Optional[int] = None
    date_from: Optional[str] = None
def get_funnel_correlation(client: Client, team_id: int, request: FunnelCorrelationRequest):
    """Issue the correlation GET, dropping parameters that were left unset."""
    params = dataclasses.asdict(request)
    query = {key: value for key, value in params.items() if value is not None}
    return client.get(f"/api/projects/{team_id}/insights/funnel/correlation", data=query)
def get_funnel_correlation_ok(client: Client, team_id: int, request: FunnelCorrelationRequest) -> Dict[str, Any]:
    """Like get_funnel_correlation, but requires a 200 and parses the body."""
    resp = get_funnel_correlation(client=client, team_id=team_id, request=request)
    assert resp.status_code == 200
    return resp.json()
def create_person(**kwargs):
    """Thin wrapper around the Person model factory."""
    return Person.objects.create(**kwargs)
def _create_event(**kwargs):
    """Insert a ClickHouse event, stamping a fresh UUID on every call."""
    kwargs["event_uuid"] = uuid4()
    create_event(**kwargs)
| [
"noreply@github.com"
] | noreply@github.com |
fb42934d23cdc94376fa7676d0d959c43185e4a6 | c42c21ca1d17595f0163097d521252c95d4ba08e | /game.py | 384e99a149b85c76a39ce8f9b114024cd65af5ea | [] | no_license | deepsleeping/dev | 913892b985df2287806cae6c2051aef6eaf07bf7 | 3c8d9a3034da6dad9cbbcbffa5224cf76cbbc37e | refs/heads/master | 2021-01-13T17:24:06.996392 | 2017-02-13T11:45:29 | 2017-02-13T11:45:29 | 81,791,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | import random
# Number guessing game (Korean UI): the player has 10 tries to guess a
# random number between 1 and 10; wrong guesses are collected in `tong`.
print("반가워")
point = 10
rand=random.randrange(1,11)
tong=[]
while(point>0):
    # Prompt: "pick a number from 1 to 10 and type it"
    i=input("1부터10까지중 원하는 숫자를 골라서 적어")
    if(rand>int(i)):
        # Too low: lose a point, remember the wrong guess.
        point = point -1
        tong.append(i)
        print("그 숫자보단 커")
    elif(rand<int(i)):
        # Too high: lose a point, remember the wrong guess.
        point = point -1
        tong.append(i)
        print("그 숫자보단 작아")
    else:
        # Correct: print the score (remaining points) and the wrong guesses.
        print("축하해")
        print("네 점수는 {}점 이야".format(point))
        for a in tong:
            print("네가 고른 틀린 숫자들은{}".format(a))
        break | [
"kimrung4130@gmail.com"
] | kimrung4130@gmail.com |
5c9c68f5451ef62036cba6f3ce7dd3a70f5b2164 | 6744024e7b3a8715e709d2b78ff3f52eddc56dde | /tcase/api/serializers.py | fde4edd0a78acd7338255b528755a2ee491ed1d2 | [] | no_license | jc36/testcase | 97b45daf25cff78e1dcdfc5f47c27c9b87046627 | 3d761dcb3d9c1ee8154855c4796fd83326f5daf4 | refs/heads/master | 2020-03-09T21:36:05.315853 | 2018-04-19T06:36:05 | 2018-04-19T06:36:05 | 129,012,948 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | from rest_framework import serializers
from rest_framework.fields import CurrentUserDefault
from tcase import services
from ..models import Like, Post, User
class LikeSerializer(serializers.ModelSerializer):
    """Serializer for Like objects; the liker is always the request user."""

    liker = serializers.HiddenField(default=serializers.CurrentUserDefault())

    class Meta:
        model = Like
        fields = (
            'url',
            'liker',
            'post',
            'like'
        )
class PostSerializer(serializers.ModelSerializer):
    """Serializer for Post objects; the author is always the request user."""

    author = serializers.HiddenField(default=serializers.CurrentUserDefault())
    # is_fan = serializers.Serialize rMethodField()

    class Meta:
        model = Post
        fields = (
            'url',
            'pk',
            'author',
            'text',
            # 'created_at',
            # 'updated_at',
            # 'is_fan'
        )

    # def get_is_fan(self, obj) -> bool:
    #     """Checks whether `request.user` liked the post (`obj`).
    #     """
    #     user = self.context.get('request').user
    #     return services.is_fan(user, obj)
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """User serializer; the password is accepted on write but never exposed."""

    password = serializers.CharField(write_only=True)

    class Meta:
        model = User
        fields = (
            'url',
            'username',
            'password',
            'email',
            # 'is_staff'
        )

    def create(self, validated_data):
        # Store the password via set_password (hashed) after creation,
        # then persist the updated user.
        user = super(UserSerializer, self).create(validated_data)
        user.set_password(validated_data['password'])
        user.save()
        return user
| [
"jc36@rambler.ru"
] | jc36@rambler.ru |
298b9cbf5ba7b4f5dbafb6f07e7d309fcb309ec5 | 6b9fddd4c0b5665127d18d99cf9f832ed010331a | /run.py | 77c6751c59a34602001f34a75a9a442dd7ead77f | [] | no_license | oleksost/Continuous_Control_DDPG | e41a6b6d0a0fb56a5b96a38b5784658646bcdf7f | 89b1aeacc065d824b3064da7c671ee62be3261de | refs/heads/master | 2020-04-29T01:01:15.790344 | 2019-03-14T23:59:27 | 2019-03-14T23:59:27 | 175,716,102 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py |
from unityagents import UnityEnvironment
import numpy as np
import torch
import sys
import argparse
from collections import deque
from ddpg_agent import Agent
import matplotlib.pyplot as plt
# Script setup: parse CLI args, open the Unity Reacher environment and
# inspect its observation/action spaces, then construct the DDPG agent.
sys.stdout.flush()

parser=argparse.ArgumentParser(description='Train an agent:')
parser.add_argument('--env',default='Reacher_Linux_NoVis/Reacher.x86_64', type=str,required=False,help='Path to the downloaded Unity environment')
# NOTE(review): the help text below looks copy-pasted -- it describes a
# critic path, but the argument is the number of training episodes.
parser.add_argument('--n_episodes',default=100, type=int, required=False,help='Path to the trained critic')
opt=parser.parse_args()

env = UnityEnvironment(file_name=opt.env)

# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]

# reset the environment
env_info = env.reset(train_mode=False)[brain_name]

# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)

# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)

# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])

agent = Agent(state_size=state_size, action_size=action_size, random_seed=2)
def ddpg(n_episodes=opt.n_episodes, max_t=1000, print_every=1):
    """Train the DDPG agent on the (module-level) Unity environment.

    n_episodes  -- number of training episodes.
    max_t       -- maximum timesteps per episode.
    print_every -- window (episodes) for the running average, and the
                   checkpoint-saving interval.
    Returns the list of per-episode scores (mean over all agents).
    """
    scores_deque = deque(maxlen=print_every)
    scores = []
    for i_episode in range(1, n_episodes + 1):
        env_info = env.reset(train_mode=True)[brain_name]
        states = env_info.vector_observations
        agent.reset()
        score = np.zeros(num_agents)
        for t in range(max_t):
            actions = agent.act(states, True)
            env_info = env.step(actions)[brain_name]
            next_states, rewards, dones = env_info.vector_observations, env_info.rewards, env_info.local_done
            # Feed every agent's transition to the shared replay/learner.
            for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
                agent.step(state, action, reward, next_state, done, t)
            states = next_states
            score += rewards
            # Exit the episode when ANY agent reports done.  (The original
            # tested `done`, i.e. only the last agent from the zip above.)
            if np.any(dones):
                break
        scores_deque.append(np.mean(score))
        scores.append(np.mean(score))
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end="")
        if i_episode % print_every == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
            # Periodically checkpoint both networks.
            torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
            torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
    return scores
# Train, then plot the per-episode scores and save the figure to PDF.
scores = ddpg()

fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores) + 1), scores)
plt.ylabel('Score')
fig.savefig('score_clip_every_20.pdf', format='pdf', bbox_inches="tight", dpi=300)
| [
"oleks.ostapenko@icloud.com"
] | oleks.ostapenko@icloud.com |
b28426550722f331695c013bf055d217500ceada | 2486bac4d422a99cf06db68bb5b78f31bfff6526 | /ext/serf/build/check.py | fc49d336454f9da71bf4e7fd2c0d5395650656ff | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | beachmiles/CommitMonitorSVN | 2d7bc1c7509cdbbcc107473cab3c2be4cf6d2445 | a0cbe1eb2d6e6063f3603477c0a9ef1ab39774dd | refs/heads/master | 2020-03-30T01:18:53.698019 | 2018-09-28T22:11:04 | 2018-09-28T22:11:04 | 150,569,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,188 | py | #!/usr/bin/env python
#
# check.py : Run all the test cases.
#
# ===================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ===================================================================
#
import sys
import glob
import subprocess
import os
if __name__ == '__main__':
    # NOTE: Python 2 script (print statements).
    # get the test directory from the commandline, if set.
    if len(sys.argv) > 1:
        testdir = sys.argv[1]
    else:
        testdir = 'test'

    # Build directory containing the compiled test executables
    # (defaults to the same 'test' directory).
    if len(sys.argv) > 2:
        test_builddir = sys.argv[2]
    else:
        test_builddir = 'test'

    # define test executable paths
    if sys.platform == 'win32':
        SERF_RESPONSE_EXE = 'serf_response.exe'
        TEST_ALL_EXE = 'test_all.exe'
    else:
        SERF_RESPONSE_EXE = 'serf_response'
        TEST_ALL_EXE = 'test_all'
    SERF_RESPONSE_EXE = os.path.join(test_builddir, SERF_RESPONSE_EXE)
    TEST_ALL_EXE = os.path.join(test_builddir, TEST_ALL_EXE)

    # Find test responses and run them one by one; abort on first failure.
    for case in glob.glob(testdir + "/testcases/*.response"):
        print "== Testing %s ==" % (case)
        try:
            subprocess.check_call([SERF_RESPONSE_EXE, case])
        except subprocess.CalledProcessError:
            print "ERROR: test case %s failed" % (case)
            sys.exit(1)

    # Run the combined unit-test binary.
    print "== Running the unit tests =="
    try:
        subprocess.check_call(TEST_ALL_EXE)
    except subprocess.CalledProcessError:
        print "ERROR: test(s) failed in test_all"
        sys.exit(1)
| [
"miles2know@yahoo.com"
] | miles2know@yahoo.com |
f486e9a0a0c4bfa8648db2f3ab716096708a8df8 | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/common/types/feed_common.py | 12888a33eb9f184c2402a3337e503e869b2be75f | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 1,263 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Proto module descriptor: registers every message listed in `manifest`
# with the protobuf runtime under the given package name.
__protobuf__ = proto.module(
    package="google.ads.googleads.v8.common",
    marshal="google.ads.googleads.v8",
    manifest={"Money",},
)


class Money(proto.Message):
    r"""Represents a price in a particular currency.

    Attributes:
        currency_code (str):
            Three-character ISO 4217 currency code.
        amount_micros (int):
            Amount in micros. One million is equivalent
            to one unit.
    """

    # Field numbers 3 and 4 match the upstream Money proto definition;
    # both fields are proto3 optional.
    currency_code = proto.Field(proto.STRING, number=3, optional=True,)
    amount_micros = proto.Field(proto.INT64, number=4, optional=True,)


# Public API of this module: every message named in the proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | noreply@github.com |
a2210b80b17fc2c25f10a5d1274a2508f5ca54df | 86baf32396ddc18644f32d097b15b430f00c03cc | /ros2_track_imu/driver.py | 46775c1e7d36c97d88f55ceadfdc18cc4a53bb68 | [
"MIT"
] | permissive | capitaneanu/ros2_track_imu | b7e277eba4b3d97b6431d61339b6a6100277a3c6 | 21741c413ceec1104c62276a6d00deddb95fec4c | refs/heads/master | 2023-07-28T09:03:22.877895 | 2020-01-18T01:50:37 | 2020-01-18T01:50:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,452 | py | import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Imu
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
import math
import time
from transforms3d.euler import euler2quat as quaternion_from_euler
# Conversion factor from degrees to radians.
degrees2rad = math.pi / 180.0
# Module-level yaw offset (degrees); rewritten at runtime by
# Ros2TrackIMUDriver.reconfig_callback.
imu_yaw_calibration = 0.0
class Ros2TrackIMUDriver(Node):
    """ROS 2 node for a Razor-style AHRS/IMU attached over a serial port.

    Configures the board with calibration values, parses its text output
    (yaw/pitch/roll lines) and publishes `sensor_msgs/Imu` messages plus
    periodic `diagnostic_msgs/DiagnosticArray` status.
    """

    def __init__(self):
        super().__init__('track_imu_driver')
        # We only care about the most recent measurement, i.e. queue_size=1
        self.pub = self.create_publisher(Imu, 'imu', 10)
        self.diag_pub = self.create_publisher(DiagnosticArray, 'diagnostics', 10)
        self.diag_pub_time = self.get_clock().now()
        self.imu_msg = Imu()
        self.frame_id = self.declare_parameter('frame_id', "base_imu_link").value
        # Orientation covariance estimation:
        # Observed orientation noise: 0.3 degrees in x, y, 0.6 degrees in z
        # Magnetometer linearity: 0.1% of full scale (+/- 2 gauss) => 4 milligauss
        # Earth's magnetic field strength is ~0.5 gauss, so magnetometer nonlinearity could
        # cause ~0.8% yaw error (4mgauss/0.5 gauss = 0.008) => 2.8 degrees, or 0.050 radians
        # i.e. variance in yaw: 0.0025
        # Accelerometer non-linearity: 0.2% of 4G => 0.008G. This could cause
        # static roll/pitch error of 0.8%, owing to gravity orientation sensing
        # error => 2.8 degrees, or 0.05 radians. i.e. variance in roll/pitch: 0.0025
        # so set all covariances the same.
        self.imu_msg.orientation_covariance = [
            0.0025, 0.0, 0.0,
            0.0, 0.0025, 0.0,
            0.0, 0.0, 0.0025
        ]
        # Angular velocity covariance estimation:
        # Observed gyro noise: 4 counts => 0.28 degrees/sec
        # nonlinearity spec: 0.2% of full scale => 8 degrees/sec = 0.14 rad/sec
        # Choosing the larger (0.14) as std dev, variance = 0.14^2 ~= 0.02
        self.imu_msg.angular_velocity_covariance = [
            0.02, 0.0, 0.0,
            0.0, 0.02, 0.0,
            0.0, 0.0, 0.02
        ]
        # linear acceleration covariance estimation:
        # observed acceleration noise: 5 counts => 20milli-G's ~= 0.2m/s^2
        # nonliniarity spec: 0.5% of full scale => 0.2m/s^2
        # Choosing 0.2 as std dev, variance = 0.2^2 = 0.04
        self.imu_msg.linear_acceleration_covariance = [
            0.04, 0.0, 0.0,
            0.0, 0.04, 0.0,
            0.0, 0.0, 0.04
        ]
        # Last parsed attitude (radians) and message counter.
        self.roll = 0
        self.pitch = 0
        self.yaw = 0
        self.seq = 0
        self.accel_factor = 9.806 / 256.0  # sensor reports accel as 256.0 = 1G (9.8m/s^2). Convert to m/s^2.
        # read calibration parameters
        # accelerometer
        # self.declare_parameter('time_ref_source').value
        self.accel_x_min = self.declare_parameter('accel_x_min', -250.0).value
        self.accel_x_max = self.declare_parameter('accel_x_max', 250.0).value
        self.accel_y_min = self.declare_parameter('accel_y_min', -250.0).value
        self.accel_y_max = self.declare_parameter('accel_y_max', 250.0).value
        self.accel_z_min = self.declare_parameter('accel_z_min', -250.0).value
        self.accel_z_max = self.declare_parameter('accel_z_max', 250.0).value
        # magnetometer
        self.magn_x_min = self.declare_parameter('magn_x_min', -600.0).value
        self.magn_x_max = self.declare_parameter('magn_x_max', 600.0).value
        self.magn_y_min = self.declare_parameter('magn_y_min', -600.0).value
        self.magn_y_max = self.declare_parameter('magn_y_max', 600.0).value
        self.magn_z_min = self.declare_parameter('magn_z_min', -600.0).value
        self.magn_z_max = self.declare_parameter('magn_z_max', 600.0).value
        self.calibration_magn_use_extended = self.declare_parameter('calibration_magn_use_extended', False).value
        self.magn_ellipsoid_center = self.declare_parameter('magn_ellipsoid_center', [0, 0, 0]).value
        # Array of arrays parameters not supported.
        #self.magn_ellipsoid_transform = self.declare_parameter('magn_ellipsoid_transform', [[0, 0, 0], [0, 0, 0], [0, 0, 0]]).value
        self.magn_ellipsoid_transform = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
        self.imu_yaw_calibration = self.declare_parameter('imu_yaw_calibration', 0.0).value
        # gyroscope
        self.gyro_average_offset_x = self.declare_parameter('gyro_average_offset_x', 0.0).value
        self.gyro_average_offset_y = self.declare_parameter('gyro_average_offset_y', 0.0).value
        self.gyro_average_offset_z = self.declare_parameter('gyro_average_offset_z', 0.0).value

    def configure_imu(self, ser):
        """Write calibration values to the board over serial port `ser`.

        NOTE(review): the '#...' commands are passed as `str`; pyserial 3.x
        `Serial.write` expects `bytes` — confirm which serial object is used.
        """
        ### configure board ###
        # stop datastream
        ser.write('#o0' + chr(13))
        # discard old input
        # automatic flush - NOT WORKING
        # ser.flushInput() #discard old input, still in invalid format
        # flush manually, as above command is not working
        discard = ser.readlines()
        # set output mode
        ser.write('#ox' + chr(13))  # To start display angle and sensor reading in text
        self.get_logger().info("Writing calibration values to razor IMU board...")
        # set calibration values
        ser.write('#caxm' + str(self.accel_x_min) + chr(13))
        ser.write('#caxM' + str(self.accel_x_max) + chr(13))
        ser.write('#caym' + str(self.accel_y_min) + chr(13))
        ser.write('#cayM' + str(self.accel_y_max) + chr(13))
        ser.write('#cazm' + str(self.accel_z_min) + chr(13))
        ser.write('#cazM' + str(self.accel_z_max) + chr(13))
        if (not self.calibration_magn_use_extended):
            # Simple min/max magnetometer calibration.
            ser.write('#cmxm' + str(self.magn_x_min) + chr(13))
            ser.write('#cmxM' + str(self.magn_x_max) + chr(13))
            ser.write('#cmym' + str(self.magn_y_min) + chr(13))
            ser.write('#cmyM' + str(self.magn_y_max) + chr(13))
            ser.write('#cmzm' + str(self.magn_z_min) + chr(13))
            ser.write('#cmzM' + str(self.magn_z_max) + chr(13))
        else:
            # Extended (ellipsoid fit) magnetometer calibration.
            ser.write('#ccx' + str(self.magn_ellipsoid_center[0]) + chr(13))
            ser.write('#ccy' + str(self.magn_ellipsoid_center[1]) + chr(13))
            ser.write('#ccz' + str(self.magn_ellipsoid_center[2]) + chr(13))
            ser.write('#ctxX' + str(self.magn_ellipsoid_transform[0][0]) + chr(13))
            ser.write('#ctxY' + str(self.magn_ellipsoid_transform[0][1]) + chr(13))
            ser.write('#ctxZ' + str(self.magn_ellipsoid_transform[0][2]) + chr(13))
            ser.write('#ctyX' + str(self.magn_ellipsoid_transform[1][0]) + chr(13))
            ser.write('#ctyY' + str(self.magn_ellipsoid_transform[1][1]) + chr(13))
            ser.write('#ctyZ' + str(self.magn_ellipsoid_transform[1][2]) + chr(13))
            ser.write('#ctzX' + str(self.magn_ellipsoid_transform[2][0]) + chr(13))
            ser.write('#ctzY' + str(self.magn_ellipsoid_transform[2][1]) + chr(13))
            ser.write('#ctzZ' + str(self.magn_ellipsoid_transform[2][2]) + chr(13))
        ser.write('#cgx' + str(self.gyro_average_offset_x) + chr(13))
        ser.write('#cgy' + str(self.gyro_average_offset_y) + chr(13))
        ser.write('#cgz' + str(self.gyro_average_offset_z) + chr(13))
        # print calibration values for verification by user
        ser.flushInput()
        ser.write('#p' + chr(13))
        calib_data = ser.readlines()
        calib_data_print = "Printing set calibration values:\r\n"
        for line in calib_data:
            calib_data_print += line
        self.get_logger().info(calib_data_print)

    def publish_imu_data(self, line):
        """Parse one 'ypr,...' text line from the board and publish an Imu
        message (orientation only) plus, periodically, diagnostics.
        """
        line = line.decode("utf-8").replace("ypr,", "").strip().replace("\t", "")  # Delete "#YPRAG="
        # f.write(line) # Write to the output log file
        words = line.split(",")  # Fields split
        if len(words) > 2:
            # in AHRS firmware z axis points down, in ROS z axis points up (see REP 103)
            yaw_deg = -float(words[0])
            yaw_deg = yaw_deg + self.imu_yaw_calibration
            # Wrap the calibrated yaw back into [-180, 180] degrees.
            if yaw_deg > 180.0:
                yaw_deg = yaw_deg - 360.0
            if yaw_deg < -180.0:
                yaw_deg = yaw_deg + 360.0
            yaw = yaw_deg * degrees2rad
            # in AHRS firmware y axis points right, in ROS y axis points left (see REP 103)
            pitch = -float(words[1]) * degrees2rad
            roll = float(words[2]) * degrees2rad
            # Publish message
            # AHRS firmware accelerations are negated
            # This means y and z are correct for ROS, but x needs reversing
            # self.imu_msg.linear_acceleration.x = -float(words[3]) * self.accel_factor
            # self.imu_msg.linear_acceleration.y = float(words[4]) * self.accel_factor
            # self.imu_msg.linear_acceleration.z = float(words[5]) * self.accel_factor
            # self.imu_msg.angular_velocity.x = float(words[6])
            # in AHRS firmware y axis points right, in ROS y axis points left (see REP 103)
            # self.imu_msg.angular_velocity.y = -float(words[7])
            # in AHRS firmware z axis points down, in ROS z axis points up (see REP 103)
            # self.imu_msg.angular_velocity.z = -float(words[8])
            # NOTE(review): transforms3d's euler2quat (aliased as
            # quaternion_from_euler) returns (w, x, y, z); mapping q[0]->x ...
            # q[3]->w below looks like the tf (x, y, z, w) convention — verify.
            q = quaternion_from_euler(roll, pitch, yaw)
            self.imu_msg.orientation.x = q[0]
            self.imu_msg.orientation.y = q[1]
            self.imu_msg.orientation.z = q[2]
            self.imu_msg.orientation.w = q[3]
            #self.imu_msg.header.stamp = rclpy.time.Time(seconds=time.time()).to_msg()
            self.imu_msg.header.stamp = self.get_clock().now().to_msg()
            self.imu_msg.header.frame_id = self.frame_id
            self.seq = self.seq + 1
            self.pub.publish(self.imu_msg)
            # Publish diagnostics once the stored timestamp is in the past.
            if self.diag_pub_time < self.get_clock().now():
                diag_arr = DiagnosticArray()
                diag_arr.header.stamp = self.get_clock().now().to_msg()
                diag_arr.header.frame_id = '1'
                diag_msg = DiagnosticStatus()
                diag_msg.name = 'Track_IMU'
                diag_msg.level = DiagnosticStatus.OK
                diag_msg.message = 'Received AHRS measurement'
                roll_key_val = KeyValue()
                yaw_key_val = KeyValue()
                pitch_key_val = KeyValue()
                seq_key_val = KeyValue()
                # Report attitude back in degrees for human readability.
                roll_key_val.key = 'roll (deg)'
                roll_key_val.value = str(roll * (180.0 / math.pi))
                yaw_key_val.key = 'yaw (deg)'
                yaw_key_val.value = str(yaw * (180.0 / math.pi))
                pitch_key_val.key = 'pitch (deg)'
                pitch_key_val.value = str(pitch * (180.0 / math.pi))
                diag_msg.values.append(roll_key_val)
                diag_msg.values.append(yaw_key_val)
                diag_msg.values.append(pitch_key_val)
                seq_key_val.key= 'sequence number'
                seq_key_val.value = str(self.seq)
                diag_msg.values.append(seq_key_val)
                diag_arr.status.append(diag_msg)
                self.diag_pub.publish(diag_arr)

    # Callback for dynamic reconfigure requests
    def reconfig_callback(self, config, level):
        """Update the module-level yaw calibration from a reconfigure request
        and return the (unchanged) config, as the reconfigure API expects.
        """
        global imu_yaw_calibration
        self.get_logger().info("""Reconfigure request for yaw_calibration: %d""" % (config['yaw_calibration']))
        # if imu_yaw_calibration != config('yaw_calibration'):
        imu_yaw_calibration = config['yaw_calibration']
        self.get_logger().info("Set imu_yaw_calibration to %d" % (imu_yaw_calibration))
        return config
"ankl@kth.se"
] | ankl@kth.se |
535f22dc015e9609cba7c8829680c4e77a5af4c9 | e8db54847a0f2d2878579dc9feb269e60ec5019d | /python/login.py | b570cdc7479dd0a30ad7cf0c838500e3bd209980 | [] | no_license | Poseidon1979/Tanzu_GitOps_demo | 62dd60f72a6f5f084186484937b6787703b1f80a | 7cf6753c1b27d8905282155e38c2723c5db25c2d | refs/heads/main | 2023-04-24T12:01:34.844768 | 2021-05-17T11:58:53 | 2021-05-17T11:58:53 | 359,063,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from pathlib import Path
import json
import requests
# Shared HTTP session so the vCenter auth cookie persists across calls.
s = requests.Session()


def get_clusters():
    """Authenticate against vCenter and return the ids of all
    namespace-management (supervisor) clusters.

    Returns:
        list: the ``cluster`` identifier of every cluster reported by the
        vCenter REST API.
    """
    url_vcenter = 'https://vcsa-01.haas-451.pez.vmware.com/rest/com/vmware/cis/session'
    # SECURITY NOTE(review): credentials are hard-coded in source; move them
    # to environment variables or a secrets store before sharing this code.
    username_vcenter = 'administrator@vsphere.local'
    password_vcenter = 'rsKNMG4sw5R8mcvDpu!'
    url_get_clusters = 'https://vcsa-01.haas-451.pez.vmware.com/api/vcenter/namespace-management/clusters'
    # verify=False disables TLS certificate validation (lab environment).
    resp_login = s.post(url_vcenter, auth=(username_vcenter,password_vcenter), verify=False)
    resp_clusters = s.get(url_get_clusters, verify=False)
    clusters_json = resp_clusters.json()
    cluster_ids = list()
    for cluster_json in clusters_json:
        cluster_ids.append(cluster_json['cluster'])
    return cluster_ids
    #number_SC = len(cluster_ids)
    #i = 0
    #print("Here are existing supervisor clusters:")
    #while i < number_SC:
    #    i = i+1
    #    print(str(i) + ": " + clusters_json[i-1]['cluster_name'])
    #sc_selected = input("Please input the number of the supervisor cluster:")
    #while not str.isdigit(sc_selected):
    #    sc_selected = input("This is not a number. Please input the number of the supervisor cluster:")
    #while int(sc_selected) > i:
    #    sc_selected = input("This number does not exist. Please input the number of the supervisor cluster:")
def create_namespace():
    """Create a vSphere namespace via the vCenter REST API.

    The request payload is read from a JSON file on disk and sent with the
    shared, already-authenticated session ``s``.

    Returns:
        requests.Response: the raw response of the create call.
    """
    print("Creating namespace: ")
    url_create_namespace = 'https://vcsa-01.haas-451.pez.vmware.com/api/vcenter/namespaces/instances'
    # NOTE(review): absolute local path — breaks on other machines; consider
    # making this a parameter or a path relative to the project.
    json_create_namespace = '/Users/pyang/Downloads/v7k8s-tc-templates-master/sc06/ns-create_api-payload.json'
    with open(json_create_namespace) as f:
        payload = json.load(f)
    resp_create_namespace = s.post(url_create_namespace, json=payload, verify=False)
    # url_get_namespace = 'https://vcsa-01.haas-451.pez.vmware.com/api/vcenter/namespaces/instances/ns06'
    # resp_get_namespace = s.get(url_get_namespace, verify=False)
    return (resp_create_namespace)
"paul.yang1979@gmail.com"
] | paul.yang1979@gmail.com |
9c71cae29c646dba625f6dd2f8a26ec85b2aa165 | 045e18de0e2c510f9815063a52e22ec582fa2edf | /test.py | 41daf909f5da1c09d8b16801f0ecf269dcfacb3a | [
"Apache-2.0"
] | permissive | Jerry671/natural-language-project | 283653a13e4863a73d2f2acadb99e61578221dc9 | 943415ac6085a74363b4f7e881454ebcfccc7689 | refs/heads/main | 2023-01-23T11:17:21.907122 | 2020-11-29T15:30:45 | 2020-11-29T15:30:45 | 316,448,524 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
# Read the Pokec social-network edge list (space-separated node pairs,
# no header row) into a DataFrame.
# NOTE(review): absolute Windows path — parameterize before reuse.
path = "E:\\Mechine_training data\\soc-pokec-relationships.txt"
relation = pd.read_table(path, sep=' ', header=None)
"noreply@github.com"
] | noreply@github.com |
fb5219a026dbe5e28b72b4d3c360e4020955bb23 | a1bbcdcbd174748c753a04930521edb63c920105 | /launcher.py | 331e48cc55542f053fb9e71d34164870333ff005 | [
"MIT"
] | permissive | palmtrww/Discord.py-Music-Bot | 85e4a5dbd9af53d035b1ef4fb35ccbb4a949ac5f | b9290924209e3708f7aec6d4c7b31eebdf35741d | refs/heads/main | 2023-06-05T16:50:59.138252 | 2021-06-25T03:37:03 | 2021-06-25T03:37:03 | 380,049,529 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from bot import MusicBot
def main():
    """Entry point: construct the music bot and start it."""
    MusicBot().run()


if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
26f36f99e3b9ef14808b1fb7abada37a0237de7b | a6c516f69b29dfc0b6a82078b9e5e74be856dd94 | /models/spectra.py | 601aabe978c8145f180202745657b148bf43816c | [] | no_license | scottwedge/InfraredSpectraDBApp | 894469b88947de420347bc767661ff057ee00b04 | d451a6613e4669fe09cddc7164913d85320878fc | refs/heads/master | 2021-05-16T19:00:36.065396 | 2019-06-17T10:53:19 | 2019-06-17T10:53:19 | 250,430,443 | 0 | 0 | null | 2020-03-27T03:25:05 | 2020-03-27T03:25:04 | null | UTF-8 | Python | false | false | 924 | py | from sqlalchemy import (
Column,
Index,
Integer,
Text,
ForeignKey,
String,
)
from .meta import Base
from sqlalchemy.orm import relationship
class Spectra(Base):
    """One recorded spectrum: a unique label plus its acquisition time."""
    __tablename__ = 'Spectra'
    # Surrogate primary key.
    spectra_id = Column(Integer, primary_key=True)
    # Human-readable, unique name of the spectrum.
    label = Column(String(32), nullable=False, unique=True)
    # Acquisition timestamp, stored as an integer.
    time = Column(Integer, nullable=False)
class Spectra_detail(Base):
    """Individual sample point of a spectrum (index -> measured value)."""
    __tablename__ = 'Spectra_detail'
    spectra_id = Column(Integer, primary_key=True)
    # Position of the sample within the spectrum.
    index= Column(Integer, nullable=False, unique=True)
    # Measured intensity at that position.
    value = Column(Integer, nullable=False)
class Graph_experiment(Base):
    """Four experiment values (a-d) associated with one spectrum."""
    __tablename__ = 'Graph_experiment'
    spectra_id = Column(Integer, primary_key = True)
    a = Column(Integer, nullable=False)
    b = Column(Integer, nullable=False)
    c = Column(Integer, nullable=False)
    d = Column(Integer, nullable=False)
| [
"noreply@github.com"
] | noreply@github.com |
ffc66bf74de39af85ba59380d90ca0ccdb914a3e | 8ea17c587875d9342b25492ef325e372eed59126 | /crowdcropper.py | 13357f95440dc47218f1a862ca4f2452ecc5b379 | [] | no_license | rcui/crowdanalytic | eb370dcd27def2f2cb2d0c716bce5b8e85107afb | ac5e5d398a56c0ea7759f521c54e81235891f32a | refs/heads/master | 2021-05-04T08:51:10.456448 | 2016-10-09T02:57:03 | 2016-10-09T02:57:03 | 70,372,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | import os
import numpy
import cv2
# Detect faces in a crowd photo, draw bounding boxes on the source image,
# and write each face crop to dumpimg/.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
img = cv2.imread('srcimg/crowd.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# scaleFactor=1.1, minNeighbors=5 — standard Haar-cascade settings.
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
cropped = []
for (x, y, w, h) in faces:
    # Mark the detection on the original image (blue box, 2 px thick).
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Keep only the colour crop; the unused grayscale ROI was removed.
    # (Cropping after drawing, so box edges may appear in the crop —
    # this matches the original behavior.)
    roi_color = img[y:y + h, x:x + w]
    cropped.append(roi_color)
for i, face in enumerate(cropped):
    cv2.imwrite('dumpimg/face' + str(i) + '.png', face)
| [
"rcui@umass.edu"
] | rcui@umass.edu |
9d5debcb9f80b706bfaf5b69b6b8ba1db581e571 | 88df0c723112277cd63783a3609c2fcd8eff525d | /BOJ/4949.py | 57fc068539dce32d471aa3866b09a6effbae30e5 | [] | no_license | JHW0900/Problem_Sloving | 72fc020f567eab2689dd9371c6d237c8777ce9fd | 0094ad023263cb2fa9872ddbaf98f96af810767f | refs/heads/main | 2023-06-29T04:37:58.396901 | 2021-08-06T23:13:20 | 2021-08-06T23:13:20 | 324,377,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | import sys
# BOJ 4949: decide, per sentence, whether its '(' / '[' brackets are balanced.
# Each sentence ends with '.'; a lone '.' terminates the input.
input = sys.stdin.readline

# Matching closer for each opening bracket.
tuple_types = {'(': ')', '[': ']'}

prev = []  # retained from a commented-out multi-line mode in the original
while True:
    line = input().rstrip()
    if line == '.':
        break
    if not line:
        # Blank line: the original produced no output for it either.
        continue
    stack = []
    balanced = True
    # The final character is the terminating '.', so only the characters
    # before it are scanned for brackets.
    for ch in line[:-1]:
        if ch == '(' or ch == '[':
            stack.append(ch)
        elif ch == ')' or ch == ']':
            if not stack or tuple_types[stack.pop()] != ch:
                balanced = False
                break
    print('yes' if balanced and not stack else 'no')
"jhw0900@gmail.com"
] | jhw0900@gmail.com |
b4c6f3998452f1ebf85e09914d0b178af59e03e1 | 0b55f95166e5b8d76f0a12ed223dc1a37b739c03 | /src/py/trainlib/norm_train.py | a8fc16dc6a863bbb2640298c7d0ba0f5d0c9dc41 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lbumbolo/ShapeVariationAnalyzer | d183f5138a67992c5d14e67034c53df8ebd48d08 | 976e22cbacc87fb593d92e24cbdbba6c99a64060 | refs/heads/master | 2020-08-08T06:01:07.090597 | 2019-10-15T21:02:33 | 2019-10-15T21:02:33 | 213,746,294 | 0 | 0 | Apache-2.0 | 2019-10-08T20:24:37 | 2019-10-08T20:24:37 | null | UTF-8 | Python | false | false | 7,245 | py |
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
import argparse
import norm_nn as nn
import os
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from datetime import datetime
from math import sqrt
from matplotlib import pyplot
print("Tensorflow version:", tf.__version__)

# ---- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
# group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('--pickle', type=str, help='Pickle file, check the script readImages to generate this file. check generate_polys.py in generatelib', required=True)
parser.add_argument('--out', help='Output directory,the output name will be <out>/model-<num step>', default="./")
parser.add_argument('--learning_rate', help='Learning rate, default=1e-5', type=float, default=1e-5)
parser.add_argument('--decay_rate', help='decay rate, default=0.96', type=float, default=0.96)
parser.add_argument('--decay_steps', help='decay steps, default=10000', type=int, default=10000)
parser.add_argument('--batch_size', help='Batch size for evaluation, default=64', type=int, default=1)
parser.add_argument('--num_epochs', help='Number of iterations, default=10', type=int, default=10)
parser.add_argument('--model', help='Model file computed with metatrader_train.py')
parser.add_argument('--lookback', help='Create sets of series of this size to train the network', type=int, default=1)
parser.add_argument('--lookforward', help='Create sets of series of this size for prediction', type=int, default=1)
parser.add_argument('--ps_device', help='Process device, to store memory', type=str, default="/cpu:0")
parser.add_argument('--w_device', help='Work device, does operations', type=str, default="/cpu:0")

args = parser.parse_args()

pickle_file = args.pickle
outvariablesdirname = args.out
learning_rate = args.learning_rate
decay_rate = args.decay_rate
decay_steps = args.decay_steps
batch_size = args.batch_size
num_epochs = args.num_epochs
model = args.model
lookback = args.lookback
lookforward = args.lookforward
ps_device = args.ps_device
w_device = args.w_device

# ---- load training data from the pickle ------------------------------------
# NOTE(review): the file handle `f` is never closed; a `with` block would be
# safer.
f = open(pickle_file, 'rb')
data = pickle.load(f)
train_dataset = data["train_dataset"]
train_labels = data["train_labels"]
points_to_cells = data["points_to_cells"]
cells_to_points = data["cells_to_points"]

train_dataset_shape = train_dataset.shape
train_labels_shape = train_labels.shape

print('Training set', train_dataset.shape, train_labels.shape)
print('Conversion set', points_to_cells.shape, cells_to_points.shape)
print('learning_rate', learning_rate)
print('decay_rate', decay_rate)
print('decay_steps', decay_steps)
print('batch_size', batch_size)
print('num_epochs', num_epochs)

# ---- build the TF1 graph ----------------------------------------------------
graph = tf.Graph()

with graph.as_default():
    keep_prob = tf.placeholder(tf.float32)

    # Input pipeline: one epoch-repeating, batched iterator per tensor.
    dataset_x = tf.data.Dataset.from_tensor_slices(train_dataset)
    dataset_x = dataset_x.repeat(num_epochs)
    dataset_x = dataset_x.batch(batch_size)
    iterator_x = dataset_x.make_initializable_iterator()
    next_train_batch_x = iterator_x.get_next()

    dataset_y = tf.data.Dataset.from_tensor_slices(train_labels)
    dataset_y = dataset_y.repeat(num_epochs)
    dataset_y = dataset_y.batch(batch_size)
    iterator_y = dataset_y.make_initializable_iterator()
    next_train_batch_y = iterator_y.get_next()

    tf_cells_to_points = tf.placeholder(tf.float32,shape=cells_to_points.shape, name="cells_to_points")
    x = tf.placeholder(tf.float32,shape=(batch_size, train_dataset_shape[1], train_dataset_shape[2], train_dataset_shape[3]), name="x")
    y = tf.placeholder(tf.float32, shape=(batch_size, train_labels_shape[1], train_labels_shape[2]), name="y_")
    is_training = tf.placeholder_with_default(tf.Variable(False, dtype=tf.bool, trainable=False),shape=None)

    # calculate the loss from the results of inference and the labels
    # with tf.variable_scope("batch_normalization"):
    #     x_norm = tf.layers.batch_normalization(x, training=is_training)
    # with tf.variable_scope("batch_normalization", reuse=True):
    #     y_norm = tf.layers.batch_normalization(y, training=is_training)
    y_conv = nn.inference_rnn(x, cells_to_points=tf_cells_to_points, batch_size=batch_size, keep_prob=keep_prob, training=is_training, ps_device=ps_device, w_device=w_device)
    loss = nn.loss(y_conv, y)
    # setup the training operations
    train_step = nn.training(loss, learning_rate, decay_steps, decay_rate)
    accuracy_eval = nn.evaluation(y_conv, y)

    tf.summary.scalar(loss.op.name, loss)
    tf.summary.scalar('accuracy', accuracy_eval[0])
    summary_op = tf.summary.merge_all()

    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    with tf.Session() as sess:
        sess.run(init)
        saver = tf.train.Saver()
        if model is not None:
            print("Restoring model:", model)
            saver.restore(sess, model)
        # specify where to write the log files for import to TensorBoard
        now = datetime.now()
        summary_writer = tf.summary.FileWriter(os.path.join(outvariablesdirname, now.strftime("%Y%m%d-%H%M%S")), sess.graph)
        sess.run([iterator_x.initializer, iterator_y.initializer])
        step = 0
        accuracy_t = 0
        # ---- training loop: runs until the dataset iterators are exhausted --
        while True:
            try:
                next_batch_x, next_batch_y = sess.run([next_train_batch_x, next_train_batch_y])
                _, loss_value, summary, accuracy = sess.run([train_step, loss, summary_op, accuracy_eval], feed_dict={keep_prob: 0.3, y: next_batch_y, x: next_batch_x, tf_cells_to_points: cells_to_points, is_training: True})
                step += 1
                accuracy_t += accuracy[1]
                if step % 100 == 0:
                    print('OUTPUT: Step %d: loss = %.6f' % (step, loss_value))
                    print('Accuracy:', accuracy)
                    # output some data to the log files for tensorboard
                    summary_writer.add_summary(summary, step)
                    summary_writer.flush()
                # less frequently output checkpoint files. Used for evaluating the model
                if step % 1000 == 0:
                    save_path = saver.save(sess, os.path.join(outvariablesdirname, "model"), global_step=step)
                    print('Model saved to:', save_path)
            except tf.errors.OutOfRangeError:
                break
        print('Step', step)
        print('Accuracy:', accuracy, accuracy_t/step)
        saver.save(sess, os.path.join(outvariablesdirname, "model"), global_step=step)

        # ---- prediction / evaluation section --------------------------------
        # NOTE(review): `test_dataset`, `scaler`, `raw_dataset` and `train_len`
        # are not defined anywhere in this script — this section will raise
        # NameError as written. It appears to be copied from a related
        # time-series script; confirm before running.
        predictions = []
        for tx in test_dataset:
            try:
                tx = tx.reshape(1,lookback,train_dataset_shape[2])
                y_predict = sess.run(y_conv, feed_dict={keep_prob: 1, x: tx})
                print(y_predict)
                predictions.append(y_predict)
            except tf.errors.OutOfRangeError:
                break
        predictions = np.array(predictions).reshape(-1,5)
        predictions_scaled = 2.0*scaler.inverse_transform(predictions)
        # Undo the differencing: accumulate deltas starting from the last
        # known raw value.
        predictions_scaled[0] = raw_dataset[train_len + lookback] + predictions_scaled[0]
        for i in range(1, len(predictions_scaled)):
            predictions_scaled[i] += predictions_scaled[i - 1]
        rmse = sqrt(mean_squared_error(raw_dataset[train_len + lookback:-1], predictions_scaled))
        print('Test RMSE: %.3f' % rmse)
        pyplot.plot(raw_dataset[train_len + lookback:-1,0])
        pyplot.plot(predictions_scaled[:,0])
        pyplot.show()
| [
"juanprietob@gmail.com"
] | juanprietob@gmail.com |
# BOJ 267817: for each test case, read two polynomials as coefficient lists
# and print the coefficients of their product, space-separated.
for _ in range(int(input())):
    input()  # degree line; actual lengths come from the lists themselves
    poly_a = list(map(int, input().split()))
    poly_b = list(map(int, input().split()))
    # Product of polynomials of lengths m and n has m + n - 1 coefficients.
    product = [0] * (len(poly_a) + len(poly_b) - 1)
    for i, coef_a in enumerate(poly_a):
        for j, coef_b in enumerate(poly_b):
            product[i + j] += coef_a * coef_b
    # Same output shape as the original: all coefficients space-separated,
    # then the last coefficient repeated with a trailing newline.
    print(*product, end=" ")
    print(product[-1])
"1069583789@qq.com"
] | 1069583789@qq.com |
eba0c62274f2fdbf0b6d65d26e27890f3d410adf | 36a54f621ebb1b88c576398f900bf253d06336ad | /backend/app.py | d7b03b570194c9bdc6517eba380264309966ee9c | [] | no_license | g-freire/wind-dashboard | 224fa47b512b9c54b2669af78fb155479888a4bd | af2b3dc9f8043ba1d5caf18afbe1a6b91389091b | refs/heads/master | 2023-01-11T16:13:46.611203 | 2019-10-20T01:35:35 | 2019-10-20T01:35:35 | 179,980,246 | 0 | 0 | null | 2023-01-07T06:27:17 | 2019-04-07T14:48:17 | TypeScript | UTF-8 | Python | false | false | 3,559 | py | #!/usr/bin/env python3
from random import random, uniform
from time import sleep, time
import json
from threading import Thread, Event
# import pyodbc
from flask import Flask, render_template, url_for, copy_current_request_context, jsonify,make_response, request
from flask_cors import CORS
from flask_socketio import SocketIO, emit
# from sklearn.externals import joblib
import joblib
# import eventlet
# eventlet.monkey_patch()
# Flask application with CORS and Socket.IO support.
app = Flask(__name__)
CORS(app, supports_credentials=True)
app.config['DEBUG'] = True
socketio = SocketIO(app)

# Background worker thread plus the event used to signal it to stop.
thread = Thread()
thread_stop_event = Event()

# Commented-out SQL Server (pyodbc) connection settings, kept for reference.
# SECURITY NOTE(review): real-looking credentials in source; scrub before use.
# server = '127.0.0.1,1433'
# database = 'client_sensors'
# username = 'SA'
# password = '1q2w3e%&!'
# cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
# cursor = cnxn.cursor()
class QueryMongoThread(Thread):
    """Background thread that periodically generates (currently random)
    sensor values, runs them through a pre-trained model, and emits the
    first value over the '/wind' Socket.IO namespace.
    """

    def __init__(self):
        # Seconds to wait between consecutive samples.
        self.delay = 2
        super(QueryMongoThread, self).__init__()

    def getLastSample(self):
        """Loop until `thread_stop_event` is set, emitting one sample per cycle.

        NOTE(review): `Event.isSet()` is a deprecated alias of `is_set()`;
        also, the joblib model is re-loaded from disk on every iteration —
        hoisting the load out of the loop would avoid repeated disk reads.
        """
        print("Querying last record from db")
        while not thread_stop_event.isSet():
            # Former database sampling, replaced by random values below.
            # cursor.execute("SELECT TOP(1) * FROM [client_sensors].[dbo].[sensors] ORDER BY timestamps DESC")
            # row = cursor.fetchone()
            # a = row[1] * round(random()*.221230, 2)dock
            # b = row[1] * round(uniform(0.1, 1.0), 2)
            # c = row[1] * round(random()*100, 2)
            a = 100 * round(random()*.221230, 2)
            b = 100 * round(uniform(0.1, 1.0), 2)
            c = 100 * round(random()*100, 2)
            print('-----------------------------------------------------------')
            print("Queried from db")
            print('a:',a,'b:',b,'c:',c)
            model_from_joblib = joblib.load('xgb-3features-fmc.joblib')
            # NOTE(review): sklearn-style estimators expect a 2-D array
            # ([[a, b, c]]); confirm this model accepts a flat list.
            model_prediction = model_from_joblib.predict([a, b, c])
            print("Predicted value:", model_prediction)
            print('-----------------------------------------------------------')
            # The first value is the flag that will be injected into the frontend.
            socketio.emit('wind', a, namespace='/wind')
            print("emited value",a)
            # print(socketio.emit('newnumber', a, namespace='wind'))
            sleep(self.delay)

    def run(self):
        # Thread entry point: delegate to the sampling loop.
        self.getLastSample()
# @app.route('/predict', methods=['GET', 'POST'])
# def predict():
# try:
# start = time.time()
# post_body = json.loads(request.data.decode('utf-8'))
#
# sensor1 = float(post_body['sensor1'])
# sensor2 = float(post_body['sensor2'])
# sensor3 = float(post_body['sensor3'])
#
# data = aiProcessor.predict_pump_output(sensor1, sensor2, sensor3)
#
# return str(data[0])
# except Exception as e:print(e)
@socketio.on('connect', namespace='/wind')
def test_connect():
    """Start the background sampling thread on the first client connect.

    Fix: ``Thread.isAlive()`` was removed in Python 3.9; the supported
    spelling is ``is_alive()`` (available since Python 2.6).
    """
    global thread
    print('Client connected')
    # Only spawn one worker; subsequent connects reuse the running thread.
    if not thread.is_alive():
        print("Starting QueryMongo main Thread")
        thread = QueryMongoThread()
        thread.start()
@socketio.on('disconnect', namespace='/wind')
def test_disconnect():
    """Log client disconnects; the worker thread keeps running."""
    print('Client disconnected')
@app.route('/')
def index():
    """Health-check / landing route."""
    return 'SQL SERVER API'
@app.errorhandler(400)
def bad_request(error):
    """Return a JSON body for HTTP 400 errors instead of Flask's HTML page."""
    return make_response(jsonify({'error': 'Bad request'}), 400)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON body for HTTP 404 errors instead of Flask's HTML page."""
    return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
    # socketio.run(app)
    # Bind on all interfaces so the dashboard is reachable from other hosts.
    socketio.run(app, host='0.0.0.0')

# requirements
# eventlet==0.24.1
| [
"g.freire@rioanalytics.com.br"
] | g.freire@rioanalytics.com.br |
# Print the odd numbers from 3 through 19, one per line.
for n in range(3, 20, 2):
    print(n)

# Print each letter of the word "Python" on its own line.
for ch in "Python":
    print(ch)
"ania@test.pl"
] | ania@test.pl |
e4f7d50b81def02a4fc5c109097676d372a8b5c3 | fbb12b2b7dcf7f2a33235f6766b4176c083a0c8e | /ARsyntax/workflow/rules/pseudoReplicates.smk | 66d10732db24581cccd7e5b362ac228197b0e3d1 | [] | no_license | birkiy/TermProjectCOMP541 | b76c8fa3a01e48dc302dc040a2c499c2c9f1b8ba | 400a81765889a21d0590b599c4ba0e529a56e3ca | refs/heads/main | 2023-01-19T21:36:55.085293 | 2020-11-30T12:59:14 | 2020-11-30T12:59:14 | 306,048,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | smk |
folder = "results/mapping/processed"
rule pseudoReplicates:
input:
"results/mapping/processed/{raw}.merged.final.bam"
output:
header=temp("results/mapping/processed/{raw}.merged.header.final.sam"),
pseudo1="results/mapping/processed/{raw}.pseudo1.final.bam",
pseudo2="results/mapping/processed/{raw}.pseudo2.final.bam"
message:
"Executing pseudoReplicates rule for {wildcards.raw}"
shell:
"""
samtools view -H {input} > {output.header}
#Split merged treatments
nlines=$(samtools view {input} | wc -l )
nlines=$(( (nlines + 1) / 2 )) # half that number
samtools view {input} | shuf - | split -d -l $nlines - "{folder}/{wildcards.raw}"
cat {output.header} {folder}/{wildcards.raw}00 | \
samtools view -bS - > {output.pseudo1}
cat {output.header} {folder}/{wildcards.raw}01 | \
samtools view -bS - > {output.pseudo2}
"""
rule pool:
input:
expand("results/mapping/processed/{{raw}}.{rep}.final.bam", rep=["rep1", "rep2"])
output:
"results/mapping/processed/{raw}.merged.final.bam"
message:
"Executing pool rule for {wildcards.raw}"
threads:
16
shell:
"""
#Merge treatment BAMS
samtools merge -@ {threads} -u {output} {input}
"""
| [
"umutberkayaltintas@gmail.com"
] | umutberkayaltintas@gmail.com |
0ff48efc847a28f6d741a32eb687282b9fde2b37 | ff8d00c161e7120d965c353f10f10871c1dabc5c | /models/unet2dantialias.py | ad27c046690a3df8801b2ab58247e22ddfd0d3b2 | [] | no_license | Scu-sen/AAPM2020_open_kbp | b6c4dd0cbce958f8174bf18ef74e94cfdd1dd51e | 2db52d7186c8fe788994c7a5fd70dc9052e0ff8e | refs/heads/main | 2023-07-16T09:15:43.018031 | 2021-08-02T09:04:26 | 2021-08-02T09:04:26 | 391,871,924 | 26 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,320 | py | # https://github.com/vlievin/Unet
# https://github.com/adobe/antialiased-cnns
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
from IPython import embed
class Downsample(nn.Module):
def __init__(self, pad_type='reflect', filt_size=3, stride=2, channels=None, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1.*(filt_size-1)/2), int(np.ceil(1.*(filt_size-1)/2)), int(1.*(filt_size-1)/2), int(np.ceil(1.*(filt_size-1)/2))]
self.pad_sizes = [pad_size+pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride-1)/2.)
self.channels = channels
# print('Filter size [%i]'%filt_size)
if(self.filt_size==1):
a = np.array([1.,])
elif(self.filt_size==2):
a = np.array([1., 1.])
elif(self.filt_size==3):
a = np.array([1., 2., 1.])
elif(self.filt_size==4):
a = np.array([1., 3., 3., 1.])
elif(self.filt_size==5):
a = np.array([1., 4., 6., 4., 1.])
elif(self.filt_size==6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(self.filt_size==7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:,None]*a[None,:])
filt = filt/torch.sum(filt)
self.register_buffer('filt', filt[None,None,:,:].repeat((self.channels,1,1,1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size==1):
if(self.pad_off==0):
return inp[:,:,::self.stride,::self.stride]
else:
return self.pad(inp)[:,:,::self.stride,::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
def get_pad_layer(pad_type):
if(pad_type in ['refl','reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl','replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type=='zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized'%pad_type)
return PadLayer
class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=in_channels,bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
# class gated_resnet(nn.Module):
# """
# Gated Residual Block
# """
# def __init__(self, num_filters, kernel_size, padding, nonlinearity=nn.ReLU, dropout=0.2, dilation=1,batchNormObject=nn.BatchNorm2d):
# super(gated_resnet, self).__init__()
# self.gated = True
# num_hidden_filters =2 * num_filters if self.gated else num_filters
# self.conv_input = SeparableConv2d(num_filters, num_hidden_filters, kernel_size=kernel_size,stride=1,padding=padding,dilation=dilation )
# self.dropout = nn.Dropout2d(dropout)
# self.nonlinearity = nonlinearity()
# self.batch_norm1 = batchNormObject(num_hidden_filters)
# self.conv_out = SeparableConv2d(num_hidden_filters, num_hidden_filters, kernel_size=kernel_size,stride=1,padding=padding,dilation=dilation )
# self.batch_norm2 = batchNormObject(num_filters)
# def forward(self, og_x):
# x = self.conv_input(og_x)
# x = self.batch_norm1(x)
# x = self.nonlinearity(x)
# x = self.dropout(x)
# x = self.conv_out(x)
# if self.gated:
# a, b = torch.chunk(x, 2, dim=1)
# c3 = a * F.sigmoid(b)
# else:
# c3 = x
# out = og_x + c3
# out = self.batch_norm2(out)
# return out
class ResidualBlock(nn.Module):
"""
Residual Block
"""
def __init__(self, num_filters, kernel_size, padding, nonlinearity=nn.ReLU, dropout=0.2, dilation=1,batchNormObject=nn.BatchNorm2d):
super(ResidualBlock, self).__init__()
num_hidden_filters = num_filters
self.conv1 = SeparableConv2d(num_filters, num_hidden_filters, kernel_size=kernel_size,stride=1,padding=padding,dilation=dilation )
self.dropout = nn.Dropout2d(dropout)
self.nonlinearity = nonlinearity(inplace=False)
self.batch_norm1 = batchNormObject(num_hidden_filters)
self.conv2 = SeparableConv2d(num_hidden_filters, num_hidden_filters, kernel_size=kernel_size,stride=1,padding=padding,dilation=dilation )
self.batch_norm2 = batchNormObject(num_filters)
def forward(self, og_x):
x = og_x
x = self.dropout(x)
x = self.conv1(og_x)
x = self.batch_norm1(x)
x = self.nonlinearity(x)
x = self.conv2(x)
out = og_x + x
out = self.batch_norm2(out)
out = self.nonlinearity(out)
return out
class ConvolutionalEncoder(nn.Module):
"""
Convolutional Encoder providing skip connections
"""
def __init__(self,n_features_input,num_hidden_features,kernel_size,padding,n_resblocks,dropout_min=0,dropout_max=0.2, blockObject=ResidualBlock,batchNormObject=nn.BatchNorm2d):
"""
n_features_input (int): number of intput features
num_hidden_features (list(int)): number of features for each stage
kernel_size (int): convolution kernel size
padding (int): convolution padding
n_resblocks (int): number of residual blocks at each stage
dropout (float): dropout probability
blockObject (nn.Module): Residual block to use. Default is ResidualBlock
batchNormObject (nn.Module): normalization layer. Default is nn.BatchNorm2d
"""
super(ConvolutionalEncoder,self).__init__()
self.n_features_input = n_features_input
self.num_hidden_features = num_hidden_features
self.stages = nn.ModuleList()
dropout = iter([(1-t)*dropout_min + t*dropout_max for t in np.linspace(0,1,(len(num_hidden_features)))])
dropout = iter(dropout)
# input convolution block
block = [SeparableConv2d(n_features_input, num_hidden_features[0], kernel_size=kernel_size,stride=1, padding=padding)]
for _ in range(n_resblocks):
p = next(iter(dropout))
block += [blockObject(num_hidden_features[0], kernel_size, padding, dropout=p,batchNormObject=batchNormObject)]
self.stages.append(nn.Sequential(*block))
# layers
for features_in,features_out in [num_hidden_features[i:i+2] for i in range(0,len(num_hidden_features), 1)][:-1]:
# downsampling
# block = [nn.MaxPool2d(2),SeparableConv2d(features_in, features_out, kernel_size=1,padding=0 ),batchNormObject(features_out),nn.ReLU()]
block = [nn.MaxPool2d(kernel_size=2, stride=1),Downsample(channels=features_in, filt_size=3, stride=2),SeparableConv2d(features_in, features_out, kernel_size=1,padding=0 ),batchNormObject(features_out),nn.ReLU()]
#block = [SeparableConv2d(features_in, features_out, kernel_size=kernel_size,stride=2,padding=padding ),nn.BatchNorm2d(features_out),nn.ReLU()]
# residual blocks
# p = next(iter(dropout))
for _ in range(n_resblocks):
block += [blockObject(features_out, kernel_size, padding, dropout=p,batchNormObject=batchNormObject)]
self.stages.append(nn.Sequential(*block))
def forward(self,x):
skips = []
for stage in self.stages:
x = stage(x)
skips.append(x)
return x,skips
def getInputShape(self):
return (-1,self.n_features_input,-1,-1)
def getOutputShape(self):
return (-1,self.num_hidden_features[-1], -1,-1)
class ConvolutionalDecoder(nn.Module):
"""
Convolutional Decoder taking skip connections
"""
def __init__(self,n_features_output,num_hidden_features,kernel_size,padding,n_resblocks,dropout_min=0,dropout_max=0.2,blockObject=ResidualBlock,batchNormObject=nn.BatchNorm2d):
"""
n_features_output (int): number of output features
num_hidden_features (list(int)): number of features for each stage
kernel_size (int): convolution kernel size
padding (int): convolution padding
n_resblocks (int): number of residual blocks at each stage
dropout (float): dropout probability
blockObject (nn.Module): Residual block to use. Default is ResidualBlock
batchNormObject (nn.Module): normalization layer. Default is nn.BatchNorm2d
"""
super(ConvolutionalDecoder,self).__init__()
self.n_features_output = n_features_output
self.num_hidden_features = num_hidden_features
self.upConvolutions = nn.ModuleList()
self.skipMergers = nn.ModuleList()
self.residualBlocks = nn.ModuleList()
dropout = iter([(1-t)*dropout_min + t*dropout_max for t in np.linspace(0,1,(len(num_hidden_features)))][::-1])
# input convolution block
# layers
for features_in,features_out in [num_hidden_features[i:i+2] for i in range(0,len(num_hidden_features), 1)][:-1]:
# downsampling
self.upConvolutions.append(nn.Sequential(nn.ConvTranspose2d(features_in, features_out, kernel_size=3, stride=2,padding=1,output_padding=1),batchNormObject(features_out),nn.ReLU()))
self.skipMergers.append(SeparableConv2d(2*features_out, features_out, kernel_size=kernel_size,stride=1, padding=padding))
# residual blocks
block = []
p = next(iter(dropout))
for _ in range(n_resblocks):
block += [blockObject(features_out, kernel_size, padding, dropout=p,batchNormObject=batchNormObject)]
self.residualBlocks.append(nn.Sequential(*block))
# output convolution block
block = [SeparableConv2d(num_hidden_features[-1],n_features_output, kernel_size=kernel_size,stride=1, padding=padding)]
self.output_convolution = nn.Sequential(*block)
def forward(self,x, skips):
for up,merge,conv,skip in zip(self.upConvolutions,self.skipMergers, self.residualBlocks,skips):
x = up(x)
cat = torch.cat([x,skip],1)
x = merge(cat)
x = conv(x)
return self.output_convolution(x)
def getInputShape(self):
return (-1,self.num_hidden_features[0],-1,-1)
def getOutputShape(self):
return (-1,self.n_features_output, -1,-1)
class DilatedConvolutions(nn.Module):
"""
Sequential Dialted convolutions
"""
def __init__(self, n_channels, n_convolutions, dropout):
super(DilatedConvolutions, self).__init__()
kernel_size = 3
padding = 1
self.dropout = nn.Dropout2d(dropout)
self.non_linearity = nn.ReLU(inplace=True)
self.strides = [2**(k+1) for k in range(n_convolutions)]
convs = [SeparableConv2d(n_channels, n_channels, kernel_size=kernel_size,dilation=s, padding=s) for s in self.strides ]
self.convs = nn.ModuleList()
self.bns = nn.ModuleList()
for c in convs:
self.convs.append(c)
self.bns.append(nn.BatchNorm2d(n_channels))
def forward(self,x):
skips = []
for (c,bn,s) in zip(self.convs,self.bns,self.strides):
x_in = x
x = c(x)
x = bn(x)
x = self.non_linearity(x)
x = self.dropout(x)
x = x_in + x
skips.append(x)
return x,skips
class DilatedConvolutions2(nn.Module):
"""
Sequential Dialted convolutions
"""
def __init__(self, n_channels, n_convolutions,dropout,kernel_size,blockObject=ResidualBlock,batchNormObject=nn.BatchNorm2d):
super(DilatedConvolutions2, self).__init__()
self.dilatations = [2**(k+1) for k in range(n_convolutions)]
self.blocks = nn.ModuleList([blockObject(n_channels, kernel_size, d, dropout=dropout, dilation=d,batchNormObject=batchNormObject) for d in self.dilatations ])
def forward(self,x):
skips = []
for b in self.blocks:
x = b(x)
skips.append(x)
return x, skips
class UNet(nn.Module):
"""
U-Net model with dynamic number of layers, Residual Blocks, Dilated Convolutions, Dropout and Group Normalization
"""
def __init__(self, in_channels, out_channels, num_hidden_features,n_resblocks,num_dilated_convs, dropout_min=0, dropout_max=0, gated=False, padding=1, kernel_size=3,group_norm=32):
"""
initialize the model
Args:
in_channels (int): number of input channels (image=3)
out_channels (int): number of output channels (n_classes)
num_hidden_features (list(int)): number of hidden features for each layer (the number of layer is the lenght of this list)
n_resblocks (int): number of residual blocks at each layer
num_dilated_convs (int): number of dilated convolutions at the last layer
dropout (float): float in [0,1]: dropout probability
gated (bool): use gated Convolutions, default is False
padding (int): padding for the convolutions
kernel_size (int): kernel size for the convolutions
group_norm (bool): number of groups to use for Group Normalization, default is 32, if zero: use nn.BatchNorm2d
"""
super(UNet, self).__init__()
if group_norm > 0:
for h in num_hidden_features:
assert h%group_norm==0, "Number of features at each layer must be divisible by 'group_norm'"
blockObject = gated_resnet if gated else ResidualBlock
batchNormObject = lambda n_features : nn.GroupNorm(group_norm,n_features) if group_norm > 0 else nn.BatchNorm2d(n_features)
self.encoder = ConvolutionalEncoder(in_channels,num_hidden_features,kernel_size,padding,n_resblocks,dropout_min=dropout_min,dropout_max=dropout_max,blockObject=blockObject,batchNormObject=batchNormObject)
if num_dilated_convs > 0:
#self.dilatedConvs = DilatedConvolutions2(num_hidden_features[-1], num_dilated_convs,dropout_max,kernel_size,blockObject=blockObject,batchNormObject=batchNormObject)
self.dilatedConvs = DilatedConvolutions(num_hidden_features[-1],num_dilated_convs,dropout_max) # <v11 uses dilatedConvs2
else:
self.dilatedConvs = None
self.decoder = ConvolutionalDecoder(out_channels,num_hidden_features[::-1],kernel_size,padding,n_resblocks,dropout_min=dropout_min,dropout_max=dropout_max,blockObject=blockObject,batchNormObject=batchNormObject)
def forward(self, x):
x,skips = self.encoder(x)
if self.dilatedConvs is not None:
x,dilated_skips = self.dilatedConvs(x)
for d in dilated_skips:
x += d
x += skips[-1]
x = self.decoder(x,skips[:-1][::-1])
return x
| [
"scusenyang@tencent.com"
] | scusenyang@tencent.com |
88378b845ff068fb43132006a8b345799334f79d | 155a404b32b8e2708595c92dbce0e91a2d6949b3 | /tensorflow/batch_normalization_practice.py | f3ff93500e2f2c36d76c2d11de6d9efddcb73783 | [] | no_license | shykuopo/Python | c17b4d1da59f0fcf15f90632bad4e758676ca9f7 | e382dd924da0580a244e0852e4550915ff4a1e62 | refs/heads/master | 2021-05-15T23:27:31.751004 | 2017-10-10T09:44:35 | 2017-10-10T09:44:35 | 106,391,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,968 | py | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
ACTIVATION = tf.nn.relu
N_LAYERS = 7
N_HIDDEN_UNITS = 30
def fix_seed(seed=1):
# reproducible
np.random.seed(seed)
tf.set_random_seed(seed)
def plot_his(inputs, inputs_norm):
# plot histogram for the inputs of every layer
for j, all_inputs in enumerate([inputs, inputs_norm]):
for i, input in enumerate(all_inputs):
plt.subplot(2, len(all_inputs), j*len(all_inputs)+(i+1))
plt.cla()
if i == 0:
the_range = (-7, 10)
else:
the_range = (-1, 1)
plt.hist(input.ravel(), bins=15, range=the_range, color='#FF5733')
plt.yticks(())
if j == 1:
plt.xticks(the_range)
else:
plt.xticks(())
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
plt.title("%s normalizing" % ("Without" if j == 0 else "With"))
plt.draw()
plt.pause(0.01)
def built_net(xs, ys, norm):
def add_layer(inputs, in_size, out_size, activation_function=None, norm=False):
# weights and biases (bad initialization for this case)
Weights = tf.Variable(tf.random_normal([in_size, out_size], mean=0., stddev=1.))
biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
# fully connected product
Wx_plus_b = tf.matmul(inputs, Weights) + biases
# normalize fully connected product
if norm:
# Batch Normalize
fc_mean, fc_var = tf.nn.moments(
Wx_plus_b,
axes=[0], # the dimension you wanna normalize, here [0] for batch
# for image, you wanna do [0, 1, 2] for [batch, height, width] but not channel
)
scale = tf.Variable(tf.ones([out_size]))
shift = tf.Variable(tf.zeros([out_size]))
epsilon = 0.001
# apply moving average for mean and var when train on batch
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([fc_mean, fc_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(fc_mean), tf.identity(fc_var)
mean, var = mean_var_with_update()
Wx_plus_b = tf.nn.batch_normalization(Wx_plus_b, mean, var, shift, scale, epsilon)
# similar with this two steps:
# Wx_plus_b = (Wx_plus_b - fc_mean) / tf.sqrt(fc_var + 0.001)
# Wx_plus_b = Wx_plus_b * scale + shift
# activation
if activation_function is None:
outputs = Wx_plus_b
else:
outputs = activation_function(Wx_plus_b)
return outputs
fix_seed(1)
if norm:
# BN for the first input
fc_mean, fc_var = tf.nn.moments(
xs,
axes=[0],
)
scale = tf.Variable(tf.ones([1]))
shift = tf.Variable(tf.zeros([1]))
epsilon = 0.001
# apply moving average for mean and var when train on batch
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([fc_mean, fc_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(fc_mean), tf.identity(fc_var)
mean, var = mean_var_with_update()
xs = tf.nn.batch_normalization(xs, mean, var, shift, scale, epsilon)
# record inputs for every layer
layers_inputs = [xs]
# build hidden layers
for l_n in range(N_LAYERS):
layer_input = layers_inputs[l_n]
in_size = layers_inputs[l_n].get_shape()[1].value
output = add_layer(
layer_input, # input
in_size, # input size
N_HIDDEN_UNITS, # output size
ACTIVATION, # activation function
norm, # normalize before activation
)
layers_inputs.append(output) # add output for next run
# build output layer
prediction = add_layer(layers_inputs[-1], 30, 1, activation_function=None)
cost = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
return [train_op, cost, layers_inputs]
# make up data
fix_seed(1)
x_data = np.linspace(-7, 10, 2500)[:, np.newaxis]
np.random.shuffle(x_data)
noise = np.random.normal(0, 8, x_data.shape)
y_data = np.square(x_data) - 5 + noise
# plot input data
plt.scatter(x_data, y_data)
plt.show()
xs = tf.placeholder(tf.float32, [None, 1]) # [num_samples, num_features]
ys = tf.placeholder(tf.float32, [None, 1])
train_op, cost, layers_inputs = built_net(xs, ys, norm=False) # without BN
train_op_norm, cost_norm, layers_inputs_norm = built_net(xs, ys, norm=True) # with BN
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# record cost
cost_his = []
cost_his_norm = []
record_step = 5
plt.ion()
plt.figure(figsize=(7, 3))
for i in range(250):
if i % 50 == 0:
# plot histogram
all_inputs, all_inputs_norm = sess.run([layers_inputs, layers_inputs_norm], feed_dict={xs: x_data, ys: y_data})
plot_his(all_inputs, all_inputs_norm)
# train on batch
sess.run([train_op, train_op_norm], feed_dict={xs: x_data[i*10:i*10+10], ys: y_data[i*10:i*10+10]})
if i % record_step == 0:
# record cost
cost_his.append(sess.run(cost, feed_dict={xs: x_data, ys: y_data}))
cost_his_norm.append(sess.run(cost_norm, feed_dict={xs: x_data, ys: y_data}))
plt.ioff()
plt.figure()
plt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his), label='no BN') # no norm
plt.plot(np.arange(len(cost_his))*record_step, np.array(cost_his_norm), label='BN') # norm
plt.legend()
plt.show() | [
"Sam82267535@yahoo.com.tw"
] | Sam82267535@yahoo.com.tw |
1fa173f6bfa99361c4de753688e6de4aa025f83f | ea378480ba678eb123ef826e3ca0c3eb8f4e538f | /paused/05. bk old/bk future includes/candidates/06.misc from nodebox/context.py | 1d6b06a511c7ed2c494ad636d23c4867059aa457 | [] | no_license | msarch/py | 67235643666b1ed762d418263f7eed3966d3f522 | dcd25e633a87cdb3710e90224e5387d3516c1cd3 | refs/heads/master | 2021-01-01T05:21:58.175043 | 2017-05-25T08:15:26 | 2017-05-25T08:15:26 | 87,453,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155,609 | py | #=== CONTEXT =========================================================================================
# 2D NodeBox API in OpenGL.
# Authors: Tom De Smedt, Frederik De Bleser
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
# All graphics are drawn directly to the screen.
# No scenegraph is kept for obvious performance reasons (therefore, no canvas._grobs as in NodeBox).
# Debugging must be switched on or of before other modules are imported.
import pyglet
from pyglet.gl import *
from pyglet.image import Texture
from math import cos, sin, radians, pi, floor
from time import time
from random import seed, choice, shuffle, random as rnd
from new import instancemethod
from glob import glob
from os import path, remove
from sys import getrefcount
from StringIO import StringIO
from hashlib import md5
from types import FunctionType
from datetime import datetime
#=====================================================================================================
#--- COLORS -------------------------------------------------------------------
from collections import namedtuple  # Bug fix: namedtuple was used below without being imported.

# Simple immutable RGBA record with 0-255 integer channels.
# NOTE(review): this namedtuple is shadowed later in the module by the
# Color class (which uses 0.0-1.0 float channels); the constants below
# remain namedtuple instances because they are created first.
Color = namedtuple('Color', 'r g b a')

orange = Color(255, 127, 0, 255)
white = Color(255, 255, 255, 255)
black = Color( 0, 0, 0, 255)
yellow = Color(255, 255, 0, 255)
red = Color(255, 0, 0, 255)
blue = Color(127, 127, 255, 255)
blue50 = Color(127, 127, 255, 127)
pink = Color(255, 187, 187, 255)
very_light_grey = Color(242, 242, 242, 0)

# kapla_colors
r_k = Color(255, 69, 0, 255) # red kapla
b_k = Color( 0, 0, 140, 255) # blue kapla
g_k = Color( 0, 99, 0, 255) # green kapla
y_k = Color(255, 214, 0, 255) # yellow kapla
kapla_colors=(r_k, g_k, b_k, y_k, b_k) # addded 1 color for pb w 4 kaplas TODO

def set_background_color(color=white):
    """Set the OpenGL clear color from a 0-255 RGBA Color.

    Bug fix: the previous version ignored its `color` argument and always
    passed `blue`; it also fed raw 0-255 channel values to glClearColor,
    which expects 0.0-1.0 floats (out-of-range values are clamped to 1.0,
    so the clear color was effectively always white). Channels are now
    normalized and the argument is honored.
    """
    glClearColor(color.r / 255.0, color.g / 255.0, color.b / 255.0, color.a / 255.0)
#--- COLOR -------------------------------------------------------------------------------------------
# Supported color space identifiers.
RGB = "RGB"
HSB = "HSB"
XYZ = "XYZ"
LAB = "LAB"

_background = None # Current state background color.
_fill = None # Current state fill color.
_stroke = None # Current state stroke color.
_strokewidth = 1 # Current state strokewidth.
_strokestyle = "solid" # Current state strokestyle.
_alpha = 1 # Current state alpha transparency.

class Color(list):

    def __init__(self, *args, **kwargs):
        """ A color with R,G,B,A channels, with channel values ranging between 0.0-1.0.
            Either takes four parameters (R,G,B,A), three parameters (R,G,B),
            two parameters (grayscale and alpha) or one parameter (grayscale or Color object).
            An optional base=1.0 parameter defines the range of the given parameters.
            An optional colorspace=RGB defines the color space of the given parameters.
        """
        # Values may be supplied as a single tuple/list.
        if len(args) == 1 and isinstance(args[0], (list, tuple)):
            args = args[0]
        # R, G, B and A.
        if len(args) == 4:
            r, g, b, a = args[0], args[1], args[2], args[3]
        # R, G and B (opaque).
        elif len(args) == 3:
            r, g, b, a = args[0], args[1], args[2], 1
        # Two values: grayscale and alpha.
        elif len(args) == 2:
            r, g, b, a = args[0], args[0], args[0], args[1]
        # One value: another color object (copy its channels).
        elif len(args) == 1 and isinstance(args[0], Color):
            r, g, b, a = args[0].r, args[0].g, args[0].b, args[0].a
        # One value: None means transparent black.
        elif len(args) == 1 and args[0] is None:
            r, g, b, a = 0, 0, 0, 0
        # One value: opaque grayscale.
        elif len(args) == 1:
            r, g, b, a = args[0], args[0], args[0], 1
        # No value: transparent black.
        # Bug fix: this branch used to read "elif len(args):", which is never
        # true when args is empty, so Color() raised a NameError.
        else:
            r, g, b, a = 0, 0, 0, 0
        # Scale the given channel values down to the 0.0-1.0 range.
        base = float(kwargs.get("base", 1.0))
        if base != 1:
            r, g, b, a = [ch / base for ch in (r, g, b, a)]
        # Convert from the given color space to RGB.
        colorspace = kwargs.get("colorspace")
        if colorspace and colorspace != RGB:
            if colorspace == HSB: r, g, b = hsb_to_rgb(r, g, b)
            if colorspace == XYZ: r, g, b = xyz_to_rgb(r, g, b)
            if colorspace == LAB: r, g, b = lab_to_rgb(r, g, b)
        list.__init__(self, [r, g, b, a])
        self._dirty = False

    def __setitem__(self, i, v):
        # Track channel mutations so cached state can be invalidated.
        list.__setitem__(self, i, v)
        self._dirty = True

    def _get_r(self): return self[0]
    def _get_g(self): return self[1]
    def _get_b(self): return self[2]
    def _get_a(self): return self[3]

    def _set_r(self, v): self[0] = v
    def _set_g(self, v): self[1] = v
    def _set_b(self, v): self[2] = v
    def _set_a(self, v): self[3] = v

    r = red = property(_get_r, _set_r)
    g = green = property(_get_g, _set_g)
    b = blue = property(_get_b, _set_b)
    a = alpha = property(_get_a, _set_a)

    def _get_rgb(self):
        return self[0], self[1], self[2]
    def _set_rgb(self, rgb):
        # Tuple-parameter unpacking in the signature is Python 2-only;
        # unpack in the body instead (behavior is identical).
        r, g, b = rgb
        self[0] = r
        self[1] = g
        self[2] = b

    rgb = property(_get_rgb, _set_rgb)

    def _get_rgba(self):
        return self[0], self[1], self[2], self[3]
    def _set_rgba(self, rgba):
        r, g, b, a = rgba
        self[0] = r
        self[1] = g
        self[2] = b
        self[3] = a

    rgba = property(_get_rgba, _set_rgba)

    def copy(self):
        """ Returns a new Color with the same channel values.
        """
        return Color(self)

    def _apply(self):
        # Set the current OpenGL color, honoring the global alpha state.
        glColor4f(self[0], self[1], self[2], self[3] * _alpha)

    def __repr__(self):
        return "Color(%.3f, %.3f, %.3f, %.3f)" % \
            (self[0], self[1], self[2], self[3])

    def __eq__(self, clr):
        if not isinstance(clr, Color): return False
        return self[0] == clr[0] \
           and self[1] == clr[1] \
           and self[2] == clr[2] \
           and self[3] == clr[3]

    def __ne__(self, clr):
        return not self.__eq__(clr)

    def map(self, base=1.0, colorspace=RGB):
        """ Returns a list of R,G,B,A values mapped to the given base,
            e.g. from 0-255 instead of 0.0-1.0 which is useful for setting image pixels.
            Other values than RGBA can be obtained by setting the colorspace (RGB/HSB/XYZ/LAB).
        """
        r, g, b, a = self
        if colorspace != RGB:
            if colorspace == HSB: r, g, b = rgb_to_hsb(r, g, b)
            if colorspace == XYZ: r, g, b = rgb_to_xyz(r, g, b)
            if colorspace == LAB: r, g, b = rgb_to_lab(r, g, b)
        if base != 1:
            r, g, b, a = [ch*base for ch in (r, g, b, a)]
        if base != 1 and isinstance(base, int):
            # Integer base implies integer channels (e.g. 0-255 pixels).
            r, g, b, a = [int(ch) for ch in (r, g, b, a)]
        return r, g, b, a

    def blend(self, clr, t=0.5, colorspace=RGB):
        """ Returns a new color between the two colors.
            Parameter t is the amount to interpolate between the two colors
            (0.0 equals the first color, 0.5 is half-way in between, etc.)
            Blending in CIE-LAB colorspace avoids "muddy" colors in the middle of the blend.
        """
        ch = zip(self.map(1, colorspace)[:3], clr.map(1, colorspace)[:3])
        r, g, b = [geometry.lerp(a, b, t) for a, b in ch]
        a = geometry.lerp(self.a, len(clr)==4 and clr[3] or 1, t)
        return Color(r, g, b, a, colorspace=colorspace)

    def rotate(self, angle):
        """ Returns a new color with it's hue rotated on the RYB color wheel.
        """
        h, s, b = rgb_to_hsb(*self[:3])
        h, s, b = rotate_ryb(h, s, b, angle)
        return Color(h, s, b, self.a, colorspace=HSB)

color = Color
def background(*args, **kwargs):
    """Set (and repaint) the current background color.

    Any arguments are forwarded to the Color constructor; the whole
    active viewport is then flood-filled with the background color.
    Returns the current background Color.
    """
    global _background
    if args:
        _background = Color(*args, **kwargs)
    # Query the active OpenGL viewport and fill it edge-to-edge.
    viewport = (GLint * 4)()
    glGetIntegerv(GL_VIEWPORT, viewport)
    x, y, w, h = viewport
    rect(x, y, w, h, fill=_background, stroke=None)
    return _background
def fill(*args, **kwargs):
    """Set and return the current fill color for primitives and paths.

    Arguments are forwarded to the Color constructor; with no arguments
    the current fill color is returned unchanged.
    """
    global _fill
    if len(args) > 0:
        _fill = Color(*args, **kwargs)
    return _fill

fill(0) # The default fill is black.
def stroke(*args, **kwargs):
    """Set and return the current stroke (outline) color.

    Arguments are forwarded to the Color constructor; with no arguments
    the current stroke color is returned unchanged.
    """
    global _stroke
    if len(args) > 0:
        _stroke = Color(*args, **kwargs)
    return _stroke
def nofill():
    """Clear the current fill color so primitives are drawn unfilled."""
    global _fill
    _fill = None
def nostroke():
    """Clear the current stroke color so primitives are drawn without outline."""
    global _stroke
    _stroke = None
def strokewidth(width=None):
    """Set the outline stroke width; returns the current width.

    Note: strokewidth is clamped to integers by OpenGL (e.g. 0.2 => 1),
    but finer lines can be achieved visually with a transparent stroke.
    Thicker strokewidth results in ugly (i.e. no) line caps.
    """
    global _strokewidth
    if width is None:
        return _strokewidth
    _strokewidth = width
    glLineWidth(width)
    return _strokewidth
# Recognized stroke style identifiers.
SOLID = "solid"
DOTTED = "dotted"
DASHED = "dashed"

def strokestyle(style=None):
    """Set the outline stroke style (SOLID / DOTTED / DASHED); returns it.

    The OpenGL stipple state is only touched when the style actually changes.
    """
    global _strokestyle
    if style is None or style == _strokestyle:
        return _strokestyle
    _strokestyle = style
    glLineDash(style)
    return _strokestyle
def glLineDash(style):
    """Translate a stroke style constant into OpenGL line-stipple state.

    Unknown styles leave the stipple state untouched.
    """
    if style == SOLID:
        glDisable(GL_LINE_STIPPLE)
        return
    if style == DOTTED:
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(0, 0x0101)
    elif style == DASHED:
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(1, 0x000F)
def outputmode(mode=None):
    """Not supported in the OpenGL backend; kept for NodeBox API parity."""
    raise NotImplementedError
def colormode(mode=None, range=1.0):
    """Not supported in the OpenGL backend; kept for NodeBox API parity."""
    raise NotImplementedError
#--- COLOR SPACE -------------------------------------------------------------------------------------
# Transformations between RGB, HSB, CIE XYZ and CIE LAB color spaces.
# http://www.easyrgb.com/math.php
def rgb_to_hsb(r, g, b):
    """Convert R,G,B channel values (0.0-1.0) to H,S,B (0.0-1.0)."""
    v = max(r, g, b)
    d = v - min(r, g, b)
    # Saturation is the chroma relative to the brightest channel.
    s = d / float(v) if v != 0 else 0
    h = 0
    if s != 0:
        # Pick the hue sextant from the dominant channel.
        if v == r:
            h = 0 + (g - b) / d
        elif v == g:
            h = 2 + (b - r) / d
        else:
            h = 4 + (r - g) / d
    return h / 6.0 % 1, s, v
def hsb_to_rgb(h, s, v):
    """Convert H,S,B values (0.0-1.0) to R,G,B (0.0-1.0)."""
    # Zero saturation is pure gray regardless of hue.
    if s == 0:
        return v, v, v
    h = h % 1 * 6.0
    i = floor(h)
    f = h - i
    x = v * (1 - s)
    y = v * (1 - s * f)
    z = v * (1 - s * (1 - f))
    if i > 4:
        return v, x, y
    # One (r,g,b) permutation per 60-degree sextant of the color wheel.
    sextants = ((v, z, x), (y, v, x), (x, v, z), (x, y, v), (z, x, v))
    return sextants[int(i)]
def rgb_to_xyz(r, g, b):
    """Convert R,G,B values (0.0-1.0) to CIE X,Y,Z (0.0-1.0)."""
    def _linear(ch):
        # Inverse sRGB gamma companding.
        if ch > 0.04045:
            return ((ch + 0.055) / 1.055) ** 2.4
        return ch / 12.92
    r, g, b = [100.0 * _linear(ch) for ch in (r, g, b)]
    # Observer = 2, Illuminant = D65.
    x = r * 0.4124 + g * 0.3576 + b * 0.1805
    y = r * 0.2126 + g * 0.7152 + b * 0.0722
    z = r * 0.0193 + g * 0.1192 + b * 0.9505
    # Normalize against the D65 reference white point.
    return x / 95.047, y / 100.0, z / 108.883
def xyz_to_rgb(x, y, z):
    """Convert CIE X,Y,Z values (0.0-1.0) to R,G,B (0.0-1.0)."""
    def _companded(ch):
        # Forward sRGB gamma companding.
        if ch > 0.0031308:
            return 1.055 * ch ** (1 / 2.4) - 0.055
        return ch * 12.92
    # Undo the D65 white-point normalization, then scale to unit range.
    x, y, z = x * 95.047, y * 100.0, z * 108.883
    x, y, z = [ch / 100.0 for ch in (x, y, z)]
    r = x * 3.2406 + y * -1.5372 + z * -0.4986
    g = x * -0.9689 + y * 1.8758 + z * 0.0415
    b = x * -0.0557 + y * -0.2040 + z * 1.0570
    return _companded(r), _companded(g), _companded(b)
def rgb_to_lab(r, g, b):
    """Convert R,G,B values (0.0-1.0) to CIE L,A,B (0.0-1.0)."""
    x, y, z = rgb_to_xyz(r, g, b)
    f = []
    for ch in (x, y, z):
        if ch > 0.008856:
            f.append(ch ** (1 / 3.0))
        else:
            f.append((ch * 7.787) + (16 / 116.0))
    x, y, z = f
    l, a, b = y * 116 - 16, 500 * (x - y), 200 * (y - z)
    # Squash L,A,B into the 0.0-1.0 range used throughout this module.
    return l / 100.0, (a + 86) / (86 + 98), (b + 108) / (108 + 94)
def lab_to_rgb(l, a, b):
    """Convert CIE L,A,B values (0.0-1.0) to R,G,B (0.0-1.0)."""
    # Undo the 0.0-1.0 normalization applied by rgb_to_lab().
    l, a, b = l * 100, a * (86 + 98) - 86, b * (108 + 94) - 108
    y = (l + 16) / 116.0
    x = y + a / 500.0
    z = y - b / 200.0
    f = []
    for ch in (x, y, z):
        cube = ch ** 3
        if cube > 0.008856:
            f.append(cube)
        else:
            f.append((ch - 16 / 116.0) / 7.787)
    return xyz_to_rgb(f[0], f[1], f[2])
def luminance(r, g, b):
    """ Returns an indication (0.0-1.0) of how bright the color appears,
        using Rec. 709-style channel weights.
    """
    # Bug fix: the blue term was written "b+0.0721" (addition instead of a
    # weight), so pure black scored 0.036 and the red/green weights were
    # halved by the compensating "* 0.5". The weights below sum to 1.0,
    # giving 0.0 for black and 1.0 for white as the docstring promises.
    return r*0.2125 + g*0.7154 + b*0.0721
def darker(clr, step=0.2):
    """Return a copy of the color with its brightness lowered by step."""
    hue, sat, bri = rgb_to_hsb(clr.r, clr.g, clr.b)
    # Clamp brightness at zero so a large step can't go negative.
    bri = max(0, bri - step)
    r, g, b = hsb_to_rgb(hue, sat, bri)
    return Color(r, g, b, len(clr) == 4 and clr[3] or 1)
def lighter(clr, step=0.2):
    """Return a copy of the color with its brightness raised by step."""
    hue, sat, bri = rgb_to_hsb(clr.r, clr.g, clr.b)
    # Clamp brightness at one so a large step can't overshoot.
    bri = min(1, bri + step)
    r, g, b = hsb_to_rgb(hue, sat, bri)
    return Color(r, g, b, len(clr) == 4 and clr[3] or 1)

# Verb aliases kept for NodeBox API compatibility.
darken, lighten = darker, lighter
#--- COLOR ROTATION ----------------------------------------------------------------------------------
# Approximation of the RYB color wheel.
# In HSB, colors hues range from 0 to 360,
# but on the color wheel these values are not evenly distributed.
# The second tuple value contains the actual value on the wheel (angle).
# Piecewise-linear lookup table for the RYB wheel: each pair maps an evenly
# spaced HSB hue angle (first value, degrees 0-360) to its position on the
# artistic RYB color wheel (second value). Hues between entries are linearly
# interpolated by rotate_ryb().
_colorwheel = [
    ( 0, 0), ( 15, 8), ( 30, 17), ( 45, 26),
    ( 60, 34), ( 75, 41), ( 90, 48), (105, 54),
    (120, 60), (135, 81), (150, 103), (165, 123),
    (180, 138), (195, 155), (210, 171), (225, 187),
    (240, 204), (255, 219), (270, 234), (285, 251),
    (300, 267), (315, 282), (330, 298), (345, 329), (360, 360)
]
def rotate_ryb(h, s, b, angle=180):
    """ Rotates the given H,S,B color (0.0-1.0) on the RYB color wheel.
        The RYB colorwheel is not mathematically precise,
        but focuses on aesthetically pleasing complementary colors.
    """
    h = h * 360 % 360
    # Map the HSB hue to its angle on the RYB wheel by piecewise linear
    # interpolation over the _colorwheel table.
    for (x0, y0), (x1, y1) in zip(_colorwheel, _colorwheel[1:]):
        if y0 <= h <= y1:
            a = geometry.lerp(x0, x1, t=(h - y0) / (y1 - y0))
            break
    # Rotate on the wheel, then map the new angle back to an HSB hue.
    a = (a + angle) % 360
    for (x0, y0), (x1, y1) in zip(_colorwheel, _colorwheel[1:]):
        if x0 <= a <= x1:
            h = geometry.lerp(y0, y1, t=(a - x0) / (x1 - x0))
            break
    return h / 360.0, s, b
def complement(clr):
    """ Returns the color opposite on the color wheel.
        The complementary color contrasts with the given color.
    """
    if isinstance(clr, Color):
        return clr.rotate(180)
    return Color(clr).rotate(180)
def analog(clr, angle=20, d=0.1):
    """ Returns a random adjacent color on the color wheel.
        Analogous color schemes can often be found in nature.
    """
    hue, sat, bri = rgb_to_hsb(*clr[:3])
    hue, sat, bri = rotate_ryb(hue, sat, bri, angle=random(-angle, angle))
    # Jitter saturation and brightness slightly as well.
    sat *= 1 - random(-d, d)
    bri *= 1 - random(-d, d)
    alpha = len(clr) == 4 and clr[3] or 1
    return Color(hue, sat, bri, alpha, colorspace=HSB)
#--- COLOR MIXIN -------------------------------------------------------------------------------------
# Drawing commands like rect() have optional parameters fill and stroke to set the color directly.
def color_mixin(**kwargs):
    """ Returns a (fill, stroke, strokewidth, strokestyle)-tuple,
        taking each value from kwargs or falling back on the current global state.
    """
    return (kwargs.get("fill", _fill),
            kwargs.get("stroke", _stroke),
            kwargs.get("strokewidth", _strokewidth),
            kwargs.get("strokestyle", _strokestyle))
#--- COLOR PLANE -------------------------------------------------------------------------------------
# Not part of the standard API but too convenient to leave out.
def colorplane(x, y, width, height, *a):
    """ Draws a rectangle that emits a different fill color from each corner.
        An optional number of colors can be given:
        - four colors define top left, top right, bottom right and bottom left,
        - three colors define top left, top right and bottom,
        - two colors define top and bottom,
        - one color is used for all corners,
        - no colors assumes black top and white bottom gradient.
    """
    if len(a) == 2:
        # Top and bottom colors.
        clr1, clr2, clr3, clr4 = a[0], a[0], a[1], a[1]
    elif len(a) == 4:
        # Top left, top right, bottom right, bottom left.
        clr1, clr2, clr3, clr4 = a[0], a[1], a[2], a[3]
    elif len(a) == 3:
        # Top left, top right, bottom.
        clr1, clr2, clr3, clr4 = a[0], a[1], a[2], a[2]
    elif len(a) == 1:
        # One color for all corners.
        clr1 = clr2 = clr3 = clr4 = a[0]
    elif len(a) == 0:
        # Black top, white bottom.
        clr1 = clr2 = (0,0,0,1)
        clr3 = clr4 = (1,1,1,1)
    else:
        # Fixed: unsupported counts previously fell through to a confusing NameError below.
        raise TypeError("colorplane() takes 0-4 colors (%s given)" % len(a))
    glPushMatrix()
    glTranslatef(x, y, 0)
    glScalef(width, height, 1)
    # One colored vertex per corner of the unit quad (scaled to width x height).
    glBegin(GL_QUADS)
    glColor4f(clr1[0], clr1[1], clr1[2], clr1[3] * _alpha); glVertex2f(-0.0, 1.0)
    glColor4f(clr2[0], clr2[1], clr2[2], clr2[3] * _alpha); glVertex2f( 1.0, 1.0)
    glColor4f(clr3[0], clr3[1], clr3[2], clr3[3] * _alpha); glVertex2f( 1.0, -0.0)
    glColor4f(clr4[0], clr4[1], clr4[2], clr4[3] * _alpha); glVertex2f(-0.0, -0.0)
    glEnd()
    glPopMatrix()
#=====================================================================================================
#--- TRANSFORMATIONS ---------------------------------------------------------------------------------
# Unlike NodeBox, all transformations are CORNER-mode and originate from the bottom-left corner.
# Example: using Transform to get a transformed path.
# t = Transform()
# t.rotate(45)
# p = BezierPath()
# p.rect(10,10,100,70)
# p = t.transform_path(p)
# p.contains(x,y) # now we can check if the mouse is in the transformed shape.
# Transform is an alias for the affine transformation matrix in the geometry module.
Transform = geometry.AffineTransform
def push():
    """ Pushes the transformation state.
        Subsequent transformations (translate, rotate, scale) remain in effect until pop() is called.
    """
    # Saves the current modelview matrix on the OpenGL matrix stack.
    glPushMatrix()
def pop():
    """ Pops the transformation state.
        This reverts the transformation to before the last push().
    """
    # Restores the modelview matrix saved by the matching push().
    glPopMatrix()
def translate(x, y, z=0):
    """ By default, the origin of the layer or canvas is at the bottom left.
        This origin point will be moved by (x,y) pixels.
    """
    # Coordinates are rounded to whole pixels, presumably to avoid blurry
    # sub-pixel rendering — note this makes fractional translations impossible.
    glTranslatef(round(x), round(y), round(z))
def rotate(degrees, axis=(0,0,1)):
    """ Rotates the transformation state, i.e. all subsequent drawing primitives are rotated.
        Rotations work incrementally:
        calling rotate(60) and rotate(30) sets the current rotation to 90.
    """
    # By default rotates around the z-axis, i.e. in the 2D drawing plane.
    glRotatef(degrees, *axis)
def scale(x, y=None, z=None):
    """ Scales the transformation state.
        With only x given, scales uniformly along both axes; z defaults to 1.
    """
    glScalef(x, x if y is None else y, 1 if z is None else z)
def reset():
    """ Resets the transform state of the layer or canvas.
    """
    # Replaces the current modelview matrix with the identity matrix.
    glLoadIdentity()
# Transformation mode constants.
CORNER = "corner"
CENTER = "center"
def transform(mode=None):
    # Only corner-mode transformations are supported;
    # requesting CENTER raises an error, anything else returns CORNER.
    if mode == CENTER:
        raise NotImplementedError, "no center-mode transform"
    return CORNER
def skew(x, y):
    # Skew (shear) transformations are not supported.
    raise NotImplementedError
#=====================================================================================================
#--- DRAWING PRIMITIVES ------------------------------------------------------------------------------
# Drawing primitives: Point, line, rect, ellipse, arrow. star.
# The fill and stroke are two different shapes put on top of each other.
# Point is an alias for the point class in the geometry module.
Point = geometry.Point
def line(x0, y0, x1, y1, **kwargs):
    """ Draws a straight line from x0, y0 to x1, y1 with the current stroke color and strokewidth.
    """
    fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
    if stroke is None or strokewidth <= 0:
        return
    glColor4f(stroke[0], stroke[1], stroke[2], stroke[3] * _alpha)
    glLineWidth(strokewidth)
    glLineDash(strokestyle)
    glBegin(GL_LINE_LOOP)
    glVertex2f(x0, y0)
    glVertex2f(x1, y1)
    glEnd()
def rect(x, y, width, height, **kwargs):
    """ Draws a rectangle with the bottom left corner at x, y.
        The current stroke, strokewidth and fill color are applied.
    """
    fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
    corners = ((x, y), (x+width, y), (x+width, y+height), (x, y+height))
    # The fill is drawn as a polygon, the stroke as a line loop on top of it.
    for mode, clr in ((GL_POLYGON, fill), (GL_LINE_LOOP, stroke)):
        if clr is None:
            continue
        if mode == GL_LINE_LOOP:
            if strokewidth <= 0:
                continue
            glLineWidth(strokewidth)
            glLineDash(strokestyle)
        glColor4f(clr[0], clr[1], clr[2], clr[3] * _alpha)
        # Note: this performs equally well as when using precompile().
        glBegin(mode)
        for vx, vy in corners:
            glVertex2f(vx, vy)
        glEnd()
def triangle(x1, y1, x2, y2, x3, y3, **kwargs):
    """ Draws the triangle created by connecting the three given points.
        The current stroke, strokewidth and fill color are applied.
    """
    fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
    # The fill is drawn as a polygon, the stroke as a line loop on top of it.
    for mode, clr in ((GL_POLYGON, fill), (GL_LINE_LOOP, stroke)):
        if clr is None:
            continue
        if mode == GL_LINE_LOOP:
            if strokewidth <= 0:
                continue
            glLineWidth(strokewidth)
            glLineDash(strokestyle)
        glColor4f(clr[0], clr[1], clr[2], clr[3] * _alpha)
        # Note: this performs equally well as when using precompile().
        glBegin(mode)
        glVertex2f(x1, y1)
        glVertex2f(x2, y2)
        glVertex2f(x3, y3)
        glEnd()
_ellipses = {}          # Cached unit-circle display lists, keyed by segment count.
ELLIPSE_SEGMENTS = 50   # Default number of line segments approximating the circle.
def ellipse(x, y, width, height, segments=ELLIPSE_SEGMENTS, **kwargs):
    """ Draws an ellipse with the center located at x, y.
        The current stroke, strokewidth and fill color are applied.
    """
    if segments not in _ellipses:
        # A unit circle with the given amount of line segments is computed once
        # and compiled into two display lists (filled polygon + outline).
        # Drawing reuses the cached circle, scaled to the desired size.
        angles = [2*pi*i/segments for i in range(segments)]
        compiled = []
        for mode in (GL_POLYGON, GL_LINE_LOOP):
            compiled.append(precompile(lambda mode=mode: (
                glBegin(mode),
                [glVertex2f(cos(t)/2, sin(t)/2) for t in angles],
                glEnd()
            )))
        _ellipses[segments] = compiled
    fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
    for i, clr in enumerate((fill, stroke)):
        if clr is None or (i == 1 and strokewidth <= 0):
            continue
        if i == 1:
            glLineWidth(strokewidth)
            glLineDash(strokestyle)
        glColor4f(clr[0], clr[1], clr[2], clr[3] * _alpha)
        glPushMatrix()
        glTranslatef(x, y, 0)
        glScalef(width, height, 1)
        glCallList(_ellipses[segments][i])
        glPopMatrix()
oval = ellipse # Backwards compatibility.
def arrow(x, y, width, **kwargs):
    """ Draws an arrow with its tip located at x, y.
        The current stroke, strokewidth and fill color are applied.
    """
    head = width * 0.4
    tail = width * 0.2
    # Outline of the arrow, starting (and ending) at the tip.
    outline = (
        (x, y),
        (x-head, y+head),
        (x-head, y+tail),
        (x-width, y+tail),
        (x-width, y-tail),
        (x-head, y-tail),
        (x-head, y-head),
        (x, y))
    fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
    for i, clr in enumerate((fill, stroke)):
        if clr is None or (i == 1 and strokewidth <= 0):
            continue
        if i == 1:
            glLineWidth(strokewidth)
            glLineDash(strokestyle)
        glColor4f(clr[0], clr[1], clr[2], clr[3] * _alpha)
        # Note: this performs equally well as when using precompile().
        glBegin((GL_POLYGON, GL_LINE_LOOP)[i])
        for vx, vy in outline:
            glVertex2f(vx, vy)
        glEnd()
def star(x, y, points=20, outer=100, inner=50, **kwargs):
    """ Draws a star with the given points, outer radius and inner radius.
        The current stroke, strokewidth and fill color are applied.
    """
    # GL_POLYGON only works with convex polygons,
    # so a BezierPath is used instead (it tessellates fill colors).
    p = BezierPath(**kwargs)
    p.moveto(x, y+outer)
    for i in range(int(2*points) + 1):
        # Alternate between the outer and inner radius.
        radius = inner if i % 2 else outer
        t = pi * i / points
        p.lineto(x + radius*sin(t), y + radius*cos(t))
    p.closepath()
    if kwargs.get("draw", True):
        p.draw(**kwargs)
    return p
#=====================================================================================================
#--- BEZIER PATH -------------------------------------------------------------------------------------
# A BezierPath class with lineto(), curveto() and moveto() commands.
# It has all the path math functionality from NodeBox and a ray casting algorithm for contains().
# A number of caching mechanisms are used for performance:
# drawn vertices, segment lengths, path bounds, and a hit test area for BezierPath.contains().
# For optimal performance, the path should be created once (not every frame) and left unmodified.
# When points in the path are added, removed or modified, a _dirty flag is set.
# When dirty, the cache will be cleared and the new path recalculated.
# If the path is being drawn with a fill color, this means doing tessellation
# (i.e. additional math for finding out if parts overlap and punch a hole in the shape).
# Path command constants, stored in PathElement.cmd:
MOVETO = "moveto"
LINETO = "lineto"
CURVETO = "curveto"
CLOSE = "close"
RELATIVE = "relative"
# Number of straight lines to represent a curve = 20% of curve length.
RELATIVE_PRECISION = 0.2
class PathError(Exception):
    """ Generic error for invalid path operations. """
    pass
class NoCurrentPointForPath(Exception):
    """ Raised when LINETO or CURVETO is used before an initial MOVETO. """
    pass
class NoCurrentPath(Exception):
    """ Raised when path commands are used outside of beginpath()/endpath(). """
    pass
class PathPoint(Point):
    def __init__(self, x=0, y=0):
        """ A control handle for PathElement.
            Setting x or y marks the point dirty so the owning path
            knows to rebuild its caches.
        """
        self._x = x
        self._y = y
        self._dirty = False
    @property
    def x(self):
        return self._x
    @x.setter
    def x(self, v):
        self._x = v
        self._dirty = True
    @property
    def y(self):
        return self._y
    @y.setter
    def y(self, v):
        self._y = v
        self._dirty = True
    def copy(self, parent=None):
        # The parent parameter is accepted for API compatibility but unused.
        return PathPoint(self._x, self._y)
class PathElement(object):
    def __init__(self, cmd=None, pts=None):
        """ A point in the path, optionally with control handles:
            - MOVETO : the list of points contains a single (x,y)-tuple.
            - LINETO : the list of points contains a single (x,y)-tuple.
            - CURVETO : the list of points contains (vx1,vy1), (vx2,vy2), (x,y) tuples.
            - CLOSETO : no points.
        """
        if cmd == MOVETO \
        or cmd == LINETO:
            pt, h1, h2 = pts[0], pts[0], pts[0]
        elif cmd == CURVETO:
            pt, h1, h2 = pts[2], pts[0], pts[1]
        else:
            pt, h1, h2 = (0,0), (0,0), (0,0)
        self._cmd = cmd
        self._x = pt[0]
        self._y = pt[1]
        self._ctrl1 = PathPoint(h1[0], h1[1])
        self._ctrl2 = PathPoint(h2[0], h2[1])
        # Name-mangled flag for changes to x/y; handle changes are tracked
        # on the PathPoint objects themselves (see _get_dirty()).
        self.__dirty = False
    def _get_dirty(self):
        # Dirty if the anchor point or either control handle was modified.
        return self.__dirty \
            or self.ctrl1._dirty \
            or self.ctrl2._dirty
    def _set_dirty(self, b):
        self.__dirty = b
        self.ctrl1._dirty = b
        self.ctrl2._dirty = b
    _dirty = property(_get_dirty, _set_dirty)
    @property
    def cmd(self):
        # The path command is read-only (MOVETO, LINETO, CURVETO or CLOSE).
        return self._cmd
    def _get_x(self): return self._x
    def _set_x(self, v):
        self._x = v
        self.__dirty = True
    def _get_y(self): return self._y
    def _set_y(self, v):
        self._y = v
        self.__dirty = True
    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)
    def _get_xy(self):
        return (self.x, self.y)
    # NOTE(review): tuple-parameter unpacking below is Python 2-only syntax.
    def _set_xy(self, (x,y)):
        self.x = x
        self.y = y
    xy = property(_get_xy, _set_xy)
    # Handle 1 describes how the curve from the previous point started.
    def _get_ctrl1(self): return self._ctrl1
    def _set_ctrl1(self, v):
        self._ctrl1 = PathPoint(v.x, v.y)
        self.__dirty = True
    # Handle 2 describes how the curve from the previous point arrives in this point.
    def _get_ctrl2(self): return self._ctrl2
    def _set_ctrl2(self, v):
        self._ctrl2 = PathPoint(v.x, v.y)
        self.__dirty = True
    ctrl1 = property(_get_ctrl1, _set_ctrl1)
    ctrl2 = property(_get_ctrl2, _set_ctrl2)
    def __eq__(self, pt):
        if not isinstance(pt, PathElement): return False
        return self.cmd == pt.cmd \
           and self.x == pt.x \
           and self.y == pt.y \
           and self.ctrl1 == pt.ctrl1 \
           and self.ctrl2 == pt.ctrl2
    def __ne__(self, pt):
        return not self.__eq__(pt)
    def __repr__(self):
        return "%s(cmd='%s', x=%.1f, y=%.1f, ctrl1=(%.1f, %.1f), ctrl2=(%.1f, %.1f))" % (
            self.__class__.__name__, self.cmd, self.x, self.y,
            self.ctrl1.x, self.ctrl1.y,
            self.ctrl2.x, self.ctrl2.y)
    def copy(self):
        # Returns a new PathElement with the same command, anchor and handles.
        if self.cmd == MOVETO \
        or self.cmd == LINETO:
            pts = ((self.x, self.y),)
        elif self.cmd == CURVETO:
            pts = ((self.ctrl1.x, self.ctrl1.y), (self.ctrl2.x, self.ctrl2.y), (self.x, self.y))
        else:
            pts = None
        return PathElement(self.cmd, pts)
class BezierPath(list):
    """ A list of PathElements (MOVETO, LINETO, CURVETO, CLOSE) making up a path.
        Drawn vertices, segment lengths, bounds and the hit-test polygon are
        cached; any mutation flags the path dirty so caches are rebuilt on the
        next draw(). For optimal performance, create the path once and leave
        it unmodified while drawing.
    """
    def __init__(self, path=None, **kwargs):
        """ A list of PathElements describing the curves and lines that make up the path.
        """
        if isinstance(path, (BezierPath, list, tuple)):
            self.extend([pt.copy() for pt in path])
        self._kwargs = kwargs
        self._cache = None # Cached vertices for drawing.
        self._segments = None # Cached segment lengths.
        self._bounds = None # Cached bounding rectangle.
        self._polygon = None # Cached polygon hit test area.
        self._dirty = False
        self._index = {} # Caches index() lookups, cleared when positions can shift.
    def copy(self):
        return BezierPath(self, **self._kwargs)
    # Every mutating list operation sets the dirty flag (and clears the index
    # cache where element positions can change).
    def append(self, pt):
        self._dirty = True; list.append(self, pt)
    def extend(self, points):
        self._dirty = True; list.extend(self, points)
    def insert(self, i, pt):
        self._dirty = True; self._index={}; list.insert(self, i, pt)
    def remove(self, pt):
        self._dirty = True; self._index={}; list.remove(self, pt)
    def pop(self, i=-1):
        # Fixed: list.pop() returns the removed element; this override previously
        # discarded it. The index also defaults to -1 like list.pop().
        self._dirty = True; self._index={}
        return list.pop(self, i)
    def __setitem__(self, i, pt):
        self._dirty = True; self._index={}; list.__setitem__(self, i, pt)
    def __delitem__(self, i):
        self._dirty = True; self._index={}; list.__delitem__(self, i)
    def sort(self, **kwargs):
        # Accepts the same keyword arguments as list.sort() (previously dropped).
        self._dirty = True; self._index={}; list.sort(self, **kwargs)
    def reverse(self):
        self._dirty = True; self._index={}; list.reverse(self)
    def index(self, pt):
        return self._index.setdefault(pt, list.index(self, pt))
    def _update(self):
        # Called from BezierPath.draw().
        # If points were added, removed or modified, clear the cache.
        b = self._dirty
        for pt in self: b = b or pt._dirty; pt._dirty = False
        if b:
            if self._cache is not None:
                # Free the compiled display lists on the GPU.
                if self._cache[0]: flush(self._cache[0])
                if self._cache[1]: flush(self._cache[1])
            self._cache = self._segments = self._bounds = self._polygon = None
            self._dirty = False
    def moveto(self, x, y):
        """ Adds a new point to the path at x, y.
        """
        self.append(PathElement(MOVETO, ((x, y),)))
    def lineto(self, x, y):
        """ Adds a line from the previous point to x, y.
        """
        self.append(PathElement(LINETO, ((x, y),)))
    def curveto(self, x1, y1, x2, y2, x3, y3):
        """ Adds a Bezier-curve from the previous point to x3, y3.
            The curvature is determined by control handles x1, y1 and x2, y2.
        """
        self.append(PathElement(CURVETO, ((x1, y1), (x2, y2), (x3, y3))))
    def arcto(self, x, y, radius=1, clockwise=True, short=False):
        """ Adds a number of Bezier-curves that draw an arc with the given radius to (x,y).
            The short parameter selects either the "long way" around or the "shortcut".
        """
        x0, y0 = self[-1].x, self[-1].y
        phi = geometry.angle(x0,y0,x,y)
        for p in bezier.arcto(x0, y0, radius, radius, phi, short, not clockwise, x, y):
            # bezier.arcto() yields either (x,y) line points or 6-value curve points.
            f = len(p) == 2 and self.lineto or self.curveto
            f(*p)
    def closepath(self):
        """ Adds a line from the previous point to the last MOVETO.
        """
        self.append(PathElement(CLOSE))
    def rect(self, x, y, width, height, roundness=0.0):
        """ Adds a (rounded) rectangle to the path.
            Corner roundness can be given as a relative float or absolute int.
        """
        if roundness <= 0:
            self.moveto(x, y)
            self.lineto(x+width, y)
            self.lineto(x+width, y+height)
            self.lineto(x, y+height)
            self.lineto(x, y)
        else:
            if isinstance(roundness, int):
                # Absolute corner radius in pixels.
                r = min(roundness, width/2, height/2)
            else:
                # Relative roundness (0.0-1.0) of the shortest side.
                r = min(width, height)
                r = min(roundness, 1) * r * 0.5
            self.moveto(x+r, y)
            self.lineto(x+width-r, y)
            self.arcto(x+width, y+r, radius=r, clockwise=False)
            self.lineto(x+width, y+height-r)
            self.arcto(x+width-r, y+height, radius=r, clockwise=False)
            self.lineto(x+r, y+height)
            self.arcto(x, y+height-r, radius=r, clockwise=False)
            self.lineto(x, y+r)
            self.arcto(x+r, y, radius=r, clockwise=False)
    def ellipse(self, x, y, width, height):
        """ Adds an ellipse to the path, approximated with four Bezier-curves.
        """
        w, h = width*0.5, height*0.5
        k = 0.5522847498 # kappa: (-1 + sqrt(2)) / 3 * 4
        self.moveto(x, y-h) # http://www.whizkidtech.redprince.net/bezier/circle/
        self.curveto(x+w*k, y-h, x+w, y-h*k, x+w, y, )
        self.curveto(x+w, y+h*k, x+w*k, y+h, x, y+h)
        self.curveto(x-w*k, y+h, x-w, y+h*k, x-w, y, )
        self.curveto(x-w, y-h*k, x-w*k, y-h, x, y-h)
        self.closepath()
    oval = ellipse
    def arc(self, x, y, width, height, start=0, stop=90):
        """ Adds an arc to the path.
            The arc follows the ellipse defined by (x, y, width, height),
            with start and stop specifying what angle range to draw.
        """
        w, h = width*0.5, height*0.5
        for i, p in enumerate(bezier.arc(x-w, y-h, x+w, y+h, start, stop)):
            if i == 0:
                self.moveto(*p[:2])
            self.curveto(*p[2:])
    def smooth(self, *args, **kwargs):
        """ Smooths the path by making the curve handles colinear.
            With mode=EQUIDISTANT, the curve handles will be of equal (average) length.
        """
        e = BezierEditor(self)
        for i, pt in enumerate(self):
            # Pre-fill the index cache so BezierEditor lookups are O(1).
            self._index[pt] = i
            e.smooth(pt, *args, **kwargs)
    def flatten(self, precision=RELATIVE):
        """ Returns a list of contours, in which each contour is a list of (x,y)-tuples.
            The precision determines the number of straight lines to use as a substition for a curve.
            It can be a fixed number (int) or relative to the curve length (float or RELATIVE).
        """
        if precision == RELATIVE:
            precision = RELATIVE_PRECISION
        contours = [[]]
        x0, y0 = None, None
        closeto = None
        for pt in self:
            # "x0 == y0 is None" is a chained comparison: x0 == y0 and y0 is None,
            # i.e. there is no current point yet.
            if (pt.cmd == LINETO or pt.cmd == CURVETO) and x0 == y0 is None:
                raise NoCurrentPointForPath
            elif pt.cmd == LINETO:
                contours[-1].append((x0, y0))
                contours[-1].append((pt.x, pt.y))
            elif pt.cmd == CURVETO:
                # Curves are interpolated from a number of straight line segments.
                # With relative precision, we use the (rough) curve length to determine the number of lines.
                x1, y1, x2, y2, x3, y3 = pt.ctrl1.x, pt.ctrl1.y, pt.ctrl2.x, pt.ctrl2.y, pt.x, pt.y
                if isinstance(precision, float):
                    n = int(max(0, precision) * bezier.curvelength(x0, y0, x1, y1, x2, y2, x3, y3, 3))
                else:
                    n = int(max(0, precision))
                if n > 0:
                    xi, yi = x0, y0
                    for i in range(n+1):
                        xj, yj, vx1, vy1, vx2, vy2 = bezier.curvepoint(float(i)/n, x0, y0, x1, y1, x2, y2, x3, y3)
                        contours[-1].append((xi, yi))
                        contours[-1].append((xj, yj))
                        xi, yi = xj, yj
            elif pt.cmd == MOVETO:
                contours.append([]) # Start a new contour.
                closeto = pt
            elif pt.cmd == CLOSE and closeto is not None:
                contours[-1].append((x0, y0))
                contours[-1].append((closeto.x, closeto.y))
            x0, y0 = pt.x, pt.y
        return contours
    def draw(self, precision=RELATIVE, **kwargs):
        """ Draws the path.
            The precision determines the number of straight lines to use as a substition for a curve.
            It can be a fixed number (int) or relative to the curve length (float or RELATIVE).
        """
        if len(kwargs) > 0:
            # Optional parameters in draw() overrule those set during initialization.
            kw = dict(self._kwargs)
            kw.update(kwargs)
            fill, stroke, strokewidth, strokestyle = color_mixin(**kw)
        else:
            fill, stroke, strokewidth, strokestyle = color_mixin(**self._kwargs)
        def _draw_fill(contours):
            # Drawing commands for the path fill (as triangles by tessellating the contours).
            v = geometry.tesselate(contours)
            glBegin(GL_TRIANGLES)
            for x, y in v:
                glVertex3f(x, y, 0)
            glEnd()
        def _draw_stroke(contours):
            # Drawing commands for the path stroke.
            for path in contours:
                glBegin(GL_LINE_STRIP)
                for x, y in path:
                    glVertex2f(x, y)
                glEnd()
        self._update() # Remove the cache if points were modified.
        # Note: "and" binds tighter than "or" below, so the cache is rebuilt when
        # it is missing the fill or stroke list needed now, or precision changed.
        if self._cache is None \
        or self._cache[0] is None and fill \
        or self._cache[1] is None and stroke \
        or self._cache[-1] != precision:
            # Calculate and cache the vertices as Display Lists.
            # If the path requires a fill color, it will have to be tessellated.
            if self._cache is not None:
                if self._cache[0]: flush(self._cache[0])
                if self._cache[1]: flush(self._cache[1])
            contours = self.flatten(precision)
            self._cache = [None, None, precision]
            if fill : self._cache[0] = precompile(_draw_fill, contours)
            if stroke : self._cache[1] = precompile(_draw_stroke, contours)
        if fill is not None:
            glColor4f(fill[0], fill[1], fill[2], fill[3] * _alpha)
            glCallList(self._cache[0])
        if stroke is not None and strokewidth > 0:
            glColor4f(stroke[0], stroke[1], stroke[2], stroke[3] * _alpha)
            glLineWidth(strokewidth)
            glLineDash(strokestyle)
            glCallList(self._cache[1])
    def angle(self, t):
        """ Returns the directional angle at time t (0.0-1.0) on the path.
        """
        # The directed() enumerator is much faster but less precise.
        pt0, pt1 = t==0 and (self.point(t), self.point(t+0.001)) or (self.point(t-0.001), self.point(t))
        return geometry.angle(pt0.x, pt0.y, pt1.x, pt1.y)
    def point(self, t):
        """ Returns the PathElement at time t (0.0-1.0) on the path.
            See the linear interpolation math in bezier.py.
        """
        if self._segments is None:
            self._segments = bezier.length(self, segmented=True, n=10)
        return bezier.point(self, t, segments=self._segments)
    def points(self, amount=2, start=0.0, end=1.0):
        """ Returns a list of PathElements along the path.
            To omit the last point on closed paths: end=1-1.0/amount
        """
        if self._segments is None:
            self._segments = bezier.length(self, segmented=True, n=10)
        return bezier.points(self, amount, start, end, segments=self._segments)
    def addpoint(self, t):
        """ Inserts a new PathElement at time t (0.0-1.0) on the path.
        """
        self._segments = None
        self._index = {}
        return bezier.insert_point(self, t)
    split = addpoint
    @property
    def length(self, precision=10):
        """ Returns an approximation of the total length of the path.
        """
        # Note: precision cannot actually be passed through a property getter;
        # the default of 10 is always used.
        return bezier.length(self, segmented=False, n=precision)
    @property
    def contours(self):
        """ Returns a list of contours (i.e. segments separated by a MOVETO) in the path.
            Each contour is a BezierPath object.
        """
        return bezier.contours(self)
    @property
    def bounds(self, precision=100):
        """ Returns a (x, y, width, height)-tuple of the approximate path dimensions.
        """
        # In _update(), traverse all the points and check if they have changed.
        # If so, the bounds must be recalculated.
        self._update()
        if self._bounds is None:
            l = t = float( "inf")
            r = b = float("-inf")
            for pt in self.points(precision):
                if pt.x < l: l = pt.x
                if pt.y < t: t = pt.y
                if pt.x > r: r = pt.x
                if pt.y > b: b = pt.y
            self._bounds = (l, t, r-l, b-t)
        return self._bounds
    def contains(self, x, y, precision=100):
        """ Returns True when point (x,y) falls within the contours of the path.
        """
        # Cheap bounding box test first, then ray casting on a cached polygon.
        bx, by, bw, bh = self.bounds
        if bx <= x <= bx+bw and \
           by <= y <= by+bh:
            if self._polygon is None \
            or self._polygon[1] != precision:
                self._polygon = [(pt.x,pt.y) for pt in self.points(precision)], precision
            # Ray casting algorithm:
            return geometry.point_in_polygon(self._polygon[0], x, y)
        return False
    def hash(self, state=None, decimal=1):
        """ Returns the path id, based on the position and handles of its PathElements.
            Two distinct BezierPath objects that draw the same path therefore have the same id.
        """
        f = lambda x: int(x*10**decimal) # Format floats as strings with given decimal precision.
        id = [state]
        for pt in self: id.extend((
            pt.cmd, f(pt.x), f(pt.y), f(pt.ctrl1.x), f(pt.ctrl1.y), f(pt.ctrl2.x), f(pt.ctrl2.y)))
        id = str(id)
        id = md5(id).hexdigest()
        return id
    def __repr__(self):
        return "BezierPath(%s)" % repr(list(self))
    def __del__(self):
        # Note: it is important that __del__() is called since it unloads the cache from GPU.
        # BezierPath and PathElement should contain no circular references, e.g. no PathElement.parent.
        if hasattr(self, "_cache") and self._cache is not None and flush:
            if self._cache[0]: flush(self._cache[0])
            if self._cache[1]: flush(self._cache[1])
def drawpath(path, **kwargs):
    """ Draws the given BezierPath (or list of PathElements).
        The current stroke, strokewidth and fill color are applied.
    """
    if isinstance(path, BezierPath):
        path.draw(**kwargs)
    else:
        BezierPath(path).draw(**kwargs)
# Whether endpath() automatically closes the constructed path.
_autoclosepath = True
def autoclosepath(close=False):
    """ Sets whether paths constructed with beginpath() and endpath() are automatically closed.
    """
    global _autoclosepath
    _autoclosepath = close
_path = None # The current path being constructed between beginpath() and endpath().
def beginpath(x, y):
    """ Starts a new path at (x,y).
        The commands moveto(), lineto(), curveto() and closepath()
        can then be used between beginpath() and endpath() calls.
    """
    global _path
    p = BezierPath()
    p.moveto(x, y)
    _path = p
def moveto(x, y):
    """ Moves the current point in the current path to (x,y).
    """
    path = _path
    if path is None:
        raise NoCurrentPath
    path.moveto(x, y)
def lineto(x, y):
    """ Draws a line from the current point in the current path to (x,y).
    """
    path = _path
    if path is None:
        raise NoCurrentPath
    path.lineto(x, y)
def curveto(x1, y1, x2, y2, x3, y3):
    """ Draws a curve from the current point in the current path to (x3,y3).
        The curvature is determined by control handles x1, y1 and x2, y2.
    """
    path = _path
    if path is None:
        raise NoCurrentPath
    path.curveto(x1, y1, x2, y2, x3, y3)
def closepath():
    """ Closes the current path with a straight line to the last MOVETO.
    """
    path = _path
    if path is None:
        raise NoCurrentPath
    path.closepath()
def endpath(draw=True, **kwargs):
    """ Draws and returns the current path.
        With draw=False, only returns the path so it can be manipulated and drawn with drawpath().
    """
    global _path
    if _path is None:
        raise NoCurrentPath
    p, _path = _path, None
    if _autoclosepath is True:
        p.closepath()
    if draw:
        p.draw(**kwargs)
    return p
def findpath(points, curvature=1.0):
    """ Returns a smooth BezierPath from the given list of (x,y)-tuples.
        Curve fitting is delegated to bezier.findpath().
    """
    return bezier.findpath(list(points), curvature)
# NodeBox compatibility alias.
Path = BezierPath
#--- BEZIER EDITOR -----------------------------------------------------------------------------------
# Handle-selection constants for BezierEditor methods.
EQUIDISTANT = "equidistant"
IN, OUT, BOTH = "in", "out", "both" # Drag pt1.ctrl2, pt2.ctrl1 or both simultaneously?
class BezierEditor:
    """ Manipulates the control handles of points in a BezierPath,
        e.g. to translate, rotate, scale or smooth a point's curve handles.
    """
    def __init__(self, path):
        self.path = path
    def _nextpoint(self, pt):
        # Returns the PathElement after pt in the path, or None for the last point.
        i = self.path.index(pt) # BezierPath caches this operation.
        return i < len(self.path)-1 and self.path[i+1] or None
    def translate(self, pt, x=0, y=0, h1=(0,0), h2=(0,0)):
        """ Translates the point and its control handles by (x,y).
            Translates the incoming handle by h1 and the outgoing handle by h2.
        """
        pt1, pt2 = pt, self._nextpoint(pt)
        pt1.x += x
        pt1.y += y
        pt1.ctrl2.x += x + h1[0]
        pt1.ctrl2.y += y + h1[1]
        if pt2 is not None:
            # Only apply h2 if the next point is a curve (its ctrl1 is meaningful).
            pt2.ctrl1.x += x + (pt2.cmd == CURVETO and h2[0] or 0)
            pt2.ctrl1.y += y + (pt2.cmd == CURVETO and h2[1] or 0)
    def rotate(self, pt, angle, handle=BOTH):
        """ Rotates the point control handles by the given angle.
        """
        pt1, pt2 = pt, self._nextpoint(pt)
        if handle in (BOTH, IN):
            pt1.ctrl2.x, pt1.ctrl2.y = geometry.rotate(pt1.ctrl2.x, pt1.ctrl2.y, pt1.x, pt1.y, angle)
        # Fixed: the condition is grouped so that handle=BOTH also requires a
        # next CURVETO point. Previously "BOTH or (OUT and ...)" short-circuited
        # on BOTH and crashed with AttributeError on the last point (pt2 is None).
        if handle in (BOTH, OUT) and pt2 is not None and pt2.cmd == CURVETO:
            pt2.ctrl1.x, pt2.ctrl1.y = geometry.rotate(pt2.ctrl1.x, pt2.ctrl1.y, pt1.x, pt1.y, angle)
    def scale(self, pt, v, handle=BOTH):
        """ Scales the point control handles by the given factor.
        """
        pt1, pt2 = pt, self._nextpoint(pt)
        if handle in (BOTH, IN):
            pt1.ctrl2.x, pt1.ctrl2.y = bezier.linepoint(v, pt1.x, pt1.y, pt1.ctrl2.x, pt1.ctrl2.y)
        # Fixed: same operator-precedence grouping as in rotate() above.
        if handle in (BOTH, OUT) and pt2 is not None and pt2.cmd == CURVETO:
            pt2.ctrl1.x, pt2.ctrl1.y = bezier.linepoint(v, pt1.x, pt1.y, pt2.ctrl1.x, pt2.ctrl1.y)
    def smooth(self, pt, mode=None, handle=BOTH):
        """ Smooths the curve handles around pt so they are colinear.
            With mode=EQUIDISTANT, both handles get the same (average) length.
        """
        pt1, pt2, i = pt, self._nextpoint(pt), self.path.index(pt)
        if pt2 is None:
            return
        if pt1.cmd == pt2.cmd == CURVETO:
            if mode == EQUIDISTANT:
                d1 = d2 = 0.5 * (
                    geometry.distance(pt1.x, pt1.y, pt1.ctrl2.x, pt1.ctrl2.y) + \
                    geometry.distance(pt1.x, pt1.y, pt2.ctrl1.x, pt2.ctrl1.y))
            else:
                d1 = geometry.distance(pt1.x, pt1.y, pt1.ctrl2.x, pt1.ctrl2.y)
                d2 = geometry.distance(pt1.x, pt1.y, pt2.ctrl1.x, pt2.ctrl1.y)
            if handle == IN:
                a = geometry.angle(pt1.x, pt1.y, pt1.ctrl2.x, pt1.ctrl2.y)
            elif handle == OUT:
                a = geometry.angle(pt2.ctrl1.x, pt2.ctrl1.y, pt1.x, pt1.y)
            else:
                # BOTH (default): align along the line through the two handles.
                # Fixed: previously an unrecognized handle value left "a" unbound.
                a = geometry.angle(pt2.ctrl1.x, pt2.ctrl1.y, pt1.ctrl2.x, pt1.ctrl2.y)
            pt1.ctrl2.x, pt1.ctrl2.y = geometry.coordinates(pt1.x, pt1.y, d1, a)
            pt2.ctrl1.x, pt2.ctrl1.y = geometry.coordinates(pt1.x, pt1.y, d2, a-180)
        elif pt1.cmd == CURVETO and pt2.cmd == LINETO:
            d = mode == EQUIDISTANT and \
                geometry.distance(pt1.x, pt1.y, pt2.x, pt2.y) or \
                geometry.distance(pt1.x, pt1.y, pt1.ctrl2.x, pt1.ctrl2.y)
            a = geometry.angle(pt1.x, pt1.y, pt2.x, pt2.y)
            pt1.ctrl2.x, pt1.ctrl2.y = geometry.coordinates(pt1.x, pt1.y, d, a-180)
        elif pt1.cmd == LINETO and pt2.cmd == CURVETO and i > 0:
            d = mode == EQUIDISTANT and \
                geometry.distance(pt1.x, pt1.y, self.path[i-1].x, self.path[i-1].y) or \
                geometry.distance(pt1.x, pt1.y, pt2.ctrl1.x, pt2.ctrl1.y)
            a = geometry.angle(self.path[i-1].x, self.path[i-1].y, pt1.x, pt1.y)
            pt2.ctrl1.x, pt2.ctrl1.y = geometry.coordinates(pt1.x, pt1.y, d, a)
#--- POINT ANGLES ------------------------------------------------------------------------------------
def directed(points):
    """ Returns an iterator that yields (angle, point)-tuples for the given list of points.
        The angle represents the direction of the point on the path.
        This works with BezierPath, Bezierpath.points, [pt1, pt2, pt2, ...]
        For example:
        for a, pt in directed(path.points(30)):
            push()
            translate(pt.x, pt.y)
            rotate(a)
            arrow(0, 0, 10)
            pop()
        This is useful if you want to have shapes following a path.
        To put text on a path, rotate the angle by +-90 to get the normal (i.e. perpendicular).
    """
    p = list(points)
    n = len(p)
    for i, pt in enumerate(p):
        if 0 < i < n-1 and pt.__dict__.get("_cmd") == CURVETO:
            # For a point on a curve, the control handle gives the best direction.
            # For PathElement (fixed point in BezierPath), ctrl2 tells us how the curve arrives.
            # For DynamicPathElement (returned from BezierPath.point()), ctrl1 tells how the curve arrives.
            ctrl = isinstance(pt, bezier.DynamicPathElement) and pt.ctrl1 or pt.ctrl2
            angle = geometry.angle(ctrl.x, ctrl.y, pt.x, pt.y)
        elif 0 < i < n-1 and pt.__dict__.get("_cmd") == LINETO and p[i-1].__dict__.get("_cmd") == CURVETO:
            # For a point on a line preceded by a curve, looking ahead gives better results.
            angle = geometry.angle(pt.x, pt.y, p[i+1].x, p[i+1].y)
        elif i == 0 and isinstance(points, BezierPath):
            # For the first point in a BezierPath, we can calculate a next point very close by.
            pt1 = points.point(0.001)
            angle = geometry.angle(pt.x, pt.y, pt1.x, pt1.y)
        elif i == n-1 and isinstance(points, BezierPath):
            # For the last point in a BezierPath, we can calculate a previous point very close by.
            pt0 = points.point(0.999)
            angle = geometry.angle(pt0.x, pt0.y, pt.x, pt.y)
        elif i == n-1 and isinstance(pt, bezier.DynamicPathElement) and (pt.ctrl1.x != pt.x or pt.ctrl1.y != pt.y):
            # For the last point in BezierPath.points(), use incoming handle (ctrl1) for curves.
            # Fixed: the handle comparison is parenthesized; the previous
            # "A and B and C or D" precedence let this branch fire for points
            # that are not last whenever pt.ctrl1.y != pt.y.
            angle = geometry.angle(pt.ctrl1.x, pt.ctrl1.y, pt.x, pt.y)
        elif 0 < i:
            # For any point, looking back gives a good result, if enough points are given.
            angle = geometry.angle(p[i-1].x, p[i-1].y, pt.x, pt.y)
        elif i < n-1:
            # For the first point, the best (only) guess is the location of the next point.
            angle = geometry.angle(pt.x, pt.y, p[i+1].x, p[i+1].y)
        else:
            angle = 0
        yield angle, pt
#--- CLIPPING PATH -----------------------------------------------------------------------------------
class ClippingMask:
    """ A mask shape for beginclip().
        Subclasses override draw() to render the mask; beginclip() calls it
        with a black fill and no stroke.
    """
    def draw(self, fill=(0,0,0,1), stroke=None):
        pass
def beginclip(path):
    """ Enables the given BezierPath (or ClippingMask) as a clipping mask.
        Drawing commands between beginclip() and endclip() are constrained to the shape of the path.
    """
    # Enable the stencil buffer to limit the area of rendering (stenciling).
    glClear(GL_STENCIL_BUFFER_BIT)
    glEnable(GL_STENCIL_TEST)
    # While the mask is drawn, increment the stencil buffer wherever it renders.
    glStencilFunc(GL_NOTEQUAL, 0, 0)
    glStencilOp(GL_INCR, GL_INCR, GL_INCR)
    # Shouldn't depth testing be disabled when stencilling?
    # In any case, if it is, transparency doesn't work.
    #glDisable(GL_DEPTH_TEST)
    path.draw(fill=(0,0,0,1), stroke=None) # Disregard color settings; always use a black mask.
    #glEnable(GL_DEPTH_TEST)
    # From here on, only render where the stencil buffer equals 1 (inside the mask).
    glStencilFunc(GL_EQUAL, 1, 1)
    glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP)
def endclip():
    # Disables the clipping mask enabled with beginclip().
    glDisable(GL_STENCIL_TEST)
#--- SUPERSHAPE --------------------------------------------------------------------------------------
def supershape(x, y, width, height, m, n1, n2, n3, points=100, percentage=1.0, range=2*pi, **kwargs):
    """ Returns a BezierPath constructed using the superformula,
        which can be used to describe many complex shapes and curves that are found in nature.
        Only the first points*percentage points are generated, spread over the given angular range.
    """
    p = BezierPath()
    cutoff = points * percentage
    started = False
    # Note: xrange (not range) because the `range` parameter shadows the builtin.
    for i in xrange(points):
        if i > cutoff:
            continue
        px, py = geometry.superformula(m, n1, n2, n3, i*range/points)
        px, py = px*width/2 + x, py*height/2 + y
        if not started:
            p.moveto(px, py)
            started = True
        else:
            p.lineto(px, py)
    p.closepath()
    if kwargs.get("draw", True):
        p.draw(**kwargs)
    return p
#=====================================================================================================
#--- IMAGE -------------------------------------------------------------------------------------------
# Textures and quad vertices are cached for performance.
# Textures remain in cache for the duration of the program.
# Quad vertices are cached as Display Lists and destroyed when the Image object is deleted.
# For optimal performance, images should be created once (not every frame) and its quads left unmodified.
# Performance should be comparable to (moving) pyglet.Sprites drawn in a batch.
pow2 = [2**n for n in range(20)] # [1, 2, 4, 8, 16, 32, 64, ...]

def ceil2(x):
    """ Returns the nearest power of 2 that is higher than or equal to x, e.g. 700 => 1024.
        For x <= 1 (including 0 and negative values) the result is 1, as before.

        Fixed: the original scanned the precomputed pow2 table and silently
        returned None for any x larger than 2**19; this version handles any size.
    """
    y = 1
    while y < x:
        y <<= 1
    return y
class ImageError(Exception):
    """ Raised when an image can't be loaded or an unknown image type is given. """
    pass
_texture_cache = {} # pyglet.Texture referenced by filename.
_texture_cached = {} # pyglet.Texture.id is in keys once the image has been cached.
def texture(img, data=None):
    """ Returns a (cached) texture from the given image filename or byte data.
        When a Image or Pixels object is given, returns the associated texture.

        Accepts: a cache id / file path (str or int), a pyglet Texture,
        an Image, a Pixels, pyglet ImageData, or raw image bytes via data=.
        Raises ImageError when a file can't be loaded or the type is unknown.
        The isinstance() checks below are order-sensitive: cache hits first,
        then loading from file, then pass-throughs for already-loaded types.
    """
    # Image texture stored in cache, referenced by file path (or a custom id defined with cache()).
    if isinstance(img, (basestring, int)) and img in _texture_cache:
        return _texture_cache[img]
    # Image file path, load it, cache it, return texture.
    if isinstance(img, basestring):
        try: cache(img, pyglet.image.load(img).get_texture())
        except IOError:
            raise ImageError, "can't load image from %s" % repr(img)
        return _texture_cache[img]
    # Image texture, return original.
    if isinstance(img, pyglet.image.Texture):
        return img
    # Image object, return image texture.
    # (if you use this to create a new image, the new image will do expensive caching as well).
    if isinstance(img, Image):
        return img.texture
    # Pixels object, return pixel texture.
    if isinstance(img, Pixels):
        return img.texture
    # Pyglet image data.
    if isinstance(img, pyglet.image.ImageData):
        return img.texture
    # Image data as byte string, load it, return texture.
    if isinstance(data, basestring):
        return pyglet.image.load("", file=StringIO(data)).get_texture()
    # Don't know how to handle this image.
    raise ImageError, "unknown image type: %s" % repr(img.__class__)
def cache(id, texture):
""" Store the given texture in cache, referenced by id (which can then be passed to image()).
This is useful for procedurally rendered images (which are not stored in cache by default).
"""
if isinstance(texture, (Image, Pixels)):
texture = texture.texture
if not isinstance(texture, pyglet.image.Texture):
raise ValueError, "can only cache texture, not %s" % repr(texture.__class__.__name__)
_texture_cache[id] = texture
_texture_cached[_texture_cache[id].id] = id
def cached(texture):
    """ Returns the cache id if the texture has been cached (None otherwise).
        Accepts an Image, Pixels, pyglet Texture, or a cache id (str/int).
    """
    if isinstance(texture, (Image, Pixels)):
        texture = texture.texture
    if isinstance(texture, pyglet.image.Texture):
        # Fixed: cache() above keys _texture_cached by Texture.id, so look the
        # id up directly (the original read texture.texture.id, an attribute
        # pyglet Textures don't reliably expose, and inconsistent with cache()).
        return _texture_cached.get(texture.id)
    if isinstance(texture, (basestring, int)):
        return texture in _texture_cache and texture or None
    return None
def _render(texture, quad=(0,0,0,0,0,0,0,0)):
    """ Renders the texture on the canvas inside a quadrilateral (i.e. rectangle).
        The quadrilateral can be distorted by giving corner offset coordinates.
    """
    t = texture.tex_coords # power-2 dimensions
    w = texture.width # See Pyglet programming guide -> OpenGL imaging.
    h = texture.height
    dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4 = quad or (0,0,0,0,0,0,0,0)
    glEnable(texture.target)
    glBindTexture(texture.target, texture.id)
    glBegin(GL_QUADS)
    # Corner offsets distort the w x h rectangle:
    # (dx4,dy4) shifts (0,0), (dx3,dy3) shifts (w,0),
    # (dx2,dy2) shifts (w,h), (dx1,dy1) shifts (0,h).
    glTexCoord3f(t[0], t[1], t[2] ); glVertex3f(dx4, dy4, 0)
    glTexCoord3f(t[3], t[4], t[5] ); glVertex3f(dx3+w, dy3, 0)
    glTexCoord3f(t[6], t[7], t[8] ); glVertex3f(dx2+w, dy2+h, 0)
    glTexCoord3f(t[9], t[10], t[11]); glVertex3f(dx1, dy1+h, 0)
    glEnd()
    glDisable(texture.target)
class Quad(list):
    """ The eight corner offsets (dx1,dy1 ... dx4,dy4) of a distorted image quad. """
    def __init__(self, dx1=0, dy1=0, dx2=0, dy2=0, dx3=0, dy3=0, dx4=0, dy4=0):
        """ Describes the four-sided polygon on which an image texture is "mounted".
            This is a quadrilateral (four sides) of which the vertices do not necessarily
            have a straight angle (i.e. the corners can be distorted).
        """
        list.__init__(self, (dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4))
        # Image objects poll Quad._dirty to check if the image cache is outdated.
        self._dirty = True
    def copy(self):
        return Quad(*self)
    def reset(self):
        list.__init__(self, (0,) * 8)
        self._dirty = True
    def __setitem__(self, index, value):
        list.__setitem__(self, index, value)
        self._dirty = True
    # Generate a property for each of the eight slots; writing through a
    # property funnels into __setitem__ above and marks the quad dirty.
    def _corner_property(index):
        def _get(self):
            return self[index]
        def _set(self, value):
            self[index] = value
        return property(_get, _set)
    dx1 = _corner_property(0)
    dy1 = _corner_property(1)
    dx2 = _corner_property(2)
    dy2 = _corner_property(3)
    dx3 = _corner_property(4)
    dy3 = _corner_property(5)
    dx4 = _corner_property(6)
    dy4 = _corner_property(7)
    del _corner_property
class Image(object):
def __init__(self, path, x=0, y=0, width=None, height=None, alpha=1.0, data=None):
""" A texture that can be drawn at a given position.
The quadrilateral in which the texture is drawn can be distorted (slow, image cache is flushed).
The image can be resized, colorized and its opacity can be set.
"""
self._src = (path, data)
self._texture = texture(path, data=data)
self._cache = None
self.x = x
self.y = y
self.width = width or self._texture.width # Scaled width, Image.texture.width yields original width.
self.height = height or self._texture.height # Scaled height, Image.texture.height yields original height.
self.quad = Quad()
self.color = Color(1.0, 1.0, 1.0, alpha)
def copy(self, texture=None, width=None, height=None):
img = texture is None \
and self.__class__(self._src[0], data=self._src[1]) \
or self.__class__(texture)
img.x = self.x
img.y = self.y
img.width = self.width
img.height = self.height
img.quad = self.quad.copy()
img.color = self.color.copy()
if width is not None:
img.width = width
if height is not None:
img.height = height
return img
@property
def id(self):
return self._texture.id
@property
def texture(self):
return self._texture
def _get_xy(self):
return (self.x, self.y)
def _set_xy(self, (x,y)):
self.x = x
self.y = y
xy = property(_get_xy, _set_xy)
def _get_size(self):
return (self.width, self.height)
def _set_size(self, (w,h)):
self.width = w
self.height = h
size = property(_get_size, _set_size)
def _get_alpha(self):
return self.color[3]
def _set_alpha(self, v):
self.color[3] = v
alpha = property(_get_alpha, _set_alpha)
def distort(self, dx1=0, dy1=0, dx2=0, dy2=0, dx3=0, dy3=0, dx4=0, dy4=0):
""" Adjusts the four-sided polygon on which an image texture is "mounted",
by incrementing the corner coordinates with the given values.
"""
for i, v in enumerate((dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4)):
if v != 0:
self.quad[i] += v
def adjust(r=1.0, g=1.0, b=1.0, a=1.0):
""" Adjusts the image color by multiplying R,G,B,A channels with the given values.
"""
self.color[0] *= r
self.color[1] *= g
self.color[2] *= b
self.color[3] *= a
def draw(self, x=None, y=None, width=None, height=None, alpha=None, color=None, filter=None):
""" Draws the image.
The given parameters (if any) override the image's attributes.
"""
# Calculate and cache the quad vertices as a Display List.
# If the quad has changed, update the cache.
if self._cache is None or self.quad._dirty:
flush(self._cache)
self._cache = precompile(_render, self._texture, self.quad)
self.quad._dirty = False
# Given parameters override Image attributes.
if x is None:
x = self.x
if y is None:
y = self.y
if width is None:
width = self.width
if height is None:
height = self.height
if color and len(color) < 4:
color = color[0], color[1], color[2], 1.0
if color is None:
color = self.color
if alpha is not None:
color = color[0], color[1], color[2], alpha
if filter:
filter.texture = self._texture # Register the current texture with the filter.
filter.push()
# Round position (x,y) to nearest integer to avoid sub-pixel rendering.
# This ensures there are no visual artefacts on transparent borders (e.g. the "white halo").
# Halo can also be avoided by overpainting in the source image, but this requires some work:
# http://technology.blurst.com/remove-white-borders-in-transparent-textures/
x = round(x)
y = round(y)
w = float(width) / self._texture.width
h = float(height) / self._texture.height
# Transform and draw the quads.
glPushMatrix()
glTranslatef(x, y, 0)
glScalef(w, h, 0)
glColor4f(color[0], color[1], color[2], color[3] * _alpha)
glCallList(self._cache)
glPopMatrix()
if filter:
filter.pop()
def save(self, path):
""" Exports the image as a PNG-file.
"""
self._texture.save(path)
def __repr__(self):
return "%s(x=%.1f, y=%.1f, width=%.1f, height=%.1f, alpha=%.2f)" % (
self.__class__.__name__, self.x, self.y, self.width, self.height, self.alpha)
def __del__(self):
try:
if hasattr(self, "_cache") and self._cache is not None and flush:
flush(self._cache)
except:
pass
_IMAGE_CACHE = 200
_image_cache = {} # Image object referenced by Image.texture.id.
_image_queue = [] # Most recent id's are at the front of the list.
def image(img, x=None, y=None, width=None, height=None,
          alpha=None, color=None, filter=None, data=None, draw=True):
    """ Draws the image at (x,y), scaling it to the given width and height.
        The image's transparency can be set with alpha (0.0-1.0).
        Applies the given color adjustment, quad distortion and filter (one filter can be specified).
        Note: with a filter enabled, alpha and color will not be applied.
        This is because the filter overrides the default drawing behavior with its own.
    """
    if not isinstance(img, Image):
        # If the given image is not an Image object, create one on the fly.
        # This object is cached for reuse.
        # The cache has a limited size (200), so the oldest Image objects are deleted.
        t = texture(img, data=data)
        if t.id in _image_cache:
            img = _image_cache[t.id]
        else:
            img = Image(img, data=data)
            _image_cache[img.texture.id] = img
            _image_queue.insert(0, img.texture.id)
            # Evict the oldest entries (FIFO by creation order; hits do not reorder the queue).
            for id in reversed(_image_queue[_IMAGE_CACHE:]):
                del _image_cache[id]
                del _image_queue[-1]
    # Draw the image.
    if draw:
        img.draw(x, y, width, height, alpha, color, filter)
    return img
def imagesize(img):
    """ Returns a (width, height)-tuple with the image dimensions.
    """
    tex = texture(img)
    return (tex.width, tex.height)
def crop(img, x=0, y=0, width=None, height=None):
    """ Returns the given (x, y, width, height)-region from the image.
        Use this to pass cropped image files to image().
        The region is clamped to the source bounds; the return type mirrors the
        input type (Image => Image, Pixels => Pixels, Texture => Texture,
        anything else such as a file path => Image).
    """
    t = texture(img)
    if width is None: width = t.width
    if height is None: height = t.height
    t = t.get_region(x, y, min(t.width-x, width), min(t.height-y, height))
    if isinstance(img, Image):
        # Fixed: the original made a redundant intermediate img.copy(texture=t)
        # whose result was immediately copied again and discarded.
        return img.copy(texture=t, width=t.width, height=t.height)
    if isinstance(img, Pixels):
        return Pixels(t)
    if isinstance(img, pyglet.image.Texture):
        return t
    return Image(t)
#--- PIXELS ------------------------------------------------------------------------------------------
class Pixels(list):
    def __init__(self, img):
        """ A list of RGBA color values (0-255) for each pixel in the given image.
            The Pixels object can be passed to the image() command.
        """
        self._img = texture(img).get_image_data()
        # A negative pitch means the pixels are stored top-to-bottom row.
        self._flipped = self._img.pitch >= 0
        # Data yields a byte array if no conversion (e.g. BGRA => RGBA) was necessary,
        # or a byte string otherwise - which needs to be converted to a list of ints.
        data = self._img.get_data("RGBA", self._img.width*4 * (-1,1)[self._flipped])
        if isinstance(data, str):
            data = map(ord, list(data))
        # Some formats seem to store values from -1 to -256.
        data = [(256+v)%256 for v in data]
        self.array = data # Flat list: [r0, g0, b0, a0, r1, g1, b1, a1, ...]
        self._texture = None # Rebuilt lazily in Pixels.texture via update().
    @property
    def width(self):
        return self._img.width
    @property
    def height(self):
        return self._img.height
    @property
    def size(self):
        return (self.width, self.height)
    def __len__(self):
        # Number of pixels; the flat array holds 4 channel values per pixel.
        return len(self.array) / 4
    def __iter__(self):
        for i in xrange(len(self)):
            yield self[i]
    def __getitem__(self, i):
        """ Returns a list of R,G,B,A channel values between 0-255 from pixel i.
            Users need to wrap the list in a Color themselves for performance.
            - r,g,b,a = Pixels[i]
            - clr = color(Pixels[i], base=255)
        """
        return self.array[i*4:i*4+4]
    def __setitem__(self, i, v):
        """ Sets pixel i to the given R,G,B,A values.
            Users need to unpack a Color themselves for performance,
            and are responsible for keeping channel values between 0 and 255
            (otherwise an error will occur when Pixels.update() is called),
            - Pixels[i] = r,g,b,a
            - Pixels[i] = clr.map(base=255)
        """
        for j in range(4):
            self.array[i*4+j] = v[j]
    def __getslice__(self, i, j):
        return [self[i+n] for n in xrange(j-i)]
    def __setslice__(self, i, j, seq):
        for n in xrange(j-i):
            self[i+n] = seq[n]
    def map(self, function):
        """ Applies a function to each pixel.
            Function takes a list of R,G,B,A channel values and must return a similar list.
        """
        for i in xrange(len(self)):
            self[i] = function(self[i])
    def get(self, i, j):
        """ Returns the pixel at column i, row j as a Color object.
        """
        # i indexes horizontally (compared to width), j vertically.
        if 0 <= i < self.width and 0 <= j < self.height:
            return color(self[i+j*self.width], base=255)
    def set(self, i, j, clr):
        """ Sets the pixel at column i, row j from a Color object.
        """
        if 0 <= i < self.width and 0 <= j < self.height:
            self[i+j*self.width] = clr.map(base=255)
    def update(self):
        """ Pixels.update() must be called to refresh the image.
        """
        # Re-encode the channel list as a byte string and push it back to pyglet.
        data = self.array
        data = "".join(map(chr, data))
        self._img.set_data("RGBA", self._img.width*4*(-1,1)[self._flipped], data)
        self._texture = self._img.get_texture()
    @property
    def texture(self):
        if self._texture is None:
            self.update()
        return self._texture
    def copy(self):
        return Pixels(self.texture)
    def __repr__(self):
        return "%s(width=%.1f, height=%.1f)" % (
            self.__class__.__name__, self.width, self.height)
# Lowercase alias, matching the other drawing commands.
pixels = Pixels
#--- ANIMATION ---------------------------------------------------------------------------------------
# A sequence of images displayed in a loop.
# Useful for storing pre-rendered effect frames like explosions etc.
class Animation(list):
    def __init__(self, images=[], duration=None, loop=False, **kwargs):
        """ Constructs an animation loop from the given image frames.
            The duration specifies the time for the entire animation to run.
            Animations are useful to cache effects like explosions,
            that have for example been prepared in an offscreen buffer.
        """
        # The images=[] default is shared across calls but only read
        # (copied via list()), never mutated, so it is safe here.
        list.__init__(self, list(images))
        self.duration = duration # Duration of the entire animation.
        self.loop = loop # Loop from last frame to first frame?
        self._i = -1 # Frame counter.
        self._t = Transition(0, interpolation=kwargs.get("interpolation", LINEAR))
    def copy(self, **kwargs):
        return Animation(self,
              duration = kwargs.get("duration", self.duration),
                  loop = kwargs.get("loop", self.loop),
         interpolation = self._t._interpolation)
    def update(self):
        if self.duration is not None:
            # With a duration,
            # skip to a next frame so that the entire animation takes the given time.
            if self._i < 0 or self.loop and self._i == len(self)-1:
                # (Re)start: transition from frame 0 to the last frame over `duration`.
                self._t.set(0, 0)
                self._t.update()
                self._t.set(len(self)-1, self.duration)
                self._t.update()
            self._i = int(self._t.current)
        else:
            # Without a duration,
            # Animation.update() simply moves to the next frame.
            if self._i < 0 or self.loop and self._i == len(self)-1:
                self._i = -1
            self._i = min(self._i+1, len(self)-1)
    @property
    def frames(self):
        return self
    @property
    def frame(self):
        # Yields the current frame Image (or None).
        try: return self[self._i]
        except:
            return None
    @property
    def done(self):
        # Yields True when the animation has stopped (or hasn't started).
        return self.loop is False and self._i == len(self)-1
    def draw(self, *args, **kwargs):
        if not self.done:
            image(self.frame, *args, **kwargs)
    def __repr__(self):
        return "%s(frames=%i, duration=%s)" % (
            self.__class__.__name__, len(self), repr(self.duration))
# Lowercase alias, matching the other drawing commands.
animation = Animation
#--- OFFSCREEN RENDERING -----------------------------------------------------------------------------
# Offscreen buffers can be used to render images from paths etc.
# or to apply filters on images before drawing them to the screen.
# There are several ways to draw offscreen:
# - render(img, filter): applies the given filter to the image and returns it.
# - procedural(function, width, height): execute the drawing commands in function inside an image.
# - Create your own subclass of OffscreenBuffer with a draw() method:
# class MyBuffer(OffscreenBuffer):
# def draw(self): pass
# - Define drawing commands between OffscreenBuffer.push() and pop():
# b = MyBuffer()
# b.push()
# # drawing commands
# b.pop()
# img = Image(b.render())
#
# The shader.py module already defines several filters that use an offscreen buffer, for example:
# blur(), adjust(), multiply(), twirl(), ...
#
# The less you change about an offscreen buffer, the faster it runs.
# This includes switching it on and off and changing its size.
from shader import *
#=====================================================================================================
#--- FONT --------------------------------------------------------------------------------------------
def install_font(ttf):
    """ Loads the given TrueType font from file, and returns True on success.
    """
    try:
        pyglet.font.add_file(ttf)
        return True
    except Exception:
        # This might fail with Carbon on 64-bit Mac systems.
        # Fonts can be installed on the system manually if this is the case.
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still propagate.
        return False
# Load the platform-independent fonts shipped with NodeBox.
# The default font is Droid (licensed under Apache 2.0).
try:
    # Install the fonts bundled in nodebox/font (Droid, licensed under Apache 2.0).
    for f in glob(path.join(path.dirname(__file__), "..", "font", "*")):
        install_font(f)
    DEFAULT_FONT = "Droid Sans"
except:
    # NOTE(review): bare except is deliberate best-effort — if __file__ or the
    # font folder is unavailable, fall back to a commonly installed system font.
    DEFAULT_FONT = "Arial"
# Font weight
NORMAL = "normal"
BOLD = "bold"
ITALIC = "italic"
# Text alignment
LEFT = "left"
RIGHT = "right"
CENTER = "center"
_fonts = [] # Custom fonts loaded from file.
_fontname = DEFAULT_FONT # Current state font name.
_fontsize = 12 # Current state font size.
_fontweight = [False, False] # Current state font weight (bold, italic).
_lineheight = 1.0 # Current state text lineheight.
_align = LEFT # Current state text alignment (LEFT/RIGHT/CENTER).
def font(fontname=None, fontsize=None, fontweight=None, file=None):
    """ Sets the current font and/or fontsize.
        If a filename is also given, loads the fontname from the given font file.
        Returns the current font name.
    """
    global _fontname, _fontsize
    if file is not None and file not in _fonts:
        # Only install each font file once.
        _fonts.append(file); install_font(file)
    if fontname is not None:
        _fontname = fontname
    if fontsize is not None:
        _fontsize = fontsize
    if fontweight is not None:
        _fontweight_(fontweight) # _fontweight_() is just an alias for fontweight().
    return _fontname
def fontname(name=None):
    """ Sets the current font used when drawing text.
        Returns the current font name.
    """
    global _fontname
    if name is not None:
        _fontname = name
    return _fontname
def fontsize(size=None):
    """ Sets the current fontsize in points.
        Returns the current fontsize.
    """
    global _fontsize
    if size is not None:
        _fontsize = size
    return _fontsize
def fontweight(*args, **kwargs):
    """ Sets the current font weight.
        You can supply NORMAL, BOLD and/or ITALIC or set named parameters bold=True and/or italic=True.
        A single list or tuple of weights is also accepted, e.g. fontweight([BOLD, ITALIC]).
        Returns the current [bold, italic] state list.
    """
    global _fontweight
    # Unwrap a single list/tuple argument: fontweight([BOLD, ITALIC]).
    # Fixed: the original tested isinstance(args, ...), which is always true
    # (args is itself a tuple), so a single string argument was unwrapped to
    # the bare string and matched by substring instead of by element.
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
        args = args[0]
    if NORMAL in args:
        _fontweight = [False, False]
    if BOLD in args or kwargs.get(BOLD):
        _fontweight[0] = True
    if ITALIC in args or kwargs.get(ITALIC):
        _fontweight[1] = True
    return _fontweight

_fontweight_ = fontweight
def lineheight(size=None):
    """ Sets the vertical spacing between lines of text.
        The given size is a relative value: lineheight 1.2 for fontsize 10 means 12.
        Returns the current lineheight.
    """
    global _lineheight
    if size is not None:
        _lineheight = size
    return _lineheight
def align(mode=None):
    """ Sets the alignment of text paragraphs (LEFT, RIGHT or CENTER).
        Returns the current alignment.
    """
    global _align
    if mode is not None:
        _align = mode
    return _align
#--- FONT MIXIN --------------------------------------------------------------------------------------
# The text() command has optional parameters font, fontsize, fontweight, bold, italic, lineheight and align.
def font_mixin(**kwargs):
    """ Parses font styling from the given keyword arguments,
        falling back to the current state for anything omitted.
        Returns a (fontname, fontsize, bold, italic, lineheight, align)-tuple.
    """
    weight = kwargs.get("fontweight", "")
    return (
        kwargs.get("fontname", kwargs.get("font", _fontname)),
        kwargs.get("fontsize", _fontsize),
        kwargs.get("bold", BOLD in weight or _fontweight[0]),
        kwargs.get("italic", ITALIC in weight or _fontweight[1]),
        kwargs.get("lineheight", _lineheight),
        kwargs.get("align", _align))
#--- TEXT --------------------------------------------------------------------------------------------
# Text is cached for performance.
# For optimal performance, texts should be created once (not every frame) and left unmodified.
# Dynamic texts use a cache of recycled Text objects.
# pyglet.text.Label leaks memory when deleted, because its old batch continues to reference
# loaded font/fontsize/bold/italic glyphs.
# Adding all labels to our own batch remedies this.
_label_batch = pyglet.graphics.Batch()
def label(str="", width=None, height=None, **kwargs):
    """ Returns a drawable pyglet.text.Label object from the given string.
        Optional arguments include: font, fontsize, bold, italic, align, lineheight, fill.
        If these are omitted the current state is used.
    """
    # Note: the str parameter intentionally mirrors text()'s signature
    # and shadows the builtin.
    fontname, fontsize, bold, italic, lineheight, align = font_mixin(**kwargs)
    fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
    fill = fill is None and (0,0,0,0) or fill
    # We use begin_update() so that the TextLayout doesn't refresh on each update.
    # FormattedDocument allows individual styling of characters - see Text.style().
    label = pyglet.text.Label(batch=_label_batch)
    label.begin_update()
    label.document = pyglet.text.document.FormattedDocument(str or " ")
    label.width = width
    label.height = height
    label.font_name = fontname
    label.font_size = fontsize
    label.bold = bold
    label.italic = italic
    label.multiline = True
    label.anchor_y = "bottom"
    label.set_style("align", align)
    label.set_style("line_spacing", lineheight * fontsize)
    label.color = [int(ch*255) for ch in fill]
    if str == "":
        # Empty string "" does not set properties so we used " " first.
        label.text = str
    label.end_update()
    return label
class Text(object):
    def __init__(self, str, x=0, y=0, width=None, height=None, **kwargs):
        """ A formatted string of text that can be drawn at a given position.
            Text has the following properties:
            text, x, y, width, height, font, fontsize, bold, italic, lineheight, align, fill.
            Individual character ranges can be styled with Text.style().
        """
        if width is None:
            # Supplying a string with "\n" characters will crash if no width is given.
            # On the outside it appears as None but inside we use a very large number.
            width = geometry.INFINITE
            # With no fixed width the label is always laid out LEFT;
            # the user's alignment choice is remembered separately in _align.
            a, kwargs["align"] = kwargs.get("align", _align), LEFT
        else:
            a = None
        # Assign through __dict__ to bypass the proxy logic in __setattr__.
        self.__dict__["x"] = x
        self.__dict__["y"] = y
        self.__dict__["_label"] = label(str, width, height, **kwargs)
        self.__dict__["_dirty"] = False # True => label layout needs end_update().
        self.__dict__["_align"] = a
        self.__dict__["_fill"] = None # Lazily built Color mirror of _label.color.
    def _get_xy(self):
        return (self.x, self.y)
    def _set_xy(self, (x,y)):
        self.x = x
        self.y = y
    xy = property(_get_xy, _set_xy)
    def _get_size(self):
        return (self.width, self.height)
    def _set_size(self, (w,h)):
        self.width = w
        self.height = h
    size = property(_get_size, _set_size)
    def __getattr__(self, k):
        # Most properties are proxied to the internal pyglet label.
        if k in self.__dict__:
            return self.__dict__[k]
        elif k in ("text", "height", "bold", "italic"):
            return getattr(self._label, k)
        elif k == "string":
            return self._label.text
        elif k == "width":
            # INFINITE means no width was given; appears as None (implicit return).
            if self._label.width != geometry.INFINITE: return self._label.width
        elif k in ("font", "fontname"):
            return self._label.font_name
        elif k == "fontsize":
            return self._label.font_size
        elif k == "fontweight":
            return ((None, BOLD)[self._label.bold], (None, ITALIC)[self._label.italic])
        elif k == "lineheight":
            return self._label.get_style("line_spacing") / (self.fontsize or 1)
        elif k == "align":
            if not self._align: self._align = self._label.get_style(k)
            return self._align
        elif k == "fill":
            if not self._fill: self._fill = Color([ch/255.0 for ch in self._label.color])
            return self._fill
        else:
            raise AttributeError, "'Text' object has no attribute '%s'" % k
    def __setattr__(self, k, v):
        if k in self.__dict__:
            self.__dict__[k] = v; return
        # Setting properties other than x and y requires the label's layout to be updated.
        self.__dict__["_dirty"] = True
        self._label.begin_update()
        if k in ("text", "height", "bold", "italic"):
            setattr(self._label, k, v)
        elif k == "string":
            self._label.text = v
        elif k == "width":
            self._label.width = v is None and geometry.INFINITE or v
        elif k in ("font", "fontname"):
            self._label.font_name = v
        elif k == "fontsize":
            self._label.font_size = v
        elif k == "fontweight":
            self._label.bold, self._label.italic = BOLD in v, ITALIC in v
        elif k == "lineheight":
            self._label.set_style("line_spacing", v * (self.fontsize or 1))
        elif k == "align":
            # Alignment only takes effect when a width is set (see __init__).
            self._align = v
            self._label.set_style(k, self._label.width == geometry.INFINITE and LEFT or v)
        elif k == "fill":
            self._fill = v
            self._label.color = [int(255*ch) for ch in self._fill or (0,0,0,0)]
        else:
            raise AttributeError, "'Text' object has no attribute '%s'" % k
    def _update(self):
        # Called from Text.draw(), Text.copy() and Text.metrics.
        # Ensures that all the color changes have been reflected in Text._label.
        # If necessary, recalculates the label's layout (happens in end_update()).
        if hasattr(self._fill, "_dirty") and self._fill._dirty:
            self.fill = self._fill
            self._fill._dirty = False
        if self._dirty:
            self._label.end_update()
            self._dirty = False
    @property
    def path(self):
        # See textpath() for glyph outlines instead.
        raise NotImplementedError
    @property
    def metrics(self):
        """ Yields a (width, height)-tuple of the actual text content.
        """
        self._update()
        return self._label.content_width, self._label.content_height
    def draw(self, x=None, y=None):
        """ Draws the text.
        """
        # Given parameters override Text attributes.
        if x is None:
            x = self.x
        if y is None:
            y = self.y
        # Fontsize is rounded, and fontsize 0 will output a default font.
        # Therefore, we don't draw text with a fontsize smaller than 0.5.
        if self._label.font_size >= 0.5:
            glPushMatrix()
            glTranslatef(x, y, 0)
            self._update()
            self._label.draw()
            glPopMatrix()
    def copy(self):
        self._update()
        txt = Text(self.text, self.x, self.y, self.width, self.height,
              fontname = self.fontname,
              fontsize = self.fontsize,
                  bold = self.bold,
                italic = self.italic,
            lineheight = self.lineheight,
                 align = self.align,
                  fill = self.fill
        )
        # The individual character styling is retrieved from Label.document._style_runs.
        # Traverse it and set the styles in the new text.
        txt._label.begin_update()
        for k in self._label.document._style_runs:
            for i, j, v in self._label.document._style_runs[k]:
                txt.style(i,j, **{k:v})
        txt._label.end_update()
        return txt
    def style(self, i, j, **kwargs):
        """ Defines the styling for a range of characters in the text.
            Valid arguments can include: font, fontsize, bold, italic, lineheight, align, fill.
            For example: text.style(0, 10, bold=True, fill=color(1,0,0))
        """
        # Translate NodeBox-style keywords to pyglet document attributes.
        attributes = {}
        for k,v in kwargs.items():
            if k in ("font", "fontname"):
                attributes["font_name"] = v
            elif k == "fontsize":
                attributes["font_size"] = v
            elif k in ("bold", "italic", "align"):
                attributes[k] = v
            elif k == "fontweight":
                attributes.setdefault("bold", BOLD in v)
                attributes.setdefault("italic", ITALIC in v)
            elif k == "lineheight":
                attributes["line_spacing"] = v * self._label.font_size
            elif k == "fill":
                attributes["color"] = [int(ch*255) for ch in v]
            else:
                attributes[k] = v
        self._dirty = True
        self._label.begin_update()
        self._label.document.set_style(i, j, attributes)
    def __len__(self):
        return len(self.text)
    def __del__(self):
        # Remove the label from the shared batch so its glyphs can be freed.
        if hasattr(self, "_label") and self._label:
            self._label.delete()
_TEXT_CACHE = 200
_text_cache = {} # Lists of recyclable Text objects, by (font, fontsize, bold, italic).
_text_queue = [] # Most recent cache keys are at the front of the list.
def text(str, x=None, y=None, width=None, height=None, draw=True, **kwargs):
    """ Draws the string at the given position, with the current font().
        Lines of text will span the given width before breaking to the next line.
        The text will be displayed with the current state font(), fontsize(), fontweight(), etc.
        When the given text is a Text object, the state will not be applied.
    """
    if isinstance(str, Text) and width is None and height is None and len(kwargs) == 0:
        txt = str
    else:
        # If the given text is not a Text object, create one on the fly.
        # Dynamic Text objects are cached by (font, fontsize, bold, italic),
        # and those that are no longer referenced by the user are recycled.
        # Changing Text properties is still faster than creating a new Text.
        # The cache has a limited size (200), so the oldest Text objects are deleted.
        fontname, fontsize, bold, italic, lineheight, align = font_mixin(**kwargs)
        fill, stroke, strokewidth, strokestyle = color_mixin(**kwargs)
        id = (fontname, int(fontsize), bold, italic)
        recycled = False
        if id in _text_cache:
            for txt in _text_cache[id]:
                # Reference count 3 => Python, _text_cache[id], txt.
                # No other variables are referencing the text, so we can recycle it.
                if getrefcount(txt) == 3:
                    txt.text = str
                    txt.x = x or 0
                    txt.y = y or 0
                    txt.width = width
                    txt.height = height
                    txt.lineheight = lineheight
                    txt.align = align
                    txt.fill = fill
                    recycled = True
                    break
        if not recycled:
            txt = Text(str, x or 0, y or 0, width, height, **kwargs)
            _text_cache.setdefault(id, [])
            _text_cache[id].append(txt)
            _text_queue.insert(0, id)
            # Evict the oldest cached Text for each key that overflows the cache.
            for id in reversed(_text_queue[_TEXT_CACHE:]):
                del _text_cache[id][0]
                del _text_queue[-1]
    if draw:
        txt.draw(x, y)
    return txt
def textwidth(txt, **kwargs):
    """ Returns the width of the given text.
    """
    if isinstance(txt, Text) and not kwargs:
        return txt.metrics[0]
    # Build a (cached) Text on the fly without drawing it.
    kwargs["draw"] = False
    return text(txt, 0, 0, **kwargs).metrics[0]
def textheight(txt, width=None, **kwargs):
    """ Returns the height of the given text.
    """
    if isinstance(txt, Text) and not kwargs and width == txt.width:
        return txt.metrics[1]
    # Build a (cached) Text on the fly without drawing it.
    kwargs["draw"] = False
    return text(txt, 0, 0, width=width, **kwargs).metrics[1]
def textmetrics(txt, width=None, **kwargs):
    """ Returns a (width, height)-tuple for the given text.
    """
    if isinstance(txt, Text) and not kwargs and width == txt.width:
        return txt.metrics
    # Build a (cached) Text on the fly without drawing it.
    kwargs["draw"] = False
    return text(txt, 0, 0, width=width, **kwargs).metrics
#--- TEXTPATH ----------------------------------------------------------------------------------------
class GlyphPathError(Exception):
    """ Raised by textpath() when no glyph outline data is available for a character. """
    pass
import cPickle
glyphs = {}
try:
    # Load cached font glyph path information from nodebox/font/glyph.p.
    # By default, it has glyph path info for Droid Sans, Droid Sans Mono, Droid Serif.
    glyphs = path.join(path.dirname(__file__), "..", "font", "glyph.p")
    glyphs = cPickle.load(open(glyphs))
except:
    # NOTE(review): best-effort; if this fails, glyphs may be left holding the
    # file path string and textpath() raises GlyphPathError on first use.
    pass
def textpath(string, x=0, y=0, **kwargs):
    """ Returns a BezierPath from the given text string.
        The fontname, fontsize and fontweight can be given as optional parameters,
        width, height, lineheight and align are ignored.
        Only works with ASCII characters in the default fonts (Droid Sans, Droid Sans Mono, Droid Serif, Arial).
        See nodebox/font/glyph.py on how to activate other fonts.
        Raises GlyphPathError when no outline is cached for a character.
    """
    fontname, fontsize, bold, italic, lineheight, align = font_mixin(**kwargs)
    # Map the bold/italic flags to the weight keys used in the glyphs dict.
    w = bold and italic and "bold italic" or bold and "bold" or italic and "italic" or "normal"
    p = BezierPath()
    # Scale factor: glyph outlines appear to be stored for a 1000-unit em — TODO confirm.
    f = fontsize / 1000.0
    for ch in string:
        try: glyph = glyphs[fontname][w][ch]
        except:
            raise GlyphPathError, "no glyph path information for %s %s '%s'" % (w, fontname, ch)
        for pt in glyph:
            # Each pt is (command, x, y, ...): the on-curve end point is stored at pt[1], pt[2],
            # but curveto() expects the end point last, hence the reordering below.
            # The stored outlines' y-axis is flipped (hence the y-pt[...] subtraction).
            if pt[0] == MOVETO:
                p.moveto(x+pt[1]*f, y-pt[2]*f)
            elif pt[0] == LINETO:
                p.lineto(x+pt[1]*f, y-pt[2]*f)
            elif pt[0] == CURVETO:
                p.curveto(x+pt[3]*f, y-pt[4]*f, x+pt[5]*f, y-pt[6]*f, x+pt[1]*f, y-pt[2]*f)
            elif pt[0] == CLOSE:
                p.closepath()
        # Advance the pen position by the measured character width.
        x += textwidth(ch, font=fontname, fontsize=fontsize, bold=bold, italic=italic)
    return p
#=====================================================================================================
#--- UTILITIES ---------------------------------------------------------------------------------------
_RANDOM_MAP = [90.0, 9.00, 4.00, 2.33, 1.50, 1.00, 0.66, 0.43, 0.25, 0.11, 0.01]
def _rnd_exp(bias=0.5):
bias = max(0, min(bias, 1)) * 10
i = int(floor(bias)) # bias*10 => index in the _map curve.
n = _RANDOM_MAP[i] # If bias is 0.3, rnd()**2.33 will average 0.3.
if bias < 10:
n += (_RANDOM_MAP[i+1]-n) * (bias-i)
return n
def random(v1=1.0, v2=None, bias=None):
    """ Returns a random number between v1 and v2, including v1 but not v2.
        With a single argument, returns a number between 0 and v1.
        If both v1 and v2 are integers, an integer is returned.
        The bias (0.0-1.0) represents preference towards lower or higher numbers.
    """
    if v2 is None:
        v1, v2 = 0, v1
    # With a bias, skew the uniform sample with an exponent curve.
    r = rnd() if bias is None else rnd() ** _rnd_exp(bias)
    v = v1 + r * (v2 - v1)
    if isinstance(v1, int) and isinstance(v2, int):
        v = int(v)
    return v
def grid(cols, rows, colwidth=1, rowheight=1, shuffled=False):
    """ Yields (x,y)-tuples for the given number of columns and rows.
        The horizontal space between points is colwidth,
        the vertical space is rowheight (the docstring previously said "colheight").
        With shuffled=True, rows and columns are visited in random order.
    """
    # list() so that shuffle() also works in Python 3, where range() is immutable
    # (a no-op in Python 2, where range() already returns a list).
    rows = list(range(int(rows)))
    cols = list(range(int(cols)))
    if shuffled:
        shuffle(rows)
        shuffle(cols)
    for y in rows:
        for x in cols:
            yield (x*colwidth, y*rowheight)
def files(path="*"):
    """ Returns the list of filenames matching the given path,
        which may contain wildcards (e.g. "images/*.png").
    """
    matches = glob(path)
    return matches
#=====================================================================================================
#--- PROTOTYPE ----------------------------------------------------------------------------------------
class Prototype(object):
    def __init__(self):
        """ A base class that allows on-the-fly extension.
            This means that external functions can be bound to it as methods,
            and properties set at runtime are copied correctly.
            Prototype can handle:
            - functions (these become class methods),
            - immutable types (str, unicode, int, long, float, bool),
            - lists, tuples and dictionaries of immutable types,
            - objects with a copy() method.
        """
        # Map of dynamically bound name => original value,
        # kept so that inherit() can re-bind them on another prototype.
        self._dynamic = {}
    def _deepcopy(self, value):
        # Returns a copy of the given value, bound to this instance where applicable.
        if isinstance(value, FunctionType):
            # Functions become methods of this object (Python 2 instancemethod).
            return instancemethod(value, self)
        elif hasattr(value, "copy"):
            return value.copy()
        elif isinstance(value, (list, tuple)):
            # NOTE(review): tuples are copied as lists here.
            return [self._deepcopy(x) for x in value]
        elif isinstance(value, dict):
            return dict([(k, self._deepcopy(v)) for k,v in value.items()])
        elif isinstance(value, (str, unicode, int, long, float, bool)):
            # Immutable values can be shared as-is.
            return value
        else:
            # Biggest problem here is how to find/relink circular references.
            raise TypeError, "Prototype can't bind %s." % str(value.__class__)
    def _bind(self, key, value):
        """ Adds a new method or property to the prototype.
            For methods, the given function is expected to take the object (i.e. self) as first parameter.
            For properties, values can be: list, tuple, dict, str, unicode, int, long, float, bool,
            or an object with a copy() method.
            For example, we can define a Layer's custom draw() method in two ways:
            - By subclassing:
                class MyLayer(Layer):
                    def draw(layer):
                        pass
                layer = MyLayer()
                layer.draw()
            - By function binding:
                def my_draw(layer):
                    pass
                layer = Layer()
                layer._bind("draw", my_draw)
                layer.draw()
        """
        # Remember the original value so inherit() can re-bind it elsewhere,
        # then attach a per-instance copy directly (bypassing any __setattr__).
        self._dynamic[key] = value
        object.__setattr__(self, key, self._deepcopy(value))
    def set_method(self, function, name=None):
        """ Creates a dynamic method (with the given name) from the given function.
            If no name is given, the function's own name is used.
        """
        if not name:
            name = function.__name__
        self._bind(name, function)
    def set_property(self, key, value):
        """ Adds a property to the prototype.
            Using this method ensures that dynamic properties are copied correctly - see inherit().
        """
        self._bind(key, value)
    def inherit(self, prototype):
        """ Inherit all the dynamic properties and methods of another prototype.
        """
        for k,v in prototype._dynamic.items():
            self._bind(k,v)
#=====================================================================================================
#--- EVENT HANDLER ------------------------------------------------------------------------------------
class EventHandler:
    """ Base class for objects that receive mouse/keyboard events from the canvas.
        Subclasses override the on_*() hooks below.
    """
    def __init__(self):
        # Assign through __dict__ directly so that subclasses combining
        # EventHandler with Prototype (multiple inheritance) work correctly:
        self.__dict__["enabled"] = True  # Receive events from the canvas?
        self.__dict__["focus"]   = False # True when this object receives the focus.
        self.__dict__["pressed"] = False # True when the mouse is pressed on this object.
        self.__dict__["dragged"] = False # True when the mouse is dragged on this object.
        self.__dict__["_queue"]  = []    # Queued (event, arguments) pairs.
    # Event hooks, called from the canvas; all are no-ops by default:
    def on_mouse_enter(self, mouse): pass
    def on_mouse_leave(self, mouse): pass
    def on_mouse_motion(self, mouse): pass
    def on_mouse_press(self, mouse): pass
    def on_mouse_release(self, mouse): pass
    def on_mouse_drag(self, mouse): pass
    def on_mouse_scroll(self, mouse): pass
    def on_key_press(self, keys): pass
    def on_key_release(self, keys): pass
    def queue_event(self, event, *args):
        """ Stores the event for later processing instead of firing it right away,
            e.g. layer.queue_event(layer.on_mouse_press, canvas.mouse).
            process_events() can then be called whenever desired, for example
            after the canvas has been drawn so that events can contain drawing commands.
        """
        self._queue.append((event, args))
    def process_events(self):
        """ Fires all queued events in order and empties the queue. """
        for handler, arguments in self._queue:
            handler(*arguments)
        self._queue = []
    # Note: there is no event propagation.
    # Event propagation means that, for example, if a layer is pressed
    # all its child (or parent) layers receive an on_mouse_press() event as well.
    # If this kind of behavior is desired, it is the responsibility of custom subclasses of Layer.
# Note: there is no event propagation.
# Event propagation means that, for example, if a layer is pressed
# all its child (or parent) layers receive an on_mouse_press() event as well.
# If this kind of behavior is desired, it is the responsibility of custom subclasses of Layer.
#=====================================================================================================
#--- TRANSITION --------------------------------------------------------------------------------------
# Transition.update() will tween from the last value to transition.set() new value in the given time.
# Transitions are used as attributes (e.g. position, rotation) for the Layer class.
TIME = 0 # The current time in this frame; changes when the canvas is updated.
LINEAR = "linear" # Interpolation at constant speed.
SMOOTH = "smooth" # Interpolation that eases in and out (geometry.smoothstep).
class Transition(object):
    def __init__(self, value, interpolation=SMOOTH):
        """ A value that tweens from its current value to a newly set() value
            over a given duration, driven by the module-global TIME.
            Used for the animatable attributes of Layer (position, rotation, ...).
        """
        self._v0 = value # Previous value => Transition.start.
        self._vi = value # Current value => Transition.current.
        self._v1 = value # Desired value => Transition.stop.
        self._t0 = TIME # Start time.
        self._t1 = TIME # End time.
        self._interpolation = interpolation # LINEAR or SMOOTH.
    def copy(self):
        # Returns a new Transition with the exact same state (used by Layer.copy()).
        t = Transition(None)
        t._v0 = self._v0
        t._vi = self._vi
        t._v1 = self._v1
        t._t0 = self._t0
        t._t1 = self._t1
        t._interpolation = self._interpolation
        return t
    def get(self):
        """ Returns the transition stop value.
        """
        return self._v1
    def set(self, value, duration=1.0):
        """ Sets the transition stop value, which will be reached in the given duration (seconds).
            Calling Transition.update() moves the Transition.current value toward Transition.stop.
        """
        if duration == 0:
            # If no duration is given, Transition.start = Transition.current = Transition.stop.
            self._vi = value
        self._v1 = value
        self._v0 = self._vi # Tween from wherever the value is right now.
        self._t0 = TIME # Now.
        self._t1 = TIME + duration
    @property
    def start(self):
        # The value at the start of the transition.
        return self._v0
    @property
    def stop(self):
        # The value at the end of the transition.
        return self._v1
    @property
    def current(self):
        # The in-between value at this point in time.
        return self._vi
    @property
    def done(self):
        # True once the transition's end time has passed.
        return TIME >= self._t1
    def update(self):
        """ Calculates the new current value. Returns True when done.
            The transition approaches the desired value according to the interpolation:
            - LINEAR: even transition over the given duration time,
            - SMOOTH: transition goes slower at the beginning and end.
        """
        if TIME >= self._t1 or self._vi is None:
            # Past the end time (or no current value yet): snap to the stop value.
            self._vi = self._v1
            return True
        else:
            # Calculate t: the elapsed time as a number between 0.0 and 1.0.
            t = (TIME-self._t0) / (self._t1-self._t0)
            if self._interpolation == LINEAR:
                self._vi = self._v0 + (self._v1-self._v0) * t
            else:
                self._vi = self._v0 + (self._v1-self._v0) * geometry.smoothstep(0.0, 1.0, t)
            return False
#--- LAYER -------------------------------------------------------------------------------------------
# The Layer class is responsible for the following:
# - it has a draw() method to override; all sorts of NodeBox drawing commands can be put here,
# - it has a transformation origin point and rotates/scales its drawn items as a group,
# - it has child layers that transform relative to this layer,
# - when its attributes (position, scale, angle, ...) change, they will tween smoothly over time.
_UID = 0
def _uid():
global _UID; _UID+=1; return _UID
RELATIVE = "relative" # Origin point is stored as float, e.g. (0.5, 0.5) = layer center.
ABSOLUTE = "absolute" # Origin point is stored as int, e.g. (100, 100) pixels.
class LayerRenderError(Exception):
    # Raised by Layer.render() when the layer (or one of its children) has infinite bounds.
    pass
# When Layer.clipped=True, children are clipped to the bounds of the layer.
# The layer clipping masks lazily changes size with the layer.
# When Layer.clipped=True, children are clipped to the bounds of the layer.
# The layer clipping mask lazily changes size with the layer.
class LayerClippingMask(ClippingMask):
    """ A clipping mask that always matches the current size of the given layer.
        If the layer has no width or height, the mask extends infinitely.
    """
    def __init__(self, layer):
        self.layer = layer
    def draw(self, fill=(0,0,0,1), stroke=None):
        # A falsy width/height (None or 0) means the layer is unbounded on that axis.
        w = self.layer.width  or geometry.INFINITE
        h = self.layer.height or geometry.INFINITE
        rect(0, 0, w, h, fill=fill, stroke=stroke)
class Layer(list, Prototype, EventHandler):
    """ A drawing layer with an animatable transformation state and child layers.
        - Layer.draw() can be overridden with NodeBox drawing commands;
          the layer transforms (translate/rotate/scale) its content as a group,
          around an adjustable origin point.
        - A Layer is itself a list: its items are its child layers,
          which transform relative to this layer.
        - Attribute changes (position, scale, rotation, ...) tween smoothly
          over Layer.duration seconds, using Transition objects.
    """
    def __init__(self, x=0, y=0, width=None, height=None, origin=(0,0),
                       scale=1.0, rotation=0, opacity=1.0, duration=0.0, name=None,
                       parent=None, **kwargs):
        """ Creates a new drawing layer that can be appended to the canvas.
            The duration defines the time (seconds) it takes to animate transformations or opacity.
            When the animation has terminated, layer.done=True.
        """
        # Decide whether the origin is given relatively (floats 0.0-1.0) or in pixels.
        if origin == CENTER:
            origin = (0.5,0.5)
            origin_mode = RELATIVE
        elif isinstance(origin[0], float) \
         and isinstance(origin[1], float):
            origin_mode = RELATIVE
        else:
            origin_mode = ABSOLUTE
        Prototype.__init__(self) # Facilitates extension on the fly.
        EventHandler.__init__(self)
        self._id = _uid()
        self.name = name # Layer name. Layers are accessible as ParentLayer.[name]
        self.canvas = None # The canvas this layer is drawn to.
        self.parent = parent # The layer this layer is a child of.
        self._x = Transition(x) # Layer horizontal position in pixels, from the left.
        self._y = Transition(y) # Layer vertical position in pixels, from the bottom.
        self._width = Transition(width) # Layer width in pixels.
        self._height = Transition(height) # Layer height in pixels.
        self._dx = Transition(origin[0]) # Transformation origin point.
        self._dy = Transition(origin[1]) # Transformation origin point.
        self._origin = origin_mode # Origin point as RELATIVE or ABSOLUTE coordinates?
        self._scale = Transition(scale) # Layer width and height scale.
        self._rotation = Transition(rotation) # Layer rotation.
        self._opacity = Transition(opacity) # Layer opacity.
        self.duration = duration # The time it takes to animate transformations.
        self.top = True # Draw on top of or beneath parent?
        self.flipped = False # Flip the layer horizontally?
        self.clipped = False # Clip child layers to bounds?
        self.hidden = False # Hide the layer?
        self._transform_cache = None # Cache of the local transformation matrix.
        self._transform_stack = None # Cache of the cumulative transformation matrix.
        self._clipping_mask = LayerClippingMask(self)
    @classmethod
    def from_image(self, img, *args, **kwargs):
        """ Returns a new layer that renders the given image, and with the same size as the image.
            The layer's draw() method and an additional image property are set.
        """
        # Note: the first classmethod parameter is (unconventionally) named self; it is the class.
        if not isinstance(img, Image):
            img = Image(img, data=kwargs.get("data"))
        kwargs.setdefault("width", img.width)
        kwargs.setdefault("height", img.height)
        def draw(layer):
            image(layer.image)
        layer = self(*args, **kwargs)
        layer.set_method(draw)
        layer.set_property("image", img)
        return layer
    @classmethod
    def from_function(self, function, *args, **kwargs):
        """ Returns a new layer that renders the drawing commands in the given function.
            The layer's draw() method is set.
        """
        def draw(layer):
            function(layer)
        layer = self(*args, **kwargs)
        layer.set_method(draw)
        return layer
    def copy(self, parent=None, canvas=None):
        """ Returns a copy of the layer.
            All Layer properties will be copied, except for the new parent and canvas,
            which you need to define as optional parameters.
            This means that copies are not automatically appended to the parent layer or canvas.
        """
        layer = self.__class__() # Create instance of the derived class, not Layer.
        layer.duration = 0 # Copy all transitions instantly.
        layer.canvas = canvas
        layer.parent = parent
        layer.name = self.name
        layer._x = self._x.copy()
        layer._y = self._y.copy()
        layer._width = self._width.copy()
        layer._height = self._height.copy()
        layer._origin = self._origin
        layer._dx = self._dx.copy()
        layer._dy = self._dy.copy()
        layer._scale = self._scale.copy()
        layer._rotation = self._rotation.copy()
        layer._opacity = self._opacity.copy()
        layer.duration = self.duration # Restore the original animation duration.
        layer.top = self.top
        layer.flipped = self.flipped
        layer.clipped = self.clipped
        layer.hidden = self.hidden
        layer.enabled = self.enabled
        # Use base Layer.extend(), we don't care about what subclass.extend() does.
        Layer.extend(layer, [child.copy() for child in self])
        # Inherit all the dynamic properties and methods.
        Prototype.inherit(layer, self)
        return layer
    def __getattr__(self, key):
        """ Returns the given property, or the layer with the given name.
            This makes child layers accessible as e.g. parent_layer.child_name.
        """
        if key in self.__dict__:
            return self.__dict__[key]
        for layer in self:
            if layer.name == key:
                return layer
        raise AttributeError, "%s instance has no attribute '%s'" % (self.__class__.__name__, key)
    def _set_container(self, key, value):
        # If Layer.canvas is set to None, the canvas should no longer contain the layer.
        # If Layer.canvas is set to Canvas, this canvas should contain the layer.
        # Remove the layer from the old canvas/parent.
        # Append the layer to the new container.
        if self in (self.__dict__.get(key) or ()):
            self.__dict__[key].remove(self)
        if isinstance(value, list) and self not in value:
            list.append(value, self)
        self.__dict__[key] = value
    def _get_canvas(self):
        return self.__dict__.get("canvas")
    def _get_parent(self):
        return self.__dict__.get("parent")
    def _set_canvas(self, canvas):
        self._set_container("canvas", canvas)
    def _set_parent(self, layer):
        self._set_container("parent", layer)
    canvas = property(_get_canvas, _set_canvas)
    parent = property(_get_parent, _set_parent)
    @property
    def root(self):
        # The topmost parent in the layer hierarchy (self if there is no parent).
        return self.parent and self.parent.root or self
    @property
    def layers(self):
        # The child layers (the Layer is itself a list of its children).
        return self
    # insert/append/extend/remove/pop bypass the parent property
    # (writing to __dict__ directly) so the child is not re-appended by _set_container().
    def insert(self, index, layer):
        list.insert(self, index, layer)
        layer.__dict__["parent"] = self
    def append(self, layer):
        list.append(self, layer)
        layer.__dict__["parent"] = self
    def extend(self, layers):
        for layer in layers:
            Layer.append(self, layer)
    def remove(self, layer):
        list.remove(self, layer)
        layer.__dict__["parent"] = None
    def pop(self, index):
        layer = list.pop(self, index)
        layer.__dict__["parent"] = None
        return layer
    # Animatable attributes are backed by Transition objects:
    # the getter returns the transition stop value, the setter starts a new
    # tween (over Layer.duration seconds) and invalidates the transform cache.
    def _get_x(self):
        return self._x.get()
    def _get_y(self):
        return self._y.get()
    def _get_width(self):
        return self._width.get()
    def _get_height(self):
        return self._height.get()
    def _get_scale(self):
        return self._scale.get()
    def _get_rotation(self):
        return self._rotation.get()
    def _get_opacity(self):
        return self._opacity.get()
    def _set_x(self, x):
        self._transform_cache = None
        self._x.set(x, self.duration)
    def _set_y(self, y):
        self._transform_cache = None
        self._y.set(y, self.duration)
    def _set_width(self, width):
        self._transform_cache = None
        self._width.set(width, self.duration)
    def _set_height(self, height):
        self._transform_cache = None
        self._height.set(height, self.duration)
    def _set_scale(self, scale):
        self._transform_cache = None
        self._scale.set(scale, self.duration)
    def _set_rotation(self, rotation):
        self._transform_cache = None
        self._rotation.set(rotation, self.duration)
    def _set_opacity(self, opacity):
        # Opacity does not affect the transformation matrix, so no cache flush.
        self._opacity.set(opacity, self.duration)
    x = property(_get_x, _set_x)
    y = property(_get_y, _set_y)
    width = property(_get_width, _set_width)
    height = property(_get_height, _set_height)
    scaling = property(_get_scale, _set_scale) # Named "scaling" because scale() is a method.
    rotation = property(_get_rotation, _set_rotation)
    opacity = property(_get_opacity, _set_opacity)
    def _get_xy(self):
        return (self.x, self.y)
    def _set_xy(self, (x,y)): # Python 2 tuple parameter.
        self.x = x
        self.y = y
    xy = property(_get_xy, _set_xy)
    def _get_origin(self, relative=False):
        """ Returns the point (x,y) from which all layer transformations originate.
            When relative=True, x and y are defined percentually (0.0-1.0) in terms of width and height.
            In some cases x=0 or y=0 is returned:
            - For an infinite layer (width=None or height=None), we can't deduct the absolute origin
              from coordinates stored relatively (e.g. what is infinity*0.5?).
            - Vice versa, for an infinite layer we can't deduct the relative origin from coordinates
              stored absolute (e.g. what is 200/infinity?).
        """
        dx = self._dx.current
        dy = self._dy.current
        w = self._width.current
        h = self._height.current
        # Origin is stored as absolute coordinates and we want it relative.
        if self._origin == ABSOLUTE and relative:
            if w is None: w = 0
            if h is None: h = 0
            dx = w!=0 and dx/w or 0
            dy = h!=0 and dy/h or 0
        # Origin is stored as relative coordinates and we want it absolute.
        elif self._origin == RELATIVE and not relative:
            dx = w is not None and dx*w or 0
            dy = h is not None and dy*h or 0
        return dx, dy
    def _set_origin(self, x, y, relative=False):
        """ Sets the transformation origin point in either absolute or relative coordinates.
            For example, if a layer is 400x200 pixels, setting the origin point to (200,100)
            all transformations (translate, rotate, scale) originate from the center.
        """
        self._transform_cache = None
        self._dx.set(x, self.duration)
        self._dy.set(y, self.duration)
        self._origin = relative and RELATIVE or ABSOLUTE
    def origin(self, x=None, y=None, relative=False):
        """ Sets or returns the point (x,y) from which all layer transformations originate.
        """
        if x is not None:
            if x == CENTER:
                x, y, relative = 0.5, 0.5, True
            if y is not None:
                self._set_origin(x, y, relative)
        return self._get_origin(relative)
    def _get_relative_origin(self):
        return self.origin(relative=True)
    def _set_relative_origin(self, xy):
        self._set_origin(xy[0], xy[1], relative=True)
    relative_origin = property(_get_relative_origin, _set_relative_origin)
    def _get_absolute_origin(self):
        return self.origin(relative=False)
    def _set_absolute_origin(self, xy):
        self._set_origin(xy[0], xy[1], relative=False)
    absolute_origin = property(_get_absolute_origin, _set_absolute_origin)
    def _get_visible(self):
        return not self.hidden
    def _set_visible(self, b):
        self.hidden = not b
    visible = property(_get_visible, _set_visible)
    # Incremental transformations (all of these animate over Layer.duration):
    def translate(self, x, y):
        self.x += x
        self.y += y
    def rotate(self, angle):
        self.rotation += angle
    def scale(self, f):
        self.scaling *= f
    def flip(self):
        self.flipped = not self.flipped
    def _update(self):
        """ Called each frame from canvas._update() to update the layer transitions.
        """
        done = self._x.update()
        done &= self._y.update()
        done &= self._width.update()
        done &= self._height.update()
        done &= self._dx.update()
        done &= self._dy.update()
        done &= self._scale.update()
        done &= self._rotation.update()
        if not done: # i.e. the layer is being transformed
            self._transform_cache = None
        self._opacity.update()
        self.update()
        for layer in self:
            layer._update()
    def update(self):
        """Override this method to provide custom updating code.
        """
        pass
    @property
    def done(self):
        """ Returns True when all transitions have finished.
        """
        return self._x.done \
           and self._y.done \
           and self._width.done \
           and self._height.done \
           and self._dx.done \
           and self._dy.done \
           and self._scale.done \
           and self._rotation.done \
           and self._opacity.done
    def _draw(self):
        """ Draws the transformed layer and all of its children.
        """
        if self.hidden:
            return
        glPushMatrix()
        # Be careful that the transformations happen in the same order in Layer._transform().
        # translate => flip => rotate => scale => origin.
        # Center the contents around the origin point.
        dx, dy = self.origin(relative=False)
        glTranslatef(round(self._x.current), round(self._y.current), 0)
        if self.flipped:
            glScalef(-1, 1, 1)
        glRotatef(self._rotation.current, 0, 0, 1)
        glScalef(self._scale.current, self._scale.current, 1)
        # Enable clipping mask if Layer.clipped=True.
        if self.clipped:
            beginclip(self._clipping_mask)
        # Draw child layers below.
        for layer in self:
            if layer.top is False:
                layer._draw()
        # Draw layer.
        global _alpha
        _alpha = self._opacity.current # XXX should also affect child layers?
        glPushMatrix()
        glTranslatef(-round(dx), -round(dy), 0) # Layers are drawn relative from parent origin.
        self.draw()
        glPopMatrix()
        _alpha = 1
        # Draw child layers on top.
        for layer in self:
            if layer.top is True:
                layer._draw()
        if self.clipped:
            endclip()
        glPopMatrix()
    def draw(self):
        """Override this method to provide custom drawing code for this layer.
           At this point, the layer is correctly transformed.
        """
        pass
    def render(self):
        """ Returns the layer as a flattened image.
            The layer and all of its children need to have width and height set.
            Raises LayerRenderError otherwise.
        """
        b = self.bounds
        if geometry.INFINITE in (b.x, b.y, b.width, b.height):
            raise LayerRenderError, "can't render layer of infinite size"
        return render(lambda: (translate(-b.x,-b.y), self._draw()), b.width, b.height)
    def layer_at(self, x, y, clipped=False, enabled=False, transformed=True, _covered=False):
        """ Returns the topmost layer containing the mouse position, None otherwise.
            With clipped=True, no parts of child layers outside the parent's bounds are checked.
            With enabled=True, only enabled layers are checked (useful for events).
        """
        if self.hidden:
            # Don't do costly operations on layers the user can't see.
            return None
        if enabled and not self.enabled:
            # Skip disabled layers during event propagation.
            return None
        if _covered:
            # An ancestor is blocking this layer, so we can't select it.
            return None
        hit = self.contains(x, y, transformed)
        if clipped:
            # If (x,y) is not inside the clipped bounds, return None.
            # If children protruding beyond the layer's bounds are clipped,
            # we only need to look at children on top of the layer.
            # Each child is drawn on top of the previous child,
            # so we hit test them in reverse order (highest-first).
            if not hit:
                return None
            children = [layer for layer in reversed(self) if layer.top is True]
        else:
            # Otherwise, traverse all children in on-top-first order to avoid
            # selecting a child underneath the layer that is in reality
            # covered by a peer on top of the layer, further down the list.
            children = sorted(reversed(self), key=lambda layer: not layer.top)
        for child in children:
            # An ancestor (e.g. grandparent) may be covering the child.
            # This happens when it hit tested and is somewhere on top of the child.
            # We keep a recursive covered-state to verify visibility.
            # The covered-state starts as False, but stays True once it switches.
            _covered = _covered or (hit and not child.top)
            child = child.layer_at(x, y, clipped, enabled, transformed, _covered)
            if child is not None:
                # Note: "if child:" won't work because it can be an empty list (no children).
                # Should be improved by not having Layer inherit from list.
                return child
        if hit:
            return self
        else:
            return None
    def _transform(self, local=True):
        """ Returns the transformation matrix of the layer:
            a calculated state of its translation, rotation and scaling.
            If local=False, prepends all transformations of the parent layers,
            i.e. you get the absolute transformation state of a nested layer.
        """
        if self._transform_cache is None:
            # Calculate the local transformation matrix.
            # Be careful that the transformations happen in the same order in Layer._draw().
            # translate => flip => rotate => scale => origin.
            tf = Transform()
            dx, dy = self.origin(relative=False)
            tf.translate(round(self._x.current), round(self._y.current))
            if self.flipped:
                tf.scale(-1, 1)
            tf.rotate(self._rotation.current)
            tf.scale(self._scale.current, self._scale.current)
            tf.translate(-round(dx), -round(dy))
            self._transform_cache = tf
            # Flush the cumulative transformation cache of all children.
            def _flush(layer):
                layer._transform_stack = None
            self.traverse(_flush)
        if not local:
            # Return the cumulative transformation matrix.
            # All of the parent transformation states need to be up to date.
            # If not, we need to recalculate the whole chain.
            if self._transform_stack is None:
                if self.parent is None:
                    self._transform_stack = self._transform_cache.copy()
                else:
                    # Accumulate all the parent layer transformations.
                    # In the process, we update the transformation state of any outdated parent.
                    dx, dy = self.parent.origin(relative=False)
                    # Layers are drawn relative from parent origin.
                    tf = self.parent._transform(local=False).copy()
                    tf.translate(round(dx), round(dy))
                    self._transform_stack = self._transform_cache.copy()
                    self._transform_stack.prepend(tf)
            return self._transform_stack
        return self._transform_cache
    @property
    def transform(self):
        # The cumulative (absolute) transformation matrix of this layer.
        return self._transform(local=False)
    def _bounds(self, local=True):
        """ Returns the rectangle that encompasses the transformed layer and its children.
            If one of the children has width=None or height=None, bounds will be infinite.
        """
        w = self._width.current; w = w is None and geometry.INFINITE or w
        h = self._height.current; h = h is None and geometry.INFINITE or h
        # Find the transformed bounds of the layer:
        p = self.transform.map([(0,0), (w,0), (w,h), (0,h)])
        x = min(p[0][0], p[1][0], p[2][0], p[3][0])
        y = min(p[0][1], p[1][1], p[2][1], p[3][1])
        w = max(p[0][0], p[1][0], p[2][0], p[3][0]) - x
        h = max(p[0][1], p[1][1], p[2][1], p[3][1]) - y
        b = geometry.Bounds(x, y, w, h)
        if not local:
            for child in self:
                b = b.union(child.bounds)
        return b
    @property
    def bounds(self):
        # The rectangle encompassing this layer and all of its children.
        return self._bounds(local=False)
    def contains(self, x, y, transformed=True):
        """ Returns True if (x,y) falls within the layer's rectangular area.
            Useful for GUI elements: with transformed=False the calculations are much faster;
            and it will report correctly as long as the layer (or parent layer)
            is not rotated or scaled, and has its origin at (0,0).
        """
        w = self._width.current; w = w is None and geometry.INFINITE or w
        h = self._height.current; h = h is None and geometry.INFINITE or h
        if not transformed:
            x0, y0 = self.absolute_position()
            return x0 <= x <= x0+w \
               and y0 <= y <= y0+h
        # Find the transformed bounds of the layer:
        p = self.transform.map([(0,0), (w,0), (w,h), (0,h)])
        return geometry.point_in_polygon(p, x, y)
    hit_test = contains # Alias.
    def absolute_position(self, root=None):
        """ Returns the absolute (x,y) position (i.e. cumulative with parent position).
        """
        x = 0
        y = 0
        layer = self
        while layer is not None and layer != root:
            x += layer.x
            y += layer.y
            layer = layer.parent
        return x, y
    def traverse(self, visit=lambda layer: None):
        """ Recurses the layer structure and calls visit() on each child layer.
        """
        visit(self)
        [layer.traverse(visit) for layer in self]
    def __repr__(self):
        return "Layer(%sx=%.2f, y=%.2f, scale=%.2f, rotation=%.2f, opacity=%.2f, duration=%.2f)" % (
            self.name is not None and "name='%s', " % self.name or "",
            self.x,
            self.y,
            self.scaling,
            self.rotation,
            self.opacity,
            self.duration
        )
    def __eq__(self, other):
        # Layers are compared by unique id, not by list contents
        # (list.__eq__ would compare the child layers).
        return isinstance(other, Layer) and self._id == other._id
    def __ne__(self, other):
        return not self.__eq__(other)

layer = Layer # Lowercase alias.
#--- GROUP -------------------------------------------------------------------------------------------
class Group(Layer):
    def __init__(self, *args, **kwargs):
        """ A layer that serves as a container for other layers.
            It has no width or height and doesn't draw anything.
        """
        Layer.__init__(self, *args, **kwargs)
        # Set the underlying transitions directly (the width/height
        # properties below are redefined as read-only and always return 0).
        self._set_width(0)
        self._set_height(0)
    # Groups are plain containers; constructing them from an image or function is unsupported.
    # (Note: these overrides take no explicit cls parameter; the class lands in *args.)
    @classmethod
    def from_image(*args, **kwargs):
        raise NotImplementedError
    @classmethod
    def from_function(*args, **kwargs):
        raise NotImplementedError
    @property
    def width(self):
        return 0
    @property
    def height(self):
        return 0
    def layer_at(self, x, y, clipped=False, enabled=False, transformed=True, _covered=False):
        # Ignores clipped=True for Group (since it has no width or height).
        # The group itself is never returned as a hit; implicitly returns None
        # when no child contains (x,y).
        # NOTE(review): "if layer:" is falsy for a hit layer with no children
        # (layers are lists); Layer.layer_at uses "is not None" for this reason.
        for child in reversed(self):
            layer = child.layer_at(x, y, clipped, enabled, transformed, _covered)
            if layer:
                return layer

group = Group # Lowercase alias.
#=====================================================================================================
#--- MOUSE -------------------------------------------------------------------------------------------
# Mouse cursors:
# DEFAULT and HIDDEN are NodeBox-level markers handled in Mouse._set_cursor();
# the others are pyglet system cursor constants passed through to the window.
DEFAULT = "default"
HIDDEN = "hidden"
CROSS = pyglet.window.Window.CURSOR_CROSSHAIR
HAND = pyglet.window.Window.CURSOR_HAND
TEXT = pyglet.window.Window.CURSOR_TEXT
WAIT = pyglet.window.Window.CURSOR_WAIT
# Mouse buttons (see Mouse._set_button):
LEFT = "left"
RIGHT = "right"
MIDDLE = "middle"
class Mouse(Point):
    def __init__(self, canvas, x=0, y=0):
        """ Keeps track of the mouse position on the canvas, buttons pressed and the cursor icon.
        """
        Point.__init__(self, x, y)
        self._canvas = canvas
        self._cursor = DEFAULT # Mouse cursor: CROSS, HAND, HIDDEN, TEXT, WAIT.
        self._button = None # Mouse button pressed: LEFT, RIGHT, MIDDLE.
        self.modifiers = [] # Mouse button modifiers: CTRL, SHIFT, OPTION.
        self.pressed = False # True if the mouse button is pressed.
        self.dragged = False # True if the mouse is dragged.
        self.scroll = Point(0,0) # Scroll offset.
        self.dx = 0 # Relative offset from previous horizontal position.
        self.dy = 0 # Relative offset from previous vertical position.
    # Backwards compatibility due to an old typo:
    @property
    def vx(self):
        return self.dx
    @property
    def vy(self):
        return self.dy
    @property
    def relative_x(self):
        # Horizontal position as a 0.0-1.0 fraction of the canvas width.
        try: return float(self.x) / self._canvas.width
        except ZeroDivisionError:
            return 0
    @property
    def relative_y(self):
        # Vertical position as a 0.0-1.0 fraction of the canvas height.
        try: return float(self.y) / self._canvas.height
        except ZeroDivisionError:
            return 0
    def _get_cursor(self):
        return self._cursor
    def _set_cursor(self, mode):
        # DEFAULT is stored as None (presumably so the window falls back to the system default).
        self._cursor = mode != DEFAULT and mode or None
        if mode == HIDDEN:
            self._canvas._window.set_mouse_visible(False); return
        # NOTE(review): visibility is not restored here after HIDDEN was set;
        # assigning another cursor appears to leave the mouse invisible — confirm intended.
        self._canvas._window.set_mouse_cursor(
            self._canvas._window.get_system_mouse_cursor(
                self._cursor))
    cursor = property(_get_cursor, _set_cursor)
    def _get_button(self):
        return self._button
    def _set_button(self, button):
        # Translate pyglet button constants to the LEFT/RIGHT/MIDDLE strings above.
        self._button = \
            button == pyglet.window.mouse.LEFT and LEFT or \
            button == pyglet.window.mouse.RIGHT and RIGHT or \
            button == pyglet.window.mouse.MIDDLE and MIDDLE or None
    button = property(_get_button, _set_button)
    def __repr__(self):
        return "Mouse(x=%.1f, y=%.1f, pressed=%s, dragged=%s)" % (
            self.x, self.y, repr(self.pressed), repr(self.dragged))
#--- KEYBOARD ----------------------------------------------------------------------------------------
# Key codes (lowercase string names produced by Keys._decode):
BACKSPACE = "backspace"
DELETE = "delete"
TAB = "tab"
ENTER = "enter"
SPACE = "space"
ESCAPE = "escape"
UP = "up"
DOWN = "down"
# Note: LEFT/RIGHT rebind the mouse button constants above;
# the arrow keys and mouse buttons happen to share the same string values.
LEFT = "left"
RIGHT = "right"
# Key modifiers:
OPTION = \
   ALT = "option" # ALT is an alias for OPTION.
CTRL = "ctrl"
SHIFT = "shift"
COMMAND = "command"
MODIFIERS = (OPTION, CTRL, SHIFT, COMMAND)
class Keys(list):
    def __init__(self, canvas):
        """ Keeps track of the keys pressed and any modifiers (e.g. shift or control key).
            The object is itself a list of the (decoded) key codes currently held down.
        """
        self._canvas = canvas
        self.code = None # Last key pressed.
        self.char = "" # Last key character representation (i.e., SHIFT + "a" = "A").
        self.modifiers = [] # Modifier keys pressed (OPTION, CTRL, SHIFT, COMMAND).
        self.pressed = False
    def append(self, code):
        """ Registers the given key (pyglet symbol or string) as pressed. """
        code = self._decode(code)
        if code in MODIFIERS:
            self.modifiers.append(code)
        list.append(self, code)
        self.code = self[-1]
    def remove(self, code):
        """ Unregisters the given key (pyglet symbol or string). """
        code = self._decode(code)
        if code in MODIFIERS:
            self.modifiers.remove(code)
        # code is already decoded above (decoding is idempotent,
        # so the previous second _decode() call was redundant).
        list.remove(self, code)
        self.code = len(self) > 0 and self[-1] or None
    def _decode(self, code):
        """ Returns the lowercase string name for the given pyglet key symbol,
            e.g. 65288 => "backspace". String codes are normalized the same way.
        """
        if not isinstance(code, int):
            s = code
        else:
            s = pyglet.window.key.symbol_string(code) # 65288 => "BACKSPACE"
        s = s.lower() # "BACKSPACE" => "backspace"
        s = s.lstrip("_") # "_1" => "1"
        s = s.replace("return", ENTER) # "return" => "enter"
        s = s.replace("num_", "") # "num_space" => "space"
        s = s.endswith(MODIFIERS) and s.lstrip("lr") or s # "lshift" => "shift"
        return s
    def __repr__(self):
        # Bug fix: the "code" field previously printed repr(iter(self)) —
        # a list-iterator object — instead of the last key code.
        return "Keys(char=%s, code=%s, modifiers=%s, pressed=%s)" % (
            repr(self.char), repr(self.code), repr(self.modifiers), repr(self.pressed))
#=====================================================================================================
#--- CANVAS ------------------------------------------------------------------------------------------
VERY_LIGHT_GREY = 0.95
FRAME = 0
# Window styles.
WINDOW_DEFAULT = pyglet.window.Window.WINDOW_STYLE_DEFAULT
WINDOW_BORDERLESS = pyglet.window.Window.WINDOW_STYLE_BORDERLESS
# Configuration settings for the canvas.
# http://www.pyglet.org/doc/programming_guide/opengl_configuration_options.html
# The stencil buffer is enabled (we need it to do clipping masks).
# Multisampling will be enabled (if possible) to do anti-aliasing.
settings = OPTIMAL = dict(
# buffer_size = 32, # Let Pyglet decide automatically.
# red_size = 8,
# green_size = 8,
# blue_size = 8,
depth_size = 24,
stencil_size = 1,
alpha_size = 8,
double_buffer = 1,
sample_buffers = 1,
samples = 4
)
def _configure(settings):
""" Returns a pyglet.gl.Config object from the given dictionary of settings.
If the settings are not supported, returns the default settings.
"""
screen = pyglet.window.get_platform().get_default_display().get_default_screen()
c = pyglet.gl.Config(**settings)
try:
c = screen.get_best_config(c)
except pyglet.window.NoSuchConfigException:
# Probably the hardwarde doesn't support multisampling.
# We can still do some anti-aliasing by turning on GL_LINE_SMOOTH.
c = pyglet.gl.Config()
c = screen.get_best_config(c)
return c
class Canvas(list, Prototype, EventHandler):
def __init__(self, width=640, height=480, name="NodeBox for OpenGL", resizable=False, border=True, settings=OPTIMAL, vsync=True):
""" The main application window containing the drawing canvas.
It is opened when Canvas.run() is called.
It is a collection of drawable Layer objects, and it has its own draw() method.
This method must be overridden with your own drawing commands, which will be executed each frame.
Event handlers for keyboard and mouse interaction can also be overriden.
Events will be passed to layers that have been appended to the canvas.
"""
window = dict(
caption = name,
visible = False,
width = width,
height = height,
resizable = resizable,
style = border is False and WINDOW_BORDERLESS or WINDOW_DEFAULT,
config = _configure(settings),
vsync = vsync
)
Prototype.__init__(self)
EventHandler.__init__(self)
self.profiler = Profiler(self)
self._window = pyglet.window.Window(**window)
self._fps = 60 # Frames per second.
self._frame = 0 # The current frame.
self._elapsed = 0 # dt = time elapsed since last frame.
self._active = False # Application is running?
self.paused = False # Pause animation?
self._mouse = Mouse(self) # The mouse cursor location.
self._keys = Keys(self) # The keys pressed on the keyboard.
self._focus = None # The layer being focused by the mouse.
# Mouse and keyboard events:
self._window.on_mouse_enter = self._on_mouse_enter
self._window.on_mouse_leave = self._on_mouse_leave
self._window.on_mouse_motion = self._on_mouse_motion
self._window.on_mouse_press = self._on_mouse_press
self._window.on_mouse_release = self._on_mouse_release
self._window.on_mouse_drag = self._on_mouse_drag
self._window.on_mouse_scroll = self._on_mouse_scroll
self._window.on_key_pressed = False
self._window.on_key_press = self._on_key_press
self._window.on_key_release = self._on_key_release
self._window.on_text = self._on_text
self._window.on_text_motion = self._on_text_motion
self._window.on_move = self._on_move
self._window.on_resize = self._on_resize
self._window.on_close = self.stop
def _get_name(self):
return self._window.caption
def _set_name(self, str):
self._window.set_caption(str)
name = property(_get_name, _set_name)
def _get_vsync(self):
return self._window.vsync
def _set_vsync(self, bool):
self._window.set_vsync(bool)
vsync = property(_get_vsync, _set_vsync)
@property
def layers(self):
return self
def insert(self, index, layer):
list.insert(self, index, layer)
layer.__dict__["canvas"] = self
def append(self, layer):
list.append(self, layer)
layer.__dict__["canvas"] = self
def extend(self, layers):
for layer in layers:
self.append(layer)
def remove(self, layer):
list.remove(self, layer)
layer.__dict__["canvas"] = None
def pop(self, index):
layer = list.pop(index)
layer.__dict__["canvas"] = None
return layer
def _get_x(self):
return self._window.get_location()[0]
def _set_x(self, v):
self._window.set_location(v, self.y)
def _get_y(self):
return self._window.get_location()[1]
def _set_y(self, v):
self._window.set_location(self.x, v)
def _get_xy(self):
return (self.x, self.y)
def _set_xy(self, (x,y)):
self.x = x
self.y = y
def _get_width(self):
return self._window.width
def _get_height(self):
return self._window.height
def _get_size(self):
return (self.width, self.height)
def _set_width(self, v):
self._window.width = v
def _set_height(self, v):
self._window.height = v
def _set_size(self, (w,h)):
self.width = w
self.height = h
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
xy = property(_get_xy, _set_xy)
width = property(_get_width, _set_width)
height = property(_get_height, _set_height)
size = property(_get_size, _set_size)
def _get_fullscreen(self):
return self._window.fullscreen
def _set_fullscreen(self, mode=True):
self._window.set_fullscreen(mode)
fullscreen = property(_get_fullscreen, _set_fullscreen)
@property
def screen(self):
return pyglet.window.get_platform().get_default_display().get_default_screen()
@property
def frame(self):
""" Yields the current frame number.
"""
return self._frame
@property
def elapsed(self):
""" Yields the elapsed time since last frame.
"""
return self._elapsed
dt = elapsed
@property
def mouse(self):
""" Yields a Point(x, y) with the mouse position on the canvas.
"""
return self._mouse
@property
def keys(self):
return self._keys
@property # Backwards compatibility.
def key(self):
return self._keys
@property
def focus(self):
return self._focus
#--- Event dispatchers ------------------------------
# First events are dispatched, then update() and draw() are called.
def layer_at(self, x, y, **kwargs):
""" Find the topmost layer at the specified coordinates.
This method returns None if no layer was found.
"""
for layer in reversed(self):
layer = layer.layer_at(x, y, **kwargs)
if layer is not None:
return layer
return None
def _on_mouse_enter(self, x, y):
self._mouse.x = x
self._mouse.y = y
self.on_mouse_enter(self._mouse)
def _on_mouse_leave(self, x, y):
self._mouse.x = x
self._mouse.y = y
self.on_mouse_leave(self._mouse)
# When the mouse leaves the canvas, no layer has the focus.
if self._focus is not None:
self._focus.on_mouse_leave(self._mouse)
self._focus.focus = False
self._focus.pressed = False
self._focus.dragged = False
self._focus = None
def _on_mouse_motion(self, x, y, dx, dy):
self._mouse.x = x
self._mouse.y = y
self._mouse.dx = int(dx)
self._mouse.dy = int(dy)
self.on_mouse_motion(self._mouse)
# Get the topmost layer over which the mouse is hovering.
layer = self.layer_at(x, y, enabled=True)
# If the layer differs from the layer which currently has the focus,
# or the mouse is not over any layer, remove the current focus.
if self._focus is not None and (self._focus != layer or not self._focus.contains(x,y)):
self._focus.on_mouse_leave(self._mouse)
self._focus.focus = False
self._focus = None
# Set the focus.
if self.focus != layer and layer is not None:
self._focus = layer
self._focus.focus = True
self._focus.on_mouse_enter(self._mouse)
# Propagate mouse motion to layer with the focus.
if self._focus is not None:
self._focus.on_mouse_motion(self._mouse)
def _on_mouse_press(self, x, y, button, modifiers):
self._mouse.pressed = True
self._mouse.button = button
self._mouse.modifiers = [a for (a,b) in (
(CTRL, pyglet.window.key.MOD_CTRL),
(SHIFT, pyglet.window.key.MOD_SHIFT),
(OPTION, pyglet.window.key.MOD_OPTION)) if modifiers & b]
self.on_mouse_press(self._mouse)
# Propagate mouse clicking to the layer with the focus.
if self._focus is not None:
self._focus.pressed = True
self._focus.on_mouse_press(self._mouse)
def _on_mouse_release(self, x, y, button, modifiers):
if self._focus is not None:
self._focus.on_mouse_release(self._mouse)
self._focus.pressed = False
self._focus.dragged = False
self.on_mouse_release(self._mouse)
self._mouse.button = None
self._mouse.modifiers = []
self._mouse.pressed = False
self._mouse.dragged = False
if self._focus is not None:
# Get the topmost layer over which the mouse is hovering.
layer = self.layer_at(x, y, enabled=True)
# If the mouse is no longer over the layer with the focus
# (this can happen after dragging), remove the focus.
if self._focus != layer or not self._focus.contains(x,y):
self._focus.on_mouse_leave(self._mouse)
self._focus.focus = False
self._focus = None
# Propagate mouse to the layer with the focus.
if self._focus != layer and layer is not None:
layer.focus = True
layer.on_mouse_enter(self._mouse)
self._focus = layer
def _on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
self._mouse.dragged = True
self._mouse.x = x
self._mouse.y = y
self._mouse.dx = int(dx)
self._mouse.dy = int(dy)
self._mouse.modifiers = [a for (a,b) in (
(CTRL, pyglet.window.key.MOD_CTRL),
(SHIFT, pyglet.window.key.MOD_SHIFT),
(OPTION, pyglet.window.key.MOD_OPTION)) if modifiers & b]
# XXX also needs to log buttons.
self.on_mouse_drag(self._mouse)
# Propagate mouse dragging to the layer with the focus.
if self._focus is not None:
self._focus.dragged = True
self._focus.on_mouse_drag(self._mouse)
def _on_mouse_scroll(self, x, y, scroll_x, scroll_y):
self._mouse.scroll.x = scroll_x
self._mouse.scroll.y = scroll_y
self.on_mouse_scroll(self._mouse)
# Propagate mouse scrolling to the layer with the focus.
if self._focus is not None:
self._focus.on_mouse_scroll(self._mouse)
def _on_key_press(self, keycode, modifiers):
self._keys.pressed = True
self._keys.append(keycode)
if self._keys.code == TAB:
self._keys.char = "\t"
# The event is delegated in _update():
self._window.on_key_pressed = True
def _on_key_release(self, keycode, modifiers):
for layer in self:
layer.on_key_release(self.key)
self.on_key_release(self.key)
self._keys.char = ""
self._keys.remove(keycode)
self._keys.pressed = False
def _on_text(self, text):
self._keys.char = text
# The event is delegated in _update():
self._window.on_key_pressed = True
def _on_text_motion(self, keycode):
self._keys.char = ""
# The event is delegated in _update():
self._window.on_key_pressed = True
def _on_move(self, x, y):
self.on_move()
def _on_resize(self, width, height):
pyglet.window.Window.on_resize(self._window, width, height)
self.on_resize()
# Event methods are meant to be overridden or patched with Prototype.set_method().
def on_key_press(self, keys):
""" The default behavior of the canvas:
- ESC exits the application,
- CTRL-P pauses the animation,
- CTRL-S saves a screenshot.
"""
if keys.code == ESCAPE:
self.stop()
if keys.code == "p" and CTRL in keys.modifiers:
self.paused = not self.paused
if keys.code == "s" and CTRL in keys.modifiers:
self.save("nodebox-%s.png" % str(datetime.now()).split(".")[0].replace(" ","-").replace(":","-"))
def on_move(self):
pass
def on_resize(self):
pass
#--- Main loop --------------------------------------
def setup(self):
pass
def update(self):
pass
def draw(self):
self.clear()
def draw_overlay(self):
""" Override this method to draw once all the layers have been drawn.
"""
pass
draw_over = draw_overlay
def _setup(self):
# Set the window color, this will be transparent in saved images.
glClearColor(VERY_LIGHT_GREY, VERY_LIGHT_GREY, VERY_LIGHT_GREY, 0)
# Reset the transformation state.
# Most of this is already taken care of in Pyglet.
#glMatrixMode(GL_PROJECTION)
#glLoadIdentity()
#glOrtho(0, self.width, 0, self.height, -1, 1)
#glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# Enable line anti-aliasing.
glEnable(GL_LINE_SMOOTH)
# Enable alpha transparency.
glEnable(GL_BLEND)
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# Start the application (if not already running).
if not self._active:
self._window.switch_to()
self._window.dispatch_events()
self._window.set_visible()
self._active = True
self.clear()
self.setup()
def _draw(self, lapse=0):
""" Draws the canvas and its layers.
This method gives the same result each time it gets drawn; only _update() advances state.
"""
if self.paused:
return
self._window.switch_to()
glPushMatrix()
self.draw()
glPopMatrix()
glPushMatrix()
for layer in self:
layer._draw()
glPopMatrix()
glPushMatrix()
self.draw_overlay()
glPopMatrix()
def _update(self, lapse=0):
""" Updates the canvas and its layers.
This method does not actually draw anything, it only updates the state.
"""
self._elapsed = lapse
if not self.paused:
# Advance the animation by updating all layers.
# This is only done when the canvas is not paused.
# Events will still be propagated during pause.
global TIME; TIME = time()
self._frame += 1
self.update()
for layer in self:
layer._update()
if self._window.on_key_pressed is True:
# Fire on_key_press() event,
# which combines _on_key_press(), _on_text() and _on_text_motion().
self._window.on_key_pressed = False
self.on_key_press(self._keys)
for layer in self:
layer.on_key_press(self._keys)
def stop(self):
# If you override this method, don't forget to call Canvas.stop() to exit the app.
# Any user-defined stop method, added with canvas.set_method() or canvas.run(stop=stop),
# is called first.
try: self._user_defined_stop()
except:
pass
for f in (self._update, self._draw):
pyglet.clock.unschedule(f)
self._window.close()
self._active = False
pyglet.app.exit()
def clear(self):
""" Clears the previous frame from the canvas.
"""
glClear(GL_COLOR_BUFFER_BIT)
glClear(GL_DEPTH_BUFFER_BIT)
glClear(GL_STENCIL_BUFFER_BIT)
def run(self, draw=None, setup=None, update=None, stop=None):
""" Opens the application windows and starts drawing the canvas.
Canvas.setup() will be called once during initialization.
Canvas.draw() and Canvas.update() will be called each frame.
Canvas.clear() needs to be called explicitly to clear the previous frame drawing.
Canvas.stop() closes the application window.
If the given setup, draw or update parameter is a function,
it overrides that canvas method.
"""
if isinstance(setup, FunctionType):
self.set_method(setup, name="setup")
if isinstance(draw, FunctionType):
self.set_method(draw, name="draw")
if isinstance(update, FunctionType):
self.set_method(update, name="update")
if isinstance(stop, FunctionType):
self.set_method(stop, name="stop")
self._setup()
self.fps = self._fps # Schedule the _update and _draw events.
pyglet.app.run()
@property
def active(self):
return self._active
def _get_fps(self):
return self._fps
def _set_fps(self, v):
# Use pyglet.clock to schedule _update() and _draw() events.
# The clock will then take care of calling them enough times.
# Note: frames per second is related to vsync.
# If the vertical refresh rate is about 30Hz you'll get top speed of around 33fps.
# It's probably a good idea to leave vsync=True if you don't want to fry the GPU.
for f in (self._update, self._draw):
pyglet.clock.unschedule(f)
if v is None:
pyglet.clock.schedule(f)
if v > 0:
pyglet.clock.schedule_interval(f, 1.0/v)
self._fps = v
fps = property(_get_fps, _set_fps)
#--- Frame export -----------------------------------
def render(self):
""" Returns a screenshot of the current frame as a texture.
This texture can be passed to the image() command.
"""
return pyglet.image.get_buffer_manager().get_color_buffer().get_texture()
buffer = screenshot = render
@property
def texture(self):
return pyglet.image.get_buffer_manager().get_color_buffer().get_texture()
def save(self, path):
""" Exports the current frame as a PNG-file.
"""
pyglet.image.get_buffer_manager().get_color_buffer().save(path)
#--- Prototype --------------------------------------
def __setattr__(self, k, v):
# Canvas is a Prototype, so Canvas.draw() can be overridden
# but it can also be patched with Canvas.set_method(draw).
# Specific methods (setup, draw, mouse and keyboard events) can also be set directly
# (e.g. canvas.on_mouse_press = my_mouse_handler).
# This way we don't have to explain set_method() to beginning users..
if isinstance(v, FunctionType) and (k in ("setup", "draw", "update", "stop") \
or k.startswith("on_") and k in (
"on_mouse_enter",
"on_mouse_leave",
"on_mouse_motion",
"on_mouse_press",
"on_mouse_release",
"on_mouse_drag",
"on_mouse_scroll",
"on_key_press",
"on_key_release",
"on_move",
"on_resize")):
self.set_method(v, name=k)
else:
object.__setattr__(self, k, v)
def set_method(self, function, name=None):
if name == "stop" \
or name is None and function.__name__ == "stop":
Prototype.set_method(self, function, name="_user_defined_stop") # Called from Canvas.stop().
else:
Prototype.set_method(self, function, name)
def __repr__(self):
return "Canvas(name='%s', size='%s', layers=%s)" % (self.name, self.size, repr(list(self)))
#--- PROFILER ----------------------------------------------------------------------------------------
CUMULATIVE = "cumulative"
SLOWEST = "slowest"
_profile_canvas = None
_profile_frames = 100
def profile_run():
for i in range(_profile_frames):
_profile_canvas._update()
_profile_canvas._draw()
class Profiler:
def __init__(self, canvas):
self.canvas = canvas
@property
def framerate(self):
return pyglet.clock.get_fps()
def run(self, draw=None, setup=None, update=None, frames=100, sort=CUMULATIVE, top=30):
""" Runs cProfile on the canvas for the given number of frames.
The performance statistics are returned as a string, sorted by SLOWEST or CUMULATIVE.
For example, instead of doing canvas.run(draw):
print canvas.profiler.run(draw, frames=100)
"""
# Register the setup, draw, update functions with the canvas (if given).
if isinstance(setup, FunctionType):
self.canvas.set_method(setup, name="setup")
if isinstance(draw, FunctionType):
self.canvas.set_method(draw, name="draw")
if isinstance(update, FunctionType):
self.canvas.set_method(update, name="update")
# If enabled, turn Psyco off.
psyco_stopped = False
try:
psyco.stop()
psyco_stopped = True
except:
pass
# Set the current canvas and the number of frames to profile.
# The profiler will then repeatedly execute canvas._update() and canvas._draw().
# Statistics are redirected from stdout to a temporary file.
global _profile_canvas, _profile_frames
_profile_canvas = self.canvas
_profile_frames = frames
import cProfile
import pstats
cProfile.run("profile_run()", "_profile")
p = pstats.Stats("_profile")
p.stream = open("_profile", "w")
p.sort_stats(sort==SLOWEST and "time" or sort).print_stats(top)
p.stream.close()
s = open("_profile").read()
remove("_profile")
# Restart Psyco if we stopped it.
if psyco_stopped:
psyco.profile()
return s
#--- LIBRARIES ---------------------------------------------------------------------------------------
# Import the library and assign it a _ctx variable containing the current context.
# This mimics the behavior in NodeBox for Mac OS X.
def ximport(library):
from sys import modules
library = __import__(library)
library._ctx = modules[__name__]
return library
#-----------------------------------------------------------------------------------------------------
# Linear interpolation math for BezierPath.point() etc.
import bezier
| [
"msarch@free.fr"
] | msarch@free.fr |
086a9a37c222334524b2121455b685678a95f665 | 63c7060562ec5d1a9153f0454ea6886b0a62a28e | /tb/axi_cdma/test_axi_cdma.py | 6b7ce9326dc3e25a24752ed080d6e17b2cf42064 | [
"MIT"
] | permissive | alexforencich/verilog-axi | 666e6dfbd14fd124bdcbc2798b4f557347fb8261 | 38915fb5330cb8270b454afc0140a94489dc56db | refs/heads/master | 2023-03-30T07:34:17.721579 | 2023-03-30T07:12:13 | 2023-03-30T07:12:13 | 142,810,315 | 1,042 | 342 | MIT | 2023-03-05T19:52:57 | 2018-07-30T01:36:26 | Verilog | UTF-8 | Python | false | false | 6,800 | py | """
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import pytest
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.axi import AxiBus, AxiRam
from cocotbext.axi.stream import define_stream
DescBus, DescTransaction, DescSource, DescSink, DescMonitor = define_stream("Desc",
signals=["read_addr", "write_addr", "len", "tag", "valid", "ready"]
)
DescStatusBus, DescStatusTransaction, DescStatusSource, DescStatusSink, DescStatusMonitor = define_stream("DescStatus",
signals=["tag", "error", "valid"]
)
class TB(object):
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.clk, 10, units="ns").start())
# control interface
self.desc_source = DescSource(DescBus.from_prefix(dut, "s_axis_desc"), dut.clk, dut.rst)
self.desc_status_sink = DescStatusSink(DescStatusBus.from_prefix(dut, "m_axis_desc_status"), dut.clk, dut.rst)
# AXI interface
self.axi_ram = AxiRam(AxiBus.from_prefix(dut, "m_axi"), dut.clk, dut.rst, size=2**16)
dut.enable.setimmediatevalue(0)
def set_idle_generator(self, generator=None):
if generator:
self.desc_source.set_pause_generator(generator())
self.axi_ram.write_if.b_channel.set_pause_generator(generator())
self.axi_ram.read_if.r_channel.set_pause_generator(generator())
def set_backpressure_generator(self, generator=None):
if generator:
self.axi_ram.write_if.aw_channel.set_pause_generator(generator())
self.axi_ram.write_if.w_channel.set_pause_generator(generator())
self.axi_ram.read_if.ar_channel.set_pause_generator(generator())
async def cycle_reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
self.dut.rst.value = 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, data_in=None, idle_inserter=None, backpressure_inserter=None):
tb = TB(dut)
byte_lanes = tb.axi_ram.write_if.byte_lanes
step_size = 1 if int(os.getenv("PARAM_ENABLE_UNALIGNED")) else byte_lanes
tag_count = 2**len(tb.desc_source.bus.tag)
cur_tag = 1
await tb.cycle_reset()
tb.set_idle_generator(idle_inserter)
tb.set_backpressure_generator(backpressure_inserter)
dut.enable.value = 1
for length in list(range(1, byte_lanes*4+1))+[128]:
for read_offset in list(range(8, 8+byte_lanes*2, step_size))+list(range(4096-byte_lanes*2, 4096, step_size)):
for write_offset in list(range(8, 8+byte_lanes*2, step_size))+list(range(4096-byte_lanes*2, 4096, step_size)):
tb.log.info("length %d, read_offset %d, write_offset %d", length, read_offset, write_offset)
read_addr = read_offset+0x1000
write_addr = 0x00008000+write_offset+0x1000
test_data = bytearray([x % 256 for x in range(length)])
tb.axi_ram.write(read_addr, test_data)
tb.axi_ram.write(write_addr & 0xffff80, b'\xaa'*(len(test_data)+256))
desc = DescTransaction(read_addr=read_addr, write_addr=write_addr, len=len(test_data), tag=cur_tag)
await tb.desc_source.send(desc)
status = await tb.desc_status_sink.recv()
tb.log.info("status: %s", status)
assert int(status.tag) == cur_tag
assert int(status.error) == 0
tb.log.debug("%s", tb.axi_ram.hexdump_str((write_addr & ~0xf)-16, (((write_addr & 0xf)+length-1) & ~0xf)+48))
assert tb.axi_ram.read(write_addr-8, len(test_data)+16) == b'\xaa'*8+test_data+b'\xaa'*8
cur_tag = (cur_tag + 1) % tag_count
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def cycle_pause():
return itertools.cycle([1, 1, 1, 0])
if cocotb.SIM_NAME:
for test in [run_test]:
factory = TestFactory(test)
factory.add_option("idle_inserter", [None, cycle_pause])
factory.add_option("backpressure_inserter", [None, cycle_pause])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
@pytest.mark.parametrize("unaligned", [0, 1])
@pytest.mark.parametrize("axi_data_width", [8, 16, 32])
def test_axi_cdma(request, axi_data_width, unaligned):
dut = "axi_cdma"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['AXI_DATA_WIDTH'] = axi_data_width
parameters['AXI_ADDR_WIDTH'] = 16
parameters['AXI_STRB_WIDTH'] = parameters['AXI_DATA_WIDTH'] // 8
parameters['AXI_ID_WIDTH'] = 8
parameters['AXI_MAX_BURST_LEN'] = 16
parameters['LEN_WIDTH'] = 20
parameters['TAG_WIDTH'] = 8
parameters['ENABLE_UNALIGNED'] = unaligned
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
106e8ef9bdd0d3126da30706509d16c6946a7daf | 138c96ca9d4fc9755cd3473bb6a5f614a446c3a3 | /05_chapter/kreator_postaci.py | 3c27b68f6f83a173ee65f442eaa3752192fd21d4 | [] | no_license | EdytaBalcerzak/python-for-all | 553d4a75fd800eef789fd76ad7cbbce982b81648 | 3b41ea38f978761a3b940134558e5c861c4810ea | refs/heads/master | 2021-01-20T09:24:09.975892 | 2017-04-24T16:34:22 | 2017-04-24T16:34:22 | 82,615,394 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,474 | py | # Napisz program Kreator postaci do gry z podziałem na role. Gracz powinien
# otrzymać pulę 30 punktów, którą może spożytkować na cztery atrybuty: siła,
# zdrowie, mądrość i zręczność. Gracz powinien mieć możliwość przeznaczania
# punktów z puli na dowolny atrybut, jak również możliwość odbierania
# punktów przypisanych do atrybutu i przekazywania ich z powrotem do puli.
print("Witaj w grze")
print("Za moment stoczysz walkę, przygotuj się do niej odpowiednio")
print("Masz do dyspozycji 4 atrybuty i 30 pkt do rozdzielenia na nie")
print("\nTwoje atrybuty to:\n")
atrybuty = {"sila": 0, "zdrowie": 0, "madrosc": 0, "zrecznosc": 0}
punkty = 30
for zdolnosci in atrybuty:
print(zdolnosci)
print("\nIlość Twoich punktów: ", punkty)
wybor = None
wybor_atrybutu = ""
while wybor != 0:
print(
"""
0 - wyjdź z gry
1 - dodaj punkty
2 - usuń punkty
3 - sprawdz stan punktów
"""
)
wybor = int(input("Co wybierasz? "))
if wybor == 0:
print("\nDo zobaczenia")
elif wybor == 1:
for lista in atrybuty:
print(lista)
if punkty > 0:
wybor_atrybutu = input("\nW któym atrybucie chcesz zmienić punkty? ")
if wybor_atrybutu in atrybuty:
ilosc_punktow = int(input("\nPodaj ilość punktów , którą chcesz przyznać atrybutowi: "))
punkty -= ilosc_punktow
if punkty >= 0:
atrybuty[wybor_atrybutu] += ilosc_punktow
else:
punkty += ilosc_punktow
print("\nMasz zbyt mala ilosc punktow")
else:
print("\nmasz zbyt małą ilość punktów")
elif wybor == 2:
if wybor_atrybutu in atrybuty:
for lista in atrybuty:
print(lista)
wybor_atrybutu = input("\nZ którego atrybutu chcesz usunąć punkty? ")
odjete_punkty = int(input("\nIle punktow chcesz odjac? "))
if odjete_punkty > atrybuty[wybor_atrybutu]:
print(wybor_atrybutu, " nie posiada tylu punktów")
else:
atrybuty[wybor_atrybutu] -= odjete_punkty
punkty += odjete_punkty
else:
print("\natrybuty nie posiadają punktów")
elif wybor == 3:
print(atrybuty)
print("\nilość dostępnych punktów ", punkty)
input("Aby zakonczyc grę, nacisnij Enter")
| [
"edytabalcerzak1@gmail.com"
] | edytabalcerzak1@gmail.com |
3611831f18561cfa5af0f745acdf03a946f45c97 | d3762b1b4d908b2b43f6e0ae362daa7136c6c7a4 | /elections/management/commands/migrate_data.py | d427572608b5a937a16039325feb542271465cab | [] | no_license | pbahle/elections-api | c58cdf2b05f1560c8d6a69f8bc07e878458585c1 | 60cc06610ab7a279102018078f29f38d31e8bd26 | refs/heads/master | 2020-09-02T10:44:03.663386 | 2019-11-02T19:26:30 | 2019-11-02T19:26:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | # pylint: disable=no-self-use
import sys
from datetime import timedelta
from pathlib import Path
from django.core.management.base import BaseCommand
from django.utils import timezone
import log
from elections import defaults
from elections.helpers import normalize_jurisdiction
from elections.models import District, DistrictCategory, Election, Party, Position
class Command(BaseCommand):
help = "Initialize contants and migrate data between existing models"
def handle(self, verbosity: int, **_kwargs):
log.init(verbosity=verbosity if '-v' in sys.argv else 2)
defaults.initialize_parties()
defaults.initialize_districts()
self.update_elections()
self.update_jurisdictions()
self.import_descriptions()
self.export_descriptions()
def update_elections(self):
for election in Election.objects.filter(active=True):
age = timezone.now() - timedelta(weeks=3)
if election.date < age.date():
log.info(f'Deactivating election: {election}')
election.active = False
election.save()
def update_jurisdictions(self):
jurisdiction = DistrictCategory.objects.get(name="Jurisdiction")
for district in District.objects.filter(category=jurisdiction):
old = district.name
new = normalize_jurisdiction(district.name)
if new != old:
if District.objects.filter(category=jurisdiction, name=new):
log.warning(f'Deleting district {old!r} in favor of {new!r}')
district.delete()
else:
log.info(f'Renaming district {old!r} to {new!r}')
district.name = new
district.save()
def import_descriptions(self):
pass
def export_descriptions(self):
elections = {}
for election in Election.objects.all():
elections[election.name] = election.description
self._write('elections', elections)
districts = {}
for category in DistrictCategory.objects.all():
districts[category.name] = category.description
self._write('districts', districts)
parties = {}
for party in Party.objects.all():
parties[party.name] = party.description
self._write('parties', parties)
positions = {}
for position in Position.objects.all():
positions[position.name] = position.description
self._write('positions', positions)
def _write(self, name, data):
with Path(f'content/{name}.txt').open('w') as f:
for key, value in sorted(data.items()):
f.write(f'name: {key}\n')
f.write(f'description: {value}\n')
f.write('\n')
| [
"jacebrowning@gmail.com"
] | jacebrowning@gmail.com |
9c7884d6188ebf3902443ffe842e48faf11034b4 | 521111c3b272748b41c98d3bf238b5b64bf5b36f | /dynamics_sim-master/games/hdb.py | 07b0ccb0e6352c1aefe6d452c7d1c15d7c7014fe | [] | no_license | aaandrew152/dynamics_sim | 5ebd3c2d1fc9408e2f4d0ec414191745dffbdc7c | a2bae9dd550f17013a94bc969b8d7c92cb4dc029 | refs/heads/master | 2022-08-12T15:31:49.438988 | 2022-08-02T17:33:18 | 2022-08-02T17:33:18 | 32,927,918 | 6 | 3 | null | 2015-03-26T13:09:35 | 2015-03-26T13:09:35 | null | UTF-8 | Python | false | false | 950 | py | from games.game import SymmetricNPlayerGame
class HawkDoveBourgeois(SymmetricNPlayerGame):
    """
    A class used to represent the 2 player hawk-dove-bourgeois game. See U{http://www.life.umd.edu/classroom/zool360/L18-ESS/ess.html}
    """
    DEFAULT_PARAMS = dict(v=30, c=100)
    STRATEGY_LABELS = ('Hawk', 'Dove', 'Bourgeois')
    # BUG FIX: the original ('Bourgeois Bourgeois') was a plain string, not a
    # tuple (missing trailing comma).  classify() returns an index into this
    # sequence, so it must be a 1-tuple like STRATEGY_LABELS is a 3-tuple.
    EQUILIBRIA_LABELS = ('Bourgeois Bourgeois',)

    def __init__(self, v, c):
        """Build the 3x3 symmetric payoff matrix from prize *v* and fight cost *c*."""
        payoff_matrix = (((v - c) / 2, v, 3 * v / 4 - c / 4),
                         (0, v / 2, v / 4),
                         ((v - c) / 4, 3 * v / 4, v / 2))
        super(HawkDoveBourgeois, self).__init__(payoff_matrix, 2)

    @classmethod
    def classify(cls, params, state, tolerance):
        """Return equilibrium 0 (Bourgeois/Bourgeois) when nearly the whole
        population plays Bourgeois; otherwise defer to the base classifier."""
        threshold = 1 - tolerance
        if state[0][2] >= threshold:
            return 0  # Bourgeois vs. Bourgeois
        else:
            return super(HawkDoveBourgeois, cls).classify(params, state, tolerance)
| [
"pingandrew@gmail.com"
] | pingandrew@gmail.com |
8b471253d968c59ece847fca7d424aeb60978e69 | 658af335063dc25e97630694ffe95d6a553e0d6b | /Awele/Joueurs/joueur_alphabeta_a.py | a4a22d8cbb13507fe8b61f989afc0d752cc5e7c0 | [] | no_license | Juju49/2i013Players | d99d4b63b86717e47925c1245eaf072beec605e2 | 69e727451095a08c871b2706ddc950b39c22d226 | refs/heads/master | 2020-04-18T03:36:04.306593 | 2019-05-21T16:03:08 | 2019-05-21T16:03:08 | 167,204,806 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("../..")
import game
import time
# Module-level state for the alpha-beta player.
MOI = None  # player number (1 or 2) of this AI; set at the start of saisieCoup()
JEU = None  # snapshot of the game state at the start of the current turn
N = 3  # maximum look-ahead depth for the negamax search
WEIGHTS = [0.35, 0.15, 0.5]  # weights for [EScore, Egrenier, EGagne] in evaluation()
TIMEOUT = 1.0  # time budget (seconds) per move before the search is cut short
START_TIME = None  # wall-clock timestamp when the current move computation began
def getparcoursJoueur(joueur):
    """Int -> List[Tuple(Int, Int)]
    Return the board-cell index pairs in sowing order, starting at the
    beginning of the row belonging to the given player (1 or 2).
    """
    row_player1 = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
    row_player2 = [(1, 5), (1, 4), (1, 3), (1, 2), (1, 1), (1, 0)]
    if joueur == 1:
        return row_player1 + row_player2
    if joueur == 2:
        return row_player2 + row_player1
def EScore(jeu):
    """Heuristic: net capture gain since the turn started.

    Compares both players' scores in *jeu* against the start-of-turn
    snapshot JEU; positive when the player to move gained more than
    the opponent did.
    """
    current = game.getJoueur(jeu)
    opponent = current % 2 + 1
    my_gain = game.getScore(jeu, current) - game.getScore(JEU, current)
    their_gain = game.getScore(jeu, opponent) - game.getScore(JEU, opponent)
    return my_gain - their_gain
def Egrenier(jeu):
    """Heuristic over the current player's own row of six cells.

    Penalizes cells holding 1-3 seeds (capturable by the opponent) and
    rewards seeds beyond 4 in a cell (a safe "granary").
    """
    plateau = game.getPlateau(jeu)
    own_row = getparcoursJoueur(game.getJoueur(jeu))[:6]
    score = 0
    for i, j in own_row:
        seeds = plateau[i][j]
        if 0 < seeds < 4:
            score -= 1  # penalize capturable configurations
        elif seeds > 4:
            score += seeds - 4  # reward granaries
    return score
def EGagne(jeu):
    """Terminal heuristic: +/-1000 for a decided game, 0 otherwise."""
    if not game.finJeu(jeu):
        return 0  # game still in progress
    winner = game.getGagnant(jeu)
    if winner == game.getJoueur(jeu):
        return 1000  # player to move wins
    if winner == 0:
        return 0  # draw: no winner
    return -1000  # opponent wins
def evaluation(jeu):
    """Combined position value: WEIGHTS-weighted sum of the three heuristics."""
    w_score, w_granary, w_win = WEIGHTS
    return w_score * EScore(jeu) + w_granary * Egrenier(jeu) + w_win * EGagne(jeu)
def estimation(jeu, coup, n=N, alpha=-float("inf"), beta=float("inf")): #negamax
    """Negamax value (with alpha-beta pruning) of playing *coup* in *jeu*.

    Returns the score of the resulting position from the point of view of
    the player to move in that position.  The recursion stops at depth 0,
    at a terminal position, or when the TIMEOUT budget (measured from the
    global START_TIME) is exhausted.
    """
    next_game = game.getCopieJeu(jeu)
    game.joueCoup(next_game, coup)
    cv = game.getCoupsValides(next_game)
    # depth limit reached, or no valid moves left: evaluate statically
    if n <= 0 or not cv:
        return evaluation(next_game)
    score_coup = -float("inf")
    for cp in cv:
        # negamax: the child's value is negated, and the (alpha, beta)
        # window is negated and swapped for the opponent's turn
        score_coup = max(score_coup, -estimation(next_game, cp, n-1, -beta, -alpha))
        alpha = max(alpha, score_coup)
        if alpha >= beta:
            break
        # cut off the search if the time budget is exceeded
        if (time.time() - START_TIME) > TIMEOUT:
            break
    return score_coup
def decision(jeu):
    """Return the valid move whose negamax estimation is best for us."""
    cv = game.getCoupsValides(jeu)
    # max() keeps the first move among ties, matching the original
    # list.index(max(...)) semantics.
    return max(cv, key=lambda cp: -estimation(jeu, cp))
def saisieCoup(jeu):
    """jeu -> coup
    Return the move chosen by the alpha-beta AI for the current position.

    Records the start-of-turn snapshot, our player number, and the move
    timer in module globals before running the search.
    """
    global MOI, JEU, START_TIME
    JEU = jeu
    MOI = game.getJoueur(JEU)
    START_TIME = time.time()
    return decision(jeu)
"julian.merle-remond@etu.upmc.fr"
] | julian.merle-remond@etu.upmc.fr |
8a5b9c0407ee9bebaf9b240ad797a428e4f1daf0 | 01af5c75c4b29e33f862ef8460cf77b1a21edf2a | /vicks/covidmail.py | e5f6f2614c72e8225090aa8d611c82bf4dc84d91 | [] | no_license | imvickykumar999/covid19slot | 42c72a1e795bce32ecf7c1850c9b71b6f21258b0 | 689ce0819f6f3fe4453eb6437f303558bef8a99a | refs/heads/main | 2023-05-27T03:00:15.272312 | 2021-06-06T22:19:35 | 2021-06-06T22:19:35 | 374,466,577 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py |
def covail(l=None,
           toaddr = "hellovickykumar123@gmail.com",
           filename = None,
           ):
    """Write the slot data *l* to an Excel workbook and e-mail it to *toaddr*.

    Each dict in *l* becomes one worksheet ('Sheet 0', 'Sheet 1', ...).
    If *filename* is None it is derived from the recipient's address.
    Returns len(l), the number of sheets written.
    """
    # Avoid the mutable-default-argument pitfall: rebuild the demo payload
    # on every call instead of sharing one list across calls.
    if l is None:
        l = [
            {'1key1': '1value1', '1key2': '1value2'},
            {'2key1': '2value1', '2key2': '2value2'},
        ]
    if filename is None:
        filename = f"{toaddr.split('@')[0]}.xlsx"

    import pandas as pd
    # Context manager saves and closes the workbook deterministically.
    with pd.ExcelWriter(filename, engine='xlsxwriter') as writer:
        for i, sheet_data in enumerate(l):
            pd.DataFrame(sheet_data).to_excel(writer, sheet_name=f'Sheet {i}')

    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.mime.base import MIMEBase
    from email import encoders

    fromaddr = "imvickykumar999@gmail.com"

    msg = MIMEMultipart()
    msg['From'] = fromaddr
    msg['To'] = toaddr
    msg['Subject'] = "COVID19 Slot Notification"

    body = f'''
    Book your Slot here : https://selfregistration.cowin.gov.in/
    This E-Mail is Sent using python code by vicks,
    Slots is... (open attached excel file)
    {l}
    '''
    msg.attach(MIMEText(body, 'plain'))

    # Close the attachment file handle deterministically (the original
    # opened it and never closed it).
    with open(filename, "rb") as attachment:
        p = MIMEBase('application', 'octet-stream')
        p.set_payload(attachment.read())
    encoders.encode_base64(p)
    p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
    msg.attach(p)

    # SECURITY: the Gmail password is hard-coded in source.  Move it to an
    # environment variable or secrets store and rotate this credential.
    # The SMTP context manager calls quit() on exit, as the original did.
    with smtplib.SMTP('smtp.gmail.com', 587) as s:
        s.starttls()
        s.login(fromaddr, "Hellovix999@")
        s.sendmail(fromaddr, toaddr, msg.as_string())

    return len(l)
# covail()
| [
"imvickykumar999@gmail.com"
] | imvickykumar999@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.