blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
124ef503647674d5954b7b9f0a1783d787153177 | 9b3a3a84d51b46f576b12ebf4da42f9c7244fc95 | /introduction-to-python/02-datatype.py | a2b4764e02b72c09cf67285dc8434db055c74fd9 | [] | no_license | Markweell/introduction-to-python | bce97e40a1553e7caa90e7b82d7e1510a816c3a1 | ea3ceb38e482cab265c2fe8a638225ba1dc47e26 | refs/heads/master | 2022-06-20T02:07:37.597644 | 2020-05-09T18:34:46 | 2020-05-09T18:34:46 | 262,634,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | #string
print(type("Hello word")) # str
print(type('Hello word')) # str
print(type('''Hello word''')) # str
print(type("""Hello word""")) # str
# Number
print(type(30)) # int
print(type(30.3 )) # float
# Boolean
print(type(False)) # bool
# List
print(type([12,123,123,123])) # list
print(type([12,False,123,'string'])) # list
# Tuples
print(type((10,12,12,'123'))) # tuple, like a list, but immutable
# Dictionaries.
print(type({"name": "Marcos"})) # dict
# None
print(type(None)) | [
"marcosgallardoperez@gmail.com"
] | marcosgallardoperez@gmail.com |
4b803dc11bc61c8e8bfaa692a0a6f248f40f8b06 | bf885e4a58ac5ab888890e297eafcfca907d7845 | /hello_world_project/hello_world_project/urls.py | d3bf32b3ef4875f4d09711b297f85325df8055ae | [] | no_license | manishbalyan/django-hello_world_app | c54e4875a9bb3dac7e58224f11e1cf6d60b70463 | bc53fa0a8d3e57bc085bc113c0d5640521c45e44 | refs/heads/master | 2021-01-23T16:28:18.954683 | 2019-02-13T05:55:24 | 2019-02-13T05:55:24 | 38,373,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'hello_world_project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
    # these link the project urls to the app urls
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('hello_world.urls')),
    url(r'^about/', include('hello_world.urls'))
)
| [
"balyan05.manish@gmail.com"
] | balyan05.manish@gmail.com |
cafc587d98b94559ed1c700073ef8df288023c8a | a2f08f07c5a8473fc6d65b54844948524a8d56a5 | /codigwithsilentsec/src/account/admin.py | b72b53b9d4b00e41e1423fa4da64b110879754f5 | [] | no_license | silnetmagar321/etenderwebsite | 9fd94e11d4103b68397a2af6179453f659591bb5 | 39521808442c63cc2ade17602430f625d091f213 | refs/heads/main | 2023-01-07T13:25:56.174456 | 2020-10-20T09:08:14 | 2020-10-20T09:08:14 | 304,482,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from account.models import Account
# Register your models here.
class AccountAdmin(UserAdmin):
list_display = ('email', 'username', 'date_joined', 'last_login', 'is_admin', 'is_staff')
search_fields = ('email', 'username',)
readonly_fields = ('date_joined', 'last_login')
filter_horizontal = ()
list_filter = ()
fieldsets = ()
admin.site.register(Account, AccountAdmin) | [
"56421032+silnetmagar321@users.noreply.github.com"
] | 56421032+silnetmagar321@users.noreply.github.com |
b5b837af34e57caa301ef9f8de09196ca005dc82 | d913ab73cc005749478ae374729a68952ba16a1a | /mrjob/mrjob_spark_word_count.py | 7acd36db76956ae3a19002fb4a1a350df16bd507 | [] | no_license | matrixor/BigData | f5fa238b5c5bd165d6df2b227684a3f4774fd07b | 1ba320424220891302b4643635b0e4938b691ba2 | refs/heads/master | 2021-09-03T20:07:57.349489 | 2018-01-11T16:21:52 | 2018-01-11T16:21:52 | 109,291,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | ### You must define this file name as 'mrjob_spark_xxxxx'
### xxxxx is your job description, like 'word_count'
#########################################################
from mrjob.job import MRJob
class MRSparkJob(MRJob):
### update below 'job' function to define your job
def job(self,lines):
from operator import add
import re
WORD_RE = re.compile(r"[\w']+")
counts = (
lines.flatMap(lambda line: WORD_RE.findall(line))
.map(lambda word: (word, 1))
.reduceByKey(add))
return counts
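    # A worked example (hedged, not from the original source): feeding job()
    # the single line "to be or not to be" yields, after the
    # flatMap/map/reduceByKey pipeline above, the pairs ("to", 2), ("be", 2),
    # ("or", 1), ("not", 1).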
def spark(self, input_path, output_path):
from pyspark import SparkContext
sc = SparkContext(appName='wmi_mr_spark_job')
input_lines = sc.textFile(input_path)
results = self.job(input_lines)
results.saveAsTextFile(output_path)
sc.stop()
if __name__ == '__main__':
MRSparkJob.run() | [
"alan.0002.t@gmail.com"
] | alan.0002.t@gmail.com |
41f4b127bfbd6b75174719694a023c07f6cca470 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.TZO/Sun-ExtA_16/pdf_to_json_test_Latn.TZO_Sun-ExtA_16.py | 1ec5da78381362fbe785a67e34d5996d974a7995 | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TZO/Sun-ExtA_16/udhr_Latn.TZO_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
9a0d49f1da096724198cae1381ead5d2f5ab901e | 7d4f3360991786ea493b5020e327951613ea3e54 | /node_modules/mongoose/node_modules/mongodb/node_modules/bson/build/config.gypi | 65bb45cf42ebcb13eaa1fc5f90908018caa7f712 | [
"Apache-2.0",
"MIT"
] | permissive | kiranw/travelingtable | ec7224e4d2496ce55a66de87a24f697e17796ef2 | 520d99bff8d0d72ba9986c3a0e0762fb8074892c | refs/heads/master | 2016-09-05T22:02:38.520882 | 2015-03-02T03:36:34 | 2015-03-02T03:36:34 | 31,521,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/bin/python",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "false",
"want_separate_host_toolset": 0,
"nodedir": "/Users/kiran/.node-gyp/0.10.35",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"email": "",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/kiran/.npm-init.js",
"userconfig": "/Users/kiran/.npmrc",
"node_version": "0.10.35",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/kiran/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/1.4.28 node/v0.10.35 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr/local"
}
}
| [
"kiranw@mit.edu"
] | kiranw@mit.edu |
71618da8689e4208efa693fb48db8b677d588d1c | 5ad97fca229f380bba82e6647b606ca93ba19f37 | /tests/docstring_tests.py | 59076ce548ac59be270952b895fa0c9f1523e983 | [
"MIT"
] | permissive | KeyserSosa/lighthouse | d53df9b643c2214cd42811c61f2c0b3bcb58f388 | 5d66718daa14d0e5a243e3f2930cda818a88d33d | refs/heads/master | 2021-01-23T01:08:54.881556 | 2015-06-05T22:23:22 | 2015-06-05T22:23:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,794 | py | import inspect
import re
import lighthouse.balancer
import lighthouse.check
import lighthouse.checks.http
import lighthouse.checks.redis
import lighthouse.cluster
import lighthouse.configs.handler
import lighthouse.configs.monitor
import lighthouse.configs.watcher
import lighthouse.configurable
import lighthouse.discovery
import lighthouse.haproxy.balancer
import lighthouse.haproxy.config
import lighthouse.haproxy.control
import lighthouse.haproxy.stanzas.section
import lighthouse.haproxy.stanzas.stanza
import lighthouse.haproxy.stanzas.meta
import lighthouse.haproxy.stanzas.frontend
import lighthouse.haproxy.stanzas.backend
import lighthouse.haproxy.stanzas.peers
import lighthouse.haproxy.stanzas.proxy
import lighthouse.haproxy.stanzas.stats
import lighthouse.log
import lighthouse.node
import lighthouse.peer
import lighthouse.pluggable
import lighthouse.reporter
import lighthouse.service
import lighthouse.writer
import lighthouse.zookeeper
import lighthouse.events
modules_to_test = (
lighthouse.balancer,
lighthouse.check,
lighthouse.checks.http,
lighthouse.checks.redis,
lighthouse.cluster,
lighthouse.configs.handler,
lighthouse.configs.monitor,
lighthouse.configs.watcher,
lighthouse.configurable,
lighthouse.discovery,
lighthouse.haproxy.balancer,
lighthouse.haproxy.config,
lighthouse.haproxy.control,
lighthouse.haproxy.stanzas.section,
lighthouse.haproxy.stanzas.stanza,
lighthouse.haproxy.stanzas.meta,
lighthouse.haproxy.stanzas.frontend,
lighthouse.haproxy.stanzas.backend,
lighthouse.haproxy.stanzas.peers,
lighthouse.haproxy.stanzas.proxy,
lighthouse.haproxy.stanzas.stats,
lighthouse.log,
lighthouse.node,
lighthouse.peer,
lighthouse.pluggable,
lighthouse.reporter,
lighthouse.service,
lighthouse.writer,
lighthouse.zookeeper,
lighthouse.events,
)
def test_docstrings():
for module in modules_to_test:
for path, thing in get_module_things(module):
yield create_docstring_assert(path, thing)
def get_module_things(module):
module_name = module.__name__
for func_name, func in get_module_functions(module):
if inspect.getmodule(func) != module:
continue
yield (module_name + "." + func_name, func)
for class_name, klass in get_module_classes(module):
if inspect.getmodule(klass) != module:
continue
yield (module_name + "." + class_name, klass)
for method_name, method in get_class_methods(klass):
if method_name not in klass.__dict__:
continue
yield (module_name + "." + class_name + ":" + method_name, method)
def get_module_classes(module):
for name, klass in inspect.getmembers(module, predicate=inspect.isclass):
yield (name, klass)
def get_module_functions(module):
for name, func in inspect.getmembers(module, predicate=inspect.isfunction):
yield (name, func)
def get_class_methods(klass):
for name, method in inspect.getmembers(klass, predicate=inspect.ismethod):
yield (name, method)
def create_docstring_assert(path, thing):
def test_function():
assert_docstring_present(thing, path)
# TODO(wglass): uncomment this assert and fill out the param info
# for methods and functions
# assert_docstring_includes_param_metadata(thing, path)
test_name = "test_docstring__%s" % de_camelcase(path)
test_function.__name__ = test_name
test_function.description = test_name
return test_function
def assert_docstring_present(thing, path):
# TODO(wglass): remove this check for __init__ when the param metadata
# assert is re-enabled
if path.endswith("__init__"):
return
docstring = inspect.getdoc(thing)
if not docstring or not docstring.strip():
raise AssertionError("No docstring present for %s" % path)
def assert_docstring_includes_param_metadata(thing, path):
if inspect.isclass(thing):
return
docstring = inspect.getdoc(thing)
if not docstring:
return
for arg_name in inspect.getargspec(thing).args:
if arg_name in ("self", "cls"):
continue
if ":param %s:" % arg_name not in docstring:
raise AssertionError(
"Missing :param: for arg %s of %s" % (arg_name, path)
)
if ":type %s:" % arg_name not in docstring:
raise AssertionError(
"Missing :type: for arg %s of %s" % (arg_name, path)
)
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def de_camelcase(name):
return all_cap_re.sub(
r'\1_\2',
first_cap_re.sub(r'\1_\2', name)
).lower()
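# A hedged example of the conversion above ("HttpCheck" is illustrative, not
# taken from the modules under test): de_camelcase("HttpCheck") returns
# "http_check", which keeps the generated test names snake_case.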
| [
"william@hipmunk.com"
] | william@hipmunk.com |
9b698fbb7fa81969df29bb6c75b7825c6c411271 | 50fa5e53d749f1ee829ecdb8351374ca80db1b5a | /DraggablePoint.py | 5654e97856b41c44480631c84f82ac1e76ae3482 | [] | no_license | locdoan12121997/matplot-test | bd938a47be13dfe0f89b87558fd4d6e5ee6fe0ec | 7536ca3f3c1748e095195a2b8778443795504e96 | refs/heads/master | 2022-07-07T22:14:31.764868 | 2019-10-01T03:58:31 | 2019-10-01T03:58:31 | 211,907,700 | 0 | 0 | null | 2022-06-21T22:57:51 | 2019-09-30T16:44:48 | Jupyter Notebook | UTF-8 | Python | false | false | 3,459 | py | import matplotlib.patches as patches
from matplotlib.lines import Line2D
class DraggablePoint:
lock = None
id = None
lines = []
def __init__(self, parent, x, y, size=0.3):
self.parent = parent
self.point = patches.Circle((x, y), size, fill=True, fc='k', ec='g')
self.x = x
self.y = y
parent.fig.axes[0].add_patch(self.point)
self.press = None
self.background = None
self.connect()
def connect(self):
self.cidpress = self.point.figure.canvas.mpl_connect('button_press_event', self.on_press)
self.cidrelease = self.point.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.cidmotion = self.point.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
def on_press(self, event):
if event.inaxes != self.point.axes:
return
if DraggablePoint.lock is not None:
return
contains, attrd = self.point.contains(event)
if not contains:
return
self.press = self.point.center, event.xdata, event.ydata
DraggablePoint.lock = self
canvas = self.point.figure.canvas
axes = self.point.axes
self.point.set_animated(True)
if len(self.lines) > 0:
[line.set_animated(True) for line in self.lines]
canvas.draw()
self.background = canvas.copy_from_bbox(self.point.axes.bbox)
axes.draw_artist(self.point)
canvas.blit(axes.bbox)
def on_motion(self, event):
if DraggablePoint.lock is not self:
return
if event.inaxes != self.point.axes:
return
self.point.center, xpress, ypress = self.press
dx = event.xdata - xpress
dy = event.ydata - ypress
self.point.center = (self.point.center[0] + dx, self.point.center[1] + dy)
canvas = self.point.figure.canvas
axes = self.point.axes
canvas.restore_region(self.background)
axes.draw_artist(self.point)
if len(self.lines) > 0:
[axes.draw_artist(line) for line in self.lines]
self.x = self.point.center[0]
self.y = self.point.center[1]
if len(self.lines) > 0:
for line in self.lines:
if self == line.end_a:
line_x = [line.end_b.x, self.x]
line_y = [line.end_b.y, self.y]
line.set_data(line_x, line_y)
elif self == line.end_b:
line_x = [self.x, line.end_a.x]
line_y = [self.y, line.end_a.y]
for ln in line.end_a.lines:
if ln == line:
ln.set_data(line_x, line_y)
canvas.blit(axes.bbox)
def on_release(self, event):
if DraggablePoint.lock is not self:
return
self.press = None
DraggablePoint.lock = None
self.point.set_animated(False)
if len(self.lines) > 0:
[line.set_animated(False) for line in self.lines]
self.background = None
self.point.figure.canvas.draw()
self.x = self.point.center[0]
self.y = self.point.center[1]
def disconnect(self):
self.point.figure.canvas.mpl_disconnect(self.cidpress)
self.point.figure.canvas.mpl_disconnect(self.cidrelease)
self.point.figure.canvas.mpl_disconnect(self.cidmotion)
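# Minimal usage sketch (an assumption, not part of the original file): the
# parent only needs a `fig` attribute holding a matplotlib Figure with at
# least one Axes, because __init__ calls parent.fig.axes[0].add_patch(...).
#
#     import matplotlib.pyplot as plt
#
#     class Canvas(object):
#         def __init__(self):
#             self.fig, _ = plt.subplots()
#
#     canvas = Canvas()
#     point = DraggablePoint(canvas, 1.0, 1.0)
#     plt.show()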
| [
"cs2015_loc.dtt@student.vgu.edu.vn"
] | cs2015_loc.dtt@student.vgu.edu.vn |
2a4891fc504a6b60e310d8e66dfe03173c3f98d5 | 6a2a6408be018ba2772a2888c8b3a7ee6838ddeb | /weechat/python/wee_slack.py | 820f99f2061d275436bb6a7bf7d249a53139de9d | [] | no_license | gicmo/dot-files | c5b4598ffa399936f7d149039e558a89f5de7239 | 6ca9343cad5612e3c6daa61a7c80aa8bbfa01e28 | refs/heads/master | 2023-04-06T07:48:14.453990 | 2023-03-27T14:20:27 | 2023-03-27T14:20:27 | 41,631,064 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 149,634 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from functools import wraps
from itertools import islice
import textwrap
import time
import json
import pickle
import sha
import os
import re
import urllib
import sys
import traceback
import collections
import ssl
import random
import string
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
from websocket import create_connection, WebSocketConnectionClosedException
# hack to make tests possible.. better way?
try:
import weechat
except:
pass
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <rhuber@gmail.com>"
SCRIPT_VERSION = "2.0.0"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500
RECORD_DIR = "/tmp/weeslack-debug"
SLACK_API_TRANSLATOR = {
"channel": {
"history": "channels.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "channels.info",
},
"im": {
"history": "im.history",
"join": "conversations.open",
"leave": "conversations.close",
"mark": "im.mark",
},
"mpim": {
"history": "mpim.history",
"join": "mpim.open", # conversations.open lacks unread_count_display
"leave": "conversations.close",
"mark": "mpim.mark",
"info": "groups.info",
},
"group": {
"history": "groups.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "groups.mark",
"info": "groups.info"
},
"thread": {
"history": None,
"join": None,
"leave": None,
"mark": None,
}
}
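# A sketch of how the table above is consumed later in this file: the channel
# type picks the row and the action picks the Slack API method, e.g.
# SLACK_API_TRANSLATOR["im"]["mark"] evaluates to "im.mark", which is then
# handed to a SlackRequest. The "thread" entries are None because threads have
# no dedicated API methods here.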
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
"""
Only run this function if we're in a slack buffer, else ignore
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_OK
return f(data, current_buffer, *args, **kwargs)
return wrapper
def slack_buffer_required(f):
"""
Only run this function if we're in a slack buffer, else print error
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_ERROR
return f(data, current_buffer, *args, **kwargs)
return wrapper
def utf8_decode(f):
"""
Decode all arguments from byte strings to unicode strings. Use this for
functions called from outside of this script, e.g. callbacks from weechat.
"""
@wraps(f)
def wrapper(*args, **kwargs):
return f(*decode_from_utf8(args), **decode_from_utf8(kwargs))
return wrapper
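# A hedged usage sketch ("my_callback" is illustrative; the real decorated
# callbacks appear further down in this file):
#
#     @utf8_decode
#     def my_callback(data, buffer, args):
#         ...  # all arguments arrive here as unicode strings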
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults.cafile is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
EMOJI = []
###### Unicode handling
def encode_to_utf8(data):
if isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, bytes):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(encode_to_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(encode_to_utf8, data))
else:
return data
def decode_from_utf8(data):
if isinstance(data, bytes):
return data.decode('utf-8')
if isinstance(data, unicode):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(decode_from_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(decode_from_utf8, data))
else:
return data
class WeechatWrapper(object):
def __init__(self, wrapped_class):
self.wrapped_class = wrapped_class
# Helper method used to encode/decode method calls.
def wrap_for_utf8(self, method):
def hooked(*args, **kwargs):
result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
# Prevent wrapped_class from becoming unwrapped
if result == self.wrapped_class:
return self
return decode_from_utf8(result)
return hooked
# Encode and decode everything sent to/received from weechat. We use the
# unicode type internally in wee-slack, but has to send utf8 to weechat.
def __getattr__(self, attr):
orig_attr = self.wrapped_class.__getattribute__(attr)
if callable(orig_attr):
return self.wrap_for_utf8(orig_attr)
else:
return decode_from_utf8(orig_attr)
# Ensure all lines sent to weechat specifies a prefix. For lines after the
# first, we want to disable the prefix, which is done by specifying a space.
def prnt_date_tags(self, buffer, date, tags, message):
message = message.replace("\n", "\n \t")
return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
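# In practice the script talks to weechat through a single wrapped instance,
# conventionally bound to the global "w" during setup (a sketch, assuming the
# plain "weechat" module was imported as above):
#
#     w = WeechatWrapper(weechat)
#     w.prnt("", "this message reaches weechat encoded as utf-8")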
##### Helpers
def get_nick_color_name(nick):
info_name_prefix = "irc_" if int(weechat_version) < 0x1050000 else ""
return w.info_get(info_name_prefix + "nick_color_name", nick)
##### BEGIN NEW
IGNORED_EVENTS = [
# "pref_change",
# "reconnect_url",
]
###### New central Event router
class EventRouter(object):
def __init__(self):
"""
complete
Eventrouter is the central hub we use to route:
1) incoming websocket data
2) outgoing http requests and incoming replies
3) local requests
It has a recorder that, when enabled, logs most events
to the location specified in RECORD_DIR.
"""
self.queue = []
self.slow_queue = []
self.slow_queue_timer = 0
self.teams = {}
self.context = {}
self.weechat_controller = WeechatController(self)
self.previous_buffer = ""
self.reply_buffer = {}
self.cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
self.proc = {k[8:]: v for k, v in globals().items() if k.startswith("process_")}
self.handlers = {k[7:]: v for k, v in globals().items() if k.startswith("handle_")}
self.local_proc = {k[14:]: v for k, v in globals().items() if k.startswith("local_process_")}
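        # The four dicts above form a dispatch registry keyed by naming
        # convention: a module-level function named e.g. "process_foo" (a
        # hypothetical name) is registered as self.proc["foo"], "handle_foo"
        # as self.handlers["foo"], and so on. handle_next() below routes each
        # queued event through these tables via its derived function_name.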
self.shutting_down = False
self.recording = False
self.recording_path = "/tmp"
def record(self):
"""
complete
Toggles the event recorder and creates a directory for data if enabled.
"""
self.recording = not self.recording
if self.recording:
if not os.path.exists(RECORD_DIR):
os.makedirs(RECORD_DIR)
def record_event(self, message_json, file_name_field, subdir=None):
"""
complete
Called each time you want to record an event.
message_json is a json in dict form
file_name_field is the json key whose value you want to be part of the file name
"""
now = time.time()
if subdir:
directory = "{}/{}".format(RECORD_DIR, subdir)
else:
directory = RECORD_DIR
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get(file_name_field, 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write("{}".format(json.dumps(message_json)))
f.close()
def store_context(self, data):
"""
A place to store data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
self.context[identifier] = data
dbg("stored context {} {} ".format(identifier, data.url))
return identifier
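    # A sketch of the round trip (it mirrors local_process_async_slack_api_request
    # below): the identifier string is what actually crosses the weechat
    # callback boundary, not the request object itself.
    #
    #     context = eventrouter.store_context(request)
    #     w.hook_process_hashtable(url, params, timeout,
    #                              "receive_httprequest_callback", context)
    #     # ...receive_httprequest_callback() later calls retrieve_context().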
def retrieve_context(self, identifier):
"""
A place to retrieve data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
data = self.context.get(identifier, None)
if data:
# dbg("retrieved context {} ".format(identifier))
return data
def delete_context(self, identifier):
"""
Requests can span multiple requests, so we may need to delete this as a last step
"""
if identifier in self.context:
# dbg("deleted eontext {} ".format(identifier))
del self.context[identifier]
def shutdown(self):
"""
complete
This toggles shutdown mode. Shutdown mode tells us not to
talk to Slack anymore. Without this, typing /quit will trigger
a race with the buffer close callback and may result in you
leaving every slack channel.
"""
self.shutting_down = not self.shutting_down
def register_team(self, team):
"""
complete
Adds a team to the list of known teams for this EventRouter.
"""
if isinstance(team, SlackTeam):
self.teams[team.get_team_hash()] = team
else:
raise InvalidType(type(team))
def reconnect_if_disconnected(self):
for team_id, team in self.teams.iteritems():
if not team.connected:
team.connect()
dbg("reconnecting {}".format(team))
def receive_ws_callback(self, team_hash):
"""
incomplete (reconnect)
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
try:
# Read the data from the websocket associated with this team.
data = decode_from_utf8(self.teams[team_hash].ws.recv())
message_json = json.loads(data)
metadata = WeeSlackMetadata({
"team": team_hash,
}).jsonify()
message_json["wee_slack_metadata"] = metadata
if self.recording:
self.record_event(message_json, 'type', 'websocket')
self.receive_json(json.dumps(message_json))
except WebSocketConnectionClosedException:
# TODO: handle reconnect here
self.teams[team_hash].set_disconnected()
return w.WEECHAT_RC_OK
except ssl.SSLWantReadError:
# Expected to happen occasionally on SSL websockets.
return w.WEECHAT_RC_OK
except Exception:
dbg("socket issue: {}\n".format(traceback.format_exc()))
return w.WEECHAT_RC_OK
def receive_httprequest_callback(self, data, command, return_code, out, err):
"""
complete
Receives the result of an http request we previously handed
off to weechat (weechat bundles libcurl). Weechat can fragment
replies, so it buffers them until the reply is complete.
It is then populated with metadata here so we can identify
where the request originated and route properly.
"""
request_metadata = self.retrieve_context(data)
try:
dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
except:
dbg(request_metadata)
return
if return_code == 0:
if len(out) > 0:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
try:
j = json.loads(self.reply_buffer[request_metadata.response_id].getvalue())
except:
pass
# dbg("Incomplete json, awaiting more", True)
try:
j["wee_slack_process_method"] = request_metadata.request_normalized
j["wee_slack_request_metadata"] = pickle.dumps(request_metadata)
self.reply_buffer.pop(request_metadata.response_id)
if self.recording:
self.record_event(j, 'wee_slack_process_method', 'http')
self.receive_json(json.dumps(j))
self.delete_context(data)
except:
dbg("HTTP REQUEST CALLBACK FAILED", True)
pass
# We got an empty reply and this is weird so just ditch it and retry
else:
dbg("length was zero, probably a bug..")
self.delete_context(data)
self.receive(request_metadata)
elif return_code != -1:
self.reply_buffer.pop(request_metadata.response_id, None)
self.delete_context(data)
else:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = StringIO()
self.reply_buffer[request_metadata.response_id].write(out)
def receive_json(self, data):
"""
complete
Receives a raw JSON string from and unmarshals it
as dict, then places it back on the queue for processing.
"""
dbg("RECEIVED JSON of len {}".format(len(data)))
message_json = json.loads(data)
self.queue.append(message_json)
def receive(self, dataobj):
"""
complete
Receives a raw object and places it on the queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.queue.append(dataobj)
def receive_slow(self, dataobj):
"""
complete
Receives a raw object and places it on the slow queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.slow_queue.append(dataobj)
def handle_next(self):
"""
complete
Main handler of the EventRouter. This is called repeatedly
via callback to drain events from the queue. It also attaches
useful metadata and context to events as they are processed.
"""
if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
# for q in self.slow_queue[0]:
dbg("from slow queue", 0)
self.queue.append(self.slow_queue.pop())
# self.slow_queue = []
self.slow_queue_timer = time.time()
if len(self.queue) > 0:
j = self.queue.pop(0)
# Reply is a special case of a json reply from websocket.
kwargs = {}
if isinstance(j, SlackRequest):
if j.should_try():
if j.retry_ready():
local_process_async_slack_api_request(j, self)
else:
self.slow_queue.append(j)
else:
dbg("Max retries for Slackrequest")
else:
if "reply_to" in j:
dbg("SET FROM REPLY")
function_name = "reply"
elif "type" in j:
dbg("SET FROM type")
function_name = j["type"]
elif "wee_slack_process_method" in j:
dbg("SET FROM META")
function_name = j["wee_slack_process_method"]
else:
dbg("SET FROM NADA")
function_name = "unknown"
# Here we are passing the actual objects. No more lookups.
meta = j.get("wee_slack_metadata", None)
if meta:
try:
if isinstance(meta, basestring):
dbg("string of metadata")
team = meta.get("team", None)
if team:
kwargs["team"] = self.teams[team]
if "user" in j:
kwargs["user"] = self.teams[team].users[j["user"]]
if "channel" in j:
kwargs["channel"] = self.teams[team].channels[j["channel"]]
except:
dbg("metadata failure")
if function_name not in IGNORED_EVENTS:
dbg("running {}".format(function_name))
if function_name.startswith("local_") and function_name in self.local_proc:
self.local_proc[function_name](j, self, **kwargs)
elif function_name in self.proc:
self.proc[function_name](j, self, **kwargs)
elif function_name in self.handlers:
self.handlers[function_name](j, self, **kwargs)
else:
raise ProcessNotImplemented(function_name)
def handle_next(*args):
"""
complete
This is just a place to call the event router globally.
This is a dirty hack. There must be a better way.
"""
try:
EVENTROUTER.handle_next()
except:
if config.debug_mode:
traceback.print_exc()
else:
pass
return w.WEECHAT_RC_OK
class WeechatController(object):
"""
Encapsulates our interaction with weechat
"""
def __init__(self, eventrouter):
self.eventrouter = eventrouter
self.buffers = {}
self.previous_buffer = None
self.buffer_list_stale = False
def iter_buffers(self):
for b in self.buffers:
yield (b, self.buffers[b])
def register_buffer(self, buffer_ptr, channel):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
self.buffers[buffer_ptr] = channel
else:
raise InvalidType(type(buffer_ptr))
def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
try:
self.buffers[buffer_ptr].destroy_buffer(update_remote)
if close_buffer:
w.buffer_close(buffer_ptr)
del self.buffers[buffer_ptr]
except:
dbg("Tried to close unknown buffer")
else:
raise InvalidType(type(buffer_ptr))
def get_channel_from_buffer_ptr(self, buffer_ptr):
return self.buffers.get(buffer_ptr, None)
def get_all(self, buffer_ptr):
return self.buffers
def get_previous_buffer_ptr(self):
return self.previous_buffer
def set_previous_buffer(self, data):
self.previous_buffer = data
def check_refresh_buffer_list(self):
return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()
def set_refresh_buffer_list(self, setting):
self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
"""
complete
    Sends an API request to Slack. You'll need to give this a well-formed SlackRequest object.
DEBUGGING!!! The context here cannot be very large. Weechat will crash.
"""
if not event_router.shutting_down:
weechat_request = 'url:{}'.format(request.request_string())
weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
request.tried()
context = event_router.store_context(request)
# TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
###### New Callbacks
@utf8_decode
def receive_httprequest_callback(data, command, return_code, out, err):
"""
complete
This is a dirty hack. There must be a better way.
"""
# def url_processor_cb(data, command, return_code, out, err):
EVENTROUTER.receive_httprequest_callback(data, command, return_code, out, err)
return w.WEECHAT_RC_OK
@utf8_decode
def receive_ws_callback(*args):
"""
complete
The first arg is all we want here. It contains the team
hash which is set when we _hook the descriptor.
This is a dirty hack. There must be a better way.
"""
EVENTROUTER.receive_ws_callback(args[0])
return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
EVENTROUTER.reconnect_if_disconnected()
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
"""
complete
Receives a callback from weechat when a buffer is being closed.
We pass the eventrouter variable name in as a string, as
that is the only way we can do dependency injection via weechat
callback, hence the eval.
"""
eval(signal).weechat_controller.unregister_buffer(data, True, False)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
"""
incomplete
Handles everything a user types in the input bar. In our case
this includes add/remove reactions, modifying messages, and
sending messages.
"""
eventrouter = eval(signal)
channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
if not channel:
return w.WEECHAT_RC_ERROR
reaction = re.match("^(\d*)(\+|-):(.*):\s*$", data)
substitute = re.match("^(\d*)s/", data)
if reaction:
if reaction.group(2) == "+":
channel.send_add_reaction(int(reaction.group(1) or 1), reaction.group(3))
elif reaction.group(2) == "-":
channel.send_remove_reaction(int(reaction.group(1) or 1), reaction.group(3))
elif substitute:
msgno = int(substitute.group(1) or 1)
try:
old, new, flags = re.split(r'(?<!\\)/', data)[1:]
except ValueError:
pass
else:
# Replacement string in re.sub() is a string, not a regex, so get
# rid of escapes.
new = new.replace(r'\/', '/')
old = old.replace(r'\/', '/')
channel.edit_nth_previous_message(msgno, old, new, flags)
else:
if data.startswith(('//', ' ')):
data = data[1:]
channel.send_message(data)
# this is probably wrong channel.mark_read(update_remote=True, force=True)
return w.WEECHAT_RC_OK
# Workaround for supporting multiline messages. It intercepts before the input
# callback is called, as this is called with the whole message, while it is
# normally split on newline before being sent to buffer_input_callback
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return string
message = decode_from_utf8(string)
if not message.startswith("/") and "\n" in message:
buffer_input_callback("EVENTROUTER", current_buffer, message)
return ""
return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
"""
incomplete
Every time we change channels in weechat, we call this to:
1) set read marker 2) determine if we have already populated
channel history data
"""
eventrouter = eval(signal)
prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
# this is to see if we need to gray out things in the buffer list
prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
if prev:
prev.mark_read()
new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
if new_channel:
if not new_channel.got_history:
new_channel.get_history()
eventrouter.weechat_controller.set_previous_buffer(data)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
"""
incomplete
A simple timer-based callback that will update the buffer list
if needed. We only do this max 1x per second, as otherwise it
uses a lot of cpu for minimal changes. We use buffer short names
to indicate typing via "#channel" <-> ">channel" and
user presence via " name" <-> "+name".
"""
eventrouter = eval(data)
# global buffer_list_update
for b in eventrouter.weechat_controller.iter_buffers():
b[1].refresh()
# buffer_list_update = True
# if eventrouter.weechat_controller.check_refresh_buffer_list():
# # gray_check = False
# # if len(servers) > 1:
# # gray_check = True
# eventrouter.weechat_controller.set_refresh_buffer_list(False)
return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
stop_talking_to_slack()
return w.WEECHAT_RC_OK
@utf8_decode
def typing_notification_cb(signal, sig_type, data):
msg = w.buffer_get_string(data, "input")
if len(msg) > 8 and msg[:1] != "/":
global typing_timer
now = time.time()
if typing_timer + 4 < now:
current_buffer = w.current_buffer()
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if channel and channel.type != "thread":
identifier = channel.identifier
request = {"type": "typing", "channel": identifier}
channel.team.send_to_websocket(request, expect_reply=False)
typing_timer = now
return w.WEECHAT_RC_OK
@utf8_decode
def typing_update_cb(data, remaining_calls):
w.bar_item_update("slack_typing_notice")
return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
if config.never_away:
for t in EVENTROUTER.teams.values():
slackbot = t.get_channel_map()['slackbot']
channel = t.channels[slackbot]
request = {"type": "typing", "channel": channel.identifier}
channel.team.send_to_websocket(request, expect_reply=False)
return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, current_buffer, args):
"""
    Provides a bar item indicating who is typing in the current channel AND
    who is typing a DM to you globally.
"""
typers = []
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# first look for people typing in this channel
if current_channel:
        # this try is mostly because server buffers don't implement is_someone_typing
try:
if current_channel.type != 'im' and current_channel.is_someone_typing():
typers += current_channel.get_typing_list()
except:
pass
# here is where we notify you that someone is typing in DM
# regardless of which buffer you are in currently
for t in EVENTROUTER.teams.values():
for channel in t.channels.values():
if channel.type == "im":
if channel.is_someone_typing():
typers.append("D/" + channel.slack_name)
pass
typing = ", ".join(typers)
if typing != "":
typing = w.color('yellow') + "typing: " + typing
return typing
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed nicks to completion list
"""
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u:
w.hook_completion_list_add(completion, "@" + u.name, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all :-prefixed emoji to completion list
"""
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None:
return w.WEECHAT_RC_OK
for e in current_channel.team.emoji_completions:
w.hook_completion_list_add(completion, ":" + e + ":", 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
"""Extract current word, if it is equal to a nick, prefix it with @ and
rely on nick_completion_cb adding the @-prefixed versions to the
completion lists, then let Weechat's internal completion do its
thing
"""
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# channel = channels.find(current_buffer)
if not hasattr(current_channel, 'members') or current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
line_input = w.buffer_get_string(current_buffer, "input")
current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
input_length = w.buffer_get_integer(current_buffer, "input_length")
word_start = 0
word_end = input_length
# If we're on a non-word, look left for something to complete
while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
current_pos = current_pos - 1
if current_pos < 0:
current_pos = 0
for l in range(current_pos, 0, -1):
if line_input[l] != '@' and not line_input[l].isalnum():
word_start = l + 1
break
for l in range(current_pos, input_length):
if not line_input[l].isalnum():
word_end = l
break
word = line_input[word_start:word_end]
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u and u.name == word:
# Here, we cheat. Insert a @ in front and rely in the @
# nicks being in the completion list
w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK
def script_unloaded():
stop_talking_to_slack()
return w.WEECHAT_RC_OK
def stop_talking_to_slack():
"""
complete
Prevents a race condition where quitting closes buffers
which triggers leaving the channel because of how close
buffer is handled
"""
EVENTROUTER.shutdown()
return w.WEECHAT_RC_OK
##### New Classes
class SlackRequest(object):
"""
complete
Encapsulates a Slack api request. Valuable as an object that we can add to the queue and/or retry.
    makes a SHA of the request url and current time so we can re-tag this on the way back through.
"""
def __init__(self, token, request, post_data={}, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.tries = 0
self.start_time = time.time()
self.domain = 'api.slack.com'
self.request = request
self.request_normalized = re.sub(r'\W+', '', request)
self.token = token
post_data["token"] = token
self.post_data = post_data
self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
self.url = 'https://{}/api/{}?{}'.format(self.domain, request, urllib.urlencode(encode_to_utf8(post_data)))
self.response_id = sha.sha("{}{}".format(self.url, self.start_time)).hexdigest()
self.retries = kwargs.get('retries', 3)
# def __repr__(self):
# return "URL: {} Tries: {} ID: {}".format(self.url, self.tries, self.response_id)
def request_string(self):
return "{}".format(self.url)
def tried(self):
self.tries += 1
self.response_id = sha.sha("{}{}".format(self.url, time.time())).hexdigest()
def should_try(self):
return self.tries < self.retries
def retry_ready(self):
return (self.start_time + (self.tries**2)) < time.time()
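    # i.e. a simple quadratic backoff measured from when the request was
    # created: it becomes ready again 1s, 4s, 9s, ... after start_time as
    # tries grows.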
class SlackTeam(object):
"""
incomplete
    Team object under which users and channels live. Does lots.
"""
def __init__(self, eventrouter, token, websocket_url, subdomain, nick, myidentifier, users, bots, channels, **kwargs):
self.ws_url = websocket_url
self.connected = False
self.connecting = False
# self.ws = None
self.ws_counter = 0
self.ws_replies = {}
self.eventrouter = eventrouter
self.token = token
self.team = self
self.subdomain = subdomain
self.domain = subdomain + ".slack.com"
self.preferred_name = self.domain
self.nick = nick
self.myidentifier = myidentifier
try:
if self.channels:
for c in channels.keys():
if not self.channels.get(c):
self.channels[c] = channels[c]
except:
self.channels = channels
self.users = users
self.bots = bots
self.team_hash = SlackTeam.generate_team_hash(self.nick, self.subdomain)
self.name = self.domain
self.channel_buffer = None
self.got_history = True
self.create_buffer()
self.set_muted_channels(kwargs.get('muted_channels', ""))
for c in self.channels.keys():
channels[c].set_related_server(self)
channels[c].check_should_open()
# self.channel_set_related_server(c)
# Last step is to make sure my nickname is the set color
self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
# This highlight step must happen after we have set related server
self.set_highlight_words(kwargs.get('highlight_words', ""))
self.load_emoji_completions()
def __repr__(self):
return "domain={} nick={}".format(self.subdomain, self.nick)
def __eq__(self, compare_str):
if compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain:
return True
else:
return False
def load_emoji_completions(self):
self.emoji_completions = list(EMOJI)
if self.emoji_completions:
s = SlackRequest(self.token, "emoji.list", {}, team_hash=self.team_hash)
self.eventrouter.receive(s)
def add_channel(self, channel):
self.channels[channel["id"]] = channel
channel.set_related_server(self)
# def connect_request_generate(self):
# return SlackRequest(self.token, 'rtm.start', {})
# def close_all_buffers(self):
# for channel in self.channels:
# self.eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, update_remote=False, close_buffer=True)
# #also close this server buffer
# self.eventrouter.weechat_controller.unregister_buffer(self.channel_buffer, update_remote=False, close_buffer=True)
def create_buffer(self):
if not self.channel_buffer:
if config.short_buffer_names:
self.preferred_name = self.subdomain
elif config.server_aliases not in ['', None]:
name = config.server_aliases.get(self.subdomain, None)
if name:
self.preferred_name = name
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new("{}".format(self.preferred_name), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
def set_muted_channels(self, muted_str):
self.muted_channels = {x for x in muted_str.split(',')}
def set_highlight_words(self, highlight_str):
self.highlight_words = {x for x in highlight_str.split(',')}
if len(self.highlight_words) > 0:
for v in self.channels.itervalues():
v.set_highlights()
def formatted_name(self, **kwargs):
return self.domain
def buffer_prnt(self, data):
w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag("team"), data)
def find_channel_by_members(self, members, channel_type=None):
for channel in self.channels.itervalues():
if channel.get_members() == members and (
channel_type is None or channel.type == channel_type):
return channel
def get_channel_map(self):
return {v.slack_name: k for k, v in self.channels.iteritems()}
def get_username_map(self):
return {v.name: k for k, v in self.users.iteritems()}
def get_team_hash(self):
return self.team_hash
@staticmethod
def generate_team_hash(nick, subdomain):
return str(sha.sha("{}{}".format(nick, subdomain)).hexdigest())
def refresh(self):
self.rename()
def rename(self):
pass
# def attach_websocket(self, ws):
# self.ws = ws
def is_user_present(self, user_id):
user = self.users.get(user_id)
if user.presence == 'active':
return True
else:
return False
def mark_read(self, ts=None, update_remote=True, force=False):
pass
def connect(self):
if not self.connected and not self.connecting:
self.connecting = True
if self.ws_url:
try:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
self.hook = w.hook_fd(ws.sock._sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
ws.sock.setblocking(0)
self.ws = ws
# self.attach_websocket(ws)
self.set_connected()
self.connecting = False
except Exception as e:
dbg("websocket connection error: {}".format(decode_from_utf8(e)))
self.connecting = False
return False
else:
# The fast reconnect failed, so start over-ish
for chan in self.channels:
self.channels[chan].got_history = False
s = initiate_connection(self.token, retries=999)
self.eventrouter.receive(s)
self.connecting = False
# del self.eventrouter.teams[self.get_team_hash()]
self.set_reconnect_url(None)
def set_connected(self):
self.connected = True
def set_disconnected(self):
w.unhook(self.hook)
self.connected = False
def set_reconnect_url(self, url):
self.ws_url = url
def next_ws_transaction_id(self):
if self.ws_counter > 999:
self.ws_counter = 0
self.ws_counter += 1
return self.ws_counter
def send_to_websocket(self, data, expect_reply=True):
data["id"] = self.next_ws_transaction_id()
message = json.dumps(data)
try:
if expect_reply:
self.ws_replies[data["id"]] = data
self.ws.send(encode_to_utf8(message))
dbg("Sent {}...".format(message[:100]))
except:
print "WS ERROR"
dbg("Unexpected error: {}\nSent: {}".format(sys.exc_info()[0], data))
self.set_connected()
def update_member_presence(self, user, presence):
user.presence = presence
for c in self.channels:
c = self.channels[c]
if user.id in c.members:
c.update_nicklist(user.id)
def subscribe_users_presence(self):
# FIXME: There is a limitation in the API to the size of the
# json we can send.
# We should try to be smarter to fetch the users whom we want to
# subscribe to.
users = self.users.keys()[0:750]
self.send_to_websocket({
"type": "presence_sub",
"ids": users,
}, expect_reply=False)
class SlackChannel(object):
"""
Represents an individual slack channel.
"""
def __init__(self, eventrouter, **kwargs):
# We require these two things for a valid object,
# the rest we can just learn from slack
self.active = False
for key, value in kwargs.items():
setattr(self, key, value)
self.eventrouter = eventrouter
self.slack_name = kwargs["name"]
self.slack_purpose = kwargs.get("purpose", {"value": ""})
self.topic = kwargs.get("topic", {}).get("value", "")
self.identifier = kwargs["id"]
self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team', None)
self.got_history = False
self.messages = OrderedDict()
self.hashed_messages = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.set_name(self.slack_name)
# short name relates to the localvar we change for typing indication
self.current_short_name = self.name
self.set_members(kwargs.get('members', []))
self.unread_count_display = 0
def __eq__(self, compare_str):
if compare_str == self.slack_name or compare_str == self.formatted_name() or compare_str == self.formatted_name(style="long_default"):
return True
else:
return False
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def set_name(self, slack_name):
self.name = "#" + slack_name
def refresh(self):
return self.rename()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def set_members(self, members):
self.members = set(members)
self.update_nicklist()
def get_members(self):
return self.members
def set_unread_count_display(self, count):
self.unread_count_display = count
self.new_messages = bool(self.unread_count_display)
for c in range(self.unread_count_display):
if self.type == "im":
w.buffer_set(self.channel_buffer, "hotlist", "2")
else:
w.buffer_set(self.channel_buffer, "hotlist", "1")
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
elif self.type == "group":
prepend = config.group_name_prefix
else:
prepend = "#"
select = {
"default": prepend + self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return select[style]
def render_topic(self):
if self.channel_buffer:
if self.topic != "":
topic = self.topic
else:
topic = self.slack_purpose['value']
w.buffer_set(self.channel_buffer, "title", topic)
def set_topic(self, value):
self.topic = value
self.render_topic()
def update_from_message_json(self, message_json):
for key, value in message_json.items():
setattr(self, key, value)
def open(self, update_remote=True):
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.create_buffer()
self.active = True
self.get_history()
# self.create_buffer()
def check_should_open(self, force=False):
if hasattr(self, "is_archived") and self.is_archived:
return
if force:
self.create_buffer()
return
# Only check is_member if is_open is not set, because in some cases
# (e.g. group DMs), is_member should be ignored in favor of is_open.
is_open = self.is_open if hasattr(self, "is_open") else self.is_member
if is_open or self.unread_count_display:
self.create_buffer()
if config.background_load_all_history:
self.get_history(slow_queue=True)
def set_related_server(self, team):
self.team = team
def set_highlights(self):
# highlight my own name and any set highlights
if self.channel_buffer:
highlights = self.team.highlight_words.union({'@' + self.team.nick, self.team.myidentifier, "!here", "!channel", "!everyone"})
h_str = ",".join(highlights)
w.buffer_set(self.channel_buffer, "highlight_words", h_str)
def create_buffer(self):
"""
incomplete (muted doesn't work)
Creates the weechat buffer where the channel magic happens.
"""
if not self.channel_buffer:
self.active = True
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
if self.type == "im":
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
else:
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.render_topic()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
if self.channel_buffer:
            w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
self.update_nicklist()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if self.type == "im":
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def destroy_buffer(self, update_remote):
if self.channel_buffer is not None:
self.channel_buffer = None
self.messages = OrderedDict()
self.hashed_messages = {}
self.got_history = False
self.active = False
if update_remote and not self.eventrouter.shutting_down:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["leave"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
    def buffer_prnt(self, nick, text, timestamp=None, tagset=None, tag_nick=None, **kwargs):
        data = "{}\t{}".format(format_nick(nick), text)
        # note: the old default of str(time.time()) was evaluated only once,
        # at definition time; None lets SlackTS supply the current time per call
        ts = SlackTS(timestamp)
last_read = SlackTS(self.last_read)
# without this, DMs won't open automatically
if not self.channel_buffer and ts > last_read:
self.open(update_remote=False)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
backlog = True if ts <= last_read else False
if tagset:
tags = tag(tagset, user=tag_nick)
self.new_messages = True
# we have to infer the tagset because we weren't told
elif ts <= last_read:
tags = tag("backlog", user=tag_nick)
elif self.type in ["im", "mpdm"]:
if tag_nick != self.team.nick:
tags = tag("dm", user=tag_nick)
self.new_messages = True
else:
tags = tag("dmfromme")
else:
tags = tag("default", user=tag_nick)
self.new_messages = True
try:
if config.unhide_buffers_with_activity and not self.is_visible() and (self.identifier not in self.team.muted_channels):
w.buffer_set(self.channel_buffer, "hidden", "0")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_print_time(self.channel_buffer, ts.minorstr(), ts.major)
if backlog:
self.mark_read(ts, update_remote=False, force=True)
except:
dbg("Problem processing buffer_prnt")
    def send_message(self, message, request_dict_ext=None):
        message = linkify_text(message, self.team, self)
        dbg(message)
        request = {"type": "message", "channel": self.identifier, "text": message, "_team": self.team.team_hash, "user": self.team.myidentifier}
        request.update(request_dict_ext or {})
self.team.send_to_websocket(request)
self.mark_read(update_remote=False, force=True)
def store_message(self, message, team, from_me=False):
if not self.active:
return
if from_me:
message.message_json["user"] = team.myidentifier
self.messages[SlackTS(message.ts)] = message
sorted_messages = sorted(self.messages.items())
messages_to_delete = sorted_messages[:-SCROLLBACK_SIZE]
messages_to_keep = sorted_messages[-SCROLLBACK_SIZE:]
for message_hash in [m[1].hash for m in messages_to_delete]:
if message_hash in self.hashed_messages:
del self.hashed_messages[message_hash]
self.messages = OrderedDict(messages_to_keep)
def change_message(self, ts, text=None, suffix=None):
ts = SlackTS(ts)
if ts in self.messages:
m = self.messages[ts]
if text:
m.change_text(text)
if suffix:
m.change_suffix(suffix)
text = m.render(force=True)
modify_buffer_line(self.channel_buffer, text, ts.major, ts.minor)
return True
def edit_nth_previous_message(self, n, old, new, flags):
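        # s///-style edit of your n-th most recent message: empty old and new
        # delete the message, and the 'g' flag replaces every occurrence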
message = self.my_last_message(n)
if new == "" and old == "":
s = SlackRequest(self.team.token, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
else:
num_replace = 1
if 'g' in flags:
num_replace = 0
new_message = re.sub(old, new, message["text"], num_replace)
if new_message != message["text"]:
s = SlackRequest(self.team.token, "chat.update", {"channel": self.identifier, "ts": message['ts'], "text": new_message}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def my_last_message(self, msgno):
for key in self.main_message_keys_reversed():
m = self.messages[key]
if "user" in m.message_json and "text" in m.message_json and m.message_json["user"] == self.team.myidentifier:
msgno -= 1
if msgno == 0:
return m.message_json
def is_visible(self):
return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
def get_history(self, slow_queue=False):
if not self.got_history:
# we have probably reconnected. flush the buffer
if self.team.connected:
w.buffer_clear(self.channel_buffer)
self.buffer_prnt('', 'getting channel history...', tagset='backlog')
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "count": BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
def send_add_reaction(self, msg_number, reaction):
self.send_change_reaction("reactions.add", msg_number, reaction)
def send_remove_reaction(self, msg_number, reaction):
self.send_change_reaction("reactions.remove", msg_number, reaction)
def send_change_reaction(self, method, msg_number, reaction):
if 0 < msg_number < len(self.messages):
keys = self.main_message_keys_reversed()
timestamp = next(islice(keys, msg_number - 1, None))
data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
s = SlackRequest(self.team.token, method, data)
self.eventrouter.receive(s)
def main_message_keys_reversed(self):
return (key for key in reversed(self.messages)
if type(self.messages[key]) == SlackMessage)
# Typing related
def set_typing(self, user):
if self.channel_buffer and self.is_visible():
self.typing[user] = time.time()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def unset_typing(self, user):
if self.channel_buffer and self.is_visible():
u = self.typing.get(user, None)
if u:
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def is_someone_typing(self):
"""
Walks through dict of typing folks in a channel and fast
returns if any of them is actively typing. If none are,
nulls the dict and returns false.
"""
for user, timestamp in self.typing.iteritems():
if timestamp + 4 > time.time():
return True
if len(self.typing) > 0:
self.typing = {}
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
return False
def get_typing_list(self):
"""
Returns the names of everyone in the channel who is currently typing.
"""
typing = []
        # iterate over a copy (items() instead of iteritems()) so stale
        # entries can be deleted from self.typing inside the loop
        for user, timestamp in self.typing.items():
if timestamp + 4 > time.time():
typing.append(user)
else:
del self.typing[user]
return typing
def mark_read(self, ts=None, update_remote=True, force=False):
if not ts:
ts = next(self.main_message_keys_reversed(), SlackTS())
if self.new_messages or force:
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
if update_remote:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["mark"], {"channel": self.identifier, "ts": ts}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.new_messages = False
def user_joined(self, user_id):
# ugly hack - for some reason this gets turned into a list
self.members = set(self.members)
self.members.add(user_id)
self.update_nicklist(user_id)
def user_left(self, user_id):
self.members.discard(user_id)
self.update_nicklist(user_id)
def update_nicklist(self, user=None):
if not self.channel_buffer:
return
if self.type not in ["channel", "group", "mpim"]:
return
w.buffer_set(self.channel_buffer, "nicklist", "1")
# create nicklists for the current channel if they don't exist
# if they do, use the existing pointer
here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
if not here:
here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
if not afk:
afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
if user and len(self.members) < 1000:
user = self.team.users[user]
if user.deleted:
return
nick = w.nicklist_search_nick(self.channel_buffer, "", user.name)
# since this is a change just remove it regardless of where it is
w.nicklist_remove_nick(self.channel_buffer, nick)
# now add it back in to whichever..
nick_group = afk
if self.team.is_user_present(user.identifier):
nick_group = here
if user.identifier in self.members:
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
# if we didn't get a user, build a complete list. this is expensive.
else:
if len(self.members) < 1000:
try:
for user in self.members:
user = self.team.users[user]
if user.deleted:
continue
nick_group = afk
if self.team.is_user_present(user.identifier):
nick_group = here
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
except Exception as e:
dbg("DEBUG: {} {} {}".format(self.identifier, self.name, decode_from_utf8(e)))
else:
w.nicklist_remove_all(self.channel_buffer)
for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
def hash_message(self, ts):
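        # Assigns the message a short sha1-based hash so other commands can
        # refer to it. Hashes start at 3 hex chars and grow until unique
        # within this channel; a colliding older message is re-hashed below.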
ts = SlackTS(ts)
def calc_hash(msg):
return sha.sha(str(msg.ts)).hexdigest()
if ts in self.messages and not self.messages[ts].hash:
message = self.messages[ts]
tshash = calc_hash(message)
hl = 3
shorthash = tshash[:hl]
while any(x.startswith(shorthash) for x in self.hashed_messages):
hl += 1
shorthash = tshash[:hl]
if shorthash[:-1] in self.hashed_messages:
col_msg = self.hashed_messages.pop(shorthash[:-1])
col_new_hash = calc_hash(col_msg)[:hl]
col_msg.hash = col_new_hash
self.hashed_messages[col_new_hash] = col_msg
self.change_message(str(col_msg.ts))
if col_msg.thread_channel:
col_msg.thread_channel.rename()
self.hashed_messages[shorthash] = message
message.hash = shorthash
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
self.topic = create_user_status_string(users[dmuser].profile)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
else:
self.color = ""
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
if config.colorize_private_chats and enable_color:
print_color = self.color
else:
print_color = ""
if not present:
prepend = " "
else:
prepend = "+"
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return print_color + select[style]
def open(self, update_remote=True):
self.create_buffer()
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def refresh(self):
return self.rename()
class SlackGroupChannel(SlackChannel):
"""
A group channel is a private discussion group.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
self.type = "group"
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = config.group_name_prefix + slack_name
class SlackMPDMChannel(SlackChannel):
"""
An MPDM channel is a special instance of a 'group' channel.
We change the name to look less terrible in weechat.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
n = kwargs.get('name')
self.set_name(n)
self.type = "mpim"
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote and 'join' in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]['join'], {'users': ','.join(self.members)}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
@staticmethod
def adjust_name(n):
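        # A raw MPDM slack_name looks roughly like "mpdm-user1--user2--user3-1":
        # strip the leading "mpdm-" and trailing counter, then join the user
        # names with "|", e.g. -> "user1|user2|user3"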
return "|".join("-".join(n.split("-")[1:-1]).split("--"))
def set_name(self, n):
self.name = self.adjust_name(n)
def formatted_name(self, style="default", typing=False, **kwargs):
adjusted_name = self.adjust_name(self.slack_name)
if typing and config.channel_name_typing_indicator:
prepend = ">"
else:
prepend = "@"
select = {
"default": adjusted_name,
"sidebar": prepend + adjusted_name,
"base": adjusted_name,
"long_default": "{}.{}".format(self.team.preferred_name, adjusted_name),
"long_base": "{}.{}".format(self.team.preferred_name, adjusted_name),
}
return select[style]
def rename(self):
pass
class SlackThreadChannel(object):
"""
A thread channel is a virtual channel. We don't inherit from
SlackChannel, because most of how it operates will be different.
"""
def __init__(self, eventrouter, parent_message):
self.eventrouter = eventrouter
self.parent_message = parent_message
self.channel_buffer = None
# self.identifier = ""
# self.name = "#" + kwargs['name']
self.type = "thread"
self.got_history = False
self.label = None
self.members = self.parent_message.channel.members
self.team = self.parent_message.team
def formatted_name(self, style="default", **kwargs):
hash_or_ts = self.parent_message.hash or self.parent_message.ts
styles = {
"default": " +{}".format(hash_or_ts),
"long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
"sidebar": " +{}".format(hash_or_ts),
}
return styles[style]
def refresh(self):
self.rename()
def mark_read(self, ts=None, update_remote=True, force=False):
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
def buffer_prnt(self, nick, text, timestamp, **kwargs):
data = "{}\t{}".format(format_nick(nick), text)
ts = SlackTS(timestamp)
if self.channel_buffer:
            # TODO: thread buffers don't yet distinguish backlog/dm messages
            # the way SlackChannel.buffer_prnt does; everything gets the
            # default tagset for now
            tags = tag("default")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_print_time(self.channel_buffer, ts.minorstr(), ts.major)
def get_history(self):
self.got_history = True
        for message in self.parent_message.submessages:
            text = message.render()
            suffix = ''
            if 'edited' in message.message_json:
                suffix = ' (edited)'
            self.buffer_prnt(message.sender, text + suffix, message.ts)
def send_message(self, message):
message = linkify_text(message, self.team, self)
dbg(message)
request = {"type": "message", "channel": self.parent_message.channel.identifier, "text": message, "_team": self.team.team_hash, "user": self.team.myidentifier, "thread_ts": str(self.parent_message.ts)}
self.team.send_to_websocket(request)
self.mark_read(update_remote=False, force=True)
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
# if "info" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
# if update_remote:
# if "join" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"name": self.name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
self.create_buffer()
def rename(self):
if self.channel_buffer and not self.label:
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
def create_buffer(self):
"""
incomplete (muted doesn't work)
Creates the weechat buffer where the thread magic happens.
"""
if not self.channel_buffer:
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.parent_message.render() )
w.buffer_set(self.channel_buffer, "title", topic)
def destroy_buffer(self, update_remote):
if self.channel_buffer is not None:
self.channel_buffer = None
self.got_history = False
self.active = False
class SlackUser(object):
"""
    Represents an individual slack user. Also where you set their name formatting.
"""
def __init__(self, **kwargs):
# We require these two things for a valid object,
# the rest we can just learn from slack
self.identifier = kwargs["id"]
self.profile = {} # in case it's not in kwargs
for key, value in kwargs.items():
setattr(self, key, value)
if self.profile.get("display_name"):
self.slack_name = self.profile["display_name"]
self.name = self.profile["display_name"].replace(' ', '')
else:
# No display name set. Fall back to the deprecated username field.
self.slack_name = kwargs["name"]
self.name = self.slack_name
self.update_color()
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def force_color(self, color_name):
self.color_name = color_name
self.color = w.color(self.color_name)
def update_color(self):
# This will automatically be none/"" if the user has disabled nick
# colourization.
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
def update_status(self, status_emoji, status_text):
self.profile["status_emoji"] = status_emoji
self.profile["status_text"] = status_text
def formatted_name(self, prepend="", enable_color=True):
if enable_color:
return self.color + prepend + self.name
else:
return prepend + self.name
class SlackBot(SlackUser):
"""
    Basically the same as a user, but kept separate so bots can be
    identified and handled differently in the future.
"""
def __init__(self, **kwargs):
super(SlackBot, self).__init__(**kwargs)
class SlackMessage(object):
"""
Represents a single slack message and associated context/metadata.
These are modifiable and can be rerendered to change a message,
delete a message, add a reaction, add a thread.
Note: these can't be tied to a SlackUser object because users
can be deleted, so we have to store sender in each one.
"""
def __init__(self, message_json, team, channel, override_sender=None):
self.team = team
self.channel = channel
self.message_json = message_json
self.submessages = []
self.thread_channel = None
self.hash = None
if override_sender:
self.sender = override_sender
self.sender_plain = override_sender
else:
senders = self.get_sender()
self.sender, self.sender_plain = senders[0], senders[1]
self.suffix = ''
self.ts = SlackTS(message_json['ts'])
text = self.message_json.get('text')
if text and text.startswith('_') and text.endswith('_') and 'subtype' not in message_json:
message_json['text'] = text[1:-1]
message_json['subtype'] = 'me_message'
if message_json.get('subtype') == 'me_message' and not message_json['text'].startswith(self.sender):
message_json['text'] = self.sender + ' ' + self.message_json['text']
def __hash__(self):
return hash(self.ts)
def render(self, force=False):
if len(self.submessages) > 0:
return "{} {} {}".format(render(self.message_json, self.team, self.channel, force), self.suffix, "{}[ Thread: {} Replies: {} ]".format(w.color(config.thread_suffix_color), self.hash or self.ts, len(self.submessages)))
return "{} {}".format(render(self.message_json, self.team, self.channel, force), self.suffix)
def change_text(self, new_text):
self.message_json["text"] = new_text
dbg(self.message_json)
def change_suffix(self, new_suffix):
self.suffix = new_suffix
dbg(self.message_json)
def get_sender(self):
name = ""
name_plain = ""
if 'user' in self.message_json:
if self.message_json['user'] == self.team.myidentifier:
u = self.team.users[self.team.myidentifier]
elif self.message_json['user'] in self.team.users:
u = self.team.users[self.message_json['user']]
name = "{}".format(u.formatted_name())
name_plain = "{}".format(u.formatted_name(enable_color=False))
elif 'username' in self.message_json:
u = self.message_json["username"]
if self.message_json.get("subtype") == "bot_message":
name = "{} :]".format(u)
name_plain = "{}".format(u)
else:
name = "-{}-".format(u)
name_plain = "{}".format(u)
elif 'service_name' in self.message_json:
name = "-{}-".format(self.message_json["service_name"])
name_plain = "{}".format(self.message_json["service_name"])
elif self.message_json.get('bot_id') in self.team.bots:
name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
else:
name = ""
name_plain = ""
return (name, name_plain)
def add_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
found = False
for r in m:
if r["name"] == reaction and user not in r["users"]:
r["users"].append(user)
found = True
if not found:
self.message_json["reactions"].append({"name": reaction, "users": [user]})
else:
self.message_json["reactions"] = [{"name": reaction, "users": [user]}]
def remove_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
for r in m:
if r["name"] == reaction and user in r["users"]:
r["users"].remove(user)
else:
pass
class SlackThreadMessage(SlackMessage):
def __init__(self, parent_id, *args):
super(SlackThreadMessage, self).__init__(*args)
self.parent_id = parent_id
class WeeSlackMetadata(object):
"""
A simple container that we pickle/unpickle to hold data.
"""
def __init__(self, meta):
self.meta = meta
def jsonify(self):
return self.meta
class SlackTS(object):
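    """
    Wraps a Slack message timestamp such as "1483051909.018632" (value
    illustrative): major is the unix epoch second before the dot, minor is
    the per-second sequence number after it. Instances sort and hash like
    the underlying timestamp; SlackTS() with no argument means "now".
    """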
def __init__(self, ts=None):
if ts:
self.major, self.minor = [int(x) for x in ts.split('.', 1)]
else:
self.major = int(time.time())
self.minor = 0
def __cmp__(self, other):
if isinstance(other, SlackTS):
if self.major < other.major:
return -1
elif self.major > other.major:
return 1
elif self.major == other.major:
if self.minor < other.minor:
return -1
elif self.minor > other.minor:
return 1
else:
return 0
else:
s = self.__str__()
if s < other:
return -1
elif s > other:
return 1
elif s == other:
return 0
def __hash__(self):
return hash("{}.{}".format(self.major, self.minor))
def __repr__(self):
return str("{0}.{1:06d}".format(self.major, self.minor))
def split(self, *args, **kwargs):
return [self.major, self.minor]
def majorstr(self):
return str(self.major)
def minorstr(self):
return str(self.minor)
###### New handlers
def handle_rtmstart(login_data, eventrouter):
"""
This handles the main entry call to slack, rtm.start
"""
metadata = pickle.loads(login_data["wee_slack_request_metadata"])
if not login_data["ok"]:
w.prnt("", "ERROR: Failed connecting to Slack with token starting with {}: {}"
.format(metadata.token[:15], login_data["error"]))
return
# Let's reuse a team if we have it already.
th = SlackTeam.generate_team_hash(login_data['self']['name'], login_data['team']['domain'])
if not eventrouter.teams.get(th):
users = {}
for item in login_data["users"]:
users[item["id"]] = SlackUser(**item)
bots = {}
for item in login_data["bots"]:
bots[item["id"]] = SlackBot(**item)
channels = {}
for item in login_data["channels"]:
channels[item["id"]] = SlackChannel(eventrouter, **item)
for item in login_data["ims"]:
channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
for item in login_data["groups"]:
if item["name"].startswith('mpdm-'):
channels[item["id"]] = SlackMPDMChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
t = SlackTeam(
eventrouter,
metadata.token,
login_data['url'],
login_data["team"]["domain"],
login_data["self"]["name"],
login_data["self"]["id"],
users,
bots,
channels,
muted_channels=login_data["self"]["prefs"]["muted_channels"],
highlight_words=login_data["self"]["prefs"]["highlight_words"],
)
eventrouter.register_team(t)
else:
t = eventrouter.teams.get(th)
t.set_reconnect_url(login_data['url'])
t.connect()
t.buffer_prnt('Connected to Slack')
t.buffer_prnt('{:<20} {}'.format("Websocket URL", login_data["url"]))
t.buffer_prnt('{:<20} {}'.format("User name", login_data["self"]["name"]))
t.buffer_prnt('{:<20} {}'.format("User ID", login_data["self"]["id"]))
t.buffer_prnt('{:<20} {}'.format("Team name", login_data["team"]["name"]))
t.buffer_prnt('{:<20} {}'.format("Team domain", login_data["team"]["domain"]))
t.buffer_prnt('{:<20} {}'.format("Team id", login_data["team"]["id"]))
dbg("connected to {}".format(t.domain))
def handle_emojilist(emoji_json, eventrouter, **kwargs):
if emoji_json["ok"]:
request_metadata = pickle.loads(emoji_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
team.emoji_completions.extend(emoji_json["emoji"].keys())
def handle_channelsinfo(channel_json, eventrouter, **kwargs):
request_metadata = pickle.loads(channel_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
channel.set_unread_count_display(channel_json['channel']['unread_count_display'])
channel.set_members(channel_json['channel']['members'])
def handle_groupsinfo(group_json, eventrouter, **kwargs):
request_metadata = pickle.loads(group_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
group = team.channels[request_metadata.channel_identifier]
unread_count_display = group_json['group']['unread_count_display']
group.set_unread_count_display(unread_count_display)
def handle_conversationsopen(conversation_json, eventrouter, object_name='channel', **kwargs):
request_metadata = pickle.loads(conversation_json["wee_slack_request_metadata"])
# Set unread count if the channel isn't new (channel_identifier exists)
if hasattr(request_metadata, 'channel_identifier'):
channel_id = request_metadata.channel_identifier
team = eventrouter.teams[request_metadata.team_hash]
conversation = team.channels[channel_id]
unread_count_display = conversation_json[object_name]['unread_count_display']
conversation.set_unread_count_display(unread_count_display)
def handle_mpimopen(mpim_json, eventrouter, object_name='group', **kwargs):
handle_conversationsopen(mpim_json, eventrouter, object_name, **kwargs)
def handle_groupshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_channelshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_imhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_mpimhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_history(message_json, eventrouter, **kwargs):
request_metadata = pickle.loads(message_json["wee_slack_request_metadata"])
kwargs['team'] = eventrouter.teams[request_metadata.team_hash]
kwargs['channel'] = kwargs['team'].channels[request_metadata.channel_identifier]
    clear = getattr(request_metadata, 'clear', False)
dbg(clear)
kwargs['output_type'] = "backlog"
if clear:
w.buffer_clear(kwargs['channel'].channel_buffer)
for message in reversed(message_json["messages"]):
process_message(message, eventrouter, **kwargs)
###### New/converted process_ and subprocess_ methods
def process_hello(message_json, eventrouter, **kwargs):
kwargs['team'].subscribe_users_presence()
def process_reconnect_url(message_json, eventrouter, **kwargs):
kwargs['team'].set_reconnect_url(message_json['url'])
def process_manual_presence_change(message_json, eventrouter, **kwargs):
process_presence_change(message_json, eventrouter, **kwargs)
def process_presence_change(message_json, eventrouter, **kwargs):
if "user" in kwargs:
# TODO: remove once it's stable
user = kwargs["user"]
team = kwargs["team"]
team.update_member_presence(user, message_json["presence"])
if "users" in message_json:
team = kwargs["team"]
for user_id in message_json["users"]:
user = team.users[user_id]
team.update_member_presence(user, message_json["presence"])
def process_pref_change(message_json, eventrouter, **kwargs):
team = kwargs["team"]
if message_json['name'] == 'muted_channels':
team.set_muted_channels(message_json['value'])
elif message_json['name'] == 'highlight_words':
team.set_highlight_words(message_json['value'])
else:
dbg("Preference change not implemented: {}\n".format(message_json['name']))
def process_user_change(message_json, eventrouter, **kwargs):
"""
Currently only used to update status, but lots here we could do.
"""
user = message_json['user']
profile = user.get("profile")
team = kwargs["team"]
team.users[user["id"]].update_status(profile.get("status_emoji"), profile.get("status_text"))
dmchannel = team.find_channel_by_members({user["id"]}, channel_type='im')
if dmchannel:
dmchannel.set_topic(create_user_status_string(profile))
def process_user_typing(message_json, eventrouter, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
if channel:
channel.set_typing(team.users.get(message_json["user"]).name)
w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, **kwargs):
user = message_json['user']
team = kwargs["team"]
team.users[user["id"]] = SlackUser(**user)
def process_pong(message_json, eventrouter, **kwargs):
pass
def process_message(message_json, eventrouter, store=True, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
# send these subtype messages elsewhere
known_subtypes = [
'thread_message',
'message_replied',
'message_changed',
'message_deleted',
'channel_join',
'channel_leave',
'channel_topic',
# 'group_join',
# 'group_leave',
]
if "thread_ts" in message_json and "reply_count" not in message_json:
message_json["subtype"] = "thread_message"
subtype = message_json.get("subtype", None)
if subtype and subtype in known_subtypes:
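        # subtype was validated against known_subtypes above, so this eval
        # can only resolve to one of the subprocess_* handlers defined below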
f = eval('subprocess_' + subtype)
f(message_json, eventrouter, channel, team)
else:
message = SlackMessage(message_json, team, channel)
text = message.render()
dbg("Rendered message: %s" % text)
dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
# Handle actions (/me).
# We don't use `subtype` here because creating the SlackMessage may
# have changed the subtype based on the detected message contents.
if message.message_json.get('subtype') == 'me_message':
try:
channel.unread_count_display += 1
except:
channel.unread_count_display = 1
channel.buffer_prnt(w.prefix("action").rstrip(), text, message.ts, tag_nick=message.sender_plain, **kwargs)
else:
suffix = ''
if 'edited' in message_json:
suffix = ' (edited)'
try:
channel.unread_count_display += 1
except:
channel.unread_count_display = 1
channel.buffer_prnt(message.sender, text + suffix, message.ts, tag_nick=message.sender_plain, **kwargs)
if store:
channel.store_message(message, team)
dbg("NORMAL REPLY {}".format(message_json))
def subprocess_thread_message(message_json, eventrouter, channel, team):
# print ("THREADED: " + str(message_json))
parent_ts = message_json.get('thread_ts', None)
if parent_ts:
parent_message = channel.messages.get(SlackTS(parent_ts), None)
if parent_message:
message = SlackThreadMessage(parent_ts, message_json, team, channel)
parent_message.submessages.append(message)
channel.hash_message(parent_ts)
channel.store_message(message, team)
channel.change_message(parent_ts)
text = message.render()
if parent_message.thread_channel:
parent_message.thread_channel.buffer_prnt(message.sender, text, message.ts)
# channel = channels.find(message_json["channel"])
# server = channel.server
# #threadinfo = channel.get_message(message_json["thread_ts"])
# message = Message(message_json, server=server, channel=channel)
# dbg(message, main_buffer=True)
#
# orig = channel.get_message(message_json['thread_ts'])
# if orig[0]:
# channel.get_message(message_json['thread_ts'])[2].add_thread_message(message)
# else:
# dbg("COULDN'T find orig message {}".format(message_json['thread_ts']), main_buffer=True)
# if threadinfo[0]:
# channel.messages[threadinfo[1]].become_thread()
# message_json["item"]["ts"], message_json)
# channel.change_message(message_json["thread_ts"], None, message_json["text"])
# channel.become_thread(message_json["item"]["ts"], message_json)
def subprocess_channel_join(message_json, eventrouter, channel, team):
joinprefix = w.prefix("join")
message = SlackMessage(message_json, team, channel, override_sender=joinprefix)
channel.buffer_prnt(joinprefix, message.render(), message_json["ts"], tagset='joinleave')
channel.user_joined(message_json['user'])
def subprocess_channel_leave(message_json, eventrouter, channel, team):
leaveprefix = w.prefix("quit")
message = SlackMessage(message_json, team, channel, override_sender=leaveprefix)
channel.buffer_prnt(leaveprefix, message.render(), message_json["ts"], tagset='joinleave')
channel.user_left(message_json['user'])
def subprocess_message_replied(message_json, eventrouter, channel, team):
pass
def subprocess_message_changed(message_json, eventrouter, channel, team):
m = message_json.get("message", None)
if m:
new_message = m
if "attachments" in m:
message_json["attachments"] = m["attachments"]
if "text" in m:
if "text" in message_json:
message_json["text"] += m["text"]
dbg("added text!")
else:
message_json["text"] = m["text"]
if "fallback" in m:
if "fallback" in message_json:
message_json["fallback"] += m["fallback"]
else:
message_json["fallback"] = m["fallback"]
new_message["text"] += unwrap_attachments(message_json, new_message["text"])
if "edited" in new_message:
channel.change_message(new_message["ts"], new_message["text"], ' (edited)')
else:
channel.change_message(new_message["ts"], new_message["text"])
def subprocess_message_deleted(message_json, eventrouter, channel, team):
channel.change_message(message_json["deleted_ts"], "(deleted)", '')
def subprocess_channel_topic(message_json, eventrouter, channel, team):
text = unhtmlescape(unfurl_refs(message_json["text"], ignore_alt_text=False))
channel.buffer_prnt(w.prefix("network").rstrip(), text, message_json["ts"], tagset="muted")
channel.set_topic(unhtmlescape(message_json["topic"]))
def process_reply(message_json, eventrouter, **kwargs):
dbg('processing reply')
team = kwargs["team"]
identifier = message_json["reply_to"]
try:
original_message_json = team.ws_replies[identifier]
del team.ws_replies[identifier]
if "ts" in message_json:
original_message_json["ts"] = message_json["ts"]
else:
dbg("no reply ts {}".format(message_json))
c = original_message_json.get('channel', None)
channel = team.channels[c]
m = SlackMessage(original_message_json, team, channel)
# if "type" in message_json:
# if message_json["type"] == "message" and "channel" in message_json.keys():
# message_json["ts"] = message_json["ts"]
# channels.find(message_json["channel"]).store_message(m, from_me=True)
# channels.find(message_json["channel"]).buffer_prnt(server.nick, m.render(), m.ts)
process_message(m.message_json, eventrouter, channel=channel, team=team)
channel.mark_read(update_remote=True, force=True)
dbg("REPLY {}".format(message_json))
except KeyError:
dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, **kwargs):
"""
complete
"""
channel = kwargs["channel"]
ts = message_json.get("ts", None)
if ts:
channel.mark_read(ts=ts, force=True, update_remote=False)
else:
dbg("tried to mark something weird {}".format(message_json))
def process_group_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_im_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_mpim_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_channel_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
kwargs['team'].channels[item["id"]].update_from_message_json(item)
kwargs['team'].channels[item["id"]].open()
def process_channel_created(message_json, eventrouter, **kwargs):
item = message_json["channel"]
c = SlackChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].buffer_prnt('Channel created: {}'.format(c.slack_name))
def process_channel_rename(message_json, eventrouter, **kwargs):
item = message_json["channel"]
channel = kwargs['team'].channels[item["id"]]
channel.slack_name = message_json['channel']['name']
def process_im_created(message_json, eventrouter, **kwargs):
team = kwargs['team']
item = message_json["channel"]
c = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
team.channels[item["id"]] = c
kwargs['team'].buffer_prnt('IM channel created: {}'.format(c.name))
def process_im_open(message_json, eventrouter, **kwargs):
channel = kwargs['channel']
item = message_json
kwargs['team'].channels[item["channel"]].check_should_open(True)
w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, **kwargs):
item = message_json
cbuf = kwargs['team'].channels[item["channel"]].channel_buffer
eventrouter.weechat_controller.unregister_buffer(cbuf, False, True)
def process_group_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
if item["name"].startswith("mpdm-"):
c = SlackMPDMChannel(eventrouter, team=kwargs["team"], **item)
else:
c = SlackGroupChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].channels[item["id"]].open()
def process_reaction_added(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.add_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels.get(message_json["item"].get("channel"))
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.remove_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("Reaction to item type not supported: " + str(message_json))
def process_emoji_changed(message_json, eventrouter, **kwargs):
team = kwargs['team']
team.load_emoji_completions()
###### New module/global methods
def render_formatting(text):
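    # Renders Slack-style markup with weechat colors: "*bold*" and "_italic_"
    # get wrapped in the colors named by render_bold_as / render_italic_as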
text = re.sub(r'(^| )\*([^*]+)\*([^a-zA-Z0-9_]|$)',
r'\1{}\2{}\3'.format(w.color(config.render_bold_as),
w.color('-' + config.render_bold_as)),
text)
text = re.sub(r'(^| )_([^_]+)_([^a-zA-Z0-9_]|$)',
r'\1{}\2{}\3'.format(w.color(config.render_italic_as),
w.color('-' + config.render_italic_as)),
text)
return text
def render(message_json, team, channel, force=False):
# If we already have a rendered version in the object, just return that.
if not force and message_json.get("_rendered_text", ""):
return message_json["_rendered_text"]
else:
# server = servers.find(message_json["_server"])
if "fallback" in message_json:
text = message_json["fallback"]
elif "text" in message_json:
if message_json['text'] is not None:
text = message_json["text"]
else:
text = ""
else:
text = ""
text = unfurl_refs(text)
text += unfurl_refs(unwrap_attachments(message_json, text))
text = text.lstrip()
text = unhtmlescape(text.replace("\t", " "))
if message_json.get('mrkdwn', True):
text = render_formatting(text)
text += create_reaction_string(message_json.get("reactions", ""))
message_json["_rendered_text"] = text
return text
def linkify_text(message, team, channel):
# The get_username_map function is a bit heavy, but this whole
# function is only called on message send..
usernames = team.get_username_map()
channels = team.get_channel_map()
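    # e.g. "@alice hi" -> "<@U12345> hi" and "#general" -> "<#C67890|general>"
    # (the IDs here are illustrative)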
message = (message
# Replace IRC formatting chars with Slack formatting chars.
.replace('\x02', '*')
.replace('\x1D', '_')
.replace('\x1F', config.map_underline_to)
# Escape chars that have special meaning to Slack. Note that we do not
# (and should not) perform full HTML entity-encoding here.
# See https://api.slack.com/docs/message-formatting for details.
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.split(' '))
for item in enumerate(message):
targets = re.match('^\s*([@#])([\w.-]+[\w. -])(\W*)', item[1])
if targets and targets.groups()[0] == '@':
named = targets.groups()
if named[1] in ["group", "channel", "here"]:
message[item[0]] = "<!{}>".format(named[1])
else:
try:
if usernames[named[1]]:
message[item[0]] = "<@{}>{}".format(usernames[named[1]], named[2])
except:
message[item[0]] = "@{}{}".format(named[1], named[2])
if targets and targets.groups()[0] == '#':
named = targets.groups()
try:
if channels[named[1]]:
message[item[0]] = "<#{}|{}>{}".format(channels[named[1]], named[1], named[2])
except:
message[item[0]] = "#{}{}".format(named[1], named[2])
# dbg(message)
return " ".join(message)
def unfurl_refs(text, ignore_alt_text=None, auto_link_display=None):
"""
    input : <@U096Q7CQM|someuser> has joined the channel
    output : someuser has joined the channel
"""
# Find all strings enclosed by <>
# - <https://example.com|example with spaces>
# - <#C2147483705|#otherchannel>
# - <@U2147483697|@othernick>
# Test patterns lives in ./_pytest/test_unfurl.py
if ignore_alt_text is None:
ignore_alt_text = config.unfurl_ignore_alt_text
if auto_link_display is None:
auto_link_display = config.unfurl_auto_link_display
matches = re.findall(r"(<[@#]?(?:[^>]*)>)", text)
for m in matches:
# Replace them with human readable strings
text = text.replace(
m, unfurl_ref(m[1:-1], ignore_alt_text, auto_link_display))
return text
def unfurl_ref(ref, ignore_alt_text, auto_link_display):
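    # e.g. "#C2147483705|otherchannel" -> "#otherchannel" and
    # "https://example.com|some text" -> "https://example.com (some text)",
    # depending on the ignore_alt_text / auto_link_display settings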
id = ref.split('|')[0]
display_text = ref
if ref.find('|') > -1:
if ignore_alt_text:
display_text = resolve_ref(id)
else:
if id.startswith("#C"):
display_text = "#{}".format(ref.split('|')[1])
elif id.startswith("@U"):
display_text = ref.split('|')[1]
else:
url, desc = ref.split('|', 1)
match_url = r"^\w+:(//)?{}$".format(re.escape(desc))
url_matches_desc = re.match(match_url, url)
if url_matches_desc and auto_link_display == "text":
display_text = desc
elif url_matches_desc and auto_link_display == "url":
display_text = url
else:
display_text = "{} ({})".format(url, desc)
else:
display_text = resolve_ref(ref)
return display_text
def unhtmlescape(text):
return text.replace("<", "<") \
.replace(">", ">") \
.replace("&", "&")
def unwrap_attachments(message_json, text_before):
text_before_unescaped = unhtmlescape(text_before)
attachment_texts = []
a = message_json.get("attachments", None)
if a:
if text_before:
attachment_texts.append('')
for attachment in a:
# Attachments should be rendered roughly like:
#
# $pretext
# $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
# $author: (if no $author on previous line) $text
# $fields
t = []
prepend_title_text = ''
if 'author_name' in attachment:
prepend_title_text = attachment['author_name'] + ": "
if 'pretext' in attachment:
t.append(attachment['pretext'])
title = attachment.get('title', None)
title_link = attachment.get('title_link', '')
if title_link in text_before_unescaped:
title_link = ''
if title and title_link:
t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
prepend_title_text = ''
elif title and not title_link:
t.append('%s%s' % (prepend_title_text, title,))
prepend_title_text = ''
from_url = attachment.get('from_url', '')
if from_url not in text_before_unescaped and from_url != title_link:
t.append(from_url)
atext = attachment.get("text", None)
if atext:
tx = re.sub(r' *\n[\n ]+', '\n', atext)
t.append(prepend_title_text + tx)
prepend_title_text = ''
fields = attachment.get("fields", None)
if fields:
for f in fields:
if f['title'] != '':
t.append('%s %s' % (f['title'], f['value'],))
else:
t.append(f['value'])
fallback = attachment.get("fallback", None)
if t == [] and fallback:
t.append(fallback)
attachment_texts.append("\n".join([x.strip() for x in t if x]))
return "\n".join(attachment_texts)
def resolve_ref(ref):
# TODO: This hack to use eventrouter needs to go
# this resolver should probably move to the slackteam or eventrouter itself
# global EVENTROUTER
if 'EVENTROUTER' in globals():
e = EVENTROUTER
        if ref.startswith('@U') or ref.startswith('@W'):
            for t in e.teams.keys():
                if ref[1:] in e.teams[t].users:
                    return "@{}".format(e.teams[t].users[ref[1:]].name)
        elif ref.startswith('#C'):
            for t in e.teams.keys():
                if ref[1:] in e.teams[t].channels:
                    return "{}".format(e.teams[t].channels[ref[1:]].name)
# Something else, just return as-is
return ref
def create_user_status_string(profile):
real_name = profile.get("real_name")
status_emoji = profile.get("status_emoji")
status_text = profile.get("status_text")
if status_emoji or status_text:
return "{} | {} {}".format(real_name, status_emoji, status_text)
else:
return real_name
def create_reaction_string(reactions):
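    # e.g. [{"name": "thumbsup", "users": ["U1", "U2"]}] renders as
    # " [:thumbsup:2]", or with nicks instead of counts when
    # show_reaction_nicks is enabled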
count = 0
if not isinstance(reactions, list):
reaction_string = " [{}]".format(reactions)
else:
reaction_string = ' ['
for r in reactions:
if len(r["users"]) > 0:
count += 1
if config.show_reaction_nicks:
nicks = [resolve_ref("@{}".format(user)) for user in r["users"]]
users = "({})".format(",".join(nicks))
else:
users = len(r["users"])
reaction_string += ":{}:{} ".format(r["name"], users)
reaction_string = reaction_string[:-1] + ']'
if count == 0:
reaction_string = ''
return reaction_string
def modify_buffer_line(buffer, new_line, timestamp, time_id):
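    """
    Finds the already printed buffer lines that belong to the message with
    the given timestamp and per-message id, and replaces their text with
    new_line (used when a message is edited, deleted or gains a reaction).
    """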
# get a pointer to this buffer's lines
own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
if own_lines:
# get a pointer to the last line
line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
# hold the structure of a line and of line data
struct_hdata_line = w.hdata_get('line')
struct_hdata_line_data = w.hdata_get('line_data')
# keep track of the number of lines with the matching time and id
number_of_matching_lines = 0
while line_pointer:
# get a pointer to the data in line_pointer via layout of struct_hdata_line
data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
if data:
line_timestamp = w.hdata_time(struct_hdata_line_data, data, 'date')
line_time_id = w.hdata_integer(struct_hdata_line_data, data, 'date_printed')
# prefix = w.hdata_string(struct_hdata_line_data, data, 'prefix')
if timestamp == int(line_timestamp) and int(time_id) == line_time_id:
number_of_matching_lines += 1
elif number_of_matching_lines > 0:
# since number_of_matching_lines is non-zero, we have
# already reached the message and can stop traversing
break
else:
dbg(('Encountered line without any data while trying to modify '
'line. This is not handled, so aborting modification.'))
return w.WEECHAT_RC_ERROR
# move backwards one line and try again - exit the while if you hit the end
line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
# split the message into at most the number of existing lines
lines = new_line.split('\n', number_of_matching_lines - 1)
# updating a line with a string containing newlines causes the lines to
# be broken when viewed in bare display mode
lines = [line.replace('\n', ' | ') for line in lines]
# pad the list with empty strings until the number of elements equals
# number_of_matching_lines
lines += [''] * (number_of_matching_lines - len(lines))
if line_pointer:
for line in lines:
line_pointer = w.hdata_move(struct_hdata_line, line_pointer, 1)
data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
w.hdata_update(struct_hdata_line_data, data, {"message": line})
return w.WEECHAT_RC_OK
def modify_print_time(buffer, new_id, time):
"""
This overloads the time printed field to let us store the slack
per message unique id that comes after the "." in a slack ts
"""
# get a pointer to this buffer's lines
own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
if own_lines:
# get a pointer to the last line
line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
# hold the structure of a line and of line data
struct_hdata_line = w.hdata_get('line')
struct_hdata_line_data = w.hdata_get('line_data')
prefix = ''
while not prefix and line_pointer:
# get a pointer to the data in line_pointer via layout of struct_hdata_line
data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
if data:
prefix = w.hdata_string(struct_hdata_line_data, data, 'prefix')
w.hdata_update(struct_hdata_line_data, data, {"date_printed": new_id})
else:
dbg('Encountered line without any data while setting message id.')
return w.WEECHAT_RC_ERROR
# move backwards one line and repeat, so all the lines of the message are set
# exit when you reach a prefix, which means you have reached the
# first line of the message, or if you hit the end
line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
return w.WEECHAT_RC_OK
def format_nick(nick):
nick_prefix = w.config_string(w.config_get('weechat.look.nick_prefix'))
nick_prefix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_prefix'))
nick_prefix_color = w.color(nick_prefix_color_name)
nick_suffix = w.config_string(w.config_get('weechat.look.nick_suffix'))
    nick_suffix_color_name = w.config_string(w.config_get('weechat.color.chat_nick_suffix'))
nick_suffix_color = w.color(nick_suffix_color_name)
return nick_prefix_color + nick_prefix + w.color("reset") + nick + nick_suffix_color + nick_suffix + w.color("reset")
def tag(tagset, user=None):
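    # e.g. tag("dm", user="some user") ->
    # "nick_some_user,slack_dm,irc_privmsg,notify_private,log1"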
if user:
default_tag = "nick_" + user.replace(" ", "_")
else:
default_tag = 'nick_unknown'
tagsets = {
# messages in the team/server buffer, e.g. "new channel created"
"team": "no_highlight,log3",
# when replaying something old
"backlog": "irc_privmsg,no_highlight,notify_none,logger_backlog",
# when posting messages to a muted channel
"muted": "irc_privmsg,no_highlight,notify_none,log1",
# when receiving a direct message
"dm": "irc_privmsg,notify_private,log1",
"dmfromme": "irc_privmsg,no_highlight,notify_none,log1",
# when this is a join/leave, attach for smart filter ala:
# if user in [x.strip() for x in w.prefix("join"), w.prefix("quit")]
"joinleave": "irc_smart_filter,no_highlight,log4",
# catchall ?
"default": "irc_privmsg,notify_message,log1",
}
return "{},slack_{},{}".format(default_tag, tagset, tagsets[tagset])
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
e = EVENTROUTER
args = args.split()
if len(args) > 1:
team = e.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
channel = "".join(args[1:])
if channel in cmap:
buffer_ptr = team.channels[cmap[channel]].channel_buffer
e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
else:
e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
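    # e.g. "/topic #general stuff" -> ("general", "stuff"),
    # "/topic -delete" -> (None, ""), "/topic" -> (None, None)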
args = command.split()[1:]
channel_name = None
topic = None
if args:
if args[0].startswith('#'):
channel_name = args[0][1:]
topic = args[1:]
else:
topic = args
if topic == []:
topic = None
if topic:
topic = ' '.join(topic)
if topic == '-delete':
topic = ''
return channel_name, topic
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
"""
Change the topic of a channel
/topic [<channel>] [<topic>|-delete]
"""
channel_name, topic = parse_topic_command(command)
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if channel_name:
channel = team.channels.get(team.get_channel_map().get(channel_name))
else:
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not channel:
w.prnt(team.channel_buffer, "#{}: No such channel".format(channel_name))
return w.WEECHAT_RC_OK_EAT
if topic is None:
w.prnt(channel.channel_buffer, 'Topic for {} is "{}"'.format(channel.name, channel.topic))
else:
s = SlackRequest(team.token, "channels.setTopic", {"channel": channel.identifier, "topic": topic}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def whois_command_cb(data, current_buffer, command):
"""
Get real name of user
/whois <display_name>
"""
args = command.split()
if len(args) < 2:
w.prnt(current_buffer, "Not enough arguments")
return w.WEECHAT_RC_OK_EAT
user = args[1]
    if user.startswith('@'):
user = user[1:]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
u = team.users.get(team.get_username_map().get(user))
if u:
team.buffer_prnt("[{}]: {}".format(user, u.real_name))
if u.profile.get("status_text"):
team.buffer_prnt("[{}]: {} {}".format(user, u.profile.status_emoji, u.profile.status_text))
team.buffer_prnt("[{}]: Real name: {}".format(user, u.profile.get('real_name_normalized', '')))
team.buffer_prnt("[{}]: Title: {}".format(user, u.profile.get('title', '')))
team.buffer_prnt("[{}]: Email: {}".format(user, u.profile.get('email', '')))
team.buffer_prnt("[{}]: Phone: {}".format(user, u.profile.get('phone', '')))
else:
team.buffer_prnt("[{}]: No such user".format(user))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
message = "_{}_".format(args.split(' ', 1)[1])
buffer_input_callback("EVENTROUTER", current_buffer, message)
return w.WEECHAT_RC_OK_EAT
def command_register(data, current_buffer, args):
CLIENT_ID = "2468770254.51917335286"
CLIENT_SECRET = "dcb7fe380a000cba0cca3169a5fe8d70" # Not really a secret.
if args == 'register':
message = textwrap.dedent("""
#### Retrieving a Slack token via OAUTH ####
1) Paste this into a browser: https://slack.com/oauth/authorize?client_id=2468770254.51917335286&scope=client
2) Select the team you wish to access from wee-slack in your browser.
3) Click "Authorize" in the browser **IMPORTANT: the redirect will fail, this is expected**
4) Copy the "code" portion of the URL to your clipboard
5) Return to weechat and run `/slack register [code]`
""")
w.prnt("", message)
return
try:
_, oauth_code = args.split()
except ValueError:
w.prnt("",
"ERROR: wrong number of arguments given for register command")
return
uri = (
"https://slack.com/api/oauth.access?"
"client_id={}&client_secret={}&code={}"
).format(CLIENT_ID, CLIENT_SECRET, oauth_code)
ret = urllib.urlopen(uri).read()
d = json.loads(ret)
if not d["ok"]:
w.prnt("",
"ERROR: Couldn't get Slack OAuth token: {}".format(d['error']))
return
if config.is_default('slack_api_token'):
w.config_set_plugin('slack_api_token', d['access_token'])
else:
# Add new token to existing set, joined by comma.
tok = config.get_string('slack_api_token')
w.config_set_plugin('slack_api_token',
','.join([tok, d['access_token']]))
w.prnt("", "Success! Added team \"%s\"" % (d['team_name'],))
w.prnt("", "Please reload wee-slack with: /python reload slack")
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
dbg("msg_command_cb")
aargs = args.split(None, 2)
who = aargs[1]
if who == "*":
who = EVENTROUTER.weechat_controller.buffers[current_buffer].slack_name
else:
command_talk(data, current_buffer, "talk " + who)
if len(aargs) > 2:
message = aargs[2]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
if who in cmap:
channel = team.channels[cmap[who]]
channel.send_message(message)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Channels:")
for channel in team.get_channel_map():
team.buffer_prnt(" {}".format(channel))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Users:")
for user in team.users.values():
team.buffer_prnt(" {:<25}({})".format(user.name, user.presence))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def command_talk(data, current_buffer, args):
"""
Open a chat with the specified user(s)
/slack talk <user>[,<user2>[,<user3>...]]
"""
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
channel_name = args.split(' ')[1]
if channel_name.startswith('#'):
channel_name = channel_name[1:]
# Try finding the channel by name
chan = team.channels.get(team.get_channel_map().get(channel_name))
# If the channel doesn't exist, try finding a DM or MPDM instead
if not chan:
# Get the IDs of the users
u = team.get_username_map()
users = set()
for user in channel_name.split(','):
if user.startswith('@'):
user = user[1:]
if user in u:
users.add(u[user])
if users:
if len(users) > 1:
channel_type = 'mpim'
# Add the current user since MPDMs include them as a member
users.add(team.myidentifier)
else:
channel_type = 'im'
chan = team.find_channel_by_members(users, channel_type=channel_type)
# If the DM or MPDM doesn't exist, create it
if not chan:
s = SlackRequest(team.token, SLACK_API_TRANSLATOR[channel_type]['join'], {'users': ','.join(users)}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
if chan:
chan.open()
if config.switch_buffer_on_join:
w.buffer_set(chan.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK_EAT
def command_showmuted(data, current_buffer, args):
current = w.current_buffer()
w.prnt(EVENTROUTER.weechat_controller.buffers[current].team.channel_buffer, str(EVENTROUTER.weechat_controller.buffers[current].team.muted_channels))
@utf8_decode
def thread_command_callback(data, current_buffer, args):
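    """
    /thread <message_id>: open a separate buffer for that message's thread.
    /reply <count> <text>: reply in the thread of the count-th latest main message.
    """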
current = w.current_buffer()
channel = EVENTROUTER.weechat_controller.buffers.get(current)
if channel:
args = args.split()
if args[0] == '/thread':
if len(args) == 2:
try:
pm = channel.messages[SlackTS(args[1])]
except:
pm = channel.hashed_messages[args[1]]
tc = SlackThreadChannel(EVENTROUTER, pm)
pm.thread_channel = tc
tc.open()
# tc.create_buffer()
if config.switch_buffer_on_join:
w.buffer_set(tc.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
elif args[0] == '/reply':
count = int(args[1])
msg = " ".join(args[2:])
mkeys = channel.main_message_keys_reversed()
parent_id = str(next(islice(mkeys, count - 1, None)))
channel.send_message(msg, request_dict_ext={"thread_ts": parent_id})
return w.WEECHAT_RC_OK_EAT
w.prnt(current, "Invalid thread command.")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def rehistory_command_callback(data, current_buffer, args):
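    """Clear the current buffer and fetch the channel history again."""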
current = w.current_buffer()
channel = EVENTROUTER.weechat_controller.buffers.get(current)
channel.got_history = False
w.buffer_clear(channel.channel_buffer)
channel.get_history()
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def hide_command_callback(data, current_buffer, args):
c = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if c:
name = c.formatted_name(style='long_default')
if name in config.distracting_channels:
w.buffer_set(c.channel_buffer, "hidden", "1")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
    # both branches of the original if/else were identical; the command
    # name is simply the first word of args
    function_name = args.split(' ', 1)[0]
try:
EVENTROUTER.cmds[function_name]("", current_buffer, args)
except KeyError:
w.prnt("", "Command not found: " + function_name)
return w.WEECHAT_RC_OK
@slack_buffer_required
def command_distracting(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if channel:
fullname = channel.formatted_name(style="long_default")
if config.distracting_channels.count(fullname) == 0:
config.distracting_channels.append(fullname)
else:
config.distracting_channels.pop(config.distracting_channels.index(fullname))
save_distracting_channels()
def save_distracting_channels():
w.config_set_plugin('distracting_channels', ','.join(config.distracting_channels))
@slack_buffer_required
def command_slash(data, current_buffer, args):
"""
Support for custom slack commands
/slack slash /customcommand arg1 arg2 arg3
"""
e = EVENTROUTER
channel = e.weechat_controller.buffers.get(current_buffer, None)
if channel:
team = channel.team
if args == 'slash':
w.prnt("", "Usage: /slack slash /someslashcommand [arguments...].")
return
split_args = args.split(None, 2)
command = split_args[1]
text = split_args[2] if len(split_args) > 2 else ""
s = SlackRequest(team.token, "chat.command", {"command": command, "text": text, 'channel': channel.identifier}, team_hash=team.team_hash, channel_identifier=channel.identifier)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_mute(data, current_buffer, args):
current = w.current_buffer()
channel_id = EVENTROUTER.weechat_controller.buffers[current].identifier
team = EVENTROUTER.weechat_controller.buffers[current].team
if channel_id not in team.muted_channels:
team.muted_channels.add(channel_id)
else:
team.muted_channels.discard(channel_id)
s = SlackRequest(team.token, "users.prefs.set", {"name": "muted_channels", "value": ",".join(team.muted_channels)}, team_hash=team.team_hash, channel_identifier=channel_id)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_openweb(data, current_buffer, args):
# if done from server buffer, open slack for reals
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if isinstance(channel, SlackTeam):
url = "https://{}".format(channel.team.domain)
else:
now = SlackTS()
url = "https://{}/archives/{}/p{}000000".format(channel.team.domain, channel.slack_name, now.majorstr())
w.prnt_date_tags(channel.team.channel_buffer, SlackTS().major, "openweb,logger_backlog_end,notify_none", url)
def command_nodistractions(data, current_buffer, args):
global hide_distractions
hide_distractions = not hide_distractions
if config.distracting_channels != ['']:
for channel in config.distracting_channels:
dbg('hiding channel {}'.format(channel))
# try:
for c in EVENTROUTER.weechat_controller.buffers.itervalues():
if c == channel:
dbg('found channel {} to hide'.format(channel))
w.buffer_set(c.channel_buffer, "hidden", str(int(hide_distractions)))
# except:
# dbg("Can't hide channel {} .. removing..".format(channel), main_buffer=True)
# config.distracting_channels.pop(config.distracting_channels.index(channel))
# save_distracting_channels()
@slack_buffer_required
def command_upload(data, current_buffer, args):
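    """
    Upload a file to the current channel (shells out to curl).
    /slack upload <path>
    """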
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
url = 'https://slack.com/api/files.upload'
fname = args.split(' ', 1)
file_path = os.path.expanduser(fname[1])
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if ' ' in file_path:
file_path = file_path.replace(' ', '\ ')
command = 'curl -F file=@{} -F channels={} -F token={} {}'.format(file_path, channel.identifier, team.token, url)
w.hook_process(command, config.slack_timeout, '', '')
@utf8_decode
def away_command_cb(data, current_buffer, args):
# TODO: reimplement all.. maybe
    (all, message) = re.match(r"^/away(?:\s+(-all))?(?:\s+(.+))?", args).groups()
if message is None:
command_back(data, current_buffer, args)
else:
command_away(data, current_buffer, args)
return w.WEECHAT_RC_OK
@slack_buffer_required
def command_away(data, current_buffer, args):
"""
Sets your status as 'away'
/slack away
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "users.setPresence", {"presence": "away"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_status(data, current_buffer, args):
"""
Lets you set your Slack Status (not to be confused with away/here)
/slack status [emoji] [status_message]
"""
e = EVENTROUTER
channel = e.weechat_controller.buffers.get(current_buffer, None)
if channel:
team = channel.team
split_args = args.split(None, 2)
emoji = split_args[1] if len(split_args) > 1 else ""
text = split_args[2] if len(split_args) > 2 else ""
profile = {"status_text":text,"status_emoji":emoji}
s = SlackRequest(team.token, "users.profile.set", {"profile": profile}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_back(data, current_buffer, args):
"""
Sets your status as 'back'
/slack back
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "users.setPresence", {"presence": "auto"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
@slack_buffer_required
@utf8_decode
def label_command_cb(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if channel and channel.type == 'thread':
aargs = args.split(None, 2)
new_name = " +" + aargs[1]
channel.label = new_name
w.buffer_set(channel.channel_buffer, "short_name", new_name)
@utf8_decode
def set_unread_cb(data, current_buffer, command):
for channel in EVENTROUTER.weechat_controller.buffers.values():
channel.mark_read()
return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
channel.mark_read()
return w.WEECHAT_RC_OK
def command_p(data, current_buffer, args):
args = args.split(' ', 1)[1]
w.prnt("", "{}".format(eval(args)))
###### NEW EXCEPTIONS
class ProcessNotImplemented(Exception):
"""
Raised when we try to call process_(something), but
(something) has not been defined as a function.
"""
def __init__(self, function_name):
super(ProcessNotImplemented, self).__init__(function_name)
class InvalidType(Exception):
"""
Raised when we do type checking to ensure objects of the wrong
type are not used improperly.
"""
def __init__(self, type_str):
super(InvalidType, self).__init__(type_str)
###### New but probably old and need to migrate
def closed_slack_debug_buffer_cb(data, buffer):
global slack_debug
slack_debug = None
return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
global slack_debug, debug_string
if slack_debug is not None:
w.buffer_set(slack_debug, "display", "1")
else:
debug_string = None
slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
w.buffer_set(slack_debug, "notify", "0")
def load_emoji():
try:
DIR = w.info_get("weechat_dir", "")
with open('{}/weemoji.json'.format(DIR), 'r') as ef:
return json.loads(ef.read())["emoji"]
except Exception as e:
dbg("Couldn't load emoji list: {}".format(e), 5)
return []
def setup_hooks():
cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
w.bar_item_new('slack_typing_notice', 'typing_bar_item_cb', '')
w.hook_timer(1000, 0, 0, "typing_update_cb", "")
w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
w.hook_signal('buffer_closing', "buffer_closing_callback", "EVENTROUTER")
w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('quit', "quit_notification_callback", "")
if config.send_typing_notice:
w.hook_signal('input_text_changed', "typing_notification_cb", "")
w.hook_command(
# Command name and description
'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(cmds.keys()) +
'\nUse /slack help [command] to find out more\n',
# Completions
'|'.join(cmds.keys()),
# Function name
'slack_command_cb', '')
# w.hook_command('me', '', 'stuff', 'stuff2', '', 'me_command_cb', '')
w.hook_command_run('/me', 'me_command_cb', '')
w.hook_command_run('/query', 'command_talk', '')
w.hook_command_run('/join', 'command_talk', '')
w.hook_command_run('/part', 'part_command_cb', '')
w.hook_command_run('/leave', 'part_command_cb', '')
w.hook_command_run('/topic', 'topic_command_cb', '')
w.hook_command_run('/thread', 'thread_command_callback', '')
w.hook_command_run('/reply', 'thread_command_callback', '')
w.hook_command_run('/rehistory', 'rehistory_command_callback', '')
w.hook_command_run('/hide', 'hide_command_callback', '')
w.hook_command_run('/msg', 'msg_command_cb', '')
w.hook_command_run('/label', 'label_command_cb', '')
w.hook_command_run("/input complete_next", "complete_next_cb", "")
w.hook_command_run("/input set_unread", "set_unread_cb", "")
w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
w.hook_command_run('/away', 'away_command_cb', '')
w.hook_command_run('/whois', 'whois_command_cb', '')
w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
# Hooks to fix/implement
# w.hook_signal('buffer_opened', "buffer_opened_cb", "")
# w.hook_signal('window_scrolled', "scrolled_cb", "")
# w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
"""
send debug output to the slack-debug buffer and optionally write to a file.
"""
# TODO: do this smarter
# return
if level >= config.debug_level:
global debug_string
message = "DEBUG: {}".format(message)
if fout:
file('/tmp/debug.log', 'a+').writelines(message + '\n')
if main_buffer:
# w.prnt("", "---------")
w.prnt("", "slack: " + message)
else:
if slack_debug and (not debug_string or debug_string in message):
# w.prnt(slack_debug, "---------")
w.prnt(slack_debug, message)
###### Config code
Setting = collections.namedtuple('Setting', ['default', 'desc'])
class PluginConfig(object):
# Default settings.
# These are, initially, each a (default, desc) tuple; the former is the
# default value of the setting, in the (string) format that weechat
# expects, and the latter is the user-friendly description of the setting.
# At __init__ time these values are extracted, the description is used to
# set or update the setting description for use with /help, and the default
# value is used to set the default for any settings not already defined.
# Following this procedure, the keys remain the same, but the values are
# the real (python) values of the settings.
default_settings = {
'background_load_all_history': Setting(
default='false',
desc='Load history for each channel in the background as soon as it'
' opens, rather than waiting for the user to look at it.'),
'channel_name_typing_indicator': Setting(
default='true',
desc='Change the prefix of a channel from # to > when someone is'
' typing in it. Note that this will (temporarily) affect the sort'
' order if you sort buffers by name rather than by number.'),
'colorize_private_chats': Setting(
default='false',
desc='Whether to use nick-colors in DM windows.'),
'debug_mode': Setting(
default='false',
desc='Open a dedicated buffer for debug messages and start logging'
' to it. How verbose the logging is depends on log_level.'),
'debug_level': Setting(
default='3',
desc='Show only this level of debug info (or higher) when'
' debug_mode is on. Lower levels -> more messages.'),
'distracting_channels': Setting(
default='',
desc='List of channels to hide.'),
'group_name_prefix': Setting(
default='&',
desc='The prefix of buffer names for groups (private channels).'),
'map_underline_to': Setting(
default='_',
desc='When sending underlined text to slack, use this formatting'
' character for it. The default ("_") sends it as italics. Use'
' "*" to send bold instead.'),
'never_away': Setting(
default='false',
desc='Poke Slack every five minutes so that it never marks you "away".'),
'record_events': Setting(
default='false',
desc='Log all traffic from Slack to disk as JSON.'),
'render_bold_as': Setting(
default='bold',
desc='When receiving bold text from Slack, render it as this in weechat.'),
'render_italic_as': Setting(
default='italic',
            desc='When receiving italic text from Slack, render it as this in weechat.'
' If your terminal lacks italic support, consider using "underline" instead.'),
'send_typing_notice': Setting(
default='true',
desc='Alert Slack users when you are typing a message in the input bar '
'(Requires reload)'),
'server_aliases': Setting(
default='',
desc='A comma separated list of `subdomain:alias` pairs. The alias'
' will be used instead of the actual name of the slack (in buffer'
' names, logging, etc). E.g `work:no_fun_allowed` would make your'
' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
'short_buffer_names': Setting(
default='false',
desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
' internal name for Slack buffers. Overrides server_aliases.'),
'show_reaction_nicks': Setting(
default='false',
desc='Display the name of the reacting user(s) alongside each reactji.'),
'slack_api_token': Setting(
default='INSERT VALID KEY HERE!',
desc='List of Slack API tokens, one per Slack instance you want to'
' connect to. See the README for details on how to get these.'),
'slack_timeout': Setting(
default='20000',
desc='How long (ms) to wait when communicating with Slack.'),
'switch_buffer_on_join': Setting(
default='true',
desc='When /joining a channel, automatically switch to it as well.'),
'thread_suffix_color': Setting(
default='lightcyan',
desc='Color to use for the [thread: XXX] suffix on messages that'
' have threads attached to them.'),
'unfurl_ignore_alt_text': Setting(
default='false',
desc='When displaying ("unfurling") links to channels/users/etc,'
' ignore the "alt text" present in the message and instead use the'
' canonical name of the thing being linked to.'),
'unfurl_auto_link_display': Setting(
default='both',
desc='When displaying ("unfurling") links to channels/users/etc,'
' determine what is displayed when the text matches the url'
' without the protocol. This happens when Slack automatically'
' creates links, e.g. from words separated by dots or email'
' addresses. Set it to "text" to only display the text written by'
' the user, "url" to only display the url or "both" (the default)'
' to display both.'),
'unhide_buffers_with_activity': Setting(
default='false',
desc='When activity occurs on a buffer, unhide it even if it was'
' previously hidden (whether by the user or by the'
' distracting_channels setting).'),
}
# Set missing settings to their defaults. Load non-missing settings from
# weechat configs.
def __init__(self):
self.settings = {}
# Set all descriptions, replace the values in the dict with the
# default setting value rather than the (setting,desc) tuple.
# Use items() rather than iteritems() so we don't need to worry about
# invalidating the iterator.
for key, (default, desc) in self.default_settings.items():
w.config_set_desc_plugin(key, desc)
self.settings[key] = default
# Migrate settings from old versions of Weeslack...
self.migrate()
# ...and then set anything left over from the defaults.
for key, default in self.settings.iteritems():
if not w.config_get_plugin(key):
w.config_set_plugin(key, default)
self.config_changed(None, None, None)
def __str__(self):
return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])
def config_changed(self, data, key, value):
for key in self.settings:
self.settings[key] = self.fetch_setting(key)
if self.debug_mode:
create_slack_debug_buffer()
return w.WEECHAT_RC_OK
def fetch_setting(self, key):
if hasattr(self, 'get_' + key):
try:
return getattr(self, 'get_' + key)(key)
except:
return self.settings[key]
else:
# Most settings are on/off, so make get_boolean the default
return self.get_boolean(key)
def __getattr__(self, key):
return self.settings[key]
def get_boolean(self, key):
return w.config_string_to_boolean(w.config_get_plugin(key))
def get_string(self, key):
return w.config_get_plugin(key)
def get_int(self, key):
return int(w.config_get_plugin(key))
def is_default(self, key):
default = self.default_settings.get(key).default
return w.config_get_plugin(key) == default
get_debug_level = get_int
get_group_name_prefix = get_string
get_map_underline_to = get_string
get_render_bold_as = get_string
get_render_italic_as = get_string
get_slack_timeout = get_int
get_thread_suffix_color = get_string
get_unfurl_auto_link_display = get_string
def get_distracting_channels(self, key):
return [x.strip() for x in w.config_get_plugin(key).split(',')]
def get_server_aliases(self, key):
alias_list = w.config_get_plugin(key)
if len(alias_list) > 0:
return dict(item.split(":") for item in alias_list.split(","))
def get_slack_api_token(self, key):
token = w.config_get_plugin("slack_api_token")
if token.startswith('${sec.data'):
return w.string_eval_expression(token, {}, {}, {})
else:
return token
def migrate(self):
"""
This is to migrate the extension name from slack_extension to slack
"""
if not w.config_get_plugin("migrated"):
for k in self.settings.keys():
if not w.config_is_set_plugin(k):
p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
data = w.config_string(p)
if data != "":
w.config_set_plugin(k, data)
w.config_set_plugin("migrated", "true")
# to Trace execution, add `setup_trace()` to startup
# and to a function and sys.settrace(trace_calls) to a function
def setup_trace():
global f
now = time.time()
f = open('{}/{}-trace.json'.format(RECORD_DIR, now), 'w')
def trace_calls(frame, event, arg):
global f
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print >> f, 'Call to %s on line %s of %s from line %s of %s' % \
(func_name, func_line_no, func_filename,
caller_line_no, caller_filename)
f.flush()
return
def initiate_connection(token, retries=3):
return SlackRequest(token,
'rtm.start',
{"batch_presence_aware": 1 },
retries=retries)
# Main
if __name__ == "__main__":
w = WeechatWrapper(weechat)
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "script_unloaded", ""):
weechat_version = w.info_get("version_number", "") or 0
if int(weechat_version) < 0x1030000:
w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
else:
global EVENTROUTER
EVENTROUTER = EventRouter()
# setup_trace()
# WEECHAT_HOME = w.info_get("weechat_dir", "")
# Global var section
slack_debug = None
config = PluginConfig()
config_changed_cb = config.config_changed
typing_timer = time.time()
# domain = None
# previous_buffer = None
# slack_buffer = None
# never_away = False
hide_distractions = False
# hotlist = w.infolist_get("hotlist", "", "")
# main_weechat_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, "DOESNOTEXIST!@#$"))
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")
EMOJI.extend(load_emoji())
setup_hooks()
# attach to the weechat hooks we need
tokens = config.slack_api_token.split(',')
for t in tokens:
s = initiate_connection(t)
EVENTROUTER.receive(s)
if config.record_events:
EVENTROUTER.record()
EVENTROUTER.handle_next()
w.hook_timer(10, 0, 0, "handle_next", "")
# END attach to the weechat hooks we need
| [
"christian@kellner.me"
] | christian@kellner.me |
63875d79110d18f6cff0081c2b8bd5e9f9f2c73c | ce29e42a50e35ec4637521bf8a3b71ab5f2fb9a6 | /others/efficient/multi_process_manager.py | 76f62e733dced66c471ded08dfc6895c02769ffe | [] | no_license | ico-Meng/WebCrawler | 97b3d5f0956cc13a83009a26c3e50944252fba44 | 7d9236ee4e1c664d257f550d64186eeed892188b | refs/heads/master | 2020-03-15T21:40:26.224465 | 2018-05-06T17:13:36 | 2018-05-06T17:13:36 | 132,359,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
import random, time, Queue
from multiprocessing.managers import BaseManager
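# Master side of a simple distributed task queue: two local queues are
# exposed over the network through a multiprocessing manager so that
# remote workers can pull tasks and push results back.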
task_queue, result_queue = Queue.Queue(), Queue.Queue()
class QueueManager(BaseManager): pass
QueueManager.register('get_task_queue', callable=lambda: task_queue)
QueueManager.register('get_result_queue', callable=lambda: result_queue)
manager = QueueManager(address=('', 9999), authkey='crawler')
manager.start()
task = manager.get_task_queue()
result = manager.get_result_queue()
for i in xrange(10000):
print('Put task %d'%i)
task.put(i)
print('Try get results...')
for i in xrange(10000):
r = result.get(timeout=10)
print('Result: %s'%r)
manager.shutdown()
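# A minimal worker-side sketch (illustrative, not part of this script);
# the host address and the result format are assumptions:
#
#   from multiprocessing.managers import BaseManager
#   import Queue
#   class Worker(BaseManager): pass
#   Worker.register('get_task_queue')
#   Worker.register('get_result_queue')
#   m = Worker(address=('127.0.0.1', 9999), authkey='crawler')
#   m.connect()
#   task, result = m.get_task_queue(), m.get_result_queue()
#   while True:
#       try:
#           n = task.get(timeout=1)
#       except Queue.Empty:
#           break
#       result.put('%d done' % n)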
| [
"ico.yigang.wang@gmail.com"
] | ico.yigang.wang@gmail.com |
09cf6f7180cf4a125f28ec1629fc501026177646 | 041228051fa241354ea3f33cd3065be58b3a7258 | /binary_tree/huffman_decoding.py | 8400dae15ebea018328ed06089541281b26a30c1 | [] | no_license | muthusrathinam/30dayscoding | d52d6abd688e0d3d17115804dbbb613314a938c7 | da5f880ba0252d2d425e1171924c8ad5e821e130 | refs/heads/main | 2023-06-21T15:45:00.224532 | 2021-07-23T06:19:19 | 2021-07-23T06:19:19 | 364,475,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | def decodeHuff(root, s):
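    """
    Decode a Huffman-encoded bit string: '0' walks left, '1' walks right;
    when a leaf is reached, emit its character and restart from the root.
    """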
    # temp pointer that walks the tree, starting at the root
    temp = root
    result = []
#traverse binary coded string
for char in s:
#traverse left of the tree
        if char == '0':  # '==' compares values; 'is' only worked via string interning
temp = temp.left
#traverse right of the tree
else:
temp = temp.right
if temp.left is None and temp.right is None:
result.append(temp.data)
temp = root
print("".join(result))
| [
"muthusmr444@gmail.com"
] | muthusmr444@gmail.com |
c8e00806ae74fe6db6ef1b04820024697e4e3627 | 766c392a3a196d15909505fefae2d44a7041b215 | /sanicApp.py | c935d9db151d0570392f46802fbdcc07f661144f | [] | no_license | bunny1985/qtConnect | b942702afce9bd1b7efc65b8f5d11754cf8b6000 | e092cea5c87d3d5acea6f89cf153bc0dc561dd21 | refs/heads/master | 2020-05-19T08:52:24.786923 | 2019-05-05T13:30:37 | 2019-05-05T13:30:37 | 184,933,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | from sanic import Sanic
app = Sanic("QTConnect", strict_slashes=True)
| [
"michal.banas@gmail.com"
] | michal.banas@gmail.com |
71b4c3192c59446446642f2dc38ac6eac594e87f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_138/1273.py | 7807dd8fe021579a8ca3aa6fa4f8c90eff1cc487 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | t = int(raw_input())
for i in range(1,t+1):
n = int(raw_input())
line = raw_input().split()
naomi = []
for j in range(0,n):
naomi.append(float(line[j]))
line = raw_input().split()
ken = []
for j in range(0,n):
ken.append(float(line[j]))
naomi = sorted(naomi)
ken = sorted(ken)
ind_ken = 0
ind_naomi = 0
end = False
while ind_ken != n:
while ken[ind_ken] < naomi[ind_naomi]:
ind_ken += 1
if ind_ken == n:
end = True
break
if end:
break
ind_naomi += 1
ind_ken += 1
w = len(naomi) - ind_naomi
dw = 0
while len(ken) > 0:
if ken[len(ken) - 1] < naomi[len(naomi) - 1]:
dw += 1
ken.pop()
naomi.pop()
else:
ken.pop()
naomi.pop(0)
str = "Case #%d: %d %d" % (i, dw, w)
print str | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
d7078816be6d36440d58f60f8b3cbd2db9ef4135 | 9640a6783b61e962b15ed14b163e1d91836a0d26 | /webcrawl/naver_autologin.py | 27a332501bc1c540b6c74304fb7d1b03b55ed4c9 | [] | no_license | namgiho96/tf_Python_study | 5c644b79efd2012ab90569f1748d2003298f98cd | 639634373e6350951a484b782cbfb26aa7d2e740 | refs/heads/master | 2020-05-07T09:10:38.774408 | 2019-04-09T13:00:51 | 2019-04-09T13:00:51 | 180,365,932 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | from selenium import webdriver # library for driving the web browser
ctx = '../crawler/chromedriver'
driver = webdriver.Chrome(ctx)
driver.implicitly_wait(3)
driver.get('https://nid.naver.com/nidlogin.login?mode=form&url=https%3A%2F%2Fwww.naver.com')
driver.find_element_by_name('id').send_keys('giho123') # enter the user ID
driver.find_element_by_name('pw').send_keys('....')
driver.implicitly_wait(3) # implicit wait: give element lookups a few seconds
driver.find_element_by_xpath('//*[@id="frmNIDLogin"]/fieldset/input').click() # click the login button
driver.implicitly_wait(3)
| [
"namgiho96@gmail.com"
] | namgiho96@gmail.com |
1bd538971347d12cadaa9a32a71f8df8df448517 | 0ec06804975ff5ae7e4258d088d2a940e7b7f128 | /artenv/artproject/artwork/settings.py | e12ad39f7dccf62d2210bcfa3f13ab838875340f | [] | no_license | NatashaGumbo/Artworks | 4d38c817a7fefb22989590d08fcea2d8e22fc04c | fbbd7f9f16f48ea377159bb060460c039846461e | refs/heads/master | 2021-08-11T09:39:11.739360 | 2017-11-13T14:11:15 | 2017-11-13T14:11:15 | 110,244,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,444 | py | """
Django settings for artwork project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^nagywvwkd)b$z0ch-0wd$4cfgv)oq8kxaas&*68$lsls)3n25'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'artstore.apps.ArtstoreConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'artwork.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'artwork.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, "static", 'media')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static", 'static_files'),
)
| [
"tashaverahgumbo@gmail.com"
] | tashaverahgumbo@gmail.com |
d0fdb912fce20ef803705dbad7736eeb3cee1a79 | 81abfb6bfeb12b01e0df631cb5dbfe467f8b4382 | /classes_objects.py | 3ee8a3a7a0b51fbf70757da05c335894d157e417 | [] | no_license | saikaranpraveen/python-basics | 704163c40df6128585ff0b9cad884014445dbc76 | e70148b4098b9d3b0ee86b91b46d6cd9581cf040 | refs/heads/main | 2023-06-27T06:42:04.356688 | 2021-04-23T06:10:53 | 2021-04-23T06:10:53 | 391,301,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | class Car:
def __init__(self, brand, model):
self.brand = brand
self.model = model
def handling(self, handling):
print (f"{self.brand} {self.model} has {handling} handling.")
def __str__(self) -> str:
return f"Brand = {self.brand}\nModel = {self.model}"
honda_city = Car("Honda", "City")
print(honda_city)
honda_city.handling("good") | [
"karansiamintern@gmail.com"
] | karansiamintern@gmail.com |
39abb1c58a1ae46d15c937d463dbc72c51ee8659 | b641319ea5164c1eb5db77c819abdd1f8136fce3 | /random_stream.py | 26e2a2c280f4736e7a6b65c58e3d223854009094 | [] | no_license | Anwesh43/theano-starter | 8d4b2a9e3023f10018f9005ef9a9e4583270fee0 | 87f2d987ce02a883889eac6543b82530d1b90989 | refs/heads/master | 2021-01-12T02:48:45.879958 | 2017-01-16T15:35:22 | 2017-01-16T15:35:22 | 78,109,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 693 | py | import theano.tensor as T
from theano import function  # only function is used from theano's top-level namespace
from theano.tensor.shared_randomstreams import RandomStreams
srng = RandomStreams(seed=1000)
r_uv = srng.uniform((2,2))
r_nd = srng.normal((2,2))
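# Each random variable owns its own numpy RandomState; the next three
# lines re-seed r_uv's generator only, leaving r_nd's untouched.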
rng_val = r_uv.rng.get_value(borrow=True)
rng_val.seed(345)
r_uv.rng.set_value(rng_val,borrow=True)
r_uniform = function([],r_uv)
r_normal = function([],r_nd,no_default_updates=True)
print r_uniform()
print r_normal()
print r_normal()
rnd_val = r_uv.rng.get_value(borrow=True)
state = rnd_val.get_state()
v1 = r_uniform()
v2 = r_uniform()
rnd_val = r_uv.rng.get_value(borrow=True)
rnd_val.set_state(state)
r_uv.rng.set_value(rnd_val, borrow=True)  # restore r_uv's generator; the original wrote r_nd here, a likely typo (v1 == v3 is checked below)
v3 = r_uniform()
print v1
print v2
print v3
print v1 == v3
| [
"anweshthecool0@gmail.com"
] | anweshthecool0@gmail.com |
3d29fc4351f1f443d97e90cdd5c1312728d92434 | d498180b97f5f474c969b8f89fef522ae1afa642 | /src/kubectl_launcher/config.py | 9c0214253271dfaa0856cb57b576574f026dd584 | [] | no_license | fm1ck3y/kubectl_launcher | cff19fc06f461655cdb8570f4701373b03f6586c | c842dae62ee265ab4e6aaaad01510920f3c0e0df | refs/heads/master | 2023-04-21T19:39:58.203965 | 2021-05-07T16:35:37 | 2021-05-07T16:35:37 | 365,292,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | import yaml
import logging
from kubectl_launcher.exceptions import KubectlLauncherYamlException
log = logging.getLogger(__name__)
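# Usage sketch (illustrative; the file name is an assumption):
#   with open("launcher.yaml") as fh:
#       cfg = Config(fh)
#   print(cfg.deployments)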
class Config:
kubectl_servers = []
servers = []
deployments = []
services = []
persistentVolumes = []
    def __init__(self, file_yaml):
        try:
            config_yaml = yaml.safe_load(file_yaml)
            self.__dict__.update(config_yaml)
            log.info(f"Config {file_yaml.name} loaded successfully")
        except yaml.YAMLError as e:
            raise KubectlLauncherYamlException("YAMLError: failed to parse YAML file.") from e | [
"arte.vdovin@gmail.com"
] | arte.vdovin@gmail.com |
d7e882092e4b190087f4548e9372a44995255bcf | d3737731634ee3f6fa2b19f6806d42ecc27d21a5 | /wals3/scripts/initializedb.py | 273185799ef7e0f763aec0421b0141b6c83648e1 | [] | no_license | Maggi12/wals3 | 3ad2475714b2d0bd1a7e5bb52baac1070eb07a5f | e66f08766ef67f51cae3d9656bcd4da1a8cf63c8 | refs/heads/master | 2021-01-22T20:02:56.225183 | 2014-07-25T15:42:39 | 2014-07-25T15:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,628 | py | from __future__ import unicode_literals
import sys
import transaction
from itertools import groupby, cycle
import re
from datetime import date, datetime
from collections import defaultdict
from pytz import utc
from sqlalchemy import create_engine
from sqlalchemy.orm import joinedload_all
from path import path
from bs4 import BeautifulSoup
from clld.db.meta import DBSession, VersionedDBSession
from clld.db.models import common
from clld.db.util import compute_language_sources
from clld.scripts.util import initializedb, Data, gbs_func
from clld.lib.bibtex import EntryType
from clld.lib.dsv import reader
from clld.util import LGR_ABBRS
import wals3
from wals3 import models
from wals3.scripts import uncited
from wals3.scripts import issues
UNCITED_MAP = {}
for k, v in uncited.MAP.items():
UNCITED_MAP[k.lower()] = v
# start with what's online right now:
DB = create_engine('postgresql://robert@/wals-vm42')
REFDB = create_engine('postgresql://robert@/walsrefs')
GC = create_engine('postgresql://robert@/glottolog3')
ABBRS = {
"A": "agent-like argument",
"ACCOMP": "accompanied ",
"ACR": "actor",
"ACT": "actual",
"ADEL": "adelative",
"ADVZ": "adverbializer",
"AFF": "affirmative",
"AGT": "agent",
"ALL": "allative",
"AN": "action nominal",
"ANC": "action nominal construction",
"ANIM": "animate",
"ANTIP": "antipassive",
"APPL": "applicative",
"AS": "asseverative",
"ASSOC": "associative",
"ASY": "asymmetric",
"ATTR": "attributive",
"AUD": "auditory evidential",
"AUG": "augmented",
"C": "common gender",
"CL": "class (= noun class, gender)",
"CLF": "classifier",
"CMPL": "completive",
"CNTR": "contrary to expectation marker",
"COLL": "collective",
"COM": "comitative",
"COMPR": "comparative",
"CONN": "connective",
"CONNEG": "connegative",
"CONSTR": "construct",
"CONT": "continuative, continous",
"CONTEMP": "contemporative",
"COP": "copula",
"CPW": "categories per word",
"CRS": "currently relevant state",
"DECL": "declarative",
"DEG": "degree word",
"DEP": "dependent marker",
"DES": "desire",
"DESID": "desiderative",
"DIM": "diminutive",
"DIR": "direct",
"DIR.EVD": "direct evidential",
"DIRL": "directional",
"DIST.PST": "distant past",
"DOBJ": "direct object",
"DS": "different subject",
"EMPH": "emphatic",
"EPENTH": "epenthetic",
"EPV": "expletive verbal suffix",
"EVD": "evidential",
"FACT": "fact",
"FAM": "familiar",
"FIN": "finite",
"FIN.AOR": "finite aorist",
"FV": "verb-final vowel",
"HAB": "habitual",
"HEST": "hesternal past",
"HHON": "super honorific",
"HOD": "hodiernal past",
"HON": "honorific",
"HORT": "hortative",
"HUM": "human",
"IE": "Indo-European",
"ILL": "illative",
"IMM.PRET": "immediate preterite",
"IMM.PST": "immediate past",
"IMPERS": "impersonal",
"INAN": "inanimate",
"INCEP": "inceptive",
"INCOMPL": "incompletive",
"IND": "indicative",
"INDIR.EVD": "indirect evidential",
"INFER": "inferential evidential",
"INGR": "ingressive",
"INTENT": "intentional",
"INTER": "interrogative",
"INTF": "intensifier",
"INTGEN": "intended genitive",
"INV": "inverse",
"IO": "indirect object ",
"IRR": "irrealis",
"ITER": "iterative",
"LIG": "ligature",
"LOCUT": "locutor person marker",
"MED": "medial",
"NARR": "narrative",
"NC": "noun class",
"NEC": "necessity",
"NHON": "non-honorific",
"NOMIN": "nominalization",
"NON.F": "non-feminine ",
"NONFIN": "non-finite ",
"NONFIN.AOR": "non-finite aorist",
"NP": "noun phrase",
"NPST": "non-past",
"NSG": "non-singular",
"NUM": "numeral",
"O": "object pronominal marker",
"OBV": "obviative",
"OPT": "optative",
"P": "patient-like argument",
"PAT": "patient",
"PATH": "path locative",
"PCL": "particle",
"PERS": "personal",
"PHR.TERM": "phrase terminal marker",
"PLUPERF": "pluperfect",
"POS": "possibility",
"POSTP": "postposition",
"POT": "potential",
"PP": "prepositional/postpositional phrase",
"PRECONTEMP": "precontemporal",
"PRED": "predicative",
"PREF": "prefix",
"PREP": "preposition",
"PREV": "preverb",
"PROL": "prolative",
"PRON": "pronoun",
"PROP": "proper name",
"PRTV": "partitive",
"PST.CONT": "past continuous",
"PST.PUNCT": "past punctiliar",
"PSTBEFOREYEST": "past before yesterday (= prehesternal)",
"PUNCT": "punctual stem",
"Q": "question-marker",
"QUOT": "quotative",
"RDP": "reduplication",
"REAL": "realis",
"REC": "recent (past)",
"RECP": "reciprocal",
"REM.PST": "remote past",
"REMOTE": "remote",
"REPET": "repetitive",
"RLZ": "realized",
"RNR": "result nominalizer",
"S": "sole argument of the intransitive verb",
"SBJV": "subjunctive",
"SENS": "sensory evidential",
"SPEC": "specific",
"SR": "switch Reference",
"SS": "same subject",
"STAT": "stative",
"SUBORD": "subordination",
"SUFF": "suffix",
"SUP": "superessive",
"SYM": "symmetric",
"SymAsy": "symmetric and asymmetric",
"T/A": "tense/ aspect",
"TD": "time depth/ proximality marker",
"TELIC": "telic",
"TEMPRY": "temporary",
"TH": "thematic suffix",
"THM": "theme (i.e. the semantic role)",
"TOD.PST": "today past",
"TRASL": "traslative",
"TRI": "trial",
"UNSP": "unspecified",
"VBLZ": "verbalizer",
"VENT": "ventive",
"VIS": "visual evidential",
"VP": "verb phrase",
}
for k, v in LGR_ABBRS.items():
ABBRS.setdefault(k, v)
def get_source(id): # pragma: no cover
"""retrieve a source record from wals_refdb
"""
field_map = {
'onlineversion': 'url',
'gbs_id': 'google_book_search_id',
'doi': 'jsondata',
'cited': 'jsondata',
'conference': 'jsondata',
'iso_code': 'jsondata',
'olac_field': 'jsondata',
'wals_code': 'jsondata',
}
res = {'id': id, 'jsondata': {'iso_code': [], 'olac_field': [], 'wals_code': []}}
refdb_id = UNCITED_MAP.get(id.lower())
if not refdb_id:
for row in REFDB.execute("""\
select id, genre from ref_record, ref_recordofdocument
where id = id_r_ref and citekey = '%s'""" % id
):
res['bibtex_type'] = row['genre']
refdb_id = row['id']
break
if not refdb_id:
if id[-1] in ['a', 'b', 'c', 'd']:
refdb_id = UNCITED_MAP.get(id[:-1].lower())
if not refdb_id:
print 'missing ref', id
return {}
res['pk'] = int(refdb_id)
if 'bibtex_type' not in res:
for row in REFDB.execute("select genre from ref_record where id = %s" % refdb_id):
res['bibtex_type'] = row['genre']
break
for row in REFDB.execute(
"select * from ref_recfields where id_r_ref = %s" % refdb_id
):
field = field_map.get(row['id_name'], row['id_name'])
if field == 'jsondata':
if row['id_name'] in ['iso_code', 'olac_field', 'wals_code']:
res['jsondata'][row['id_name']].append(row['id_value'])
else:
res['jsondata'][row['id_name']] = row['id_value']
else:
res[field] = row['id_value']
if res['bibtex_type'] == 'thesis':
        if res['format'] == 'phd':
            res['bibtex_type'] = 'phdthesis'  # was '==' (a no-op comparison) in the original
            del res['format']
        elif res['format'] == 'ma':
            res['bibtex_type'] = 'mastersthesis'
            del res['format']
        else:
            res['bibtex_type'] = 'misc'
if res['bibtex_type'] == 'online':
res['howpublished'] = 'online'
res['bibtex_type'] = getattr(EntryType, res['bibtex_type'], EntryType.misc)
if 'format' in res:
res['type'] = res['format']
del res['format']
authors = ''
for row in REFDB.execute(
"select * from ref_recauthors where id_r_ref = %s order by ord" % refdb_id
):
if row['type'] == 'etal':
authors += ' et al.'
else:
if authors:
authors += ' and '
authors += row['value']
res['author'] = authors
for row in REFDB.execute(
"select * from ref_recjournal where id_r_ref = %s" % refdb_id
):
res['journal'] = row['name']
break
return res
def parse_igt(html): # pragma: no cover
"""
<table class="IGT">
<caption>
<div class="translation">I want the white one.</div>
</caption>
<tbody>
<tr class="phrase">
<td class="morpheme"><i>Pojne-j-ben </i></td>
<td class="morpheme"><i>lew-din </i></td>
<td class="morpheme"><i>erd'-ije. </i></td>
</tr>
<tr class="gloss">
<td class="morpheme">white-PTCP-NMLZ</td>
<td class="morpheme">eat-INF</td>
<td class="morpheme">want-1SG.INTR</td>
</tr>
</tbody>
</table>
"""
def get_text(e):
if not isinstance(e, list):
e = [e]
return ' '.join(' '.join(ee.stripped_strings) for ee in e)
res = {}
soup = BeautifulSoup(html)
e = soup.find('caption')
if e:
res['description'] = get_text(e)
e = soup.find('tr', attrs={'class': 'phrase'})
if e:
morphemes = e.find_all('td', attrs={'class': 'morpheme'})
res['name'] = get_text(morphemes)
res['analyzed'] = '\t'.join(get_text(m) for m in morphemes)
res['markup_analyzed'] = '\t'.join(
''.join(unicode(c) for c in m.contents) for m in morphemes)
e = soup.find('tr', attrs={'class': 'gloss'})
if e:
morphemes = e.find_all('td', attrs={'class': 'morpheme'})
res['gloss'] = '\t'.join(get_text(m).replace('. ', '.') for m in morphemes)
res['markup_gloss'] = '\t'.join(
''.join(unicode(c) for c in m.contents) for m in morphemes)
assert len(res.get('gloss', '').split('\t')) == len(res.get('analyzed', '').split('\t'))
return res
def teaser(html): # pragma: no cover
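    """Collect visible text from ``html``, stopping after roughly 100 characters."""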
res = ''
for s in BeautifulSoup(html).stripped_strings:
res = '%s %s' % (res, s)
if len(res) > 100:
break
return res.strip()
def get_vs2008(args): # pragma: no cover
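    """Read the 2008 datapoints from CSV, keyed by (language id, feature id)."""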
vs2008 = {}
for row in reader(args.data_file('datapoints_2008.csv'), delimiter=','):
vs2008[(row[0], '%sA' % row[1])] = int(row[2])
return vs2008
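# Timestamps of the successive WALS editions (2008, 2011 and 2013 releases),
# used below as created/updated values for datapoints.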
E2008 = utc.localize(datetime(2008, 4, 21))
E2011 = utc.localize(datetime(2011, 4, 28))
E2013 = utc.localize(datetime(2013, 11, 15))
data = Data(created=E2008, updated=E2008)
def migrate(from_, to_, converter): # pragma: no cover
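    """
    Copy every row of old-DB table ``from_`` into model ``to_``.
    ``converter`` may return None (skip the row), a kwargs dict, or an
    (index, kwargs) pair to also register the object in the ``data`` cache.
    """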
for row in DB.execute("select * from %s" % from_):
res = converter(row)
if not res:
continue
if isinstance(res, dict):
DBSession.add(to_(**res))
else:
data.add(to_, res[0], **res[1])
DBSession.flush()
def main(args): # pragma: no cover
glottocodes = {}
for row in GC.execute('select ll.hid, l.id from language as l, languoid as ll where ll.pk = l.pk'):
if row[0] and len(row[0]) == 3:
glottocodes[row[0]] = row[1]
icons = issues.Icons()
old_db = DB
vs2008 = get_vs2008(args)
missing_sources = []
refdb_ids = {}
max_id = 7350
with open('/home/robert/venvs/clld/data/wals-data/missing_source.py', 'w') as fp:
for row in old_db.execute("select * from reference"):
try:
author, year = row['id'].split('-')
            except ValueError:  # id is not of the form 'author-year'
author, year = None, None
bibdata = get_source(row['id'])
if not bibdata:
fp.write('"%s",\n' % row['id'])
missing_sources.append(row['id'])
bibdata['pk'] = max_id
max_id += 1
if bibdata['pk'] in refdb_ids:
print 'already seen:', row['id'], 'as', refdb_ids[bibdata['pk']]
data['Source'][row['id']] = data['Source'][refdb_ids[bibdata['pk']]]
continue
refdb_ids[bibdata['pk']] = row['id']
bibdata.update({
'id': row['id'],
'name': row['name'],
'description': bibdata.get('title', bibdata.get('booktitle')),
'google_book_search_id': row['gbs_id'] or None,
})
data.add(common.Source, row['id'], **bibdata)
#
# TODO: add additional bibdata as data items
#
print('sources missing for %s refs' % len(missing_sources))
for id, name in ABBRS.items():
DBSession.add(common.GlossAbbreviation(id=id, name=name))
migrate(
'country',
models.Country,
lambda r: (r['id'], dict(id=r['id'], name=r['name'], continent=r['continent'])))
migrate(
'family',
models.Family,
lambda r: (r['id'], dict(id=r['id'], name=r['name'], description=r['comment'])))
for row, icon in zip(
list(old_db.execute("select * from genus order by family_id")),
cycle(iter(icons))
):
genus = data.add(
models.Genus, row['id'],
id=row['id'], name=row['name'], icon=icon, subfamily=row['subfamily'])
genus.family = data['Family'][row['family_id']]
DBSession.flush()
migrate(
'altname',
common.Identifier,
lambda r: (
(r['name'], r['type']), dict(name=r['name'], type='name', description=r['type'])))
# names for isolanguages are not unique!
enames = {}
for r in DB.execute("select * from isolanguage"):
id_ = 'ethnologue-%s' % r['id']
if r['name'] in enames:
data['Identifier'][id_] = enames[r['name']]
else:
enames[r['name']] = data.add(
common.Identifier, id_,
id=id_,
name=r['name'],
type='name',
description='ethnologue')
DBSession.flush()
migrate(
'isolanguage',
common.Identifier,
lambda r: (
r['id'],
dict(
id=r['id'],
name=r['id'],
type=common.IdentifierType.iso.value,
description=r['name'])))
migrate(
'isolanguage',
common.Identifier,
lambda r: None if r['id'] not in glottocodes else (
'gc-%s' % r['id'],
dict(
id='gc-%s' % r['id'],
name=glottocodes[r['id']],
type=common.IdentifierType.glottolog.value,
description=r['name'])))
migrate(
'language',
models.WalsLanguage,
lambda r: (
r['id'],
dict(
id=r['id'],
name=r['name'],
latitude=r['latitude'],
longitude=r['longitude'],
ascii_name=r['ascii_name'],
genus=data['Genus'][r['genus_id']],
samples_100=r['samples_100'] != 0,
samples_200=r['samples_200'] != 0)))
migrate(
'author',
common.Contributor,
lambda r: (
r['id'],
dict(name=r['name'], url=r['www'], id=r['id'], description=r['note'])))
dataset = common.Dataset(
id='wals',
name='WALS Online',
description='The World Atlas of Language Structures Online',
domain='wals.info',
published=date(2013, 8, 15),
contact='contact.wals@livingreviews.org',
license='http://creativecommons.org/licenses/by-nc-nd/2.0/de/deed.en',
jsondata={
'license_icon': 'http://wals.info/static/images/cc_by_nc_nd.png',
'license_name': 'Creative Commons Attribution-NonCommercial-NoDerivs 2.0 Germany'})
DBSession.add(dataset)
for i, editor in enumerate(['dryerms', 'haspelmathm']):
common.Editor(dataset=dataset, contributor=data['Contributor'][editor], ord=i + 1)
migrate(
'country_language',
models.CountryLanguage,
lambda r: dict(
language_pk=data['WalsLanguage'][r['language_id']].pk,
country_pk=data['Country'][r['country_id']].pk))
migrate(
'altname_language',
common.LanguageIdentifier,
lambda r: dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier'][(r['altname_name'], r['altname_type'])],
description=r['relation']))
migrate(
'isolanguage_language',
common.LanguageIdentifier,
lambda r: dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier'][r['isolanguage_id']],
description=r['relation']))
migrate(
'isolanguage_language',
common.LanguageIdentifier,
lambda r: None if 'ethnologue-%s' % r['isolanguage_id'] not in data['Identifier'] else dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier']['ethnologue-%s' % r['isolanguage_id']],
description=r['relation']))
migrate(
'isolanguage_language',
common.LanguageIdentifier,
lambda r: None if 'gc-%s' % r['isolanguage_id'] not in data['Identifier'] else dict(
language=data['WalsLanguage'][r['language_id']],
identifier=data['Identifier']['gc-%s' % r['isolanguage_id']],
description=r['relation']))
migrate(
'area',
models.Area,
lambda r: (
r['id'],
dict(name=r['name'], dbpedia_url=r['dbpedia_url'], id=str(r['id']))))
def migrate_chapter(row):
kw = dict(
id=row['id'],
name=row['name'],
wp_slug=row['blog_title'],
sortkey=int(row['id']),
area=data['Area'][row['area_id']])
if int(row['id']) in [143, 144]:
kw['created'] = E2011
kw['updated'] = E2011
return row['id'], kw
migrate('chapter', models.Chapter, migrate_chapter)
def migrate_supplement(row):
if row['name'] not in ['Help', 'Abbreviations']:
sortkey = 990 + int(row['id']) if row['name'] != 'Introduction' else 0
id_ = 's%s' % row['id']
kw = dict(id=id_, name=row['name'], sortkey=sortkey)
return id_, kw
migrate('supplement', models.Chapter, migrate_supplement)
migrate(
'chapter_reference',
common.ContributionReference,
lambda r: dict(
contribution=data['Chapter'][r['chapter_id']],
source=data['Source'][r['reference_id']]))
migrate(
'reference_supplement',
common.ContributionReference,
lambda r: dict(
contribution=data['Chapter']['s%s' % r['supplement_id']],
source=data['Source'][r['reference_id']]))
def migrate_feature(row):
kw = dict(id=row['id'], name=row['name'], ordinal_qualifier=row['id'][-1])
if row['id'].startswith('143') or row['id'].startswith('144'):
kw['created'] = E2011
kw['updated'] = E2011
kw['chapter'] = data['Chapter'][row['chapter_id']]
return row['id'], kw
migrate('feature', models.Feature, migrate_feature)
def migrate_value(row):
desc = row['description']
if desc == 'SOV & NegV/VNeg':
if row['icon_id'] != 's9ff':
desc += ' (a)'
else:
desc += ' (b)'
kw = dict(
id='%s-%s' % (row['feature_id'], row['numeric']),
name=desc,
description=row['long_description'],
jsondata=dict(icon=issues.Icons.id(row['icon_id'])),
number=row['numeric'],
parameter=data['Feature'][row['feature_id']])
return (row['feature_id'], row['numeric']), kw
migrate('value', common.DomainElement, migrate_value)
same = 0
added = 0
for row in old_db.execute("select * from datapoint"):
parameter = data['Feature'][row['feature_id']]
language = data['WalsLanguage'][row['language_id']]
id_ = '%s-%s' % (parameter.id, language.id)
created = E2008
updated = E2008
value_numeric = row['value_numeric']
if (language.id, parameter.id) in vs2008:
if vs2008[(language.id, parameter.id)] != row['value_numeric']:
print '~~~', id_, vs2008[(language.id, parameter.id)], '-->', row['value_numeric']
value_numeric = vs2008[(language.id, parameter.id)]
else:
same += 1
else:
updated = E2011
created = E2011
if parameter.id[-1] == 'A' and not (parameter.id.startswith('143') or parameter.id.startswith('144')):
added += 1
kw = dict(id=id_, updated=updated, created=created)
valueset = data.add(
common.ValueSet, row['id'],
language=language,
parameter=parameter,
contribution=parameter.chapter,
**kw)
data.add(
common.Value, id_,
domainelement=data['DomainElement'][(row['feature_id'], value_numeric)],
valueset=valueset,
**kw)
print same, 'datapoints did not change'
print added, 'datapoints added to existing features'
DBSession.flush()
migrate(
'datapoint_reference',
common.ValueSetReference,
lambda r: dict(
valueset=data['ValueSet'][r['datapoint_id']],
source=data['Source'][r['reference_id']],
description=r['note']))
migrate(
'author_chapter',
common.ContributionContributor,
lambda r: dict(
ord=r['order'],
primary=r['primary'] != 0,
contributor_pk=data['Contributor'][r['author_id']].pk,
contribution_pk=data['Chapter'][r['chapter_id']].pk))
migrate(
'author_supplement',
common.ContributionContributor,
lambda r: dict(
ord=r['order'],
primary=r['primary'] != 0,
contributor_pk=data['Contributor'][r['author_id']].pk,
contribution_pk=data['Chapter']['s%s' % r['supplement_id']].pk))
    igts = defaultdict(list)
for row in old_db.execute("select * from igt"):
d = {'id': 'igt-%s' % row['id']}
d.update(parse_igt(row['xhtml']))
igts[row['example_id']].append(d)
for row in old_db.execute("select * from example"):
if not row['language_id']:
print 'example without language:', row['id']
continue
_igts = igts[row['id']]
if _igts:
for igt in _igts:
data.add(
common.Sentence, igt['id'],
markup_comment=row['xhtml'],
language=data['WalsLanguage'][row['language_id']],
**igt)
else:
name = teaser(row['xhtml'])
if name:
data.add(
common.Sentence, row['id'],
id=str(row['id']),
name=name,
xhtml=row['xhtml'],
language=data['WalsLanguage'][row['language_id']])
missing = {}
for row in old_db.execute("select * from example_feature"):
_igts = igts[row['example_id']]
if _igts:
for igt in _igts:
try:
sentence = data['Sentence'][igt['id']]
except KeyError:
print 'missing sentence:', row['example_id']
continue
try:
value = data['Value']['%s-%s' % (row['feature_id'], sentence.language.id)]
DBSession.add(common.ValueSentence(sentence=sentence, value=value))
except KeyError:
missing[(row['feature_id'], sentence.language.id)] = 1
#print 'missing datapoint:', '%s-%s' % (row['feature_id'], sentence.language.id)
else:
try:
sentence = data['Sentence'][row['example_id']]
except KeyError:
print 'missing sentence:', row['example_id']
continue
try:
value = data['Value']['%s-%s' % (row['feature_id'], sentence.language.id)]
DBSession.add(common.ValueSentence(sentence=sentence, value=value))
except KeyError:
missing[(row['feature_id'], sentence.language.id)] = 1
#print 'missing datapoint:', '%s-%s' % (row['feature_id'], sentence.language.id)
print len(missing), 'missing datapoints for example_feature relations'
def prime_cache(args): # pragma: no cover
"""
we use a versioned session to insert the changes in value assignment
"""
#
# compute the changes from 2008 to 2011:
#
vs2008 = get_vs2008(args)
for row in DB.execute("select * from datapoint"):
key = (row['language_id'], row['feature_id'])
old_value = vs2008.get(key)
new_value = row['value_numeric']
if old_value and old_value != new_value:
valueset = VersionedDBSession.query(common.ValueSet)\
.join(common.Language)\
.join(common.Parameter)\
.filter(common.Parameter.id == row['feature_id'])\
.filter(common.Language.id == row['language_id'])\
.one()
value = valueset.values[0]
assert value.domainelement.number == old_value
for de in valueset.parameter.domain:
if de.number == new_value:
value.domainelement = de
break
assert value.domainelement.number == new_value
valueset.updated = E2011
value.updated = E2011
VersionedDBSession.flush()
for row in reader(args.data_file('corrections_2013.tab'), namedtuples=True, newline='\r'):
valueset = VersionedDBSession.query(common.ValueSet)\
.join(common.Language)\
.join(common.Parameter)\
.filter(common.Parameter.id == row.feature)\
.filter(common.Language.id == row.wals_code)\
.one()
value = valueset.values[0]
if value.domainelement.number == int(row.new):
print '**** old news', valueset.language.id, valueset.parameter.id
continue
if value.domainelement.number != int(row.old):
print '--->', valueset.language.id, valueset.parameter.id, value.domainelement.number
for de in valueset.parameter.domain:
if de.number == int(row.new):
value.domainelement = de
break
assert value.domainelement.number == int(row.new)
valueset.updated = E2013
value.updated = E2013
VersionedDBSession.flush()
print 'corrections 2013 done'
for issue in ['0', '9', '10', '11', '13', '14', '15', '16', '17', '19', '20', '24', '26', '27', '28']:
issue = getattr(issues, 'issue' + issue)
issue(VersionedDBSession, E2013)
VersionedDBSession.flush()
transaction.commit()
transaction.begin()
#
# TODO: these must be recomputed as well, after migrations!
#
# cache number of languages for a parameter:
for parameter, valuesets in groupby(
DBSession.query(common.ValueSet).order_by(common.ValueSet.parameter_pk),
lambda vs: vs.parameter):
parameter.representation = str(len(set(v.language_pk for v in valuesets)))
print 'recomputation of representation done'
transaction.commit()
transaction.begin()
# cache iso codes for languages:
for language in DBSession.query(common.Language).options(joinedload_all(
common.Language.languageidentifier, common.LanguageIdentifier.identifier
)):
iso_codes = []
for identifier in language.identifiers:
if identifier.type == common.IdentifierType.iso.value:
iso_codes.append(identifier.name)
language.iso_codes = ', '.join(sorted(set(iso_codes)))
print 'recomputation of iso codes done'
transaction.commit()
transaction.begin()
compute_language_sources()
transaction.commit()
transaction.begin()
gbs_func('update', args)
if __name__ == '__main__': # pragma: no cover
initializedb(create=main, prime_cache=prime_cache)
| [
"xrotwang@googlemail.com"
] | xrotwang@googlemail.com |
baa7678b52fae8e25d141a1b921f8006e86a6d26 | 66adad393a638d3a4cc47ed3d8b45b208f155ebe | /bookmarks1/account/views.py | bdac6a72fc611c4ef5ecf8d9c87d1849eaffa17e | [] | no_license | Dyavathrocky/image_sharing | a5e265c65fde29c1f665c522230bd73dfbf16c23 | 0939240f9a96dd8c80de813939d79455e95782c7 | refs/heads/main | 2023-01-21T15:23:10.141362 | 2020-11-29T13:53:26 | 2020-11-29T13:53:26 | 316,220,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,172 | py | from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from .forms import LoginForm, UserRegistrationForm, \
UserEditForm, ProfileEditForm
from .models import Profile
from django.contrib import messages
# Create your views here.
@login_required
def dashboard(request):
return render(request,
'account/dashboard.html', {'section': 'dashboard'})
def user_login(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
user = authenticate(request,
username=cd['username'],
password=cd['password'])
if user is not None:
if user.is_active:
login(request, user)
return HttpResponse('Authenticated '
'successfully')
else:
return HttpResponse('Disabled account')
else:
return HttpResponse('Invalid login')
else:
form = LoginForm()
return render(request, 'account/login.html', {'form': form})
def register(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data['password'])
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
return render(request,
'account/register_done.html',
{'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request,
'account/register.html',
{'user_form': user_form})
@login_required
def edit(request):
if request.method == 'POST':
user_form = UserEditForm(instance=request.user,
data=request.POST)
profile_form = ProfileEditForm(
instance=request.user.profile,
data=request.POST,
files=request.FILES)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, 'Profile updated successfully')
else:
messages.error(request, 'Error updating your profile')
else:
user_form = UserEditForm(instance=request.user)
profile_form = ProfileEditForm(
instance=request.user.profile)
return render(request,
'account/edit.html',
{'user_form': user_form,
'profile_form': profile_form}) | [
"davathrak@gmail.com"
] | davathrak@gmail.com |
b0adb28cc7f846a838514b4518eeb37eab5b89d6 | dded91a7c9a6f45c6b2753270e46f4fab8ffbaa3 | /EC_MenuApps/richtext_notepad.py | ed430474431476c52ce1138375a1b99ca4cbbbf1 | [] | no_license | francisco-ribeiro1/Curso-Python | e0ab05c210f9228c85c0d889b4a3429ce8e87f33 | edb7bfb4cf9bc6eb291e4caec894b0330ce4850f | refs/heads/master | 2022-12-17T17:30:04.196796 | 2020-09-18T23:35:35 | 2020-09-18T23:35:35 | 296,724,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,147 | py | # Notepad com Rich text
import sys
from PySide2.QtWidgets import (QApplication, QMainWindow, QAction,
QMessageBox, QTextEdit, QFileDialog,
QInputDialog, QFontDialog, QColorDialog)
from PySide2.QtGui import QIcon, QTextCursor, QColor
from PySide2.QtCore import Qt
class Notepad(QMainWindow):
def __init__(self):
super().__init__()
self.iniciaUI()
def iniciaUI(self):
"""
        Initialize the window and display its contents on screen.
"""
self.setGeometry(100,100, 400, 500)
self.setWindowTitle("Notepad")
self.displayWidgets()
self.notepadMenu()
self.show()
def displayWidgets(self):
"""
        Set up the app's widgets.
"""
self.tedit = QTextEdit()
self.setCentralWidget(self.tedit)
def notepadMenu(self):
"""
        Create the menu for the Notepad.
"""
novo_act = QAction(QIcon('Imagens/new_file.png'), 'Novo', self)
novo_act.setShortcut('Ctrl+N')
novo_act.triggered.connect(self.clearText)
abre_act = QAction(QIcon('Imagens/open_file.png'), 'Abrir', self)
abre_act.setShortcut('Ctrl+O')
abre_act.triggered.connect(self.openFile)
salv_act = QAction(QIcon('Imagens/save_file.png'), 'Salvar', self)
salv_act.setShortcut('Ctrl+S')
salv_act.triggered.connect(self.saveToFile)
sair_act = QAction(QIcon('Imagens/exit.png'), 'Sair', self)
sair_act.setShortcut('Ctrl+Q')
sair_act.triggered.connect(self.close)
dsfz_act = QAction(QIcon('Imagens/undo.png'),'Desfazer', self)
dsfz_act.setShortcut('Ctrl+Z')
dsfz_act.triggered.connect(self.tedit.undo)
rfaz_act = QAction(QIcon('Imagens/redo.png'),'Refazer', self)
rfaz_act.setShortcut('Ctrl+Shift+Z')
rfaz_act.triggered.connect(self.tedit.redo)
rcrt_act = QAction(QIcon('Imagens/cut.png'),'Recortar', self)
rcrt_act.setShortcut('Ctrl+X')
rcrt_act.triggered.connect(self.tedit.cut)
copr_act = QAction(QIcon('Imagens/copy.png'),'Copiar', self)
copr_act.setShortcut('Ctrl+C')
copr_act.triggered.connect(self.tedit.copy)
colr_act = QAction(QIcon('Imagens/paste.png'),'Colar', self)
colr_act.setShortcut('Ctrl+V')
colr_act.triggered.connect(self.tedit.paste)
proc_act = QAction(QIcon('Imagens/find.png'), 'Encontrar', self)
proc_act.setShortcut('Ctrl+F')
proc_act.triggered.connect(self.findTextDialog)
font_act = QAction(QIcon('Imagens/font.png'), 'Fonte', self)
font_act.setShortcut('Ctrl+T')
font_act.triggered.connect(self.chooseFont)
cor_act = QAction(QIcon('Imagens/color.png'), 'Cor', self)
cor_act.setShortcut('Ctrl+Shift+C')
cor_act.triggered.connect(self.chooseFontColor)
hilh_act = QAction(QIcon('Imagens/highlight.png'), 'Destaque', self)
hilh_act.setShortcut('Ctrl+Shift+H')
hilh_act.triggered.connect(self.chooseFontBackgroundColor)
sobr_act = QAction('Sobre', self)
sobr_act.triggered.connect(self.aboutDialog)
menu_bar = self.menuBar()
menu_bar.setNativeMenuBar(False)
arqv_menu = menu_bar.addMenu('Arquivo')
arqv_menu.addAction(novo_act)
arqv_menu.addSeparator()
arqv_menu.addAction(abre_act)
arqv_menu.addAction(salv_act)
arqv_menu.addSeparator()
arqv_menu.addAction(sair_act)
edit_menu = menu_bar.addMenu('Editar')
edit_menu.addAction(dsfz_act)
edit_menu.addAction(rfaz_act)
edit_menu.addSeparator()
edit_menu.addAction(rcrt_act)
edit_menu.addAction(copr_act)
edit_menu.addAction(colr_act)
edit_menu.addSeparator()
edit_menu.addAction(proc_act)
tool_menu = menu_bar.addMenu('Ferramentas')
tool_menu.addAction(font_act)
tool_menu.addAction(cor_act)
tool_menu.addAction(hilh_act)
help_menu = menu_bar.addMenu('Ajuda')
help_menu.addAction(sobr_act)
def clearText(self):
"""
        If the New button is clicked, show a dialog asking the user
        whether or not to clear the text edit field.
"""
resp = QMessageBox.question(self, "Limpar texto",
"Você deseja limpar o texto?", QMessageBox.No | QMessageBox.Yes,
QMessageBox.Yes)
if resp == QMessageBox.Yes:
self.tedit.clear()
else:
pass
def openFile(self):
"""
        Open a text or HTML file and display its
        contents in the text edit field.
"""
file_name, _ = QFileDialog.getOpenFileName(self, "Abrir Arquivo",
"", "Arquivos HTML (*.html);;Arquivos de Texto (*.txt)")
if file_name:
with open(file_name, 'r') as f:
notepad_text = f.read()
self.tedit.setText(notepad_text)
else:
QMessageBox.information(self, "Erro", "Impossível abrir o arquivo.",
QMessageBox.Ok)
def findTextDialog(self):
"""
        Search for text in the QTextEdit widget.
"""
find_text, ok = QInputDialog.getText(self, "Encontrar Texto", "Procurar:")
extra = []
if ok and not self.tedit.isReadOnly():
self.tedit.moveCursor(QTextCursor.Start)
color = QColor(Qt.yellow)
while(self.tedit.find(find_text)):
selection = QTextEdit.ExtraSelection()
selection.format.setBackground(color)
selection.cursor = self.tedit.textCursor()
extra.append(selection)
            # setExtraSelections takes the whole list in a single call
            self.tedit.setExtraSelections(extra)
def saveToFile(self):
"""
        If the Save button is clicked, show a dialog asking the user
        whether to save the text to a file.
"""
file_name, _ = QFileDialog.getSaveFileName(self, 'Salvar Aqrquivo',
"","Arquivos HTML (*.html);;Arquivos de Texto (*.txt)")
if file_name.endswith('.txt'):
notepad_text = self.tedit.toPlainText()
with open(file_name, 'w') as f:
f.write(notepad_text)
elif file_name.endswith('.html'):
            notepad_richtext = self.tedit.toHtml()
with open(file_name, 'w') as f:
f.write(notepad_richtext)
else:
QMessageBox.information(self, "Erro", "Impossível salvar o arquivo.",
QMessageBox.Ok)
def chooseFont(self):
"""
        Select the text font.
"""
atual = self.tedit.currentFont()
font, ok = QFontDialog.getFont(atual, self,
options=QFontDialog.DontUseNativeDialog)
if ok:
self.tedit.setCurrentFont(font)
def chooseFontColor(self):
"""
        Select the text color.
"""
cor = QColorDialog.getColor()
if cor.isValid():
self.tedit.setTextColor(cor)
def chooseFontBackgroundColor(self):
"""
        Select the text background color.
"""
cor = QColorDialog.getColor()
if cor.isValid():
self.tedit.setTextBackgroundColor(cor)
def aboutDialog(self):
"""
        Display information about the program.
"""
QMessageBox.about(self, "Sobre Notepad",
"Bloco de notas adaptado do Guia prático para iniciantes no PyQt")
# Run the app
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Notepad()
sys.exit(app.exec_())
| [
"noreply@github.com"
] | noreply@github.com |
7bfebb60ad510573083f308d96db0442c9bcef2b | 0464932d544ee946a13d991d2ae0b284b31f5edd | /hourglassSum.py | da959cca4c5b6d4bbe5cf48f412abd10d7401144 | [] | no_license | arjun1237/2D-Array-DS | f9ace4ca845c6247fcff3c8d589745d8580fbb4d | 61c74cdef9cbe8be493aa3249996ebe87e5267b1 | refs/heads/master | 2020-07-12T14:20:07.051525 | 2019-08-28T03:27:05 | 2019-08-28T03:27:05 | 204,839,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | def hourglassSum(arr):
array_len = len(arr)
highest = 0
for i in range(1, array_len-1):
for j in range(1, array_len-1):
total = arr[i][j]
for k in range(0, 3):
total += arr[i-1][j-1+k] + arr[i+1][j-1+k]
if i == 1 and j == 1:
highest = total
if total > highest:
highest = total
return highest
# Sample 6x6 grid (the square shape the function assumes) so the script runs;
# the maximum hourglass sum here is 19.
arr = [
    [1, 1, 1, 0, 0, 0],
    [0, 1, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0],
    [0, 0, 2, 4, 4, 0],
    [0, 0, 0, 2, 0, 0],
    [0, 0, 1, 2, 4, 0],
]
print(hourglassSum(arr)) | [
"noreply@github.com"
] | noreply@github.com |
a21cbc48f84c785c531000e69be0bc25254d2951 | 8dfce5fab6ab68bd08807d5893c3dd60ddc5bded | /scraper/test_AmazonProduct.py | 6296ef2ab9dea7176cb8f085e16f107615c5cfb2 | [] | no_license | khiemjannguyen/Sem4_DB_Projekt | 0f8865076f952311882eb7198defcca936d4c50b | 60c157b38c423edf684c04372510a83ee9e7bd00 | refs/heads/main | 2023-04-19T22:38:31.110635 | 2021-05-15T21:47:19 | 2021-05-15T21:47:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,754 | py | import unittest
from AmazonProduct import *
class Test_AmazonProduct(unittest.TestCase):
url1 = "https://www.amazon.de/Samsung-MU-PA1T0B-EU-Portable-Kabel/dp/B074M774TW/?_encoding=UTF8&pd_rd_w=l7mCN&pf_rd_p=4f36a2ac-16fe-4a2a-a875-9a9ac12f9041&pf_rd_r=GXM3CM87QAX521QKM1WH&pd_rd_r=ce39a955-9f89-4159-88b2-278a4df7de77&pd_rd_wg=mn7D7&ref_=pd_gw_ci_mcx_mr_hp_d"
url2 = "https://www.ebay.de/itm/324203357789?epid=25032156863&hash=item4b7c06a65d:g:0OoAAOSwnv9e7HXe"
product_correct = AmazonProduct(url1)
product_error = AmazonProduct(url2)
def test_set_title(self):
self.assertEqual(self.product_correct.title, "Samsung MU-PA1T0B/EU Portable SSD T5 1 TB USB 3.1 Externe SSD Schwarz")
self.assertEqual(self.product_error.set_title(), None)
def test_set_product_id(self):
self.assertEqual(self.product_correct.product_id, "B074M774TW")
self.assertEqual(self.product_error.set_product_id(), None)
def test_get_price(self):
# not constant: self.assertEqual(self.product_correct.get_price(), None)
self.assertEqual(self.product_error.get_price(), None)
def test_get_currency(self):
self.assertEqual(self.product_correct.get_currency(), "€")
self.assertEqual(self.product_error.get_currency(), None)
def test_get_review_starsRate(self):
# not constant: self.assertEqual(self.product_correct.get_review_starsRate(), None)
self.assertEqual(self.product_error.get_review_starsRate(), None)
def test_get_numberOfReviews(self):
# not constant: self.assertEqual(self.product_correct.get_numberOfReviews(), None)
self.assertEqual(self.product_error.get_numberOfReviews(), None)
if __name__ == '__main__':
unittest.main()
| [
"khiemjan.nguyen@gmail.com"
] | khiemjan.nguyen@gmail.com |
e7e0deac411c991076dc18e374867a07a253d989 | d89a482aaf3001bbc4515f39af9ba474e1ae6062 | /sip/sip_history.py | 7bdbef694f14c90a11c7df182424967f95a137dc | [] | no_license | hongtao510/u_tool | 2925e3694aba81714cf83018c3f8520a7b503228 | 98c962cfb1f53c4971fb2b9ae22c882c0fae6497 | refs/heads/master | 2021-01-10T20:40:24.793531 | 2014-03-14T22:57:37 | 2014-03-14T22:57:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 03 13:30:41 2012
@author: jharston
"""
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import os
from uber import uber_lib
import history_tables
import rest_funcs
class SIPHistoryPage(webapp.RequestHandler):
def get(self):
templatepath = os.path.dirname(__file__) + '/../templates/'
ChkCookie = self.request.cookies.get("ubercookie")
html = uber_lib.SkinChk(ChkCookie, "SIP User History")
html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'sip','page':'history'})
html = html + template.render(templatepath + '03ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberalgorithm_start.html', {
'model':'sip',
'model_attributes':'SIP User History'})
html = html + template.render (templatepath + 'history_pagination.html', {})
hist_obj = rest_funcs.user_hist('admin', 'sip')
html = html + history_tables.table_all(hist_obj)
html = html + template.render(templatepath + '04ubertext_end.html', {})
html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', SIPHistoryPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
| [
"hongtao510@gmail.com"
] | hongtao510@gmail.com |
0a2a2c8497f4ec98148d7fb8ba76e6e61511350b | 33fd027b90dd8c4efd600f5bbcaf474edcc953a7 | /pyspark_windowfunctions.py | 6c96f1218c633ebc61e915eb25e33b9a7bed2965 | [] | no_license | andrevictorm/PYSPARK_ESTUDO | 50ce523ee0ce08533dc7a8dd0c2f9b4b0d0ca2cc | fedca431f5eaac6d57fcfaa42e39a819ac295072 | refs/heads/main | 2023-09-05T15:30:59.235946 | 2021-11-16T14:32:05 | 2021-11-16T14:32:05 | 428,668,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,412 | py | # Databricks notebook source
# Window functions return a single value for each group of rows. PySpark
# supports 3 types of window functions:
#   - Ranking functions
#   - Analytic functions
#   - Aggregate functions
# Documentation: https://spark.apache.org/docs/latest/api/python/reference/api/pyspark.sql.Window.html?highlight=window#pyspark.sql.Window
# COMMAND ----------
import pyspark.sql.functions as F
from pyspark.sql.window import Window
# COMMAND ----------
dados = [
("Anderson", "Vendas", "SP", 1500.00, 34, 1000.00),
("Kennedy", "Vendas", "CE", 1200.00, 56, 2000.00),
("Bruno", "Vendas", "SP", 1100.00, 30, 2300.00),
("Maria", "Finanças", "CE", 3600.00, 24, 2300.00),
("Eduardo", "Finanças", "CE", 4500.00, 40, 2400.00),
("Mendes", "Finanças", "RS", 8000.00, 36, 1900.00),
("Kethlyn", "Finanças", "RS", 1200.00, 53, 1500.00),
("Thiago", "Marketing", "GO", 1100.00, 25, 1800.00),
]
schema = ['nome','departamento','estado','salario','idade','bonus']
df=spark.createDataFrame(data=dados,schema=schema)
df.printSchema()
df.show()
# COMMAND ----------
w0 = Window.partitionBy(F.col("departamento")).orderBy("salario")
# COMMAND ----------
# DBTITLE 1,Row Window
# Returns the row number of each row within its partition, following the window's ordering
df.withColumn("row_number",F.row_number().over(w0)).display()
# COMMAND ----------
# DBTITLE 1,rank Window Functions
df.withColumn("rank",F.rank().over(w0)).display()
# COMMAND ----------
# DBTITLE 1,dense_rank() Window Function
df.withColumn("dense_rank",F.dense_rank().over(w0)).display()
# COMMAND ----------
# DBTITLE 1,percent_rank Window Functions
df.withColumn("percent_rank",F.percent_rank().over(w0)).display()
# COMMAND ----------
# DBTITLE 1,lag Window Function
df.withColumn("lag",F.lag("salario",2).over(w0)).display()
# COMMAND ----------
# DBTITLE 1,lead Window Function
df.withColumn("lead",F.lead("salario",2).over(w0)).display()
# COMMAND ----------
# DBTITLE 1,Window Aggregate Functions
(df.withColumn("row",F.row_number().over(w0))
.withColumn("avg",F.avg(F.col("salario")).over(w0))
.withColumn("sum",F.sum(F.col("salario")).over(w0))
.withColumn("max",F.max(F.col("salario")).over(w0))
.withColumn("min",F.min(F.col("salario")).over(w0))
.select("row","departamento","avg","sum","min","max").display()
)
# COMMAND ----------
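# DBTITLE 1,Explicit window frame (illustrative sketch)
# Not from the original notebook: with an orderBy present, Spark's default
# frame already spans from the start of the partition to the current row;
# rowsBetween just makes that explicit for a running total of "salario".
w1 = w0.rowsBetween(Window.unboundedPreceding, Window.currentRow)
df.withColumn("running_total", F.sum(F.col("salario")).over(w1)).display()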
| [
"noreply@github.com"
] | noreply@github.com |
24b75e19d3d0afdeca0a294cc8798e0b8e02fdde | 0fa977f3ccfd3293d7707a1ec36dcc2dcbd82915 | /2017/booleans/underwater.py | dca547a6d61c876dd7b102ec563bd530643f7b59 | [] | no_license | xzou288/MInecraft_Python_4Xiang | c121bd22e66d8720198a4fa68b9445b52bf2d494 | 7ba8d7e5d1a0f24e49ee08bdc6c839548f4f3bf8 | refs/heads/master | 2021-09-10T15:32:51.524841 | 2018-03-28T15:47:14 | 2018-03-28T15:47:14 | 110,139,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | from mcpi.minecraft import Minecraft
mc = Minecraft.create()
pos = mc.player.getPos()
x = pos.x
y = pos.y
z = pos.z
blockType = mc.getBlock(x, y, z)
mc.postToChat(blockType == 0)
# mcpi has no player.underwater() method; derive the state from the block at
# the player's position instead (water block ids in mcpi: 8 flowing, 9 still)
underwater = blockType in (8, 9)
mc.postToChat(underwater)
| [
"noreply@github.com"
] | noreply@github.com |
bb352a077de0a96d708f7bd908b1d4f2e9c8b720 | aa76391d5789b5082702d3f76d2b6e13488d30be | /programmers/Lev1/print_triangle.py | aeb50f3fd8f6008928c6bee577e7267406cb4451 | [] | no_license | B2SIC/python_playground | 118957fe4ca3dc9395bc78b56825b9a014ef95cb | 14cbc32affbeec57abbd8e8c4ff510aaa986874e | refs/heads/master | 2023-02-28T21:27:34.148351 | 2021-02-12T10:20:49 | 2021-02-12T10:20:49 | 104,154,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def printTriangle(num):
s = ''
for i in range(1, num + 1):
s += "*" * i + "\n"
return s
print(printTriangle(5)) | [
"the_basic_@kookmin.ac.kr"
] | the_basic_@kookmin.ac.kr |
a3f0f41e1e74dfb4968b10ff2fecd3d29bb11d66 | a20b8bea72c6f0def075001152455a6bfd68ac85 | /game/views.py | a6e0b824cbbb03ede6f636ac3864c4d2a856346a | [] | no_license | mr-korean/portfolio-django | cb0ea4cedd816ac75a657dbfca949235d8d30aa7 | 8246e137283344faf2b6c0555e129a54f064f5ee | refs/heads/master | 2020-03-20T15:44:29.021007 | 2018-09-11T16:28:46 | 2018-09-11T16:28:46 | 117,920,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,937 | py | # -*- coding: UTF-8 -*-
from django.views.generic.base import TemplateView
from django.contrib.auth.models import User
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Record, HighRecord
from django.http import JsonResponse, HttpResponse
from django.contrib.auth.decorators import login_required
import json, pytz
# For Django querysets, see the links below:
# (1) http://pythonstudy.xyz/python/article/310-Django-%EB%AA%A8%EB%8D%B8-API
# (2) http://brownbears.tistory.com/63
# (3) https://docs.djangoproject.com/en/2.0/topics/db/queries/
# (4) http://raccoonyy.github.io/using-django-querysets-effectively-translate/
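# Queryset chains stay lazy -- nothing hits the database until the result is
# evaluated, e.g. (illustrative, mirroring record_download_monte below):
#   Record.objects.filter(gametitle='monte').order_by('-played_date')[:10]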
class MainView(TemplateView):
template_name = 'game/game_main.html'
def show_game(request, order):
gamenumber = str(order)
return render(request, 'game/game-' + gamenumber + '.html')
def leaderboard(request):
    # Note: regular records and high scores are now stored separately, so the
    # ranking should display high scores only.
    # Sort by (1) high score descending, then (2) most recent date.
    checkedRecords = HighRecord.objects.all()
    records_monte = checkedRecords.order_by('-highscore', '-played_date')[:10]  # limit to Top 10 (must happen at the filter & sort stage)
return render(request, 'game/game_score.html', {'records_monte' : records_monte})
@login_required
def score_upload_monte(request):
gotScore = request.GET.get('uploadedScore', None)
localId = request.user.id
foundUser = User.objects.get(id = localId)
Record.objects.create(player = foundUser, gametitle = 'monte', score = gotScore)
data = {
'message': "점수가 서버에 등록되었답니다."
}
return JsonResponse(data)
@login_required
def highscore_upload_monte(request):
gotHighscore = request.GET.get('uploadedHighscore', None)
localId = request.user.id
foundUser = User.objects.get(id = localId)
HighRecord.objects.create(player = foundUser, gametitle = 'monte', highscore = gotHighscore)
    # Note: a new row is created here regardless of any existing record, so a
    # later load fetches all of them -- filter at display time?
data = {
'message': "최고점수가 서버에 등록되었답니다."
}
return JsonResponse(data)
@login_required
def record_download_monte(request):
localId = request.user.id
foundUser = User.objects.get(id = localId)
record = Record.objects.all()
highrecord = HighRecord.objects.all()
latestRecord = record.filter(player = foundUser).filter(gametitle = 'monte').order_by('-played_date')[0]
latestHighRecord = highrecord.filter(player = foundUser).filter(gametitle = 'monte').order_by('-played_date')[0]
data = {
'score':latestRecord.score,
'highScore':latestHighRecord.highscore
}
return JsonResponse(data) | [
"zzang2314274@hanmail.net"
] | zzang2314274@hanmail.net |
a079211c11689b83b59f3657cc19f4ecfdd5c9e7 | 43d38bf7ad12fd6030e705c1cb326e6a4d57595d | /kmsPidGenerator.py | 692a4210cdb384c6ea3f0d05674f79d2d1eb80c5 | [] | no_license | Cheain/kms_tool | fba8d1a9427e074eb6392471572c7d28fdeadbb9 | fa795d40667111fbc3ea23c1caa7d7d7f47397f9 | refs/heads/master | 2021-04-03T10:01:36.459501 | 2018-03-11T08:44:54 | 2018-03-11T08:44:54 | 124,347,704 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,582 | py | import datetime
import random
import time
import uuid
APP_ID_WINDOWS = uuid.UUID("55C92734-D682-4D71-983E-D6EC3F16059F")
APP_ID_OFFICE14 = uuid.UUID("59A52881-A989-479D-AF46-F275C6370663")
APP_ID_OFFICE15 = uuid.UUID("0FF1CE15-A989-479D-AF46-F275C6370663") # also valid for Office 16 (2016).
# KMS Host OS Type
hostOsList = {}
# Windows Server 2008 R2 SP1
hostOsList["HOST_SERVER2008R2"] = {
"type": 55041,
"osBuild": 7601
}
# Windows Server 2012 RTM
hostOsList["HOST_SERVER2012"] = {
"type": 5426,
"osBuild": 9200
}
# Windows Server 2012 R2 RTM
hostOsList["HOST_SERVER2012R2"] = {
"type": 6401,
"osBuild": 9600
}
# Windows Server 2016 RTM
hostOsList["HOST_SERVER2016"] = {
"type": 3612,
"osBuild": 14393
}
# Product Specific KeyConfig
pkeyConfigList = {}
# Windows Server KMS Host PID, actual PIDRangeMax = 191999999
pkeyConfigList["windows"] = {
"GroupID": 206,
"PIDRangeMin": 152000000,
"PIDRangeMax": 191999999
}
# Windows Server 2012 R2 KMS Host PID, actual PIDRangeMax = 310999999
pkeyConfigList["windows2012r2"] = {
"GroupID": 206,
"PIDRangeMin": 271000000,
"PIDRangeMax": 310999999
}
# Office 2010 KMSHost Class PID, actual PIDRangeMax = 217999999
pkeyConfigList["office14"] = {
"GroupID": 96,
"PIDRangeMin": 199000000,
"PIDRangeMax": 217999999
}
# Office 2013 KMSHost Class PID, actual PIDRangeMax = 255999999
pkeyConfigList["office15"] = {
"GroupID": 206,
"PIDRangeMin": 234000000,
"PIDRangeMax": 255999999
}
def epidGenerator(appId, version, lcid):
# Generate Part 1 & 7: Host Type and KMS Server OS Build
hostOsType = random.choice(list(hostOsList.keys())) # *2to3*
hostOsDict = hostOsList[hostOsType]
# Generate Part 2: Group ID and Product Key ID Range
if appId == APP_ID_OFFICE14:
keyConfig = pkeyConfigList["office14"]
elif appId == APP_ID_OFFICE15:
keyConfig = pkeyConfigList["office15"]
else:
# Default to Windows
if hostOsDict['osBuild'] == 14393:
keyConfig = pkeyConfigList["windows2012r2"]
elif hostOsDict['osBuild'] == 9600:
keyConfig = pkeyConfigList["windows2012r2"]
else:
keyConfig = pkeyConfigList["windows"]
# Generate Part 3 and Part 4: Product Key ID
productKeyID = random.randint(keyConfig["PIDRangeMin"], keyConfig["PIDRangeMax"])
# Generate Part 5: License Channel (00=Retail, 01=Retail, 02=OEM,
# 03=Volume(GVLK,MAK)) - always 03
licenseChannel = 3
# Generate Part 6: Language - use system default language
# 1033 is en-us
languageCode = lcid # C# CultureInfo.InstalledUICulture.LCID
# Generate Part 8: KMS Host Activation Date
# Get Minimum Possible Date: Newer Products first
if hostOsType == "HOST_SERVER2016":
# Microsoft Windows Server 2016 RTM
minTime = datetime.date(2016, 7, 27)
elif hostOsType == "HOST_SERVER2012R2" or version == 6:
# Microsoft Windows Server 2012 R2 RTM (October 17, 2013)
minTime = datetime.date(2013, 10, 17)
elif appId == APP_ID_OFFICE15:
# Microsoft Office 2013 RTM (October 24, 2012)
minTime = datetime.date(2012, 10, 24)
elif hostOsType == "HOST_SERVER2012" or version == 5:
# Microsoft Windows Server 2012 RTM (September 4, 2012)
minTime = datetime.date(2012, 9, 4)
else:
# Windows Server 2008 R2 SP1 (February 16, 2011)
minTime = datetime.date(2011, 2, 16)
# Generate Year and Day Number
randomDate = datetime.date.fromtimestamp(
random.randint(time.mktime(minTime.timetuple()), time.mktime(datetime.datetime.now().timetuple())))
firstOfYear = datetime.date(randomDate.year, 1, 1)
randomDayNumber = int((time.mktime(randomDate.timetuple()) - time.mktime(firstOfYear.timetuple())) / 86400 + 0.5)
# generate the epid string
result = []
result.append(str(hostOsDict["type"]).rjust(5, "0"))
result.append("-")
result.append(str(keyConfig["GroupID"]).rjust(5, "0"))
result.append("-")
result.append(str(productKeyID // 10 ** 6).rjust(3, "0")) # *2to3*
result.append("-")
result.append(str(productKeyID % 10 ** 6).rjust(6, "0"))
result.append("-")
result.append(str(licenseChannel).rjust(2, "0"))
result.append("-")
result.append(str(languageCode))
result.append("-")
result.append(str(hostOsDict["osBuild"]).rjust(4, "0"))
result.append(".0000-")
result.append(str(randomDayNumber).rjust(3, "0"))
result.append(str(randomDate.year).rjust(4, "0"))
return "".join(result)
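
# Usage sketch (illustrative, not part of the original module): generate an
# ePID for a Windows client with the en-US locale (LCID 1033). Output is
# randomized but has the shape "06401-00206-271-123456-03-1033-9600.0000-1692014".
if __name__ == '__main__':
    print(epidGenerator(APP_ID_WINDOWS, 6, 1033))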
| [
"hch.hch@163.com"
] | hch.hch@163.com |
e9a1330a1595a139f084229242758028a2cfc425 | cc1f28b97af0eba00706e70a246d3e737216356f | /generateGroups.py | fab1671ba31e87bf4ac70272865259b5e90368e5 | [] | no_license | Cherishlrx/DeepGroup | 26288aa23b43e6904c45e30c9c7004228a7ff4bc | f4bfe6cc97efdb13ce2065f13695427dea0d8a18 | refs/heads/main | 2023-02-19T07:31:23.530470 | 2021-01-23T17:28:19 | 2021-01-23T17:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,311 | py | import random
import pandas as pd
import itertools
from config import Config
def get_train_test_datasets(lambda_amount, num_groups, overlap_val, max_len_groups, num_items, fileName, version, train_portion=100):
config = Config()
file_name = fileName + '/negative_example_'+ config.decision_rule +'/ne_' + str(num_groups) + 'g-lambda' + str(lambda_amount) + 'version_' + str(version) + '.csv'
if overlap_val > 0:
file_name = fileName + '/overlap_negative_example_'+ config.decision_rule + '/ne_' + str(overlap_val) + 'overlap-g-lambda' + str(lambda_amount) + 'version_' + str(version) + '.csv'
dataframe = pd.read_csv(file_name, error_bad_lines=False, sep='\t')
train_number = int(train_portion * num_groups / 100)
items = [i for i in range(num_items)]
ratings = dataframe.rating.astype('category').cat.codes.values
rankings = dataframe.ranking.astype('category').cat.codes.values
def divide_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
ratings = list(divide_chunks(ratings, num_items))
rankings = list(divide_chunks(rankings, num_items))
test_rankings = rankings[train_number:]
decisions = []
for i in range(len(ratings)):
for j in range(len(ratings[i])):
if ratings[i][j] == 1:
decisions.append(j)
groups = []
for index, row in dataframe.iterrows():
value = []
for i in range(max_len_groups):
if 'user' in str(row[i + 1]):
value.append(int(row[i + 1].replace('user', '')))
groups.append(value)
groups = list(k for k,_ in itertools.groupby(groups))
x_train = [[], []]
y_train = []
x_test = [[], []]
y_test = []
for i in range(len(groups)):
if i < train_number:
for j in range(len(groups[i])):
for k in range(num_items):
x_train[0].append(groups[i][j])
x_train[1].append(items[k])
if decisions[i] == k:
y_train.append(1)
else:
y_train.append(0)
else:
for j in range(len(groups[i])):
for k in range(len(items)):
x_test[0].append(groups[i][j])
x_test[1].append(items[k])
if decisions[i] == k:
y_test.append(1)
else:
y_test.append(0)
return groups, decisions, x_train, y_train, x_test, y_test, test_rankings
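
# Shape note (derived from the construction above, not in the original file):
# x_train/x_test are parallel lists [[user_id, ...], [item_id, ...]] -- each
# group member contributes num_items rows -- and y_train/y_test hold 1 only on
# the item the group decided for.
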
def get_personal_preferencs(fileName, number_of_users, n_items):
if number_of_users == 0:
dataframe = pd.read_csv('dataset/' + fileName + '_' + 'user_ranking.csv', sep='\t',
error_bad_lines=False)
else:
dataframe = pd.read_csv('dataset/' + fileName + '_' + str(number_of_users) + 'user_ranking.csv', sep='\t',
error_bad_lines=False)
dict = {}
for index, row in dataframe.iterrows():
value = (
row['item0'], row['item1'], row['item2'], row['item3'], row['item4'], row['item5'], row['item6'], row['item7'],
row['item8'], row['item9'])
dict['user' + str(index)] = value
# make set of users
users = []
for key, value in dict.items():
users.append(key)
# making preferences
prefs = []
for i in range(len(users)):
inner_prefs = []
for j in range(n_items):
if dict[users[i]][j] == 0:
inner_prefs.append(1)
else:
inner_prefs.append(0)
prefs.append(inner_prefs)
return prefs
def get_train_test_datasets_reverse(lambda_amount, num_groups, overlap_val, max_len_groups, num_items, fileName, version, train_portion=100):
file_name = fileName + '/negative_example_'+ Config.decision_rule +'/ne_' + str(num_groups) + 'g-lambda' + str(
lambda_amount) + 'version_' + str(version) + '.csv'
if overlap_val > 0:
file_name = fileName + '/overlap_negative_example_'+Config.decision_rule +'/ne_' + str(
overlap_val) + 'overlap-g-lambda' + str(
lambda_amount) + 'version_' + str(version) + '.csv'
dataframe = pd.read_csv(file_name, error_bad_lines=False, sep='\t')
train_number = int(train_portion * num_groups / 100)
items = [i for i in range(num_items)]
ratings = dataframe.rating.astype('category').cat.codes.values
rankings = dataframe.ranking.astype('category').cat.codes.values
def divide_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
ratings = list(divide_chunks(ratings, num_items))
# print(ratings)
rankings = list(divide_chunks(rankings, num_items))
test_rankings = rankings
decisions = []
for i in range(len(ratings)):
for j in range(len(ratings[i])):
if ratings[i][j] == 1:
decisions.append(j)
# print(decisions)
groups = []
for index, row in dataframe.iterrows():
value = []
for i in range(max_len_groups):
if 'user' in str(row[i + 1]):
value.append(int(row[i + 1].replace('user', '')))
groups.append(value)
groups = list(k for k,_ in itertools.groupby(groups))
if overlap_val > 0:
personal_prefs = get_personal_preferencs(fileName, Config.num_users, num_items)
else:
personal_prefs = get_personal_preferencs(fileName, 0, num_items)
x_train = [[], []]
y_train = []
x_test = [[], []]
y_test = []
final_test_ranking = []
for i in range(len(groups)):
for j in range(len(groups[i])):
for k in range(num_items):
x_train[0].append(groups[i][j])
x_train[1].append(items[k])
if decisions[i] == k:
y_train.append(1)
else:
y_train.append(0)
if groups[i][j] not in x_test:
x_test[0].append(groups[i][j])
x_test[1].append(items[k])
y_test.append(personal_prefs[groups[i][j]][k])
final_test_ranking.append(test_rankings[i])
return groups, decisions, x_train, y_train, x_test, y_test, final_test_ranking
| [
"s.sajadi@ghoghnos.net"
] | s.sajadi@ghoghnos.net |
d57dc6bd6e6ed40663cea90c3cb805e43497b4f9 | e0980f704a573894350e285f66f4cf390837238e | /.history/news/models_20201124144813.py | df865398f4c40cdf05ca57629f9dae3cd204713b | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | from django.db import models
from modelcluster.fields import ParentalKey
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel
from wagtail.contrib.forms.models import AbstractEmailForm, AbstractFormField
from wagtail.core.fields import RichTextField
from wagtail.images.edit_handlers import ImageChooserPanel

# Create your models here.
# Note: import paths above assume Wagtail 2.x.
class FormField(AbstractFormField):
    page = ParentalKey(
        'NewsPage',
        on_delete=models.CASCADE,
        related_name='form_fields',
    )

class NewsPage(AbstractEmailForm):
    template = 'news/news_page.html'
    landing_page_template = 'news/news_page_leading.html'
    subpage_types = []
    max_count = 1
    intro = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul'])
    thank_you_text = RichTextField(
        blank=True,
        features=['bold', 'italic', 'ol', 'ul'])
    map_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        help_text='Obrazek będzie przycięty do rozmairu 588px na 355 px',
        related_name='+',
    )
    map_url = models.URLField(
        blank=True,
        help_text='Opcjonalne. Jeśli podasz tutaj łączę, obraz stanie się łączem.'
    )
    content_panels = AbstractEmailForm.content_panels + [
        FieldPanel('intro'),
        ImageChooserPanel('map_image'),
        FieldPanel('map_url'),
        InlinePanel('form_fields', label="Form Fields"),
        FieldPanel('thank_you_text'),
        FieldPanel('from_address'),
        FieldPanel('to_address'),
        FieldPanel('subject'),
    ]
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
b6780c329a1ffec2fc775d000bede61cd63fd982 | 0814eb1801f8a6f7b0fd8e3b07b2b8b4b23b2041 | /dataloader/DataLoader.py | 0ec4a93357adcf6c937dc0eacfa8fd811f6208a1 | [] | no_license | xianfengju/SiamRPN-TF | 173d59cf7205bc2c8a1c6b3d921ddc7622fe3db9 | cf0859ee02328a2f32fb7a3ab5b38eaa55a6e17d | refs/heads/master | 2022-02-13T03:47:55.532351 | 2019-06-26T10:07:43 | 2019-06-26T10:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,595 | py | import tensorflow as tf
import numpy as np
import cv2
import os,sys
CURRENT_DIR = os.path.dirname(__file__)
sys.path.append(os.path.join(CURRENT_DIR, '..'))
from dataloader.sampler import Sampler,ShuffleSample
from dataloader.data_augmentation import RandomGray,RandomStretch,CenterCrop,RandomCrop,RandomColorAug,RandomFlip,RandomBlur,RandomDownsample
##########DataLoader
class DataLoader(object):
def __init__(self, config, is_training):
self.config = config
self.is_training = is_training
self.examplar_size = 127
self.instance_size = 255
self.dataset_py = Sampler(config['input_imdb'], config['max_frame_dist'], is_training)
#shuffle = False if self.config.get('lmdb_path', None) else is_training
self.sampler = ShuffleSample(self.dataset_py, shuffle=is_training)
if self.config.get('lmdb_path', None):
import lmdb
env = lmdb.open(self.config['lmdb_path'],map_size = 109951162777)
self.txn = env.begin()
def examplar_transform(self, input_image, gt_examplar_box):
img = CenterCrop(input_image,self.examplar_size)
shift_y = (self.instance_size - self.examplar_size)//2
shift_x = (self.instance_size - self.examplar_size)//2
x1 = gt_examplar_box[0] - shift_x
y1 = gt_examplar_box[1] - shift_y
x2 = gt_examplar_box[2] - shift_x
y2 = gt_examplar_box[3] - shift_y
gt_examplar_box = [x1, y1 ,x2 ,y2]
return img, gt_examplar_box
def instance_transform(self, img, gt_instance_box):
if self.is_training:
#Random flip doesnot affect the location of the target
if self.config['augmentation_config']['random_flip']:
img = RandomFlip(img)
if self.config['augmentation_config']['random_color']:
img = RandomColorAug(img)
if self.config['augmentation_config']['random_blur']:
img = RandomBlur(img)
img,scale = RandomStretch(img, max_stretch=0.4)
img,shift_xy,pad_xy = RandomCrop(img, self.instance_size)
gt_instance_box = gt_instance_box * scale
w = gt_instance_box[2] - gt_instance_box[0] + 1.0
h = gt_instance_box[3] - gt_instance_box[1] + 1.0
cx = (gt_instance_box[0] + gt_instance_box[2])/2.0
cy = (gt_instance_box[1] + gt_instance_box[3])/2.0
cx = cx - tf.to_float(shift_xy[0] - pad_xy[0])
cy = cy - tf.to_float(shift_xy[1] - pad_xy[1])
gt_instance_box=[cx-w/2.0, cy-h/2.0, cx + w/2.0, cy + h/2.0]
return img, gt_instance_box
def build(self):
self.build_dataset()
self.build_iterator()
def build_dataset(self):
def sample_generator():
for video_id in self.sampler:
sample = self.dataset_py[video_id]
yield sample
def transform_fn(img_paths):
def get_bytes_from_lmdb(key):
buffer = self.txn.get(key)
                if buffer is None:
print("%s not found in database, continue"%(key))
return None
img_buffer = np.frombuffer(buffer, dtype=np.uint8)
if self.config.get('lmdb_encode', False):
image = cv2.imdecode(img_buffer, cv2.IMREAD_COLOR)
else:
img_size = int(np.sqrt(len(img_buffer)/3))
image = np.reshape(img_buffer, [img_size, img_size, 3])
return image
if self.config.get('lmdb_path', None):
exemplar_image = tf.py_func(get_bytes_from_lmdb, [img_paths[0]], tf.uint8, name = "exemplar_image")
instance_image = tf.py_func(get_bytes_from_lmdb, [img_paths[1]], tf.uint8, name = "instance_image")
else:
examplar_file = tf.read_file(img_paths[0])
instance_file = tf.read_file(img_paths[1])
exemplar_image = tf.image.decode_jpeg(examplar_file, channels=3, dct_method="INTEGER_ACCURATE")
instance_image = tf.image.decode_jpeg(instance_file, channels=3, dct_method="INTEGER_ACCURATE")
def get_file_info(bytes):
string = str(bytes, encoding="utf-8")
string = string.split('/')[-1]
frame_id = np.int32(string.split('.')[0])
#print(string)
w = int(string.split('.')[2]) #1.w.100.h.100.jpg
h = int(string.split('.')[4])
cx = (self.instance_size-1)/2.0
cy = (self.instance_size-1)/2.0
box = np.array([cx - w/2.0, cy - h/2.0, cx + w/2.0, cy + h/2.0],np.float32)
return box, frame_id
gt_instance_box,instance_frame_id = tf.py_func(get_file_info, [img_paths[1]], [tf.float32, tf.int32],name="gt_instance_box")
gt_instance_box.set_shape([4])
gt_examplar_box,examplar_frame_id = tf.py_func(get_file_info, [img_paths[0]], [tf.float32, tf.int32],name="gt_examplar_box")
gt_examplar_box.set_shape([4])
video = tf.stack([exemplar_image, instance_image])
video = RandomGray(video)
exemplar_image = video[0]
instance_image = video[1]
exemplar_image,gt_examplar_box = self.examplar_transform(exemplar_image, gt_examplar_box)
instance_image,gt_instance_box = self.instance_transform(instance_image, gt_instance_box)
if self.config.get('random_downsample',False):
exemplar_image = RandomDownsample(exemplar_image, self.examplar_size)
instance_image = RandomDownsample(instance_image, self.instance_size)
if self.config.get('time_decay', False):
time_interval = tf.abs(instance_frame_id - examplar_frame_id)
return exemplar_image, instance_image, gt_examplar_box, gt_instance_box, time_interval
else:
return exemplar_image, instance_image, gt_examplar_box, gt_instance_box
dataset = tf.data.Dataset.from_generator(sample_generator,
output_types=(tf.string),
output_shapes=(tf.TensorShape([2])))
dataset = dataset.map(transform_fn, num_parallel_calls=self.config['prefetch_threads'])
dataset = dataset.prefetch(self.config['prefetch_capacity'])
#dataset = dataset.shuffle(buffer_size=1000)
dataset = dataset.repeat()
dataset = dataset.batch(self.config['batch_size'])
self.dataset_tf = dataset
def build_iterator(self):
self.iterator = self.dataset_tf.make_one_shot_iterator()
def get_one_batch(self):
return self.iterator.get_next()
if __name__ == "__main__":
import cv2
config={}
config['input_imdb']="dataset/TrackingNet_VID_DET2014/train.pickle"
config['max_frame_dist']=100
config['prefetch_threads'] = 8
config['prefetch_capacity'] = 8
config['batch_size'] = 1
config['lmdb_path'] = 'dataset/TrackingNet_VID_DET2014/train_lmdb_encode'
config['lmdb_encode'] = True
config['time_decay'] = True
os.environ['CUDA_VISIBLE_DEVICES']=""
with tf.device('/cpu:0'):
test_loader = DataLoader(config,is_training=False)
test_loader.build()
with tf.Session() as sess:
while True:
batch = sess.run(test_loader.get_one_batch())
assert(len(batch) == 5) #exemplar_image, instance_image, gt_examplar_box, gt_instance_box, time_interval
instance = np.uint8(batch[1][0])
examplar = np.uint8(batch[0][0])
try:
cv2.rectangle(examplar, (batch[2][0][0],batch[2][0][1]),(batch[2][0][2],batch[2][0][3]),(0,255,0),3)
cv2.rectangle(instance, (batch[3][0][0],batch[3][0][1]),(batch[3][0][2],batch[3][0][3]),(0,255,0),3)
cv2.imshow("examplar", examplar)
cv2.imshow("instance", instance)
time_interval = np.int32(batch[4])
print("time_interval: %d"%(time_interval))
cv2.waitKey(0)
except:
print(np.shape(examplar), np.shape(instance)) | [
"xiongjiangfeng@gmail.com"
] | xiongjiangfeng@gmail.com |
215571d3b4b7d4e305c325ec81353e980d4c283e | 1ad78a9e70ed4295d11c7df6d33737c8a0af7b18 | /adjust_image_color.py | 12c2a2ecd7e177199f2c88ec9a34463b65d283c3 | [] | no_license | linchaolong-learn/TensorflowLearn | 7200cd2ac1640ec5f12fcfd411c1f424e831ea46 | 0838f3191bdc564a947c8e898b3119767ecf05d3 | refs/heads/master | 2020-03-15T07:18:31.928342 | 2018-06-06T08:18:03 | 2018-06-06T08:18:03 | 132,026,220 | 2 | 0 | null | 2018-05-03T17:07:19 | 2018-05-03T17:07:18 | null | UTF-8 | Python | false | false | 1,296 | py | # 图像色彩调整
import matplotlib.pyplot as plt
import tensorflow as tf
# Read the raw bytes of the image
image_raw_data = tf.gfile.FastGFile("datasets/cat.jpg", 'rb').read()
# Decode the JPEG image
img_data = tf.image.decode_jpeg(image_raw_data)
with tf.Session() as sess:
    # Decrease the image brightness by 0.5
    # adjusted = tf.image.adjust_brightness(img_data, -0.5)
    # Randomly adjust the brightness within [-0.5, 0.5]
    # adjusted = tf.image.random_brightness(img_data, 0.5)
    # Decrease the image contrast by 5
    # adjusted = tf.image.adjust_contrast(img_data, -5)
    # Randomly adjust the contrast within [0.5, 5]
    # adjusted = tf.image.random_contrast(img_data, 0.5, 5)
    # Adjust the image hue
    # adjusted = tf.image.adjust_hue(img_data, 0.1)
    # adjusted = tf.image.adjust_hue(img_data, 0.3)
    # adjusted = tf.image.adjust_hue(img_data, 0.6)
    # adjusted = tf.image.adjust_hue(img_data, 0.9)
    # Randomly adjust the hue within [-0.5, 0.5]
    # adjusted = tf.image.random_hue(img_data, 0.5)
    # Decrease the image saturation by 5
    # adjusted = tf.image.adjust_saturation(img_data, -5)
    # Randomly adjust the saturation within [0.5, 5]
adjusted = tf.image.random_saturation(img_data, 0.5, 5)
plt.imshow(adjusted.eval())
plt.show()
| [
"linchaolong@healthmall.cn"
] | linchaolong@healthmall.cn |
344e07bbfa7a9945dc213604ca7facf2917036fa | c526a482a0da0ef09d05975c97faae062d1911b6 | /Week 5/day 3/JSON_loads.py | 443924bd515c90fe8d17f23333c92280f7536c46 | [] | no_license | Wdecibel/MyPythonProject1 | 2958171e80e9aa74af2f1446a624af3183b6e741 | 4d443091d3df5b3774e490950c5b5e19b18dd149 | refs/heads/master | 2020-07-02T03:59:19.090917 | 2019-08-09T06:36:40 | 2019-08-09T06:36:40 | 201,409,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | # __Author__: Han
# __Date__: 2019/1/17
import json
with open('JSON_text', 'r') as f:
data = f.read()
data = json.loads(data)
print(data['name'])
| [
"hanw@cefcfco.com"
] | hanw@cefcfco.com |
cedfaacfc1ea964fcf106617d42d8c8281ed0dad | 69a9564d94bc9bcd8983703cdaba8c8c388b5c03 | /my-project/back_up_lcd_3.py | ec7b9c68f56dad1df2e92d9814eff663140a2e11 | [] | no_license | arunksoman/PCPL-electro | c67dd4a75bccea5063ae76ea2935a48a9e52a68b | 52afbb63ea4a8843b774c3b0d7dc636b997ba724 | refs/heads/master | 2020-05-06T15:41:57.483108 | 2020-02-11T16:04:25 | 2020-02-11T16:04:25 | 180,203,771 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,968 | py | import RPi.GPIO as GPIO
import random
import time
import sys
import requests
import json
import serial
product = json.loads(requests.get("http://192.168.43.94:8084/SmartTrolley/Files/ProductData.txt").text)
print(product)
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
ser = serial.Serial("/dev/ttyACM0", baudrate=9600, timeout= 1)
loop = False
LCD_RS = 21
LCD_E = 24
LCD_D4 = 23
LCD_D5 = 19
LCD_D6 = 18
LCD_D7 = 22
# Display constants
LCD_WIDTH = 16 # Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
# Timing constants
E_PULSE = 0.005
E_DELAY = 0.005
def main():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LCD_E, GPIO.OUT) # E
GPIO.setup(LCD_RS, GPIO.OUT) # RS
GPIO.setup(LCD_D4, GPIO.OUT) # DB4
GPIO.setup(LCD_D5, GPIO.OUT) # DB5
GPIO.setup(LCD_D6, GPIO.OUT) # DB6
GPIO.setup(LCD_D7, GPIO.OUT) # DB7
def lcd_init():
# Initialise display
lcd_byte(0x33, LCD_CMD) # 110011 Initialise
lcd_byte(0x32, LCD_CMD) # 110010 Initialise
lcd_byte(0x06, LCD_CMD) # 000110 Cursor move direction
lcd_byte(0x0C, LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
lcd_byte(0x28, LCD_CMD) # 101000 Data length, number of lines, font size
lcd_byte(0x01, LCD_CMD) # 000001 Clear display
time.sleep(E_DELAY)
def lcd_byte(bits, mode):
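    # 4-bit LCD interface: write the high nibble of `bits` first, then the low
    # nibble, pulsing Enable after each so the display latches the data.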
GPIO.output(LCD_RS, mode) # RS
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits& 0x10== 0x10:
GPIO.output(LCD_D4, True)
if bits& 0x20== 0x20 :
GPIO.output(LCD_D5, True)
if bits& 0x40 == 0x40 :
GPIO.output(LCD_D6, True)
if bits& 0x80 == 0x80 :
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
lcd_toggle_enable()
# Low bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits& 0x01 == 0x01 :
GPIO.output(LCD_D4, True)
if bits& 0x02 == 0x02 :
GPIO.output(LCD_D5, True)
if bits& 0x04 == 0x04 :
GPIO.output(LCD_D6, True)
if bits& 0x08 == 0x08 :
GPIO.output(LCD_D7, True)
# Toggle 'Enable' pin
lcd_toggle_enable()
def lcd_toggle_enable():
# Toggle enable
time.sleep(E_DELAY)
GPIO.output(LCD_E, True)
time.sleep(E_PULSE)
GPIO.output(LCD_E, False)
time.sleep(E_DELAY)
def lcd_string(message, line):
# Send string to display
message = message.ljust(LCD_WIDTH," " )
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]), LCD_CHR)
def force(message, force):
message = message.ljust(LCD_WIDTH," " )
lcd_byte(force, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]), LCD_CHR)
low = 0
high = 999
trolly_id = random.uniform(low,high)
trolley_id = int(trolly_id)
print("Trolley id= ", trolley_id)
str_trolley = str(trolley_id)
frmt_str_trolley = " " + str_trolley
main()
lcd_init()
lcd_string(" Trolly id ", LCD_LINE_1)
lcd_string(frmt_str_trolley, LCD_LINE_2)
time.sleep(30)
trolley_id_ret = json.loads(requests.get("http://192.168.43.94:8084/SmartTrolley/Files/PINVerify.txt").text)
print(trolley_id_ret)
print(type(trolley_id_ret))
print(type(trolley_id_ret[0][0]))
if trolley_id == trolley_id_ret[0][0]:
lcd_string("Pin Verified", LCD_LINE_1)
lcd_string("Thank you", LCD_LINE_2)
loop = True
elif trolley_id != trolley_id_ret[0][0]:
loop = False
lcd_string("Pin Verification", LCD_LINE_1)
lcd_string(" Failed", LCD_LINE_2)
time.sleep(2)
lcd_string("", LCD_LINE_1)
lcd_string(" Exiting...", LCD_LINE_2)
time.sleep(1)
sys.exit()
print(product)
out_dict = {}
out_dict['RFID'] = []
buzzer = 31
rows = [8, 37, 11, 12]
cols = [32, 33, 35,36]
keys = [
['1', '2', '3', 'A'],
['4', '5', '6', 'B'],
['7', '8', '9', 'C'],
['*', '0', '#', 'D']]
product = [
['Product1', "C7EA3BE", 8000, False],
['Product2', "372316B", 1000, False],
['Product3', "9781ABE", 1200, False],
['Product4', "173417B", 800, False],
['Product5', "734FB2B", 900, False]
]
GPIO.setup(buzzer, GPIO.OUT)
for row_pin in rows:
GPIO.setup(row_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
for col_pin in cols:
GPIO.setup(col_pin, GPIO.OUT)
def get_key():
key = 0
for col_num, col_pin in enumerate(cols):
GPIO.output(col_pin, 1)
for row_num, row_pin in enumerate(rows):
if GPIO.input(row_pin):
key = keys[row_num][col_num]
GPIO.output(col_pin, 0)
return key
count = 0
budget = 0
cart = 0
fixed = 5
print("Press A for Entering budget and B for skipping.")
lcd_string("A :Enter Budget", LCD_LINE_1)
lcd_string("B: skip Budget", LCD_LINE_2)
Budget_enter = False
data = {}
while loop:
key = get_key()
if key :
if key == "A":
count = 0
print(key)
print("Enter your Budget as a 5 digit number on keypad:")
lcd_string("Enter 5 digit ", LCD_LINE_1)
lcd_string("number on keypad", LCD_LINE_2)
skip = False
if key == "B":
print(key)
count = 6
Budget_enter = True
skip = True
if not skip and key != "A" and count < 6:
print(skip)
key = get_key()
print("count = ", count)
force_cursor = [0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0]
temp = int(key) * (10 ** (fixed - count))
budget = budget + temp
bud_lcd = str(budget)
conc_budg = "budget = " + bud_lcd
lcd_string("", LCD_LINE_1)
lcd_string("", LCD_LINE_2)
lcd_string(conc_budg, force_cursor[count])
print("Budget = ", budget)
count = count + 1
if count >= 5:
Budget_enter = True
print("You can't enter more than 5 digits")
lcd_string("Can't enter more", LCD_LINE_1)
lcd_string("than 5 digits", LCD_LINE_2)
time.sleep(0.5)
if Budget_enter:
print("Hold a tag near the reader")
lcd_string("Hold Card Near", LCD_LINE_1)
lcd_string(" the Reader ", LCD_LINE_2)
id = ser.readline()
print(id)
id = id.decode('utf-8')
id = id[0:7]
if key == "D":
lcd_string("Thank You", LCD_LINE_1)
lcd_string("Visit Again", LCD_LINE_2)
GPIO.cleanup()
sys.exit()
lcd_string("", LCD_LINE_1)
lcd_string(id , LCD_LINE_2)
print("ID: ", id)
for index in range(5):
if id == product[index][1] and not product[index][3]:
product[index][3] = True
prize = product[index][2]
"""GPIO.OUT(31, True)
time.sleep(0.7)
GPIO.output(buzzer,False)"""
out_dict.update({product[index][0]:product[index][3]})
""" product_id = '"' + product[index][0] + '"'
print(product_id)
product_st = str(product[index][3])
status = '"' + product_st + '"' """
out_dict['RFID'].append({
'product': product[index][0],
'status': product[index][3]
})
print(out_dict)
cart = cart + prize
str_cart = str(cart)
conc_cart = "cart = "+ str_cart
lcd_string(product[index][0] + " added", LCD_LINE_1)
lcd_string(conc_cart, LCD_LINE_2)
time.sleep(0.5)
print("prize added= ", prize)
elif id == product[index][1] and product[index][3]:
product[index][3] = False
prize = product[index][2]
out_dict.update({product[index][0]:product[index][3]})
str_cart = str(cart)
conc_cart = "cart = "+ str_cart
lcd_string(product[index][0] + " removed", LCD_LINE_1)
lcd_string(conc_cart, LCD_LINE_2)
print(product)
"""GPIO.OUT(31, True)
time.sleep(0.7)
GPIO.output(buzzer,False)"""
cart = cart - prize
print("prize removed= ", prize)
out_dict['RFID'].append({
'product': product[index][0],
'status': product[index][3]
})
print("cart = " , cart)
out_json = json.dumps(out_dict, indent=4)
    with open("output.json", "w") as out_file:  # close the file after each write
        print(out_json, file=out_file)
print(out_json)
if cart > budget and not skip:
print("Warning: Your Budget Exceeded")
key = get_key()
time.sleep(0.3)
lcd_init()
GPIO.cleanup()
| [
"noreply@github.com"
] | noreply@github.com |
a8f2cafb277643c76e1a634c5fcab184d07b9eb5 | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /third_party/blink/web_tests/external/wpt/tools/third_party/pytest/src/_pytest/_code/__init__.py | 815c13b42c25bd314988dbaa7ff9f4e3d1d2e5c2 | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 410 | py | """ python inspection/code generation API """
from __future__ import absolute_import, division, print_function
from .code import Code # noqa
from .code import ExceptionInfo # noqa
from .code import Frame # noqa
from .code import Traceback # noqa
from .code import getrawcode # noqa
from .source import Source # noqa
from .source import compile_ as compile # noqa
from .source import getfslineno # noqa
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com |
0bc20c0d41b836f3d9feff0c9ebc05cc4c02a45a | b9054f8fb2da88a4788e04f06e52e1745ef5683e | /displayer/custom_entry.py | 9c11f2cf36b3346b574febf1087a52f9fc5452d8 | [] | no_license | thinkingabouther/functionDisplayer | 9f9f012af54daa02d0062325d3cb9ab8aa807d1f | 1517847498bb21e0ce05a28798a1e74d5210a4dc | refs/heads/master | 2021-09-12T02:12:17.915235 | 2018-03-26T17:20:24 | 2018-03-26T17:20:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | from tkinter import Entry
class EntryWithBackgroundText(Entry):
    """Entry widget that shows grey placeholder text whenever it is left empty."""
    def __init__(self, *args, **kwargs):
        self.background_text = kwargs.pop("background_text", "")
        super(EntryWithBackgroundText, self).__init__(*args, **kwargs)
        self.insert(0, self.background_text)
        self.config(foreground='grey')  # style the initial placeholder too, not only later ones
        self.bind('<FocusOut>', self.change_exit)
        self.bind('<FocusIn>', self.change_enter)
    def change_exit(self, event):
        # restore the placeholder only if the user left the entry empty
        if self.get():
            return
        self.delete(0, 'end')
        self.insert(0, self.background_text)
        self.config(foreground='grey')
    def change_enter(self, event):
        # clear the placeholder (never user input) when the entry gains focus
        if self.get() == self.background_text:
            self.delete(0, 'end')
        self.config(foreground='black')
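# Minimal manual test (editor's addition; only the class above is assumed).
# Running the module directly shows the widget next to a plain Entry so focus
# can move in and out and the placeholder behaviour can be observed:
if __name__ == "__main__":
    import tkinter as tk
    root = tk.Tk()
    entry = EntryWithBackgroundText(root, background_text="type a function, e.g. sin(x)")
    entry.pack(padx=10, pady=10)
    tk.Entry(root).pack(padx=10, pady=10)  # second widget so focus can leave the entry
    root.mainloop()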
| [
"widauka@ya.ru"
] | widauka@ya.ru |
67245adfabd81ea366d100de52754decec5d6173 | eeeceb85a215522ef412fddc44cc471c894f28af | /src/python/Problem113.py | d87253a0df06867a8444367d729a88f520e69e6c | [
"MIT"
] | permissive | mchrzanowski/ProjectEuler | 3874ad141ca0bf633039899807a6afc1cca67134 | 06a24cadbd2c38fb42c3935779fc7ffb6de4e1b5 | refs/heads/master | 2021-01-25T05:34:32.548331 | 2015-02-08T20:31:10 | 2015-02-08T20:31:10 | 3,176,325 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,016 | py | '''
Created on Apr 7, 2012
@author: mchrzanowski
'''
from operator import ge, le
from time import time
def calculateBouncyNumbers(limit):
    '''
    we count the numbers that are NOT bouncy. they form two sets:
    1). numbers whose digits are constant or decreasing
    2). numbers whose digits are constant or increasing
    there exists a non-empty intersection between these two sets: the constant-digit
    numbers, and there are [# of digits] * 9 of them.
    but we don't want to use sets because that's lame and slow.
    we calculate the unique non-constant numbers for one direction at a time.
    we then add the shared constant-digit numbers back in once.
    '''
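    # Sanity checks (editor's addition): the expected values below are quoted
    # from the Project Euler #113 problem statement, not computed here --
    #   calculateBouncyNumbers(6) should give 12951 (non-bouncy numbers below 10**6)
    #   calculateBouncyNumbers(10) should give 277032 (non-bouncy numbers below 10**10)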
increasingNumbers = calculateUniqueNumbersInOneDirection(limit = limit, goingForward = True)
decreasingNumbers = calculateUniqueNumbersInOneDirection(limit = limit, goingForward = False)
return increasingNumbers + decreasingNumbers + 9 * limit
def calculateUniqueNumbersInOneDirection(limit, goingForward):
    '''
    the basic idea here is that the count of such numbers that are n digits long
    is a function of the counts for numbers n - 1 digits long.
    which n - 1 digit counts we look at depends on whether we are going forward or not.
    this means that we keep track of the counts keyed by each number's starting digit.
    if we're going forward, we care about the counts for all n - 1 digit numbers that start with a digit ge a given value from 1-9
    if backwards, we care about all that start with a digit le a given value from 0-9 (zero has no place in increasing numbers)
    '''
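    # Worked example (editor's addition): with goingForward = True, after the
    # first pass combinationDict[d] holds the count of 2-digit non-decreasing
    # numbers led by digit d, e.g. combinationDict[8] == 2 (88 and 89) and
    # combinationDict[1] == 9 (11, 12, ..., 19); totalNumbers is then 36, the
    # count of strictly increasing 2-digit numbers.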
def getDefaults():
'''
        return a dict holding the starting count for each possible leading digit.
        every 1-digit natural number counts itself, so each key starts at 1
'''
defaults = {}
if goingForward: # increasing numbers never have a zero. but zeros are vital for decreasing numbers.
startingValue = 1
else:
startingValue = 0
for i in xrange(startingValue, 10):
defaults[i] = 1
return defaults
combinationDict = getDefaults()
    totalNumbers = 0  # skip the 1-digit numbers here; the constant-digit term in the caller covers them.
if goingForward: # for the comparisons in the loop.
operator = ge
else:
operator = le
for iteration in xrange(1, limit):
for mutatingKey in sorted(combinationDict.keys(), reverse = not goingForward):
combinationDict[mutatingKey] = sum(combinationDict[key] for key in combinationDict if operator(key, mutatingKey))
totalNumbers += combinationDict[mutatingKey] - 1 # remove one to account for the repeating digit case
return totalNumbers
def main():
DIGITS = 100
print calculateBouncyNumbers(DIGITS)
if __name__ == '__main__':
start = time()
main()
end = time()
print "Runtime:", end - start, "seconds."
| [
"mike.chrzanowski0@gmail.com"
] | mike.chrzanowski0@gmail.com |
bb34d51fcdd2bb89209f9280e0d806d946c14977 | 4dbf7b5be86e7cc27cc3606106ce1daa792ade4b | /mmdet/models/detectors/base.py | b74cb657faa80d77757bf451dfe113c9a04efb42 | [
"MIT"
] | permissive | ashwinvaswani/lesion_detection | 61a011a90e9e42ab0cc56eba066540c879946f1d | 26246e3954209075c56649dfb2ef565290e6dcb3 | refs/heads/master | 2023-04-03T03:56:21.856885 | 2021-04-03T12:14:18 | 2021-04-03T12:14:18 | 273,017,913 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,706 | py | import logging
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch.nn as nn
import inspect
from mmdet.core import auto_fp16, get_classes, tensor2imgs
class BaseDetector(nn.Module):
"""Base class for detectors"""
__metaclass__ = ABCMeta
def __init__(self):
super(BaseDetector, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_shared_head(self):
return hasattr(self, 'shared_head') and self.shared_head is not None
@property
def with_bbox(self):
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
return hasattr(self, 'mask_head') and self.mask_head is not None
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
for img in imgs:
yield self.extract_feat(img)
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""
Args:
img (list[Tensor]): list of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has:
                'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
**kwargs: specific to concrete implementation
"""
pass
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
pass
def init_weights(self, pretrained=None):
if pretrained is not None:
logger = logging.getLogger()
logger.info('load model from: {}'.format(pretrained))
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_meta (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch
"""
# for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
# if not isinstance(var, list):
# raise TypeError('{} must be a list, but got {}'.format(
# name, type(var)))
# num_augs = len(imgs)
# if num_augs != len(img_metas):
# raise ValueError(
# 'num of augmentations ({}) != num of image meta ({})'.format(
# len(imgs), len(img_metas)))
# # TODO: remove the restriction of imgs_per_gpu == 1 when prepared
# imgs_per_gpu = imgs[0].size(0)
# assert imgs_per_gpu == 1
# if num_augs == 1:
# return self.simple_test(imgs[0], img_metas[0], **kwargs)
# else:
# return self.aug_test(imgs, img_metas, **kwargs)
return self.simple_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_meta, return_loss=True, debug=False, **kwargs):
"""
Calls either forward_train or forward_test depending on whether
return_loss=True. Note this setting will change the expected inputs.
When `return_loss=False`, img and img_meta are single-nested (i.e.
        Tensor and List[dict]), and when `return_loss=True`, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
# print("*"*20)
# print("Kwarg values are:\n")
# for key,value in kwargs.items():
# print(key,value)
# print()
# print("*"*20)
# lines = inspect.getsource(self.forward_train)
# print(lines)
return self.forward_train(img, img_meta, **kwargs)
elif debug:
return self.forward_debug(img, img_meta, **kwargs)#hyadd
else:
return self.forward_test(img, img_meta, **kwargs)
def show_result(self, data, result, dataset=None, score_thr=0.3):
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
img_tensor = data['img'].data
print(img_tensor.shape)
img_metas = data['img_meta'].data[0]
print(img_metas)
imgs = tensor2imgs(img_tensor, **{'mean': np.array([0.5, 0.5, 0.5]), 'std': np.array([255., 255., 255.]), 'to_rgb': False})
assert len(imgs) == len(img_metas)
if dataset is None:
class_names = self.CLASSES
elif isinstance(dataset, str):
class_names = get_classes(dataset)
elif isinstance(dataset, (list, tuple)):
class_names = dataset
else:
raise TypeError(
'dataset must be a valid dataset name or a sequence'
' of class names, not {}'.format(type(dataset)))
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
print(segms)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
                    print('mask shape', mask.shape)
img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img_show,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr)
| [
"ashwin.vaswani99@gmail.com"
] | ashwin.vaswani99@gmail.com |
0208a4a50bebc3bf813bc885b5acd3bc9bda9696 | 88c1f9ccb62e91d6b0574bcde1043921bdeb0126 | /client_cli/src/d1_cli/tests/test_cli.py | cf82f63f797c9ac5a31102d6f830c03b7f3c3656 | [
"Apache-2.0"
] | permissive | jevans97utk/d1_python | 83b8de8780287c655779844f367b9189413da074 | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | refs/heads/master | 2020-05-21T01:16:50.677816 | 2019-04-22T16:09:44 | 2019-04-22T16:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,974 | py | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test CLI high level functionality."""
import contextlib
import io
import os
import re
import tempfile
import freezegun
import mock
import pytest
import responses
import d1_cli.impl.command_parser
import d1_cli.impl.exceptions
import d1_common.date_time
import d1_common.system_metadata
import d1_common.types.dataoneTypes
import d1_test.d1_test_case
import d1_test.instance_generator.random_data
import d1_test.mock_api.catch_all
import d1_test.mock_api.get
import d1_test.mock_api.get_log_records
import d1_test.mock_api.get_system_metadata
import d1_test.mock_api.list_nodes
import d1_test.mock_api.list_objects
import d1_client.mnclient
@freezegun.freeze_time("1977-03-27")
@d1_test.d1_test_case.reproducible_random_decorator("TestCLI")
class TestCLI(d1_test.d1_test_case.D1TestCase):
def setup_method(self, method):
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("verbose true")
def test_1000(self, cn_client_v2):
"""preloop(): Successful initialization."""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
def test_1010(self, cn_client_v2):
"""preloop(): Successful deinitialization."""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.postloop()
assert "Exiting" in out_stream.getvalue()
def test_1020(self, cn_client_v2):
"""precmd(): Successful line formattting."""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
test_cmd_str = "somecommand arg1 arg2 arg3"
received_line = cli.precmd(test_cmd_str)
assert test_cmd_str in received_line
def test_1030(self, cn_client_v2):
"""default(): Yields unknown command."""
cli = d1_cli.impl.command_parser.CLI()
test_cmd_str = "somecommand arg1 arg2 arg3"
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.default(test_cmd_str)
assert "Unknown command: somecommand" in out_stream.getvalue()
def test_1040(self, cn_client_v2):
"""run_command_line_arguments():"""
cli = d1_cli.impl.command_parser.CLI()
test_cmd_str = "somecommand arg1 arg2 arg3"
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.default(test_cmd_str)
assert "Unknown command: somecommand" in out_stream.getvalue()
def test_1050(self, cn_client_v2):
"""do_help(): Valid command returns help string."""
cli = d1_cli.impl.command_parser.CLI()
cli.stdout = io.StringIO()
test_cmd_str = "get"
cli.do_help(test_cmd_str)
assert "The object is saved to <file>" in cli.stdout.getvalue()
def test_1060(self, cn_client_v2):
"""do_history(): Returns history."""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
test_cmd_str = "somecommand1 arg1 arg2 arg3"
cli.precmd(test_cmd_str)
test_cmd_str = "somecommand2 arg1 arg2 arg3"
cli.precmd(test_cmd_str)
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_history("")
assert "somecommand1" in out_stream.getvalue()
assert "somecommand2" in out_stream.getvalue()
# do_exit()
def test_1070(self, cn_client_v2):
"""do_exit(): Gives option to cancel if the operation queue is not empty."""
self._do_exit("yes", 1)
def test_1080(self, cn_client_v2):
"""do_exit(): Does not exit if cancelled."""
self._do_exit("no", 0)
def _do_exit(self, answer_str, exit_call_count):
"""do_exit(): Gives option to cancel if the operation queue is not empty."""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
fi, tmp_path = tempfile.mkstemp(
prefix="test_dataone_cli.", suffix=".tmp", text=True
)
os.close(fi)
cli.do_set("authoritative-mn urn:node:myTestMN")
cli.do_set("rights-holder test-rights-holder-subject")
create_operation = cli._command_processor._operation_maker.create(
"test_pid", tmp_path, "test_format_id"
)
cli._command_processor._operation_queue.append(create_operation)
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
with d1_test.d1_test_case.mock_input(answer_str):
with mock.patch("sys.exit", return_value="") as mock_method:
cli.do_exit("")
assert mock_method.call_count == exit_call_count
assert (
"There are 1 unperformed operations in the write operation queue"
in out_stream.getvalue()
)
def test_1090(self, cn_client_v2):
"""do_exit(): Calls sys.exit()"""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
with mock.patch("sys.exit", return_value="") as mock_method:
cli.do_quit("")
assert mock_method.call_count > 0
def test_1100(self, cn_client_v2):
"""do_eof(): Calls sys.exit()"""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
with mock.patch("sys.exit", return_value="") as mock_method:
cli.do_eof("")
assert mock_method.call_count > 0
def test_1110(self, cn_client_v2):
"""do_reset(), do_set(), do_save(), do_load(): Session to disk round trip."""
cli = d1_cli.impl.command_parser.CLI()
cli.preloop()
fi, path = tempfile.mkstemp(
prefix="test_dataone_cli.", suffix=".tmp", text=True
)
os.close(fi)
# Reset, set some values and save to file
cli.do_reset("")
cli.do_set("editor test_editor")
cli.do_set("cn-url test_cn-url")
cli.do_set("key-file test-key-file")
cli.do_save(path)
# Reset and check that values are at their defaults
cli.do_reset("")
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("editor")
assert "editor: nano" in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("cn-url")
assert "cn-url: https://cn.dataone.org/cn" in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("key-file")
assert "key-file: None" in out_stream.getvalue()
# Load from file and verify
cli.do_load(path)
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("editor")
assert "editor: test_editor" in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("cn-url")
assert "cn-url: test_cn-url" in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("key-file")
assert "key-file: test-key-file" in out_stream.getvalue()
def test_1120(self, cn_client_v2):
"""set: Command gives expected output on flag toggle."""
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("verbose true")
assert 'verbose to "true"' in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("verbose false")
assert 'verbose to "false"' in out_stream.getvalue()
def test_1130(self, cn_client_v2):
"""set: Command gives expected output when setting count."""
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("count 2")
assert 'count to "2"' in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("count 3")
assert 'count to "3"' in out_stream.getvalue()
def test_1140(self, cn_client_v2):
"""set: Command gives expected output when setting query string."""
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("query a=b")
assert 'variable query to "a=b"' in out_stream.getvalue()
@d1_test.mock_api.catch_all.activate
def test_1150(self, cn_client_v2):
"""ping (no arguments): Ping the CN and MN that is specified in the session."""
d1_test.mock_api.catch_all.add_callback(d1_test.d1_test_case.MOCK_CN_BASE_URL)
d1_test.mock_api.catch_all.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("cn-url {}".format(d1_test.d1_test_case.MOCK_CN_BASE_URL))
cli.do_set("mn-url {}".format(d1_test.d1_test_case.MOCK_MN_BASE_URL))
cli.do_ping("")
def test_1160(self, cn_client_v2):
"""do_allowaccess(): Correctly sets access control."""
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_allowaccess("test_subject_1 write")
cli.do_allowaccess("test_subject_2 write")
cli.do_allowaccess("test_subject_3 changePermission")
access_pyxb = cli._command_processor.get_session().get_access_control()
check_cnt = 0
for allow_pyxb in access_pyxb.allow:
if allow_pyxb in ("test_subject_1", "test_subject_2", "test_subject_3"):
check_cnt += 1
assert check_cnt == 3
assert (
'Set changePermission access for subject "test_subject_3"'
in out_stream.getvalue()
)
def test_1170(self, cn_client_v2):
"""do_denyaccess(): Subject without permissions raises InvalidArguments."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_allowaccess("test_subject_1 write")
cli.do_allowaccess("test_subject_2 write")
cli.do_allowaccess("test_subject_3 changePermission")
with pytest.raises(d1_cli.impl.exceptions.InvalidArguments):
cli.do_denyaccess("unknown_subject")
def test_1180(self, cn_client_v2):
"""do_denyaccess(): Subject with permissions is removed."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_allowaccess("test_subject_1 write")
cli.do_allowaccess("test_subject_2 write")
cli.do_allowaccess("test_subject_3 changePermission")
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("")
env_str = out_stream.getvalue()
assert "test_subject_3: changePermission" in env_str
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_denyaccess("test_subject_3")
assert 'Removed subject "test_subject_3"' in out_stream.getvalue()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("")
env_str = out_stream.getvalue()
assert "test_subject_1: write" in env_str
assert "test_subject_2: write" in env_str
assert "test_subject_3: changePermission" not in env_str
def test_1190(self, cn_client_v2):
"""do_clearaccess(): Removes all subjects."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_allowaccess("test_subject_1 write")
cli.do_allowaccess("test_subject_2 write")
cli.do_allowaccess("test_subject_3 changePermission")
cli.do_clearaccess("")
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_set("")
env_str = out_stream.getvalue()
assert "test_subject_1: write" not in env_str
assert "test_subject_2: write" not in env_str
assert "test_subject_3: changePermission" not in env_str
def test_1200(self, cn_client_v2):
"""do_allowrep(), do_denyrep(): Toggles replication."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_reset("")
cli.do_allowrep("")
assert (
cli._command_processor.get_session()
.get_replication_policy()
.get_replication_allowed()
)
cli.do_denyrep("")
assert (
not cli._command_processor.get_session()
.get_replication_policy()
.get_replication_allowed()
)
def test_1210(self, cn_client_v2):
"""do_preferrep(): Adds preferred replication targets."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_reset("")
cli.do_preferrep("preferred-mn-1")
cli.do_preferrep("preferred-mn-2")
cli.do_preferrep("preferred-mn-3")
preferred_mn_list = (
cli._command_processor.get_session()
.get_replication_policy()
.get_preferred()
)
assert [
"preferred-mn-1",
"preferred-mn-2",
"preferred-mn-3",
] == preferred_mn_list
def test_1220(self, cn_client_v2):
"""do_blockrep(): Adds blocked replication targets."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_reset("")
cli.do_blockrep("blocked-mn-1")
cli.do_blockrep("blocked-mn-2")
cli.do_blockrep("blocked-mn-3")
blocked_mn_list = (
cli._command_processor.get_session().get_replication_policy().get_blocked()
)
assert ["blocked-mn-1", "blocked-mn-2", "blocked-mn-3"] == blocked_mn_list
def test_1230(self, cn_client_v2):
"""do_removerep(): Adds blocked replication targets."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_reset("")
cli.do_preferrep("preferred-mn-1")
cli.do_preferrep("preferred-mn-2")
cli.do_preferrep("preferred-mn-3")
cli.do_blockrep("blocked-mn-1")
cli.do_blockrep("blocked-mn-2")
cli.do_blockrep("blocked-mn-3")
cli.do_removerep("blocked-mn-2")
cli.do_removerep("preferred-mn-3")
preferred_mn_list = (
cli._command_processor.get_session()
.get_replication_policy()
.get_preferred()
)
assert ["preferred-mn-1", "preferred-mn-2"] == preferred_mn_list
blocked_mn_list = (
cli._command_processor.get_session().get_replication_policy().get_blocked()
)
assert ["blocked-mn-1", "blocked-mn-3"] == blocked_mn_list
def test_1240(self, cn_client_v2):
"""do_numberrep(): Sets preferred number of replicas."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_reset("")
cli.do_numberrep("42")
received_num_replicas = (
cli._command_processor.get_session()
.get_replication_policy()
.get_number_of_replicas()
)
assert received_num_replicas == 42
def test_1250(self, cn_client_v2):
"""do_clearrep(): Resets replication policy to default."""
cli = d1_cli.impl.command_parser.CLI()
cli.do_reset("")
cli.do_preferrep("preferred-mn-1")
cli.do_preferrep("preferred-mn-2")
cli.do_blockrep("blocked-mn-1")
cli.do_blockrep("blocked-mn-2")
cli.do_numberrep("42")
cli.do_clearrep("")
preferred_mn_list = (
cli._command_processor.get_session()
.get_replication_policy()
.get_preferred()
)
assert not preferred_mn_list
blocked_mn_list = (
cli._command_processor.get_session().get_replication_policy().get_blocked()
)
assert not blocked_mn_list
@responses.activate
def test_1260(self, capsys):
"""list nodes: Gives expected output."""
d1_test.mock_api.list_nodes.add_callback("http://responses/cn")
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("cn-url http://responses/cn")
cli.do_listnodes("")
stdout, stderr = capsys.readouterr()
self.sample.assert_equals(stdout, "list_nodes")
@responses.activate
def test_1270(self, cn_client_v2):
"""do_get(): Successful file download."""
d1_test.mock_api.get.add_callback("http://responses/cn")
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("mn-url http://responses/cn")
with tempfile.NamedTemporaryFile() as tmp_file:
tmp_file_path = tmp_file.name
pid_str = "test_pid_1234"
cli.do_get("{} {}".format(pid_str, tmp_file_path))
with open(tmp_file_path, "rb") as f:
received_sciobj_bytes = f.read()
client = d1_client.mnclient.MemberNodeClient("http://responses/cn")
expected_sciobj_bytes = client.get(pid_str).content
assert received_sciobj_bytes == expected_sciobj_bytes
@responses.activate
def test_1280(self, cn_client_v2, caplog):
"""do_meta(): Successful system metadata download."""
d1_test.mock_api.get_system_metadata.add_callback("http://responses/cn")
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("cn-url http://responses/cn")
with d1_test.d1_test_case.temp_file_name() as tmp_file_path:
cli.do_meta("test_pid_1234 {}".format(tmp_file_path))
with open(tmp_file_path, "rb") as f:
received_sysmeta_xml = f.read().decode("utf-8")
self.sample.assert_equals(received_sysmeta_xml, "do_meta")
@responses.activate
def test_1290(self, cn_client_v2):
"""do_list(): Successful object listing."""
d1_test.mock_api.list_objects.add_callback("http://responses/cn")
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("mn-url http://responses/cn")
with d1_test.d1_test_case.temp_file_name() as tmp_file_path:
cli.do_list(tmp_file_path)
with open(tmp_file_path, "rb") as f:
received_object_list_xml = f.read().decode("utf-8")
self.sample.assert_equals(received_object_list_xml, "do_list")
@responses.activate
def test_1300(self, cn_client_v2):
"""do_log(): Successful object listing."""
d1_test.mock_api.get_log_records.add_callback("http://responses/cn")
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("mn-url http://responses/cn")
with tempfile.NamedTemporaryFile() as tmp_file:
tmp_file_path = tmp_file.name
cli.do_log(tmp_file_path)
with open(tmp_file_path, "rb") as f:
received_event_log_pyxb = d1_common.types.dataoneTypes.CreateFromDocument(
f.read()
)
now = d1_common.date_time.utc_now()
for log_entry in received_event_log_pyxb.logEntry:
log_entry.dateLogged = now
self.sample.assert_equals(received_event_log_pyxb, "do_log", cn_client_v2)
#
# Write Operations
#
@d1_test.mock_api.catch_all.activate
@freezegun.freeze_time("1977-02-27")
def test_1310(self, cn_client_v2):
"""do_create(): Expected REST call is issued."""
d1_test.mock_api.catch_all.add_callback(d1_test.d1_test_case.MOCK_MN_BASE_URL)
cli = d1_cli.impl.command_parser.CLI()
with self._add_write_operation_to_queue(
cli, cli.do_create, "{pid} {tmp_file_path}"
):
self._assert_queued_operations(cli, 1, "create")
# Check cancel
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
with d1_test.d1_test_case.mock_input("no"):
with pytest.raises(d1_cli.impl.exceptions.InvalidArguments):
cli.do_run("")
assert "Continue" in out_stream.getvalue()
# Check create
with mock.patch("d1_cli.impl.client.CLIMNClient.create") as mock_client:
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
with d1_test.d1_test_case.mock_input("yes"):
cli.do_run("")
name, args, kwargs = mock_client.mock_calls[0]
create_pid_str, tmp_file, create_sysmeta_pyxb = args
d1_common.system_metadata.normalize_in_place(
create_sysmeta_pyxb, reset_timestamps=True
)
self.sample.assert_equals(create_sysmeta_pyxb, "do_create", cn_client_v2)
def test_1320(self, cn_client_v2):
"""do_clearqueue(): Queue can be cleared."""
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
with self._add_write_operation_to_queue(
cli, cli.do_create, "{pid} {tmp_file_path}"
):
self._assert_queued_operations(cli, 1, "create")
with d1_test.d1_test_case.mock_input("yes"):
cli.do_clearqueue("")
self._assert_queue_empty(cli)
assert "You are about to clear" in out_stream.getvalue()
def test_1330(self, cn_client_v2):
"""do_update(): Task is added to queue."""
cli = d1_cli.impl.command_parser.CLI()
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
with self._add_write_operation_to_queue(
cli, cli.do_update, "old_pid {pid} {tmp_file_path}"
):
self._assert_queued_operations(cli, 1, "update")
with d1_test.d1_test_case.mock_input("yes"):
cli.do_clearqueue("")
self._assert_queue_empty(cli)
assert "You are about to clear" in out_stream.getvalue()
def test_1340(self, cn_client_v2):
"""do_package(): Task is added to queue."""
cli = d1_cli.impl.command_parser.CLI()
with self._add_write_operation_to_queue(
cli,
cli.do_package,
"{pid} scimeta_pid sciobj1_pid sciobj2_pid, sciobj3_pid",
):
self._assert_queued_operations(cli, 1, "create_package")
self._clear_queue(cli)
self._assert_queue_empty(cli)
def test_1350(self, cn_client_v2):
"""do_archive(): Tasks are added to queue for each pid."""
cli = d1_cli.impl.command_parser.CLI()
with self._add_write_operation_to_queue(
cli, cli.do_archive, "archive1_pid archive2_pid archive3_pid archive4_pid"
):
self._assert_queued_operations(cli, 4, "archive")
self._clear_queue(cli)
self._assert_queue_empty(cli)
def test_1360(self, cn_client_v2):
"""do_updateaccess(): Tasks are added to queue for each pid."""
cli = d1_cli.impl.command_parser.CLI()
with self._disable_check_for_authenticated_access():
with self._add_write_operation_to_queue(
cli, cli.do_updateaccess, "access1_pid access2_pid access3_pid"
):
self._assert_queued_operations(cli, 3, "update_access_policy")
self._clear_queue(cli)
self._assert_queue_empty(cli)
def test_1370(self, cn_client_v2):
"""do_updatereplication(): Tasks are added to queue for each pid."""
cli = d1_cli.impl.command_parser.CLI()
with self._disable_check_for_authenticated_access():
with self._add_write_operation_to_queue(
cli,
cli.do_updatereplication,
"replication1_pid replication2_pid replication3_pid",
):
self._assert_queued_operations(cli, 3, "update_replication_policy")
self._clear_queue(cli)
self._assert_queue_empty(cli)
def _assert_queue_empty(self, cli):
with pytest.raises(d1_cli.impl.exceptions.InvalidArguments):
cli.do_queue("")
def _clear_queue(self, cli):
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
with d1_test.d1_test_case.mock_input("yes"):
cli.do_clearqueue("")
assert "You are about to clear" in out_stream.getvalue()
@contextlib.contextmanager
def _add_write_operation_to_queue(
self, cli, write_fun, cmd_format_str, **kwargs_dict
):
cli.do_reset("")
cli.do_allowaccess("test_subject_1 write")
cli.do_allowaccess("test_subject_3 changePermission")
cli.do_preferrep("preferred-mn-2")
cli.do_blockrep("blocked-mn-1")
cli.do_blockrep("blocked-mn-2")
cli.do_numberrep("42")
cli.do_set("authoritative-mn urn:node:myTestMN")
cli.do_set("rights-holder test-rights-holder-subject")
cli.do_set("format-id test-format-id")
cli.do_set("cn-url {}".format(d1_test.d1_test_case.MOCK_CN_BASE_URL))
cli.do_set("mn-url {}".format(d1_test.d1_test_case.MOCK_MN_BASE_URL))
pid_str = "test_pid_{}".format(
d1_test.instance_generator.random_data.random_3_words()
)
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
tmp_file.write("sciobj_for_{}".format(pid_str).encode("utf-8"))
# Add a create task to the queue.
kwargs_dict.update({"pid": pid_str, "tmp_file_path": tmp_file.name})
with d1_test.d1_test_case.capture_std():
write_fun(cmd_format_str.format(**kwargs_dict))
yield pid_str
os.unlink(tmp_file.name)
@contextlib.contextmanager
def _disable_check_for_authenticated_access(self):
with mock.patch(
"d1_cli.impl.operation_validator.OperationValidator."
"_assert_authenticated_access",
return_value=True,
):
yield
def _assert_queued_operations(self, cli, num_operations, operation_str):
with d1_test.d1_test_case.capture_std() as (out_stream, err_stream):
cli.do_queue("")
queue_str = out_stream.getvalue()
assert re.search(r"operation:\s*{}".format(operation_str), queue_str)
assert re.search(r"\d+ of {}".format(num_operations), queue_str)
# def test_1380(self, cn_client_v2):
# """search: Expected Solr query is generated"""
# expect = '*:* dateModified:[* TO *]'
# args = ' '.join([_f for _f in ('id:knb-lter*',) if _f])
# cli = d1_cli.impl.command_parser.CLI()
# actual = cli._command_processor._create_solr_query(args)
# assert expect == actual
def test_1380(self, cn_client_v2):
"""search: Expected Solr query is generated."""
expect = "id:knb-lter* dateModified:[* TO *]"
args = " ".join([_f for _f in ("id:knb-lter*",) if _f])
cli = d1_cli.impl.command_parser.CLI()
actual = cli._command_processor._create_solr_query(args)
assert expect == actual
def test_1390(self, cn_client_v2):
"""search: Expected Solr query is generated."""
expect = "id:knb-lter* abstract:water dateModified:[* TO *]"
args = " ".join([_f for _f in ("id:knb-lter*",) if _f])
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("query abstract:water")
actual = cli._command_processor._create_solr_query(args)
assert expect == actual
def test_1400(self, cn_client_v2):
"""search: Expected Solr query is generated."""
expect = "id:knb-lter* abstract:water dateModified:[* TO *]"
args = " ".join([_f for _f in ("id:knb-lter*",) if _f])
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("query abstract:water")
actual = cli._command_processor._create_solr_query(args)
assert expect == actual
def test_1410(self, cn_client_v2):
"""search: Expected Solr query is generated."""
expect = "id:knb-lter* formatId:text/csv dateModified:[* TO *]"
args = " ".join([_f for _f in ("id:knb-lter*",) if _f])
cli = d1_cli.impl.command_parser.CLI()
cli.do_set("query None")
cli.do_set("search-format-id text/csv")
actual = cli._command_processor._create_solr_query(args)
assert expect == actual
| [
"git@dahlsys.com"
] | git@dahlsys.com |
55d65050fc5aacda7acd63dcd44060f20f7cf89f | 6a2db8e177cacd14a33cb49b8ea78b5fd7c3ebe1 | /day01/day01_2.py | 16600064035200615bbea703a73ed44c734004b8 | [] | no_license | somyungsub/kosta-pythonbasic | fa42c55852774b9791120829087fcea343efc502 | 0abfde24ff7a3018ec0ac057eed2a27746b77148 | refs/heads/master | 2020-03-27T19:30:45.803299 | 2018-09-01T11:49:28 | 2018-09-01T11:49:28 | 146,993,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | # 숫자
from builtins import divmod, complex
# 정수 할당
n1 = 4
n2 = 3
n3 = 2
# 실수 할당
n1 = 4.0
n2 = 3.0
n3 = 2.0
# 복소수 할당 -> 정수 나눗셈, 나머지 연산자는 안됨
# n1 = 4j
# n2 = 3j
# n3 = 2j
print(n1 + n3)
print(n1 - n3)
print(n1 / n3)
print(n1 / n2) # 기본적으로 실수형으로 변환 후 실행되어 실수형 반환
print(n1 // n2) # //는 정수형으로 됨
print(n1 * n3)
print(n1 % n3)
print(n1 % n3)
# 몫 + 나머지 얻기
print(divmod(n1, n2)) # 몫,나머지 튜플 형식 반환
# 정수
i1 = 5 # 초기화를 통한 방법
i2 = int(10) # int()를 통한 방법 : i2 = 0 과 동일
print(i1)
print(i2)
# 실수
i1 = 5.0
i2 = float() # 0.0으로 초기화됨
print(i1)
print(i2)
# 복소수
i1 = 5j
i2 = complex() # 0j으로 초기화됨
print(i1)
print(i2)
| [
"gkdldy5@naver.com"
] | gkdldy5@naver.com |
db6a5f9a9e48fbdb723307801d93ecf4a47d4f32 | 2b23ac5fac45ea526324b84bc1c873794a92ce3f | /example.py | 741e4ecd2ec915d16f015f54bb3416549d03e785 | [] | no_license | yaleman/technicolorspeedstats | 9bd9b1dded37ba943c09551edf42d8a7871722ee | 5a1514eacd7d93ab86f93489053ec9a0cf786dd3 | refs/heads/master | 2022-05-19T10:17:52.020727 | 2022-05-09T01:15:36 | 2022-05-09T01:15:36 | 245,538,477 | 1 | 0 | null | 2022-05-09T01:15:37 | 2020-03-06T23:59:40 | Python | UTF-8 | Python | false | false | 86 | py | #!/usr/bin/env python3
from technicolorspeedstats import get_data
print(get_data())
| [
"yaleman@ricetek.net"
] | yaleman@ricetek.net |
60e68556375c7be92b4a838b420b1e603730aca7 | 6f6997efe1a15d57688c12ff0197790fb2eac6bc | /histogram/wigets/waferdata_histogram.py | aa532f8c85f67b1d487d0d292c97a7194ae277d9 | [] | no_license | montanaviking/waferprobe | 29fa5f0eb07e60820162916e48059f63374902c5 | fb2786b376153f9b6e9495b6faf3ee5960f90a06 | refs/heads/master | 2022-11-06T10:57:01.539733 | 2020-06-19T23:47:59 | 2020-06-19T23:47:59 | 273,601,408 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,386 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'waferdata_histogram.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Histogram(object):
def setupUi(self, Histogram):
Histogram.setObjectName("Histogram")
Histogram.resize(763, 624)
Histogram.setFocusPolicy(QtCore.Qt.TabFocus)
self.verticalLayout_5 = QtWidgets.QVBoxLayout(Histogram)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout()
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout()
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.wafernamelabel = QtWidgets.QLabel(Histogram)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.wafernamelabel.sizePolicy().hasHeightForWidth())
self.wafernamelabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.wafernamelabel.setFont(font)
self.wafernamelabel.setObjectName("wafernamelabel")
self.horizontalLayout_13.addWidget(self.wafernamelabel)
self.wafername = QtWidgets.QLineEdit(Histogram)
self.wafername.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.wafername.setFont(font)
self.wafername.setAcceptDrops(False)
self.wafername.setReadOnly(True)
self.wafername.setObjectName("wafername")
self.horizontalLayout_13.addWidget(self.wafername)
self.verticalLayout_2.addLayout(self.horizontalLayout_13)
self.horizontalLayout_12 = QtWidgets.QHBoxLayout()
self.horizontalLayout_12.setObjectName("horizontalLayout_12")
self.Vgs_label = QtWidgets.QLabel(Histogram)
self.Vgs_label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.Vgs_label.setFont(font)
self.Vgs_label.setObjectName("Vgs_label")
self.horizontalLayout_12.addWidget(self.Vgs_label)
self.Vgs_comboBox = QtWidgets.QComboBox(Histogram)
self.Vgs_comboBox.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.Vgs_comboBox.setFont(font)
self.Vgs_comboBox.setObjectName("Vgs_comboBox")
self.horizontalLayout_12.addWidget(self.Vgs_comboBox)
self.Vds_FOC_label = QtWidgets.QLabel(Histogram)
self.Vds_FOC_label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.Vds_FOC_label.setFont(font)
self.Vds_FOC_label.setObjectName("Vds_FOC_label")
self.horizontalLayout_12.addWidget(self.Vds_FOC_label)
self.Vds_FOC = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.Vds_FOC.setFont(font)
self.Vds_FOC.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly)
self.Vds_FOC.setReadOnly(False)
self.Vds_FOC.setObjectName("Vds_FOC")
self.horizontalLayout_12.addWidget(self.Vds_FOC)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.horizontalLayout_12)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.horizontalLayout_15 = QtWidgets.QHBoxLayout()
self.horizontalLayout_15.setObjectName("horizontalLayout_15")
self.horizontalLayout_8.addLayout(self.horizontalLayout_15)
self.Yf_checkBox = QtWidgets.QCheckBox(Histogram)
self.Yf_checkBox.setObjectName("Yf_checkBox")
self.horizontalLayout_8.addWidget(self.Yf_checkBox)
self.deltaVgs_thres_label = QtWidgets.QLabel(Histogram)
self.deltaVgs_thres_label.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(8)
self.deltaVgs_thres_label.setFont(font)
self.deltaVgs_thres_label.setObjectName("deltaVgs_thres_label")
self.horizontalLayout_8.addWidget(self.deltaVgs_thres_label)
self.delta_Vgs_thres = QtWidgets.QLineEdit(Histogram)
self.delta_Vgs_thres.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(8)
self.delta_Vgs_thres.setFont(font)
self.delta_Vgs_thres.setToolTip("")
self.delta_Vgs_thres.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly)
self.delta_Vgs_thres.setReadOnly(False)
self.delta_Vgs_thres.setObjectName("delta_Vgs_thres")
self.horizontalLayout_8.addWidget(self.delta_Vgs_thres)
self.Yf_Vgsfitrange_label = QtWidgets.QLabel(Histogram)
self.Yf_Vgsfitrange_label.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(8)
self.Yf_Vgsfitrange_label.setFont(font)
self.Yf_Vgsfitrange_label.setObjectName("Yf_Vgsfitrange_label")
self.horizontalLayout_8.addWidget(self.Yf_Vgsfitrange_label)
self.Yf_Vgsfitrange_frac = QtWidgets.QLineEdit(Histogram)
self.Yf_Vgsfitrange_frac.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(8)
self.Yf_Vgsfitrange_frac.setFont(font)
self.Yf_Vgsfitrange_frac.setToolTip("")
self.Yf_Vgsfitrange_frac.setInputMethodHints(QtCore.Qt.ImhFormattedNumbersOnly)
self.Yf_Vgsfitrange_frac.setReadOnly(False)
self.Yf_Vgsfitrange_frac.setObjectName("Yf_Vgsfitrange_frac")
self.horizontalLayout_8.addWidget(self.Yf_Vgsfitrange_frac)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem1)
self.verticalLayout_2.addLayout(self.horizontalLayout_8)
self.horizontalLayout_11 = QtWidgets.QHBoxLayout()
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.parameterlabel = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.parameterlabel.setFont(font)
self.parameterlabel.setObjectName("parameterlabel")
self.horizontalLayout_11.addWidget(self.parameterlabel)
self.measurementtype = QtWidgets.QComboBox(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.measurementtype.setFont(font)
self.measurementtype.setEditable(False)
self.measurementtype.setObjectName("measurementtype")
self.horizontalLayout_11.addWidget(self.measurementtype)
self.verticalLayout_2.addLayout(self.horizontalLayout_11)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem2)
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.set_includes_label = QtWidgets.QLabel(Histogram)
self.set_includes_label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.set_includes_label.setFont(font)
self.set_includes_label.setAlignment(QtCore.Qt.AlignCenter)
self.set_includes_label.setObjectName("set_includes_label")
self.verticalLayout_4.addWidget(self.set_includes_label)
self.set_includes = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.set_includes.setFont(font)
self.set_includes.setFocusPolicy(QtCore.Qt.ClickFocus)
self.set_includes.setAcceptDrops(False)
self.set_includes.setWhatsThis("")
self.set_includes.setReadOnly(False)
self.set_includes.setObjectName("set_includes")
self.verticalLayout_4.addWidget(self.set_includes)
self.verticalLayout_2.addLayout(self.verticalLayout_4)
self.horizontalLayout_9.addLayout(self.verticalLayout_2)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.averagelabel = QtWidgets.QLabel(Histogram)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.averagelabel.sizePolicy().hasHeightForWidth())
self.averagelabel.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.averagelabel.setFont(font)
self.averagelabel.setObjectName("averagelabel")
self.horizontalLayout_3.addWidget(self.averagelabel)
self.average = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.average.setFont(font)
self.average.setReadOnly(True)
self.average.setObjectName("average")
self.horizontalLayout_3.addWidget(self.average)
self.standarddeviation = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.standarddeviation.setFont(font)
self.standarddeviation.setObjectName("standarddeviation")
self.horizontalLayout_3.addWidget(self.standarddeviation)
self.standard_deviation = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.standard_deviation.setFont(font)
self.standard_deviation.setReadOnly(True)
self.standard_deviation.setObjectName("standard_deviation")
self.horizontalLayout_3.addWidget(self.standard_deviation)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.log_linear_histogram_but = QtWidgets.QPushButton(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.log_linear_histogram_but.setFont(font)
self.log_linear_histogram_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.log_linear_histogram_but.setStyleSheet("background-color: hsv(100, 200, 255);\n"
"color: rgb(0, 0,0);")
self.log_linear_histogram_but.setCheckable(True)
self.log_linear_histogram_but.setChecked(False)
self.log_linear_histogram_but.setAutoDefault(False)
self.log_linear_histogram_but.setObjectName("log_linear_histogram_but")
self.horizontalLayout_4.addWidget(self.log_linear_histogram_but)
self.label_numberofdevices = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.label_numberofdevices.setFont(font)
self.label_numberofdevices.setObjectName("label_numberofdevices")
self.horizontalLayout_4.addWidget(self.label_numberofdevices)
self.numberofdevices = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.numberofdevices.setFont(font)
self.numberofdevices.setReadOnly(True)
self.numberofdevices.setObjectName("numberofdevices")
self.horizontalLayout_4.addWidget(self.numberofdevices)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.selectmintype = QtWidgets.QComboBox(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.selectmintype.setFont(font)
self.selectmintype.setObjectName("selectmintype")
self.selectmintype.addItem("")
self.selectmintype.addItem("")
self.horizontalLayout_5.addWidget(self.selectmintype)
self.minimum = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.minimum.setFont(font)
self.minimum.setObjectName("minimum")
self.horizontalLayout_5.addWidget(self.minimum)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.selectmaxtype = QtWidgets.QComboBox(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.selectmaxtype.setFont(font)
self.selectmaxtype.setObjectName("selectmaxtype")
self.selectmaxtype.addItem("")
self.selectmaxtype.addItem("")
self.horizontalLayout_6.addWidget(self.selectmaxtype)
self.maximum = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.maximum.setFont(font)
self.maximum.setObjectName("maximum")
self.horizontalLayout_6.addWidget(self.maximum)
self.verticalLayout.addLayout(self.horizontalLayout_6)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.range_lin_fit_label = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.range_lin_fit_label.setFont(font)
self.range_lin_fit_label.setObjectName("range_lin_fit_label")
self.horizontalLayout_7.addWidget(self.range_lin_fit_label)
self.range_linearfit = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.range_linearfit.setFont(font)
self.range_linearfit.setFocusPolicy(QtCore.Qt.StrongFocus)
self.range_linearfit.setAcceptDrops(True)
self.range_linearfit.setObjectName("range_linearfit")
self.horizontalLayout_7.addWidget(self.range_linearfit)
self.transfer_curve_smoothing_factor_label = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.transfer_curve_smoothing_factor_label.setFont(font)
self.transfer_curve_smoothing_factor_label.setObjectName("transfer_curve_smoothing_factor_label")
self.horizontalLayout_7.addWidget(self.transfer_curve_smoothing_factor_label)
self.transfer_curve_smoothing_factor = QtWidgets.QLineEdit(Histogram)
self.transfer_curve_smoothing_factor.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(8)
self.transfer_curve_smoothing_factor.setFont(font)
self.transfer_curve_smoothing_factor.setFocusPolicy(QtCore.Qt.ClickFocus)
self.transfer_curve_smoothing_factor.setAcceptDrops(True)
self.transfer_curve_smoothing_factor.setObjectName("transfer_curve_smoothing_factor")
self.horizontalLayout_7.addWidget(self.transfer_curve_smoothing_factor)
self.verticalLayout.addLayout(self.horizontalLayout_7)
self.horizontalLayout_16 = QtWidgets.QHBoxLayout()
self.horizontalLayout_16.setObjectName("horizontalLayout_16")
self.TLM_lin_fit_label = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.TLM_lin_fit_label.setFont(font)
self.TLM_lin_fit_label.setObjectName("TLM_lin_fit_label")
self.horizontalLayout_16.addWidget(self.TLM_lin_fit_label)
self.TLM_fit_quality = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.TLM_fit_quality.setFont(font)
self.TLM_fit_quality.setAcceptDrops(True)
self.TLM_fit_quality.setObjectName("TLM_fit_quality")
self.horizontalLayout_16.addWidget(self.TLM_fit_quality)
self.minTLMlength_label = QtWidgets.QLabel(Histogram)
self.minTLMlength_label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.minTLMlength_label.setFont(font)
self.minTLMlength_label.setObjectName("minTLMlength_label")
self.horizontalLayout_16.addWidget(self.minTLMlength_label)
self.TLMlengthminimum = QtWidgets.QComboBox(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.TLMlengthminimum.setFont(font)
self.TLMlengthminimum.setEditable(False)
self.TLMlengthminimum.setObjectName("TLMlengthminimum")
self.horizontalLayout_16.addWidget(self.TLMlengthminimum)
self.maxTLMlength_label = QtWidgets.QLabel(Histogram)
self.maxTLMlength_label.setEnabled(True)
font = QtGui.QFont()
font.setPointSize(8)
self.maxTLMlength_label.setFont(font)
self.maxTLMlength_label.setObjectName("maxTLMlength_label")
self.horizontalLayout_16.addWidget(self.maxTLMlength_label)
self.TLMlengthmaximum = QtWidgets.QComboBox(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.TLMlengthmaximum.setFont(font)
self.TLMlengthmaximum.setEditable(False)
self.TLMlengthmaximum.setObjectName("TLMlengthmaximum")
self.horizontalLayout_16.addWidget(self.TLMlengthmaximum)
self.verticalLayout.addLayout(self.horizontalLayout_16)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.binsizepolicy_label = QtWidgets.QLabel(Histogram)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.binsizepolicy_label.sizePolicy().hasHeightForWidth())
self.binsizepolicy_label.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.binsizepolicy_label.setFont(font)
self.binsizepolicy_label.setObjectName("binsizepolicy_label")
self.horizontalLayout_2.addWidget(self.binsizepolicy_label)
self.binsizepolicy = QtWidgets.QComboBox(Histogram)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.binsizepolicy.sizePolicy().hasHeightForWidth())
self.binsizepolicy.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setPointSize(8)
self.binsizepolicy.setFont(font)
self.binsizepolicy.setObjectName("binsizepolicy")
self.horizontalLayout_2.addWidget(self.binsizepolicy)
self.label_binsize = QtWidgets.QLabel(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.label_binsize.setFont(font)
self.label_binsize.setObjectName("label_binsize")
self.horizontalLayout_2.addWidget(self.label_binsize)
self.binsize_stddev = QtWidgets.QLineEdit(Histogram)
font = QtGui.QFont()
font.setPointSize(8)
self.binsize_stddev.setFont(font)
self.binsize_stddev.setObjectName("binsize_stddev")
self.horizontalLayout_2.addWidget(self.binsize_stddev)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_9.addLayout(self.verticalLayout)
self.verticalLayout_5.addLayout(self.horizontalLayout_9)
self.plotframe = QtWidgets.QFrame(Histogram)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotframe.sizePolicy().hasHeightForWidth())
self.plotframe.setSizePolicy(sizePolicy)
self.plotframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.plotframe.setFrameShadow(QtWidgets.QFrame.Raised)
self.plotframe.setObjectName("plotframe")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.plotframe)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.opendirbut = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.opendirbut.setFont(font)
self.opendirbut.setFocusPolicy(QtCore.Qt.TabFocus)
self.opendirbut.setAutoDefault(False)
self.opendirbut.setObjectName("opendirbut")
self.horizontalLayout.addWidget(self.opendirbut)
self.save_state_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.save_state_but.setFont(font)
self.save_state_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.save_state_but.setObjectName("save_state_but")
self.horizontalLayout.addWidget(self.save_state_but)
self.pack_database_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.pack_database_but.setFont(font)
self.pack_database_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.pack_database_but.setObjectName("pack_database_but")
self.horizontalLayout.addWidget(self.pack_database_but)
self.open_filter_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.open_filter_but.setFont(font)
self.open_filter_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.open_filter_but.setObjectName("open_filter_but")
self.horizontalLayout.addWidget(self.open_filter_but)
self.export_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.export_but.setFont(font)
self.export_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.export_but.setObjectName("export_but")
self.horizontalLayout.addWidget(self.export_but)
self.device_list_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.device_list_but.setFont(font)
self.device_list_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.device_list_but.setObjectName("device_list_but")
self.horizontalLayout.addWidget(self.device_list_but)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.Device_Listing_Table = DevTable(self.plotframe)
self.Device_Listing_Table.setMaximumSize(QtCore.QSize(16777215, 200))
self.Device_Listing_Table.setStatusTip("")
self.Device_Listing_Table.setWhatsThis("")
self.Device_Listing_Table.setObjectName("Device_Listing_Table")
self.Device_Listing_Table.setColumnCount(0)
self.Device_Listing_Table.setRowCount(0)
self.Device_Listing_Table.horizontalHeader().setCascadingSectionResizes(True)
self.Device_Listing_Table.horizontalHeader().setStretchLastSection(True)
self.Device_Listing_Table.verticalHeader().setCascadingSectionResizes(True)
self.verticalLayout_3.addWidget(self.Device_Listing_Table)
self.chartcontrolHBOX = QtWidgets.QHBoxLayout()
self.chartcontrolHBOX.setObjectName("chartcontrolHBOX")
self.backview_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.backview_but.setFont(font)
self.backview_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.backview_but.setObjectName("backview_but")
self.chartcontrolHBOX.addWidget(self.backview_but)
self.forwardview_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.forwardview_but.setFont(font)
self.forwardview_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.forwardview_but.setObjectName("forwardview_but")
self.chartcontrolHBOX.addWidget(self.forwardview_but)
self.fullview_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.fullview_but.setFont(font)
self.fullview_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.fullview_but.setObjectName("fullview_but")
self.chartcontrolHBOX.addWidget(self.fullview_but)
self.selected_bin_only_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.selected_bin_only_but.setFont(font)
self.selected_bin_only_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.selected_bin_only_but.setCheckable(True)
self.selected_bin_only_but.setObjectName("selected_bin_only_but")
self.chartcontrolHBOX.addWidget(self.selected_bin_only_but)
self.histograph_image_to_clipboard_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.histograph_image_to_clipboard_but.setFont(font)
self.histograph_image_to_clipboard_but.setFocusPolicy(QtCore.Qt.ClickFocus)
self.histograph_image_to_clipboard_but.setObjectName("histograph_image_to_clipboard_but")
self.chartcontrolHBOX.addWidget(self.histograph_image_to_clipboard_but)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.chartcontrolHBOX.addItem(spacerItem3)
self.quit_but = QtWidgets.QPushButton(self.plotframe)
font = QtGui.QFont()
font.setPointSize(8)
self.quit_but.setFont(font)
self.quit_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.quit_but.setObjectName("quit_but")
self.chartcontrolHBOX.addWidget(self.quit_but)
self.verticalLayout_3.addLayout(self.chartcontrolHBOX)
self.plotframebox = QtWidgets.QHBoxLayout()
self.plotframebox.setObjectName("plotframebox")
self.verticalLayout_3.addLayout(self.plotframebox)
self.verticalLayout_5.addWidget(self.plotframe)
self.plotframe.raise_()
self.retranslateUi(Histogram)
QtCore.QMetaObject.connectSlotsByName(Histogram)
def retranslateUi(self, Histogram):
_translate = QtCore.QCoreApplication.translate
Histogram.setWindowTitle(_translate("Histogram", "Histogram"))
self.wafernamelabel.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This is the name of the wafer and directory currently under analysis.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">The directory name MUST match the wafer name.</p></body></html>"))
self.wafernamelabel.setText(_translate("Histogram", "Wafer Name"))
self.wafername.setToolTip(_translate("Histogram", "Wafer Name"))
self.Vgs_label.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Selected Vgs:</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This selects the Vgs for all analysis which requires the family of curves data</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">examples of which are Ron, Gon, TLM data, ratio Ron data etc...</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Analysis will be performed assuming the selected Vgs which selects a curve from the</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">family of curves. Changing this will update all data dependent upon Vgs.</p></body></html>"))
self.Vgs_label.setText(_translate("Histogram", "select Vgs for FOC"))
self.Vgs_comboBox.setToolTip(_translate("Histogram", "Gate voltage setting"))
self.Vgs_comboBox.setWhatsThis(_translate("Histogram", "This is the gate voltage setting"))
        self.Vds_FOC_label.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Selected Vds_FOC:</span></p><p><span style=\" color:#000000;\">This selects the Vds from the family of curves at which |Idmax| is evaluated.</span></p><p><span style=\" color:#000000;\">Changing this will update all data dependent upon the selected Vds.</span></p></body></html>"))
self.Vds_FOC_label.setText(_translate("Histogram", "Vds_FOC for |Idmax|@Vds"))
self.Yf_checkBox.setText(_translate("Histogram", " Y-function Analysis"))
        self.deltaVgs_thres_label.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">deltaVgsthres:</span></p><p><span style=\" color:#000000;\">Offset in Vgs relative to the extracted threshold voltage; threshold-referenced analyses evaluate the device at Vgs = Vth + deltaVgsthres.</span></p></body></html>"))
self.deltaVgs_thres_label.setText(_translate("Histogram", "deltaVgsthres"))
self.delta_Vgs_thres.setText(_translate("Histogram", "-0.5"))
        self.Yf_Vgsfitrange_label.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Yf Vgs fit range fraction:</span></p><p><span style=\" color:#000000;\">Fraction of the measured Vgs span used for the linear fit of the Y-function when extracting device parameters.</span></p></body></html>"))
self.Yf_Vgsfitrange_label.setText(_translate("Histogram", "Yf Vgs fit range fract"))
self.Yf_Vgsfitrange_frac.setText(_translate("Histogram", "0.1"))
self.parameterlabel.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Parameter selector:</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Selects parameter to be displayed on histogram.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">If you get the warning message "no devices"</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">then try to adjust the parameter selector to find a </p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">parameter for which there are data. The parameter </p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">selector MUST be set to data that exists for any analysis</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">to proceed.</p></body></html>"))
self.parameterlabel.setText(_translate("Histogram", "Parameter"))
self.measurementtype.setToolTip(_translate("Histogram", "Data Format: Resistance (Ron) or Conductance (Gon)"))
self.measurementtype.setWhatsThis(_translate("Histogram", "Ron @ Vds=0V is the slope of Ids/Vds at Vds=0V\n"
"Ron @ |Vds|=maximum is the maximum Vds/Id at maximum Vds\n"
"Gon s are similar to the above but are conductances = 1/Ron"))
self.set_includes_label.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\"><span style=\" color:#000000;\">Boolean selector:</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Selects devices for analysis based on their names</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Default is to analyze all devices.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This is a reverse Polish Boolean evaluator</span></p>\n"
"<p align=\"justify\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Operators (binary-two arguments): and, or, xor - call them bx</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Operators (unary-one argument): not - call it ux</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Format for search terms (strings) with operators bx, ux is:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">A B ba C ua bb D bc .......</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">where ba operates on A and B, ua operates on C, bb operates on the two results of</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">of ba and ua and bc this result and D. Example:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Aa Bla and Cz not and D or is equivalent to:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">((Aa and Bla) and not Cz) and D</span></p></body></html>"))
self.set_includes_label.setText(_translate("Histogram", "data filename filter"))
self.set_includes.setToolTip(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Boolean selector:</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Selects devices for analysis based on their names</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Default is to analyze all devices.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This is a reverse Polish Boolean evaluator</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Operators (binary-two arguments): and, or, xor - call them bo</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Operators (unary-one argument): not - call it ux</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Format for search terms (strings) with operators bx, ux is:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">A B ba C ua bb D bc .......</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">where ba operates on A and B, ua operates on C, bb operates on the two results of</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">of ba and ua and bc this result and D. Example:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Aa Bla and Cz not and D or is equivalent to:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">((Aa and Bla) and not Cz) and D</span></p></body></html>"))
self.averagelabel.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Average of all data visible within the hysteresis plot.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">When Linear plots button is green and selected, this is the arithmetic mean of the data.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">When log plots (button turns red) is selected, this is the geometric (log average) mean.</p></body></html>"))
self.averagelabel.setText(_translate("Histogram", "average"))
self.standarddeviation.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Standard Deviation of all data visible within the hysteresis plot.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">When Linear plots button is green and selected, this is the simple standard deviation of the data.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">When log plots (button turns red) is selected, this is the standard deviation of the log of the data.</p></body></html>"))
self.standarddeviation.setText(_translate("Histogram", "standard deviation"))
self.standard_deviation.setToolTip(_translate("Histogram", "Of selected range"))
self.log_linear_histogram_but.setText(_translate("Histogram", "Linear plots"))
self.label_numberofdevices.setText(_translate("Histogram", "number of devices"))
self.selectmintype.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Lower limit of data analysis on the histogram displayed as number of standard deviations below mean or a simple value.</p></body></html>"))
self.selectmintype.setItemText(0, _translate("Histogram", "Std Dev below mean"))
self.selectmintype.setItemText(1, _translate("Histogram", "Value"))
self.selectmaxtype.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Upper limit of data analysis on the histogram displayed as number of standard deviations below mean or a simple value.</p></body></html>"))
self.selectmaxtype.setItemText(0, _translate("Histogram", "Std Dev above mean"))
self.selectmaxtype.setItemText(1, _translate("Histogram", "Value"))
self.range_lin_fit_label.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Range of Vds over which an Id vs Vds curve is fit to a line to determine Ron, Gon.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">The Id(Vds) curve is that at a Vgs selected by the user on the Vgs selector of this GUI window.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This curve fit range starts at Vds=0 and extends to maximum negative Vds * the range fit.</p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
self.range_lin_fit_label.setText(_translate("Histogram", "FOC Ron Range Fit"))
        self.transfer_curve_smoothing_factor_label.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Smoothing factor applied to the measured transfer curves (Id vs Vgs) before derivative-based parameter extraction.</span></p><p><span style=\" color:#000000;\">Larger values give smoother curves at the cost of fine detail.</span></p></body></html>"))
self.transfer_curve_smoothing_factor_label.setText(_translate("Histogram", "transfer curve smoothing factor"))
        self.TLM_lin_fit_label.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Minimum acceptable quality of the linear fit of TLM device resistance versus channel length; TLM extractions of Rc and Rsh (contact and sheet resistance) with poorer fits are rejected.</span></p></body></html>"))
self.TLM_lin_fit_label.setText(_translate("Histogram", "TLM linear fit quality"))
self.minTLMlength_label.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\"><span style=\" color:#000000;\">Minimum channel length of TLM devices.</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\"><span style=\" color:#000000;\">Allows user to select the minimum available channel length of devices in the TLM</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\"><span style=\" color:#000000;\">structures to use in performing TLM analysis of Rc and Rsh (contact and sheet resistance)</span></p></body></html>"))
self.minTLMlength_label.setText(_translate("Histogram", "TLM min length um"))
        self.TLMlengthminimum.setToolTip(_translate("Histogram", "Minimum TLM channel length in um"))
        self.TLMlengthminimum.setWhatsThis(_translate("Histogram", "Minimum available channel length of the TLM devices\n"
"used in the TLM analysis of Rc and Rsh\n"
"(contact and sheet resistance)"))
self.maxTLMlength_label.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Maximum channel length of TLM devices.</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Allows user to select the maximum available channel length of devices in the TLM</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">structures to use in performing TLM analysis of Rc and Rsh (contact and sheet resistance)</span></p></body></html>"))
self.maxTLMlength_label.setText(_translate("Histogram", "TLM max length um"))
        self.TLMlengthmaximum.setToolTip(_translate("Histogram", "Maximum TLM channel length in um"))
        self.TLMlengthmaximum.setWhatsThis(_translate("Histogram", "Maximum available channel length of the TLM devices\n"
"used in the TLM analysis of Rc and Rsh\n"
"(contact and sheet resistance)"))
self.binsizepolicy_label.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">User-selection of the method by which histogram bin size is determined</p></body></html>"))
self.binsizepolicy_label.setText(_translate("Histogram", "Bin Size Policy"))
self.label_binsize.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">User manual setting for histogram bin size.</p></body></html>"))
self.label_binsize.setText(_translate("Histogram", "bin size stddev"))
self.opendirbut.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">User to select wafer directory to open and analyze.</span></p></body></html>"))
self.opendirbut.setText(_translate("Histogram", "&open directory"))
self.save_state_but.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">User saves data to open later</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Should reduce loading time of analysis.</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">WARNING: Not working yet!</span></p></body></html>"))
self.save_state_but.setText(_translate("Histogram", "&Save State"))
        self.pack_database_but.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Packs (compacts) the wafer database to reclaim space</span></p><p><span style=\" color:#000000;\">and speed up later loading of the analysis.</span></p><p><span style=\" color:#000000;\">WARNING: Not working yet!</span></p></body></html>"))
self.pack_database_but.setText(_translate("Histogram", "Pack Database"))
self.open_filter_but.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This opens a new window which allows the user to filter data for analysis by</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">data values. For example, the user can exclude devices having |Idmax| less than or </span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">greater than the user-specified values. This is often used to remove bad devices from the analysis.</span></p></body></html>"))
self.open_filter_but.setText(_translate("Histogram", "filter"))
self.export_but.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">This opens a new window which allows the user to obtain TLM device Ron and other parameters from statistical averaged data for each TLM element.</span></p></body></html>"))
self.export_but.setText(_translate("Histogram", "Export statistics"))
self.device_list_but.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">This opens a new window which allows the user to obtain a device listing of device names on the wafer with the devices\' measured/calculated parameters in columns.</span></p></body></html>"))
self.device_list_but.setText(_translate("Histogram", "device list"))
        self.Device_Listing_Table.setToolTip(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000; background-color:#ffffff;\">Device listing from selected bin of the histogram. Note that a ctrl-f opens a window which allows the user to place a Boolean expression to selectively display devices.</span></p><p><span style=\" color:#000000; background-color:#ffffff;\">Left mouse click on parameter (header) to sort. </span></p><p><span style=\" color:#000000; background-color:#ffffff;\">Shift+left mouse click on parameter to select it for copy to clipboard - the selected columns will change color. </span></p><p><span style=\" color:#000000; background-color:#ffffff;\">After selecting all desired parameters, ctrl-c to copy them to clipboard. </span></p><p><span style=\" color:#000000; background-color:#ffffff;\">Right mouse click deselects all. </span></p><p><span style=\" color:#000000; background-color:#ffffff;\">Shift right click to load individual cells to clipboard. </span></p><p><span style=\" color:#000000; background-color:#ffffff;\">Left mouse click on device name will allow plotting of selected device parameters.</span></p></body></html>"))
self.backview_but.setWhatsThis(_translate("Histogram", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" color:#000000;\">Send the histogram view back to the previous setting.</p></body></html>"))
self.backview_but.setText(_translate("Histogram", "&back"))
self.forwardview_but.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Send the histogram view forward to the next saved setting.</span></p></body></html>"))
self.forwardview_but.setText(_translate("Histogram", "&forward"))
self.fullview_but.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Send the histogram to full span to view all available data i.e. this is the default setting.</span></p></body></html>"))
self.fullview_but.setText(_translate("Histogram", "&full view"))
        self.selected_bin_only_but.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">When toggled on, restrict the histogram and the device listing to the currently selected bin only.</span></p></body></html>"))
self.selected_bin_only_but.setText(_translate("Histogram", "selected bin only"))
        self.histograph_image_to_clipboard_but.setWhatsThis(_translate("Histogram", "<html><head/><body><p><span style=\" color:#000000;\">Copy the current histogram image to the clipboard.</span></p></body></html>"))
self.histograph_image_to_clipboard_but.setText(_translate("Histogram", "histograph image->&clipboard"))
self.quit_but.setText(_translate("Histogram", "Quit"))
from devtable import DevTable
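# The set_includes tooltips above describe a reverse-Polish Boolean name filter.
# Below is a minimal standalone sketch of that evaluator for reference; it is
# not part of the generated UI, and the function name and token handling are
# illustrative assumptions.
def _rpn_name_match(device_name, expression):
    """True if device_name satisfies the reverse-Polish Boolean expression.

    Plain tokens push (token in device_name); 'not' negates the top of the
    stack; 'and', 'or', 'xor' combine the top two stack entries.
    """
    stack = []
    for token in expression.split():
        op = token.lower()
        if op == "not":
            stack.append(not stack.pop())
        elif op in ("and", "or", "xor"):
            b, a = stack.pop(), stack.pop()
            stack.append(a and b if op == "and" else a or b if op == "or" else a != b)
        else:
            stack.append(token in device_name)
    return stack.pop() if stack else True
# Tooltip example: "Aa Bla and Cz not and D or" == ((Aa and Bla) and not Cz) or D
assert _rpn_name_match("Aa_Bla_device", "Aa Bla and Cz not and D or")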
| [
"microcraftx@gmail.com"
] | microcraftx@gmail.com |
1ea4f37d648dbba8cdb93a2e9036c0c97129ecf0 | 8c06beebdb5ee28f7292574fefd540f8c43a7acf | /Arctype_Dashboard/asgi.py | f843ce0156227c94479067214b7caa5e4e018782 | [] | no_license | progettazionemauro/ARCTYPE_DJANGO_DASHBOARD | 0c3baf93c6a3f8dd28d9459a21a273efbed1f4e3 | 60d1dab19c32b7a80d70de85e846fd6760be9a26 | refs/heads/master | 2023-04-12T01:37:57.317231 | 2021-05-03T01:48:41 | 2021-05-03T01:48:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for Arctype_Dashboard project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Arctype_Dashboard.settings')
application = get_asgi_application()
| [
"chukslord1@gmail.com"
] | chukslord1@gmail.com |
4dde79d5e3be0ffc2d8fdc9b8d3237fd2be57c5b | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/Dowsure.py | c4c979d73ab936957a9778dbb5945bfedab00234 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 961 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Dowsure(object):
def __init__(self):
self._application_code = None
@property
def application_code(self):
return self._application_code
@application_code.setter
def application_code(self, value):
self._application_code = value
def to_alipay_dict(self):
params = dict()
if self.application_code:
if hasattr(self.application_code, 'to_alipay_dict'):
params['application_code'] = self.application_code.to_alipay_dict()
else:
params['application_code'] = self.application_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = Dowsure()
if 'application_code' in d:
o.application_code = d['application_code']
return o
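# Hypothetical round-trip sketch, not part of the SDK itself: build a Dowsure
# from a plain dict and serialize it back.
if __name__ == '__main__':
    model = Dowsure.from_alipay_dict({'application_code': 'APP_0001'})
    print(model.to_alipay_dict())  # -> {'application_code': 'APP_0001'}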
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
bcf48bbac8518b773ccfdc6279bdd00a78314561 | 7a7c0cd1b38a306e7d51a67f6442fc0b48ae83a0 | /Exam/28.py | 991a36c618d133a6c35e5fa823d398b37a0cc2b7 | [] | no_license | 09-03/Infa | f06fd007ded30101b289730ef0ea12154e3f7317 | a7896461435650a6d3865047ed09ec9dadd4b493 | refs/heads/master | 2023-06-10T16:48:06.440654 | 2021-06-29T00:43:27 | 2021-06-29T00:43:27 | 297,751,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | """
Write a program that computes the greatest common divisor of two numbers
entered from the keyboard.
"""
def NOD(a, b):
    a_start = a
    b_start = b
    while a != 0 and b != 0:
        if a > b:
            a = a % b
        else:
            b = b % a
    gcd = a + b  # one of a, b is now 0, so their sum is the surviving GCD
    print(f"GCD of {a_start} and {b_start}: {gcd}")
    return gcd
a = int(input("Enter the first number: "))
b = int(input("Enter the second number: "))
NOD(a, b)
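# For comparison, a compact recursive Euclid variant (illustrative only; the
# iterative NOD above is the exercise's intended solution):
def nod_recursive(a, b):
    return a if b == 0 else nod_recursive(b, a % b)
assert nod_recursive(48, 18) == 6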
| [
"noreply@github.com"
] | noreply@github.com |
7ecc66635c799a752527ae9aea098b284abae35c | 2f24d7b5c852eb0a8f70b898e101487b1cb3c407 | /src/models/request.py | 1efa149b4e80aa76c8da9ad67e28c619d366a886 | [] | no_license | Project78/Project78 | afdf60dc6f517207070b02488c19f8d392c11784 | 9d8feb4181e9b8b97c05bb49ba3156a1964fa517 | refs/heads/master | 2020-12-24T17:35:44.022129 | 2012-01-31T09:16:56 | 2012-01-31T09:16:56 | 2,641,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | '''
Created on Nov 6, 2011
@author: averaart
'''
from google.appengine.ext import db
from event import Event
from guardian import Guardian
from student import Student
from combination import Combination
class Request(db.Model):
'''
    A Request represents a single subject that a Guardian wants to discuss during an Event
'''
event = db.ReferenceProperty(Event, collection_name="requests")
guardian = db.ReferenceProperty(Guardian, collection_name="all_requests")
student = db.ReferenceProperty(Student, collection_name="requests")
combination = db.ReferenceProperty(Combination, collection_name="requests") | [
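# Hypothetical helper sketch (not in the original app): the collection_name
# back-references above expose reverse queries, e.g. Event.requests.
def requests_for_event(event, limit=100):
    """Return up to `limit` Request entities filed against one Event."""
    return event.requests.fetch(limit)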
"averaart@hotmail.com"
] | averaart@hotmail.com |
e163ad73aa563dda6ef681844f282f3877df4508 | cf0f368408240a146b0d91ad7fe1c2f299694f4b | /main/migrations/0008_auto_20190609_2032.py | 919f27c81a72f260f0494a748082b8ecf9479268 | [] | no_license | joseangelmm/Project-AII | aaab67cef3575d0d13b59604bb63919950e933a9 | 67f8f082b48142de16cb65ba1d9d98de51b41771 | refs/heads/master | 2020-06-02T05:43:10.897784 | 2019-06-13T21:53:55 | 2019-06-13T21:53:55 | 191,058,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # Generated by Django 2.2 on 2019-06-09 18:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0007_auto_20190607_0027'),
]
operations = [
migrations.AlterField(
model_name='noticia',
name='fecha',
field=models.DateField(null=True, verbose_name='Fecha'),
),
]
| [
"36623144+joseangelmm@users.noreply.github.com"
] | 36623144+joseangelmm@users.noreply.github.com |
beafb58ab3ec08a99d6f55d9c254e7f0c0b3fe56 | e92ed45992bf8ef42e36596baa152f5412b45cc9 | /learners/perceptron.py | 0d1a1c2150a65c7b218ba98e4f6c2c579a737ca5 | [] | no_license | kjohnsen/cs478 | e4fda959aae8e87cffc82deee88f111f69f4fe19 | f01fab43833dbc50fff78b242113b4fbb46931d0 | refs/heads/master | 2020-04-15T14:41:05.262515 | 2019-03-20T05:57:15 | 2019-03-20T05:57:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 980 | py | import random
import numpy as np
class Perceptron:
def __init__(self, num_features, learning_rate):
        # the last weight is intended as the bias; callers are expected to
        # append a constant 1.0 to each input vector so it takes effect
self.weights = np.array([random.uniform(0.1, 0.5) for x in range(num_features)])
self.l_rate = learning_rate
# Returns 1 if it was correct, to keep track of accuracy
def process_instance(self, inputs, target):
out = self.calculate_output(inputs)
# c*input*(target-out)
weight_delta = [self.l_rate * input * (target - out) for input in inputs]
self.weights += weight_delta
if target == out: return 1
else: return 0
def calculate_output(self, inputs, net=False):
# print(f"Inputs = {inputs}")
# print(f'Weights = {self.weights}')
weighted_in = np.multiply(inputs, self.weights)
net_out = np.sum(weighted_in)
if net_out > 0: out = 1
else: out = 0
if net: return (out, net_out)
else: return out
| [
"johnsenkyle13@gmail.com"
] | johnsenkyle13@gmail.com |
4a5726c2ee0ab88d6178f75060afb050e28bb3d0 | 9b946ad3b66792aafdb3a41e7cd05289f747bd1a | /Festhub/studlogin/apps.py | 3525eea9deae50b93a58b1efcf41d31b84dde048 | [] | no_license | aiswaryathenkunnel/MCAProjec | fcadfea71dad0fcd4d297624d07ac2341c9a220d | c4e0b4c6e9de02baae82269c9aefb8c62e4ce3b2 | refs/heads/master | 2020-04-07T23:16:16.555560 | 2018-11-23T08:49:16 | 2018-11-23T08:49:16 | 158,805,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class StudloginConfig(AppConfig):
name = 'studlogin'
| [
"45259054+aiswaryathenkunnel@users.noreply.github.com"
] | 45259054+aiswaryathenkunnel@users.noreply.github.com |
8b9843406d7206f8d8eb6ef33274a88f5669773e | b727870804e5c7a474c271e1cf0ebfe05619ddfb | /keras44_5_wine_conv1d.py | 38577cf7df599d8d5b61c45ee04523731daff3ff | [] | no_license | marattang/keras | 843227592f7b3cb08034bfdc2e6319200e62e990 | cc78d1d70bfbe99e78f19ae11053ebbb87f20864 | refs/heads/main | 2023-08-03T21:50:53.438394 | 2021-09-10T05:11:15 | 2021-09-10T05:11:15 | 383,742,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,365 | py | import numpy as np
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, GlobalAveragePooling2D, Flatten, LSTM, Conv1D
from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, RobustScaler, StandardScaler, PowerTransformer, QuantileTransformer
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import EarlyStopping
from matplotlib import font_manager, rc
from tensorflow.python.keras.layers.core import Dropout
font_path = "C:/Windows/Fonts/gulim.ttc"
font = font_manager.FontProperties(fname=font_path).get_name()
rc('font', family=font)
# Exercise: complete the model
# Target: accuracy of at least 0.8
dataset = load_wine()
x = dataset.data
y = dataset.target
print(dataset.DESCR)
print(dataset.feature_names)
print(np.unique(y))
y = to_categorical(y)
print(y.shape)
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, shuffle=True, random_state=66)
print(x_train)
print(x_train.shape)
scaler = PowerTransformer()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# print(x_train.shape)
# print(x_test.shape)
x_train = x_train.reshape(124, 13, 1)
x_test = x_test.reshape(54, 13, 1)
#
# model = Sequential()
# model.add(LSTM(units=128, activation='relu', input_shape=(13, 1)))
# model.add(Dense(256, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(128, activation='relu'))
# model.add(Dense(64, activation='relu'))
# model.add(Dense(32, activation='relu'))
# model.add(Dropout(0.1))
# model.add(Dense(32, activation='relu'))
# model.add(Dense(3, activation='softmax'))
model = Sequential()
model.add(Conv1D(16, kernel_size=1, activation='relu', input_shape=(13, 1)))
model.add(Conv1D(8, 1))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(32, activation='relu'))
model.add(Dense(3, activation='softmax'))
#
es = EarlyStopping(monitor='val_loss', mode='min', patience=15)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# hist = model.fit(x_train, y_train, batch_size=32, epochs=500, validation_split=0.1, callbacks=[es])
hist = model.fit(x_train, y_train, batch_size=1, epochs=70, validation_split=0.05)
# plt.plot(hist.history['loss']) # x: epoch, y: hist.history['loss']
# plt.plot(hist.history['val_loss'])
# plt.xlabel('epochs')
# plt.ylabel('loss, val_loss')
# plt.title('loss, val_loss')
# plt.show()
#
loss = model.evaluate(x_test, y_test)
print('loss : ', loss[0])
print('accuracy : ', loss[1])
# DNN
# QuantileTransformer - accuracy : 0.9259259104728699
# MaxAbsScaler - accuracy : 0.9259259104728699
# MinMaxScaler - accuracy : 0.9629629850387573
# RobustScaler - accuracy : 0.9814814925193787
# StandardScaler - accuracy : 0.9814814925193787
# PowerTransformer - accuracy : 0.9814814925193787
# CNN
# accuracy : 0.9814814925193787
# RNN
# epochs 50 -> 70
# after hyperparameter tuning
# accuracy : 0.9444444179534912 -> accuracy : 1.0
# conv1d
# accuracy : 0.9814814925193787 | [
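# A minimal sketch (hypothetical helper, not in the original script) of how the
# scaler comparison recorded in the comments above could be automated.
# x_raw_train / x_raw_test stand for untransformed copies of the split, which
# this script does not keep, and build_model wraps the Sequential definition.
def compare_scalers(x_raw_train, x_raw_test, y_tr, y_te, build_model):
    for Scaler in (MinMaxScaler, RobustScaler, StandardScaler, PowerTransformer):
        s = Scaler().fit(x_raw_train)
        m = build_model()
        m.fit(s.transform(x_raw_train).reshape(-1, 13, 1), y_tr,
              batch_size=1, epochs=70, verbose=0)
        acc = m.evaluate(s.transform(x_raw_test).reshape(-1, 13, 1), y_te, verbose=0)[1]
        print(Scaler.__name__, 'accuracy :', acc)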
"tlawlfp0322@gmail.com"
] | tlawlfp0322@gmail.com |
ff3255e6e3d9e37b3559b22797e3b5adaa849817 | 5167c21d194d3a03d59d58368f5c8481370af356 | /Semana 09/03.py | 1fb45f83b87281362864de69b3330d4c0d0d6d4a | [] | no_license | GuilhermeSSx/NappAcademy1 | 15ebc206d12ec52b25df2705ff3f8fe08b415605 | cd3a7641d3b8468a876d31df33044ea946b78e36 | refs/heads/master | 2023-05-28T05:52:17.048297 | 2021-06-13T18:51:02 | 2021-06-13T18:51:02 | 372,360,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | import json
def criar_json(**kwargs):
return json.dumps(kwargs)
assert criar_json(test="porvilow") == '{"test": "porvilow"}'
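# json.dumps serializes every keyword argument, so richer payloads work too
# (key order follows kwargs insertion order on Python 3.7+):
assert criar_json(nome="Ana", notas=[10, 9]) == '{"nome": "Ana", "notas": [10, 9]}'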
| [
"guilhermedosantos45@gmail.com"
] | guilhermedosantos45@gmail.com |
4905f1162de481d5c10d057cf7e2d91f01cd6fba | a3d32e0ff84958d194ced642441f5379c0032465 | /tests/functions/test_image_train_process.py | 3fe7e75cf95dfa56d3155c3a714ddfd2389acd77 | [] | no_license | TensorMSA/tensormsa_old | 406755511d05d4ec179c085337a05f73c0dde80a | ef058737f391de817c74398ef9a5d3a28f973c98 | refs/heads/master | 2021-06-18T11:58:29.349060 | 2017-04-20T10:17:43 | 2017-04-20T10:17:43 | 67,384,681 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,430 | py | import unittest, requests, os, json,random
from tfmsacore.utils.logger import tfmsa_logger
from django.core.files.uploadedfile import TemporaryUploadedFile
from tfmsacore.data import ImageManager
class TestImageTrainProcess(unittest.TestCase):
"""
./manage.py jenkins ./tests/functions --enable-coverage
./manage.py jenkins ./tests/functions
"""
rand_name = str(random.randrange(1,99999))
def test_image_train(self):
host_name = "{0}:{1}".format(os.environ['HOSTNAME'], "8989")
tfmsa_logger("[1] Image file format update")
resp = requests.post('http://' + host_name + '/api/v1/type/imagefile/base/mes/table/testtable2/format/nn0000090/',
json={"x_size": 32,"y_size": 32 })
if(json.loads(resp.json())['status'] != "200"):
raise Exception ("RESI Service Fail")
tfmsa_logger("[2] Network info update")
resp = requests.post('http://' + host_name + '/api/v1/type/common/nninfo/',
json={
"nn_id": "nn0000090",
"category": "SCM",
"subcate": "csv",
"name": "CENSUS_INCOME",
"desc": "INCOME PREDICT"
})
if (json.loads(resp.json())['status'] != "200"):
raise Exception("RESI Service Fail")
tfmsa_logger("[3] Network configuration update")
resp = requests.post('http://' + host_name + '/api/v1/type/cnn/conf/nn0000090/',
json={
"data":
{
"datalen": 1024,
"taglen": 2,
"matrix": [32, 32],
"learnrate": 0.01,
"epoch": 10
},
"layer":
[
{
"type": "input",
"active": "relu",
"cnnfilter": [2, 2],
"cnnstride": [2, 2],
"maxpoolmatrix": [2, 2],
"maxpoolstride": [2, 2],
"node_in_out": [1, 16],
"regualizer": "",
"padding": "SAME",
"droprate": ""
},
{
"type": "cnn",
"active": "relu",
"cnnfilter": [2, 2],
"cnnstride": [2, 2],
"maxpoolmatrix": [2, 2],
"maxpoolstride": [2, 2],
"node_in_out": [16, 32],
"regualizer": "",
"padding": "SAME",
"droprate": ""
},
{
"type": "reshape",
},
{
"type": "drop",
"active": "relu",
"regualizer": "",
"droprate": "0.5"
},
{
"type": "out",
"active": "softmax",
"cnnfilter": "",
"cnnstride": "",
"maxpoolmatrix": "",
"maxpoolstride": "",
"node_in_out": [32, 2],
"regualizer": "",
"padding": "SAME",
"droprate": ""
}
]
})
if (json.loads(resp.json())['status'] != "200"):
raise Exception("RESI Service Fail")
tfmsa_logger("[4] Train Neural Network")
resp = requests.post('http://' + host_name + '/api/v1/type/cnn/train/nn0000090/',
json={
"epoch": "10",
"testset": "10"
})
if (json.loads(resp.json())['status'] != "200"):
raise Exception("RESI Service Fail")
tfmsa_logger("[5] PASS TEST")
| [
"tmddno1@naver.com"
] | tmddno1@naver.com |
b59946a4e00e1bcdc58afd777c83dd4e729b3c09 | 436f7239b7314251b91a55868473a96d7dccac00 | /layer_definition.py | 1f5bfc88b4109cb0aed3ccbd62aab0e8d766ed26 | [
"Apache-2.0"
] | permissive | anguoyang/RED-NN | 44164d15f89072d230a62556eb449af0a108ec04 | b4b746eb0c1c0920822ee37b02c30c87ae0fba3d | refs/heads/master | 2022-02-18T23:19:13.892083 | 2019-10-08T07:48:04 | 2019-10-08T07:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,117 | py | import tensorflow as tf
import numpy as np
class Rig2DConv(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, phi, un_rotate=True, padding='SAME', **kwargs):
# Non-trainable variables initialization
self.filters = filters
self.un_rotate = un_rotate
self.phi = phi
self.pad = padding
self.kernel_size = kernel_size
self.ks = tf.constant(kernel_size, dtype='float32')
self.angle = tf.constant(0, dtype='float32')
self.angle2 = tf.constant(360, dtype='float32')
self.ang = tf.linspace(self.angle, self.angle2, self.phi + 1)
self.mid = tf.constant(0.5, dtype='float32')
self.rds = tf.constant(np.pi / 180, dtype='float32')
self.center = tf.round((self.ks * self.mid) - self.mid)
super(Rig2DConv, self).__init__(**kwargs)
def build(self, input_shape):
# Trainable variables initialization
self._l = self.add_weight(name='l', shape=(self.filters,), initializer=tf.initializers.constant(value=0.5111867),
trainable=True, constraint=tf.keras.constraints.NonNeg())
self.alpha = self.add_weight(name='alpha', shape=(self.filters,), initializer=tf.initializers.constant(value=0.5651333),
trainable=True, constraint=tf.keras.constraints.NonNeg())
self.beta = self.add_weight(name='beta', shape=(self.filters,), initializer=tf.initializers.constant(value=1.4184482),
trainable=True, constraint=tf.keras.constraints.NonNeg())
# Input size dependent variables
self.nf = input_shape[-1]
self.width = input_shape[-2]
self.height = input_shape[-3]
super(Rig2DConv, self).build(input_shape)
def call(self, x, **kwargs):
# Generate a set of coordinates
d = tf.range(self.ks)
x_coord, y_coord = tf.meshgrid(d, d)
x_coord = tf.cast(x_coord, dtype="float32") - self.center
y_coord = tf.cast(y_coord, dtype="float32") - self.center
# Horizontal basis filter
arx = tf.math.divide((-2 * self.alpha[0] * tf.square(self._l[0]) * x_coord), np.pi) * tf.exp(
-self._l[0] * ((self.alpha[0] * tf.square(x_coord)) + (self.beta[0] * tf.square(y_coord))))
# Vertical basis filter
ary = tf.math.divide((-2 * self.beta[0] * tf.square(self._l[0]) * y_coord), np.pi) * tf.exp(
-self._l[0] * ((self.alpha[0] * tf.square(x_coord)) + (self.beta[0] * tf.square(y_coord))))
arx = tf.expand_dims(arx, axis=-1)
ary = tf.expand_dims(ary, axis=-1)
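        # arx/ary are the x- and y- first derivatives of an anisotropic Gaussian
        # envelope. Each oriented filter below is then synthesized as
        # cos(theta)*arx + sin(theta)*ary -- the classic steerable-filter
        # combination, which is an exact rotation when alpha == beta.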
# First filter generation
ar = (tf.cos(self.ang[0] * self.rds) * arx) + (tf.sin(self.ang[0] * self.rds) * ary)
# Convolve input with first generated filter
par1 = tf.nn.conv2d(x, tf.reshape(ar, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1), padding=self.pad)
# Rotation compensation to get translational feature space
par2 = tf.contrib.image.rotate(par1, self.ang[0] * self.rds, interpolation='BILINEAR')
# Second filter generation
ar1 = (tf.cos(self.ang[1] * self.rds) * arx) + (tf.sin(self.ang[1] * self.rds) * ary)
# Convolve input with second generated filter
par3 = tf.nn.conv2d(x, tf.reshape(ar1, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1), padding=self.pad)
# Rotation compensation to get translational feature space
par4 = tf.contrib.image.rotate(par3, self.ang[1] * self.rds, interpolation='BILINEAR')
if self.un_rotate:
out = tf.concat([par2, par4], axis=3)
else:
out = tf.concat([par1, par3], axis=3)
# Apply same process from filters up to Phi
for aa in range(2, self.phi):
ar = (tf.cos(self.ang[aa] * self.rds) * arx) + (tf.sin(self.ang[aa] * self.rds) * ary)
partial = tf.nn.conv2d(x, tf.reshape(ar, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1),
padding=self.pad)
partial2 = tf.contrib.image.rotate(partial, self.ang[aa] * self.rds, interpolation='BILINEAR')
if self.un_rotate:
out = tf.concat([out, partial2], axis=3)
else:
out = tf.concat([out, partial], axis=3)
out_f1 = tf.reshape(out, shape=(-1, self.height, self.width, self.phi, 1))
out_f1 = tf.transpose(out_f1, perm=(0, 3, 1, 2, 4))
# If only one filter ensemble is used return this
if self.filters == 1:
return out_f1
# If more ensembles are required do the same
arx = tf.math.divide((-2 * self.alpha[1] * tf.square(self._l[1]) * x_coord), np.pi) * tf.exp(
-self._l[1] * ((self.alpha[1] * tf.square(x_coord)) + (self.beta[1] * tf.square(y_coord))))
ary = tf.math.divide((-2 * self.beta[1] * tf.square(self._l[1]) * y_coord), np.pi) * tf.exp(
-self._l[1] * ((self.alpha[1] * tf.square(x_coord)) + (self.beta[1] * tf.square(y_coord))))
arx = tf.expand_dims(arx, axis=-1)
ary = tf.expand_dims(ary, axis=-1)
ar = (tf.cos(self.ang[0] * self.rds) * arx) + (tf.sin(self.ang[0] * self.rds) * ary)
par1 = tf.nn.conv2d(x, tf.reshape(ar, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1), padding=self.pad)
par2 = tf.contrib.image.rotate(par1, self.ang[0] * self.rds, interpolation='BILINEAR')
ar1 = (tf.cos(self.ang[1] * self.rds) * arx) + (tf.sin(self.ang[1] * self.rds) * ary)
par3 = tf.nn.conv2d(x, tf.reshape(ar1, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1), padding=self.pad)
par4 = tf.contrib.image.rotate(par3, self.ang[1] * self.rds, interpolation='BILINEAR')
if self.un_rotate:
out = tf.concat([par2, par4], axis=3)
else:
out = tf.concat([par1, par3], axis=3)
for aa in range(2, self.phi):
ar = (tf.cos(self.ang[aa] * self.rds) * arx) + (tf.sin(self.ang[aa] * self.rds) * ary)
partial = tf.nn.conv2d(x, tf.reshape(ar, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1),
padding=self.pad)
partial2 = tf.contrib.image.rotate(partial, self.ang[aa] * self.rds, interpolation='BILINEAR')
if self.un_rotate:
out = tf.concat([out, partial2], axis=3)
else:
out = tf.concat([out, partial], axis=3)
out_f2 = tf.reshape(out, shape=(-1, self.height, self.width, self.phi, 1))
out_f2 = tf.transpose(out_f2, perm=(0, 3, 1, 2, 4))
out_final = tf.concat([out_f1, out_f2], axis=4)
for bb in range(2, self.filters):
arx = tf.math.divide((-2 * self.alpha[bb] * tf.square(self._l[bb]) * x_coord), np.pi) * tf.exp(
-self._l[bb] * ((self.alpha[bb] * tf.square(x_coord)) + (self.beta[bb] * tf.square(y_coord))))
ary = tf.math.divide((-2 * self.beta[bb] * tf.square(self._l[bb]) * y_coord), np.pi) * tf.exp(
-self._l[bb] * ((self.alpha[bb] * tf.square(x_coord)) + (self.beta[bb] * tf.square(y_coord))))
arx = tf.expand_dims(arx, axis=-1)
ary = tf.expand_dims(ary, axis=-1)
ar = (tf.cos(self.ang[0] * self.rds) * arx) + (tf.sin(self.ang[0] * self.rds) * ary)
par1 = tf.nn.conv2d(x, tf.reshape(ar, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1),
padding=self.pad)
par2 = tf.contrib.image.rotate(par1, self.ang[0] * self.rds, interpolation='BILINEAR')
ar1 = (tf.cos(self.ang[1] * self.rds) * arx) + (tf.sin(self.ang[1] * self.rds) * ary)
par3 = tf.nn.conv2d(x, tf.reshape(ar1, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1),
padding=self.pad)
par4 = tf.contrib.image.rotate(par3, self.ang[1] * self.rds, interpolation='BILINEAR')
if self.un_rotate:
out = tf.concat([par2, par4], axis=3)
else:
out = tf.concat([par1, par3], axis=3)
for aa in range(2, self.phi):
ar = (tf.cos(self.ang[aa] * self.rds) * arx) + (tf.sin(self.ang[aa] * self.rds) * ary)
partial = tf.nn.conv2d(x, tf.reshape(ar, (self.ks, self.ks, self.nf, 1)), strides=(1, 1, 1, 1),
padding=self.pad)
partial2 = tf.contrib.image.rotate(partial, self.ang[aa] * self.rds, interpolation='BILINEAR')
if self.un_rotate:
out = tf.concat([out, partial2], axis=3)
else:
out = tf.concat([out, partial], axis=3)
out_fn = tf.reshape(out, shape=(-1, self.height, self.width, self.phi, 1))
out_fn = tf.transpose(out_fn, perm=(0, 3, 1, 2, 4))
out_final = tf.concat([out_final, out_fn], axis=4)
return out_final
def get_config(self):
base_config = super(Rig2DConv, self).get_config()
base_config['filters'] = self.filters
base_config['kernel_size'] = self.kernel_size
base_config['phi'] = self.phi
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
# A cyclic convolution can be implemented with a linear convolution over a periodically padded feature space
class Periodic_Pad(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(Periodic_Pad, self).__init__(**kwargs)
def build(self, input_shape):
super(Periodic_Pad, self).build(input_shape)
def call(self, inputs):
x = tf.concat([inputs, inputs], axis=1)
return x[:, :-1, :, :, :]
def get_config(self):
base_config = super(Periodic_Pad, self).get_config()
return base_config
@classmethod
def from_config(cls, config):
return cls(**config)
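# --- Minimal usage sketch (added for illustration; not part of the original
# file). Assumes TensorFlow 1.x with tf.contrib available, as the layers
# above require; the input shape and hyper-parameters are arbitrary. ---
if __name__ == "__main__":
    inputs = tf.keras.layers.Input(shape=(28, 28, 1))
    # one filter ensemble, 9x9 kernels, sampled at 8 orientations
    features = Rig2DConv(filters=1, kernel_size=9, phi=8)(inputs)
    # periodic padding along the orientation axis enables cyclic convolution
    padded = Periodic_Pad()(features)
    model = tf.keras.Model(inputs=inputs, outputs=padded)
    model.summary()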
| [
"noreply@github.com"
] | noreply@github.com |
3387ea205b396e3bb8b9c493c9f7e9b47b825ea5 | 82e7ff55cc4d7fa9d4423e77eb7625cd89a65b78 | /src/bench/bench_muxers.py | 594da8837410126482bcf45fec660db9068be6be | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JohanSmet/lsim | d3c9cc63e01f5caa2bf2de50ff810999998c3d0a | 37594029278539f012b970d1516a4e27f82c0e24 | refs/heads/master | 2023-04-27T03:50:28.394867 | 2023-04-24T20:22:16 | 2023-04-24T20:22:16 | 208,345,318 | 11 | 5 | BSD-3-Clause | 2023-04-24T20:34:07 | 2019-09-13T20:56:13 | C++ | UTF-8 | Python | false | false | 31,484 | py | #!/usr/bin/env python3
import lsimpy
from bench_utils import *
lsim = lsimpy.LSimContext()
HIGH = lsimpy.ValueTrue
LOW = lsimpy.ValueFalse
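# `run_thruth_table` is imported from bench_utils (the repo keeps that
# historical spelling). A sketch of the contract it is assumed to fulfil --
# the body below is illustrative pseudocode, not the actual lsimpy API:
#
#     def run_thruth_table(lsim, circuit_name, truth_table):
#         for inputs, expected in truth_table:
#             # drive each named input pin to its value, step the
#             # simulation until stable, then assert that every output
#             # pin matches `expected`
#             ...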
def test_mux2to1s():
truth_table = [
[{'I0': LOW, 'I1': LOW, 'Sel': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'Sel': LOW, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': HIGH, 'I1': LOW, 'Sel': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': HIGH, 'I1': HIGH, 'Sel': HIGH, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': LOW, 'I1': HIGH, 'Sel': HIGH, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': LOW, 'I1': LOW, 'Sel': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'Sel': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'Sel': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': HIGH, 'I1': HIGH, 'Sel': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'Sel': HIGH, '/Strobe': HIGH}, {'Y': LOW}]
]
run_thruth_table(lsim, "mux2to1s", truth_table)
def test_mux4to1s():
truth_table = [
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': LOW}, {'Y': HIGH}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': LOW, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': HIGH, 'Sel1': LOW, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': LOW, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'Sel0': HIGH, 'Sel1': HIGH, '/Strobe': HIGH}, {'Y': LOW}]
]
run_thruth_table(lsim, "mux4to1s", truth_table)
def test_decode1to2():
truth_table = [
[{'I0': LOW, '/Strobe': LOW}, {'O[0]': HIGH, 'O[1]': LOW}],
[{'I0': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': HIGH}],
[{'I0': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW}],
[{'I0': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW}]
]
run_thruth_table(lsim, "decode1to2", truth_table)
def test_decode2to4():
truth_table = [
[{'I[0]': LOW, 'I[1]': LOW, '/Strobe': LOW}, {'O[0]': HIGH, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': HIGH, 'O[2]': LOW, 'O[3]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': HIGH, 'O[3]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': HIGH}],
[{'I[0]': LOW, 'I[1]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}]
]
run_thruth_table(lsim, "decode2to4", truth_table)
def test_decode3to8():
truth_table = [
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': LOW, '/Strobe': LOW}, {'O[0]': HIGH, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': HIGH, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': HIGH, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': HIGH, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': HIGH, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': HIGH, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': HIGH, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': HIGH}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}]
]
run_thruth_table(lsim, "decode3to8", truth_table)
def test_decode4to16():
truth_table = [
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': HIGH, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': HIGH, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': HIGH, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': HIGH, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': HIGH, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': HIGH, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': HIGH, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': HIGH, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': HIGH, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': HIGH, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': HIGH, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': HIGH, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': HIGH, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': HIGH, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': HIGH, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': HIGH}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': LOW, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': LOW, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': LOW, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': LOW, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}],
[{'I[0]': HIGH, 'I[1]': HIGH, 'I[2]': HIGH, 'I[3]': HIGH, '/Strobe': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW, 'O[8]': LOW, 'O[9]': LOW, 'O[10]': LOW, 'O[11]': LOW, 'O[12]': LOW, 'O[13]': LOW, 'O[14]': LOW, 'O[15]': LOW}]
]
run_thruth_table(lsim, "decode4to16", truth_table)
def test_decode5to32():
truth_table = []
for s in [LOW, HIGH]:
for v in range(0, 32):
inputs = {'/Strobe' : s}
outputs = {}
for i in range(0, 5):
inputs.update([('I[{}]'.format(i), HIGH if ((v >> i) & 1) == 1 else LOW)])
for o in range(0, 32):
outputs.update([('O[{}]'.format(o), HIGH if v == o and s == LOW else LOW)])
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "decode5to32", truth_table)
def test_decode6to64():
truth_table = []
for s in [LOW, HIGH]:
for v in range(0, 64):
inputs = {'/Strobe' : s}
outputs = {}
for i in range(0, 6):
inputs.update([('I[{}]'.format(i), HIGH if ((v >> i) & 1) == 1 else LOW)])
for o in range(0, 64):
outputs.update([('O[{}]'.format(o), HIGH if v == o and s == LOW else LOW)])
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "decode6to64", truth_table)
def test_demux1to2():
truth_table = [
[{'Sel': LOW, 'I': HIGH}, {'O[0]': HIGH, 'O[1]': LOW}],
[{'Sel': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': HIGH}],
[{'Sel': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW}],
[{'Sel': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW}]
]
run_thruth_table(lsim, "demux1to2", truth_table)
def test_demux1to4():
truth_table = [
[{'Sel[0]': LOW, 'Sel[1]': LOW, 'I': HIGH}, {'O[0]': HIGH, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': LOW, 'I': HIGH}, {'O[0]': LOW, 'O[1]': HIGH, 'O[2]': LOW, 'O[3]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': HIGH, 'O[3]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': HIGH}],
[{'Sel[0]': LOW, 'Sel[1]': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW}]
]
run_thruth_table(lsim, "demux1to4", truth_table)
def test_demux1to8():
truth_table = [
[{'Sel[0]': LOW, 'Sel[1]': LOW, 'Sel[2]': LOW, 'I': HIGH}, {'O[0]': HIGH, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': LOW, 'Sel[2]': LOW, 'I': HIGH}, {'O[0]': LOW, 'O[1]': HIGH, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': HIGH, 'Sel[2]': LOW, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': HIGH, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': HIGH, 'Sel[2]': LOW, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': HIGH, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': LOW, 'Sel[2]': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': HIGH, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': LOW, 'Sel[2]': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': HIGH, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': HIGH, 'Sel[2]': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': HIGH, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': HIGH, 'Sel[2]': HIGH, 'I': HIGH}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': HIGH}],
[{'Sel[0]': LOW, 'Sel[1]': LOW, 'Sel[2]': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': LOW, 'Sel[2]': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': HIGH, 'Sel[2]': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': HIGH, 'Sel[2]': LOW, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': LOW, 'Sel[2]': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': LOW, 'Sel[2]': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': LOW, 'Sel[1]': HIGH, 'Sel[2]': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}],
[{'Sel[0]': HIGH, 'Sel[1]': HIGH, 'Sel[2]': HIGH, 'I': LOW}, {'O[0]': LOW, 'O[1]': LOW, 'O[2]': LOW, 'O[3]': LOW, 'O[4]': LOW, 'O[5]': LOW, 'O[6]': LOW, 'O[7]': LOW}]
]
run_thruth_table(lsim, "demux1to8", truth_table)
def test_demux1to16():
truth_table = []
for s in [HIGH, LOW]:
for v in range(0, 16):
inputs = {'I' : s}
outputs = {}
for i in range(0, 4):
inputs.update([('Sel[{}]'.format(i), HIGH if ((v >> i) & 1) == 1 else LOW)])
for o in range(0, 16):
outputs.update([('O[{}]'.format(o), HIGH if v == o and s == HIGH else LOW)])
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "demux1to16", truth_table)
def test_demux1to32():
truth_table = []
for s in [HIGH, LOW]:
for v in range(0, 32):
inputs = {'I' : s}
outputs = {}
for i in range(0, 5):
inputs.update([('Sel[{}]'.format(i), HIGH if ((v >> i) & 1) == 1 else LOW)])
for o in range(0, 32):
outputs.update([('O[{}]'.format(o), HIGH if v == o and s == HIGH else LOW)])
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "demux1to32", truth_table)
def test_demux1to64():
truth_table = []
for s in [HIGH, LOW]:
for v in range(0, 64):
inputs = {'I' : s}
outputs = {}
for i in range(0, 6):
inputs.update([('Sel[{}]'.format(i), HIGH if ((v >> i) & 1) == 1 else LOW)])
for o in range(0, 64):
outputs.update([('O[{}]'.format(o), HIGH if v == o and s == HIGH else LOW)])
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "demux1to64", truth_table)
def test_prio_encode4to2():
truth_table = [
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW}, {'O0': LOW, 'O1': LOW, 'V': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW}, {'O0': LOW, 'O1': LOW, 'V': HIGH}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW}, {'O0': HIGH, 'O1': LOW, 'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW}, {'O0': LOW, 'O1': HIGH, 'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH}, {'O0': HIGH, 'O1': HIGH, 'V': HIGH}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW}, {'O0': LOW, 'O1': LOW, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': LOW, 'I3': LOW}, {'O0': HIGH, 'O1': LOW, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': LOW}, {'O0': LOW, 'O1': HIGH, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH}, {'O0': HIGH, 'O1': HIGH, 'V': HIGH}]
]
run_thruth_table(lsim, "prio_encode4to2", truth_table)
def test_prio_encode8to3():
truth_table = [
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': LOW, 'O2': LOW, 'V': LOW}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': LOW, 'O2': LOW, 'V': HIGH}],
[{'I0': LOW, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': HIGH, 'O1': LOW, 'O2': LOW, 'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': HIGH, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': HIGH, 'O2': LOW, 'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': HIGH, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': HIGH, 'O1': HIGH, 'O2': LOW, 'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': HIGH, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': LOW, 'O2': HIGH,'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': HIGH, 'I6': LOW, 'I7': LOW}, {'O0': HIGH, 'O1': LOW, 'O2': HIGH,'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': HIGH, 'I7': LOW}, {'O0': LOW, 'O1': HIGH, 'O2': HIGH,'V': HIGH}],
[{'I0': LOW, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': HIGH}, {'O0': HIGH, 'O1': HIGH, 'O2': HIGH,'V': HIGH}],
[{'I0': HIGH, 'I1': LOW, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': LOW, 'O2': LOW, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': LOW, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': HIGH, 'O1': LOW, 'O2': LOW, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': LOW, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': HIGH, 'O2': LOW, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'I4': LOW, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': HIGH, 'O1': HIGH, 'O2': LOW, 'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'I4': HIGH, 'I5': LOW, 'I6': LOW, 'I7': LOW}, {'O0': LOW, 'O1': LOW, 'O2': HIGH,'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'I4': HIGH, 'I5': HIGH, 'I6': LOW, 'I7': LOW}, {'O0': HIGH, 'O1': LOW, 'O2': HIGH,'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'I4': HIGH, 'I5': HIGH, 'I6': HIGH, 'I7': LOW}, {'O0': LOW, 'O1': HIGH, 'O2': HIGH,'V': HIGH}],
[{'I0': HIGH, 'I1': HIGH, 'I2': HIGH, 'I3': HIGH, 'I4': HIGH, 'I5': HIGH, 'I6': HIGH, 'I7': HIGH}, {'O0': HIGH, 'O1': HIGH, 'O2': HIGH,'V': HIGH}]
]
run_thruth_table(lsim, "prio_encode8to3", truth_table)
def main():
if (not lsim.load_user_library("../../examples/cpu_8bit/lib_muxers.lsim")):
print("Unable to load circuit\n")
exit(-1)
test_mux2to1s()
test_mux4to1s()
test_decode1to2()
test_decode2to4()
test_decode3to8()
test_decode4to16()
test_decode5to32()
test_decode6to64()
test_demux1to2()
test_demux1to4()
test_demux1to8()
test_demux1to16()
test_demux1to32()
test_demux1to64()
test_prio_encode4to2()
test_prio_encode8to3()
print_stats()
if __name__ == "__main__":
main() | [
"johan.smet@justcode.be"
] | johan.smet@justcode.be |
0b420050e1479b0904e29b59e1c48a5160989fd1 | f392a5e4193d44c41e234696d093140cdf301497 | /tests/example2.py | 8b1a937795148bdddefeb027df7948a1d1727c74 | [
"Apache-2.0"
] | permissive | GateNLP/gate-lf-python-data | fb151132c94e25f59947d6400692f23914dfa89e | 89880a82458f09702c1d6828ae341997e0b45f73 | refs/heads/master | 2021-03-27T08:55:26.304655 | 2019-05-31T11:44:29 | 2019-05-31T11:44:29 | 113,597,027 | 4 | 1 | Apache-2.0 | 2019-05-30T08:50:59 | 2017-12-08T16:52:39 | Python | UTF-8 | Python | false | false | 362 | py | from __future__ import print_function
from gatelfdata import Dataset
import sys
if len(sys.argv) != 2:
raise Exception("Need one parameter: meta file")
file = sys.argv[1]
ds = Dataset(file)
valset = ds.convert_to_file()
for b in ds.batches_converted(batch_size=20, as_numpy=False, pad_left=True):
print("Batch: len=", len(b))
print("Batch: data=", b)
| [
"johann.petrak@gmail.com"
] | johann.petrak@gmail.com |
7cfee5b9df13834712ed1c7dfcb5aaac39cd1210 | cd8f7ecd20c58ce1ae0fe3840f7c7ee961aa5819 | /Third Maximum Number.py | e33e7d88fe709f5f961c58fc1b2f6c3993b73f63 | [
"Apache-2.0"
] | permissive | sugia/leetcode | 9b0f2a3521b088f8f7e5633c2c6c17c76d33dcaf | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | refs/heads/master | 2021-06-05T07:20:04.099488 | 2021-02-24T07:24:50 | 2021-02-24T07:24:50 | 29,124,136 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | '''
Given a non-empty array of integers, return the third maximum number in this array. If it does not exist, return the maximum number. The time complexity must be in O(n).
Example 1:
Input: [3, 2, 1]
Output: 1
Explanation: The third maximum is 1.
Example 2:
Input: [1, 2]
Output: 2
Explanation: The third maximum does not exist, so the maximum (2) is returned instead.
Example 3:
Input: [2, 2, 3, 1]
Output: 1
Explanation: Note that the third maximum here means the third maximum distinct number.
Both numbers with value 2 are considered the second maximum.
'''
class Solution(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
vec = sorted(list(set(nums)), reverse = True)
if len(vec) > 2:
return vec[2]
return vec[0]
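# A one-pass O(n) alternative (added for illustration), since the problem
# statement above asks for linear time while sorting the distinct values
# costs O(n log n) in the worst case:
def third_max_linear(nums):
    first = second = third = None
    for n in nums:
        if n in (first, second, third):
            continue
        if first is None or n > first:
            first, second, third = n, first, second
        elif second is None or n > second:
            second, third = n, second
        elif third is None or n > third:
            third = n
    return first if third is None else third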
| [
"noreply@github.com"
] | noreply@github.com |
7d7924468a06f470667187de5548dd1e1047dfaa | a93cfeed4c2a2833f1896bf5f39aa31955f21efe | /Data Structures/Graphs/BFS_simple.py | 5ae140432c3c0db4bf5c088416c25bf7cebf8063 | [] | no_license | akshat343/Python-Programming | ae83d05408fb67d51d388df22492dfe743596b2a | f5a1540770388e49d65536352ce1816c406d5229 | refs/heads/master | 2023-08-05T19:58:40.293770 | 2021-10-07T07:25:46 | 2021-10-07T07:25:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | """
Author : Robbin Singh
Simple implementation of BFS (breadth-first search) using a queue
"""
# Read the adjacency matrix of the graph from the user
print("Enter the adjacency matrix")
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
matrix = []
print("Enter the entries row wise:")
for i in range(R):
a = []
for j in range(C):
a.append(int(input()))
matrix.append(a)
print("Adjancey Matrix:")
for i in range(R):
for j in range(C):
print(matrix[i][j], end=" ")
print()
print("\nDFS :")
visited = [0] * R
queue = [0]
visited[0] = 1  # traversal starts from vertex 0
n = queue.pop(0)
print(n, end="")
while 1:
for x in range(0,len(visited)):
if matrix[n][x] == 1 and visited[x]==0:
visited[x]=1
queue.append(x)
if len(queue)==0:
break
else:
n = queue.pop(0)
print("->",n,end=" ")
| [
"robin25tech@gmail.com"
] | robin25tech@gmail.com |
f62ae8f7f1ee46d83a45239fa0760d60126d68ef | c9f64fdf0a51b2ec438c05f2a08796a056aac73b | /HW1_solution_example/integrate.py | bd368bcece228facd6870ec75371c28150b84b66 | [
"MIT"
] | permissive | MengbinZhu/pg2014 | dd5e3d766e8ffd6292a551e4559c9c36d065f177 | 3571b2be5df2a9f593f781375b4b8029ce0c0c9e | refs/heads/master | 2021-05-27T17:20:49.235056 | 2014-12-04T23:07:26 | 2014-12-04T23:07:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | # Rob Hetland
# 2014-10-14
# Homework 1, problem 2, trapezoidal integration
# Released under MIT license
import numpy as np
def trapz(f, dx=1.0):
"""Trapazoidal integration of function f, with spacing dx"""
f = np.asarray(f)
return dx * 0.5 * np.sum(f[1:] + f[:-1])
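# The rule implemented above, for samples f[0..N-1] at uniform spacing dx:
#   integral ~= dx * sum((f[i] + f[i+1]) / 2 for i in range(N - 1))
# numpy's built-in np.trapz(f, dx=dx) computes the same quantity and can
# serve as a cross-check.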
if __name__ == '__main__':
# test with a flat funciton, of value 1
f = np.ones(11)
print trapz(f) # should be 10
print trapz(f, dx=0.5) # should be 5
print trapz(f, dx=2.0) # should be 20
dx = 0.001
x = np.arange(0.0, 10.0+dx, dx)
y = np.sin(x)
print 'Should be close (give or take one..)'
print trapz(y, dx=dx)
print -np.cos(10.0) + 1 | [
"hetland@tamu.edu"
] | hetland@tamu.edu |
63b86d3db2a788557594680b422fe05b9c77afcf | d01f9ff2d7ba3c7c99158678adeaf082f3f15dbc | /model/cpn/ade.cpn.R50_v1c.v38.v2/train.py | 294cb1add0f70efdd177b711e5ca1fc5df2170d0 | [
"MIT"
] | permissive | akinoriosamura/TorchSeg-mirror | d8e76d99e80d55c2555f4f8f7a7fc3f30ef5dec4 | 34033fe85fc24015bcef7a92aad39d2a25a001a5 | refs/heads/master | 2021-06-18T15:47:00.946788 | 2019-10-26T04:46:07 | 2019-10-26T04:46:07 | 217,657,156 | 0 | 0 | MIT | 2021-06-08T20:36:44 | 2019-10-26T04:46:39 | Python | UTF-8 | Python | false | false | 6,088 | py | from __future__ import division
import os.path as osp
import sys
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from config import config
from dataloader import get_train_loader
from network import CPNet
from datasets import ADE
from utils.init_func import init_weight, group_weight
from engine.lr_policy import PolyLR
from engine.logger import get_logger
from engine.engine import Engine
from seg_opr.sync_bn import DataParallelModel, Reduce, BatchNorm2d
from seg_opr.seg_oprs import one_hot
try:
from apex.parallel import SyncBatchNorm, DistributedDataParallel
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex .")
logger = get_logger()
torch.manual_seed(config.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(config.seed)
parser = argparse.ArgumentParser()
with Engine(custom_parser=parser) as engine:
args = parser.parse_args()
cudnn.benchmark = True
if engine.distributed:
torch.cuda.set_device(engine.local_rank)
# data loader
train_loader, train_sampler = get_train_loader(engine, ADE)
# config network and criterion
criterion = nn.CrossEntropyLoss(reduction='mean',
ignore_index=-1)
if engine.distributed:
logger.info('Use the Multi-Process-SyncBatchNorm')
BatchNorm2d = SyncBatchNorm
else:
BatchNorm2d = BatchNorm2d
model = CPNet(config.num_classes, criterion=criterion,
pretrained_model=config.pretrained_model,
norm_layer=BatchNorm2d)
init_weight(model.business_layer, nn.init.kaiming_normal_,
BatchNorm2d, config.bn_eps, config.bn_momentum,
mode='fan_in', nonlinearity='relu')
# group weight and config optimizer
base_lr = config.lr
# if engine.distributed:
# base_lr = config.lr * engine.world_size
params_list = []
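# Two learning-rate groups: the ImageNet-pretrained backbone stays at the
# base LR, while the freshly initialised head ("business") layers train at
# 10x the base rate -- the usual schedule for segmentation heads.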
params_list = group_weight(params_list, model.backbone,
BatchNorm2d, base_lr)
for module in model.business_layer:
params_list = group_weight(params_list, module, BatchNorm2d,
base_lr * 10)
# config lr policy
total_iteration = config.nepochs * config.niters_per_epoch
lr_policy = PolyLR(base_lr, config.lr_power, total_iteration)
optimizer = torch.optim.SGD(params_list,
lr=base_lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
if engine.distributed:
if torch.cuda.is_available():
model.cuda()
model = DistributedDataParallel(model)
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DataParallelModel(model, engine.devices)
model.to(device)
engine.register_state(dataloader=train_loader, model=model,
optimizer=optimizer)
if engine.continue_state_object:
engine.restore_checkpoint()
optimizer.zero_grad()
model.train()
for epoch in range(engine.state.epoch, config.nepochs):
if engine.distributed:
train_sampler.set_epoch(epoch)
bar_format = '{desc}[{elapsed}<{remaining},{rate_fmt}]'
pbar = tqdm(range(config.niters_per_epoch), file=sys.stdout,
bar_format=bar_format)
dataloader = iter(train_loader)
for idx in pbar:
engine.update_iteration(epoch, idx)
minibatch = dataloader.next()
imgs = minibatch['data']
gts = minibatch['label']
imgs = imgs.cuda(non_blocking=True)
gts = gts.cuda(non_blocking=True)
b, h, w = gts.size()
scaled_gts = F.interpolate((gts.view(b, 1, h, w)).float(),
scale_factor=0.125,
mode="nearest")
b, c, h, w = scaled_gts.size()
scaled_gts = scaled_gts.squeeze_()
C = config.num_classes + 1
one_hot_gts = one_hot(scaled_gts, C).view(b, C, -1)
similarity_gts = torch.bmm(one_hot_gts.permute(0, 2, 1),
one_hot_gts)
gts = gts - 1
loss = model(imgs, gts, similarity_gts)
# reduce the whole loss over multi-gpu
if engine.distributed:
dist.all_reduce(loss, dist.ReduceOp.SUM)
loss = loss / engine.world_size
else:
loss = Reduce.apply(*loss) / len(loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
current_idx = epoch * config.niters_per_epoch + idx
lr = lr_policy.get_lr(current_idx)
optimizer.param_groups[0]['lr'] = lr
optimizer.param_groups[1]['lr'] = lr
for i in range(2, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr * 10
print_str = 'Epoch{}/{}'.format(epoch, config.nepochs) \
+ ' Iter{}/{}:'.format(idx + 1, config.niters_per_epoch) \
+ ' lr=%.2e' % lr \
+ ' loss=%.2f' % loss.item()
pbar.set_description(print_str, refresh=False)
if (epoch >= config.nepochs - 20) or (
epoch % config.snapshot_iter == 0):
if engine.distributed and (engine.local_rank == 0):
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
elif not engine.distributed:
engine.save_and_link_checkpoint(config.snapshot_dir,
config.log_dir,
config.log_dir_link)
| [
"osamura.akinori@gmail.com"
] | osamura.akinori@gmail.com |
ad81e48cc1fae84b6d8c962232aab56ba3d9292b | b8485ced30894816aa3f4e12574764acc4ff87df | /thecart/shop/migrations/0008_auto_20210321_1546.py | 771c468c5709e40c9011ec8db8737d9ec3e26cc6 | [] | no_license | gauravsalwatkar/E-commerce-website | 74bdfbba88373f9d8589d200ef3ce7a36c5c216d | 4de236a74ce465d7d4205c458a397123b5fc3e83 | refs/heads/master | 2023-04-03T16:29:23.444752 | 2021-04-05T05:12:07 | 2021-04-05T05:12:07 | 354,724,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | # Generated by Django 3.1.1 on 2021-03-21 10:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0007_auto_20210321_1543'),
]
operations = [
migrations.RenameModel(
old_name='Order',
new_name='Orders',
),
]
| [
"noreply@github.com"
] | noreply@github.com |
73efba33a53e27b03f156c4fa1fef480aa463f10 | 8de7a1bb8e4939f37d77f66e90376fd2727e1972 | /trade.py | ccd09807bfdbfcb1e1110d050984962a54880ab0 | [] | no_license | webclinic017/stonk-1 | 8228a5da5ab47f2f9f7a2511e7ffef79add15f47 | fbf162b1f2e9a414d6262a6046f0f897857fb127 | refs/heads/master | 2023-08-22T02:37:44.036641 | 2021-10-13T02:33:17 | 2021-10-13T02:33:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,546 | py | # interface between the code and the Interactive Brokers API
# imports analysis and data
import analysis
from data import mktPrice
# Interactive Brokers imports
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from ibapi.order import *
# time and threading for optimisation
import time
import threading
# interactive brokers class
class IBapi(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
def nextValidId(self, orderId: int):
super().nextValidId(orderId)
self.nextorderId = orderId
print('The next valid order id is: ', self.nextorderId)
def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId, whyHeld, mktCapPrice):
print('orderStatus - orderid:', orderId, 'status:', status, 'filled',
filled, 'remaining', remaining, 'lastFillPrice', lastFillPrice)
def openOrder(self, orderId, contract, order, orderState):
print('openOrder id:', orderId, contract.symbol, contract.secType, '@', contract.exchange,
':', order.action, order.orderType, order.totalQuantity, orderState.status)
def execDetails(self, reqId, contract, execution):
print('Order Executed: ', reqId, contract.symbol, contract.secType, contract.currency,
execution.execId, execution.orderId, execution.shares, execution.lastLiquidity)
def run_loop():
app.run()
# create the order
def stockOrder(symbol):
contract = Contract()
contract.symbol = symbol
contract.secType = 'STK'
contract.currency = 'USD'
contract.exchange = 'ISLAND'
return contract
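# note: 'ISLAND' in stockOrder() routes directly to NASDAQ; 'SMART' would
# let Interactive Brokers pick the venue instead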
app = IBapi()
app.connect('127.0.0.1', 4002, 420)
app.nextorderId = None
# start the socket in a thread
api_thread = threading.Thread(target=run_loop, daemon=True)
api_thread.start()
# check if the API is connected via orderid
while True:
if isinstance(app.nextorderId, int):
print('connected')
break
else:
print('waiting for connection')
time.sleep(1)
# ticker to trade -- 'AAPL' is only an illustrative placeholder here; the
# original script referenced `ticker` without ever defining it
ticker = 'AAPL'
# create order object
order = Order()
order.action = 'BUY'
order.totalQuantity = 1
order.orderType = 'LMT'
order.lmtPrice = mktPrice(ticker)
# determines if stock is worth buying by comparing current price with predicted price
if (analysis.labelSet[-1] > mktPrice(ticker)):
app.placeOrder(app.nextorderId, stockOrder(ticker), order)
else:
print('No action!')
time.sleep(3)
app.disconnect()
| [
"danieldunc@hotmail.com"
] | danieldunc@hotmail.com |
c9744084c4db946f615b3b38f72857617c7d3169 | 109e3c93cd7de41fd5fbfad242bb39ab267dd290 | /examples/twitter_server.py | 1dfbe86ea28f3944fad6d8ae27766c38ed72f461 | [
"Apache-2.0"
] | permissive | scholer/pygephi_graphstreaming | 1bab016dc65c4f521be72d8038094bc39eb6f5f1 | eace8d34a43badb39922b7ee764bac453ecfadc7 | refs/heads/master | 2021-01-15T16:28:13.646093 | 2015-10-08T22:31:07 | 2015-10-08T22:31:07 | 43,645,754 | 0 | 0 | null | 2015-10-04T17:44:08 | 2015-10-04T17:44:07 | null | UTF-8 | Python | false | false | 9,047 | py | #!/usr/bin/python
# coding: utf-8
#
# Copyright (C) 2012 André Panisson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This script starts an HTTP server that connects to the Twitter Streaming API and
shows the Twitter data in Graph Streaming format,
with the users as nodes and retweets as edges.
You have to install the tweepy client (http://joshthecoder.github.com/tweepy/)
in order to run this script.
To connect to the server with Gephi, you must install Gephi with
the Graph Streaming plugin.
1. Start Gephi
2. Go to the tab Streaming,right-click on Client and click on "Connect to Stream"
3. Enter the Source URL http://localhost:8181/?q=twitter (or other keywords if the
server is filtering other keywords) and click OK
The nodes and edges start to appear in the graph visualization. You can run
the Force Atlas layout in order to get a better layout.
Usage: server.py [options]
Options:
-h, --help show this help message and exit
-k CONSUMER_KEY, --consumer_key=CONSUMER_KEY
Twitter consumer key for OAuth authentication
-K CONSUMER_SECRET, --consumer_secret=CONSUMER_SECRET
Twitter consumer secret
-t ACCESS_TOKEN, --access_token=ACCESS_TOKEN
Twitter access token
-T ACCESS_TOKEN_SECRET, --access_token_secret=ACCESS_TOKEN_SECRET
Twitter access token secret
-q QUERY, --query=QUERY
Comma-separated list of keywords
-l LOG, --log=LOG Output log of streaming data
-s SERVERPORT, --serverport=SERVERPORT
HTTP server port
Created on Nov 10, 2010
@author: panisson
'''
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import urlparse
import tweepy
import re
from pygephi import GephiFileHandler
import threading
import Queue
import socket
import optparse
import sys
import time
api = tweepy.API()
active_queues = []
class Status(object):
def __init__(self, status_id, source, target, text, date):
self.status_id = status_id
self.source = source
self.target = target
self.text = text
self.date = date
class StreamingListener(tweepy.StreamListener):
def __init__(self, *args, **kwargs):
tweepy.StreamListener.__init__(self, *args, **kwargs)
self.known_users = {}
self.stream_log = None
def on_data(self, data):
self.stream_log.write(data)
self.stream_log.flush()
super(StreamingListener, self).on_data(data)
def on_status(self, status):
print status.text
m = re.search('(?<=RT\s@)\w+', status.text)
if m:
source_user = m.group(0).lower()
target_user = status.user.screen_name
status_id = status.id
date = status.created_at
text = status.text
dispatch_event(Status(status_id, source_user, target_user, text, date))
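# Fan-out: each connected HTTP client owns its own Queue. The single
# Twitter listener thread pushes every parsed retweet into all active
# queues, so one slow client never blocks the stream or the other clients.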
def dispatch_event(e):
for q in active_queues:
q.put(e)
class RequestProcessor():
def __init__(self, parameters, out):
self.known_users = {}
if "q" in parameters:
q = parameters["q"][0]
self.terms = q.split(",")
print "Request for retweets, query '%s'"%q
else:
self.terms = None
print "Request for retweets, no query string"
self.handler = GephiFileHandler(out)
def process(self, status):
messages = []
found = False
if (self.terms):
for term in self.terms:
if re.search(term, status.text.lower()):
found = True
break
if not found:
return messages
default_node_attr = {'size':5, 'r':84./255., 'g':148./255., 'b':183./255.}
if status.source not in self.known_users:
self.known_users[status.source] = status.source
attributes = default_node_attr.copy()
attributes['label'] = status.source
self.handler.add_node(status.source, **attributes)
if status.target not in self.known_users:
self.known_users[status.target] = status.target
attributes = default_node_attr.copy()
attributes['label'] = status.target
self.handler.add_node(status.target, **attributes)
attributes = {'directed':True, 'weight':2.0, 'date':str(status.date)}
self.handler.add_edge(status.status_id, status.source, status.target, **attributes)
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_POST(self):
pass
def do_GET(self):
param_str = urlparse.urlparse(self.path).query
parameters = urlparse.parse_qs(param_str, keep_blank_values=False)
self.queue = Queue.Queue()
active_queues.append(self.queue)
self.wfile.write("HTTP/1.1 200 OK\nContent-Type: application/json\n\n")
request_processor = RequestProcessor(parameters, self.wfile)
while True:
status = self.queue.get()
if status is None: break
try:
request_processor.process(status)
except socket.error:
print "Connection closed"
active_queues.remove(self.queue)
return
class Collector(threading.Thread):
def __init__(self, options):
self.options = options
threading.Thread.__init__(self)
def run(self):
q = self.options.query.split(",")
# q = [e+ ' rt' for e in q]
print "Streaming retweets for query '%s'"%q
listener = StreamingListener()
def on_error(status_code):
if status_code == 401:
raise Exception("Authentication error")
listener.on_error = on_error
listener.stream_log = file(self.options.log, 'a')
auth = tweepy.OAuthHandler(self.options.consumer_key, self.options.consumer_secret)
auth.set_access_token(self.options.access_token, self.options.access_token_secret)
stream = tweepy.streaming.Stream(auth, listener, timeout=60.0)
while (True):
try:
stream.filter(track=q)
except socket.gaierror:
print "Stream closed"
time.sleep(60)
except Exception, e:
print str(e)
time.sleep(60)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def start(self):
self.serve_forever()
def stop(self):
self.socket.close()
def parseOptions():
parser = optparse.OptionParser()
parser.add_option("-k", "--consumer_key", type="string", dest="consumer_key", help="Twitter consumer key for OAuth authentication", default='undefined')
parser.add_option("-K", "--consumer_secret", type="string", dest="consumer_secret", help="Twitter consumer secret", default='undefined')
parser.add_option("-t", "--access_token", type="string", dest="access_token", help="Twitter access token", default='undefined')
parser.add_option("-T", "--access_token_secret", type="string", dest="access_token_secret", help="Twitter access token secret", default='undefined')
parser.add_option("-q", "--query", type="string", dest="query", help="Comma-separated list of keywords", default="twitter")
parser.add_option("-l", "--log", type="string", dest="log", help="Output log of streaming data", default="/tmp/stream.log")
parser.add_option("-s", "--serverport", type="int", dest="serverport", help="HTTP server port", default=8181)
(options, _) = parser.parse_args()
if options.consumer_key == 'undefined' or options.consumer_secret == 'undefined':
parser.error("Twitter consumer key and consumer secret are mandatory")
if options.access_token == 'undefined' or options.access_token_secret == 'undefined':
parser.error("Twitter access token and access token secret are mandatory")
return options
def main():
options = parseOptions()
collector = Collector(options)
collector.setDaemon(True)
collector.start()
try:
server = ThreadedHTTPServer(('', options.serverport), RequestHandler)
print 'Test server running...'
server.start()
except KeyboardInterrupt:
print 'Stopping server...'
server.stop()
dispatch_event(None)
sys.exit(0)
if __name__ == '__main__':
main()
| [
"panisson@gmail.com"
] | panisson@gmail.com |
16e79e32970b8fe13d66dc766354d45e07bc578d | 91df76643a7d942f0ae11fa89c2dfcbd3feadfa9 | /utils/dump_helpers.py | db66dbc830479a25545b07185b394d09ca21e9a3 | [
"MIT"
] | permissive | Sixzero/yolov3-tf2 | c6bb4a826872fec74b5c77d68b34962ee0876d7c | 404d937481f16804a80ce605ce1f8704d6775e56 | refs/heads/master | 2020-12-01T20:02:21.021216 | 2020-01-01T15:03:34 | 2020-01-01T15:03:40 | 230,750,981 | 0 | 0 | MIT | 2019-12-29T13:01:53 | 2019-12-29T13:01:52 | null | UTF-8 | Python | false | false | 47 | py | ../../diabtrend-treasures/utils/dump_helpers.py | [
"havliktomi@hotmail.com"
] | havliktomi@hotmail.com |
122f637e84c5e5004fd039a44edc78f44b846fcc | 4be65fc46dbb6e56834796f2f143256edf1bf03e | /chapter_11/language_survey.py | 5859296df3b5545d5a750877a40578a628db69c3 | [] | no_license | Mozes721/PythonCrashCourse | 78cbb32695d618a14f06f7ae71780b2604f6c7bf | 9c823cc9a75bd6a282bfbbe51aaccf3d21069177 | refs/heads/master | 2021-07-15T10:46:55.979655 | 2020-06-01T14:17:45 | 2020-06-01T14:17:45 | 158,593,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from chapter_11.survey import AnonymousSurvey
question = "What language did you first learn to speak?"
my_survey = AnonymousSurvey(question)
my_survey.show_questions()
print("Enter 'q' to quit at any time/n")
while True:
response = input("Language ")
if response == 'q':
break
my_survey.store_responses(response)
print("Thanks for your survey!")
my_survey.show_responses()
| [
"MozesTheGreat@yahoo.com"
] | MozesTheGreat@yahoo.com |
b2494838e2dd49d90b6e70323d72d2dcb16bfb95 | 0d97aede474efb01d893eba0180bc1cb5170687d | /traceml/setup.py | 61d6ce0b266d06cf5c25160a9d0f3f4d5dc59fc3 | [
"Apache-2.0"
] | permissive | jinheeson1008/tensorflow-lstm-regression | ae7bcd3e48a36cc8bde34c2bd3c2df4f6d24d4b3 | f31fc1181a5696a25f5737398ee0715c24626248 | refs/heads/master | 2022-08-10T13:42:21.695155 | 2022-03-26T16:30:17 | 2022-03-26T16:30:25 | 79,639,036 | 0 | 0 | Apache-2.0 | 2023-09-04T23:21:41 | 2017-01-21T10:26:51 | Python | UTF-8 | Python | false | false | 2,716 | py | #!/usr/bin/env python
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
def read_readme():
if not os.path.exists("./README.md"):
return ""
with open("./README.md") as f:
return f.read()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
with open(os.path.join("./traceml/pkg.py"), encoding="utf8") as f:
pkg = {}
exec(f.read(), pkg)
with open("requirements/dev.txt") as requirements_file:
dev_requirements = requirements_file.read().splitlines()
extra = {
"polyaxon": ["polyaxon"],
"dev": dev_requirements,
"all": [
"scikit-learn",
"Pillow",
"matplotlib",
"moviepy",
"plotly",
"bokeh",
"pandas",
"altair",
],
}
setup(
name=pkg["NAME"],
version=pkg["VERSION"],
description=pkg["DESC"],
long_description=read_readme(),
long_description_content_type="text/markdown",
maintainer=pkg["AUTHOR"],
maintainer_email=pkg["EMAIL"],
author=pkg["AUTHOR"],
author_email=pkg["EMAIL"],
url=pkg["URL"],
license=pkg["LICENSE"],
platforms="any",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
keywords=[
"polyaxon",
"aws",
"s3",
"microsoft",
"azure",
"google cloud storage",
"gcs",
"deep-learning",
"machine-learning",
"data-science",
"neural-networks",
"artificial-intelligence",
"ai",
"reinforcement-learning",
"kubernetes",
"aws",
"microsoft",
"azure",
"google cloud",
"tensorFlow",
"pytorch",
"matplotlib",
"plotly",
"visualization",
"analytics",
],
install_requires=[],
extras_require=extra,
python_requires=">=3.5",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
tests_require=["pytest"],
cmdclass={"test": PyTest},
)
| [
"ci@polyaxon.com"
] | ci@polyaxon.com |
d4d27d560f9cb70b7b0cdfe713f2b5481ce676e5 | facb8509e51a388030550574ab414d324374c8c8 | /iwlearn/models/based_on_keras.py | fcd6cf62d08b8356a6fb89099dd7c0a5d5ccbe06 | [
"Apache-2.0"
] | permissive | mfridental/iwlearn | a257092053d8a6c0f514713f83f0fa15c9d11c2e | a3253f9f59761663f59768eb26c9dc047504cc4e | refs/heads/main | 2022-07-31T05:58:43.217852 | 2022-07-03T21:31:28 | 2022-07-03T21:31:28 | 214,254,342 | 3 | 0 | Apache-2.0 | 2022-07-03T21:31:29 | 2019-10-10T18:16:52 | Python | UTF-8 | Python | false | false | 7,606 | py | # TODO: KERAS MODELS ARE TEMPORARY BROKEN DUE TO TENSORFLOW API CHANGE
# # -*- coding: utf-8 -*-
# import sys
# import logging
# import dill
# import ujson as json
# import os
# import datetime as dt
#
# import numpy as np
# import tensorflow.compat.v1.keras as k
# from keras.preprocessing.image import Iterator
# import tensorflow as tf
#
# from sklearn.metrics import accuracy_score, \
# mean_absolute_error # keras metrics do not deliver a single value, but a tensor - not suitable for
# # BaseModel.evaluate
#
# from iwlearn.base import BaseModel, ScorePrediction, TrivialFeature
#
#
# class KerasClassifierPredictionFactory(object):
# """
# Needed to create KerasClassifierPredictions
# """
#
# def create(self, scores, predictor):
# return KerasClassifierPrediction(scores, predictor)
#
#
# class KerasClassifierPrediction(ScorePrediction):
# """
# We need to create a special prediction because Keras classifiers use softmax output with shape (numclasses,),
# while normal classifiers have output shape () - scalar
# """
#
# def __init__(self, scores, predictor):
# ScorePrediction.__init__(self, scores, predictor)
# if self.scores is not None and 0 < len(self.scores):
# self.prediction = np.zeros_like(self.scores)
# self.prediction[np.argmax(self.scores)] = 1
#
#
# class BaseKerasModel(BaseModel):
# def __init__(self, name, features, sampletype, predictionfactory=None, labelkey=None):
# BaseModel.__init__(self, name, features, sampletype, predictionfactory, labelkey)
# self.experiment_name = self.__class__.__name__
# self.custom_objects = {}
# self.kerasmodel = None
# self.kerasmodelfile = None
#
# @classmethod
# def load(cls, filepath):
# result = BaseModel.load(filepath)
# if result.kerasmodelfile is not None:
# result.kerasmodel = k.models.load_model(result.kerasmodelfile, compile=True,
# custom_objects=result.custom_objects)
# return result
#
# def train_impl(self, dataset, **configuration):
# self._createkerasmodel()
#
# logging.info('Run training loop')
#
# batch_size = 32
# num_epochs = 100
# image_generator = None
# shuffle = True
# seed = dt.datetime.now().microsecond
# callbacks = [
# k.callbacks.TensorBoard(log_dir='logs/%s' % self.experiment_name,
# histogram_freq=0,
# batch_size=32,
# write_graph=True,
# write_grads=False,
# write_images=False,
# embeddings_freq=0,
# embeddings_layer_names=None,
# embeddings_metadata=None),
# k.callbacks.EarlyStopping(
# monitor='categorical_accuracy',
# min_delta=0.01,
# patience=3,
# verbose=1)]
#
# if 'batch_size' in configuration:
# batch_size = configuration['batch_size']
#
# if 'num_epochs' in configuration:
# num_epochs = configuration['num_epochs']
#
# if 'callbacks' in configuration:
# callbacks = configuration['callbacks']
#
# if 'image_generator' in configuration:
# image_generator = configuration['image_generator']
#
# if 'shuffle' in configuration:
# shuffle = configuration['shuffle']
#
# if 'seed' in configuration:
# seed = configuration['seed']
#
# sess = tf.compat.v1.Session()
#
# with sess.as_default():
# k.backend.set_session(sess)
#
# init_op = tf.global_variables_initializer()
# sess.run(init_op)
#
# self.kerasmodel.fit_generator(KerasGenerator(dataset, batch_size, image_generator, shuffle, seed),
# verbose=1,
# epochs=num_epochs,
# callbacks=callbacks)
#
# def _predict_scores_impl(self, X):
# return self.kerasmodel.predict(X, verbose=1)
#
# def _save(self, folder, creator):
# filepath_modelname = '%s/%s' % (folder, self.tag,)
#
# if not os.path.exists(filepath_modelname):
# os.makedirs(filepath_modelname)
#
# filepath_model = filepath_modelname + '.h5'
# self.kerasmodel.save(filepath_model)
# self.kerasmodelfile = filepath_model
#
# self.meta_infos["filepath_model"] = filepath_model
#
# BaseModel._save(self, folder, creator)
#
# return filepath_modelname
#
# def _createkerasmodel(self):
# raise NotImplementedError
#
#
# class KerasClassifierLabel(TrivialFeature):
# def __init__(self, path, output_shape):
# TrivialFeature.__init__(self, path, output_shape)
#
# def get(self, sample):
# y = TrivialFeature.get(self, sample)
# if self.output_shape == ():
# return k.utils.to_categorical(y, self._get_width())
# return y
#
#
# class BaseKerasClassifierModel(BaseKerasModel):
# def __init__(self, name, features, sampletype, predictionfactory=None,
# labelkey=None):
# if predictionfactory is None:
# predictionfactory = KerasClassifierPredictionFactory()
# BaseKerasModel.__init__(self, name, features, sampletype, predictionfactory, '')
# self.output_shape = (numclasses,)
# self.metrics = [accuracy_score]
# if labelkey == '':
# self.labels = []
# elif labelkey is None:
# self.labels = [KerasClassifierLabel(self.task + 'Label', self.output_shape)]
# else:
# self.labels = [KerasClassifierLabel(labelkey, self.output_shape)]
#
#
# class BaseKerasRegressorModel(BaseKerasModel):
# def __init__(self, name, features, sampletype, numoutputs, prediction_factory=None, labelkey=None):
# BaseModel.__init__(self, name, features, sampletype, prediction_factory, labelkey)
# self.output_shape = (numoutputs,)
# self.metrics = [mean_absolute_error]
#
#
# class KerasGenerator(Iterator):
# def __init__(self, dataset, batch_size, image_generator, shuffle, seed):
# self.dataset = dataset
# self.batch_size = batch_size
# self.image_generator = image_generator
# self.fourdshape = None
# if self.image_generator is not None and len(self.dataset.sampleshape) > 4:
# self.fourdshape = (int(np.prod(self.dataset.sampleshape[0:-3])),) + tuple(self.dataset.sampleshape[-3:])
# Iterator.__init__(self, len(self.dataset), self.batch_size, shuffle, seed)
#
# def _get_batches_of_transformed_samples(self, index_array):
# x, y = self.dataset.get_samples(index_array)
#
# if self.image_generator is not None:
# if self.fourdshape is not None:
# x = np.reshape(x, self.fourdshape)
# x = [self.image_generator.random_transform(imagetensor) for imagetensor in x]
# x = [self.image_generator.standardize(imagetensor) for imagetensor in x]
# if self.fourdshape is not None:
# x = np.reshape(x, self.dataset.sampleshape)
#
# return x, y
#
#
# if __name__ == "__main__":
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
| [
"maxim.fridental@paessler.com"
] | maxim.fridental@paessler.com |
37a72d6160014af204fb5ae6c5c9eca84b4d6906 | f19cc4e99b265ae41ad6e5c16f023c1e68d11126 | /Stess.py | 7a80a65c9ad9ab88157b1b83af91f42ab902ccf3 | [] | no_license | aezakmi007/CPU-STRESS-ALERT-SYSTEM-IOT | b9e3cdbed265f5a6b8c1839711ddf9fcd12453dd | bfb7f487e50c6aa6abd1a2a11c8f54b179f494ef | refs/heads/main | 2023-03-31T23:29:41.990723 | 2021-04-11T06:04:34 | 2021-04-11T06:04:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import psutil
from boltiot import Bolt
api_key = "a1d04b0c-01e4-4542-bafa-116090c8f0ae"
device_id = "BOLT293335"
cpu_Threshold = 0.4  # note: psutil.cpu_percent() returns a 0-100 percentage, so 0.4 means 0.4% CPU; use 40 if a 40% threshold was intended
clientId = Bolt(api_key, device_id)
interval = 5
def control_green_color(pin, value):
response = clientId.digitalWrite(pin, value)
def control_red_color(pin, value):
response = clientId.digitalWrite(pin,value)
while True:
cpu_usage = psutil.cpu_percent( interval = interval )
print("CPU usage : ", cpu_usage)
if cpu_usage > cpu_Threshold:
control_green_color('0','LOW')
control_red_color('1', 'HIGH')
control_red_color('2', 'LOW')
else:
control_green_color('0', 'HIGH')
control_red_color('1', 'LOW')
| [
"Abdullah9695"
] | Abdullah9695 |
94bb2d5431ce4313c83f6d758e2f635450d23874 | 6d5bc5e2bff976f8c2057e311be2e7a4a4e327e5 | /main.py | 8b4079c7c8e8025ec00bfe8d5aa0e85745f3742e | [] | no_license | vikasnataraja/Predicting-Adult-Census-Income-using-Machine-Learning | 8393c377fcc24a62adfc00dffb4a8818883432eb | 617b5e910ae015393d08765d95d6aedb2a05d5b5 | refs/heads/master | 2020-05-20T01:57:11.882412 | 2019-05-07T04:51:59 | 2019-05-07T04:51:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,443 | py | #!/usr/bin/env python
# coding: utf-8
# ## Predicting Adult Census Income
#
# Using only data from the training set, I aim to predict incomes for the test set without knowing the
# test labels. To work around this uncertainty, I use cross-validation, which lets me tune the hyperparameters.
# In[ ]:
import math
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.utils import shuffle
from sklearn.ensemble import RandomForestClassifier,BaggingClassifier,GradientBoostingClassifier,AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier,ExtraTreeClassifier
from sklearn.svm import SVC
# ### Constructing the dataframe using pandas
#
# * The `formatDataset` function uses pandas to create a dataframe for both test and train data.
# * I am using pandas because of the ease of use and clear labels
# In[ ]:
def formatDataset(data,listofLabels,numericLabels):
"""
This function takes in the data(test or train), a list of labels for the features and
another parameter for the labels containing the numeric data (also as a list)
"""
# create a dataframe using pandas, the labels are the names of features
df = pd.DataFrame(data,columns=listofLabels)
# create a duplicate, just in case
dataset = pd.DataFrame(data,columns=listofLabels)
    # for the data that is already numerical, convert to float
dataset[numericLabels] = dataset[numericLabels].astype('float64')
# get the dataframe that is not numerical
nonNumerical = dataset.drop(columns=numericLabels)
# for the non-numeric data, map the data to discrete values.
# (I tried automatic numbering but that meant the mapping for training and testing data would be different)
# so I created separate mapping values
workclassmap = {'Private': 0,'Self-emp-not-inc': 1,'Local-gov': 2,'?': 3,
'State-gov': 4,'Self-emp-inc': 5,'Federal-gov': 6,'Without-pay': 7,'Never-worked': 8}
educationmap = {'HS-grad': 0,'Some-college': 1,'Bachelors': 2,'Masters': 3,'Assoc-voc': 4,
'11th': 5,'Assoc-acdm': 6,'10th': 7,'7th-8th': 8,'Prof-school': 9,
'9th': 10,'12th': 11,'Doctorate': 12,'5th-6th': 13,'1st-4th': 14,'Preschool': 15}
marriedmap = {'Married-civ-spouse': 0,'Never-married': 1,'Divorced': 2,'Separated': 3,'Widowed': 4,
'Married-spouse-absent': 5,'Married-AF-spouse': 6}
occupationmap = {'Prof-specialty': 0,'Craft-repair': 1,'Exec-managerial': 2,'Adm-clerical': 3,
'Sales': 4,'Other-service': 5,'Machine-op-inspct': 6,'?': 7,'Transport-moving': 8,
'Handlers-cleaners': 9,'Farming-fishing': 10,'Tech-support': 11,'Protective-serv': 12,
'Priv-house-serv': 13,'Armed-Forces': 14}
relationmap = {'Husband': 0,'Not-in-family': 1,'Own-child': 2,'Unmarried': 3,
'Wife': 4,'Other-relative': 5}
racemap = {'White': 0,'Black': 1,'Asian-Pac-Islander': 2,'Amer-Indian-Eskimo': 3,'Other': 4}
sexmap = {'Male': 0, 'Female': 1}
countrymap = {'United-States': 0,'Mexico': 1,'?': 2,'Philippines': 3,'Germany': 4,'Canada': 5,
'Puerto-Rico': 6,'El-Salvador': 7,'India': 8,'Cuba': 9,'England': 10,'Jamaica': 11,
'South': 12,'China': 13,'Italy': 14,'Dominican-Republic': 15,'Vietnam': 16,'Guatemala': 17,
'Japan': 18,'Poland': 19,'Columbia': 20,'Taiwan': 21,'Haiti': 22,'Iran': 23,'Portugal': 24,
'Nicaragua': 25,'Peru': 26,'France': 27,'Greece': 28,'Ecuador': 29,'Ireland': 30,'Hong': 31,
'Cambodia': 32,'Trinadad&Tobago': 33,'Thailand': 34,'Laos': 35,'Yugoslavia': 36,
'Outlying-US(Guam-USVI-etc)': 37,'Hungary': 38,'Honduras': 39,'Scotland': 40,'Holand-Netherlands': 41}
# apply the maps to the non-numeric data
nonNumerical['workclass']=nonNumerical['workclass'].map(workclassmap)
nonNumerical['education']=nonNumerical['education'].map(educationmap)
nonNumerical['marital-status']=nonNumerical['marital-status'].map(marriedmap)
nonNumerical['occupation']=nonNumerical['occupation'].map(occupationmap)
nonNumerical['relationship']=nonNumerical['relationship'].map(relationmap)
nonNumerical['race']=nonNumerical['race'].map(racemap)
nonNumerical['sex']=nonNumerical['sex'].map(sexmap)
nonNumerical['native-country']=nonNumerical['native-country'].map(countrymap)
# add back the columns which were numeric in the first place
nonNumerical.insert(loc=0,column='age',value=dataset['age'])
nonNumerical.insert(loc=2,column='fnlwgt',value=dataset['fnlwgt'])
nonNumerical.insert(loc=4,column='education-num',value=dataset['education-num'])
nonNumerical.insert(loc=10,column='capital-gain',value=dataset['capital-gain'])
nonNumerical.insert(loc=11,column='capital-loss',value=dataset['capital-loss'])
nonNumerical.insert(loc=12,column='hours-per-week',value=dataset['hours-per-week'])
# check to see if it is training data or test data
if len(data[0])==15:
nonNumerical.insert(loc=14,column='income',value=dataset['income'])
dataset=nonNumerical
if len(data[0])==15:
X = dataset.drop(columns='income')
y = dataset['income']
# return X,y as well as the original dataframe for any use
return X,y,df
else:
return dataset,df
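# Note (added): the hand-written maps above guarantee one shared encoding for train and test.
# A common alternative (a sketch, not used in this notebook; train_df/test_df are placeholder
# names) is to fit a single encoder on the training column and reuse it:
#   from sklearn.preprocessing import OrdinalEncoder
#   enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
#   train_codes = enc.fit_transform(train_df[['workclass']])
#   test_codes = enc.transform(test_df[['workclass']])  # identical mapping on test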
# ### Pass train.data and test.data through `formatDataset` ###
#
# * Pass the training and testing data
# * The function `formatDataset` can distinguish between them
# * It returns X,y and full dataframe in case of training set, X and full dataframe in case of testing set
# In[ ]:
"""
use the formatDataset function to construct pandas dataframe
for both train and testing
"""
train_data = []
with open('train.data', 'r') as reading:
train_input = reading.read().split('\n')
for row in train_input:
    if row:  # skip blank lines: a trailing newline would otherwise yield an empty row
        train_data.append(row.split(', '))
train_labels = ['age','workclass','fnlwgt','education','education-num','marital-status',
'occupation','relationship','race','sex','capital-gain','capital-loss',
'hours-per-week','native-country','income']
train_numericLabels = ['age','fnlwgt','education-num','capital-gain',
'capital-loss','hours-per-week','income']
X_train,y_train,df = formatDataset(train_data,train_labels,train_numericLabels)
# do the same for testing data, except no y for test
test_data =[]
with open('test.data', 'r') as reading_test:
test_input = reading_test.read().split('\n')
for row in test_input:
    if row:  # skip blank lines, as above
        test_data.append(row.split(', '))
test_labels = ['age','workclass','fnlwgt','education','education-num','marital-status',
'occupation','relationship','race','sex','capital-gain','capital-loss',
'hours-per-week','native-country']
test_numericLabels = ['age','fnlwgt','education-num','capital-gain','capital-loss','hours-per-week']
X_test,testframe = formatDataset(test_data,test_labels,test_numericLabels)
# ### Classifiers
#
# * Use the X and y from the previous cell to find a classifier that has the best cross-validation score.
# * I am using cross-validation because test_y is not available.
#
#
# In[ ]:
model = GradientBoostingClassifier(learning_rate=0.99999998,n_estimators=232,loss='exponential',
max_features=8,max_depth=3,
min_samples_split=9100/10000)
scores = cross_val_score(model, X_train, y_train, cv=5)
print(scores)
print(sum(scores))
print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# In[ ]:
clf = RandomForestClassifier(n_estimators=99,max_features=9,bootstrap=True,max_depth=16,
random_state=42,criterion='entropy',oob_score=True)
scores = cross_val_score(clf, X_train, y_train, cv=5)
#print(i)
print(scores)
print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# In[ ]:
ada = AdaBoostClassifier(n_estimators=80,learning_rate=1.0,random_state=42)
scores = cross_val_score(ada, X_train, y_train, cv=5)
#print(i)
print(scores)
print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# In[ ]:
bag = BaggingClassifier(bootstrap=0.9,oob_score=True,n_estimators=90,max_features=12)
scores = cross_val_score(bag, X_train, y_train, cv=5)
#print(i)
print(scores)
print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# In[ ]:
tree = DecisionTreeClassifier(max_features=13,max_depth=9)
scores = cross_val_score(tree, X_train, y_train, cv=5)
#print(i)
print(scores)
print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# #### Fit the model to the training set and predict y for the test set
#
# * After finding the best model with tuning finished, fit the classifier to X and y.
# * Predict the y for the test set
# * Save the y_pred to a .csv file
# In[ ]:
model.fit(X=X_train,y=y_train)
y_pred = model.predict(X=X_test)
save_y = pd.DataFrame(y_pred,columns=['Category'])
save_y.to_csv('gradboost.csv',index=True)
# ### END OF DOCUMENT ###
| [
"viha4393@colorado.edu"
] | viha4393@colorado.edu |
582e0a4f9404ffe497957148713488fb28333b04 | 49f23f530d0cda7aadbb27be9c5bdefaa794d27f | /server/common_models/user.py | a5b3f4d6f5e5d6819209dd9b15cdda3c1a15dacb | [
"MIT"
] | permissive | Soopro/totoro | 198f3a51ae94d7466136ee766be98cb559c991f1 | 6be1af50496340ded9879a6450c8208ac9f97e72 | refs/heads/master | 2020-05-14T09:22:21.942621 | 2019-08-03T20:55:23 | 2019-08-03T20:55:23 | 181,738,167 | 0 | 1 | MIT | 2019-10-29T13:43:24 | 2019-04-16T17:42:16 | Python | UTF-8 | Python | false | false | 2,223 | py | # coding=utf-8
from __future__ import absolute_import
from document import BaseDocument, ObjectId, INDEX_DESC
from utils.misc import now
class User(BaseDocument):
STATUS_BEGINNER, STATUS_VIP, STATUS_BANNED = (0, 1, 2)
MAX_QUERY = 120
structure = {
'login': unicode,
'password_hash': unicode,
'openid': unicode,
'unionid': unicode,
'credit': int,
'meta': dict,
'creation': int,
'updated': int,
'status': int,
}
sensitive_fields = ['meta']
required_fields = ['openid']
default_values = {
'login': u'',
'password_hash': u'',
'unionid': u'',
'credit': 0,
'meta': {},
'creation': now,
'updated': now,
'status': STATUS_BEGINNER
}
indexes = [
{
'fields': ['openid'],
'unique': True,
},
{
'fields': ['login'],
'unique': True,
},
{
'fields': ['creation'],
},
{
'fields': ['status'],
}
]
def find_all(self):
return self.find().sort('creation', INDEX_DESC)
    def find_activated(self):
        # the original queried self.STATUS_ACTIVATED, which this class never defines
        # (only BEGINNER/VIP/BANNED exist); STATUS_VIP is assumed here
        return self.find({
            'status': self.STATUS_VIP
}).sort('creation', INDEX_DESC).limit(self.MAX_QUERY)
def find_by_status(self, status):
return self.find({
'status': status
}).sort('creation', INDEX_DESC).limit(self.MAX_QUERY)
def find_one_by_id(self, user_id):
return self.find_one({
'_id': ObjectId(user_id),
})
def find_one_by_login(self, login):
if not login:
return None
return self.find_one({
'login': login,
})
def find_one_by_openid(self, openid):
return self.find_one({
'openid': openid,
})
def displace_login(self, login, openid):
        # a login can only exist once.
return self.collection.update(
{'openid': {'$ne': openid}, 'login': login},
{'$set': {'login': u'', 'status': self.STATUS_BEGINNER}},
multi=True)
def count_used(self):
return self.find().count()
| [
"redy.ru@gmail.com"
] | redy.ru@gmail.com |
efdbbbe6b16c275ebb302d934239ed9abb79e159 | 603f752f35b34802d15f397c9d11c56aad2ac5e4 | /getStudentScore.py | fc95bc09a24f687285cf4ee0d24a8aa4963922ec | [] | no_license | waitfun/university | a0b153e6121205ec772ee201f201c18c2aade8e2 | 71c1fad8a3193525e74389c8f5dc5fe9a9ab4e64 | refs/heads/master | 2020-04-06T17:01:24.980336 | 2018-11-15T03:53:07 | 2018-11-15T03:53:07 | 157,643,248 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,782 | py | import requests
from bs4 import BeautifulSoup
import csv,time,re,math
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
def getScore():
cookie = "JSESSIONID=AE9029BE20920E54399350EB7C13D7AF; Hm_lvt_c09b7f26e29f23a8e4c2c74fc98ca736=1526317044,1527310349,1527498285,1527844527"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
'Cookie':cookie
}
url = "http://kdjw.hnust.edu.cn/kdjw/cjzkAction.do?method=tofindCj0708ByXNZY"
params = {
        #ranking method:
'pmfs': '3',
        #sort order
'pxfs': '1',
'rxnf': '2017',
'xjzt': '01',
'xsfs': '1',
'xh':'1716030417',
        #major / specialization:
'zyfx':'',
'xqmc':'',
'xnxq': '2017-2018-2',
'xnxqs': '2017-2018-2',
'yx': '16',
'zy': '7460476E105C4753B73B6559D7E43193',#'4BB9933ADA87451FA433A850C17DFD5D',
'zymc': '[2017]音乐学'
}
req = requests.post(url,headers=headers,verify=False,data=params)
#print(req.text)
soup = BeautifulSoup(req.content, 'html.parser')
for item in soup.find_all("tr",class_="smartTr"):
with open("2.html","a",encoding="utf-8") as fd:
fd.write(str(item))
info_list = item.find_all("td")
student_no = info_list[1].text
student_name = info_list[2].text
print(student_name,student_no)
tbcontent = soup.find("div",id="tblHeadDiv")
th_list = tbcontent.find_all("th")
for item2 in th_list:
print(item2.text)
with open("1.html","a",encoding="utf-8") as fd:
fd.write(str(item2))
if __name__ == "__main__":
getScore() | [
"waitfun319@qq.com"
] | waitfun319@qq.com |
65fa9de6c1fdf8b167f5af603e921268421be24f | 6cde25cd6cf38f854e465889ef1c59a5d05e58b4 | /src/python/normalize-by-median.py | d42a5891a8401eab2e814f7d7507d935af88a34c | [
"Artistic-1.0"
] | permissive | smyang2018/ergatis | ef8eb6e86add69c3583e16be530e42775be0c565 | 1081ede54c9b06efadf8046ec206e8d8c593e0df | refs/heads/master | 2022-01-21T07:21:42.407806 | 2019-07-12T19:46:50 | 2019-07-12T19:46:50 | 262,735,101 | 0 | 1 | NOASSERTION | 2020-05-10T07:32:32 | 2020-05-10T07:32:32 | null | UTF-8 | Python | false | false | 4,158 | py | #! /usr/bin/env python
"""
Eliminate reads with median k-mer abundance higher than
DESIRED_COVERAGE. Output sequences will be placed in 'infile.keep'.
% python scripts/normalize-by-median.py [ -C <cutoff> ] <data1> <data2> ...
Use '-h' for parameter help.
"""
import sys, screed, os
sys.path.append("/usr/local/packages/khmer/python")
import khmer
from khmer.counting_args import build_construct_args, DEFAULT_MIN_HASHSIZE
import argparse
DEFAULT_DESIRED_COVERAGE=5
def main():
parser = build_construct_args()
parser.add_argument('-C', '--cutoff', type=int, dest='cutoff',
default=DEFAULT_DESIRED_COVERAGE)
parser.add_argument('-s', '--savehash', dest='savehash', default='')
parser.add_argument('-l', '--loadhash', dest='loadhash',
default='')
parser.add_argument('-R', '--report-to-file', dest='report_file',
type=argparse.FileType('w'))
parser.add_argument('-o', '--outputpath', dest='outputpath', default='.')
parser.add_argument('input_filenames', nargs='+')
args = parser.parse_args()
if not args.quiet:
if args.min_hashsize == DEFAULT_MIN_HASHSIZE:
print>>sys.stderr, "** WARNING: hashsize is default! You absodefly want to increase this!\n** Please read the docs!"
print>>sys.stderr, '\nPARAMETERS:'
print>>sys.stderr, ' - kmer size = %d \t\t(-k)' % args.ksize
print>>sys.stderr, ' - n hashes = %d \t\t(-N)' % args.n_hashes
print>>sys.stderr, ' - min hashsize = %-5.2g \t(-x)' % args.min_hashsize
print>>sys.stderr, ''
print>>sys.stderr, 'Estimated memory usage is %.2g bytes (n_hashes x min_hashsize)' % (args.n_hashes * args.min_hashsize)
print>>sys.stderr, '-'*8
K=args.ksize
HT_SIZE=args.min_hashsize
N_HT=args.n_hashes
DESIRED_COVERAGE=args.cutoff
report_fp = args.report_file
filenames = args.input_filenames
outpath = args.outputpath
if args.loadhash:
print 'loading hashtable from', args.loadhash
ht = khmer.load_counting_hash(args.loadhash)
else:
print 'making hashtable'
ht = khmer.new_counting_hash(K, HT_SIZE, N_HT)
total = 0
discarded = 0
for input_filename in filenames:
output_name = outpath + '/' + os.path.basename(input_filename) + '.keep'
outfp = open(output_name, 'w')
for n, record in enumerate(screed.open(input_filename)):
if n > 0 and n % 10000 == 0:
print '... kept', total - discarded, 'of', total, ', or', \
int(100. - discarded / float(total) * 100.), '%'
print '... in file', input_filename
if report_fp:
print>>report_fp, total, total - discarded, \
1. - (discarded / float(total))
report_fp.flush()
total += 1
if len(record.sequence) < K:
continue
seq = record.sequence.replace('N', 'A')
med, _, _ = ht.get_median_count(seq)
if med < DESIRED_COVERAGE:
ht.consume(seq)
outfp.write('>%s\n%s\n' % (record.name, record.sequence))
else:
discarded += 1
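            # Added note: this is digital normalization; a read is kept (and its k-mers
            # counted) only while the median k-mer count of its sequence is still below
            # DESIRED_COVERAGE, so redundant high-coverage reads are dropped.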
print 'DONE with', input_filename, '; kept', total - discarded, 'of',\
total, 'or', int(100. - discarded / float(total) * 100.), '%'
print 'output in', output_name
if args.savehash:
print 'Saving hashfile through', input_filename
print '...saving to', args.savehash
ht.save(args.savehash)
# Change 0.2 only if you really grok it. HINT: You don't.
fp_rate = khmer.calc_expected_collisions(ht)
print 'fp rate estimated to be %1.3f' % fp_rate
if fp_rate > 0.20:
print >>sys.stderr, "**"
print >>sys.stderr, "** ERROR: the counting hash is too small for"
print >>sys.stderr, "** this data set. Increase hashsize/num ht."
print >>sys.stderr, "**"
print >>sys.stderr, "** Do not use these results!!"
sys.exit(-1)
if __name__ == '__main__':
main()
| [
"kabolude@c60d6122-3b2e-0410-912d-af40fea696fe"
] | kabolude@c60d6122-3b2e-0410-912d-af40fea696fe |
ea182e68ead47f4393fd988adea64d157fd53d03 | fee6a2ef79fd27c4795fe5256c2a144fc0c1311b | /roman/rq/sim_connection.py | 9b2de331765bde37df18f4334dae86a01d655dc0 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/roman | a290b470d3e2cc28f44dd8470f7ef295942fd929 | 51405e6a404d670ae571b7ddff4f9468f9381b8a | refs/heads/main | 2023-06-30T01:59:16.074496 | 2023-02-07T20:17:12 | 2023-02-07T20:17:12 | 246,629,589 | 20 | 15 | MIT | 2023-02-03T20:51:15 | 2020-03-11T16:56:42 | Python | UTF-8 | Python | false | false | 2,175 | py | from .hand import *
################################################################
## Simulated hand implementation
################################################################
class SimConnection:
"""Implements functionality to read and command the simulated hand, regardless of simulator."""
def __init__(self, env):
self.env = env
def connect(self, activate=True):
pass
def disconnect(self):
pass
def execute(self, cmd, state):
# translate and send the command
if cmd[Command._KIND] == Command._CMD_KIND_READ:
pass
elif cmd[Command._KIND] == Command._CMD_KIND_STOP:
self.env.hand.stop()
elif cmd[Command._KIND] == Command._CMD_KIND_CHANGE:
self.env.hand.set_mode(cmd[Command._MODE])
else: #Command._CMD_KIND_MOVE
if cmd[Command._FINGER] == Finger.All:
self.env.hand.move(cmd[Command._POSITION], cmd[Command._SPEED], cmd[Command._FORCE])
else:
self.env.hand.move_finger(cmd[Command._FINGER], cmd[Command._POSITION], cmd[Command._SPEED], cmd[Command._FORCE])
# prepare the state
# note that we need to call update in order for joint states to reflect the command
# otherwise object_detected would return true immediately after issuing a move command
# because the joints would appear to not move
self.env.update()
self.env.hand.read()
state[State._TIME] = self.env.time()
state[State._FLAGS] = State._FLAG_READY \
+ State._FLAG_MOVING * self.env.hand.is_moving() \
+ State._FLAG_OBJECT_DETECTED * self.env.hand.object_detected()
state[State._MODE] = self.env.hand.mode()
positions = self.env.hand.positions()
state[State._POSITION_A] = positions[0]
state[State._POSITION_B] = positions[1]
state[State._POSITION_C] = positions[2]
targets = self.env.hand.targets()
state[State._TARGET_A] = targets[0]
state[State._TARGET_B] = targets[1]
state[State._TARGET_C] = targets[2]
return state
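
# Hedged usage sketch (added; not part of the original module). FakeEnv is an
# illustrative stand-in: any object exposing .hand, .time() and .update() the way
# execute() uses them will do.
#   conn = SimConnection(env=FakeEnv())
#   conn.connect()
#   state = conn.execute(cmd, state)  # cmd/state follow the Command/State layouts from .hand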
| [
"mihaijal@microsoft.com"
] | mihaijal@microsoft.com |
1693c4a0d618967a4157bd72cbf492d3b6a83247 | 330540d9e2dcb3b8cc70272f5fb5c2bebdb572b5 | /DnfDeals/utils.py | b6601effd4e5ecee0c4a6d694f97f490f9465f3e | [] | no_license | xiaguangting/DnfDeals | 2a8e1e881cfbdd692bb1f16dffdb640b070c2af5 | 3dfdb34de4ea6bc5f9fb1c0352e0e94aeb24cae4 | refs/heads/master | 2020-08-05T22:47:15.889580 | 2019-11-14T03:45:36 | 2019-11-14T03:45:36 | 212,740,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | import logging
import smtplib
from email.mime.text import MIMEText
from logging.handlers import TimedRotatingFileHandler
import pymysql
from DnfDeals import settings
def send_email(subject, content):
message = MIMEText(content, 'html', 'utf-8')
message['From'] = "DnfDeals<%s>" % settings.EMAIL_ADDRESS
message['To'] = ','.join(settings.ACCEPT_EMAIL_list)
message['Subject'] = subject
smtpObj = smtplib.SMTP_SSL(settings.EMAIL_SMTP_SERVER, 465)
smtpObj.login(settings.EMAIL_USERNAME, settings.EMAIL_PASSWORD)
    smtpObj.sendmail(settings.EMAIL_ADDRESS, settings.ACCEPT_EMAIL_list, message.as_string())  # as_string() takes no encoding; its first argument is 'unixfrom', so passing 'utf-8' was a bug
smtpObj.quit()
def get_hunter():
hunter = logging.Logger(name='hunter')
handler = TimedRotatingFileHandler(filename=settings.LOG_ADDRESS, when='D', backupCount=30)
    hunter.addHandler(handler)
    return hunter  # the original configured the logger but never returned it
class Bee(object):
def __init__(self, host, port, user, password, db, charset='utf8mb4'):
self.host = host
self.user = user
self.password = password
self.db = db
self.charset = charset
self.port = port
def start_conn(self):
# Connect to the database
connection = pymysql.connect(host=self.host,
user=self.user,
password=self.password,
db=self.db,
charset=self.charset,
cursorclass=pymysql.cursors.DictCursor,
port=int(self.port))
return connection
def insert(self, sql, paras):
connection = self.start_conn()
try:
with connection.cursor() as cursor:
# Create a new record
cursor.execute(sql, paras)
connection.commit()
finally:
connection.close()
def insert_smart(self, tablename, data):
field_name_list = []
field_value_list = []
for i, j in data.items():
field_name_list.append(i)
field_value_list.append(j)
sql = "INSERT INTO %s (id, %s) VALUES (0, %s)" % (
tablename, ','.join(field_name_list), ','.join(['%s' for i in range(len(field_name_list))]))
self.insert(sql, field_value_list)
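        # Usage sketch (added): table and column names are interpolated directly into the
        # SQL string (only the values go through %s placeholders), so they must come from
        # trusted code, never from user input. Illustrative call with made-up names:
        #   bee = Bee('localhost', 3306, 'user', 'secret', 'dnf')
        #   bee.insert_smart('deals', {'name': u'item', 'price': 9.99})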
def read(self, sql, paras):
# Read a single record
connection = self.start_conn()
try:
result = None
with connection.cursor() as cursor:
cursor.execute(sql, paras)
result = cursor.fetchall()
connection.commit()
finally:
connection.close()
return result
| [
"18627924383@163.com"
] | 18627924383@163.com |
835b6e0ac178cfa27a2002e624816e0d162cf8b4 | 9d7afba14939649c0f20b2748d8a20cb973e4a0a | /generate.py | cebb86450d7ee4d0b968170465925193dfe3d9d5 | [] | no_license | ansible-network/resource_module_planning | 47f552a85e1693b68ba730596d6ad7e1b3ff73a0 | c7cca18a4f2f811859d8f1135de2ba2ddeacd935 | refs/heads/master | 2020-04-30T22:57:51.772230 | 2019-04-05T18:33:03 | 2019-04-05T18:33:03 | 177,132,464 | 0 | 2 | null | 2019-04-09T18:10:27 | 2019-03-22T12:01:20 | HTML | UTF-8 | Python | false | false | 4,846 | py | from datetime import timedelta, datetime
from jinja2 import Template
import csv
import settings
def build_ansible_releases():
ars = []
settings.ANSIBLE_RELEASES.sort(key=lambda x: x['date'])
for release in settings.ANSIBLE_RELEASES:
ars.append({"start": release['date'].isoformat(),
"start_human": release['date'].strftime('%Y-%m-%d'),
"className": "ansibleRelease",
"content": "%s %s" % (release['name'], release['note']),
"group": settings.TL_GROUPS.index("Ansible Releases")})
return ars
def build_resource_modules():
modules = []
module_list = [dict(m) for m in csv.DictReader(open("module_list.csv"))]
priority_points = [dict(m) for m in csv.DictReader(open("priority_points.csv"))]
for idx, ppts in enumerate(priority_points):
ppts['priority'] = idx
for module in module_list:
ppts = next(p for p in priority_points if p['resource'] == module['resource'])
module['priority'] = ppts['priority']
module['points'] = ppts['points']
module_list.sort(key=lambda x: x['priority'])
for module in module_list:
task_duration = int(module['points']) * settings.HRS_PER_POINT
groups_lowered = [g.lower() for g in settings.TL_GROUPS]
tipe = module['type'].split('/')[0]
modules.append({"className": tipe,
"content": module['name'],
"group": groups_lowered.index(tipe),
"kind": "resource_module",
"duration": task_duration})
return modules
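# Added note: build_sprints below is a first-fit greedy packer. Walking the
# priority-ordered module list, it puts each module whose duration still fits under
# HOURS_PER_SPRINT into the current sprint and leaves the rest for later sprints.
# E.g. with HOURS_PER_SPRINT = 40 and durations [30, 20, 10], sprint 1 gets [30, 10]
# and sprint 2 gets [20]. A single module longer than HOURS_PER_SPRINT would never be
# placed and the while-loop would not terminate; that is an assumed precondition.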
def build_sprints(resource_modules):
sprs = []
sprint_end = settings.START_DATE + timedelta(weeks=settings.SPRINT_WKS)
sprint_num = 1
rm_with_dates = []
while resource_modules:
sprint = {'className': 'sprint',
'group': 2,
'content': str(sprint_num),
'start': sprint_end.isoformat(),
'start_human': sprint_end.strftime('%Y-%m-%d'),
'resource_modules': [],
'total': 0}
for resource_module in resource_modules[:]:
if sprint['total'] + resource_module['duration'] > settings.HOURS_PER_SPRINT:
continue
else:
sprint['resource_modules'].append(resource_module)
sprint['total'] += resource_module['duration']
resource_module['start'] = sprint_end.isoformat()
resource_module['start_human'] = sprint_end.strftime('%Y-%m-%d')
rm_with_dates.append(resource_module)
resource_modules.remove(resource_module)
sprint_num += 1
sprint_end += timedelta(weeks=settings.SPRINT_WKS)
sprs.append(sprint)
return sprs, rm_with_dates
def build_network_collection_releases(sprints):
sprints = sprints.copy()
ncrs = []
delivered = []
for idx, ansible_release in enumerate(settings.ANSIBLE_RELEASES):
anc_release_version = 1
for sprint in sprints[:]:
sprint_start = datetime.fromisoformat(sprint['start'])
if sprint_start < ansible_release['date']:
delivered.extend(sprint['resource_modules'])
sprints.remove(sprint)
continue
            # note: idx+1 is indexed below, so ANSIBLE_RELEASES must contain one release dated after the last sprint
            if ansible_release['date'] <= sprint_start < settings.ANSIBLE_RELEASES[idx+1]['date']:
delivered.extend(sprint['resource_modules'])
sprints.remove(sprint)
ncrs.append({"start": sprint['start'],
"start_human": sprint_start.strftime('%Y-%m-%d'),
"className": "ansibleNetworkCollectionRelease",
"content": "%s.%s" % (ansible_release['name'], anc_release_version),
"group": settings.TL_GROUPS.index("Network Collection Releases"),
"resource_modules": delivered})
anc_release_version += 1
delivered = []
return ncrs
def template(dates):
dates.sort(key=lambda x: datetime.fromisoformat(x['start']))
# pprint(dates)
with open('index.html.j2') as file_:
tmplt = Template(file_.read())
with open('index.html', encoding='utf-8', mode='w') as file:
file.write(tmplt.render(dates=dates,
groups=settings.TL_GROUPS,
settings=settings))
def main():
ansible_releases = build_ansible_releases()
resource_modules = build_resource_modules()
sprints, resource_modules = build_sprints(resource_modules)
ncrs = build_network_collection_releases(sprints=sprints)
template(ansible_releases + ncrs + resource_modules + sprints)
if __name__ == "__main__":
main()
| [
"bthornto@thethorntons.net"
] | bthornto@thethorntons.net |
492a402cbf27dab0e47aac4ae9e1d9d48b0ac5d8 | f273e6fbd3d2a39bc88373718cf226ae9638e491 | /POP_moving_file_once.py | c1edeb0ce1340305c601f68e70825e22ada8952c | [] | no_license | TheRiseOfDavid/ntut_crawler | 510c36f95afb8bace42702a60dc388a999dcb6c2 | 4b0a9d4bc92de38f50112975a6d922dbcdda5d4c | refs/heads/master | 2022-06-14T10:06:03.843702 | 2020-03-02T01:38:40 | 2020-03-02T01:38:40 | 242,755,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 26 01:25:29 2020
@author: user
"""
import os
import time
def duplicate(strPath , Data ):
lisDir = os.listdir(strPath)
dicAll_Data = dict()
strData = ""
if(type(Data) == dict):
dicAll_Data = Data
for strFile in lisDir :
strFile_Name = strFile.rsplit("." , 1)[0]
if(strFile_Name in dicAll_Data.keys()):
del dicAll_Data[strFile_Name]
return dicAll_Data
elif (type(Data) == str):
#debug
#print(Data)
strData = Data
for strFile in lisDir:
strFile_Name = strFile.rsplit("." , 1)[0]
if(strData == strFile_Name):
return 1
return 0
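# Usage sketch (added): with a dict, duplicate() returns the entries whose names have no
# matching file in strPath yet; with a string, it returns 1 if a file with that stem
# exists and 0 otherwise. Illustrative calls (paths/urls are made up):
#   remaining = duplicate(r"D:\downloads", {"lecture1": url1, "lecture2": url2})
#   already_there = duplicate(r"D:\downloads", "lecture1")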
def rename(strPath , strFile_New_Name):
#debug
#strPath = r"C:\Users\user\Downloads"
#strNew_Path = r"D:\program\tool program\Download_NTUT_Imfornation_Learning_Course_Files\download"
strFile_Old_Name = ""
isOk = 1
while(isOk):
isOk = 0
time.sleep(2)
lisDir = os.listdir(strPath)
for strFile_Name in lisDir:
if ".crdownload" in strFile_Name:
isOk = 1
break
#debug
#print(isOk)
#print(strFile_Name)
lisDir = os.listdir(strPath)
lisFile_Name = list()
strFile_Old_Name = ""
strFile_Type = ""
for strFile_Name in lisDir:
if(strFile_Name[0:14].isdigit() or strFile_Name == r"'ebook.pdf'"):
lisFile_Name = strFile_Name.rsplit('.',1)
#debug
#print(lisFile)
#print(4)
strFile_Old_Name = lisFile_Name[0]
strFile_Type = lisFile_Name[1].replace("'" , "")
time.sleep(3)
isOk = 1
while(isOk):
try:
                    #the raw rsplit piece is used here because for r"'ebook.pdf'" the quoted extension (pdf') differs from the cleaned strFile_Type
os.rename( rf"{strPath}\{strFile_Old_Name}.{lisFile_Name[1]}" , \
rf"{strPath}\{strFile_New_Name}.{strFile_Type}")
isOk = 0
                except OSError:  # the file is still locked by the browser; wait and retry (the original used a bare except)
time.sleep(3)
return rename(strPath , strFile_New_Name)
time.sleep(3)
break
| [
"david53133@gmail.com"
] | david53133@gmail.com |
c9bd05b4601e4999b2f8e8388cc5a71f4d443296 | 83dec1c86682fee0bebf19b1c30c4051795b4a3d | /Utilities.py | f35987e7f1489ea6281e0b060f807d0072ca36cc | [] | no_license | hayridurmaz/FaceAuthentication | 97b88248cf533a8a9bb4bf2f95da96240f35eaae | 6e6b949e740de56e67c611052a419d12abaab056 | refs/heads/master | 2023-06-07T11:57:57.777695 | 2021-06-17T19:15:43 | 2021-06-17T19:15:43 | 322,927,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,000 | py | import logging
import os
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from deepface import DeepFace
from keras_vggface.vggface import VGGFace
import config
dataset_path = config.recognizer_options['user_dataset']
database_path = config.recognizer_options['user_database']
resnet50_features = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3),
pooling='avg') # pooling: None, avg or max
def resize(img):
img = cv2.resize(img, (224, 224)) # resize image to match model's expected sizing
img = img.reshape(1, 224, 224, 3) # return the image with shaping that TF wants.
return img
# get the face embedding for one face
def get_embedding(face_pixels):
return resnet50_features.predict(resize(face_pixels))
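
# Hedged usage sketch (added; not called elsewhere in this module): compare two face
# crops by cosine similarity of their VGGFace embeddings. The 0.5 threshold is an
# illustrative assumption, not a tuned value.
def embeddings_match(face_a, face_b, threshold=0.5):
    emb_a = get_embedding(face_a).flatten()
    emb_b = get_embedding(face_b).flatten()
    # cosine similarity in [-1, 1]; higher means more similar
    cos_sim = np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b))
    return cos_sim > threshold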
def create_file_if_not_exist(file_name):
if not os.path.exists(file_name):
os.mknod(file_name)
def create_folder_if_not_exist(path):
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
def FileRead(file_path="users_name.txt"):
NAME = []
with open(file_path, "r") as f:
for line in f:
NAME.append(line.split(",")[1].rstrip())
return NAME
def Draw_Rect(Image, face, color):
x, y, w, h = face
cv2.line(Image, (x, y), (int(x + (w / 5)), y), color, 2)
cv2.line(Image, (int(x + ((w / 5) * 4)), y), (x + w, y), color, 2)
cv2.line(Image, (x, y), (x, int(y + (h / 5))), color, 2)
cv2.line(Image, (x + w, y), (x + w, int(y + (h / 5))), color, 2)
cv2.line(Image, (x, int(y + (h / 5 * 4))), (x, y + h), color, 2)
cv2.line(Image, (x, int(y + h)), (x + int(w / 5), y + h), color, 2)
cv2.line(Image, (x + int((w / 5) * 4), y + h), (x + w, y + h), color, 2)
cv2.line(Image, (x + w, int(y + (h / 5 * 4))), (x + w, y + h), color, 2)
def getImagesAndLabels():
imagePaths = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path)]
faceSamples = []
ids = []
for imagePath in imagePaths:
img = cv2.imread(imagePath, 0)
img_numpy = np.array(img, 'uint8')
        user_id = int(os.path.split(imagePath)[- 1].split(".")[0])  # this parsing could be a problem here (translated from the Turkish original)
faceSamples.append(img_numpy)
ids.append(user_id)
return faceSamples, ids
def getImagesAndLabelsForUser(user):
imagePaths = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path)]
faceSamples = []
ids = []
for imagePath in imagePaths:
img = cv2.imread(imagePath)
img_numpy = np.array(img, 'uint8')
user_id = int(os.path.split(imagePath)[- 1].split(".")[0])
if str(user_id) == user.id:
faceSamples.append(img_numpy)
ids.append(user_id)
return faceSamples
def showImage(image, title='Video'):
cv2.imshow(title, image)
cv2.waitKey(delay=1)
def detect_face(image, isShowImage=False):
faces = DeepFace.detectFace(image, detector_backend="ssd")
faces = cv2.cvtColor(faces, cv2.COLOR_RGB2BGR)
if isShowImage:
showImage(faces)
faces = 255 * faces
faces = np.asarray(faces, dtype=int)
return faces
def create_dataset_for_user(cam, user, numberOfsamples, recognizer):
fig, axs = plt.subplots(10, 5, figsize=(20, 20), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace=.5, wspace=.001)
count = 0 # Variable for counting the number of captured face photos
logging.info("Please look into the camera and wait ...")
start_processing_video = time.time()
while True:
# Capture, decode and return the next frame of the video
ret, image = cam.read()
if image is None:
break
start_reading_image = time.time()
try:
faces = detect_face(image, isShowImage=True)
logging.info("Face detector took {} sec".format(time.time() - start_reading_image))
cv2.imwrite(
dataset_path + str(user.id) + '.' + str(
count) + ".jpg ",
faces)
logging.info("Saved one photo in {} sec".format(time.time() - start_reading_image))
axs[int(count / 5)][count % 5].imshow(faces, vmin=0, vmax=255)
axs[int(count / 5)][count % 5].set_title(
                str(user.id) + '.' + str(count) + ".jpg",
fontdict={'fontsize': 15, 'fontweight': 'medium'})
axs[int(count / 5)][count % 5].axis('off')
count += 1
except Exception as e:
logging.error("There are either no face or more than one face found")
continue
        if cv2.waitKey(1) & 0xff == 27:  # to exit, press "Esc"; waitKey(1) waits 1 ms per frame (the original comment said 100 ms)
break
elif count >= numberOfsamples: # taking pic_num photos
break
logging.info("Dataset has been successfully created for this person... in {} secs".format(
time.time() - start_processing_video))
cam.release()
cv2.destroyAllWindows()
plt.show()
| [
"hayri.durmaz@tedu.edu.tr"
] | hayri.durmaz@tedu.edu.tr |
e64468f489f6456474b6cfadd135210659097b4d | 9a6ed03100c84c6863ea94c592740ed7836bebaa | /venv/bin/wheel | b7654b7eb42f70ab2cb36133dd9af2c5f2b68513 | [] | no_license | nemanjamaksim/cicd-buzz | b708ea171cbdd4fc9648fea2f018e9e41bdff447 | 31fe7a1fa12eaca22eec172e3dc3c8cba7fe56d2 | refs/heads/master | 2020-04-30T17:18:50.182192 | 2019-03-21T15:42:00 | 2019-03-21T15:42:00 | 176,961,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | #!/home/nemanja/Documents/cicd-buzz/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"nemanja.maksimovic@bepecs.com"
] | nemanja.maksimovic@bepecs.com | |
1fcabfdfd261d113f56cf3419f435286db66644a | ebec4fb7b2eb36b0214e7c018dd1a2f9a9f7b67d | /SPLN/testes/teste_2019/ex6_2.py | 8f64e6fb33ebc2a9359e11e36545baea12b4f200 | [] | no_license | MrBoas/PLC | cb09aaa2440a8711a6ba888a92cdcd2559b0b8cf | 3f77cf1ce96cf8c4bb21ee4ec05f1fc418e9b73e | refs/heads/master | 2022-02-02T10:45:16.322559 | 2019-08-05T15:51:51 | 2019-08-05T15:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py | from bs4 import BeautifulSoup as BS
import requests
import subprocess
def getHTML(distrito,concelho):
urlBase = "http://www.ipma.pt/pt/otempo/prev.localidade.hora/#"
    composedURL = urlBase + distrito + '&' + concelho  # note: everything after '#' is a URL fragment and is not sent to the server
response = requests.get(composedURL).content
    soup = BS(response, 'html.parser')  # name the parser explicitly; bs4 warns otherwise
return soup
# x = getHTML('Braga','Barcelos')
# print(x)
file = "testing.html"
file = open(file).read()
soup = BS(file, 'html.parser')
result = soup.find_all('div',{"class":"weekly-column active"})
weather = []
for dia in result:
date = dia.find('div',{"class":"date"}).get_text()
tmin = dia.find('span',{"class":"tempMin"}).get_text()
tmax = dia.find('span',{"class":"tempMax"}).get_text()
prev_txt = dia.find('img',{"class":"weatherImg"})['title']
uv = dia.find('img',{"class":"iuvImg"})['title']
uv = uv.split()[1]
dic = {}
dic['date'] = date
dic['prev_txt'] = prev_txt
dic['temp_min'] = tmin
dic['temp_max'] = tmax
dic['uv'] = uv
weather.append(dic)
print(weather)
| [
"raulvilasboas97@gmail.com"
] | raulvilasboas97@gmail.com |
7454e773ea8edc027d5830ae01f8e4737b1415ed | 6f568983e2662d9c69603d93b62755f2dd47705e | /week5/ProblemSet5/ps5_recursion.py | b5c1d094a0f6bb086ac8f80155181080b3402d43 | [] | no_license | sesan/6.00x | 6e40d1832326e724e3d554c7327e28569890c2ae | 86afbf1f447cf5bd0e493890be2a00403ae2e825 | refs/heads/master | 2020-12-31T02:14:44.869543 | 2013-03-26T20:45:44 | 2013-03-26T20:45:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,310 | py | # 6.00x Problem Set 5
#
# Part 2 - RECURSION
import imp
storyFile = open("story.txt", 'r')
story = storyFile.read()
from ps5_encryption import *
#
# Problem 3: Recursive String Reversal
#
def reverseString(aStr):
"""
Given a string, recursively returns a reversed copy of the string.
For example, if the string is 'abc', the function returns 'cba'.
The only string operations you are allowed to use are indexing,
slicing, and concatenation.
aStr: a string
returns: a reversed string
"""
if len(aStr) == 0:
return aStr
else:
return aStr[-1] + reverseString(aStr[:-1])
#
# Problem 4: X-ian
#
def x_ian(x, word):
"""
Given a string x, returns True if all the letters in x are
contained in word in the same order as they appear in x.
>>> x_ian('eric', 'meritocracy')
True
>>> x_ian('eric', 'cerium')
False
>>> x_ian('john', 'mahjong')
False
x: a string
word: a string
returns: True if word is x_ian, False otherwise
"""
if len(word) == 0 and len(x) > 0:
return False
elif len(x) == 0:
return True
elif x[0] == word[0]:
return x_ian(x[1:], word[1:])
else:
return x_ian(x, word[1:])
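# Added worked trace: x_ian('eric', 'meritocracy') consumes 'e' at m-e, then finds 'r',
# 'i', 'c' in order inside the remaining 'ritocracy', so it returns True. For 'cerium',
# 'e', 'r', 'i' are matched but no 'c' remains afterwards, so x_ian('eric', 'cerium')
# hits the len(word) == 0 base case and returns False.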
#
# Problem 5: Typewriter
#
def insertNewlines(text, lineLength):
"""
Given text and a desired line length, wrap the text as a typewriter would.
Insert a newline character ("\n") after each word that reaches or exceeds
the desired line length.
text: a string containing the text to wrap.
line_length: the number of characters to include on a line before wrapping
the next word.
returns: a string, with newline characters inserted appropriately.
"""
if len(text) < lineLength:
return text
else:
if text[lineLength - 1] == ' ':
return text[:lineLength] + '\n' + insertNewlines(text[lineLength:], lineLength)
        else:
            # the boundary falls inside a word: extend the line to the next space
            tempFront = text[:lineLength]
            tempBack = text[lineLength:]
            cut = tempBack.find(' ') + 1  # index just past the next space; 0 when no space is found
            if cut <= 0:
                # no spaces left, so the remainder fits on one final line
                return tempFront + tempBack
            else:
                newText = tempBack[cut:]
                return tempFront + tempBack[:cut] + '\n' + insertNewlines(newText, lineLength)
| [
"ryan.lindgren@gmail.com"
] | ryan.lindgren@gmail.com |
8bd85a71ed32a09c3f871431ee97970c9134121b | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/FrontCache/FPythonCode/FC_TCOLL_01_ATS_40.py | 11d35ced4c4937f267029d8778ff4dfce2f825ed | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,150 | py |
'''----------------------------------------------------------------------------------------------------------
MODULE : FC_TCOLL_01_ATS_40
PROJECT : FX onto Front Arena
PURPOSE             : This module is the entry point for the Trade Collection ATSs. These ATSs will
                      subscribe to Trade Collection Requests. They will pull the relevant Front Cache
                      data from the Front Cache Trading Manager Template for the specific trades in the
                      incoming request. Once a Request and/or Batch is complete, a Response message
                      will be posted onto the AMB so that the Response can be sent to subscribing
                      consumers to notify them that the data for the Request or Batch is available
                      for consumption.
DEPARTMENT AND DESK : All Departments and all Desks.
REQUESTER           : FX onto Front Arena Project
DEVELOPER : Heinrich Cronje
CR NUMBER : XXXXXX
-------------------------------------------------------------------------------------------------------------
'''
'''----------------------------------------------------------------------------------------------------------
Importing all relevant Python and custom modules needed for the ATS to start up. Initializing the FC_UTILS
module to load all Parameters, Logging, Error Handler.
----------------------------------------------------------------------------------------------------------'''
import FC_ERROR_HANDLER_DEFAULT as ERROR_HANDLER_DEFAULT
import traceback
try:
from FC_UTILS import FC_UTILS as UTILS
except ImportError, e:
ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s.' %__name__, e, traceback)
raise ImportError('Import Error in module %s. ERROR: %s.' %(__name__, str(e)))
try:
UTILS.Initialize(__name__)
except Exception, e:
ERROR_HANDLER_DEFAULT.handelError('Initialization Error in module %s. FC_UTILS could not be initialized. '
'No Parameters, Logging or Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
raise Exception('Initialization Error in module %s. FC_UTILS could not be initialized. '
'No Parameters, Logging or Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
from FC_EXCEPTION import FC_EXCEPTION as EXCEPTION
except ImportError, e:
ERROR_HANDLER_DEFAULT.handelError('Import Error in module %s. FC_EXCEPTION could not be imported. '
'No Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved.' %__name__, e, traceback)
raise Exception('Import Error in module %s. FC_EXCEPTION could not be imported. '
'No Error Handling could be loaded. '
'The ATS will not start until the root issue is resolved. ERROR: %s. ' %(__name__, str(e)))
try:
from datetime import datetime
except ImportError, e:
UTILS.ErrorHandler.processError(None, EXCEPTION('Import Error in module %s. Module datetime could not be imported. '
'The ATS will not start until the root issue is resolved.' %__name__, traceback, 'CRITICAL', e), __name__)
raise Exception('Import Error in module %s. Module datetime could not be imported. '
'The ATS will not start until the root issue is resolved. ERROR: %s' %(__name__, str(e)))
try:
from FC_TCOLL_ATS_WORKER import FC_TCOLL_ATS_WORKER as TCOLL_ATS_WORKER
except ImportError, e:
UTILS.ErrorHandler.processError(None, EXCEPTION('Could not import the worker module in module %s' %__name__, traceback, 'CRITICAL', None), __name__)
raise Exception('Could not import the worker module in module %s. ERROR: %s' %(__name__, str(e)))
'''----------------------------------------------------------------------------------------------------------
Global variables
-------------------------------------------------------------------------------------------------------------
'''
global worker
worker = None
'''----------------------------------------------------------------------------------------------------------
work function which the ATS will call once started.
-------------------------------------------------------------------------------------------------------------
'''
def work():
global worker
if not worker:
UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_S_IS_NOT_INSTANTIATED %__name__, traceback, UTILS.Constants.fcGenericConstants.CRITICAL, None), __name__)
else:
worker.work()
'''----------------------------------------------------------------------------------------------------------
start function which the ATS will call when the ATS is starting.
-------------------------------------------------------------------------------------------------------------
'''
def start():
UTILS.Logger.flogger.info(UTILS.Constants.fcFloggerConstants.STARTING_ATS_S_AT_S %(__name__, datetime.now()))
global worker
if not worker:
worker = TCOLL_ATS_WORKER()
worker.start()
'''----------------------------------------------------------------------------------------------------------
stop function which the ATS will call when the ATS is stopping.
-------------------------------------------------------------------------------------------------------------
'''
def stop():
global worker
if not worker:
UTILS.ErrorHandler.processError(None, EXCEPTION(UTILS.Constants.fcExceptionConstants.WORKER_VARIABLE_IN_S_IS_NOT_INSTANTIATED_STOP %__name__, traceback, UTILS.Constants.fcGenericConstants.MEDIUM, None), __name__)
else:
worker.stop()
#start()
#work()
#stop()
| [
"nencho.georogiev@absa.africa"
] | nencho.georogiev@absa.africa |
6f6c6ada1f54e063deb49df940dc1cc3650971d6 | b9008dc6326b30de1a16ba01a1f3143aa248f7c3 | /python/chapter3/ex01_10.py | 0eed47d612f626182ab96704303341e3153a0b74 | [] | no_license | wonjongah/multicampus_IoT | ce219f8b9875aa7738ef952a8702d818a571610e | 765a5cd7df09a869a4074d8eafce69f1d6cfda4a | refs/heads/master | 2023-02-13T12:30:19.924691 | 2021-01-08T10:17:42 | 2021-01-08T10:17:42 | 292,800,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | country = "Korea"
if country == "Korea":
print("한국입니다")
if country != "Korea":
print("한국이 아닙니다")
if "korea" > "japan":
print("한국이 더 크다")
if "korea" < "japan":
print("일본이 더 크다")
if "Korea" > "korea":
print("Korea가 더 큽니다")
if "Korea" < "korea":
print("korea가 더 큽니다")
print(ord("K"))
print(ord("k")) | [
"wonjongah@gmail.com"
] | wonjongah@gmail.com |
9e2613d43c8d4d08d227418cedbf0dd8bf1c3c42 | c09cfdb1302f4d409bfc042dd0fff6a6d59f4a87 | /Apocalypse/textRNN_half2.py | 6b50d99efbdabc8f4d5a19027bbc5fc8bd9a864f | [] | no_license | Utschie/ML_Monitoring_Trade | 30773b91024c7ffca4f95a8e18ed56c4e2147529 | 3791e8405ae314dee4f6008c3feb214427f14a55 | refs/heads/master | 2021-11-08T03:17:06.834889 | 2021-07-05T18:10:37 | 2021-07-05T18:10:37 | 166,300,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,151 | py | #本程序是采用平衡数据集的用tsvd的模型,看看会不会比非平衡数据集好————20210126
# Bump the optimizer eps to 1e-16 and lower the initial learning rate by two orders of magnitude, to see whether the loss keeps falling
'''
After comparing initial learning rates of 0.01, 0.001, 0.0001 and 0.00001, 0.001 (and even 0.01) turned out best, with the loss bottoming out around 1.1.
0.0001 and 0.00001 decrease very slowly; at an initial rate of 0.00001 the loss barely moves once it reaches 5.
At a learning rate of 0.01 the loss drops to 1.1 quickly and very stably, even steadier than 0.001.
'''
# This time, try default initialization instead of orthogonal initialization and see what the loss looks like
# Default initialization works slightly better than orthogonal; the mean return on big upsets even exceeds 1 (about 1.002).
# Although it only ran for maybe 3 epochs, overall accuracy rose to 42%, so default initialization is better -- 20210202
# By this test, prediction accuracy for big upsets, small upsets and normal results is 28.14%, 29.01% and 64.62% respectively -- 20210203
# Buying at the last frame's maximum for each prediction type gives mean returns of 12.89%, 3.87% and 10.52% -- 20210203
# Weighted by per-hit returns instead, the mean returns come out to 11%, 3.6% and 7% -- 20210203
# In fact, just buying only big upsets, only small upsets, or only normals at the last frame's maximum already yields 14%, 1% and 6.6% -- 20210203
# Using the model's predicted probabilities as investment proportions, the mean return over 72532 matches is 6.6%, but the standard deviation is as high as 22% -- 20210203
# Assuming returns come from a distribution with mean 6.6% and std 22%, the std of the sample mean = total std / sqrt(n), so at least 12 draws are needed for a positive mean within 1 std (i.e. with 68% probability)
# Being >1 with 95% probability (within 2 stds) requires 45 draws -- 20210203
# A 99.7% guarantee (within 3 stds) needs 100 draws, each invested at these proportions
# What would the mean and std be if we invested by the last frame's average probability instead of the model's, still taking the largest set of odds? -- 20210203
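# A sketch of the sample-size arithmetic in the notes above, assuming returns with
# mean 6.6% and std 22% as quoted (the numbers come from these comments, not from the code):
#
#   import math
#   mu, sigma = 0.066, 0.22
#   for k in (1, 2, 3):
#       # a positive sample mean within k stds needs k*sigma/sqrt(n) <= mu, i.e. n >= (k*sigma/mu)**2
#       print(k, math.ceil((k * sigma / mu) ** 2))   # -> 12, 45, 100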
import os
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import sys
import pandas as pd
import numpy as np
import csv
import random
import re
from sklearn.decomposition import TruncatedSVD
from torch.nn.utils.rnn import pad_sequence  # used to pad sequences
import time
from prefetch_generator import BackgroundGenerator
from torch.utils.tensorboard import SummaryWriter
from pywick.optimizers.nadam import Nadam  # use the Nadam optimizer from the pywick package
from torch.optim import lr_scheduler
from torch.optim import Adam
with open('/home/jsy/data/cidlist_complete.csv') as f:
reader = csv.reader(f)
    cidlist = [row[1] for row in reader]  # build the cid lookup table
cidlist = list(map(float,cidlist))  # convert every element from str to float
class DataLoaderX(DataLoader):
def __iter__(self):
return BackgroundGenerator(super().__iter__())
class BisaiDataset(Dataset):  # dataset preprocessor ("bisai" = match)
    def __init__(self,filepath):  # filepath is a list file
        with open(filepath,'r') as f:  # read filepath and build the filelist
            self.filelist = []
            for line in f:
                self.filelist.append(line.strip('\n'))
        self.lablelist = pd.read_csv('/home/jsy/data/lablelist.csv',index_col = 0)  # table of match ids and their results
        self.lables = {'win':0,'draw':1,'lose':2}  # classification labels must start at 0 and match the order in the table
def __getitem__(self, index):
#todo
# 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
        # note here: step 1, "read one data", reads a single sample (the original comment is truncated)
data_path = self.filelist[index]
bisai_id = int(re.findall(r'/(\d*?).csv',data_path)[0])
# 2. Preprocess the data (e.g. torchvision.Transform).
data = self.csv2frame(data_path)
# 3. Return a data pair (e.g. image and label).
lable = self.lablelist.loc[bisai_id].result
lable = self.lables[lable]
return data,lable
def __len__(self):
# You should change 0 to the total size of your dataset.
return len(self.filelist)
    def csv2frame(self,filepath):  # given one match's csv path, build the frame list and the frame-change times
        data = pd.read_csv(filepath)  # read the file
        data = data.drop(columns=['league','zhudui','kedui','companyname'])  # drop the non-numeric columns
        frametimelist=data.frametime.value_counts().sort_index(ascending=False).index  # read the frametime values into a list
        framelist =list()  # framelist starts empty, same length as frametimelist; shape and float dtype must be fixed or the dataloader cannot read it
        '''
        The two loops here were too slow, and pandas was even slower; after dropping pandas
        entirely, processing time fell from 109 seconds to 10. At 10 seconds the CPU sits at
        20% and will not go higher, so pushing further would probably need C++ or C.
        '''
        new_data = np.array(data)
        lables = new_data[:,0]
        if len(frametimelist)>250:
            frametimelist = [frametimelist[0]]+random.sample(list(frametimelist)[1:-1],248)+[frametimelist[-1]]  # if longer than 250, keep head and tail and sample 248 in between, 250 total
            frametimelist.sort(reverse=True)  # sort descending
        for i in frametimelist:
            state = new_data[lables==i]  # get the transition at this frame change
            #state = np.array(state)  # no need to convert to an ndarray, it already is one
            state = np.delete(state,(0,1), axis=-1)  # drop frametime and cid
            # before padding into a matrix we need to know how many cids exist in the whole dataset
            framelist.append(state)
        frametimelist = np.array(frametimelist)
        vectensor = self.mrx2vec(framelist)
        len_frame = vectensor.shape[0]
        if len_frame<250:
            vectensor = np.concatenate((np.zeros((250-len_frame,10),dtype=np.float64),vectensor),axis=0)  # if shorter than 250, pad with zeros at the front
        vectensor = torch.from_numpy(vectensor)
        return vectensor  # return the frame tensor; frametimelist could be returned too, positions are not handled here
    def tsvd(self,frame):
        tsvd = TruncatedSVD(1)
        if frame.shape[0] != 1:
            newframe = tsvd.fit_transform(np.transpose(frame))  # reduce to a (10, 1) matrix
        else:
            return frame.reshape((10,1))  # the single-row case just needs a reshape
        return newframe
    def mrx2vec(self,flist):  # matrix2vec via truncated SVD; in: len(frametimelist)*(306*10), out: len(frametimelist)*10
        vectensor = np.array(list(map(self.tsvd,flist))).squeeze(2)
        #veclist = veclist.transpose()
        #vectensor = torch.from_numpy(veclist)  # convert to a tensor
        return vectensor  # returns a (sequence_length, 10) array; pad_sequence is still needed before feeding the model (dim 0 is the batch dim)
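# A minimal, self-contained sketch of the frame-to-vector reduction above
# (the (306, 10) shape is illustrative of one odds frame, not taken from the original run):
#
#   import numpy as np
#   from sklearn.decomposition import TruncatedSVD
#   frame = np.random.rand(306, 10)                # one frame: rows of 10 odds fields
#   vec = TruncatedSVD(1).fit_transform(frame.T)   # -> (10, 1): a 10-dim summary vector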
class Lstm(nn.Module):  # the model keeps default initialization from construction
    def __init__(self):
        super().__init__()
        self.encoder = nn.LSTM(input_size=10,
                               hidden_size=250,  # frames are uniformly subsampled to 250 keeping head and tail
                               num_layers=1,  # a single layer for now
                               bidirectional=True)
        #nn.init.orthogonal_(self.encoder.weight_ih_l0)
        #nn.init.orthogonal_(self.encoder.weight_hh_l0)
        #nn.init.constant_(self.encoder.bias_ih_l0,0.0)
        #nn.init.constant_(self.encoder.bias_hh_l0,0.0)
        self.decoder = nn.Sequential(
            nn.Linear(1000, 250),  # takes the concatenated LSTM outputs (2 directions x 2 time steps x 250)
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(250, 3)
        )
        #nn.init.normal_(self.decoder[0].weight,mean=0.0)
        #nn.init.constant_(self.decoder[0].bias, 0.0)
        #nn.init.normal_(self.decoder[3].weight,mean=0.0)
        #nn.init.constant_(self.decoder[3].bias, 0.0)
    def forward(self,inputs):
        output, _= self.encoder(inputs.permute(1,0,2))  # transpose first: pytorch wants dim 0 = sequence length, dim 1 = batch_size
        encoding = torch.cat((output[0], output[-1]), -1)  # bidirectional LSTM, so concatenate the first and last time steps
        return self.decoder(encoding)  # feed the concatenated time-step outputs into the MLP
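# Shape walkthrough for the model above (a sketch assuming the 250-frame input this
# file builds; batch size 4 is arbitrary):
#
#   x = torch.zeros(4, 250, 10).double()      # (batch, seq_len, features)
#   # permute -> (250, 4, 10); bi-LSTM output -> (250, 4, 500)
#   # cat(output[0], output[-1]) -> (4, 1000); decoder -> (4, 3) class scores
#   assert Lstm().double()(x).shape == (4, 3)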
def get_parameter_number(model):  # parameter count
total_num = sum(p.numel() for p in model.parameters())
trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
return {'Total': total_num, 'Trainable': trainable_num}
if __name__ == "__main__":
    train_writer = SummaryWriter('/home/jsy/log3/train')  # created automatically
    test_writer = SummaryWriter('/home/jsy/log3/test')  # created automatically
    checkpoint_path = '/home/jsy/log3/checkpoint.pth'  # the checkpoint folder must exist beforehand
    train_path = '/home/jsy/balanced_train_path.txt'
    test_path = '/home/jsy/balanced_test_path.txt'
    dataset = BisaiDataset(train_path)  # training set
    test_set = BisaiDataset(test_path)  # validation set
    print('dataset loaded')
    loader = DataLoaderX(dataset, 128 ,shuffle=True,num_workers=4,pin_memory=True)  # cannot run in interactive mode with num_workers>0
    test_loader = DataLoaderX(test_set, 128, shuffle=True,num_workers=4,pin_memory=True)  # validation dataloader
    print('dataloader ready')
    net = Lstm().double().cuda()  # double precision
    print('network built')
    stat1 = get_parameter_number(net)
    print(str(stat1))
    lr, num_epochs = 0.001, 2000
    optimizer= Adam(net.parameters(), lr=lr,eps=1e-16)
    start_epoch = 1  # initial epoch is 1 when there is no checkpoint
    gesamt_counter = 0
    if os.path.exists(checkpoint_path):
        checkpoint = torch.load(checkpoint_path)
        net.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch']+1
        gesamt_counter = checkpoint['gesamt_counter']
    loss = nn.CrossEntropyLoss()
    scheuler = lr_scheduler.StepLR(optimizer,step_size=5,gamma=0.25)  # (sic: "scheuler") drop the LR by 4x every 5 epochs
    for epoch in range(start_epoch, num_epochs + 1):
        l_list = list()
        epoch_start = time.time()  # time the whole epoch excluding validation
        net.train()
        counter = 0
        start = time.time()
        train_output = torch.zeros((1,3))
        train_y = torch.zeros((1)).long()  # must be long dtype
        #if epoch == 6:
        #    loader = DataLoaderX(dataset,64,shuffle=True,num_workers=4,pin_memory=True)  # after 10 epochs, switch the batch size to 64
        #    optimizer= Nadam(net.parameters(), lr=lr/pow(4,2))
        for x, y in iter(loader):
            # the padded zeros should still be excluded from the computation, so a mask matrix may be needed
            # or a temporal global max pooling layer to cancel the effect of padding
            x = x.double().cuda()
            y = y.long().cuda()
            output = net(x)  # x must be a fixed-shape tensor with batch_size on dim 0, hence the batched padding
            l = loss(output, y)
            optimizer.zero_grad() # clear gradients, equivalent to net.zero_grad()
            l.backward()
            optimizer.step()
            end = time.time()
            train_period = end-start
            counter+=1
            gesamt_counter+=1
            print('epoch '+str(epoch)+': trained '+str(counter)+' batches, '+str(train_period)+' seconds')
            l_list.append(l.item())
            train_writer.add_scalar('step_loss',l.item(),gesamt_counter)  # loss curve over every training step
            #print('loss: %f' % (l.item()))
            start = time.time()
            train_output = torch.cat((train_output,output.cpu()),0)  # concatenate this batch's outputs
            train_y = torch.cat((train_y,y.cpu()),0)  # concatenate this batch's labels
        epoch_end = time.time()
        print('epoch %d, loss: %f' % (epoch,np.mean(l_list)))
        print('epoch '+str(epoch)+' training took '+str(int(epoch_end-epoch_start))+' seconds')
        prediction = torch.argmax(train_output, 1)  # index of the max output for each match
        correct = (prediction == train_y).sum().float()  # total number of correct predictions
        accuracy = correct/len(train_y)  # Top-1 accuracy; with only 3 classes, top-2 is not tracked
        train_writer.add_scalar('Top-1 Accuracy',accuracy,epoch)  # write to the log
        # validation at the end of each epoch
        #if epoch>=20:
        print('validating......')
        test_start = time.time()
        net.eval()
        torch.cuda.empty_cache()  # free some GPU memory
        with torch.no_grad():  # without this, GPU memory blows up during validation
            test_output = torch.zeros((1,3))
            test_y = torch.zeros((1)).long()  # must be long dtype
            test_counter = 0
            for x,y in iter(test_loader):
                x = x.double().cuda()
                output = net(x).cpu()  # move the output to host memory
                test_output = torch.cat((test_output,output),0)  # concatenate this batch's outputs
                test_y = torch.cat((test_y,y),0)  # concatenate this batch's labels
                #torch.cuda.empty_cache()
                test_counter+=1
                print('validation: '+str(test_counter)+' batches done')
            # the first element of both the outputs and the labels is the initial 0; given its tiny share it is not removed
            print('computing results......')
            l_test = loss(test_output,test_y)  # one mean loss over the whole validation set (nn.CrossEntropyLoss averages by default)
            test_writer.add_scalar('epoch_loss',l_test.item(),epoch)  # one validation loss per epoch
            train_writer.add_scalar('epoch_loss',np.mean(l_list),epoch)  # also log the training loss per epoch
            prediction = torch.argmax(test_output, 1)  # index of the max output for each match
            correct = (prediction == test_y).sum().float()  # total number of correct predictions
            accuracy = correct/len(test_y)  # Top-1 accuracy; with only 3 classes, top-2 is not tracked
            test_writer.add_scalar('Top-1 Accuracy',accuracy,epoch)  # write to the log
            test_end = time.time()
            print('validation done in '+str(int(test_end-test_start))+' seconds')
        print('validation finished, saving......')
        # model checkpointing
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'gesamt_counter':gesamt_counter
        }
        torch.save(checkpoint, checkpoint_path)  # save the checkpoint to disk
        torch.cuda.empty_cache()  # free some GPU memory
        scheuler.step()
        print('checkpoint saved')
# ===== /example/web/app_factory.py (repo: selecsosi/docker-compose-example) =====
from flask import Flask
def create_app(logger_override=None):
app = Flask("web")
if logger_override:
app.logger.handlers = logger_override.handlers
app.logger.setLevel(logger_override.level)
return app
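# Minimal usage sketch for the factory above (assumed, not part of the original module):
#
#   app = create_app()
#   app.run()   # or serve it via e.g. gunicorn "app_factory:create_app()"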
# ===== /modular/subscribe.py (repo: sunnyhong123/test_ui) =====
#coding=utf-8
from public.Commonlib import Common
from public.rd_email_name import RD
import time
class Subscirbe():  # (sic) original misspelling of "Subscribe" kept so existing imports keep working
def __init__(self,dr):
self.driver = dr
self.elt = Common(self.driver)
self.rm = RD(self.driver)
def subscribe_success(self):
        # subscription flow
subscribe_value = self.elt.get_text("xpath",'//*[@id="Modalnewsletter"]/div/div/div[2]/div/div/div/div[1]/div[3]')
print(subscribe_value)
if subscribe_value:
            # type in the email address
email =self.rm.rd_email()
print(email)
time.sleep(0.5) #//*[@id="Modalnewsletter"]/div/div/div[2]/div/div/div/div[2]/div/form/div/input[2]
self.elt.input_send("xpath",'//*[@id="Modalnewsletter"]/div/div/div[2]/div/div/div/div[2]/div/form/div/input[2]',email)
            # click JOIN US
self.elt.click('xpath','//*[@id="Modalnewsletter"]/div/div/div[2]/div/div/div/div[2]/div/form/div/div')
time.sleep(3)
success_text = self.elt.get_text('xpath','//*[@id="ModalSubsribeGood"]/div/div/div[2]/div/span')
if success_text == "You have successfully subscribed!":
                # close the confirmation dialog
self.elt.click('xpath','//*[@id="ModalSubsribeGood"]/div/div/div[1]/button')
return "subscribe success"
else:
return "subscribe fail"
# ===== /database_setup.py (repo: pedrocecchetti/pythonflaskapp) =====
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
Base = declarative_base()
class Restaurant(Base):
__tablename__ = 'restaurant'
id = Column(Integer, primary_key = True)
name = Column(String(250), nullable = False)
class MenuItem(Base):
__tablename__ = 'menu_item'
name = Column(String(80), nullable = False)
id = Column(Integer, primary_key = True)
course = Column(String(250))
description = Column(String(250))
price = Column(String(8))
restaurant_id = Column(Integer, ForeignKey('restaurant.id'))
restaurant = relationship(Restaurant)
@property
def serializeMenu(self):
return {
'name': self.name,
            'description': self.description,
'id': self.id,
'price': self.price,
'course': self.course
}
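    # Usage sketch for the serializer above (hypothetical item, not from the original file):
    #
    #   item = MenuItem(name='Soup', price='5.99', course='Starter')
    #   item.serializeMenu  # -> {'name': 'Soup', 'description': None, 'id': None, ...}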
####### INSERT AT THE END OF FILE#######
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.create_all(engine)
# ===== /wtiproj05_api_logic2.py (repo: DalduK/WTILAB) =====
import json
import numpy as np
import pandas as pd
import wtiproj05_redis as rr
import wtiproj04_ETL_and_data_processing as w
class api():
def get(self):
ll = rr.get_rand_user()
ll = ll.fillna(0)
return ll.to_dict()
def post(self, data):
immutable = frozenset(data.items())
read_frozen = dict(immutable)
id = 0
for k, v in read_frozen.items():
if v == 0:
read_frozen[k] = np.nan
elif k == 'userID':
id = v
else:
read_frozen[k] = float(v)
rr.add_ocena(read_frozen)
rr.update_user(id)
return read_frozen
def get_all(self):
r = rr.get_data()
r = r.fillna(0).to_dict('r')
jsonfiles = json.dumps(r)
return jsonfiles
def delet(self):
return rr.dell()
def avg_usr(self, user):
df = rr.get_data()
list1 = df.columns.values.tolist()
df.fillna(value=pd.np.nan, inplace=True)
list1.remove("movieID")
list1.remove("rating")
list1.remove("userID")
id = float(user)
mean = w.user_mean(df, list1, str(id))
dict = {}
for key in range(len(list1)):
dict[list1[key]] = mean[key]
dict['userID'] = user
return dict
def avg_all(self):
df = rr.get_data()
list1 = df.columns.values.tolist()
df.fillna(value=pd.np.nan, inplace=True)
list1.remove("movieID")
list1.remove("rating")
list1.remove("userID")
mean, _ = w.mean_genres(df, list1, True)
dict = {}
for key in range(len(list1)):
dict[list1[key]] = mean[key]
return dict
def get_profile(self, user):
return rr.get_user(user)
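# Hypothetical usage sketch (depends on the wtiproj05_redis module being configured;
# the user id and rating keys below are illustrative only):
#
#   a = api()
#   a.post({'userID': 75, 'rating': 4.5, 'Action': 1})  # store one rating, refresh the user profile
#   a.avg_usr(75)                                       # per-genre rating means for user 75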
# ===== /CoisaDeUfersa/AgroPopShop/src/main/resources/aeiou.py (repo: weixiaoUfesa/pweb_2020.2_weixiaoChen, MIT) =====
a=e=ii=o=u=0
text = input('name:')  # renamed from `str` so the built-in is not shadowed; prompt translated from Portuguese 'nome'
for i in text:
    if i=='a'or i=='A':a=a+1  # fixed: the original tested i=='I' here, which counted capital I as an 'a'
    if i=='e':e=e+1
    if i=='i':ii=ii+1
    if i=='o':o=o+1
    if i=='u':u=u+1
print('a:',a)
print('e:',e)
print('i:',ii)
print('o:',o)
print('u:',u)
# ===== /xml_process.py (repo: wfy1452473846/preprocess-dataset) =====
"""
xml_process.py
20190408
"""
import xml.dom.minidom
import os
from tqdm import tqdm
import cv2
import numpy as np
def pnpoly(test_point, polygon):
"""
Point Inclusion in Polygon Test
https://wrf.ecse.rpi.edu//Research/Short_Notes/pnpoly.html
:param test_point: the point to test , e[x, y]
:param polygon: the polygon , [(x1,y1), (x2,y2), (x3,y3), (x4,y4)]
:return is_inside : if in, return True
"""
is_inside = False
minX = polygon[0][0]
maxX = polygon[0][0]
minY = polygon[0][1]
maxY = polygon[0][1]
for p in polygon:
minX = min(p[0], minX)
maxX = max(p[0], maxX)
minY = min(p[1], minY)
maxY = max(p[1], maxY)
if test_point[0] < minX or test_point[0] > maxX or test_point[1] < minY or test_point[1] > maxY:
return False
j = len(polygon) - 1
for i in range(len(polygon)):
if ((polygon[i][1] > test_point[1]) != (polygon[j][1] > test_point[1]) and (
test_point[0] < (polygon[j][0] - polygon[i][0]) * (test_point[1] - polygon[i][1]) / (
polygon[j][1] - polygon[i][1]) + polygon[i][0])):
is_inside = not is_inside
j = i
return is_inside
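# Quick sanity check for pnpoly (a hypothetical square, not from the original file);
# note this helper is defined here but never called by gen_dataset_ssdd below:
#
#   pnpoly((1, 1), [(0, 0), (2, 0), (2, 2), (0, 2)])   # -> True
#   pnpoly((3, 1), [(0, 0), (2, 0), (2, 2), (0, 2)])   # -> False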
def gen_dataset_ssdd(xml_path, source_img_path, save_img_path):
"""
pick, crop and save target images
:param xml_path: str. The folder path save xml files
:param source_img_path: str. The source image's path
:param save_img_path: str. The path to save croped images
:return
"""
if not os.path.exists(xml_path):
raise FileExistsError('path not found! : %s' % xml_path)
if not os.path.exists(source_img_path):
raise FileExistsError('path not found! : %s' % source_img_path)
os.makedirs(save_img_path, exist_ok=True)
pbar = tqdm(os.scandir(xml_path))
for xml_file in pbar:
if xml_file.is_file():
extension = os.path.splitext(xml_file.path)[1][1:]
if 'xml' == extension:
pbar.set_description("Processing %s" % xml_file.path)
dom = xml.dom.minidom.parse(xml_file.path)
root = dom.documentElement
img_name = root.getElementsByTagName('filename')[0].firstChild.data
my_object_list = root.getElementsByTagName('object')
for my_object in my_object_list:
object_type = my_object.getElementsByTagName('name')[0].firstChild.data
if object_type == 'ship':
bndbox = my_object.getElementsByTagName('bndbox')[0]
xmin = int(bndbox.getElementsByTagName('xmin')[0].firstChild.data)
ymin = int(bndbox.getElementsByTagName('ymin')[0].firstChild.data)
xmax = int(bndbox.getElementsByTagName('xmax')[0].firstChild.data)
ymax = int(bndbox.getElementsByTagName('ymax')[0].firstChild.data)
a = os.path.join(source_img_path, img_name+'.jpg')
ori_image = cv2.imread(os.path.join(source_img_path, img_name+'.jpg'), -1)
box = [(xmin, ymin), (xmax, ymin), (xmin, ymax), (xmax, ymax)]
if len(ori_image.shape) == 3:
_, _, image_channels = ori_image.shape
sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1, image_channels], dtype=np.int)
else:
sub_image = np.zeros([ymax - ymin + 1, xmax - xmin + 1], dtype=np.int)
for y in range(sub_image.shape[0]): #row
for x in range(sub_image.shape[1]): #col
sub_image[y,x] = ori_image[ymin+y-1, xmin+x-1]
sub_imagename = img_name+'_'+str(xmin)+'_'+str(ymin)+'_'+str(xmax)+'_'+str(ymax)+'.png'
cv2.imwrite(os.path.join(save_img_path, sub_imagename), sub_image[:, :, 0])
if __name__ == '__main__':
gen_dataset_ssdd(xml_path=r'F:\dataset_se\SSDD\Annotations',
source_img_path=r'F:\dataset_se\SSDD\JPEGImages',
save_img_path=r'F:\dataset_se\SSDD\crop_img')
pass
| [
"noreply@github.com"
] | noreply@github.com |
d6d4045aeb90759fae31fe036e9cb108f29134fe | 4249b126a07d277a110e1acbf12175ebfcdcbaad | /Book-Chapter3_3-Bisection-SqRoot.py | 7ea22cfe11a57c883242f37ab43a9ad50962992b | [
"MIT"
] | permissive | tocheng/Book-Exercises-Intro-to-Computing-using-Python | 426b24d1175b889dd5aea5df1fbac4b485bd4ba4 | a54f164cb53dc5fa0bfb88cee9e1a5d69cb9fb09 | refs/heads/master | 2023-02-12T19:47:43.832801 | 2021-01-14T15:34:48 | 2021-01-14T15:34:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 17:09 2020
Introduction to Computation and Programming Using Python. John V. Guttag, 2016, 2nd ed
Book Chapter 3 Finger Exercises
Bisection search for square root
@author: Atanas Kozarev - github.com/ultraasi-atanas
"""
# Find x such that x**2 - 24 is within epsilon of 0
x = 24
epsilon = 0.01
count = 0
low = 0.0
high = max(1.0, x)
ans = (high + low)/2.0
while abs(ans**2 - x) >= epsilon:
count += 1
if ans**2 < x:
low = ans
else:
high = ans
ans = (high + low)/2.0
print('Count is', count)
print(ans, 'is close to square root of', x)
| [
"55555430+ultraasi-atanas@users.noreply.github.com"
] | 55555430+ultraasi-atanas@users.noreply.github.com |
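# For x = 24 this prints ans ~= 4.899 (true sqrt(24) ~= 4.89898); each pass halves
# the search interval [low, high], so the loop needs roughly log2(x / tolerance)
# iterations before |ans**2 - x| < epsilon.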
# ===== com/chapter03_python_begin/05_for/02_range.py (repo: Kyeongrok/python_crawler) =====
print(list(range(1, 10)))
"kyeongrok.kim@okiconcession.com"
] | kyeongrok.kim@okiconcession.com |
# ===== /sdt/util/logging.py (repo: ronvree/SoftDecisionTree) =====
import os
class TrainLog:
"""
Object for managing the train log directory
"""
def __init__(self, log_dir: str): # Store log in log_dir
self._log_dir = log_dir
self._logs = dict()
# Ensure the directories exist
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
if not os.path.isdir(self.metadata_dir):
os.mkdir(self.metadata_dir)
if not os.path.isdir(self.checkpoint_dir):
os.mkdir(self.checkpoint_dir)
@property
def log_dir(self):
return self._log_dir
@property
def checkpoint_dir(self):
return self._log_dir + '/checkpoints'
@property
def metadata_dir(self):
return self._log_dir + '/metadata'
def log_message(self, msg: str):
"""
Write a message to the log file
:param msg: the message string to be written to the log file
"""
with open(self.log_dir + '/log.txt', 'w') as f:
f.write(msg)
def create_log(self, log_name: str, key_name: str, *value_names):
"""
Create a csv for logging information
:param log_name: The name of the log. The log filename will be <log_name>.csv.
:param key_name: The name of the attribute that is used as key (e.g. epoch number)
:param value_names: The names of the attributes that are logged
"""
if log_name in self._logs.keys():
raise Exception('Log already exists!')
# Add to existing logs
self._logs[log_name] = (key_name, value_names)
# Create log file. Create columns
with open(self.log_dir + f'/{log_name}.csv', 'w') as f:
f.write(','.join((key_name,) + value_names) + '\n')
def log_values(self, log_name, key, *values):
"""
Log values in an existent log file
:param log_name: The name of the log file
:param key: The key attribute for logging these values
:param values: value attributes that will be stored in the log
"""
if log_name not in self._logs.keys():
raise Exception('Log not existent!')
if len(values) != len(self._logs[log_name][1]):
raise Exception('Not all required values are logged!')
# Write a new line with the given values
with open(self.log_dir + f'/{log_name}.csv', 'a') as f:
f.write(','.join(str(v) for v in (key,) + values) + '\n')
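# A minimal usage sketch (hypothetical paths and column names, not part of the original module):
#
#   log = TrainLog('./run_logs')
#   log.create_log('train', 'epoch', 'loss', 'accuracy')   # -> ./run_logs/train.csv
#   log.log_values('train', 0, 1.37, 0.42)                 # appends "0,1.37,0.42"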
# ===== /python/0067.py (repo: morizyun/aoj-ruby-python) =====
# Python 2 (raw_input / print statement): count islands of '1's on a 12x12 grid (AOJ 0067)
def get_map():
map = []
while True:
try:
tmp = list(raw_input())
if len(tmp) != 12: break
map.append(tmp)
except:
break
return map
def remove_island(x, y, map):  # flood fill: erase the island containing (x, y); `map` shadows the built-in
map[x][y] = 0
move = [[1, 0], [0, 1], [-1, 0], [0, -1]]
for i, j in move:
if 0 <= x + i <= 11 and 0 <= y + j <= 11 and map[x + i][y + j] == '1':
map = remove_island(x + i, y + j, map)
return map[:]
while True:
map = get_map()
if len(map) != 12: break
count = 0
for x in range(12):
for y in range(12):
if map[x][y] == '1':
count += 1
map = remove_island(x, y, map)
print count
# ===== /2.python_module/3.example/finance/methodForFin.py (repo: eggeggss/PythonTutorial) =====
def printMethodName():
print('Finance Method') | [
"eggeggss@gmail.com"
] | eggeggss@gmail.com |
d01b878b550fec82255c4c85c7fdef2cbb8fe1a7 | e23dba7ea5d2413025f5075d8fbaed893e3edc3a | /preprocess/preprocess_core.py | 4e9947c616e308a68053250fcabe9f6f1039e1b7 | [] | no_license | leoliu1221/engagement_db | 900bee11f6d9cada6a5986d10ada7da1ac7d6a45 | 0b8477079d9ab46ba342c77e52ea2f70368f767e | refs/heads/master | 2021-06-07T09:12:17.797754 | 2016-04-12T04:35:42 | 2016-04-12T04:35:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,714 | py | # -*- coding: utf-8 -*-
'''
#functions needed:
Preprocessing after preprocessing input:
[x] Normalize the X and Y with definition2.
[x] Use the current cut c to cut both X and Y into smaller pieces. E.g. for N(X), I will have N(X) [0:c], N(X)[c:2c], N(X)[2c:3c]… etc. Same thing goes for N(Y).
[x] For each pair of N(X) and N(Y), calculate the Euclidean distance E between each pair with definition 4.
[ ] Store the results into E0, E1, E2 ….E2 For later retrieval.
'''
import sys,os
testing = False
import numpy as np
def N_(X):
'''
N(x) = (x-x_avg)/sqrt(sum((x-x_avg)^2))
Args:
X is an array of floats.
Returns:
Normalzied(X)
'''
#use numpy arrays
X = np.array(X).astype(float)
#denominator = sqrt(sum((x-x_avg)^2))
denominator = np.sqrt(np.sum([(item-X.mean())**2 for item in X]))
if denominator ==0:
print '0 denominator in normalize_def2'
sys.exit(1)
#testing
if testing:
print 'denom',denominator
#(x-x_avg)/denominator
result = [(item-X.mean())/denominator for item in X]
return np.array(result)
def cut(X,c):
'''
Args:
X, array of normalized X. Warning: must be normalized
c, an integer cut for cutting X into [0:c],[c:2c]],[2c:3c].....
'''
c = int(c)
return [X[c*i:c*(i+1)]for i in xrange(len(X)/c)]
def E_(X,Y):
'''
Et ̄tˆ(X, Y ) = Dt ̄tˆ(Nt ̄tˆ(X), Nt ̄tˆ(Y ))
Definition 4
Args:
X, Y: arrays of floats.
Returns:
E from X and Y.
'''
N_x = N_(X)
N_y = N_(Y)
return np.linalg.norm(N_x-N_y)**2
def store(X,Y,c,key,desc,key_path = '/keys/'):
'''
Store the euclidean distance of c in files. from X and Y
E(X[0:c],Y[0:c]) --- file ../db_store/key/0_1
E(X[c:2c],Y[c:2c]) -- file ../db_store/key/1_2
E(X[2c:3c],[Y[2c:3c]]) -- file ../db_store/key/2_3
'''
cutX = cut(X,c)
cutY = cut(Y,c)
for i in xrange(len(cutX)):
file_name = str(i)+'_'+str(c)+'.dt'
dir_path = get_storage_path()+key+'/'
full_path = dir_path+file_name
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(full_path,'w') as f:
f.write(str(E_(cutX[i],cutY[i])))
with open(get_storage_path()+key_path+key,'w') as f:
f.write(desc)
return True
def get_storage_path():
full_path = os.path.realpath(__file__)
dir_name = (os.path.dirname(full_path))
return dir_name+'/../db_storage/'
if testing:
X = [0,2,4,4,0]
c = 2
print 'X',X
print 'c',c
N = N_(X)
print 'normalized',N
C = cut(N,c)
print 'cutted',C
E = E_(X,X)
print 'euclidean',E
# ===== algorithmic-toolbox/week4_divide_and_conquer/4_number_of_inversions/inversions.py (repo: pablo-var/data-structures-and-algorithms) =====
# Uses python3
import sys
def merge(arr, l, m, r):
number_of_inversions = 0
n1 = m - l + 1
n2 = r - m
# create temp arrays
L = [0] * (n1)
R = [0] * (n2)
# Copy data to temp arrays L[] and R[]
for i in range(0, n1):
L[i] = arr[l + i]
for j in range(0, n2):
R[j] = arr[m + 1 + j]
# Merge the temp arrays back into arr[l..r]
i = 0 # Initial index of first subarray
j = 0 # Initial index of second subarray
k = l # Initial index of merged subarray
while i < n1 and j < n2:
if L[i] <= R[j]:
arr[k] = L[i]
i += 1
else:
            number_of_inversions += len(L[i:])  # R[j] is smaller than every element left in L, so each of them forms one inversion
arr[k] = R[j]
j += 1
k += 1
# Copy the remaining elements of L[], if there
# are any
while i < n1:
arr[k] = L[i]
i += 1
k += 1
# Copy the remaining elements of R[], if there
# are any
while j < n2:
arr[k] = R[j]
j += 1
k += 1
return number_of_inversions
def get_number_of_inversions(arr, l, r):
number_of_inversions = 0
if l < r:
# Same as (l+r)//2, but avoids overflow for
# large l and h
m = (l + (r - 1)) // 2
number_of_inversions += get_number_of_inversions(arr, l, m)
number_of_inversions += get_number_of_inversions(arr, m + 1, r)
number_of_inversions += merge(arr, l, m, r)
return number_of_inversions
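# Quick sanity check (not in the original, which reads from stdin):
#
#   a = [2, 3, 9, 2, 9]
#   get_number_of_inversions(a, 0, len(a) - 1)   # -> 2: the pairs (3,2) and (9,2)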
if __name__ == '__main__':
input = sys.stdin.read()
n, *a = list(map(int, input.split()))
print(get_number_of_inversions(a, 0, len(a) - 1))
# ===== 数据嗨客/HousePricePridect/houserepdict1.py (repo: linxiaoby/PyCode) =====
# Adding needed libraries and reading data
import pandas as pd
import numpy as np
import warnings
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
warnings.filterwarnings('ignore')
train = pd.read_csv('trainData.csv')
test = pd.read_csv('testData.csv')
# Prints R2 and RMSE scores
def get_score(prediction, lables):
print('R2: {}'.format(r2_score(prediction, lables)))
print('RMSE: {}'.format(np.sqrt(mean_squared_error(prediction, lables))))
# Shows scores for train and validation sets
def train_test(estimator, x_trn, x_tst, y_trn, y_tst):
prediction_train = estimator.predict(x_trn)
# Printing estimator
print(estimator)
# Printing train scores
get_score(prediction_train, y_trn)
prediction_test = estimator.predict(x_tst)
# Printing test scores
print("Test")
get_score(prediction_test, y_tst)
# Splitting into features and labels and deleting variables I don't need
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
# I decided to get rid of features that have more than half of missing information or do not correlate to SalePrice
features.drop(['Utilities', 'RoofMatl', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'Heating', 'LowQualFinSF',
'BsmtFullBath', 'BsmtHalfBath', 'Functional', 'GarageYrBlt', 'GarageArea', 'GarageCond', 'WoodDeckSF',
'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'PoolArea', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal'],
axis=1, inplace=True)
# MSSubClass as str
#features['MSSubClass'] = features['MSSubClass'].astype(str)
# MSZoning NA in pred. filling with most popular values
features['MSZoning'] = features['MSZoning'].fillna(features['MSZoning'].mode()[0])
# LotFrontage NA in all. I suppose NA means 0
features['LotFrontage'] = features['LotFrontage'].fillna(features['LotFrontage'].mean())
# Alley NA in all. NA means no access
features['Alley'] = features['Alley'].fillna('NOACCESS')
# Converting OverallCond to str
# features.OverallCond = features.OverallCond.astype(str)
# MasVnrType NA in all. filling with most popular values
features['MasVnrType'] = features['MasVnrType'].fillna(features['MasVnrType'].mode()[0])
# BsmtQual, BsmtCond, BsmtExposure, BsmtFinType1, BsmtFinType2
# NA in all. NA means No basement
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
features[col] = features[col].fillna('NoBSMT')
# TotalBsmtSF NA in pred. I suppose NA means 0
features['TotalBsmtSF'] = features['TotalBsmtSF'].fillna(0)
# Electrical NA in pred. filling with most popular values
features['Electrical'] = features['Electrical'].fillna(features['Electrical'].mode()[0])
# KitchenAbvGr to categorical
# features['KitchenAbvGr'] = features['KitchenAbvGr'].astype(str)
# KitchenQual NA in pred. filling with most popular values
features['KitchenQual'] = features['KitchenQual'].fillna(features['KitchenQual'].mode()[0])
# FireplaceQu NA in all. NA means No Fireplace
features['FireplaceQu'] = features['FireplaceQu'].fillna('NoFP')
# GarageType, GarageFinish, GarageQual NA in all. NA means No Garage
for col in ('GarageType', 'GarageFinish', 'GarageQual'):
features[col] = features[col].fillna('NoGRG')
# GarageCars NA in pred. I suppose NA means 0
features['GarageCars'] = features['GarageCars'].fillna(0.0)
# SaleType NA in pred. filling with most popular values
features['SaleType'] = features['SaleType'].fillna(features['SaleType'].mode()[0])
# Year and Month to categorical
# features['YrSold'] = features['YrSold'].astype(str)
# features['MoSold'] = features['MoSold'].astype(str)
# Adding total sqfootage feature and removing Basement, 1st and 2nd floor features
features['TotalSF'] = features['TotalBsmtSF'] + features['1stFlrSF'] + features['2ndFlrSF']
features.drop(['TotalBsmtSF', '1stFlrSF', '2ndFlrSF'], axis=1, inplace=True)
## Log transformation of labels
train_labels = np.log(train_labels)
## Standardizing numeric features
numeric_features = features.loc[:,['LotFrontage', 'LotArea', 'GrLivArea', 'TotalSF']]
numeric_features_standardized = (numeric_features - numeric_features.mean())/numeric_features.std()
# Getting Dummies from Condition1 and Condition2
conditions = set([x for x in features['Condition1']] + [x for x in features['Condition2']])
dummies = pd.DataFrame(data=np.zeros((len(features.index), len(conditions))),
index=features.index, columns=conditions)
for i, cond in enumerate(zip(features['Condition1'], features['Condition2'])):
dummies.ix[i, cond] = 1
features = pd.concat([features, dummies.add_prefix('Condition_')], axis=1)
features.drop(['Condition1', 'Condition2'], axis=1, inplace=True)
# Getting Dummies from Exterior1st and Exterior2nd
exteriors = set([x for x in features['Exterior1st']] + [x for x in features['Exterior2nd']])
dummies = pd.DataFrame(data=np.zeros((len(features.index), len(exteriors))),
index=features.index, columns=exteriors)
for i, ext in enumerate(zip(features['Exterior1st'], features['Exterior2nd'])):
dummies.ix[i, ext] = 1
features = pd.concat([features, dummies.add_prefix('Exterior_')], axis=1)
features.drop(['Exterior1st', 'Exterior2nd'], axis=1, inplace=True)
# Getting Dummies from all other categorical vars
for col in features.dtypes[features.dtypes == 'object'].index:
for_dummy = features.pop(col)
features = pd.concat([features, pd.get_dummies(for_dummy, prefix=col)], axis=1)
### Copying features
features_standardized = features.copy()
### Replacing numeric features by standardized values
features_standardized.update(numeric_features_standardized)
### Splitting features
train_features = features.loc['train'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
test_features = features.loc['test'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
### Splitting standardized features
train_features_st = features_standardized.loc['train'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
test_features_st = features_standardized.loc['test'].drop('Id', axis=1).select_dtypes(include=[np.number]).values
### Shuffling train sets
train_features_st, train_features, train_labels = shuffle(train_features_st, train_features, train_labels, random_state = 5)
### Splitting
x_train, x_test, y_train, y_test = train_test_split(train_features, train_labels, test_size=0.1, random_state=200)
x_train_st, x_test_st, y_train_st, y_test_st = train_test_split(train_features_st, train_labels, test_size=0.1, random_state=200)
ENSTest = linear_model.ElasticNetCV(alphas=[0.0001, 0.0005, 0.001, 0.01, 0.1, 1, 10], l1_ratio=[.01, .1, .5, .9, .99], max_iter=5000).fit(x_train_st, y_train_st)
train_test(ENSTest, x_train_st, x_test_st, y_train_st, y_test_st)
# Average R2 score and standart deviation of 5-fold cross-validation
scores = cross_val_score(ENSTest, train_features_st, train_labels, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
GBest = ensemble.GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=3, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10, loss='huber').fit(x_train, y_train)
train_test(GBest, x_train, x_test, y_train, y_test)
# Average R2 score and standart deviation of 5-fold cross-validation
scores = cross_val_score(GBest, train_features_st, train_labels, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# Retraining models
GB_model = GBest.fit(train_features, train_labels)
ENST_model = ENSTest.fit(train_features_st, train_labels)
## Getting our SalePrice estimation
Final_labels = (np.exp(GB_model.predict(test_features)) + np.exp(ENST_model.predict(test_features_st))) / 2
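# The two models were trained on log(SalePrice), so each prediction is mapped back
# with np.exp before the simple two-model average above; e.g. for one model:
#
#   log_pred = GB_model.predict(test_features)   # in log-price space
#   pred = np.exp(log_pred)                      # back to dollars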
## Saving to CSV
pd.DataFrame({'Id': test.Id, 'SalePrice': Final_labels}).to_csv('answer.csv', index=False)
# ===== /kolla-docker/nova/nova/compute/resource_tracker.py (repo: bopopescu/Cloud-User-Management, Apache-2.0) =====
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
"""Returns True if the instance is in one of the resizing states.
:param instance: `nova.objects.Instance` object
"""
vm = instance.vm_state
task = instance.task_state
if vm == vm_states.RESIZED:
return True
if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH, task_states.REBUILDING]):
return True
return False
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
"""Helper function that injects various information from a compute node
object into the inventory dict returned from the virt driver's
get_inventory() method. This function allows us to marry information like
*_allocation_ratio and reserved memory amounts that are in the
compute_nodes DB table and that the virt driver doesn't know about with the
information the virt driver *does* know about.
Note that if the supplied inv_data contains allocation_ratio, reserved or
other fields, we DO NOT override the value with that of the compute node.
This is to ensure that the virt driver is the single source of truth
regarding inventory information. For instance, the Ironic virt driver will
always return a very specific inventory with allocation_ratios pinned to
1.0.
:param inv_data: Dict, keyed by resource class, of inventory information
returned from virt driver's get_inventory() method
:param compute_node: `objects.ComputeNode` describing the compute node
"""
if fields.ResourceClass.VCPU in inv_data:
cpu_inv = inv_data[fields.ResourceClass.VCPU]
if 'allocation_ratio' not in cpu_inv:
cpu_inv['allocation_ratio'] = cn.cpu_allocation_ratio
if 'reserved' not in cpu_inv:
cpu_inv['reserved'] = CONF.reserved_host_cpus
if fields.ResourceClass.MEMORY_MB in inv_data:
mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
if 'allocation_ratio' not in mem_inv:
mem_inv['allocation_ratio'] = cn.ram_allocation_ratio
if 'reserved' not in mem_inv:
mem_inv['reserved'] = CONF.reserved_host_memory_mb
if fields.ResourceClass.DISK_GB in inv_data:
disk_inv = inv_data[fields.ResourceClass.DISK_GB]
if 'allocation_ratio' not in disk_inv:
disk_inv['allocation_ratio'] = cn.disk_allocation_ratio
if 'reserved' not in disk_inv:
# TODO(johngarbutt) We should either move to reserved_host_disk_gb
# or start tracking DISK_MB.
reserved_mb = CONF.reserved_host_disk_mb
reserved_gb = compute_utils.convert_mb_to_ceil_gb(reserved_mb)
disk_inv['reserved'] = reserved_gb
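# Example shape of the inv_data dict handled above (illustrative values only,
# not from any real deployment):
#
#   {fields.ResourceClass.VCPU: {'total': 16, 'reserved': 0, 'allocation_ratio': 16.0},
#    fields.ResourceClass.MEMORY_MB: {'total': 32768, 'reserved': 512},
#    fields.ResourceClass.DISK_GB: {'total': 1024}}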
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
self.host = host
self.driver = driver
self.pci_tracker = None
# Dict of objects.ComputeNode objects, keyed by nodename
self.compute_nodes = {}
self.stats = stats.Stats()
self.tracked_instances = {}
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
self.scheduler_client = scheduler_client.SchedulerClient()
self.reportclient = self.scheduler_client.reportclient
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
def get_node_uuid(self, nodename):
try:
return self.compute_nodes[nodename].uuid
except KeyError:
raise exception.ComputeHostNotFound(host=nodename)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance: instance to reserve resources for.
:type instance: nova.objects.instance.Instance object
:param nodename: The Ironic nodename selected by the scheduler
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled(nodename):
# instance_claim() was called before update_available_resource()
# (which ensures that a compute node exists for nodename). We
# shouldn't get here but in case we do, just set the instance's
# host and nodename attribute (probably incorrect) and return a
# NoopClaim.
# TODO(jaypipes): Remove all the disabled junk from the resource
# tracker. Servicegroup API-level active-checking belongs in the
# nova-compute manager.
self._set_instance_host_and_node(instance, nodename)
return claims.NopClaim()
# sanity checks:
if instance.host:
LOG.warning("Host field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
if instance.node:
LOG.warning("Node field should not be set on the instance "
"until resources have been claimed.",
instance=instance)
# get the overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(instance)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': instance.flavor.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
context, instance.uuid)
claim = claims.Claim(context, instance, nodename, self, cn,
pci_requests, overhead=overhead, limits=limits)
# self._set_instance_host_and_node() will save instance to the DB
# so set instance.numa_topology first. We need to make sure
# that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
# so that the resource audit knows about any cpus we've pinned.
instance_numa_topology = claim.claimed_numa_topology
instance.numa_topology = instance_numa_topology
self._set_instance_host_and_node(instance, nodename)
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
self.pci_tracker.claim_instance(context, pci_requests,
instance_numa_topology)
# Mark resources in-use and update stats
self._update_usage_from_instance(context, instance, nodename)
elevated = context.elevated()
# persist changes to the compute node:
self._update(elevated, cn)
return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
image_meta=None, migration=None):
"""Create a claim for a rebuild operation."""
instance_type = instance.flavor
return self._move_claim(context, instance, instance_type, nodename,
migration, move_type='evacuation',
limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
migration, image_meta=None, limits=None):
"""Create a claim for a resize or cold-migration move."""
return self._move_claim(context, instance, instance_type, nodename,
migration, image_meta=image_meta,
limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
migration, move_type=None, image_meta=None, limits=None):
"""Indicate that resources are needed for a move to this host.
Move can be either a migrate/resize, live-migrate or an
evacuate/rebuild operation.
:param context: security context
:param instance: instance object to reserve resources for
:param new_instance_type: new instance_type being resized to
:param nodename: The Ironic nodename selected by the scheduler
:param image_meta: instance image metadata
:param move_type: move type - can be one of 'migration', 'resize',
'live-migration', 'evacuate'
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs
:param migration: A migration object if one was already created
elsewhere for this operation (otherwise None)
:returns: A Claim ticket representing the reserved resources. This
should be turned into finalize a resource claim or free
resources after the compute operation is finished.
"""
image_meta = image_meta or {}
if migration:
self._claim_existing_migration(migration, nodename)
else:
migration = self._create_migration(context, instance,
new_instance_type,
nodename, move_type)
if self.disabled(nodename):
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
return claims.NopClaim(migration=migration)
# get memory overhead required to build this instance:
overhead = self.driver.estimate_instance_overhead(new_instance_type)
LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
"MB", {'flavor': new_instance_type.memory_mb,
'overhead': overhead['memory_mb']})
LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
"GB", {'flavor': instance.flavor.root_gb,
'overhead': overhead.get('disk_gb', 0)})
LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
"vCPU(s)", {'flavor': instance.flavor.vcpus,
'overhead': overhead.get('vcpus', 0)})
cn = self.compute_nodes[nodename]
# TODO(moshele): we are recreating the pci requests even if
# there was no change on resize. This will cause allocating
# the old/new pci device in the resize phase. In the future
# we would like to optimise this.
new_pci_requests = pci_request.get_pci_requests_from_flavor(
new_instance_type)
new_pci_requests.instance_uuid = instance.uuid
# PCI requests come from two sources: instance flavor and
# SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
# On resize merge the SR-IOV ports pci_requests with the new
# instance flavor pci_requests.
if instance.pci_requests:
for request in instance.pci_requests.requests:
if request.alias_name is None:
new_pci_requests.requests.append(request)
claim = claims.MoveClaim(context, instance, nodename,
new_instance_type, image_meta, self, cn,
new_pci_requests, overhead=overhead,
limits=limits)
claim.migration = migration
claimed_pci_devices_objs = []
if self.pci_tracker:
# NOTE(jaypipes): ComputeNode.pci_device_pools is set below
# in _update_usage_from_instance().
claimed_pci_devices_objs = self.pci_tracker.claim_instance(
context, new_pci_requests, claim.claimed_numa_topology)
claimed_pci_devices = objects.PciDeviceList(
objects=claimed_pci_devices_objs)
# TODO(jaypipes): Move claimed_numa_topology out of the Claim's
# constructor flow so the Claim constructor only tests whether
# resources can be claimed, not consume the resources directly.
mig_context = objects.MigrationContext(
context=context, instance_uuid=instance.uuid,
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
old_pci_devices=instance.pci_devices,
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests)
instance.migration_context = mig_context
instance.save()
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(context, instance, migration,
nodename)
elevated = context.elevated()
self._update(elevated, cn)
return claim
def _create_migration(self, context, instance, new_instance_type,
nodename, move_type=None):
"""Create a migration record for the upcoming resize. This should
be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
migration = objects.Migration(context=context.elevated())
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = new_instance_type.id
migration.status = 'pre-migrating'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.source_node = instance.node
if move_type:
migration.migration_type = move_type
else:
migration.migration_type = migration_obj.determine_migration_type(
migration)
migration.create()
return migration
def _claim_existing_migration(self, migration, nodename):
"""Make an existing migration record count for resource tracking.
If a migration record was created already before the request made
it to this compute host, only set up the migration so it's included in
resource tracking. This should be done while the
COMPUTE_RESOURCES_SEMAPHORE is held.
"""
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
"""Tag the instance as belonging to this host. This should be done
while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
"""Untag the instance so it no longer belongs to the host.
This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so
the resource claim will not be lost if the audit process starts.
"""
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
"""Remove usage from the given instance."""
self._update_usage_from_instance(context, instance, nodename,
is_removed=True)
instance.clear_numa_topology()
self._unset_instance_host_and_node(instance)
self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
if self.pci_tracker:
# free old/new allocated pci devices
pci_devices = self._get_migration_context_resource(
'pci_devices', instance, prefix=prefix)
if pci_devices:
for pci_device in pci_devices:
self.pci_tracker.free_device(pci_device, instance)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
instance_type=None, prefix='new_'):
# Remove usage for an incoming/outgoing migration on the destination
# node.
if instance['uuid'] in self.tracked_migrations:
migration = self.tracked_migrations.pop(instance['uuid'])
if not instance_type:
ctxt = context.elevated()
instance_type = self._get_instance_type(ctxt, instance, prefix,
migration)
if instance_type is not None:
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix=prefix)
usage = self._get_usage_dict(
instance_type, numa_topology=numa_topology)
self._drop_pci_devices(instance, nodename, prefix)
self._update_usage(usage, nodename, sign=-1)
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
# Remove usage for an instance that is not tracked in migrations (such
# as on the source node after a migration).
# NOTE(lbeliveau): On resize on the same node, the instance is
# included in both tracked_migrations and tracked_instances.
elif (instance['uuid'] in self.tracked_instances):
self.tracked_instances.pop(instance['uuid'])
self._drop_pci_devices(instance, nodename, prefix)
# TODO(lbeliveau): Validate if numa needs the same treatment.
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled(nodename):
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(context, instance, nodename)
self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
def _check_for_nodes_rebalance(self, context, resources, nodename):
"""Check if nodes rebalance has happened.
The ironic driver maintains a hash ring mapping bare metal nodes
to compute nodes. If a compute dies, the hash ring is rebuilt, and
some of its bare metal nodes (more precisely, those not in ACTIVE
state) are assigned to other computes.
This method checks for this condition and adjusts the database
accordingly.
:param context: security context
:param resources: initial values
:param nodename: node name
:returns: True if a suitable compute node record was found, else False
"""
if not self.driver.rebalances_nodes:
return False
# It's possible ironic just did a node re-balance, so let's
# check if there is a compute node that already has the correct
# hypervisor_hostname. We can re-use that rather than create a
# new one and have to move existing placement allocations.
cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
context, nodename)
if len(cn_candidates) == 1:
cn = cn_candidates[0]
LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
{"name": nodename, "old": cn.host, "new": self.host})
cn.host = self.host
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return True
elif len(cn_candidates) > 1:
LOG.error(
"Found more than one ComputeNode for nodename %s. "
"Please clean up the orphaned ComputeNode records in your DB.",
nodename)
return False
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
The resource tracker will be inoperable if compute_node
is not defined. The compute_node will remain undefined if
we fail to create it or if there is no associated service
registered.
If this method has to create a compute node it needs initial
values - these come from resources.
:param context: security context
:param resources: initial values
"""
nodename = resources['hypervisor_hostname']
# if there is already a compute node just use resources
# to initialize
if nodename in self.compute_nodes:
cn = self.compute_nodes[nodename]
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
cn = self._get_compute_node(context, nodename)
if cn:
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
return
if self._check_for_nodes_rebalance(context, resources, nodename):
return
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources)
self.compute_nodes[nodename] = cn
cn.create()
LOG.info('Compute node record created for '
'%(host)s:%(node)s with uuid: %(uuid)s',
{'host': self.host, 'node': nodename, 'uuid': cn.uuid})
self._setup_pci_tracker(context, cn, resources)
self._update(context, cn)
def _setup_pci_tracker(self, context, compute_node, resources):
if not self.pci_tracker:
n_id = compute_node.id
self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
if 'pci_passthrough_devices' in resources:
dev_json = resources.pop('pci_passthrough_devices')
self.pci_tracker.update_devices_from_hypervisor_resources(
dev_json)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources):
"""Copy resource values to supplied compute_node."""
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
compute_node.stats = copy.deepcopy(self.stats)
# update the allocation ratios for the related ComputeNode object
compute_node.ram_allocation_ratio = self.ram_allocation_ratio
compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
notify information to message bus.
"""
metrics = objects.MonitorMetricList()
metrics_info = {}
for monitor in self.monitors:
try:
monitor.populate_metrics(metrics)
except NotImplementedError:
LOG.debug("The compute driver doesn't support host "
"metrics for %(mon)s", {'mon': monitor})
except Exception as exc:
LOG.warning("Cannot get the metrics from %(mon)s; "
"error: %(exc)s",
{'mon': monitor, 'exc': exc})
# TODO(jaypipes): Remove this when compute_node.metrics doesn't need
# to be populated as a JSONified string.
metrics = metrics.to_list()
if len(metrics):
metrics_info['nodename'] = nodename
metrics_info['metrics'] = metrics
metrics_info['host'] = self.host
metrics_info['host_ip'] = CONF.my_ip
notifier = rpc.get_notifier(service='compute', host=nodename)
notifier.info(context, 'compute.metrics.update', metrics_info)
return metrics
def update_available_resource(self, context, nodename):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
:param nodename: Temporary parameter representing the Ironic resource
node. This parameter will be removed once Ironic
baremetal resource nodes are handled like any other
resource in the system.
"""
LOG.debug("Auditing locally available compute resources for "
"%(host)s (node: %(node)s)",
{'node': nodename,
'host': self.host})
resources = self.driver.get_available_resource(nodename)
# NOTE(jaypipes): The resources['hypervisor_hostname'] field now
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
# just force it to empty string
if "cpu_info" not in resources or resources["cpu_info"] is None:
resources["cpu_info"] = ''
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
self._update_available_resource(context, resources)
def _pair_instances_to_migrations(self, migrations, instances):
instance_by_uuid = {inst.uuid: inst for inst in instances}
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# NOTE(danms): If this happens, we don't set it here, and
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov): this situation is possible for resize/cold
# migration, when the migration is finished but not yet
# confirmed/reverted; in that case the instance has already changed
# host to the destination and no matching happens.
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
# initialize the compute node object, creating it
# if it does not already exist.
self._init_compute_node(context, resources)
nodename = resources['hypervisor_hostname']
# if we could not init the compute node the tracker will be
# disabled and we should quit now
if self.disabled(nodename):
return
# Grab all instances assigned to this node:
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, nodename,
expected_attrs=['system_metadata',
'numa_topology',
'flavor', 'migration_context'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances, nodename)
# Grab all in-progress migrations:
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename)
self._pair_instances_to_migrations(migrations, instances)
self._update_usage_from_migrations(context, migrations, nodename)
self._remove_deleted_instances_allocations(
context, self.compute_nodes[nodename], migrations)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(orphans, nodename)
cn = self.compute_nodes[nodename]
# NOTE(yjiang5): Because pci device tracker status is not cleared in
# this periodic task, and also because the resource tracker is not
# notified when instances are deleted, we need to remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations, orphans)
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
metrics = self._get_host_metrics(context, nodename)
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
cn.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context, cn)
LOG.debug('Compute_service record updated for %(host)s:%(node)s',
{'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
"""Returns compute node for the host and nodename."""
try:
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, nodename)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
{'host': self.host, 'node': nodename})
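# Implicitly returns None when no record exists; callers such as
# _init_compute_node treat that as "create a new compute node".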
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
This is just a snapshot of resource usage recorded by the
virt driver.
The following resources are logged:
- free memory
- free disk
- free CPUs
- assignable PCI devices
"""
nodename = resources['hypervisor_hostname']
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
else:
free_vcpus = 'unknown'
pci_devices = resources.get('pci_passthrough_devices')
LOG.debug("Hypervisor/Node resource view: "
"name=%(node)s "
"free_ram=%(free_ram)sMB "
"free_disk=%(free_disk)sGB "
"free_vcpus=%(free_vcpus)s "
"pci_devices=%(pci_devices)s",
{'node': nodename,
'free_ram': free_ram_mb,
'free_disk': free_disk_gb,
'free_vcpus': free_vcpus,
'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
"""Report final calculate of physical memory, used virtual memory,
disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
cn = self.compute_nodes[nodename]
vcpus = cn.vcpus
if vcpus:
tcpu = vcpus
ucpu = cn.vcpus_used
LOG.debug("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s",
{'tcpu': vcpus,
'ucpu': ucpu})
else:
tcpu = 0
ucpu = 0
pci_stats = (list(cn.pci_device_pools) if
cn.pci_device_pools else [])
LOG.info("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
"used_ram=%(used_ram)sMB "
"phys_disk=%(phys_disk)sGB "
"used_disk=%(used_disk)sGB "
"total_vcpus=%(total_vcpus)s "
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s",
{'node': nodename,
'phys_ram': cn.memory_mb,
'used_ram': cn.memory_mb_used,
'phys_disk': cn.local_gb,
'used_disk': cn.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_stats})
def _resource_change(self, compute_node):
"""Check to see if any resources have changed."""
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if not obj_base.obj_equal_prims(
compute_node, old_compute, ['updated_at']):
self.old_resources[nodename] = copy.deepcopy(compute_node)
return True
return False
def _update(self, context, compute_node):
"""Update partial stats locally and populate them to Scheduler."""
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB.
# NOTE(jianghuaw): Once we completely move to using get_inventory()
# for all resource providers' inventory data, we can remove this
# check. At the moment we still need it, so save compute_node here.
compute_node.save()
# NOTE(jianghuaw): Some resources (e.g. VGPU) are not saved in the
# compute_node object; instead, the inventory data for these
# resources is reported by the driver's get_inventory(). So even if
# there is no resource change for compute_node as above, we still
# need to get the inventory and use the scheduler_client interfaces
# to update it in placement. It's the scheduler_client's
# responsibility to ensure the update request to placement only
# happens when the inventory has changed.
nodename = compute_node.hypervisor_hostname
# Persist the stats to the Scheduler
try:
inv_data = self.driver.get_inventory(nodename)
_normalize_inventory_from_cn_obj(inv_data, compute_node)
self.scheduler_client.set_inventory_for_provider(
context,
compute_node.uuid,
compute_node.hypervisor_hostname,
inv_data,
)
except NotImplementedError:
# Eventually all virt drivers will return an inventory dict in the
# format that the placement API expects and we'll be able to remove
# this code branch
self.scheduler_client.update_compute_node(context, compute_node)
try:
traits = self.driver.get_traits(nodename)
except NotImplementedError:
pass
else:
# NOTE(mgoddard): set_traits_for_provider does not refresh the
# provider tree in the report client, so we rely on the above call
# to set_inventory_for_provider or update_compute_node to ensure
# that the resource provider exists in the tree and has had its
# cached traits refreshed.
self.reportclient.set_traits_for_provider(
context, compute_node.uuid, traits)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
mem_usage = usage['memory_mb']
disk_usage = usage.get('root_gb', 0)
vcpus_usage = usage.get('vcpus', 0)
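# Let the virt driver add its per-instance overhead (e.g. memory the
# hypervisor itself needs for this guest) on top of the flavor-derived
# usage.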
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
disk_usage += overhead.get('disk_gb', 0)
vcpus_usage += overhead.get('vcpus', 0)
cn = self.compute_nodes[nodename]
cn.memory_mb_used += sign * mem_usage
cn.local_gb_used += sign * disk_usage
cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
cn.vcpus_used += sign * vcpus_usage
# free ram and disk may be negative, depending on policy:
cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
cn.free_disk_gb = cn.local_gb - cn.local_gb_used
cn.running_vms = self.stats.num_instances
# Calculate the numa usage
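# A sign of -1 means the usage is being removed, so the NUMA
# accounting should free the instance's cells rather than consume them.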
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
cn, usage, free)
cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
nodename):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
if not _is_trackable_migration(migration):
return
uuid = migration.instance_uuid
LOG.info("Updating from migration %s", uuid)
incoming = (migration.dest_compute == self.host and
migration.dest_node == nodename)
outbound = (migration.source_compute == self.host and
migration.source_node == nodename)
same_node = (incoming and outbound)
record = self.tracked_instances.get(uuid, None)
itype = None
numa_topology = None
sign = 0
if same_node:
# Same node resize. Record usage for the 'new_' resources. This
# is executed on resize_claim().
if (instance['instance_type_id'] ==
migration.old_instance_type_id):
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
else:
# The instance is already set to the new flavor (this is done
# by the compute manager on finish_resize()), hold space for a
# possible revert to the 'old_' resources.
# NOTE(lbeliveau): When the periodic audit timer gets
# triggered, the compute usage gets reset. The usage for an
# instance that is migrated to the new flavor but not yet
# confirmed/reverted will first get accounted for by
# _update_usage_from_instances(). This method will then be
# called, and we need to account for the '_old' resources
# (just in case).
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
elif incoming and not record:
# instance has not yet migrated here:
itype = self._get_instance_type(context, instance, 'new_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance)
# Allocate pci device(s) for the instance.
sign = 1
elif outbound and not record:
# instance migrated, but record usage for a possible revert:
itype = self._get_instance_type(context, instance, 'old_',
migration)
numa_topology = self._get_migration_context_resource(
'numa_topology', instance, prefix='old_')
if itype:
cn = self.compute_nodes[nodename]
usage = self._get_usage_dict(
itype, numa_topology=numa_topology)
if self.pci_tracker and sign:
self.pci_tracker.update_pci_for_instance(
context, instance, sign=sign)
self._update_usage(usage, nodename)
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
obj = objects.PciDevicePoolList()
cn.pci_device_pools = obj
self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
filtered = {}
instances = {}
self.tracked_migrations.clear()
# do some defensive filtering against bad migration records in the
# database:
for migration in migrations:
uuid = migration.instance_uuid
try:
if uuid not in instances:
instances[uuid] = migration.instance
except exception.InstanceNotFound as e:
# migration referencing deleted instance
LOG.debug('Migration instance not found: %s', e)
continue
# skip migration if instance isn't in a resize state:
if not _instance_in_resize_state(instances[uuid]):
LOG.warning("Instance not resizing, skipping migration.",
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
# NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
if other_migration:
om = other_migration
other_time = om.updated_at or om.created_at
migration_time = migration.updated_at or migration.created_at
if migration_time > other_time:
filtered[uuid] = migration
else:
filtered[uuid] = migration
for migration in filtered.values():
instance = instances[migration.instance_uuid]
# Skip migration (and mark it as error) if it doesn't match the
# instance migration id.
# This can happen if we have a stale migration record.
# We want to proceed if instance.migration_context is None
if (instance.migration_context is not None and
instance.migration_context.migration_id != migration.id):
LOG.info("Current instance migration %(im)s doesn't match "
"migration %(m)s, marking migration as error. "
"This can occur if a previous migration for this "
"instance did not complete.",
{'im': instance.migration_context.migration_id,
'm': migration.id})
migration.status = "error"
migration.save()
continue
try:
self._update_usage_from_migration(context, instance, migration,
nodename)
except exception.FlavorNotFound:
LOG.warning("Flavor could not be found, skipping migration.",
instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, nodename,
is_removed=False, require_allocation_refresh=False):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
# NOTE(sfinucan): Both brand new instances as well as instances that
# are being unshelved will have is_new_instance == True
is_removed_instance = not is_new_instance and (is_removed or
instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
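# sign controls whether the usage computed below is added (+1) to or
# subtracted (-1) from the compute node totals.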
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
if is_removed_instance:
self.tracked_instances.pop(uuid)
sign = -1
cn = self.compute_nodes[nodename]
self.stats.update_stats_for_instance(instance, is_removed_instance)
cn.stats = copy.deepcopy(self.stats)
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
sign=sign)
if require_allocation_refresh:
LOG.debug("Auto-correcting allocations.")
self.reportclient.update_instance_allocation(context, cn,
instance, sign)
# new instance, update compute node resource usage:
self._update_usage(self._get_usage_dict(instance), nodename,
sign=sign)
cn.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
obj = self.pci_tracker.stats.to_device_pools_obj()
cn.pci_device_pools = obj
else:
cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
"""Calculate resource usage based on instance utilization. This is
different from the hypervisor's view, as it accounts for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
cn = self.compute_nodes[nodename]
# set some initial values, reserve room for host/hypervisor:
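# reserved_host_disk_mb is configured in MB while local_gb_used is
# tracked in GB, hence the division by 1024.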
cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
cn.memory_mb_used = CONF.reserved_host_memory_mb
cn.vcpus_used = CONF.reserved_host_cpus
cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
cn.current_workload = 0
cn.running_vms = 0
# NOTE(jaypipes): In Pike, we need to be tolerant of Ocata compute
# nodes that overwrite placement allocations to look like what the
# resource tracker *thinks* is correct. When an instance is
# migrated from an Ocata compute node to a Pike compute node, the
# Pike scheduler will have created a "doubled-up" allocation that
# contains allocated resources against both the source and
# destination hosts. The Ocata source compute host, during its
# update_available_resource() periodic call will find the instance
# in its list of known instances and will call
# update_instance_allocation() in the report client. That call will
# pull the allocations for the instance UUID which will contain
# both the source and destination host providers in the allocation
# set. Seeing that this is different from what the Ocata source
# host thinks it should be and will overwrite the allocation to
# only be an allocation against itself.
#
# And therefore, here we need to have Pike compute hosts
# "correct" the improper healing that the Ocata source host did
# during its periodic interval. When the instance is fully migrated
# to the Pike compute host, the Ocata compute host will find an
# allocation that refers to itself for an instance it no longer
# controls and will *delete* all allocations that refer to that
# instance UUID, assuming that the instance has been deleted. We
# need the destination Pike compute host to recreate that
# allocation to refer to its own resource provider UUID.
#
# For Pike compute nodes that migrate to either a Pike compute host
# or a Queens compute host, we do NOT want the Pike compute host to
# be "healing" allocation information. Instead, we rely on the Pike
# scheduler to properly create allocations during scheduling.
#
# Pike compute hosts may still rework an
# allocation for an instance in a move operation during
# confirm_resize() on the source host which will remove the
# source resource provider from any allocation for an
# instance.
#
# In Queens and beyond, the scheduler will understand when
# a move operation has been requested and instead of
# creating a doubled-up allocation that contains both the
# source and destination host, the scheduler will take the
# original allocation (against the source host) and change
# the consumer ID of that allocation to be the migration
# UUID and not the instance UUID. The scheduler will
# allocate the resources for the destination host to the
# instance UUID.
compute_version = objects.Service.get_minimum_version(
context, 'nova-compute')
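# A minimum nova-compute service version below 22 indicates the
# deployment still contains Ocata-era (or older) computes.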
has_ocata_computes = compute_version < 22
# Some drivers (ironic) still need the allocations to be
# fixed up, as they transition the way their inventory is reported.
require_allocation_refresh = (
has_ocata_computes or
self.driver.requires_allocation_refresh)
msg_allocation_refresh = (
"Compute driver doesn't require allocation refresh and we're on a "
"compute host in a deployment that only has compute hosts with "
"Nova versions >=16 (Pike). Skipping auto-correction of "
"allocations. ")
if require_allocation_refresh:
if self.driver.requires_allocation_refresh:
msg_allocation_refresh = (
"Compute driver requires allocation refresh. ")
elif has_ocata_computes:
msg_allocation_refresh = (
"We're on a compute host from Nova version >=16 (Pike or "
"later) in a deployment with at least one compute host "
"version <16 (Ocata or earlier). ")
msg_allocation_refresh += (
"Will auto-correct allocations to handle "
"Ocata-style assumptions.")
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
if msg_allocation_refresh:
LOG.debug(msg_allocation_refresh)
msg_allocation_refresh = False
self._update_usage_from_instance(context, instance, nodename,
require_allocation_refresh=require_allocation_refresh)
def _remove_deleted_instances_allocations(self, context, cn,
migrations):
migration_uuids = [migration.uuid for migration in migrations
if 'uuid' in migration]
# NOTE(jaypipes): All of this code sucks. It's basically dealing with
# all the corner cases in move, local delete, unshelve and rebuild
# operations for when allocations should be deleted when things didn't
# happen according to the normal flow of events where the scheduler
# always creates allocations for an instance
known_instances = set(self.tracked_instances.keys())
allocations = self.reportclient.get_allocations_for_resource_provider(
context, cn.uuid) or {}
read_deleted_context = context.elevated(read_deleted='yes')
for consumer_uuid, alloc in allocations.items():
if consumer_uuid in known_instances:
LOG.debug("Instance %s actively managed on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
if consumer_uuid in migration_uuids:
LOG.debug("Migration %s is active on this compute host "
"and has allocations in placement: %s.",
consumer_uuid, alloc)
continue
# We know these are instances now, so proceed
instance_uuid = consumer_uuid
try:
instance = objects.Instance.get_by_uuid(read_deleted_context,
instance_uuid,
expected_attrs=[])
except exception.InstanceNotFound:
# The instance isn't even in the database. Either the scheduler
# _just_ created an allocation for it and we're racing with the
# creation in the cell database, or the instance was deleted
# and fully archived before we got a chance to run this. The
# former is far more likely than the latter. Avoid deleting
# allocations for a building instance here.
LOG.info("Instance %(uuid)s has allocations against this "
"compute host but is not found in the database.",
{'uuid': instance_uuid},
exc_info=False)
continue
if instance.deleted:
# The instance is gone, so we definitely want to remove
# allocations associated with it.
# NOTE(jaypipes): This will not be true if/when we support
# cross-cell migrations...
LOG.debug("Instance %s has been deleted (perhaps locally). "
"Deleting allocations that remained for this "
"instance against this compute host: %s.",
instance_uuid, alloc)
self.reportclient.delete_allocation_for_instance(context,
instance_uuid)
continue
if not instance.host:
# Allocations related to instances being scheduled should not
# be deleted if we already wrote the allocation previously.
LOG.debug("Instance %s has been scheduled to this compute "
"host, the scheduler has made an allocation "
"against this compute node but the instance has "
"yet to start. Skipping heal of allocation: %s.",
instance_uuid, alloc)
continue
if (instance.host == cn.host and
instance.node == cn.hypervisor_hostname):
# The instance is supposed to be on this compute host but is
# not in the list of actively managed instances.
LOG.warning("Instance %s is not being actively managed by "
"this compute host but has allocations "
"referencing this compute host: %s. Skipping "
"heal of allocation because we do not know "
"what to do.", instance_uuid, alloc)
continue
if instance.host != cn.host:
# The instance has been moved to another host either via a
# migration, evacuation or unshelve in between the time when we
# ran InstanceList.get_by_host_and_node(), added those
# instances to RT.tracked_instances and the above
# Instance.get_by_uuid() call. We SHOULD attempt to remove any
# allocations that reference this compute host if the VM is in
# a stable terminal state (i.e. it isn't in a state of waiting
# for resize to confirm/revert), however if the destination
# host is an Ocata compute host, it will delete the allocation
# that contains this source compute host information anyway and
# recreate an allocation that only refers to itself. So we
# don't need to do anything in that case. Just log the
# situation here for information but don't attempt to delete or
# change the allocation.
LOG.warning("Instance %s has been moved to another host "
"%s(%s). There are allocations remaining against "
"the source host that might need to be removed: "
"%s.",
instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
node_type='source'):
self._delete_allocation_for_moved_instance(
context, instance, node, 'evacuated', node_type)
def delete_allocation_for_migrated_instance(self, context, instance, node):
self._delete_allocation_for_moved_instance(context, instance, node,
'migrated')
def _delete_allocation_for_moved_instance(
self, context, instance, node, move_type, node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of %s "
"instance on the %s node %s",
move_type, node_type, cn_uuid, instance=instance)
def delete_allocation_for_failed_resize(self, context, instance, node,
flavor):
"""Delete instance allocations for the node during a failed resize
:param context: The request context.
:param instance: The instance being resized/migrated.
:param node: The node provider on which the instance should have
allocations to remove. If this is a resize to the same host, then
the new_flavor resources are subtracted from the single allocation.
:param flavor: This is the new_flavor during a resize.
"""
cn = self.compute_nodes[node]
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn.uuid, self.reportclient, flavor):
if instance.instance_type_id == flavor.id:
operation = 'migration'
else:
operation = 'resize'
LOG.error('Failed to clean allocation after a failed '
'%(operation)s on node %(node)s',
{'operation': operation, 'node': cn.uuid},
instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
"""Include orphaned instances in usage."""
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
instance):
self.reportclient.delete_allocation_for_instance(context,
instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, **updates):
"""Make a usage dict _update methods expect.
Accepts a dict or an Instance or Flavor object, and a set of updates.
Converts the object to a dict and applies the updates.
:param object_or_dict: instance or flavor as an object or just a dict
:param updates: key-value pairs to update the passed object.
Currently only considers 'numa_topology', all other
keys are ignored.
:returns: a dict with all the information from object_or_dict updated
with updates
"""
usage = {}
if isinstance(object_or_dict, objects.Instance):
usage = {'memory_mb': object_or_dict.flavor.memory_mb,
'vcpus': object_or_dict.flavor.vcpus,
'root_gb': object_or_dict.flavor.root_gb,
'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
'numa_topology': object_or_dict.numa_topology}
elif isinstance(object_or_dict, objects.Flavor):
usage = obj_base.obj_to_primitive(object_or_dict)
else:
usage.update(object_or_dict)
for key in ('numa_topology',):
if key in updates:
usage[key] = updates[key]
return usage
| [
"Mr.Qinlichao@hotmail.com"
] | Mr.Qinlichao@hotmail.com |
ced414a621cd118c2763f7acf3b292e0b3e15b21 | 234ed1b2c5b070dc0263e7e065c1f6b82d8a7c88 | /new_code/ui_activities/course_planner_components/course_information_window.py | de7acecaa4370033a7ebf6d79f52ccb93ebe53c3 | [] | no_license | Mossaka/grabbing_UCSD_courses | 8dec226ecdd42689d84740f456fa6ca9b670b228 | 88065dfa4f535d8f3d0ed3ca0a78ab22bec30cc1 | refs/heads/master | 2021-01-19T04:45:05.363362 | 2017-09-07T15:04:27 | 2017-09-07T15:04:27 | 87,393,494 | 1 | 1 | null | 2017-07-18T03:54:14 | 2017-04-06T06:20:27 | Python | UTF-8 | Python | false | false | 1,027 | py | import tkinter as tk
class ShowInfo:
def __init__(self, upper, course_ID, connector):
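"""Create a Toplevel window showing details for course_ID, fetched via connector."""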
self.info_level = tk.Toplevel(upper)
self.info_level.title("More Info about {}".format(course_ID))
self.info_level.geometry()
self.info_level.resizable(0, 0)
self.info_level.bind('<Escape>', lambda event: self.info_level.destroy())
self.info_level.focus()
self.connector = connector
self.course_ID = course_ID
self.course = self.connector.get_course(self.course_ID)
self.info_frame = tk.Frame(self.info_level)
self.create_widgets()
def create_widgets(self):
self.info_frame.pack()
name_label = tk.Label(self.info_frame, text='Name: {}'.format(self.course_ID) )
name_label.grid(row=0, column=0, padx=5, pady=5, sticky=tk.NE)
description_label = tk.Label(self.info_frame, text='Description: {}'.format(self.course.get_description()))
description_label.grid(row=1, column=0, padx=5, pady=5, sticky=tk.NE) | [
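# Illustrative usage (assumes a parent Tk root and a connector whose
# get_course(course_id) returns an object with a get_description()
# method):
#
#   root = tk.Tk()
#   ShowInfo(root, 'CSE 100', connector)
#   root.mainloop()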
"jiz417@ucsd.edu"
] | jiz417@ucsd.edu |
fe9f5ac55217dfc033c9cc3c4fd89943726640c8 | 614e01d08c8bb5adbe4d263d9dba04688502a12f | /toggl_driver/commands/start_timer.py | fe1b1ed883f441770071e74e3ae2ab9cf118f09e | [
"MIT"
] | permissive | cloew/TogglDriver | 28b2b2ebd396d08000fc92e0013f15722975ae06 | 7b0528710e7686690a88a22cf5cca1f3ac55ebbf | refs/heads/master | 2021-01-10T13:05:58.759515 | 2015-10-01T03:43:52 | 2015-10-01T03:43:52 | 43,025,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | from ..args import OptionalProjectArg
from ..config import GlobalConfig
from kao_command.args import Arg, BareWords
class StartTimer:
""" Represents a command to start the Toggl Timer """
description = "Start the Toggl Timer"
args = [Arg('description', nargs='+', provider=BareWords),
OptionalProjectArg(help="start the timer within")]
def run(self, *, description, project=None, workspace=None):
""" Start the timer """
entry = None
if project:
entry = GlobalConfig.connection.TimeEntry(description=description, pid=project.id)
else:
entry = GlobalConfig.connection.TimeEntry(description=description, wid=workspace.id)
entry.start() | [
"cloew123@gmail.com"
] | cloew123@gmail.com |