hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5afe5bfa12b0d62897f347a918c6c93f3a565284 | 5,619 | py | Python | zsync_network.py | disenone/zsync | bcd914e33a88ab505ddabb3dca7c440998bbfdb0 | [
"MIT"
] | null | null | null | zsync_network.py | disenone/zsync | bcd914e33a88ab505ddabb3dca7c440998bbfdb0 | [
"MIT"
] | null | null | null | zsync_network.py | disenone/zsync | bcd914e33a88ab505ddabb3dca7c440998bbfdb0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import zmq
import cPickle
from zsync_logger import MYLOGGER as logging
from collections import deque
import zhelpers
import time
| 26.504717 | 96 | 0.559352 | # -*- coding: utf-8 -*-
import zmq
import cPickle
from zsync_logger import MYLOGGER as logging
from collections import deque
import zhelpers
import time
class RpcCaller(object):
    """Callable stub bound to one remote function name.

    Invoking the instance pickles the positional arguments and ships the
    frames ``identity... + [funcName, pickled_args]`` via the transceiver.
    """
    def __init__(self, transceiver, sock, funcName, identity=None):
        self.transceiver = transceiver
        self.sock = sock
        self.funcName = funcName
        self.identity = identity

    def __call__(self, *args):
        frames = [self.funcName, cPickle.dumps(args)]
        if self.identity:
            # Prepend routing frames for ROUTER-style peers.
            frames = self.identity + frames
        self.transceiver.send(self.sock, *frames)
class Proxy(object):
    """Handle for a remote peer reachable through `transceiver`/`sock`.

    Any attribute not defined on the class resolves (via __getattr__) to an
    RpcCaller stub, so ``proxy.foo(a, b)`` sends a pickled call of `foo`.
    """
    def __init__(self, transceiver, sock, identity=None):
        self.transceiver = transceiver
        self.sock = sock
        # Normalise identity to a list of frames (ROUTER envelope), or keep
        # None for directly connected sockets.
        if type(identity) is str:
            identity = [identity]
        elif identity is not None:
            if not type(identity) in [tuple, list]:
                raise ValueError('identity is invalid: %s' % identity)
            identity = list(identity)
        self.identity = identity
        return
    def __getattr__(self, name):
        # Only invoked for missing names, so real attributes/methods
        # (sock, call_raw, ...) are never turned into remote calls.
        return RpcCaller(self.transceiver, self.sock, name, self.identity)
    def __cmp__(self, other):
        # Python-2-only comparison protocol (cmp builtin); has no effect
        # under Python 3.
        return cmp((self.identity, self.sock, self.transceiver),
                   (other.identity, other.sock, other.transceiver))
    def __str__(self):
        return str(self.identity)
    def call_raw(self, name, *args):
        """Send `name` with RAW_MSG_FLAG so `args` bypass pickling."""
        args = (zhelpers.RAW_MSG_FLAG, name) + args
        if self.identity:
            args = tuple(self.identity) + args
        self.transceiver.send(self.sock, *args)
        return
class SendQueue(dict):
    """Per-socket backlog of multipart messages awaiting a writable socket.

    Maps sock -> deque of pending messages; a socket's entry is removed
    once its backlog drains.
    """
    def push_queue(self, sock, msg):
        """Append `msg` to `sock`'s backlog, creating the deque on first use."""
        if sock not in self:
            self[sock] = deque()
        self[sock].append(msg)
        return
    def send(self, sock):
        """Try to flush the head of `sock`'s backlog without blocking.

        Sends at most one message per call; on failure the message stays
        queued, preserving ordering.
        """
        if not self.get(sock):
            return
        queue = self[sock]
        msg = queue[0]
        try:
            sock.send_multipart(msg, zmq.NOBLOCK)
        except zmq.ZMQError:
            # Fix: was a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit. zmq.Again ("would block") is a
            # subclass of zmq.ZMQError, so the retry-later behaviour is kept.
            return
        queue.popleft()
        if not queue:
            self.pop(sock, None)
        return
class Transceiver(object):
    """Owns the pollers and the outgoing backlog for a set of zmq sockets.

    Incoming multipart messages are decoded in dispatch() and routed to
    the handler method named by the first (non-identity) frame.
    """
    def __init__(self):
        self.send_queue = SendQueue()
        self.in_poller = zmq.Poller()    # POLLIN only
        self.all_poller = zmq.Poller()   # default flags (in + out)
        self.timeout_checkers = {}       # sock -> TimeoutChecker
        return
    def register(self, sock):
        """Track `sock` on both pollers."""
        self.in_poller.register(sock, zmq.POLLIN)
        self.all_poller.register(sock)
        return
    def delay_all_timeout(self):
        """Reset every registered timeout countdown."""
        # Was a side-effect list comprehension; an explicit loop is clearer.
        for checker in self.timeout_checkers.itervalues():
            checker.feed()
        return
    def add_timeout(self, sock, timeout):
        """Watch `sock` for `timeout` seconds of silence."""
        self.timeout_checkers[sock] = TimeoutChecker(timeout)
        return
    def del_timeout(self, sock):
        self.timeout_checkers.pop(sock, None)
        return
    def check_timeout(self):
        """Return False (and log) if any watched socket has timed out."""
        if not self.timeout_checkers:
            return True
        any_timeout = any([checker.timeout() for checker in self.timeout_checkers.itervalues()])
        if any_timeout:
            # Fix: was `MYLOGGER.error(...)` -> NameError; the module
            # imports `from zsync_logger import MYLOGGER as logging`.
            logging.error('connect timeout')
            return False
        return True
    def send(self, sock, *msg):
        """Send a multipart message without blocking.

        Returns True on immediate success, False when the message was
        queued instead (and None when `msg` is empty).
        """
        if not msg:
            return
        if not self.send_queue:
            try:
                sock.send_multipart(msg, zmq.NOBLOCK)
                # logging.debug('sended: %s' % (msg, ))
                return True
            except Exception as e:
                # logging.debug('send error: %s, %s' % (e, msg))
                self.send_queue.push_queue(sock, msg)
        else:
            # A backlog already exists: queue behind it to keep ordering.
            self.send_queue.push_queue(sock, msg)
        return False
    def queue_send(self, sock):
        """Flush at most one backlogged message for `sock`."""
        self.send_queue.send(sock)
        return
    def recv(self, sock):
        """Non-blocking receive; feeds the socket's timeout watchdog."""
        msg = sock.recv_multipart(zmq.NOBLOCK)
        if sock in self.timeout_checkers:
            self.timeout_checkers[sock].feed()
        return msg
    def poll(self, ms):
        # Only watch for writability while something is waiting to go out.
        if self.send_queue:
            return zhelpers.poll(self.all_poller, ms)
        else:
            return zhelpers.poll(self.in_poller, ms)
    def deal_poll(self, polls):
        """Drain readable sockets into dispatch() and flush writable ones."""
        if not polls:
            return
        for sock, state in polls.iteritems():
            if zmq.POLLIN in state:
                while True:
                    try:
                        msg = self.recv(sock)
                        # logging.debug('recved :%s' % msg)
                    except Exception:
                        # Fix: was Python-2-only `except Exception, e` syntax.
                        # An exception here presumably means the socket is
                        # drained (zmq.Again) -- stop reading it.
                        break
                    self.dispatch(sock, msg)
            if zmq.POLLOUT in state:
                self.queue_send(sock)
        return
    def dispatch(self, sock, msg):
        """Decode one incoming message and invoke the named handler.

        ROUTER sockets carry identity frames first; a leading RAW_MSG_FLAG
        frame marks argument frames that are passed through un-pickled.
        """
        identity = None
        if sock.socket_type == zmq.ROUTER:
            identity, msg = zhelpers.split_identity(msg)
        is_raw = msg[0] == zhelpers.RAW_MSG_FLAG
        if is_raw:
            msg = msg[1:]
        funcn = msg[0]
        func = getattr(self, funcn, None)
        if not func:
            # Fix: MYLOGGER was a NameError (imported as `logging`).
            logging.error('not found function "%s"' % msg)
            return
        try:
            if is_raw:
                args = msg[1:]
            else:
                # NOTE(review): unpickling network input executes arbitrary
                # code if peers are untrusted -- acceptable only between
                # trusted endpoints.
                args = cPickle.loads(msg[1])
        except Exception:
            logging.error('invalid function args: %s' % msg)
            return
        proxy = Proxy(self, sock, identity)
        return func(proxy, *args)
class TimeoutChecker(object):
    """Watchdog that remembers when it was last fed and reports expiry."""
    def __init__(self, timeout):
        # Allowed silence, in seconds, before timeout() becomes True.
        self.interval = timeout
        self.timestamp = time.time()

    def feed(self):
        """Restart the countdown from now."""
        self.timestamp = time.time()

    def timeout(self):
        """Return True once more than `interval` seconds elapsed since feed()."""
        elapsed = time.time() - self.timestamp
        return elapsed > self.interval
| 4,688 | 16 | 758 |
3239fc9853cc1f58c6863c9ff8360459339c32c3 | 545 | py | Python | users/views.py | Swannbm/Consensus-online | a75385b9155e35ce3ce8d92e21f6dbac7a30223a | [
"CC0-1.0"
] | null | null | null | users/views.py | Swannbm/Consensus-online | a75385b9155e35ce3ce8d92e21f6dbac7a30223a | [
"CC0-1.0"
] | null | null | null | users/views.py | Swannbm/Consensus-online | a75385b9155e35ce3ce8d92e21f6dbac7a30223a | [
"CC0-1.0"
] | null | null | null | from django.views.generic import TemplateView, DetailView, CreateView
from .models import User
| 21.8 | 69 | 0.73578 | from django.views.generic import TemplateView, DetailView, CreateView
from .models import User
class Connectview(TemplateView):
    """Render the static connection page (users/connect.html)."""
    template_name = "users/connect.html"
class Disconnectview(TemplateView):
    """Render the disconnection page.

    NOTE(review): reuses users/connect.html, identical to Connectview --
    looks like a copy-paste; confirm a dedicated template isn't intended.
    """
    template_name = "users/connect.html"
class ProfilView(DetailView):
    """Display a single User's profile (template: users/profile.html)."""
    model = User
    context_object_name = "user"
    template_name = "users/profile.html"
class SubscriptionView(CreateView):
    """Sign-up form creating a User from email and first name.

    NOTE(review): no password field is collected here -- verify account
    credentials are handled elsewhere.
    """
    model = User
    context_object_name = "user"
    fields = ["email", "first_name"]
    template_name = "users/subscription.html"
| 0 | 353 | 92 |
70a5b576ca8c9091512c00e88bce2182dbca683f | 574 | py | Python | data_structure/imaging_metrics.py | sly9/VoyagerTelegramBot | 0ea033e2e8b5bdea3f54d2e89aa7e191a5ee3545 | [
"MIT"
] | 12 | 2021-11-14T00:35:39.000Z | 2022-01-15T01:42:39.000Z | data_structure/imaging_metrics.py | sly9/VoyagerTelegramBot | 0ea033e2e8b5bdea3f54d2e89aa7e191a5ee3545 | [
"MIT"
] | 26 | 2021-11-13T00:26:22.000Z | 2022-03-12T06:10:46.000Z | data_structure/imaging_metrics.py | sly9/VoyagerTelegramBot | 0ea033e2e8b5bdea3f54d2e89aa7e191a5ee3545 | [
"MIT"
] | 5 | 2021-11-02T21:49:04.000Z | 2022-01-09T02:29:15.000Z | from dataclasses import dataclass
@dataclass
@dataclass
@dataclass
@dataclass
| 18.516129 | 57 | 0.695122 | from dataclasses import dataclass
from dataclasses import field  # needed for per-instance defaults below


@dataclass
class GuidingMetrics:
    """One guiding-error sample: x/y offsets expressed in `unit`."""
    error_x: float = 0
    error_y: float = 0
    unit: str = 'pixel'


@dataclass
class FocusingMetrics:
    """Focuser state: position, HFD, temperature and active filter."""
    position: float = 0
    hfd: float = 0
    temperature: float = 0
    filter_name: str = 'Ha'
    filter_color: str = '#FFFFFF'


@dataclass
class JpgMetrics:
    """Statistics attached to the rendered JPG image."""
    star_index: float = 0
    hfd: float = 0


@dataclass
class ImagingMetrics:
    """Aggregate of all metric groups.

    Fix: the defaults were shared instances (``GuidingMetrics()`` evaluated
    once at class-creation time), so every default-constructed
    ImagingMetrics aliased the same mutable sub-objects -- and Python 3.11+
    rejects such unhashable dataclass defaults with ValueError.
    ``default_factory`` builds a fresh instance per ImagingMetrics.
    """
    guiding_metrics: GuidingMetrics = field(default_factory=GuidingMetrics)
    focusing_metrics: FocusingMetrics = field(default_factory=FocusingMetrics)
    jpg_metrics: JpgMetrics = field(default_factory=JpgMetrics)
| 0 | 400 | 88 |
07fe11420127afd35cacac96f7d748593a165715 | 419 | py | Python | classifier/beer/forms.py | RafaelBernardes/beer-classifier | 68edb99a231d7090d0d6d384de712b4792b3b7d0 | [
"MIT"
] | null | null | null | classifier/beer/forms.py | RafaelBernardes/beer-classifier | 68edb99a231d7090d0d6d384de712b4792b3b7d0 | [
"MIT"
] | null | null | null | classifier/beer/forms.py | RafaelBernardes/beer-classifier | 68edb99a231d7090d0d6d384de712b4792b3b7d0 | [
"MIT"
] | null | null | null | from django import forms | 52.375 | 73 | 0.73747 | from django import forms
class BeerForm(forms.Form):
    """Input form for the beer characteristics fed to the classifier.

    All measures share the same precision: up to 5 digits, 3 decimals.
    """
    IBU = forms.DecimalField(max_digits=5, decimal_places=3, label='IBU')  # International Bitterness Units
    SRM = forms.DecimalField(max_digits=5, decimal_places=3, label='SRM')  # Standard Reference Method (color)
    OG = forms.DecimalField(max_digits=5, decimal_places=3, label='OG')  # original gravity
    FG = forms.DecimalField(max_digits=5, decimal_places=3, label='FG')  # final gravity
ABV = forms.DecimalField(max_digits=5, decimal_places=3, label='ABV') | 0 | 372 | 23 |
5ce30781b8e14bb52e953842fdc800ce3dad1397 | 1,211 | py | Python | tests/test_ld.py | kateya/clade | f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3 | [
"Apache-2.0"
] | 11 | 2018-10-15T08:46:00.000Z | 2022-02-14T14:03:15.000Z | tests/test_ld.py | kateya/clade | f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3 | [
"Apache-2.0"
] | 136 | 2018-08-07T11:11:29.000Z | 2022-03-31T19:02:21.000Z | tests/test_ld.py | kateya/clade | f2c091be8055156ab3e6ce6b8f855c4b01d2b6f3 | [
"Apache-2.0"
] | 6 | 2018-11-09T12:52:39.000Z | 2022-02-19T20:34:25.000Z | # Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from clade import Clade
| 31.051282 | 79 | 0.689513 | # Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from clade import Clade
def test_ld(tmpdir, cmds_file):
    """Parse LD commands and validate the one that produces main.o2."""
    clade = Clade(tmpdir, cmds_file)
    extension = clade.parse("LD")
    commands = extension.load_all_cmds(with_opts=True, with_raw=True)
    # Keep the last parsed command whose outputs mention main.o2.
    target_cmd = dict()
    for command in commands:
        if any(re.search("main.o2", out) for out in command["out"]):
            target_cmd = command
    assert len(commands) >= 1
    assert len(target_cmd["in"]) == 2
    assert len(target_cmd["out"]) == 1
    assert len(target_cmd["opts"]) == 7
    assert len(target_cmd["command"]) == 11
| 473 | 0 | 23 |
acf59c7529ebaf9b43006db311dd17abf02180e3 | 338 | py | Python | week04/myproject/rent/admin.py | fuengfa/CS459 | cf8b8dcdb94ebcb894551174e5223b857425e1f6 | [
"BSD-2-Clause"
] | null | null | null | week04/myproject/rent/admin.py | fuengfa/CS459 | cf8b8dcdb94ebcb894551174e5223b857425e1f6 | [
"BSD-2-Clause"
] | null | null | null | week04/myproject/rent/admin.py | fuengfa/CS459 | cf8b8dcdb94ebcb894551174e5223b857425e1f6 | [
"BSD-2-Clause"
] | null | null | null | from django.contrib import admin
from rent.models import Rent,Car
# Register your models here.
admin.site.register(Rent,RentAdmin)
admin.site.register(Car,CarAdmin)
| 22.533333 | 49 | 0.789941 | from django.contrib import admin
from rent.models import Rent,Car
# Register your models here.
class RentAdmin(admin.ModelAdmin):
    """Changelist for Rent showing every model field as a column."""
    list_display=[f.name for f in Rent._meta.fields]
admin.site.register(Rent,RentAdmin)
class CarAdmin(admin.ModelAdmin):
    """Changelist for Car showing every model field as a column."""
    list_display=[f.name for f in Car._meta.fields]
admin.site.register(Car,CarAdmin)
| 0 | 124 | 46 |
92152b17b6a5b1ccdcde6fa9a4cc1a8a589edfda | 3,205 | py | Python | mailersend/webhooks/__init__.py | mtthidoteu/mailersend-python | af9991847a5f4894a88037bc0778217611621a9f | [
"MIT"
] | 8 | 2020-12-18T10:18:27.000Z | 2022-02-24T07:43:27.000Z | mailersend/webhooks/__init__.py | mtthidoteu/mailersend-python | af9991847a5f4894a88037bc0778217611621a9f | [
"MIT"
] | 14 | 2021-02-02T09:07:58.000Z | 2022-03-17T10:35:03.000Z | mailersend/webhooks/__init__.py | mtthidoteu/mailersend-python | af9991847a5f4894a88037bc0778217611621a9f | [
"MIT"
] | 7 | 2021-04-09T12:34:00.000Z | 2022-02-04T15:06:35.000Z | """
Handles /webhooks endpoint
Doc: https://developers.mailersend.com/api/v1/webhooks.html
"""
import requests
from mailersend.base import base
data = {}
class NewWebhook(base.NewAPIClient):
"""
Instantiates the /webhooks endpoint object
"""
def __init__(self):
"""
NewWebhook constructor
"""
pass
def get_webhooks(self, domain_id):
"""
Returns a JSON response from the MailerSend API
@params:
domain_id (str): A domain ID
"""
request = requests.get(
f"{self.api_base}/webhooks",
headers=self.headers_default,
json={"domain_id": domain_id},
)
return request.text
def get_webhook_by_id(self, webhook_id):
"""
Returns a JSON response from the MailerSend API
@params:
webhook_id (str): A webhook ID
"""
request = requests.get(
f"{self.api_base}/webhooks/{webhook_id}", headers=self.headers_default
)
return request.text
def set_webhook_url(self, webhook_url):
"""
Sets the webhook 'url' field
@params:
webhook_url (str): A webhook URL
"""
data["url"] = webhook_url
def set_webhook_name(self, webhook_name):
"""
Sets the webhook 'name' field
@params:
webhook_name (str): A webhook name
"""
data["name"] = webhook_name
def set_webhook_events(self, events):
"""
Sets the webhook 'events' field
@params:
events (list): A list containing valid events
"""
data["events"] = events
def set_webhook_enabled(self, enabled=True):
"""
Sets the webhook 'enabled' status field
@params:
enabled (bool): Controls webhook status
"""
data["enabled"] = enabled
def set_webhook_domain(self, domain_id):
"""
Sets the webhook 'domain_id' status field
@params:
domain_id (str): A valid domain ID
"""
data["domain_id"] = domain_id
def update_webhook(self, webhook_id, key, value):
"""
Updates a webhook setting
@params:
webhook_id (str): A valid webhook ID
key (str): A setting key
value (str): Corresponding keys value
"""
request = requests.put(
f"{self.api_base}/webhooks/{webhook_id}",
headers=self.headers_default,
json={f"{key}": value},
)
return request.text
def delete_webhook(self, webhook_id):
"""
Returns a JSON response from the MailerSend API
@params:
webhook_id (str): A valid webhook ID
"""
request = requests.delete(
f"{self.api_base}/webhooks/{webhook_id}", headers=self.headers_default
)
return request.text
def create_webhook(self):
"""
Returns a JSON response from the MailerSend API
"""
request = requests.post(
f"{self.api_base}/webhooks", headers=self.headers_default, json=data
)
return request.text
| 23.566176 | 82 | 0.55975 | """
Handles /webhooks endpoint
Doc: https://developers.mailersend.com/api/v1/webhooks.html
"""
import requests
from mailersend.base import base
data = {}
class NewWebhook(base.NewAPIClient):
    """
    Instantiates the /webhooks endpoint object
    """
    # NOTE(review): every set_webhook_* method writes into the module-level
    # `data` dict defined above, which create_webhook() later posts. That
    # builder state is shared by all instances -- confirm single-threaded,
    # single-builder usage is intended.
    def __init__(self):
        """
        NewWebhook constructor
        """
        # Intentionally empty and does not call base.NewAPIClient.__init__;
        # presumably api_base / headers_default are available without
        # per-instance setup -- TODO confirm against the base class.
        pass
    def get_webhooks(self, domain_id):
        """
        Returns a JSON response from the MailerSend API
        @params:
        domain_id (str): A domain ID
        """
        # NOTE(review): sends a JSON body on a GET request; verify the API
        # reads it (query parameters are the conventional choice).
        request = requests.get(
            f"{self.api_base}/webhooks",
            headers=self.headers_default,
            json={"domain_id": domain_id},
        )
        return request.text
    def get_webhook_by_id(self, webhook_id):
        """
        Returns a JSON response from the MailerSend API
        @params:
        webhook_id (str): A webhook ID
        """
        request = requests.get(
            f"{self.api_base}/webhooks/{webhook_id}", headers=self.headers_default
        )
        return request.text
    def set_webhook_url(self, webhook_url):
        """
        Sets the webhook 'url' field
        @params:
        webhook_url (str): A webhook URL
        """
        data["url"] = webhook_url
    def set_webhook_name(self, webhook_name):
        """
        Sets the webhook 'name' field
        @params:
        webhook_name (str): A webhook name
        """
        data["name"] = webhook_name
    def set_webhook_events(self, events):
        """
        Sets the webhook 'events' field
        @params:
        events (list): A list containing valid events
        """
        data["events"] = events
    def set_webhook_enabled(self, enabled=True):
        """
        Sets the webhook 'enabled' status field
        @params:
        enabled (bool): Controls webhook status
        """
        data["enabled"] = enabled
    def set_webhook_domain(self, domain_id):
        """
        Sets the webhook 'domain_id' status field
        @params:
        domain_id (str): A valid domain ID
        """
        data["domain_id"] = domain_id
    def update_webhook(self, webhook_id, key, value):
        """
        Updates a webhook setting
        @params:
        webhook_id (str): A valid webhook ID
        key (str): A setting key
        value (str): Corresponding keys value
        """
        request = requests.put(
            f"{self.api_base}/webhooks/{webhook_id}",
            headers=self.headers_default,
            json={f"{key}": value},
        )
        return request.text
    def delete_webhook(self, webhook_id):
        """
        Returns a JSON response from the MailerSend API
        @params:
        webhook_id (str): A valid webhook ID
        """
        request = requests.delete(
            f"{self.api_base}/webhooks/{webhook_id}", headers=self.headers_default
        )
        return request.text
    def create_webhook(self):
        """
        Returns a JSON response from the MailerSend API
        """
        # Posts the payload accumulated in the module-level `data` dict.
        request = requests.post(
            f"{self.api_base}/webhooks", headers=self.headers_default, json=data
        )
        return request.text
| 0 | 0 | 0 |
4a378a2798c29cc35bf9d04f4078a28eb79b8531 | 1,184 | py | Python | invenio_records_resources/services/records/links.py | inveniosoftware/invenio-resources | f1fb9a849d03af1d6ec4cddfc4e140a06788783b | [
"MIT"
] | null | null | null | invenio_records_resources/services/records/links.py | inveniosoftware/invenio-resources | f1fb9a849d03af1d6ec4cddfc4e140a06788783b | [
"MIT"
] | 19 | 2020-05-18T12:04:54.000Z | 2020-07-13T06:19:27.000Z | invenio_records_resources/services/records/links.py | inveniosoftware/invenio-resources | f1fb9a849d03af1d6ec4cddfc4e140a06788783b | [
"MIT"
] | 5 | 2020-04-28T09:07:43.000Z | 2020-07-01T14:43:01.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020-2021 Northwestern University.
#
# Flask-Resources is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Utility for rendering URI template links."""
from ..base import Link
class RecordLink(Link):
"""Short cut for writing record links."""
@staticmethod
def vars(record, vars):
"""Variables for the URI template."""
vars.update({"id": record.pid.pid_value})
def pagination_links(tpl):
"""Create pagination links (prev/selv/next) from the same template."""
return {
"prev": Link(
tpl,
when=lambda pagination, ctx: pagination.has_prev,
vars=lambda pagination, vars: vars["args"].update(
{"page": pagination.prev_page.page}
),
),
"self": Link(tpl),
"next": Link(
tpl,
when=lambda pagination, ctx: pagination.has_next,
vars=lambda pagination, vars: vars["args"].update(
{"page": pagination.next_page.page}
),
),
}
| 28.190476 | 76 | 0.591216 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020-2021 Northwestern University.
#
# Flask-Resources is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Utility for rendering URI template links."""
from ..base import Link
class RecordLink(Link):
    """Short cut for writing record links."""
    @staticmethod
    def vars(record, vars):
        """Variables for the URI template."""
        # Expand {id} from the record's persistent identifier value.
        # (`vars` shadows the builtin; name kept for interface compatibility.)
        vars.update({"id": record.pid.pid_value})
def pagination_links(tpl):
    """Create pagination links (prev/self/next) from the same template."""
    # prev/next carry a `when` predicate (presumably gating link emission to
    # pages that exist) and a `vars` callback injecting the target page
    # number into the query args -- exact semantics defined by Link.
    return {
        "prev": Link(
            tpl,
            when=lambda pagination, ctx: pagination.has_prev,
            vars=lambda pagination, vars: vars["args"].update(
                {"page": pagination.prev_page.page}
            ),
        ),
        "self": Link(tpl),
        "next": Link(
            tpl,
            when=lambda pagination, ctx: pagination.has_next,
            vars=lambda pagination, vars: vars["args"].update(
                {"page": pagination.next_page.page}
            ),
        ),
    }
| 0 | 0 | 0 |
b99da5339d7812793a5063a3f3c797bca067a632 | 6,421 | py | Python | gui/views/ui/Motors_widget.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | gui/views/ui/Motors_widget.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | gui/views/ui/Motors_widget.py | Saldenisov/pyconlyse | 1de301b4a4c15ee0bd19034aa8d5da1beacfd124 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Motors_widget.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 54.880342 | 100 | 0.732752 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Motors_widget.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_StepMotorsWidgetWindow(object):
def setupUi(self, stepmotorswidget):
stepmotorswidget.setObjectName("MotorswidgetWindow")
stepmotorswidget.resize(732, 560)
font = QtGui.QFont()
font.setFamily("MV Boli")
font.setPointSize(14)
font.setBold(False)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(50)
font.setStrikeOut(False)
font.setStyleStrategy(QtGui.QFont.PreferDefault)
stepmotorswidget.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../../icons/motors.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
stepmotorswidget.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(stepmotorswidget)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
self.listWidget.setObjectName("listWidget")
self.verticalLayout.addWidget(self.listWidget)
self.horizontalLayout_buttons = QtWidgets.QHBoxLayout()
self.horizontalLayout_buttons.setObjectName("horizontalLayout_buttons")
self.verticalLayout.addLayout(self.horizontalLayout_buttons)
self.horizontalLayout.addLayout(self.verticalLayout)
self.gridLayout_2.addLayout(self.horizontalLayout, 0, 0, 1, 1)
stepmotorswidget.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(stepmotorswidget)
self.statusbar.setObjectName("statusbar")
stepmotorswidget.setStatusBar(self.statusbar)
self.menubar = QtWidgets.QMenuBar(stepmotorswidget)
self.menubar.setGeometry(QtCore.QRect(0, 0, 732, 21))
self.menubar.setObjectName("menubar")
self.menu_sdf = QtWidgets.QMenu(self.menubar)
self.menu_sdf.setObjectName("menu_sdf")
self.menu_sd = QtWidgets.QMenu(self.menubar)
self.menu_sd.setObjectName("menu_sd")
self.menuMain = QtWidgets.QMenu(self.menubar)
self.menuMain.setObjectName("menuMain")
stepmotorswidget.setMenuBar(self.menubar)
self.actionSettings = QtWidgets.QAction(stepmotorswidget)
self.actionSettings.setObjectName("actionSettings")
self.actionHelp = QtWidgets.QAction(stepmotorswidget)
self.actionHelp.setObjectName("actionHelp")
self.actionAuthor = QtWidgets.QAction(stepmotorswidget)
self.actionAuthor.setObjectName("actionAuthor")
self.actionQuit = QtWidgets.QAction(stepmotorswidget)
self.actionQuit.setObjectName("actionQuit")
self.actionLoad = QtWidgets.QAction(stepmotorswidget)
self.actionLoad.setObjectName("actionLoad")
self.actionSave_config = QtWidgets.QAction(stepmotorswidget)
self.actionSave_config.setObjectName("actionSave_config")
self.actionTest_connections = QtWidgets.QAction(stepmotorswidget)
self.actionTest_connections.setObjectName("actionTest_connections")
self.actionStart_connection = QtWidgets.QAction(stepmotorswidget)
self.actionStart_connection.setObjectName("actionStart_connection")
self.actionStop_connection = QtWidgets.QAction(stepmotorswidget)
self.actionStop_connection.setObjectName("actionStop_connection")
self.menu_sdf.addAction(self.actionSettings)
self.menu_sdf.addSeparator()
self.menu_sdf.addAction(self.actionLoad)
self.menu_sdf.addAction(self.actionSave_config)
self.menu_sdf.addSeparator()
self.menu_sdf.addAction(self.actionTest_connections)
self.menu_sdf.addAction(self.actionStart_connection)
self.menu_sdf.addAction(self.actionStop_connection)
self.menu_sd.addAction(self.actionHelp)
self.menu_sd.addAction(self.actionAuthor)
self.menuMain.addAction(self.actionQuit)
self.menubar.addAction(self.menuMain.menuAction())
self.menubar.addAction(self.menu_sdf.menuAction())
self.menubar.addAction(self.menu_sd.menuAction())
self.retranslateUi(stepmotorswidget)
QtCore.QMetaObject.connectSlotsByName(stepmotorswidget)
def retranslateUi(self, MotorswidgetWindow):
_translate = QtCore.QCoreApplication.translate
MotorswidgetWindow.setWindowTitle(_translate("MotorswidgetWindow", "pytlyse Motors"))
self.menu_sdf.setTitle(_translate("MotorswidgetWindow", "Tools"))
self.menu_sd.setTitle(_translate("MotorswidgetWindow", "About"))
self.menuMain.setTitle(_translate("MotorswidgetWindow", "Main"))
self.actionSettings.setText(_translate("MotorswidgetWindow", "Settings"))
self.actionHelp.setText(_translate("MotorswidgetWindow", "Help"))
self.actionAuthor.setText(_translate("MotorswidgetWindow", "Author"))
self.actionAuthor.setIconText(_translate("MotorswidgetWindow", "Author"))
self.actionQuit.setText(_translate("MotorswidgetWindow", "Quit"))
self.actionQuit.setShortcut(_translate("MotorswidgetWindow", "Ctrl+Q"))
self.actionLoad.setText(_translate("MotorswidgetWindow", "Load config"))
self.actionLoad.setShortcut(_translate("MotorswidgetWindow", "Ctrl+L"))
self.actionSave_config.setText(_translate("MotorswidgetWindow", "Save config"))
self.actionSave_config.setShortcut(_translate("MotorswidgetWindow", "Ctrl+S"))
self.actionTest_connections.setText(_translate("MotorswidgetWindow", "Test connections"))
self.actionStart_connection.setText(_translate("MotorswidgetWindow", "Start connection"))
self.actionStart_connection.setShortcut(_translate("MotorswidgetWindow", "Shift+S"))
self.actionStop_connection.setText(_translate("MotorswidgetWindow", "Stop connection"))
self.actionStop_connection.setShortcut(_translate("MotorswidgetWindow", "Shift+T"))
| 6,079 | 19 | 76 |
6f989f580d5cef746641d09f4611e8bccd59dbca | 1,106 | py | Python | faceutils/faceplusplus.py | CelineWang1027/PSGAN | 33f246c8b4cf616929c01963476eb0d3f9d57132 | [
"MIT"
] | 570 | 2020-03-19T17:09:12.000Z | 2022-03-31T16:54:27.000Z | faceutils/faceplusplus.py | CelineWang1027/PSGAN | 33f246c8b4cf616929c01963476eb0d3f9d57132 | [
"MIT"
] | 35 | 2020-03-24T05:25:05.000Z | 2022-02-27T06:48:08.000Z | faceutils/faceplusplus.py | CelineWang1027/PSGAN | 33f246c8b4cf616929c01963476eb0d3f9d57132 | [
"MIT"
] | 112 | 2020-03-20T08:00:43.000Z | 2022-03-03T03:35:55.000Z | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from io import BytesIO
import time
import base64
import json
import requests
key = "-fd9YqPnrLnmugQGAhQoimCkQd0t8N8L"
secret = "0GLyRIHDnrjKSlDuflLPO8a6U32hyDUy"
beautify.url = 'https://api-cn.faceplusplus.com/facepp/v2/beautify'
rank.url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
| 25.136364 | 70 | 0.638336 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from io import BytesIO
import time
import base64
import json
import requests
key = "-fd9YqPnrLnmugQGAhQoimCkQd0t8N8L"
secret = "0GLyRIHDnrjKSlDuflLPO8a6U32hyDUy"
def encode(image: 'PIL.Image') -> str:
    """Serialize *image* to PNG and return the bytes base64-encoded as text."""
    buf = BytesIO()
    try:
        image.save(buf, format='PNG')
        payload = buf.getvalue()
    finally:
        buf.close()
    return base64.b64encode(payload).decode('utf-8')
def beautify(image: 'PIL.Image') -> str:
    """POST `image` to the Face++ beautify endpoint; return its 'result' field.

    Network I/O: sends the base64-encoded PNG with the module-level API
    credentials. NOTE(review): key/secret are hard-coded above -- move them
    to configuration or environment variables.
    """
    data = {
        'api_key': key,
        'api_secret': secret,
        'image_base64': encode(image),
    }
    resp = requests.post(beautify.url, data=data)
    return resp.json()['result']
def rank(image: 'PIL.Image') -> int:
    """POST `image` to Face++ detect and return the highest beauty score.

    Reads the 'beauty' attribute of the first detected face and takes the
    max over its score values. NOTE(review): annotated ``-> int`` but the
    scores are likely floats, so this probably returns a float -- verify.
    Raises IndexError if no face is detected (empty 'faces' list).
    """
    data = {
        'api_key': key,
        'api_secret': secret,
        'image_base64': encode(image),
        'return_attributes': 'beauty',
    }
    resp = requests.post(rank.url, data=data)
    scores = resp.json()['faces'][0]['attributes']['beauty']
    return max(scores.values())
beautify.url = 'https://api-cn.faceplusplus.com/facepp/v2/beautify'
rank.url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
| 694 | 0 | 69 |
8c32ae202dcab0d85ad0396bf18b6333f947a38f | 4,221 | py | Python | fynance/features/roll_functions.py | ArthurBernard/Fynance | efd9a2e6f8eddcff017d828972236312f6f24084 | [
"MIT"
] | 19 | 2018-12-13T18:52:51.000Z | 2021-09-03T00:33:47.000Z | fynance/features/roll_functions.py | ArthurBernard/Fynance | efd9a2e6f8eddcff017d828972236312f6f24084 | [
"MIT"
] | null | null | null | fynance/features/roll_functions.py | ArthurBernard/Fynance | efd9a2e6f8eddcff017d828972236312f6f24084 | [
"MIT"
] | 6 | 2019-05-31T16:51:51.000Z | 2021-07-29T21:31:25.000Z | #!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: arthur.bernard.92@gmail.com
# @Date: 2020-09-18 21:15:59
# @Last modified by: ArthurBernard
# @Last modified time: 2020-09-18 22:21:22
""" Rolling functions. """
# Built-in packages
# Third party packages
import numpy as np
# Local packages
from fynance.features.roll_functions_cy import *
from fynance._wrappers import WrapperArray
__all__ = ["roll_min", "roll_max"]
# =========================================================================== #
# Min Max #
# =========================================================================== #
@WrapperArray('dtype', 'axis', 'window')
def roll_min(X, w=None, axis=0, dtype=None):
    r""" Compute simple rolling minimum of size `w` for each `X`' series.
    .. math::
        roll\_min^w_t(X) = min(X_{t - w}, ..., X_t)
    Parameters
    ----------
    X : np.ndarray[dtype, ndim=1 or 2]
        Elements to compute the rolling minimum.
    w : int, optional
        Size of the lagged window of the rolling minimum, must be positive. If
        ``w is None`` or ``w=0``, then ``w=X.shape[axis]``. Default is None.
    axis : {0, 1}, optional
        Axis along which the computation is done. Default is 0.
    dtype : np.dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from `X` input.
    Returns
    -------
    np.ndarray[dtype, ndim=1 or 2]
        Simple rolling minimum of each series.
    Examples
    --------
    >>> X = np.array([60, 100, 80, 120, 160, 80])
    >>> roll_min(X, w=3, dtype=np.float64, axis=0)
    array([60., 60., 60., 80., 80., 80.])
    >>> X = np.array([[60, 60], [100, 100], [80, 80],
    ...               [120, 120], [160, 160], [80, 80]])
    >>> roll_min(X, w=3, dtype=np.float64, axis=0)
    array([[60., 60.],
           [60., 60.],
           [60., 60.],
           [80., 80.],
           [80., 80.],
           [80., 80.]])
    >>> roll_min(X, w=3, dtype=np.float64, axis=1)
    array([[ 60.,  60.],
           [100., 100.],
           [ 80.,  80.],
           [120., 120.],
           [160., 160.],
           [ 80.,  80.]])
    See Also
    --------
    roll_max
    """
    # Delegate to the Cython kernel; dtype/axis/window handling is presumably
    # performed by the WrapperArray decorator -- see fynance._wrappers.
    return _roll_min(X, w)
@WrapperArray('dtype', 'axis', 'window')
def roll_max(X, w=None, axis=0, dtype=None):
    r""" Compute simple rolling maximum of size `w` for each `X`' series.
    .. math::
        roll\_max^w_t(X) = max(X_{t - w}, ..., X_t)
    Parameters
    ----------
    X : np.ndarray[dtype, ndim=1 or 2]
        Elements to compute the rolling maximum.
    w : int, optional
        Size of the lagged window of the rolling maximum, must be positive. If
        ``w is None`` or ``w=0``, then ``w=X.shape[axis]``. Default is None.
    axis : {0, 1}, optional
        Axis along which the computation is done. Default is 0.
    dtype : np.dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from `X` input.
    Returns
    -------
    np.ndarray[dtype, ndim=1 or 2]
        Simple rolling maximum of each series.
    Examples
    --------
    >>> X = np.array([60, 100, 80, 120, 160, 80])
    >>> roll_max(X, w=3, dtype=np.float64, axis=0)
    array([ 60., 100., 100., 120., 160., 160.])
    >>> X = np.array([[60, 60], [100, 100], [80, 80],
    ...               [120, 120], [160, 160], [80, 80]])
    >>> roll_max(X, w=3, dtype=np.float64, axis=0)
    array([[ 60.,  60.],
           [100., 100.],
           [100., 100.],
           [120., 120.],
           [160., 160.],
           [160., 160.]])
    >>> roll_max(X, w=3, dtype=np.float64, axis=1)
    array([[ 60.,  60.],
           [100., 100.],
           [ 80.,  80.],
           [120., 120.],
           [160., 160.],
           [ 80.,  80.]])
    See Also
    --------
    roll_min
    """
    # Delegate to the Cython kernel; dtype/axis/window handling is presumably
    # performed by the WrapperArray decorator -- see fynance._wrappers.
    return _roll_max(X, w)
| 27.057692 | 79 | 0.49538 | #!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: arthur.bernard.92@gmail.com
# @Date: 2020-09-18 21:15:59
# @Last modified by: ArthurBernard
# @Last modified time: 2020-09-18 22:21:22
""" Rolling functions. """
# Built-in packages
# Third party packages
import numpy as np
# Local packages
from fynance.features.roll_functions_cy import *
from fynance._wrappers import WrapperArray
__all__ = ["roll_min", "roll_max"]
# =========================================================================== #
# Min Max #
# =========================================================================== #
@WrapperArray('dtype', 'axis', 'window')
def roll_min(X, w=None, axis=0, dtype=None):
    r""" Compute simple rolling minimum of size `w` for each `X`' series.
    .. math::
        roll\_min^w_t(X) = min(X_{t - w}, ..., X_t)
    Parameters
    ----------
    X : np.ndarray[dtype, ndim=1 or 2]
        Elements to compute the rolling minimum.
    w : int, optional
        Size of the lagged window of the rolling minimum, must be positive. If
        ``w is None`` or ``w=0``, then ``w=X.shape[axis]``. Default is None.
    axis : {0, 1}, optional
        Axis along which the computation is done. Default is 0.
    dtype : np.dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from `X` input.
    Returns
    -------
    np.ndarray[dtype, ndim=1 or 2]
        Simple rolling minimum of each series.
    Examples
    --------
    >>> X = np.array([60, 100, 80, 120, 160, 80])
    >>> roll_min(X, w=3, dtype=np.float64, axis=0)
    array([60., 60., 60., 80., 80., 80.])
    >>> X = np.array([[60, 60], [100, 100], [80, 80],
    ...               [120, 120], [160, 160], [80, 80]])
    >>> roll_min(X, w=3, dtype=np.float64, axis=0)
    array([[60., 60.],
           [60., 60.],
           [60., 60.],
           [80., 80.],
           [80., 80.],
           [80., 80.]])
    >>> roll_min(X, w=3, dtype=np.float64, axis=1)
    array([[ 60.,  60.],
           [100., 100.],
           [ 80.,  80.],
           [120., 120.],
           [160., 160.],
           [ 80.,  80.]])
    See Also
    --------
    roll_max
    """
    # dtype/axis/window normalisation is handled by the WrapperArray
    # decorator; the raw computation is delegated to the Cython kernels.
    return _roll_min(X, w)
def _roll_min(X, w):
    """ Dispatch `X` to the 1d or 2d Cython rolling-minimum kernel.

    Assumes `X` and `w` were already normalised by the public ``roll_min``
    wrapper (``WrapperArray`` handles dtype/axis/window) -- confirm in
    ``fynance._wrappers`` before calling this helper directly.
    """
    if len(X.shape) == 2:
        return np.asarray(roll_min_cy_2d(X, w))
    return np.asarray(roll_min_cy_1d(X, w))
@WrapperArray('dtype', 'axis', 'window')
def roll_max(X, w=None, axis=0, dtype=None):
    r""" Compute simple rolling maximum of size `w` for each `X`' series.
    .. math::
        roll\_max^w_t(X) = max(X_{t - w}, ..., X_t)
    Parameters
    ----------
    X : np.ndarray[dtype, ndim=1 or 2]
        Elements to compute the rolling maximum.
    w : int, optional
        Size of the lagged window of the rolling maximum, must be positive. If
        ``w is None`` or ``w=0``, then ``w=X.shape[axis]``. Default is None.
    axis : {0, 1}, optional
        Axis along which the computation is done. Default is 0.
    dtype : np.dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from `X` input.
    Returns
    -------
    np.ndarray[dtype, ndim=1 or 2]
        Simple rolling maximum of each series.
    Examples
    --------
    >>> X = np.array([60, 100, 80, 120, 160, 80])
    >>> roll_max(X, w=3, dtype=np.float64, axis=0)
    array([ 60., 100., 100., 120., 160., 160.])
    >>> X = np.array([[60, 60], [100, 100], [80, 80],
    ...               [120, 120], [160, 160], [80, 80]])
    >>> roll_max(X, w=3, dtype=np.float64, axis=0)
    array([[ 60.,  60.],
           [100., 100.],
           [100., 100.],
           [120., 120.],
           [160., 160.],
           [160., 160.]])
    >>> roll_max(X, w=3, dtype=np.float64, axis=1)
    array([[ 60.,  60.],
           [100., 100.],
           [ 80.,  80.],
           [120., 120.],
           [160., 160.],
           [ 80.,  80.]])
    See Also
    --------
    roll_min
    """
    # dtype/axis/window normalisation is handled by the WrapperArray
    # decorator; the raw computation is delegated to the Cython kernels.
    return _roll_max(X, w)
def _roll_max(X, w):
    """ Dispatch `X` to the 1d or 2d Cython rolling-maximum kernel.

    Assumes `X` and `w` were already normalised by the public ``roll_max``
    wrapper (``WrapperArray`` handles dtype/axis/window) -- confirm in
    ``fynance._wrappers`` before calling this helper directly.
    """
    if len(X.shape) == 2:
        return np.asarray(roll_max_cy_2d(X, w))
    return np.asarray(roll_max_cy_1d(X, w))
| 238 | 0 | 46 |
3386462628a718e28730883b2a94c009a498da4c | 7,002 | py | Python | tests/msodde/test_basic.py | InQuest/oletools | e9dca546b053f7c7a8be921255a7bc0361e22425 | [
"MIT",
"Unlicense"
] | null | null | null | tests/msodde/test_basic.py | InQuest/oletools | e9dca546b053f7c7a8be921255a7bc0361e22425 | [
"MIT",
"Unlicense"
] | null | null | null | tests/msodde/test_basic.py | InQuest/oletools | e9dca546b053f7c7a8be921255a7bc0361e22425 | [
"MIT",
"Unlicense"
] | 2 | 2021-04-26T00:28:41.000Z | 2021-08-01T16:18:21.000Z | """ Test some basic behaviour of msodde.py
Ensure that
- doc and docx are read without error
- garbage returns error return status
- dde-links are found where appropriate
"""
from __future__ import print_function
import unittest
from oletools import msodde
from tests.test_utils import DATA_BASE_DIR as BASE_DIR
import os
from os.path import join
from traceback import print_exc
class TestReturnCode(unittest.TestCase):
""" check return codes and exception behaviour (not text output) """
def test_valid_doc(self):
""" check that a valid doc file leads to 0 exit status """
for filename in (
'dde-test-from-office2003', 'dde-test-from-office2016',
'harmless-clean', 'dde-test-from-office2013-utf_16le-korean'):
self.do_test_validity(join(BASE_DIR, 'msodde',
filename + '.doc'))
def test_valid_docx(self):
""" check that a valid docx file leads to 0 exit status """
for filename in 'dde-test', 'harmless-clean':
self.do_test_validity(join(BASE_DIR, 'msodde',
filename + '.docx'))
def test_valid_docm(self):
""" check that a valid docm file leads to 0 exit status """
for filename in 'dde-test', 'harmless-clean':
self.do_test_validity(join(BASE_DIR, 'msodde',
filename + '.docm'))
def test_valid_xml(self):
""" check that xml leads to 0 exit status """
for filename in 'harmless-clean-2003.xml', 'dde-in-excel2003.xml', \
'dde-in-word2003.xml', 'dde-in-word2007.xml':
self.do_test_validity(join(BASE_DIR, 'msodde', filename))
def test_invalid_none(self):
""" check that no file argument leads to non-zero exit status """
self.do_test_validity('', True)
def test_invalid_empty(self):
""" check that empty file argument leads to non-zero exit status """
self.do_test_validity(join(BASE_DIR, 'basic/empty'), True)
def test_invalid_text(self):
""" check that text file argument leads to non-zero exit status """
self.do_test_validity(join(BASE_DIR, 'basic/text'), True)
def test_encrypted(self):
"""
check that encrypted files lead to non-zero exit status
Currently, only the encryption applied by Office 2010 (CryptoApi RC4
Encryption) is tested.
"""
CRYPT_DIR = join(BASE_DIR, 'encrypted')
ADD_ARGS = '', '-j', '-d', '-f', '-a'
for filename in os.listdir(CRYPT_DIR):
full_name = join(CRYPT_DIR, filename)
for args in ADD_ARGS:
self.do_test_validity(args + ' ' + full_name, True)
def do_test_validity(self, args, expect_error=False):
""" helper for test_valid_doc[x] """
have_exception = False
try:
msodde.process_file(args, msodde.FIELD_FILTER_BLACKLIST)
except Exception:
have_exception = True
print_exc()
except SystemExit as exc: # sys.exit() was called
have_exception = True
if exc.code is None:
have_exception = False
self.assertEqual(expect_error, have_exception,
msg='Args={0}, expect={1}, exc={2}'
.format(args, expect_error, have_exception))
class TestDdeLinks(unittest.TestCase):
""" capture output of msodde and check dde-links are found correctly """
@staticmethod
def get_dde_from_output(output):
""" helper to read dde links from captured output
"""
return [o for o in output.splitlines()]
def test_with_dde(self):
""" check that dde links appear on stdout """
filename = 'dde-test-from-office2003.doc'
output = msodde.process_file(
join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
self.assertNotEqual(len(self.get_dde_from_output(output)), 0,
msg='Found no dde links in output of ' + filename)
def test_no_dde(self):
""" check that no dde links appear on stdout """
filename = 'harmless-clean.doc'
output = msodde.process_file(
join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
self.assertEqual(len(self.get_dde_from_output(output)), 0,
msg='Found dde links in output of ' + filename)
def test_with_dde_utf16le(self):
""" check that dde links appear on stdout """
filename = 'dde-test-from-office2013-utf_16le-korean.doc'
output = msodde.process_file(
join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
self.assertNotEqual(len(self.get_dde_from_output(output)), 0,
msg='Found no dde links in output of ' + filename)
def test_excel(self):
""" check that dde links are found in excel 2007+ files """
expect = ['DDE-Link cmd /c calc.exe', ]
for extn in 'xlsx', 'xlsm', 'xlsb':
output = msodde.process_file(
join(BASE_DIR, 'msodde', 'dde-test.' + extn), msodde.FIELD_FILTER_BLACKLIST)
self.assertEqual(expect, self.get_dde_from_output(output),
msg='unexpected output for dde-test.{0}: {1}'
.format(extn, output))
def test_xml(self):
""" check that dde in xml from word / excel is found """
for name_part in 'excel2003', 'word2003', 'word2007':
filename = 'dde-in-' + name_part + '.xml'
output = msodde.process_file(
join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
links = self.get_dde_from_output(output)
self.assertEqual(len(links), 1, 'found {0} dde-links in {1}'
.format(len(links), filename))
self.assertTrue('cmd' in links[0], 'no "cmd" in dde-link for {0}'
.format(filename))
self.assertTrue('calc' in links[0], 'no "calc" in dde-link for {0}'
.format(filename))
def test_clean_rtf_blacklist(self):
""" find a lot of hyperlinks in rtf spec """
filename = 'RTF-Spec-1.7.rtf'
output = msodde.process_file(
join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
self.assertEqual(len(self.get_dde_from_output(output)), 1413)
def test_clean_rtf_ddeonly(self):
""" find no dde links in rtf spec """
filename = 'RTF-Spec-1.7.rtf'
output = msodde.process_file(
join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_DDE)
self.assertEqual(len(self.get_dde_from_output(output)), 0,
msg='Found dde links in output of ' + filename)
if __name__ == '__main__':
unittest.main()
| 41.928144 | 92 | 0.596829 | """ Test some basic behaviour of msodde.py
Ensure that
- doc and docx are read without error
- garbage returns error return status
- dde-links are found where appropriate
"""
from __future__ import print_function
import unittest
from oletools import msodde
from tests.test_utils import DATA_BASE_DIR as BASE_DIR
import os
from os.path import join
from traceback import print_exc
class TestReturnCode(unittest.TestCase):
    """ check return codes and exception behaviour (not text output) """
    def test_valid_doc(self):
        """ check that a valid doc file leads to 0 exit status """
        for filename in (
                'dde-test-from-office2003', 'dde-test-from-office2016',
                'harmless-clean', 'dde-test-from-office2013-utf_16le-korean'):
            self.do_test_validity(join(BASE_DIR, 'msodde',
                                       filename + '.doc'))
    def test_valid_docx(self):
        """ check that a valid docx file leads to 0 exit status """
        for filename in 'dde-test', 'harmless-clean':
            self.do_test_validity(join(BASE_DIR, 'msodde',
                                       filename + '.docx'))
    def test_valid_docm(self):
        """ check that a valid docm file leads to 0 exit status """
        for filename in 'dde-test', 'harmless-clean':
            self.do_test_validity(join(BASE_DIR, 'msodde',
                                       filename + '.docm'))
    def test_valid_xml(self):
        """ check that xml leads to 0 exit status """
        for filename in 'harmless-clean-2003.xml', 'dde-in-excel2003.xml', \
                'dde-in-word2003.xml', 'dde-in-word2007.xml':
            self.do_test_validity(join(BASE_DIR, 'msodde', filename))
    def test_invalid_none(self):
        """ check that no file argument leads to non-zero exit status """
        self.do_test_validity('', True)
    def test_invalid_empty(self):
        """ check that empty file argument leads to non-zero exit status """
        self.do_test_validity(join(BASE_DIR, 'basic/empty'), True)
    def test_invalid_text(self):
        """ check that text file argument leads to non-zero exit status """
        self.do_test_validity(join(BASE_DIR, 'basic/text'), True)
    def test_encrypted(self):
        """
        check that encrypted files lead to non-zero exit status
        Currently, only the encryption applied by Office 2010 (CryptoApi RC4
        Encryption) is tested.
        """
        CRYPT_DIR = join(BASE_DIR, 'encrypted')
        ADD_ARGS = '', '-j', '-d', '-f', '-a'  # try each extra CLI flag as well
        for filename in os.listdir(CRYPT_DIR):
            full_name = join(CRYPT_DIR, filename)
            for args in ADD_ARGS:
                # NOTE(review): the flag and the path are concatenated into a
                # single string and passed as process_file's first argument --
                # confirm that this is the intended calling convention.
                self.do_test_validity(args + ' ' + full_name, True)
    def do_test_validity(self, args, expect_error=False):
        """ helper for test_valid_doc[x] """
        have_exception = False
        try:
            msodde.process_file(args, msodde.FIELD_FILTER_BLACKLIST)
        except Exception:
            have_exception = True
            print_exc()
        # SystemExit derives from BaseException, not Exception, so it is not
        # swallowed by the clause above and still reaches this handler.
        except SystemExit as exc:  # sys.exit() was called
            have_exception = True
            if exc.code is None:
                # bare sys.exit() / sys.exit(None) counts as success.
                # NOTE(review): sys.exit(0) would still be treated as an error.
                have_exception = False
        self.assertEqual(expect_error, have_exception,
                         msg='Args={0}, expect={1}, exc={2}'
                         .format(args, expect_error, have_exception))
class TestDdeLinks(unittest.TestCase):
    """ capture output of msodde and check dde-links are found correctly """
    @staticmethod
    def get_dde_from_output(output):
        """ helper to read dde links from captured output

        Returns one list entry per line of `output` (msodde reports one
        link per line).
        """
        # str.splitlines() already returns a fresh list; the previous
        # element-by-element comprehension was a redundant copy.
        return output.splitlines()
    def test_with_dde(self):
        """ check that dde links appear on stdout """
        filename = 'dde-test-from-office2003.doc'
        output = msodde.process_file(
            join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
        self.assertNotEqual(len(self.get_dde_from_output(output)), 0,
                            msg='Found no dde links in output of ' + filename)
    def test_no_dde(self):
        """ check that no dde links appear on stdout """
        filename = 'harmless-clean.doc'
        output = msodde.process_file(
            join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
        self.assertEqual(len(self.get_dde_from_output(output)), 0,
                         msg='Found dde links in output of ' + filename)
    def test_with_dde_utf16le(self):
        """ check that dde links appear on stdout """
        filename = 'dde-test-from-office2013-utf_16le-korean.doc'
        output = msodde.process_file(
            join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
        self.assertNotEqual(len(self.get_dde_from_output(output)), 0,
                            msg='Found no dde links in output of ' + filename)
    def test_excel(self):
        """ check that dde links are found in excel 2007+ files """
        expect = ['DDE-Link cmd /c calc.exe', ]
        for extn in 'xlsx', 'xlsm', 'xlsb':
            output = msodde.process_file(
                join(BASE_DIR, 'msodde', 'dde-test.' + extn), msodde.FIELD_FILTER_BLACKLIST)
            self.assertEqual(expect, self.get_dde_from_output(output),
                             msg='unexpected output for dde-test.{0}: {1}'
                             .format(extn, output))
    def test_xml(self):
        """ check that dde in xml from word / excel is found """
        for name_part in 'excel2003', 'word2003', 'word2007':
            filename = 'dde-in-' + name_part + '.xml'
            output = msodde.process_file(
                join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
            links = self.get_dde_from_output(output)
            # exactly one link is expected, and it must run calc via cmd
            self.assertEqual(len(links), 1, 'found {0} dde-links in {1}'
                             .format(len(links), filename))
            self.assertTrue('cmd' in links[0], 'no "cmd" in dde-link for {0}'
                            .format(filename))
            self.assertTrue('calc' in links[0], 'no "calc" in dde-link for {0}'
                            .format(filename))
    def test_clean_rtf_blacklist(self):
        """ find a lot of hyperlinks in rtf spec """
        filename = 'RTF-Spec-1.7.rtf'
        output = msodde.process_file(
            join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_BLACKLIST)
        self.assertEqual(len(self.get_dde_from_output(output)), 1413)
    def test_clean_rtf_ddeonly(self):
        """ find no dde links in rtf spec """
        filename = 'RTF-Spec-1.7.rtf'
        output = msodde.process_file(
            join(BASE_DIR, 'msodde', filename), msodde.FIELD_FILTER_DDE)
        self.assertEqual(len(self.get_dde_from_output(output)), 0,
                         msg='Found dde links in output of ' + filename)
if __name__ == '__main__':
unittest.main()
| 0 | 0 | 0 |
7060640169cf70472f35a8527d67430e9373a503 | 4,873 | py | Python | Batch_Change_Place2D_GUI.py | AWACC2020/miscellaneous-scripts-for-Autodesk-Maya | 7995c63a94e2f4508d16b1383296542530afb732 | [
"MIT"
] | null | null | null | Batch_Change_Place2D_GUI.py | AWACC2020/miscellaneous-scripts-for-Autodesk-Maya | 7995c63a94e2f4508d16b1383296542530afb732 | [
"MIT"
] | null | null | null | Batch_Change_Place2D_GUI.py | AWACC2020/miscellaneous-scripts-for-Autodesk-Maya | 7995c63a94e2f4508d16b1383296542530afb732 | [
"MIT"
] | null | null | null | # -!- coding: utf-8 -!-
# Author : AWACS
# Time : 2020/06/12
# ------------------------------------
""" batch change place2dTexture Node for texture in Hypershade Editor,
if you think the default command that come with maya:
---(hold middle mouse button + Ctrl key ,and drag a place2dTexture to the texture node to plug all those slot)
is kind of trouble, or you have too many texture node to operating,here's the solution,a command with maya gui.
- How to install : you can simply run this in script editor of maya,or add it in the shelf,
or run as file like add this in the shelf :
import sys
sys.path.append( your script path(add quotation marks(") on the both side of your path ) )
import Batch_Change_Place2D_GUI
reload (Batch_Change_Place2D_GUI)
- How to use : just select place2d node to confirm,and select the multiple texture node to excute
"""
# ------------------------------------
"""在Hypershade批量替换place2dTexture节点
如果你觉得maya自带的命令:
---(按住鼠标中键+Ctrl,拽住place2dTexture节点,来连接到各种贴图节点)
有点麻烦的话,或者是你有太多贴图节点要连了,
这里是解决方案,一个有maya界面的命令
- 安装方式:你可以直接在maya的script editor里直接运行,或者添加到工具架,再或者在工具架上添加这个来以文件方式运行:
import sys
sys.path.append( 你的脚本路径(需要添加引号(")在路径两侧) )
import Batch_Change_Place2D_GUI
reload (Batch_Change_Place2D_GUI)
- 使用方式:选择好place2dTexture节点,然后点确认(Confirm),然后选好贴图节点执行连接就行了
"""
import maya.cmds as cmds
Batch_Change_Place2D_GUI() | 39.298387 | 112 | 0.672071 | # -!- coding: utf-8 -!-
# Author : AWACS
# Time : 2020/06/12
# ------------------------------------
""" batch change place2dTexture Node for texture in Hypershade Editor,
if you think the default command that come with maya:
---(hold middle mouse button + Ctrl key ,and drag a place2dTexture to the texture node to plug all those slot)
is kind of trouble, or you have too many texture node to operating,here's the solution,a command with maya gui.
- How to install : you can simply run this in script editor of maya,or add it in the shelf,
or run as file like add this in the shelf :
import sys
sys.path.append( your script path(add quotation marks(") on the both side of your path ) )
import Batch_Change_Place2D_GUI
reload (Batch_Change_Place2D_GUI)
- How to use : just select place2d node to confirm,and select the multiple texture node to excute
"""
# ------------------------------------
"""在Hypershade批量替换place2dTexture节点
如果你觉得maya自带的命令:
---(按住鼠标中键+Ctrl,拽住place2dTexture节点,来连接到各种贴图节点)
有点麻烦的话,或者是你有太多贴图节点要连了,
这里是解决方案,一个有maya界面的命令
- 安装方式:你可以直接在maya的script editor里直接运行,或者添加到工具架,再或者在工具架上添加这个来以文件方式运行:
import sys
sys.path.append( 你的脚本路径(需要添加引号(")在路径两侧) )
import Batch_Change_Place2D_GUI
reload (Batch_Change_Place2D_GUI)
- 使用方式:选择好place2dTexture节点,然后点确认(Confirm),然后选好贴图节点执行连接就行了
"""
import maya.cmds as cmds
def Batch_Change_Place2D_GUI():
    """Build the two-step tool window: pick a place2d node, then connect it."""
    # rebuild the window from scratch if a previous instance is still open
    if cmds.window('Batch_Change_Place2D', q=1, ex=1 ):
        cmds.deleteUI('Batch_Change_Place2D')
    TOOL_T = cmds.window('Batch_Change_Place2D' )
    cmds.showWindow('Batch_Change_Place2D')
    cmds.columnLayout()
    # step 1: text field + Confirm button storing the selected place2d node
    cmds.rowLayout(nc=6)
    cmds.text(l="1: ")
    cmds.text(l=" Select Place2D Node : ")
    cmds.textField('InputPlace2D', w=200, text = "")
    cmds.button(c=lambda *args: SetInputPlace2D(), l="Confirm", w=60)
    cmds.setParent('..')
    # step 2: button running the batch connection on the current selection
    cmds.rowLayout(nc=6)
    cmds.text(l="2: ")
    cmds.button(c=lambda *args: excute_Batch_Change_Place2d() , l="Select Target tex Node And Excute", w=200)
    cmds.setParent('..')
def SetInputPlace2D():
    """Store the first currently selected node name in the 'InputPlace2D' field."""
    Selection = cmds.ls(selection = True)
    # NOTE(review): raises IndexError when nothing is selected -- confirm intended.
    if cmds.objExists(Selection[0]):
        cmds.textField('InputPlace2D', e = 1, tx = Selection[0])
        print ("// InputPlace2D Selected : " + Selection[0])
def UV_Node_Exchange( UVNODE , TEXNODE ):
    """Plug a place2dTexture node into a texture node.

    Connects every standard place2dTexture output attribute of `UVNODE`
    (a node name string) to the matching input attribute of `TEXNODE`,
    mirroring Maya's Ctrl+middle-drag behaviour in the Hypershade.
    Connections that do not apply to the target node type are skipped on
    a best-effort basis.
    """
    # (source attribute on the place2d node, destination attribute on the texture)
    attr_pairs = [
        ('.outUV', '.uvCoord'),
        ('.outUvFilterSize', '.uvFilterSize'),
        ('.coverage', '.coverage'),
        ('.translateFrame', '.translateFrame'),
        ('.rotateFrame', '.rotateFrame'),
        ('.mirrorU', '.mirrorU'),
        ('.mirrorV', '.mirrorV'),
        ('.stagger', '.stagger'),
        ('.wrapU', '.wrapU'),
        ('.wrapV', '.wrapV'),
        ('.repeatUV', '.repeatUV'),
        ('.vertexUvOne', '.vertexUvOne'),
        ('.vertexUvTwo', '.vertexUvTwo'),
        ('.vertexUvThree', '.vertexUvThree'),
        ('.vertexCameraOne', '.vertexCameraOne'),
        ('.noiseUV', '.noiseUV'),
        ('.offset', '.offset'),
        ('.rotateUV', '.rotateUV'),
    ]
    for src, dst in attr_pairs:
        try:
            cmds.connectAttr(UVNODE + src, TEXNODE + dst, force=1)
        except Exception:
            # best effort: the attribute may not exist on this texture type.
            # (Was a bare ``except:``, which also swallowed KeyboardInterrupt.)
            pass
def UV_Node_Exchange_old( UVNODE , TEXNODE ):
    """Legacy unrolled version of ``UV_Node_Exchange``; appears unused here.

    Unlike the loop-based version it has no error handling: the first
    connection that fails raises and aborts the remaining connections.
    """
    # plug the place2dTexture node into the texture node, one attribute at a time
    cmds.connectAttr( UVNODE + '.outUV', TEXNODE + '.uvCoord', force=1)
    cmds.connectAttr( UVNODE + '.outUvFilterSize', TEXNODE + '.uvFilterSize', force=1)
    cmds.connectAttr( UVNODE + '.coverage', TEXNODE + '.coverage', force=1)
    cmds.connectAttr( UVNODE + '.translateFrame', TEXNODE + '.translateFrame', force=1)
    cmds.connectAttr( UVNODE + '.rotateFrame', TEXNODE + '.rotateFrame', force=1)
    cmds.connectAttr( UVNODE + '.mirrorU', TEXNODE + '.mirrorU', force=1)
    cmds.connectAttr( UVNODE + '.mirrorV', TEXNODE + '.mirrorV', force=1)
    cmds.connectAttr( UVNODE + '.stagger', TEXNODE + '.stagger', force=1)
    cmds.connectAttr( UVNODE + '.wrapU', TEXNODE + '.wrapU', force=1)
    cmds.connectAttr( UVNODE + '.wrapV', TEXNODE + '.wrapV', force=1)
    cmds.connectAttr( UVNODE + '.repeatUV', TEXNODE + '.repeatUV', force=1)
    cmds.connectAttr( UVNODE + '.vertexUvOne', TEXNODE + '.vertexUvOne', force=1)
    cmds.connectAttr( UVNODE + '.vertexUvTwo', TEXNODE + '.vertexUvTwo', force=1)
    cmds.connectAttr( UVNODE + '.vertexUvThree', TEXNODE + '.vertexUvThree', force=1)
    cmds.connectAttr( UVNODE + '.vertexCameraOne', TEXNODE + '.vertexCameraOne', force=1)
    cmds.connectAttr( UVNODE + '.noiseUV', TEXNODE + '.noiseUV', force=1)
    cmds.connectAttr( UVNODE + '.offset', TEXNODE + '.offset', force=1)
    cmds.connectAttr( UVNODE + '.rotateUV', TEXNODE + '.rotateUV', force=1)
def excute_Batch_Change_Place2d( ):
    """Connect the confirmed place2d node to every selected texture node.

    Reads the place2d node name from the 'InputPlace2D' text field and
    calls ``UV_Node_Exchange`` once per node in the current selection.
    Failures on one node do not stop the remaining nodes.
    """
    Selection = cmds.ls(selection = True)
    InputPlace2D = cmds.textField('InputPlace2D' , query=1, text = 1)
    for node in Selection:
        try:
            UV_Node_Exchange( InputPlace2D , node )
        except Exception:
            # report and keep going (was a bare ``except:``, which also
            # swallowed KeyboardInterrupt/SystemExit)
            print("Change_Failed")
    print("completed")
Batch_Change_Place2D_GUI() | 3,357 | 0 | 125 |
ef30c02cef9cdc5224e56c8c64dd5d8af9b69c68 | 918 | py | Python | Duong/AirTop.py | nhatduong01/MathModelAss | 0dc288c55de00ab60c9aa71b8d28ec66e7db1584 | [
"Unlicense"
] | null | null | null | Duong/AirTop.py | nhatduong01/MathModelAss | 0dc288c55de00ab60c9aa71b8d28ec66e7db1584 | [
"Unlicense"
] | null | null | null | Duong/AirTop.py | nhatduong01/MathModelAss | 0dc288c55de00ab60c9aa71b8d28ec66e7db1584 | [
"Unlicense"
] | null | null | null |
U_scr = 0.5
K_scr = 40
T_air = 298.15
T_top = 303.15
g = 9.8
p_air = air_density(T_air)
p_top = air_density(T_top)
p_mean_air = (p_air + p_top)/2
input_tuple = (U_scr, K_scr, T_air, T_top, g, p_mean_air, p_air, p_top)
# The tuple order is U_ src, K_Scr , T_air, T_top, gravitational coefficient,p_mean_air, p_air, p_Top
a = air_flow_rate(input_tuple)
print(MC_air_top(a))
| 30.6 | 118 | 0.6939 | def air_density(temperature):
    # Ideal-gas density of dry air: P / (R_specific * T) with standard
    # pressure P = 101325 Pa and R_specific = 287 J/(kg*K).
    # `temperature` is in kelvin; the result is in kg/m^3.
    return 101325/(287*temperature)
U_scr = 0.5
K_scr = 40
T_air = 298.15
T_top = 303.15
g = 9.8
p_air = air_density(T_air)
p_top = air_density(T_top)
p_mean_air = (p_air + p_top)/2
input_tuple = (U_scr, K_scr, T_air, T_top, g, p_mean_air, p_air, p_top)
# Tuple order: U_scr, K_scr, T_air, T_top, gravitational acceleration g, p_mean_air, p_air, p_top
def air_flow_rate(input_tuple):
    """Air exchange rate between the greenhouse air and the top compartment.

    `input_tuple` is (U_scr, K_scr, T_air, T_top, g, p_mean_air, p_air,
    p_top): the screen opening fraction, screen flux coefficient, the two
    compartment temperatures, gravitational acceleration, and the mean and
    per-compartment air densities.
    """
    u_scr, k_scr, t_air, t_top, g, rho_mean, rho_air, rho_top = input_tuple
    # flux through the screen itself, driven by the temperature difference
    screen_term = u_scr * k_scr * abs(t_air - t_top) ** (2 / 3)
    # buoyancy-driven flux through the screen opening, driven by the
    # density difference
    gap_term = (1 - u_scr) * ((g * (1 - u_scr) / (2 * rho_mean)) * abs(rho_air - rho_top)) ** 0.5
    return screen_term + gap_term
def MC_air_top(air_flow_rate):
    """Return the CO2 flux from air to top: air_flow_rate * (CO2_air - CO2_top).

    Prompts interactively for both CO2 concentrations (integers).
    NOTE(review): the parameter shadows the module-level ``air_flow_rate``
    function -- here it is the numeric rate, not the function.
    """
    CO2_air = int(input("Please enter the CO2 concentration in air : "))
    CO2_top = int(input("Please enter the CO2 concentration in top :"))
    return air_flow_rate*(CO2_air-CO2_top)
a = air_flow_rate(input_tuple)
print(MC_air_top(a))
| 449 | 0 | 73 |
835df54b55d95ba6e029e2face4087974cd3c222 | 788 | py | Python | acapy_plugin_toolbox/holder/v0_1/messages/pres_list.py | mepeltier/aries-acapy-plugin-toolbox | 69cb760c2e802095440759166c8f6a2d69ca7281 | [
"Apache-2.0"
] | 13 | 2020-03-17T11:29:47.000Z | 2022-03-17T19:56:50.000Z | acapy_plugin_toolbox/holder/v0_1/messages/pres_list.py | frostyfrog/aries-acapy-plugin-toolbox | 17ded7e482e8bf95091c97a271d0b9f16643daef | [
"Apache-2.0"
] | 36 | 2020-02-13T18:23:18.000Z | 2022-03-01T20:06:59.000Z | acapy_plugin_toolbox/holder/v0_1/messages/pres_list.py | cjhowland/aries-acapy-plugin-toolbox | 1501b1f6152623421df9064d96b646f8b47d5afc | [
"Apache-2.0"
] | 23 | 2019-11-20T18:16:37.000Z | 2022-03-26T08:23:33.000Z | from marshmallow import fields
from ....decorators.pagination import Page
from ....util import expand_message_class
from .base import AdminHolderMessage
@expand_message_class
class PresList(AdminHolderMessage):
"""Presentation get list response message."""
message_type = "presentations-list"
class Fields:
"""Fields for presentation list message."""
results = fields.List(fields.Dict(), description="Retrieved presentations.")
page = fields.Nested(
Page.Schema,
required=False,
data_key="~page",
description="Pagination decorator.",
)
| 27.172414 | 84 | 0.652284 | from marshmallow import fields
from ....decorators.pagination import Page
from ....util import expand_message_class
from .base import AdminHolderMessage
@expand_message_class
class PresList(AdminHolderMessage):
    """Presentation get list response message."""
    message_type = "presentations-list"
    class Fields:
        """Fields for presentation list message."""
        # serialized list of presentation records
        results = fields.List(fields.Dict(), description="Retrieved presentations.")
        # optional pagination info, serialized under the "~page" decorator key
        page = fields.Nested(
            Page.Schema,
            required=False,
            data_key="~page",
            description="Pagination decorator.",
        )
    def __init__(self, results, page: "Page | None" = None, **kwargs):
        """Store the retrieved presentations and optional pagination info.

        Remaining keyword arguments are forwarded to AdminHolderMessage.
        """
        super().__init__(**kwargs)
        self.results = results
        self.page = page
| 127 | 0 | 27 |
76dfcc447d4f671cd6308e8514ab4ced63226fac | 1,778 | py | Python | scripts/db_update/update_04a_to_05.py | rhinoman/wikifeat | 1fc2c0c40fb412e64d60224a20ffda646c25eddc | [
"BSD-3-Clause"
] | 118 | 2015-08-06T15:44:22.000Z | 2022-03-27T22:00:19.000Z | scripts/db_update/update_04a_to_05.py | rhinoman/wikifeat | 1fc2c0c40fb412e64d60224a20ffda646c25eddc | [
"BSD-3-Clause"
] | 62 | 2015-08-05T02:22:09.000Z | 2020-07-25T15:12:51.000Z | scripts/db_update/update_04a_to_05.py | rhinoman/wikifeat | 1fc2c0c40fb412e64d60224a20ffda646c25eddc | [
"BSD-3-Clause"
] | 9 | 2015-11-05T00:30:40.000Z | 2020-03-11T17:27:43.000Z | #!/usr/bin/env python3
"""
Update Wikifeat couchdb databases from 0.4.0a to 0.5.0
Note: Requires python3
Changes:
1. Added getImageFileIndex view to wiki design documents
"""
import json
import common
import sys
wiki_ddoc = 'wikit'
getImageFileIndex = dict()
getImageFileIndex['map'] = """
function(doc){
if(doc.type==="file"){
const att=doc._attachments;
const contentType=att[Object.keys(att)[0]].content_type;
if(contentType.substring(0,6)==="image/"){
emit(doc.name,doc);
}
}
}
"""
getImageFileIndex['reduce'] = "_count"
args = common.parse_args()
conn = common.get_connection(args.use_ssl, args.couch_server, args.couch_port)
credentials = common.get_credentials(args.adminuser, args.adminpass)
get_headers = common.get_headers(credentials)
put_headers = common.put_headers(credentials)
# Update all the wiki design docs
conn.request("GET", '/_all_dbs', headers=get_headers)
db_list = common.decode_response(conn.getresponse())
wiki_list = [db for db in db_list if db[0:5] == "wiki_"]
# Update the wiki dbs
for wiki in wiki_list:
print("Examining " + wiki)
# Fetch design doc
ddoc_uri = '/' + wiki + '/_design/' + wiki_ddoc
conn.request("GET", ddoc_uri, headers=get_headers)
resp = conn.getresponse()
ddoc = common.decode_response(resp)
print("Updating " + wiki)
ddoc['views']['getImageFileIndex'] = getImageFileIndex
req_body = json.dumps(ddoc)
conn.request("PUT", ddoc_uri, body=req_body, headers=put_headers)
resp = conn.getresponse()
common.decode_response(resp)
if resp.getcode() == 201 or resp.getcode() == 200:
print("Update successful.")
else:
print("Update failed.")
# Lastly, close the connection
conn.close()
| 26.939394 | 78 | 0.683352 | #!/usr/bin/env python3
"""
Update Wikifeat couchdb databases from 0.4.0a to 0.5.0
Note: Requires python3
Changes:
1. Added getImageFileIndex view to wiki design documents
"""
import json
import common
import sys
wiki_ddoc = 'wikit'
getImageFileIndex = dict()
getImageFileIndex['map'] = """
function(doc){
if(doc.type==="file"){
const att=doc._attachments;
const contentType=att[Object.keys(att)[0]].content_type;
if(contentType.substring(0,6)==="image/"){
emit(doc.name,doc);
}
}
}
"""
getImageFileIndex['reduce'] = "_count"
args = common.parse_args()
conn = common.get_connection(args.use_ssl, args.couch_server, args.couch_port)
credentials = common.get_credentials(args.adminuser, args.adminpass)
get_headers = common.get_headers(credentials)
put_headers = common.put_headers(credentials)
# Update all the wiki design docs
conn.request("GET", '/_all_dbs', headers=get_headers)
db_list = common.decode_response(conn.getresponse())
# keep only the databases named with the "wiki_" prefix
wiki_list = [db for db in db_list if db[0:5] == "wiki_"]
# Update the wiki dbs
for wiki in wiki_list:
    print("Examining " + wiki)
    # Fetch design doc
    ddoc_uri = '/' + wiki + '/_design/' + wiki_ddoc
    conn.request("GET", ddoc_uri, headers=get_headers)
    resp = conn.getresponse()
    ddoc = common.decode_response(resp)
    print("Updating " + wiki)
    # add (or overwrite) the new view, then write the design doc back
    ddoc['views']['getImageFileIndex'] = getImageFileIndex
    req_body = json.dumps(ddoc)
    conn.request("PUT", ddoc_uri, body=req_body, headers=put_headers)
    resp = conn.getresponse()
    # decode_response presumably drains/validates the body; its return
    # value is not needed here -- see the common module
    common.decode_response(resp)
    # CouchDB answers 201 (or 200) when the document update is accepted
    if resp.getcode() == 201 or resp.getcode() == 200:
        print("Update successful.")
    else:
        print("Update failed.")
# Lastly, close the connection
conn.close()
| 0 | 0 | 0 |
67155589e90f5a5efbcc8ba552364bbd06fdd1da | 4,244 | py | Python | demo_binary_logistic_regression.py | JyotinderSingh/TinyFlow-Deep-Learning-Framework | 1a57f273d05cf1ac940da61fa4713c5265b4c46d | [
"MIT"
] | null | null | null | demo_binary_logistic_regression.py | JyotinderSingh/TinyFlow-Deep-Learning-Framework | 1a57f273d05cf1ac940da61fa4713c5265b4c46d | [
"MIT"
] | 1 | 2020-07-05T08:10:16.000Z | 2020-07-05T13:53:27.000Z | demo_binary_logistic_regression.py | JyotinderSingh/TinyFlow-Deep-Learning-Framework | 1a57f273d05cf1ac940da61fa4713c5265b4c46d | [
"MIT"
] | 1 | 2020-05-12T15:27:00.000Z | 2020-05-12T15:27:00.000Z | # Binary Logistic Regression Demo
import numpy as np
from TinyFlow.Datasets import spiral_data
from TinyFlow.Metrics import model_accuracy_sigmoid
from TinyFlow.Layers import Layer_Dense
from TinyFlow.Activations import Activation_ReLU, Activation_Sigmoid
from TinyFlow.Loss import Loss_BinaryCrossEntropy
from TinyFlow.Optimizers import Optimizer_Adam
# Create dataset
X, y = spiral_data(100, 2)
# Reshape the labels as they aren't sparse anymore, They're binary, 0 & 1
# Reshape the labels to be a list of lists
# Inner list contains one output (either 0 or 1)
# per each output neuron, 1 in this case
#
# We do this reshaping as spiral data values are mapped directly to
# the sparse class values taht were the ideal "one hot index from
# the network's output". However in this case we're trying to represent
# Binary output. IN this example we have a single output neuron, of a target
# value of either 0 or 1.
y = y.reshape(-1, 1)
# Create a dense layer with 2 input features and 3 output values
# first dense layer, 2 inputs (each sample has 2 features), 64 outputs
dense1 = Layer_Dense(2, 64, weight_regularizer_l2=5e-4,
bias_regulariser_l2=5e-4)
# Create ReLU activation
activation1 = Activation_ReLU()
# Create second dense layer with 64 input features
# (as we take output of previous layer here) and 1 output
dense2 = Layer_Dense(64, 1)
# Create Sigmoid Activation
activation2 = Activation_Sigmoid()
# Create a loss function
loss_function = Loss_BinaryCrossEntropy()
# Create an optimizer
optimizer = Optimizer_Adam(decay=1e-8)
# Train in loop
for epoch in range(10001):
# Make a forward pass of our training adaa through this layer
dense1.forward(X)
# Make a forward pass through our activation function
activation1.forward(dense1.output)
# Make forward pass through second dense layer
dense2.forward(activation1.output)
# Make a forward pass through the second activation function
activation2.forward(dense2.output)
# Calculate the losses from the second activation function
sample_losses = loss_function.forward(activation2.output, y)
# Calculate mean loss
data_loss = np.mean(sample_losses)
# Calculate regularization penalty
regularization_loss = loss_function.regularization_loss(
dense1) + loss_function.regularization_loss(dense2)
# Overall loss
loss = data_loss + regularization_loss
# Calculate accuracy from output of activation2 and targets
# Part in the brackets returns a binary maskk - array consisting
# of True/False values, multiplying it by 1 changes it into array of 1s and 0s
accuracy = model_accuracy_sigmoid(activation2.output, y)
if not epoch % 100:
print(f'epoch: {epoch}, acc: {accuracy:.3f}, loss: {loss:.3f} (data_loss: {data_loss:.3f}, reg_loss: {regularization_loss:.3f}), lr: {optimizer.current_learning_rate:.5f}')
# Backward pass
loss_function.backward(activation2.output, y)
activation2.backward(loss_function.dvalues)
dense2.backward(activation2.dvalues)
activation1.backward(dense2.dvalues)
dense1.backward(activation1.dvalues)
# Update weights
optimizer.pre_update_params()
optimizer.update_params(dense1)
optimizer.update_params(dense2)
optimizer.post_update_params()
# Validate model
# Create test dataset
X_test ,y_test = spiral_data(100, 2)
# Reshape labels to be a list of lists
# Inner list contains one output (either 0 or 1)
# per output neuron, 1 in this case
y_test = y_test.reshape(-1, 1)
# Make a forward pass of the testing data though this layer
dense1.forward(X_test)
# Make a forward pass through the activation function
activation1.forward(dense1.output)
# Make a forward pass through the second dense layer
dense2.forward(activation1.output)
# Make a forward pass through the second activation function
activation2.forward(dense2.output)
# Calculate the sample loses from output of activation2 and targets
sample_losses = loss_function.forward(activation2.output, y_test)
# Calculate mean loss
loss = np.mean(sample_losses)
# Calculate accuracy over test data
accuracy = model_accuracy_sigmoid(activation2.output, y_test)
print(f'validation, acc: {accuracy:.3f}, loss: {loss:.3f}') | 33.68254 | 180 | 0.761074 | # Binary Logistic Regression Demo
import numpy as np
from TinyFlow.Datasets import spiral_data
from TinyFlow.Metrics import model_accuracy_sigmoid
from TinyFlow.Layers import Layer_Dense
from TinyFlow.Activations import Activation_ReLU, Activation_Sigmoid
from TinyFlow.Loss import Loss_BinaryCrossEntropy
from TinyFlow.Optimizers import Optimizer_Adam
# Create dataset: 100 samples per class, 2 classes, 2 features per sample
X, y = spiral_data(100, 2)
# Reshape the labels as they aren't sparse anymore, they're binary, 0 & 1.
# Reshape the labels to be a list of lists:
# the inner list contains one output (either 0 or 1)
# per each output neuron, 1 in this case.
#
# We do this reshaping as spiral data values are mapped directly to
# the sparse class values that were the ideal "one hot index from
# the network's output". However in this case we're trying to represent
# binary output. In this example we have a single output neuron, of a target
# value of either 0 or 1.
y = y.reshape(-1, 1)
# First dense layer, 2 inputs (each sample has 2 features), 64 outputs,
# with L2 regularization on both weights and biases.
dense1 = Layer_Dense(2, 64, weight_regularizer_l2=5e-4,
                     bias_regulariser_l2=5e-4)
# Create ReLU activation
activation1 = Activation_ReLU()
# Create second dense layer with 64 input features
# (as we take output of previous layer here) and 1 output
dense2 = Layer_Dense(64, 1)
# Create Sigmoid Activation (maps the single logit into [0, 1])
activation2 = Activation_Sigmoid()
# Create a loss function
loss_function = Loss_BinaryCrossEntropy()
# Create an optimizer
optimizer = Optimizer_Adam(decay=1e-8)
# Train in loop
for epoch in range(10001):
    # Make a forward pass of our training data through this layer
    dense1.forward(X)
    # Make a forward pass through our activation function
    activation1.forward(dense1.output)
    # Make forward pass through second dense layer
    dense2.forward(activation1.output)
    # Make a forward pass through the second activation function
    activation2.forward(dense2.output)
    # Calculate the per-sample losses from the second activation function
    sample_losses = loss_function.forward(activation2.output, y)
    # Calculate mean loss
    data_loss = np.mean(sample_losses)
    # Calculate regularization penalty (from both dense layers)
    regularization_loss = loss_function.regularization_loss(
        dense1) + loss_function.regularization_loss(dense2)
    # Overall loss
    loss = data_loss + regularization_loss
    # Calculate accuracy from output of activation2 and targets.
    # The helper thresholds the sigmoid output into a binary mask of
    # 1s and 0s and compares it against the targets.
    accuracy = model_accuracy_sigmoid(activation2.output, y)
    if not epoch % 100:
        print(f'epoch: {epoch}, acc: {accuracy:.3f}, loss: {loss:.3f} (data_loss: {data_loss:.3f}, reg_loss: {regularization_loss:.3f}), lr: {optimizer.current_learning_rate:.5f}')
    # Backward pass (gradients flow in reverse order of the forward pass)
    loss_function.backward(activation2.output, y)
    activation2.backward(loss_function.dvalues)
    dense2.backward(activation2.dvalues)
    activation1.backward(dense2.dvalues)
    dense1.backward(activation1.dvalues)
    # Update weights
    optimizer.pre_update_params()
    optimizer.update_params(dense1)
    optimizer.update_params(dense2)
    optimizer.post_update_params()
# Validate model
# Create test dataset (freshly sampled, same distribution as training)
X_test ,y_test = spiral_data(100, 2)
# Reshape labels to be a list of lists:
# the inner list contains one output (either 0 or 1)
# per output neuron, 1 in this case
y_test = y_test.reshape(-1, 1)
# Make a forward pass of the testing data through this layer
dense1.forward(X_test)
# Make a forward pass through the activation function
activation1.forward(dense1.output)
# Make a forward pass through the second dense layer
dense2.forward(activation1.output)
# Make a forward pass through the second activation function
activation2.forward(dense2.output)
# Calculate the sample losses from output of activation2 and targets
sample_losses = loss_function.forward(activation2.output, y_test)
# Calculate mean loss
loss = np.mean(sample_losses)
# Calculate accuracy over test data
accuracy = model_accuracy_sigmoid(activation2.output, y_test)
print(f'validation, acc: {accuracy:.3f}, loss: {loss:.3f}') | 0 | 0 | 0 |
122ec9d4f375db1141b39b38f4d2b9ed772a0817 | 5,163 | py | Python | models/imagenet/fbnet_v2.py | a1004123217/pytorch-mobile | 97974af3259a2073efbc334d57841efbd3eaadfb | [
"MIT"
] | null | null | null | models/imagenet/fbnet_v2.py | a1004123217/pytorch-mobile | 97974af3259a2073efbc334d57841efbd3eaadfb | [
"MIT"
] | null | null | null | models/imagenet/fbnet_v2.py | a1004123217/pytorch-mobile | 97974af3259a2073efbc334d57841efbd3eaadfb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
FBNet classification models
Example code to create the model:
from mobile_cv.model_zoo.models.fbnet_v2 import fbnet
model = fbnet("fbnet_cse", pretrained=True)
model.eval()
Full example code is available at `examples/run_fbnet_v2.py`.
All suported architectures could be found in:
mobile_cv/arch/fbnet_v2/fbnet_modeldef_cls*.py
Architectures with pretrained weights could be found in:
mobile_cv/model_zoo/models/model_info/fbnet_v2/*.json
"""
import typing
import torch
import torch.nn as nn
from mobile_cv.arch.fbnet_v2 import fbnet_builder as mbuilder
from mobile_cv.arch.fbnet_v2 import fbnet_modeldef_cls as modeldef
from mobile_cv.arch.utils import misc
from mobile_cv.model_zoo.models import hub_utils, utils
PRETRAINED_MODELS = _load_pretrained_info()
NAME_MAPPING = {
# external name : internal name
"FBNet_a": "fbnet_a",
"FBNet_b": "fbnet_b",
"FBNet_c": "fbnet_c",
"FBNet_ase": "fbnet_ase",
"FBNet_bse": "fbnet_ase",
"FBNet_cse": "fbnet_ase",
"MobileNetV3": "mnv3",
"FBNetV2_F1": "dmasking_f1",
"FBNetV2_F5": "dmasking_l2",
}
class ClsConvHead(nn.Module):
"""Global average pooling + conv head for classification
"""
def fbnet(arch_name, pretrained=False, progress=True, **kwargs):
"""
Constructs a FBNet architecture named `arch_name`
Args:
arch_name (str): Architecture name
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
if isinstance(arch_name, str) and arch_name in NAME_MAPPING:
arch_name = NAME_MAPPING[arch_name]
model = FBNet(arch_name, **kwargs)
if pretrained:
assert (
arch_name in PRETRAINED_MODELS
), f"Invalid arch {arch_name}, supported arch {PRETRAINED_MODELS.keys()}"
model_info = PRETRAINED_MODELS[arch_name]
model_path = model_info["model_path"]
state_dict = _load_fbnet_state_dict(model_path, progress=progress)
model.load_state_dict(state_dict)
model.model_info = model_info
return model
| 29.843931 | 83 | 0.675576 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
FBNet classification models
Example code to create the model:
from mobile_cv.model_zoo.models.fbnet_v2 import fbnet
model = fbnet("fbnet_cse", pretrained=True)
model.eval()
Full example code is available at `examples/run_fbnet_v2.py`.
All suported architectures could be found in:
mobile_cv/arch/fbnet_v2/fbnet_modeldef_cls*.py
Architectures with pretrained weights could be found in:
mobile_cv/model_zoo/models/model_info/fbnet_v2/*.json
"""
import typing
import torch
import torch.nn as nn
from mobile_cv.arch.fbnet_v2 import fbnet_builder as mbuilder
from mobile_cv.arch.fbnet_v2 import fbnet_modeldef_cls as modeldef
from mobile_cv.arch.utils import misc
from mobile_cv.model_zoo.models import hub_utils, utils
def _load_pretrained_info():
    """Load the model-info entries (paths/metadata) for all fbnet_v2 models."""
    info_dir = utils.get_model_info_folder("fbnet_v2")
    return utils.load_model_info_all(info_dir)
PRETRAINED_MODELS = _load_pretrained_info()
NAME_MAPPING = {
# external name : internal name
"FBNet_a": "fbnet_a",
"FBNet_b": "fbnet_b",
"FBNet_c": "fbnet_c",
"FBNet_ase": "fbnet_ase",
"FBNet_bse": "fbnet_ase",
"FBNet_cse": "fbnet_ase",
"MobileNetV3": "mnv3",
"FBNetV2_F1": "dmasking_f1",
"FBNetV2_F5": "dmasking_l2",
}
def _load_fbnet_state_dict(file_name, progress=True):
if file_name.startswith("https://"):
file_name = hub_utils.download_file(file_name, progress=progress)
state_dict = torch.load(file_name, map_location="cpu")
if "model_ema" in state_dict and state_dict["model_ema"] is not None:
state_dict = state_dict["model_ema"]
else:
state_dict = state_dict["state_dict"]
ret = {}
for name, val in state_dict.items():
if name.startswith("module."):
name = name[len("module.") :]
ret[name] = val
return ret
def _create_builder(arch_name_or_def: typing.Union[str, dict]):
    """Create an FBNetBuilder plus the unified architecture definition.

    Args:
        arch_name_or_def: either the name of a registered architecture
            (a key of ``modeldef.MODEL_ARCH``) or an explicit arch-def dict.

    Returns:
        ``(builder, arch_def)`` where ``arch_def`` has been normalized by
        ``unify_arch_def`` and carries a ``"dropout_ratio"`` entry.
    """
    if isinstance(arch_name_or_def, str):
        assert arch_name_or_def in modeldef.MODEL_ARCH, (
            f"Invalid arch name {arch_name_or_def}, "
            f"available names: {modeldef.MODEL_ARCH.keys()}"
        )
        arch_def = modeldef.MODEL_ARCH[arch_name_or_def]
    else:
        assert isinstance(arch_name_or_def, dict)
        arch_def = arch_name_or_def
    # Normalize the "blocks" section into the canonical builder format.
    arch_def = mbuilder.unify_arch_def(arch_def, ["blocks"])
    # Fixed hyper-parameters shared by all classification models here.
    scale_factor = 1.0
    width_divisor = 1
    bn_info = {"name": "bn", "momentum": 0.003}
    drop_out = 0.2
    arch_def["dropout_ratio"] = drop_out
    builder = mbuilder.FBNetBuilder(
        width_ratio=scale_factor, bn_args=bn_info, width_divisor=width_divisor
    )
    builder.add_basic_args(**arch_def.get("basic_args", {}))
    return builder, arch_def
class ClsConvHead(nn.Module):
    """Classification head: global average pooling followed by a 1x1 conv.

    Accepts feature maps of arbitrary spatial size and returns flat
    ``(batch, output_dim)`` logits.
    """
    def __init__(self, input_dim, output_dim):
        super().__init__()
        # Pool to 1x1 so any input resolution is supported.
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # A 1x1 conv acts as the final linear classifier.
        self.conv = nn.Conv2d(input_dim, output_dim, 1)
    def forward(self, x):
        pooled = self.avg_pool(x)
        logits = self.conv(pooled)
        # Drop the trailing 1x1 spatial dims -> (batch, output_dim).
        return logits.view(logits.size(0), -1)
class FBNetBackbone(nn.Module):
    """FBNet feature extractor: the stacked blocks without a classifier head.

    ``out_channels`` exposes the channel count of the last stage so that a
    head can be attached on top.
    """
    def __init__(self, arch_name, dim_in=3):
        """arch_name: registered arch name or arch-def dict; dim_in: input channels."""
        super().__init__()
        builder, arch_def = _create_builder(arch_name)
        self.stages = builder.build_blocks(arch_def["blocks"], dim_in=dim_in)
        # NOTE(review): forward() guards for None, so add_dropout presumably
        # returns None when dropout is disabled — confirm in arch.utils.misc.
        self.dropout = misc.add_dropout(arch_def["dropout_ratio"])
        self.out_channels = builder.last_depth
        self.arch_def = arch_def
    def forward(self, x):
        """Run the input through all stages (plus optional dropout)."""
        y = self.stages(x)
        if self.dropout is not None:
            y = self.dropout(y)
        return y
class FBNet(nn.Module):
    """Complete FBNet classifier: FBNetBackbone followed by ClsConvHead."""
    def __init__(self, arch_name, dim_in=3, num_classes=1000):
        super().__init__()
        self.backbone = FBNetBackbone(arch_name, dim_in)
        self.head = ClsConvHead(self.backbone.out_channels, num_classes)
    def forward(self, x):
        features = self.backbone(x)
        return self.head(features)
    @property
    def arch_def(self):
        # The normalized architecture definition lives on the backbone.
        return self.backbone.arch_def
def fbnet(arch_name, pretrained=False, progress=True, **kwargs):
    """
    Constructs a FBNet architecture named `arch_name`.

    Args:
        arch_name (str): Architecture name (external names such as
            "FBNet_cse" are first translated via NAME_MAPPING)
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download
            to stderr
        **kwargs: forwarded to the FBNet constructor (e.g. num_classes)
    """
    if isinstance(arch_name, str) and arch_name in NAME_MAPPING:
        arch_name = NAME_MAPPING[arch_name]
    model = FBNet(arch_name, **kwargs)
    if pretrained:
        # Only archs listed in the bundled model-info JSONs have weights.
        assert (
            arch_name in PRETRAINED_MODELS
        ), f"Invalid arch {arch_name}, supported arch {PRETRAINED_MODELS.keys()}"
        model_info = PRETRAINED_MODELS[arch_name]
        model_path = model_info["model_path"]
        state_dict = _load_fbnet_state_dict(model_path, progress=progress)
        model.load_state_dict(state_dict)
        # Keep provenance/metadata available on the returned model.
        model.model_info = model_info
    return model
| 2,596 | 106 | 222 |
1531d0b47665f5afed6b70413c566b13a32964a5 | 1,029 | py | Python | format_dataset/prepare_train_image.py | Srinjoycode/Binary-Segmentation-with-Spatial-Attention | 851008319eb8f6a351eccd4698b29a4335c1cf2a | [
"MIT"
] | null | null | null | format_dataset/prepare_train_image.py | Srinjoycode/Binary-Segmentation-with-Spatial-Attention | 851008319eb8f6a351eccd4698b29a4335c1cf2a | [
"MIT"
] | null | null | null | format_dataset/prepare_train_image.py | Srinjoycode/Binary-Segmentation-with-Spatial-Attention | 851008319eb8f6a351eccd4698b29a4335c1cf2a | [
"MIT"
] | null | null | null | """
Author: @ayushmankumar7
Paste this file (prepare_train_mask.py) in "leftImg8bit_trainvaltest/leftImg8bit".
There are 3 folder in this directory - test, train, val.
Paste the file inside the folder containing the 3 folders.
In Command Prompt or Terminal :
python prepare_train_image.py
Link for the label:
https://www.cityscapes-dataset.com/file-handling/?packageID=1
"""
import os
import cv2
import numpy as np
import glob

# Collect Cityscapes training images from "train/<city>/" into a flat
# "train_image/" folder.
os.makedirs("train_image", exist_ok=True)  # idempotent; replaces bare try/except
print("It might take a few time. Be patient! Let this program run in background.")
files = glob.glob("train/*")
for file in files:
    # os.path.join keeps this portable — the old hard-coded "\\" glob and
    # split only worked on Windows.
    images = glob.glob(os.path.join(file, "*.png"))
    for image in images:
        img_name = os.path.basename(image)
        img = cv2.imread(image)
        cv2.imwrite(os.path.join("train_image", img_name), img)
        print(image, "----> DONE")
print("You train Images is stored in './train_image' successfully! \n You may proceed. \n Thank you")
| 22.866667 | 101 | 0.664723 | """
Author: @ayushmankumar7
Paste this file (prepare_train_mask.py) in "leftImg8bit_trainvaltest/leftImg8bit".
There are 3 folder in this directory - test, train, val.
Paste the file inside the folder containing the 3 folders.
In Command Prompt or Terminal :
python prepare_train_image.py
Link for the label:
https://www.cityscapes-dataset.com/file-handling/?packageID=1
"""
import os
import cv2
import numpy as np
import glob

# Collect Cityscapes training images from "train/<city>/" into a flat
# "train_image/" folder.
os.makedirs("train_image", exist_ok=True)  # idempotent; replaces bare try/except
print("It might take a few time. Be patient! Let this program run in background.")
files = glob.glob("train/*")
for file in files:
    # os.path.join keeps this portable — the old hard-coded "\\" glob and
    # split only worked on Windows.
    images = glob.glob(os.path.join(file, "*.png"))
    for image in images:
        img_name = os.path.basename(image)
        img = cv2.imread(image)
        cv2.imwrite(os.path.join("train_image", img_name), img)
        print(image, "----> DONE")
print("You train Images is stored in './train_image' successfully! \n You may proceed. \n Thank you")
| 0 | 0 | 0 |
523fb495858e870283566cb905b011d71bc3ecd8 | 902 | py | Python | driver/tests.py | toelapiut/uber-pool | 94354982e65ef379f0bd9bc6e2268038d823bd6e | [
"MIT"
] | null | null | null | driver/tests.py | toelapiut/uber-pool | 94354982e65ef379f0bd9bc6e2268038d823bd6e | [
"MIT"
] | 6 | 2020-06-05T17:47:14.000Z | 2021-09-07T23:49:07.000Z | driver/tests.py | toelapiut/uber-pool | 94354982e65ef379f0bd9bc6e2268038d823bd6e | [
"MIT"
] | null | null | null | from django.test import TestCase
from .models import *
from django.contrib.auth.models import User
# Create your tests here.
# test for checking instance
# test for saving
# test for deleting Driver or Editor
| 32.214286 | 71 | 0.706208 | from django.test import TestCase
from .models import *
from django.contrib.auth.models import User
# Create your tests here.
class DriverRiderTest(TestCase):
    """Model tests for Driver_Or_Rider: creation, save and delete helpers."""

    def setUp(self):
        # The old `User.objects.get(id).all()` was broken: `id` is the
        # builtin, and `.get()` returns a single instance (no `.all()`).
        # Create a fresh user per test instead.
        user = User.objects.create(username="test_user")
        self.driver_rider = Driver_Or_Rider(user_id=user, selection=1)

    # test for checking instance
    def test_isinstance(self):
        self.assertTrue(isinstance(self.driver_rider, Driver_Or_Rider))

    # test for saving
    def test_driver_rider_save(self):
        self.driver_rider.save_d_or_r()
        driver_riders = Driver_Or_Rider.objects.all()
        # fixed: previously asserted on the misspelled `driver_rider` (NameError)
        self.assertTrue(len(driver_riders) > 0)

    # test for deleting Driver or Editor
    def test_Driver_Rider_delete(self):
        # fixed: `save_d_or_r()()` / `delete_d_or_r()()` called the
        # method's (None) return value, raising TypeError
        self.driver_rider.save_d_or_r()
        self.driver_rider.delete_d_or_r()
        driver_riders = Driver_Or_Rider.objects.all()
        self.assertTrue(len(driver_riders) == 0)
| 539 | 11 | 127 |
f87a2cdc73539bf080b32360d07d64f6c3b9361d | 3,502 | py | Python | .leetcode/653.two-sum-iv-input-is-a-bst.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/653.two-sum-iv-input-is-a-bst.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/653.two-sum-iv-input-is-a-bst.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | # @lc app=leetcode id=653 lang=python3
#
# [653] Two Sum IV - Input is a BST
#
# https://leetcode.com/problems/two-sum-iv-input-is-a-bst/description/
#
# algorithms
# Easy (57.56%)
# Likes: 2752
# Dislikes: 175
# Total Accepted: 248.7K
# Total Submissions: 430.4K
# Testcase Example: '[5,3,6,2,4,null,7]\n9'
#
# Given the root of a Binary Search Tree and a target number k, return true if
# there exist two elements in the BST such that their sum is equal to the given
# target.
#
#
# Example 1:
#
#
# Input: root = [5,3,6,2,4,null,7], k = 9
# Output: true
#
#
# Example 2:
#
#
# Input: root = [5,3,6,2,4,null,7], k = 28
# Output: false
#
#
# Example 3:
#
#
# Input: root = [2,1,3], k = 4
# Output: true
#
#
# Example 4:
#
#
# Input: root = [2,1,3], k = 1
# Output: false
#
#
# Example 5:
#
#
# Input: root = [2,1,3], k = 3
# Output: true
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 10^4].
# -10^4 <= Node.val <= 10^4
# root is guaranteed to be a valid binary search tree.
# -10^5 <= k <= 10^5
#
#
#
# @lc tags=tree
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 在二叉搜索树中,判断是否有两个数字和为指定值。
# 直接遍历。
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('root = [5,3,6,2,4,null,7], k = 9')
print('Exception :')
print('true')
print('Output :')
print(
str(Solution().findTarget(listToTreeNode([5, 3, 6, 2, 4, None, 7]),
9)))
print()
print('Example 2:')
print('Input : ')
print('root = [5,3,6,2,4,null,7], k = 28')
print('Exception :')
print('false')
print('Output :')
print(
str(Solution().findTarget(listToTreeNode([5, 3, 6, 2, 4, None, 7]),
28)))
print()
print('Example 3:')
print('Input : ')
print('root = [2,1,3], k = 4')
print('Exception :')
print('true')
print('Output :')
print(str(Solution().findTarget(listToTreeNode([2, 1, 3]), 4)))
print()
print('Example 4:')
print('Input : ')
print('root = [2,1,3], k = 1')
print('Exception :')
print('false')
print('Output :')
print(str(Solution().findTarget(listToTreeNode([2, 1, 3]), 1)))
print()
print('Example 5:')
print('Input : ')
print('root = [2,1,3], k = 3')
print('Exception :')
print('true')
print('Output :')
print(str(Solution().findTarget(listToTreeNode([2, 1, 3]), 3)))
print()
print(str(Solution().findTarget(listToTreeNode([1]), 2)))
pass
# @lc main=end | 20.6 | 79 | 0.538835 | # @lc app=leetcode id=653 lang=python3
#
# [653] Two Sum IV - Input is a BST
#
# https://leetcode.com/problems/two-sum-iv-input-is-a-bst/description/
#
# algorithms
# Easy (57.56%)
# Likes: 2752
# Dislikes: 175
# Total Accepted: 248.7K
# Total Submissions: 430.4K
# Testcase Example: '[5,3,6,2,4,null,7]\n9'
#
# Given the root of a Binary Search Tree and a target number k, return true if
# there exist two elements in the BST such that their sum is equal to the given
# target.
#
#
# Example 1:
#
#
# Input: root = [5,3,6,2,4,null,7], k = 9
# Output: true
#
#
# Example 2:
#
#
# Input: root = [5,3,6,2,4,null,7], k = 28
# Output: false
#
#
# Example 3:
#
#
# Input: root = [2,1,3], k = 4
# Output: true
#
#
# Example 4:
#
#
# Input: root = [2,1,3], k = 1
# Output: false
#
#
# Example 5:
#
#
# Input: root = [2,1,3], k = 3
# Output: true
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [1, 10^4].
# -10^4 <= Node.val <= 10^4
# root is guaranteed to be a valid binary search tree.
# -10^5 <= k <= 10^5
#
#
#
# @lc tags=tree
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 在二叉搜索树中,判断是否有两个数字和为指定值。
# 直接遍历。
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def findTarget(self, root: "Optional[TreeNode]", k: int) -> bool:
        """Return True if two distinct nodes of the BST sum to k.

        Single O(n) traversal with a complement set: for every visited
        value v we check whether k - v was already seen.  This replaces
        the original per-node O(h) BST search (O(n*h) overall) and only
        needs O(n) extra space.
        """
        seen = set()        # values of all nodes visited so far
        stack = [root]      # iterative DFS; visit order does not matter
        while stack:
            node = stack.pop()
            if node is None:
                continue
            if k - node.val in seen:
                return True
            seen.add(node.val)
            stack.append(node.left)
            stack.append(node.right)
        return False
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('root = [5,3,6,2,4,null,7], k = 9')
print('Exception :')
print('true')
print('Output :')
print(
str(Solution().findTarget(listToTreeNode([5, 3, 6, 2, 4, None, 7]),
9)))
print()
print('Example 2:')
print('Input : ')
print('root = [5,3,6,2,4,null,7], k = 28')
print('Exception :')
print('false')
print('Output :')
print(
str(Solution().findTarget(listToTreeNode([5, 3, 6, 2, 4, None, 7]),
28)))
print()
print('Example 3:')
print('Input : ')
print('root = [2,1,3], k = 4')
print('Exception :')
print('true')
print('Output :')
print(str(Solution().findTarget(listToTreeNode([2, 1, 3]), 4)))
print()
print('Example 4:')
print('Input : ')
print('root = [2,1,3], k = 1')
print('Exception :')
print('false')
print('Output :')
print(str(Solution().findTarget(listToTreeNode([2, 1, 3]), 1)))
print()
print('Example 5:')
print('Input : ')
print('root = [2,1,3], k = 3')
print('Exception :')
print('true')
print('Output :')
print(str(Solution().findTarget(listToTreeNode([2, 1, 3]), 3)))
print()
print(str(Solution().findTarget(listToTreeNode([1]), 2)))
pass
# @lc main=end | 606 | -6 | 48 |
a26837c48b8d0500a693fa629a875483a0ab8788 | 1,030 | py | Python | 674. Longest Continuous Increasing Subsequence.py | youhusky/Facebook_Prepare | 4045bcb652537711b3680b2aa17204ae73c6bde8 | [
"MIT"
] | 6 | 2017-10-30T05:35:46.000Z | 2020-12-15T06:51:52.000Z | 674. Longest Continuous Increasing Subsequence.py | youhusky/Facebook_Prepare | 4045bcb652537711b3680b2aa17204ae73c6bde8 | [
"MIT"
] | 1 | 2017-10-30T04:11:31.000Z | 2017-10-30T05:46:24.000Z | 674. Longest Continuous Increasing Subsequence.py | youhusky/Facebook_Prepare | 4045bcb652537711b3680b2aa17204ae73c6bde8 | [
"MIT"
] | 2 | 2020-09-03T07:14:02.000Z | 2021-05-21T19:19:57.000Z | # Given an unsorted array of integers, find the length of longest continuous increasing subsequence.
# Example 1:
# Input: [1,3,5,4,7]
# Output: 3
# Explanation: The longest continuous increasing subsequence is [1,3,5], its length is 3.
# Even though [1,3,5,7] is also an increasing subsequence, it's not a continuous one where 5 and 7 are separated by 4.
# Example 2:
# Input: [2,2,2,2,2]
# Output: 1
# Explanation: The longest continuous increasing subsequence is [2], its length is 1.
# Note: Length of the array will not exceed 10,000.
# simple DP
| 33.225806 | 119 | 0.607767 | # Given an unsorted array of integers, find the length of longest continuous increasing subsequence.
# Example 1:
# Input: [1,3,5,4,7]
# Output: 3
# Explanation: The longest continuous increasing subsequence is [1,3,5], its length is 3.
# Even though [1,3,5,7] is also an increasing subsequence, it's not a continuous one where 5 and 7 are separated by 4.
# Example 2:
# Input: [2,2,2,2,2]
# Output: 1
# Explanation: The longest continuous increasing subsequence is [2], its length is 1.
# Note: Length of the array will not exceed 10,000.
# simple DP
class Solution(object):
    def findLengthOfLCIS(self, nums):
        """Return the length of the longest strictly increasing run.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        best = 1
        run = 1
        # Walk adjacent pairs: extend the current run while the sequence
        # keeps rising, otherwise record it and start over.
        for prev, cur in zip(nums, nums[1:]):
            if cur > prev:
                run += 1
            else:
                best = max(best, run)
                run = 1
        return max(best, run)
| 0 | 452 | 22 |
b621b340b3bf805ccbd5a5e482ec56cab024df3a | 3,428 | py | Python | lexer.py | gggfox/compiler_proyect | d6c1836a0c83777e58cdc6da2a54a504a8d5b633 | [
"MIT"
] | null | null | null | lexer.py | gggfox/compiler_proyect | d6c1836a0c83777e58cdc6da2a54a504a8d5b633 | [
"MIT"
] | null | null | null | lexer.py | gggfox/compiler_proyect | d6c1836a0c83777e58cdc6da2a54a504a8d5b633 | [
"MIT"
] | null | null | null | '''
#############################
# Gerardo Galan Garzafox #
# A00821196 #
# #
# alebrije_lexer.py #
# Created_at 2021-09-25 #
# #
#############################
'''
import sys
import ply.lex as lex
reserved = {
'program' : 'PROGRAM',
'main' : 'MAIN',
'vars' : 'VARS',
'int' : 'INT',
'float' : 'FLOAT',
'bool' : 'BOOL',
'char': 'CHAR',
'string' : 'STRING',
'function' : 'FUNCTION',
'return' : 'RETURN',
'read' : 'READ',
'write' : 'WRITE',
'if' : 'IF',
'else' : 'ELSE',
'while' : 'WHILE',
'for' : 'FOR',
'to' : 'TO',
'void' : 'VOID',
'and' : 'AND',
'or' : 'OR',
'median' : 'MEDIAN',
'mode' : 'MODE',
'mean' : 'MEAN',
'variance' : 'VARIANCE',
'regression' : 'REGRESSION',
'plotxy' : 'PLOTXY',
'max' : 'MAX',
'min' : 'MIN',
}
tokens = list(reserved.values()) + [
'LESS',
'GREATER',
'LESS_EQ',
'GREATER_EQ',
'EQUIVALENT',
'DIFFERENT',
'EQUAL',
'MULT',
'DIV',
'PLUS',
'MINUS',
'REMAINDER',
'EXP',
'MULT_EQ',
'DIV_EQ',
'PLUS_EQ',
'MINUS_EQ',
'L_BRACE',
'R_BRACE',
'L_BRACKET',
'R_BRACKET',
'L_PAR',
'R_PAR',
'COLON',
'SEMICOLON',
'COMMA',
'ID',
'CTE_INT',
'CTE_FLOAT',
'CTE_BOOL',
'CTE_CHAR',
'CTE_STRING'
]
# Simple tokens
t_LESS = r'\<'
t_GREATER = r'\>'
t_LESS_EQ = r'\<\='
t_GREATER_EQ = r'\>\='
t_EQUAL = r'\='
t_MULT = r'\*'
t_DIV = r'\/'
t_PLUS = r'\+'
t_MINUS = r'\-'
t_REMAINDER = r'\%'
t_EXP = r'\^'
t_MULT_EQ = r'\*\='
t_DIV_EQ = r'\/\='
t_PLUS_EQ = r'\+\='
t_MINUS_EQ = r'\-\='
t_L_BRACE = r'\{'
t_R_BRACE = r'\}'
t_L_BRACKET = r'\['
t_R_BRACKET = r'\]'
t_L_PAR = r'\('
t_R_PAR = r'\)'
t_COLON = r'\:'
t_SEMICOLON = r'\;'
t_COMMA = r'\,'
t_EQUIVALENT = r'\=\='
t_DIFFERENT = r'\!\='
t_ignore = ' \t'
t_ignore_COMMENT = r'\/\/.*'
# complex tokens
def t_CTE_BOOL(t):
r'(True|true|False|false)'
t.type = 'CTE_BOOL'
return t
def t_ID(t):
r'[a-zA-Z][a-zA-Z_0-9]*'
reserved_type = reserved.get(t.value, False)
if reserved_type:
t.type = reserved_type
return t
else:
t.type = 'ID'
return t
def t_CTE_FLOAT(t):
r'-?\d+\.\d+'
t.value = float(t.value)
return t
def t_CTE_INT(t):
r'-?\d+'
t.value = int(t.value)
return t
def t_CTE_CHAR(t):
r'(\'((?!\').)\')|(\"((?!\").)\")'
t.type = 'CTE_CHAR'
return t
'''
the regex does a negative look ahead(?!) for ' or ' depending
on the case, so \'\'\' is invalid and so is '
'''
def t_CTE_STRING(t):
r'(\'((?!\').)*\')|(\"((?!\").)*\")'
t.type = 'CTE_STRING'
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
# build the lexer
lexer = lex.lex()
# Tokenize
if __name__ == '__main__':
code = ''
if len(sys.argv) > 1:
file = open('{0}'.format(sys.argv[1]), 'r')
else:
file = open('Test/test_func.alebrije', 'r')
for line in file:
code += line
# Give the lexer some input
lexer.input(code)
while True:
tok = lexer.token()
if not tok:
break # No more input
print(tok) | 18.73224 | 61 | 0.483664 | '''
#############################
# Gerardo Galan Garzafox #
# A00821196 #
# #
# alebrije_lexer.py #
# Created_at 2021-09-25 #
# #
#############################
'''
import sys
import ply.lex as lex
reserved = {
'program' : 'PROGRAM',
'main' : 'MAIN',
'vars' : 'VARS',
'int' : 'INT',
'float' : 'FLOAT',
'bool' : 'BOOL',
'char': 'CHAR',
'string' : 'STRING',
'function' : 'FUNCTION',
'return' : 'RETURN',
'read' : 'READ',
'write' : 'WRITE',
'if' : 'IF',
'else' : 'ELSE',
'while' : 'WHILE',
'for' : 'FOR',
'to' : 'TO',
'void' : 'VOID',
'and' : 'AND',
'or' : 'OR',
'median' : 'MEDIAN',
'mode' : 'MODE',
'mean' : 'MEAN',
'variance' : 'VARIANCE',
'regression' : 'REGRESSION',
'plotxy' : 'PLOTXY',
'max' : 'MAX',
'min' : 'MIN',
}
tokens = list(reserved.values()) + [
'LESS',
'GREATER',
'LESS_EQ',
'GREATER_EQ',
'EQUIVALENT',
'DIFFERENT',
'EQUAL',
'MULT',
'DIV',
'PLUS',
'MINUS',
'REMAINDER',
'EXP',
'MULT_EQ',
'DIV_EQ',
'PLUS_EQ',
'MINUS_EQ',
'L_BRACE',
'R_BRACE',
'L_BRACKET',
'R_BRACKET',
'L_PAR',
'R_PAR',
'COLON',
'SEMICOLON',
'COMMA',
'ID',
'CTE_INT',
'CTE_FLOAT',
'CTE_BOOL',
'CTE_CHAR',
'CTE_STRING'
]
# Simple tokens
t_LESS = r'\<'
t_GREATER = r'\>'
t_LESS_EQ = r'\<\='
t_GREATER_EQ = r'\>\='
t_EQUAL = r'\='
t_MULT = r'\*'
t_DIV = r'\/'
t_PLUS = r'\+'
t_MINUS = r'\-'
t_REMAINDER = r'\%'
t_EXP = r'\^'
t_MULT_EQ = r'\*\='
t_DIV_EQ = r'\/\='
t_PLUS_EQ = r'\+\='
t_MINUS_EQ = r'\-\='
t_L_BRACE = r'\{'
t_R_BRACE = r'\}'
t_L_BRACKET = r'\['
t_R_BRACKET = r'\]'
t_L_PAR = r'\('
t_R_PAR = r'\)'
t_COLON = r'\:'
t_SEMICOLON = r'\;'
t_COMMA = r'\,'
t_EQUIVALENT = r'\=\='
t_DIFFERENT = r'\!\='
t_ignore = ' \t'
t_ignore_COMMENT = r'\/\/.*'
# complex tokens
def t_CTE_BOOL(t):
r'(True|true|False|false)'
t.type = 'CTE_BOOL'
return t
def t_ID(t):
r'[a-zA-Z][a-zA-Z_0-9]*'
reserved_type = reserved.get(t.value, False)
if reserved_type:
t.type = reserved_type
return t
else:
t.type = 'ID'
return t
def t_CTE_FLOAT(t):
r'-?\d+\.\d+'
t.value = float(t.value)
return t
def t_CTE_INT(t):
r'-?\d+'
t.value = int(t.value)
return t
def t_CTE_CHAR(t):
r'(\'((?!\').)\')|(\"((?!\").)\")'
t.type = 'CTE_CHAR'
return t
'''
the regex does a negative look ahead(?!) for ' or ' depending
on the case, so \'\'\' is invalid and so is '
'''
def t_CTE_STRING(t):
r'(\'((?!\').)*\')|(\"((?!\").)*\")'
t.type = 'CTE_STRING'
return t
# Define a rule so we can track line numbers
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(t):
msg = 'Illegal character "{0}"'.format(t.value[0])
raise ValueError(msg)
t.lexer.skip(1)
# build the lexer
lexer = lex.lex()
# Tokenize
if __name__ == '__main__':
code = ''
if len(sys.argv) > 1:
file = open('{0}'.format(sys.argv[1]), 'r')
else:
file = open('Test/test_func.alebrije', 'r')
for line in file:
code += line
# Give the lexer some input
lexer.input(code)
while True:
tok = lexer.token()
if not tok:
break # No more input
print(tok) | 95 | 0 | 23 |
bfaf0bced0b2389168b820ab55515998d5b5034c | 927 | py | Python | travel_distance_map/test/test_primitives.py | pdyban/travel_distance_map | 4e36fcee5fcb6a1682001aaff93912482deb05e7 | [
"MIT"
] | 1 | 2020-07-02T20:13:21.000Z | 2020-07-02T20:13:21.000Z | travel_distance_map/test/test_primitives.py | pdyban/travel_distance_map | 4e36fcee5fcb6a1682001aaff93912482deb05e7 | [
"MIT"
] | null | null | null | travel_distance_map/test/test_primitives.py | pdyban/travel_distance_map | 4e36fcee5fcb6a1682001aaff93912482deb05e7 | [
"MIT"
] | null | null | null | import unittest
from travel_distance_map import GPSPoint, Position
| 37.08 | 78 | 0.693635 | import unittest
from travel_distance_map import GPSPoint, Position
class TestGPSPoint(unittest.TestCase):
def test_distance_utf(self):
point1 = GPSPoint(52.5219216, 13.411026) # Alexanderplatz
point2 = GPSPoint(52.5201169, 13.3865786) # Friedrichstrasse
dist = point1.distance_utf(point2)
self.assertEqual(dist, 1666.210510534654)
def test_equals(self):
point1 = GPSPoint(52.5219216, 13.411026) # Alexanderplatz
point2 = GPSPoint(52.5201169, 13.3865786) # Friedrichstrasse
self.assertTrue(point1 == point1)
self.assertTrue(not point1 == point2)
class TestPosition(unittest.TestCase):
def test_equals(self):
pos1 = Position(52.5219216, 13.411026, 900100003, 'Alexanderplatz')
pos2 = Position(52.5201169, 13.3865786, 900100001, 'Friedrichstrasse')
self.assertTrue(pos1 == pos1)
self.assertTrue(not pos1 == pos2)
| 699 | 34 | 125 |
3c07eb9eef1a936fa8dc25341e9b95d69849db4f | 17,241 | py | Python | otherlanguage/encoder.py | tanreinama/aMLP-japanese | ee16377c023d113d769645a3139e26093873628b | [
"MIT"
] | 5 | 2021-11-13T09:50:58.000Z | 2022-03-01T07:59:18.000Z | otherlanguage/encoder.py | tanreinama/aMLP-japanese | ee16377c023d113d769645a3139e26093873628b | [
"MIT"
] | 1 | 2022-02-03T23:44:26.000Z | 2022-02-09T03:56:39.000Z | otherlanguage/encoder.py | tanreinama/aMLP-japanese | ee16377c023d113d769645a3139e26093873628b | [
"MIT"
] | null | null | null | import numpy as np
import re
import json
import os
if __name__=='__main__':
import argparse
import os
import json
from tqdm import tqdm
import pickle
from multiprocessing import Pool
parser = argparse.ArgumentParser()
parser.add_argument("--src_dir", help="source dir", required=True )
parser.add_argument("--dst_file", help="destnation file", required=True )
parser.add_argument("--language", help="use language (ja/hi/ta/japanese/hindi/tamil)", required=True )
parser.add_argument("--num_process", help="process num", type=int, default=8 )
parser.add_argument("--combine", help="Concatenate files with <|endoftext|> separator into chunks of this minimum size", type=int, default=50000 )
parser.add_argument('--clean_text', action='store_true')
args = parser.parse_args()
language = args.language.lower()[:2]
assert language in ["ja","hi","ta"], f"unsupported language: {lang}"
vocabulary = os.path.join("vocabulary", language+"-swe24k.txt")
enc = get_encoder(vocabulary, "emoji.json", language!="ja")
array_file = []
for curDir, dirs, files in os.walk(args.src_dir):
array_file.append((curDir, dirs, files))
with Pool(args.num_process) as p:
p.map(_proc, list(range(args.num_process)))
token_chunks = []
for i in range(args.num_process):
with open('tmp%d.pkl'%i, 'rb') as f:
token_chunks.extend(pickle.load(f))
np.savez_compressed(args.dst_file, *token_chunks)
for i in range(args.num_process):
os.remove('tmp%d.pkl'%i)
print("end")
| 44.321337 | 272 | 0.461574 | import numpy as np
import re
import json
import os
class SWEEncoder_wholeword:
def __init__(self, bpe, emoji):
self.bpe = bpe
self.swe = {}
for idx, wd in enumerate(self.bpe):
self.swe[wd] = idx
self.emoji = emoji
self.maxlen = np.max([len(w) for w in self.swe.keys()])
self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
self.content_repatter4 = re.compile(r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
self.content_repatter5 = re.compile(r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
self.content_repatter6 = re.compile(r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
self.content_trans1 = str.maketrans({k:'<BLOCK>' for k in keisen+blocks})
def __len__(self):
return len(self.bpe)
def clean_text(self, content):
content = self.content_repatter1.sub("<URL>" ,content)
content = self.content_repatter2.sub("<EMAIL>" ,content)
content = self.content_repatter3.sub("<TEL>" ,content)
content = self.content_repatter4.sub("<DATE>" ,content)
content = self.content_repatter5.sub("<DATE>" ,content)
content = self.content_repatter6.sub("<PRICE>" ,content)
content = content.translate(self.content_trans1)
while '<BLOCK><BLOCK>' in content:
content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
return content
def encode(self, words, clean=False, position=False):
replace_words = {}
def add_replace_words(org, rep):
if org in words:
replace_words[org] = rep
add_replace_words(' ', '<SP>')
add_replace_words(' ', '<SP>')
add_replace_words('\r\n', '<BR>')
add_replace_words('\n', '<BR>')
add_replace_words('\r', '<BR>')
add_replace_words('\t', '<TAB>')
add_replace_words('—', 'ー')
add_replace_words('−', 'ー')
for k,v in self.emoji['emoji'].items():
add_replace_words(k, v)
if clean:
words = self.clean_text(words)
def checkkigou(x):
e = x.encode()
if len(x) == 1 and len(e)==2:
c = (int(e[0])<<8)+int(e[1])
if (c >= 0xc2a1 and c <= 0xc2bf) or (c >= 0xc780 and c <= 0xc783) or (c >= 0xcab9 and c <= 0xcbbf) or (c >= 0xcc80 and c <= 0xcda2):
return True
return False
def checku2e(x):
e = x.encode()
if len(x) == 1 and len(e)==3:
c = (int(e[0])<<16)+(int(e[1])<<8)+int(e[2])
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
pos = 0
result = []
result_position = []
while pos < len(words):
kouho = []
for k in replace_words.keys():
if words[pos:pos+len(k)] == k:
wd = replace_words[k]
kouho.append((self.swe[wd], wd, pos+len(k)))
if len(kouho) == 0:
end = min(len(words), pos+self.maxlen+1) if words[pos]=='<' else pos+4
for e in range(end, pos, -1):
if pos>0:
p = "##"
else:
p = ""
if e>=len(words):
wd = p+words[pos:e]
if wd in self.swe:
if wd[0]=='<' and len(wd) > 2:
kouho = [(self.swe[wd], wd, e)]
break
else:
kouho.append((self.swe[wd], wd, e))
if len(kouho) > 0:
wp,wd,e = sorted(kouho, key=lambda x:x[0])[0]
if len(result)>0 and self.bpe[result[-1]]=='<SP>':
result.pop()
result_position.pop()
result.append(wp)
result_position.append(pos)
pos = e
else:
end = pos+1
wd = words[pos:end]
if checkkigou(wd):
result.append(self.swe['<KIGOU>'])
result_position.append(pos)
elif checku2e(wd):
result.append(self.swe['<U2000U2BFF>'])
result_position.append(pos)
else:
for i in wd.encode('utf-8'):
result.append(self.swe['<|byte%d|>'%i])
result_position.append(pos)
pos = end
if position:
return result, result_position
else:
return result
def decode(self, tokens, breakline='\n'):
words = []
byte_tokens = []
def check_hindi(x):
e = x.encode()
if len(x) == 1 and len(e)==3:
c = (int(e[0])<<16)+(int(e[1])<<8)+int(e[2])
if c >= 0xE0A480 and c <= 0xE0A5BF:
return True
return False
def check_tamil(x):
e = x.encode()
if len(x) == 1 and len(e)==3:
c = (int(e[0])<<16)+(int(e[1])<<8)+int(e[2])
if c >= 0xE0AE82 and c <= 0xE0AFBA:
return True
return False
for i in tokens:
word = self.bpe[i]
if word[:6] == '<|byte' and word[-2:] == '|>':
byte_tokens.append(int(word[6:-2]))
else:
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
byte_tokens = []
if word[:7] == '<|emoji' and word[-2:] == '|>':
words.append(self.emoji['emoji_inv'][word])
elif word == '<SP>':
words.append(' ')
elif word == '<BR>':
words.append(breakline)
elif word == '<TAB>':
words.append('\t')
elif word == '<BLOCK>':
words.append('▀')
elif word == '<KIGOU>':
words.append('§')
elif word == '<U2000U2BFF>':
words.append('■')
else:
if word.startswith("##"):
words.append(word[2:])
else:
if len(words)>0 and (check_hindi(word[0]) or check_tamil(word[0])):
words.append(' ')
words.append(word)
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
text = ''.join(words)
return text
class SWEEncoder_ja:
def __init__(self, bpe, emoji):
self.bpe = [[b] if (b==',' or ',' not in b) else b.split(',') for b in bpe]
self.swe = {}
for idx, b in enumerate(self.bpe):
for wd in b:
self.swe[wd] = idx
self.emoji = emoji
self.maxlen = np.max([len(w) for w in self.swe.keys()])
self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
self.content_repatter3 = re.compile(r'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
self.content_repatter4 = re.compile(r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
self.content_repatter5 = re.compile(r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
self.content_repatter6 = re.compile(r'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
self.content_trans1 = str.maketrans({k:'<BLOCK>' for k in keisen+blocks})
def __len__(self):
return len(self.bpe)
def clean_text(self, content):
content = self.content_repatter1.sub("<URL>" ,content)
content = self.content_repatter2.sub("<EMAIL>" ,content)
content = self.content_repatter3.sub("<TEL>" ,content)
content = self.content_repatter4.sub("<DATE>" ,content)
content = self.content_repatter5.sub("<DATE>" ,content)
content = self.content_repatter6.sub("<PRICE>" ,content)
content = content.translate(self.content_trans1)
while '<BLOCK><BLOCK>' in content:
content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
return content
def encode(self, words, clean=False, position=False):
replace_words = {}
def add_replace_words(org, rep):
if org in words:
replace_words[org] = rep
add_replace_words(' ', '<SP>')
add_replace_words(' ', '<SP>')
add_replace_words('\r\n', '<BR>')
add_replace_words('\n', '<BR>')
add_replace_words('\r', '<BR>')
add_replace_words('\t', '<TAB>')
add_replace_words('—', 'ー')
add_replace_words('−', 'ー')
for k,v in self.emoji['emoji'].items():
add_replace_words(k, v)
if clean:
words = self.clean_text(words)
def checkkigou(x):
e = x.encode()
if len(x) == 1 and len(e)==2:
c = (int(e[0])<<8)+int(e[1])
if (c >= 0xc2a1 and c <= 0xc2bf) or (c >= 0xc780 and c <= 0xc783) or (c >= 0xcab9 and c <= 0xcbbf) or (c >= 0xcc80 and c <= 0xcda2):
return True
return False
def checku2e(x):
e = x.encode()
if len(x) == 1 and len(e)==3:
c = (int(e[0])<<16)+(int(e[1])<<8)+int(e[2])
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
pos = 0
result = []
result_position = []
while pos < len(words):
kouho = []
for k in replace_words.keys():
if words[pos:pos+len(k)] == k:
wd = replace_words[k]
kouho.append((self.swe[wd], pos+len(k)))
if len(kouho) == 0:
end = min(len(words), pos+self.maxlen+1) if words[pos]=='<' else pos+3
for e in range(end, pos, -1):
wd = words[pos:e]
if wd in self.swe:
if wd[0]=='<' and len(wd) > 2:
kouho = [(self.swe[wd], e)]
break
else:
kouho.append((self.swe[wd], e))
if len(kouho) > 0:
wp,e = sorted(kouho, key=lambda x:x[0])[0]
result.append(wp)
result_position.append(pos)
pos = e
else:
end = pos+1
wd = words[pos:end]
if checkkigou(wd):
result.append(self.swe['<KIGOU>'])
result_position.append(pos)
elif checku2e(wd):
result.append(self.swe['<U2000U2BFF>'])
result_position.append(pos)
else:
for i in wd.encode('utf-8'):
result.append(self.swe['<|byte%d|>'%i])
result_position.append(pos)
pos = end
if position:
return result, result_position
else:
return result
def decode(self, tokens, breakline='\n'):
words = []
byte_tokens = []
for i in tokens:
word = self.bpe[i][0]
if word[:6] == '<|byte' and word[-2:] == '|>':
byte_tokens.append(int(word[6:-2]))
else:
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
byte_tokens = []
if word[:7] == '<|emoji' and word[-2:] == '|>':
words.append(self.emoji['emoji_inv'][word])
elif word == '<SP>':
words.append(' ')
elif word == '<BR>':
words.append(breakline)
elif word == '<TAB>':
words.append('\t')
elif word == '<BLOCK>':
words.append('▀')
elif word == '<KIGOU>':
words.append('ǀ')
elif word == '<U2000U2BFF>':
words.append('‖')
else:
words.append(word)
if len(byte_tokens) > 0:
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
text = ''.join(words)
return text
def get_encoder(voc_file, emoji_file, wholeword=False):
assert os.path.exists(voc_file), f"vocabulary file not found in {voc_file}"
assert os.path.exists(emoji_file), f"emoji file not found in {emoji_file}"
with open(voc_file, encoding='utf-8') as f:
bpe = f.read().split('\n')
with open('emoji.json', encoding='utf-8') as f:
emoji = json.loads(f.read())
if not wholeword:
return SWEEncoder_ja(bpe, emoji)
else:
return SWEEncoder_wholeword(bpe, emoji)
if __name__=='__main__':
import argparse
import os
import json
from tqdm import tqdm
import pickle
from multiprocessing import Pool
parser = argparse.ArgumentParser()
parser.add_argument("--src_dir", help="source dir", required=True )
parser.add_argument("--dst_file", help="destnation file", required=True )
parser.add_argument("--language", help="use language (ja/hi/ta/japanese/hindi/tamil)", required=True )
parser.add_argument("--num_process", help="process num", type=int, default=8 )
parser.add_argument("--combine", help="Concatenate files with <|endoftext|> separator into chunks of this minimum size", type=int, default=50000 )
parser.add_argument('--clean_text', action='store_true')
args = parser.parse_args()
language = args.language.lower()[:2]
assert language in ["ja","hi","ta"], f"unsupported language: {lang}"
vocabulary = os.path.join("vocabulary", language+"-swe24k.txt")
enc = get_encoder(vocabulary, "emoji.json", language!="ja")
array_file = []
def _proc(i):
token_chunks = []
raw_text = ''
for j, (curDir, dirs, files) in enumerate(array_file):
if not (j % args.num_process == i):
continue
print('append #',curDir)
for file in tqdm(files):
if file.endswith(".txt"):
input = os.path.join(curDir, file)
with open(input, 'r', encoding='utf-8') as fp:
raw_text += fp.read()
raw_text += '<|endoftext|>'
if len(raw_text) >= args.combine:
tokens = np.stack(enc.encode(raw_text, clean=args.clean_text))
token_chunks.append(tokens)
raw_text = ''
if raw_text and len(raw_text) > 0:
tokens = np.stack(enc.encode(raw_text))
token_chunks.append(tokens)
with open('tmp%d.pkl'%i, 'wb') as f:
pickle.dump(token_chunks, f)
del token_chunks, raw_text
return
for curDir, dirs, files in os.walk(args.src_dir):
array_file.append((curDir, dirs, files))
with Pool(args.num_process) as p:
p.map(_proc, list(range(args.num_process)))
token_chunks = []
for i in range(args.num_process):
with open('tmp%d.pkl'%i, 'rb') as f:
token_chunks.extend(pickle.load(f))
np.savez_compressed(args.dst_file, *token_chunks)
for i in range(args.num_process):
os.remove('tmp%d.pkl'%i)
print("end")
| 16,332 | 5 | 363 |
b6eb5c56e22104678f8c01cd791cd6ffda925e43 | 1,293 | py | Python | ads/migrations/0004_auto_20160511_2048.py | japsu/tracontent | 169fe84c49c1a30133e927f1be50abba171ebe68 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null | ads/migrations/0004_auto_20160511_2048.py | japsu/tracontent | 169fe84c49c1a30133e927f1be50abba171ebe68 | [
"PostgreSQL",
"Unlicense",
"MIT"
] | 7 | 2020-11-26T18:41:07.000Z | 2022-01-18T09:27:00.000Z | ads/migrations/0004_auto_20160511_2048.py | tracon/tracontent | 65bd8c15b7909a90ebe5ed28cbbf66683a4e3c2c | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | from django.db import migrations, models
| 34.945946 | 179 | 0.612529 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ads', '0003_auto_20160218_2343'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='active',
field=models.BooleanField(default=True, help_text='Voit piilottaa bannerin poistamatta sitä ottamalla tästä ruksin pois.', verbose_name='Aktiivinen'),
),
migrations.AlterField(
model_name='banner',
name='image_file',
field=models.FileField(upload_to='banners'),
),
migrations.AlterField(
model_name='banner',
name='title',
field=models.CharField(help_text='Esimerkiksi mainostettavan yrityksen tai sivuston nimi. Näytetään alt- ja hover-tekstinä.', max_length=1023, verbose_name='Otsikko'),
),
migrations.AlterField(
model_name='banner',
name='url',
field=models.CharField(help_text='Bannerin klikkaaja ohjataan tähän osoitteeseen.', max_length=1023, verbose_name='Osoite'),
),
migrations.AlterField(
model_name='bannerclick',
name='date',
field=models.DateField(verbose_name='Päivämäärä'),
),
]
| 0 | 1,242 | 23 |
e2738087b399be494d8837b975acf1175c29954c | 400 | py | Python | setup.py | AlanCristhian/symbexpr | 2dae9990d0f28d428015c9644e140928357517cc | [
"MIT"
] | null | null | null | setup.py | AlanCristhian/symbexpr | 2dae9990d0f28d428015c9644e140928357517cc | [
"MIT"
] | null | null | null | setup.py | AlanCristhian/symbexpr | 2dae9990d0f28d428015c9644e140928357517cc | [
"MIT"
] | null | null | null | """Insall script."""
from setuptools import setup
setup(
name="symbexpr",
version="0.0.1a1",
py_modules=["symbexpr"],
zip_safe=True,
author="Alan Cristhian",
author_email="alan.cristh@gmail.com",
description="Systems of equalities, inequalities and constraints.",
license="MIT",
keywords="data structure",
url="https://github.com/AlanCristhian/symbexpr",
)
| 22.222222 | 71 | 0.675 | """Insall script."""
from setuptools import setup
setup(
name="symbexpr",
version="0.0.1a1",
py_modules=["symbexpr"],
zip_safe=True,
author="Alan Cristhian",
author_email="alan.cristh@gmail.com",
description="Systems of equalities, inequalities and constraints.",
license="MIT",
keywords="data structure",
url="https://github.com/AlanCristhian/symbexpr",
)
| 0 | 0 | 0 |
1ca19b47ad72b8b73290bd4f6a6cc00321ac9ce7 | 50,797 | py | Python | osh/cmd_parse.py | afunsten/oil | e52071e10a78157db1e4f0befc439a36ca1cbc01 | [
"Apache-2.0"
] | 1 | 2019-01-25T01:15:51.000Z | 2019-01-25T01:15:51.000Z | osh/cmd_parse.py | afunsten/oil | e52071e10a78157db1e4f0befc439a36ca1cbc01 | [
"Apache-2.0"
] | null | null | null | osh/cmd_parse.py | afunsten/oil | e52071e10a78157db1e4f0befc439a36ca1cbc01 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_parse.py - Parse high level shell commands.
"""
from __future__ import print_function
from asdl import const
from core import alloc
from core import util
from core.meta import syntax_asdl, types_asdl, Id, Kind
from frontend import match
from frontend import reader
from osh import braces
from osh import bool_parse
from osh import word
log = util.log
p_die = util.p_die
assign_op_e = syntax_asdl.assign_op_e
command = syntax_asdl.command
command_e = syntax_asdl.command_e
lhs_expr = syntax_asdl.lhs_expr
redir = syntax_asdl.redir
word_part = syntax_asdl.word_part
word_e = syntax_asdl.word_e
osh_word = syntax_asdl.word # TODO: rename
lex_mode_e = types_asdl.lex_mode_e
def _MakeLiteralHereLines(here_lines, arena):
"""Create a line_span and a token for each line."""
tokens = []
for line_id, line, start_offset in here_lines:
line_span = syntax_asdl.line_span(line_id, start_offset, len(line))
span_id = arena.AddLineSpan(line_span)
t = syntax_asdl.token(Id.Lit_Chars, line[start_offset:], span_id)
tokens.append(t)
return [word_part.LiteralPart(t) for t in tokens]
def _ParseHereDocBody(parse_ctx, h, line_reader, arena):
"""Fill in attributes of a pending here doc node."""
# "If any character in word is quoted, the delimiter shall be formed by
# performing quote removal on word, and the here-document lines shall not
# be expanded. Otherwise, the delimiter shall be the word itself."
# NOTE: \EOF counts, or even E\OF
ok, delimiter, delim_quoted = word.StaticEval(h.here_begin)
if not ok:
p_die('Invalid here doc delimiter', word=h.here_begin)
here_lines, last_line = _ReadHereLines(line_reader, h, delimiter)
if delim_quoted: # << 'EOF'
# LiteralPart for each line.
h.stdin_parts = _MakeLiteralHereLines(here_lines, arena)
else:
line_reader = reader.VirtualLineReader(here_lines, arena)
w_parser = parse_ctx.MakeWordParserForHereDoc(line_reader)
w_parser.ReadHereDocBody(h.stdin_parts) # fills this in
end_line_id, end_line, end_pos = last_line
# Create a span with the end terminator. Maintains the invariant that
# the spans "add up".
line_span = syntax_asdl.line_span(end_line_id, end_pos, len(end_line))
h.here_end_span_id = arena.AddLineSpan(line_span)
def _MakeAssignPair(parse_ctx, preparsed):
"""Create an assign_pair from a 4-tuples from DetectAssignment."""
left_token, close_token, part_offset, w = preparsed
if left_token.id == Id.Lit_VarLike: # s=1
if left_token.val[-2] == '+':
var_name = left_token.val[:-2]
op = assign_op_e.PlusEqual
else:
var_name = left_token.val[:-1]
op = assign_op_e.Equal
lhs = lhs_expr.LhsName(var_name)
lhs.spids.append(left_token.span_id)
elif left_token.id == Id.Lit_ArrayLhsOpen: # a[x++]=1
var_name = left_token.val[:-1]
if close_token.val[-2] == '+':
op = assign_op_e.PlusEqual
else:
op = assign_op_e.Equal
# Adapted from tools/osh2oil.py Cursor.PrintUntil
# TODO: Make a method like arena.AppendPieces(start, end, []), and share
# with alias.
pieces = []
for span_id in xrange(left_token.span_id + 1, close_token.span_id):
span = parse_ctx.arena.GetLineSpan(span_id)
line = parse_ctx.arena.GetLine(span.line_id)
piece = line[span.col : span.col + span.length]
pieces.append(piece)
# Now reparse everything between here
code_str = ''.join(pieces)
# NOTE: It's possible that an alias expansion underlies this, not a real file!
# We have to use a SideArena since this will happen during translation.
line_num = 99
source_name = 'TODO'
arena = alloc.SideArena('<LHS array index at line %d of %s>' %
(line_num, source_name))
a_parser = parse_ctx.MakeArithParser(code_str, arena)
expr = a_parser.Parse() # raises util.ParseError
# TODO: It reports from the wrong arena!
lhs = lhs_expr.LhsIndexedName(var_name, expr)
lhs.spids.append(left_token.span_id)
else:
raise AssertionError
# TODO: Should we also create a rhs_exp.ArrayLiteral here?
n = len(w.parts)
if part_offset == n:
val = osh_word.EmptyWord()
else:
val = osh_word.CompoundWord(w.parts[part_offset:])
val = word.TildeDetect(val) or val
pair = syntax_asdl.assign_pair(lhs, op, val)
pair.spids.append(left_token.span_id) # Do we need this?
return pair
def _AppendMoreEnv(preparsed_list, more_env):
"""Helper to modify a SimpleCommand node.
Args:
preparsed: a list of 4-tuples from DetectAssignment
more_env: a list to append env_pairs to
"""
for left_token, close_token, part_offset, w in preparsed_list:
if left_token.id != Id.Lit_VarLike: # can't be a[x]=1
p_die("Environment binding shouldn't look like an array assignment",
token=left_token)
if left_token.val[-2] == '+':
p_die('Expected = in environment binding, got +=', token=left_token)
var_name = left_token.val[:-1]
n = len(w.parts)
if part_offset == n:
val = osh_word.EmptyWord()
else:
val = osh_word.CompoundWord(w.parts[part_offset:])
pair = syntax_asdl.env_pair(var_name, val)
pair.spids.append(left_token.span_id) # Do we need this?
more_env.append(pair)
def _MakeAssignment(parse_ctx, assign_kw, suffix_words):
"""Create an command.Assignment node from a keyword and a list of words.
NOTE: We don't allow dynamic assignments like:
local $1
This can be replaced with eval 'local $1'
"""
# First parse flags, e.g. -r -x -a -A. None of the flags have arguments.
flags = []
n = len(suffix_words)
i = 1
while i < n:
w = suffix_words[i]
ok, static_val, quoted = word.StaticEval(w)
if not ok or quoted:
break # can't statically evaluate
if static_val.startswith('-'):
flags.append(static_val)
else:
break # not a flag, rest are args
i += 1
# Now parse bindings or variable names
pairs = []
while i < n:
w = suffix_words[i]
# declare x[y]=1 is valid
left_token, close_token, part_offset = word.DetectAssignment(w)
if left_token:
pair = _MakeAssignPair(parse_ctx, (left_token, close_token, part_offset, w))
else:
# In aboriginal in variables/sources: export_if_blank does export "$1".
# We should allow that.
# Parse this differently then? # dynamic-export? It sets global
# variables.
ok, static_val, quoted = word.StaticEval(w)
if not ok or quoted:
p_die("Variable names must be unquoted constants", word=w)
# No value is equivalent to ''
if not match.IsValidVarName(static_val):
p_die('Invalid variable name %r', static_val, word=w)
lhs = lhs_expr.LhsName(static_val)
lhs.spids.append(word.LeftMostSpanForWord(w))
pair = syntax_asdl.assign_pair(lhs, assign_op_e.Equal, None)
left_spid = word.LeftMostSpanForWord(w)
pair.spids.append(left_spid)
pairs.append(pair)
i += 1
node = command.Assignment(assign_kw, flags, pairs)
return node
def _SplitSimpleCommandPrefix(words):
"""Second pass of SimpleCommand parsing: look for assignment words."""
preparsed_list = []
suffix_words = []
done_prefix = False
for w in words:
if done_prefix:
suffix_words.append(w)
continue
left_token, close_token, part_offset = word.DetectAssignment(w)
if left_token:
preparsed_list.append((left_token, close_token, part_offset, w))
else:
done_prefix = True
suffix_words.append(w)
return preparsed_list, suffix_words
def _MakeSimpleCommand(preparsed_list, suffix_words, redirects):
"""Create an command.SimpleCommand node."""
# FOO=(1 2 3) ls is not allowed.
for _, _, _, w in preparsed_list:
if word.HasArrayPart(w):
p_die("Environment bindings can't contain array literals", word=w)
# echo FOO=(1 2 3) is not allowed (but we should NOT fail on echo FOO[x]=1).
for w in suffix_words:
if word.HasArrayPart(w):
p_die("Commands can't contain array literals", word=w)
# NOTE: # In bash, {~bob,~jane}/src works, even though ~ isn't the leading
# character of the initial word.
# However, this means we must do tilde detection AFTER brace EXPANSION, not
# just after brace DETECTION like we're doing here.
# The BracedWordTree instances have to be expanded into CompoundWord
# instances for the tilde detection to work.
words2 = braces.BraceDetectAll(suffix_words)
words3 = word.TildeDetectAll(words2)
node = command.SimpleCommand()
node.words = words3
node.redirects = redirects
_AppendMoreEnv(preparsed_list, node.more_env)
return node
NOT_FIRST_WORDS = (
Id.KW_Do, Id.KW_Done, Id.KW_Then, Id.KW_Fi, Id.KW_Elif,
Id.KW_Else, Id.KW_Esac
)
class CommandParser(object):
"""
Args:
word_parse: to get a stream of words
lexer: for lookahead in function def, PushHint of ()
line_reader: for here doc
"""
def Reset(self):
"""Reset our own internal state.
Called by the interactive loop.
"""
# Cursor state set by _Peek()
self.next_lex_mode = lex_mode_e.Outer
self.cur_word = None # current word
self.c_kind = Kind.Undefined
self.c_id = Id.Undefined_Tok
self.pending_here_docs = []
def ResetInputObjects(self):
"""Reset the internal state of our inputs.
Called by the interactive loop.
"""
self.w_parser.Reset()
self.lexer.ResetInputObjects()
self.line_reader.Reset()
# NOTE: If our approach to _MaybeExpandAliases isn't sufficient, we could
# have an expand_alias=True flag here? We would litter the parser with calls
# to this like dash and bash.
#
# Although it might be possible that you really need to mutate the parser
# state, and not just provide a parameter to _Next().
# You might also need a flag to indicate whether the previous expansion ends
# with ' '. I didn't see that in dash or bash code.
def _Next(self, lex_mode=lex_mode_e.Outer):
"""Helper method."""
self.next_lex_mode = lex_mode
def Peek(self):
"""Public method for REPL."""
self._Peek()
return self.cur_word
def _Peek(self):
"""Helper method.
Returns True for success and False on error. Error examples: bad command
sub word, or unterminated quoted string, etc.
"""
if self.next_lex_mode != lex_mode_e.Undefined:
w = self.w_parser.ReadWord(self.next_lex_mode)
assert w is not None
# Here docs only happen in command mode, so other kinds of newlines don't
# count.
if w.tag == word_e.TokenWord and w.token.id == Id.Op_Newline:
for h in self.pending_here_docs:
_ParseHereDocBody(self.parse_ctx, h, self.line_reader, self.arena)
del self.pending_here_docs[:] # No .clear() until Python 3.3.
self.cur_word = w
self.c_kind = word.CommandKind(self.cur_word)
self.c_id = word.CommandId(self.cur_word)
self.next_lex_mode = lex_mode_e.Undefined
def _Eat(self, c_id):
"""Consume a word of a type. If it doesn't match, return False.
Args:
c_id: either EKeyword.* or a token type like Id.Right_Subshell.
TODO: Rationalize / type check this.
"""
self._Peek()
# TODO: Printing something like KW_Do is not friendly. We can map
# backwards using the _KEYWORDS list in osh/lex.py.
if self.c_id != c_id:
p_die('Expected word type %s, got %s', c_id,
word.CommandId(self.cur_word), word=self.cur_word)
self._Next()
def _NewlineOk(self):
"""Check for optional newline and consume it."""
self._Peek()
if self.c_id == Id.Op_Newline:
self._Next()
self._Peek()
def ParseRedirect(self):
  """Parse one redirect: a here doc (<<, <<-) or an ordinary redirect.

  Returns:
    redir.HereDoc or redir.Redir.  A HereDoc's body is filled in later,
    when the terminating newline is seen (see _Peek).
  """
  self._Peek()
  assert self.c_kind == Kind.Redir, self.cur_word
  op_token = self.cur_word.token

  # For now only a single digit descriptor is supported, e.g. 2>.
  lead_char = op_token.val[0]
  if lead_char.isdigit():
    fd = int(lead_char)
  else:
    fd = const.NO_INTEGER

  if op_token.id in (Id.Redir_DLess, Id.Redir_DLessDash):  # here doc
    h = redir.HereDoc()
    h.op = op_token
    h.fd = fd
    self._Next()

    self._Peek()
    h.here_begin = self.cur_word
    self._Next()

    self.pending_here_docs.append(h)  # will be filled on next newline.
    return h

  r = redir.Redir()
  r.op = op_token
  r.fd = fd
  self._Next()

  self._Peek()
  if self.c_kind != Kind.Word:
    p_die('Invalid token after redirect operator', word=self.cur_word)

  # Apply tilde detection to the filename word, e.g. > ~/out.txt
  r.arg_word = word.TildeDetect(self.cur_word) or self.cur_word
  self._Next()
  return r
def _ParseRedirectList(self):
  """Parse any redirects at the cursor.

  This is used for blocks only, not commands.
  """
  redirects = []
  self._Peek()
  # This prediction needs to ONLY accept redirect operators. Should we
  # make them a separate TokenKind?
  while self.c_kind == Kind.Redir:
    redirects.append(self.ParseRedirect())
    self._Next()
    self._Peek()
  return redirects
def _ScanSimpleCommand(self):
  """First pass over a simple command: split it into redirects and words."""
  redirects = []
  words = []

  # Set a reference so parser state can be inspected after a failed parse!
  self.parse_ctx.trail.SetLatestWords(words, redirects)

  while True:
    self._Peek()
    kind = self.c_kind
    if kind == Kind.Redir:
      redirects.append(self.ParseRedirect())
    elif kind == Kind.Word:
      words.append(self.cur_word)
    else:
      break
    self._Next()

  return redirects, words
def _MaybeExpandAliases(self, words, cur_aliases):
"""Try to expand aliases.
Our implementation of alias has two design choices:
- Where to insert it in parsing. We do it at the end of ParseSimpleCommand.
- What grammar rule to parse the expanded alias buffer with. In our case
it's ParseCommand().
This doesn't quite match what other shells do, but I can't figure out a
better places.
Most test cases pass, except for ones like:
alias LBRACE='{'
LBRACE echo one; echo two; }
alias MULTILINE='echo 1
echo 2
echo 3'
MULTILINE
NOTE: dash handles aliases in a totally diferrent way. It has a global
variable checkkwd in parser.c. It assigns it all over the grammar, like
this:
checkkwd = CHKNL | CHKKWD | CHKALIAS;
The readtoken() function checks (checkkwd & CHKALIAS) and then calls
lookupalias(). This seems to provide a consistent behavior among shells,
but it's less modular and testable.
Bash also uses a global 'parser_state & PST_ALEXPNEXT'.
Args:
words: list of words of the simple command being parsed.
cur_aliases: list of (alias name, word index) pairs already being
expanded; mutated here to prevent infinite recursion.
Returns:
A command node if any aliases were expanded, or None otherwise.
"""
# The last char that we might parse.
# NOTE(review): this initial right_spid appears unused; it is recomputed
# inside the copy loop below -- confirm before removing.
right_spid = word.RightMostSpanForWord(words[-1])
first_word_str = None # for error message
expanded = []
i = 0
n = len(words)
# Phase 1: expand a prefix of the words that name aliases.
while i < n:
w = words[i]
ok, word_str, quoted = word.StaticEval(w)
if not ok or quoted:
break
alias_exp = self.aliases.get(word_str)
if alias_exp is None:
break
# Prevent infinite loops. This is subtle: we want to prevent infinite
# expansion of alias echo='echo x'. But we don't want to prevent
# expansion of the second word in 'echo echo', so we add 'i' to
# "cur_aliases".
if (word_str, i) in cur_aliases:
break
if i == 0:
first_word_str = word_str # for error message
#log('%r -> %r', word_str, alias_exp)
cur_aliases.append((word_str, i))
expanded.append(alias_exp)
i += 1
if not alias_exp.endswith(' '):
# alias e='echo [ ' is the same expansion as
# alias e='echo ['
# The trailing space indicates whether we should continue to expand
# aliases; it's not part of it.
expanded.append(' ')
break # No more expansions
if not expanded: # No expansions; caller does parsing.
return None
# We got some expansion. Now copy the rest of the words.
# We need each NON-REDIRECT word separately! For example:
# $ echo one >out two
# dash/mksh/zsh go beyond the first redirect!
while i < n:
w = words[i]
left_spid = word.LeftMostSpanForWord(w)
right_spid = word.RightMostSpanForWord(w)
# Adapted from tools/osh2oil.py Cursor.PrintUntil
for span_id in xrange(left_spid, right_spid + 1):
span = self.arena.GetLineSpan(span_id)
line = self.arena.GetLine(span.line_id)
piece = line[span.col : span.col + span.length]
expanded.append(piece)
expanded.append(' ') # Put space back between words.
i += 1
# Phase 2: reparse the flattened expansion text with a fresh parser.
code_str = ''.join(expanded)
lines = code_str.splitlines(True) # Keep newlines
line_info = []
# TODO: Add location information
self.arena.PushSource(
'<expansion of alias %r at line %d of %s>' %
(first_word_str, -1, 'TODO'))
try:
for i, line in enumerate(lines):
line_id = self.arena.AddLine(line, i+1)
line_info.append((line_id, line, 0))
finally:
self.arena.PopSource()
line_reader = reader.VirtualLineReader(line_info, self.arena)
cp = self.parse_ctx.MakeOshParser(line_reader)
try:
node = cp.ParseCommand(cur_aliases=cur_aliases)
except util.ParseError as e:
# Failure to parse alias expansion is a fatal error
# We don't need more handling here/
raise
if 0:
log('AFTER expansion:')
from osh import ast_lib
ast_lib.PrettyPrint(node)
return node
# Flags that indicate an assignment should be parsed like a command.
# A set of (assignment keyword Id, flag string) pairs: when the first
# argument after declare/typeset/local/readonly is one of these flags, the
# construct is parsed as a SimpleCommand rather than an Assignment (see
# ParseSimpleCommand).
_ASSIGN_COMMANDS = set([
(Id.Assign_Declare, '-f'), # function defs
(Id.Assign_Declare, '-F'), # function names
(Id.Assign_Declare, '-p'), # print
(Id.Assign_Typeset, '-f'),
(Id.Assign_Typeset, '-F'),
(Id.Assign_Typeset, '-p'),
(Id.Assign_Local, '-p'),
(Id.Assign_Readonly, '-p'),
# Hm 'export -p' is more like a command. But we're parsing it
# dynamically now because of some wrappers.
# Maybe we could change this.
#(Id.Assign_Export, '-p'),
])
# Flags to parse like assignments: -a -r -x (and maybe -i)
def ParseSimpleCommand(self, cur_aliases):
"""
Fixed transcription of the POSIX grammar (TODO: port to grammar/Shell.g)
io_file : '<' filename
| LESSAND filename
...
io_here : DLESS here_end
| DLESSDASH here_end
redirect : IO_NUMBER (io_redirect | io_here)
prefix_part : ASSIGNMENT_WORD | redirect
cmd_part : WORD | redirect
assign_kw : Declare | Export | Local | Readonly
# Without any words it is parsed as a command, not an assigment
assign_listing : assign_kw
# Now we have something to do (might be changing assignment flags too)
# NOTE: any prefixes should be a warning, but they are allowed in shell.
assignment : prefix_part* assign_kw (WORD | ASSIGNMENT_WORD)+
# an external command, a function call, or a builtin -- a "word_command"
word_command : prefix_part* cmd_part+
simple_command : assign_listing
| assignment
| proc_command
Simple imperative algorithm:
1) Read a list of words and redirects. Append them to separate lists.
2) Look for the first non-assignment word. If it's declare, etc., then
keep parsing words AND assign words. Otherwise, just parse words.
3) If there are no non-assignment words, then it's a global assignment.
{ redirects, global assignments } OR
{ redirects, prefix_bindings, words } OR
{ redirects, ERROR_prefix_bindings, keyword, assignments, words }
THEN CHECK that prefix bindings don't have any array literal parts!
global assignment and keyword assignments can have the of course.
well actually EXPORT shouldn't have them either -- WARNING
3 cases we want to warn: prefix_bindings for assignment, and array literal
in prefix bindings, or export
A command can be an assignment word, word, or redirect on its own.
ls
>out.txt
>out.txt FOO=bar # this touches the file, and hten
Or any sequence:
ls foo bar
<in.txt ls foo bar >out.txt
<in.txt ls >out.txt foo bar
Or add one or more environment bindings:
VAR=val env
>out.txt VAR=val env
here_end vs filename is a matter of whether we test that it's quoted. e.g.
<<EOF vs <<'EOF'.
Args:
cur_aliases: aliases already being expanded (see _MaybeExpandAliases).
Returns:
A command node: SimpleCommand, Assignment, or ControlFlow.
"""
result = self._ScanSimpleCommand()
redirects, words = result
# Case 1: redirect(s) with no words at all, e.g. >out.txt
if not words: # e.g. >out.txt # redirect without words
node = command.SimpleCommand()
node.redirects = redirects
return node
preparsed_list, suffix_words = _SplitSimpleCommandPrefix(words)
# Case 2: only prefix assignments, no command word -> global assignment.
if not suffix_words: # ONE=1 a[x]=1 TWO=2 (with no other words)
if redirects:
left_token, _, _, _ = preparsed_list[0]
p_die("Global assignment shouldn't have redirects", token=left_token)
pairs = []
for preparsed in preparsed_list:
pairs.append(_MakeAssignPair(self.parse_ctx, preparsed))
node = command.Assignment(Id.Assign_None, [], pairs)
left_spid = word.LeftMostSpanForWord(words[0])
node.spids.append(left_spid) # no keyword spid to skip past
return node
kind, kw_token = word.KeywordToken(suffix_words[0])
# Case 3: first suffix word is an assignment keyword (declare, local, ...).
if kind == Kind.Assign:
# Here we StaticEval suffix_words[1] to see if we have an ASSIGNMENT COMMAND
# like 'typeset -p', which lists variables -- a SimpleCommand rather than
# an Assignment.
#
# Note we're not handling duplicate flags like 'typeset -pf'. I see this
# in bashdb (bash debugger) but it can just be changed to 'typeset -p
# -f'.
is_command = False
if len(suffix_words) > 1:
ok, val, _ = word.StaticEval(suffix_words[1])
if ok and (kw_token.id, val) in self._ASSIGN_COMMANDS:
is_command = True
if is_command: # declare -f, declare -p, typeset -p, etc.
node = _MakeSimpleCommand(preparsed_list, suffix_words, redirects)
return node
if redirects:
# Attach the error location to the keyword. It would be more precise
# to attach it to the
p_die("Assignments shouldn't have redirects", token=kw_token)
if preparsed_list: # FOO=bar local spam=eggs not allowed
# Use the location of the first value. TODO: Use the whole word
# before splitting.
left_token, _, _, _ = preparsed_list[0]
p_die("Assignments shouldn't have environment bindings", token=left_token)
# declare str='', declare -a array=()
node = _MakeAssignment(self.parse_ctx, kw_token.id, suffix_words)
node.spids.append(kw_token.span_id)
return node
# Case 4: control flow keyword (break, continue, return, exit).
if kind == Kind.ControlFlow:
if redirects:
p_die("Control flow shouldn't have redirects", token=kw_token)
if preparsed_list: # FOO=bar local spam=eggs not allowed
# TODO: Change location as above
left_token, _, _, _ = preparsed_list[0]
p_die("Control flow shouldn't have environment bindings",
token=left_token)
# Attach the token for errors. (Assignment may not need it.)
if len(suffix_words) == 1:
arg_word = None
elif len(suffix_words) == 2:
arg_word = suffix_words[1]
else:
p_die('Unexpected argument to %r', kw_token.val, word=suffix_words[2])
return command.ControlFlow(kw_token, arg_word)
# Case 5: ordinary command; try alias expansion first.
# If any expansions were detected, then parse again.
node = self._MaybeExpandAliases(suffix_words, cur_aliases)
if node:
# NOTE: There are other types of nodes with redirects. Do they matter?
if node.tag == command_e.SimpleCommand:
node.redirects = redirects
_AppendMoreEnv(preparsed_list, node.more_env)
return node
# TODO check that we don't have env1=x x[1]=y env2=z here.
# FOO=bar printenv.py FOO
node = _MakeSimpleCommand(preparsed_list, suffix_words, redirects)
return node
def ParseBraceGroup(self):
  """
  brace_group : LBrace command_list RBrace ;
  """
  left_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Eat(Id.Lit_LBrace)

  body = self._ParseCommandList()
  assert body is not None

  # The right brace spid isn't needed.
  self._Eat(Id.Lit_RBrace)

  node = command.BraceGroup(body.children)
  node.spids.append(left_spid)
  return node
def ParseDoGroup(self):
  """Parse the loop body shared by ForEach, ForExpr, While and Until.

  do_group : Do command_list Done ; /* Apply rule 6 */
  """
  self._Eat(Id.KW_Do)
  do_spid = word.LeftMostSpanForWord(self.cur_word)  # after _Eat

  body = self._ParseCommandList()  # could be anything
  assert body is not None

  self._Eat(Id.KW_Done)
  done_spid = word.LeftMostSpanForWord(self.cur_word)  # after _Eat

  node = command.DoGroup(body.children)
  node.spids.extend((do_spid, done_spid))
  return node
def ParseForWords(self):
  """
  for_words : WORD* for_sep
            ;
  for_sep   : ';' newline_ok
            | NEWLINES
            ;

  Returns:
    (list of words, span id of the terminating semi-colon or NO_INTEGER).
  """
  words = []
  semi_spid = const.NO_INTEGER  # span of any semi-colon, so it can be removed

  while True:
    self._Peek()
    if self.c_id == Id.Op_Semi:
      semi_spid = self.cur_word.token.span_id  # TokenWord
      self._Next()
      self._NewlineOk()
      break
    if self.c_id == Id.Op_Newline:
      self._Next()
      break

    if self.cur_word.tag != word_e.CompoundWord:
      # TODO: Can we also show a pointer to the 'for' keyword?
      p_die('Invalid word in for loop', word=self.cur_word)

    words.append(self.cur_word)
    self._Next()

  return words, semi_spid
def _ParseForExprLoop(self):
  """Parse a C-style loop: for (( init; cond; update )) for_sep? do_group"""
  node = self.w_parser.ReadForExpression()
  assert node is not None
  self._Next()

  self._Peek()
  cid = self.c_id
  if cid == Id.Op_Semi:
    self._Next()
    self._NewlineOk()
  elif cid == Id.Op_Newline:
    self._Next()
  elif cid == Id.KW_Do:  # missing semicolon/newline allowed
    pass
  else:
    p_die('Invalid word after for expression', word=self.cur_word)

  body = self.ParseDoGroup()
  assert body is not None
  node.body = body
  return node
def ParseFor(self):
  """
  for_clause : For for_name newline_ok (in for_words? for_sep)? do_group ;
             | For '((' ... TODO
  """
  self._Eat(Id.KW_For)
  self._Peek()
  if self.c_id == Id.Op_DLeftParen:
    return self._ParseForExprLoop()  # C-style for (( ; ; ))
  return self._ParseForEachLoop()
def ParseWhileUntil(self):
  """
  while_clause : While command_list do_group ;
  until_clause : Until command_list do_group ;
  """
  keyword = self.cur_word.parts[0].token
  # This is ensured by the caller.
  assert keyword.id in (Id.KW_While, Id.KW_Until), keyword
  self._Next()  # skip while/until

  cond = self._ParseCommandList()
  assert cond is not None

  body = self.ParseDoGroup()
  assert body is not None

  return command.WhileUntil(keyword, cond.children, body)
def ParseCaseItem(self):
  """Parse one arm of a case statement.

  case_item: '('? pattern ('|' pattern)* ')'
             newline_ok command_term? trailer? ;
  """
  self.lexer.PushHint(Id.Op_RParen, Id.Right_CasePat)
  left_spid = word.LeftMostSpanForWord(self.cur_word)

  if self.c_id == Id.Op_LParen:  # optional opening paren
    self._Next()

  pat_words = []
  while True:
    self._Peek()
    pat_words.append(self.cur_word)
    self._Next()

    self._Peek()
    if self.c_id != Id.Op_Pipe:
      break
    self._Next()  # skip '|' and read another pattern

  rparen_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Eat(Id.Right_CasePat)
  self._NewlineOk()

  if self.c_id in (Id.Op_DSemi, Id.KW_Esac):
    action_children = []  # empty action
  else:
    c_list = self._ParseCommandTerm()
    assert c_list is not None
    action_children = c_list.children

  dsemi_spid = const.NO_INTEGER
  last_spid = const.NO_INTEGER  # spid of esac, for the last arm only
  self._Peek()
  if self.c_id == Id.KW_Esac:
    last_spid = word.LeftMostSpanForWord(self.cur_word)
  elif self.c_id == Id.Op_DSemi:
    dsemi_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()
  else:
    # Happens on EOF
    p_die('Expected ;; or esac', word=self.cur_word)

  self._NewlineOk()

  arm = syntax_asdl.case_arm(pat_words, action_children)
  arm.spids.extend((left_spid, rparen_spid, dsemi_spid, last_spid))
  return arm
def ParseCaseList(self, arms):
  """
  case_list: case_item (DSEMI newline_ok case_item)* DSEMI? newline_ok;

  Appends parsed arms to the 'arms' list (mutated in place).
  """
  self._Peek()
  while True:
    # A case item begins with a command word or '('; esac ends the list.
    if self.c_id == Id.KW_Esac:
      break
    if self.c_kind != Kind.Word and self.c_id != Id.Op_LParen:
      break

    arm = self.ParseCaseItem()
    assert arm is not None
    arms.append(arm)

    self._Peek()
    # Now look for DSEMI or ESAC
def ParseCase(self):
  """
  case_clause : Case WORD newline_ok in newline_ok case_list? Esac ;
  """
  case_node = command.Case()

  case_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Next()  # skip 'case'

  self._Peek()
  case_node.to_match = self.cur_word
  self._Next()

  self._NewlineOk()
  in_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Eat(Id.KW_In)
  self._NewlineOk()

  if self.c_id != Id.KW_Esac:  # empty case list
    self.ParseCaseList(case_node.arms)
    # TODO: should it return a list of nodes, and extend?

  self._Peek()
  esac_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Eat(Id.KW_Esac)
  self._Next()

  case_node.spids.extend((case_spid, in_spid, esac_spid))
  return case_node
def _ParseElifElse(self, if_node):
  """Parse the tail of an if statement, mutating if_node.

  else_part: (Elif command_list Then command_list)* Else command_list ;
  """
  arms = if_node.arms

  self._Peek()
  while self.c_id == Id.KW_Elif:
    elif_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()  # skip elif

    cond_list = self._ParseCommandList()
    assert cond_list is not None

    then_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.KW_Then)

    then_list = self._ParseCommandList()
    assert then_list is not None

    arm = syntax_asdl.if_arm(cond_list.children, then_list.children)
    arm.spids.extend((elif_spid, then_spid))
    arms.append(arm)

  if self.c_id == Id.KW_Else:
    else_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()
    else_list = self._ParseCommandList()
    assert else_list is not None
    if_node.else_action = else_list.children
  else:
    else_spid = const.NO_INTEGER

  if_node.spids.append(else_spid)
def ParseIf(self):
  """
  if_clause : If command_list Then command_list else_part? Fi ;
  """
  if_node = command.If()
  self._Next()  # skip 'if'

  cond_list = self._ParseCommandList()
  assert cond_list is not None

  then_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Eat(Id.KW_Then)

  then_list = self._ParseCommandList()
  assert then_list is not None

  arm = syntax_asdl.if_arm(cond_list.children, then_list.children)
  arm.spids.extend((const.NO_INTEGER, then_spid))  # no if spid at first?
  if_node.arms.append(arm)

  if self.c_id in (Id.KW_Elif, Id.KW_Else):
    self._ParseElifElse(if_node)
  else:
    if_node.spids.append(const.NO_INTEGER)  # no else spid

  fi_spid = word.LeftMostSpanForWord(self.cur_word)
  self._Eat(Id.KW_Fi)

  if_node.spids.append(fi_spid)
  return if_node
def ParseTime(self):
  """Parse:  time [-p] pipeline   (according to bash help)."""
  self._Next()  # skip 'time'
  pipeline = self.ParsePipeline()
  assert pipeline is not None
  return command.TimeBlock(pipeline)
def ParseCompoundCommand(self):
  """Dispatch on the current word to the right compound-command parser.

  compound_command : brace_group
                   | subshell
                   | for_clause
                   | while_clause
                   | until_clause
                   | if_clause
                   | case_clause
                   | time_clause
                   | [[ BoolExpr ]]
                   | (( ArithExpr ))
                   ;
  """
  cid = self.c_id
  if cid == Id.Lit_LBrace:
    return self.ParseBraceGroup()
  elif cid == Id.Op_LParen:
    return self.ParseSubshell()
  elif cid == Id.KW_For:
    return self.ParseFor()
  elif cid in (Id.KW_While, Id.KW_Until):
    return self.ParseWhileUntil()
  elif cid == Id.KW_If:
    return self.ParseIf()
  elif cid == Id.KW_Case:
    return self.ParseCase()
  elif cid == Id.KW_Time:
    return self.ParseTime()
  elif cid == Id.KW_DLeftBracket:
    # Example of redirect that is observable:
    # $ (( $(echo one 1>&2; echo 2) > 0 )) 2> out.txt
    return self.ParseDBracket()
  elif cid == Id.Op_DLeftParen:
    return self.ParseDParen()

  # This never happens?
  p_die('Unexpected word while parsing compound command', word=self.cur_word)
def ParseFunctionBody(self, func):
  """Parse a function body and attach it to 'func' (mutated in place).

  function_body : compound_command io_redirect* ; /* Apply rule 9 */
  """
  body = self.ParseCompoundCommand()
  assert body is not None
  redirects = self._ParseRedirectList()
  assert redirects is not None

  func.body = body
  func.redirects = redirects
def ParseFunctionDef(self):
  """Parse a POSIX-style function definition: f() { ... }

  function_header : fname '(' ')'
  function_def    : function_header newline_ok function_body ;

  Precondition: looking at the function name.

  NOTE: There is an ambiguity with:
    function foo ( echo hi )     and
    function foo () ( echo hi )
  Bash only accepts the latter, though it doesn't really follow a grammar.
  """
  left_spid = word.LeftMostSpanForWord(self.cur_word)

  ok, name = word.AsFuncName(self.cur_word)
  if not ok:
    p_die('Invalid function name', word=self.cur_word)
  self._Next()  # skip function name

  # Must be true because of lookahead
  self._Peek()
  assert self.c_id == Id.Op_LParen, self.cur_word

  self.lexer.PushHint(Id.Op_RParen, Id.Right_FuncDef)
  self._Next()

  self._Eat(Id.Right_FuncDef)
  after_name_spid = word.LeftMostSpanForWord(self.cur_word) + 1

  self._NewlineOk()

  func = command.FuncDef()
  func.name = name
  self.ParseFunctionBody(func)

  func.spids.append(left_spid)
  func.spids.append(after_name_spid)
  return func
def ParseKshFunctionDef(self):
  """Parse a KSH-style function definition: function f [()] { ... }

  ksh_function_def : 'function' fname ( '(' ')' )? newline_ok function_body
  """
  left_spid = word.LeftMostSpanForWord(self.cur_word)

  self._Next()  # skip past 'function'
  self._Peek()

  ok, name = word.AsFuncName(self.cur_word)
  if not ok:
    p_die('Invalid KSH-style function name', word=self.cur_word)

  after_name_spid = word.LeftMostSpanForWord(self.cur_word) + 1
  self._Next()  # skip past the function name

  self._Peek()
  if self.c_id == Id.Op_LParen:  # the () is optional in ksh style
    self.lexer.PushHint(Id.Op_RParen, Id.Right_FuncDef)
    self._Next()
    self._Eat(Id.Right_FuncDef)

    # Change it: after )
    after_name_spid = word.LeftMostSpanForWord(self.cur_word) + 1

  self._NewlineOk()

  func = command.FuncDef()
  func.name = name
  self.ParseFunctionBody(func)

  func.spids.append(left_spid)
  func.spids.append(after_name_spid)
  return func
def ParseCoproc(self):
  """Parse the bash 'coproc' construct.  TODO: not implemented yet."""
  raise NotImplementedError
def ParseDBracket(self):
  """Parse [[ ... ]] by handing the word parser to the boolean parser."""
  maybe_error_word = self.cur_word
  # TODO: Test interactive. Without closing ]], you should get > prompt
  # (PS2)

  self._Next()  # skip [[
  b_parser = bool_parse.BoolParser(self.w_parser)
  bnode = b_parser.Parse()  # may raise
  return command.DBracket(bnode)
def ParseCommand(self, cur_aliases=None):
  """
  command : simple_command
          | compound_command io_redirect*
          | function_def
          | ksh_function_def
          ;

  Args:
    cur_aliases: (alias name, word index) pairs already being expanded,
      threaded through to ParseSimpleCommand to stop infinite recursion.
  """
  cur_aliases = cur_aliases or []

  self._Peek()

  if self.c_id in NOT_FIRST_WORDS:
    p_die('Unexpected word when parsing command', word=self.cur_word)

  if self.c_id == Id.KW_Function:
    return self.ParseKshFunctionDef()

  # NOTE: We should have another Kind for "initial keywords". And then
  # NOT_FIRST_WORDS are "secondary keywords".
  if self.c_id in (
      Id.KW_DLeftBracket, Id.Op_DLeftParen, Id.Op_LParen, Id.Lit_LBrace,
      Id.KW_For, Id.KW_While, Id.KW_Until, Id.KW_If, Id.KW_Case, Id.KW_Time):
    node = self.ParseCompoundCommand()
    assert node is not None
    if node.tag != command_e.TimeBlock:  # The only one without redirects
      node.redirects = self._ParseRedirectList()
      assert node.redirects is not None
    return node

  # NOTE: Added to fix cases in parse-errors.test.sh, but it doesn't work
  # because Lit_RBrace is in END_LIST below.
  # TODO: KW_Do is also invalid here.
  if self.c_id == Id.Lit_RBrace:
    p_die('Unexpected right brace', word=self.cur_word)

  if self.c_kind == Kind.Redir:  # leading redirect
    return self.ParseSimpleCommand(cur_aliases)

  if self.c_kind == Kind.Word:
    if (self.w_parser.LookAhead() == Id.Op_LParen and
        not word.IsVarLike(self.cur_word)):
      return self.ParseFunctionDef()  # f() { echo; } # function

    # echo foo
    # f=(a b c)  # array
    # array[1+2]+=1
    return self.ParseSimpleCommand(cur_aliases)

  if self.c_kind == Kind.Eof:
    p_die("Unexpected EOF while parsing command", word=self.cur_word)

  # e.g. )
  p_die("Invalid word while parsing command", word=self.cur_word)
def ParsePipeline(self):
  """
  pipeline : Bang? command ( '|' newline_ok command )* ;
  """
  negated = False
  self._Peek()
  if self.c_id == Id.KW_Bang:
    negated = True
    self._Next()

  child = self.ParseCommand()
  assert child is not None
  children = [child]

  self._Peek()
  if self.c_id not in (Id.Op_Pipe, Id.Op_PipeAmp):
    # A single command; only wrap it in a Pipeline node if it was negated.
    if negated:
      return command.Pipeline(children, negated)
    return child

  pipe_index = 0
  stderr_indices = []  # positions where |& (stderr too) was used

  if self.c_id == Id.Op_PipeAmp:
    stderr_indices.append(pipe_index)
  pipe_index += 1

  while True:
    self._Next()  # skip past Id.Op_Pipe or Id.Op_PipeAmp
    self._NewlineOk()

    child = self.ParseCommand()
    assert child is not None
    children.append(child)

    self._Peek()
    if self.c_id not in (Id.Op_Pipe, Id.Op_PipeAmp):
      break

    if self.c_id == Id.Op_PipeAmp:
      stderr_indices.append(pipe_index)
    pipe_index += 1

  node = command.Pipeline(children, negated)
  node.stderr_indices = stderr_indices
  return node
def ParseAndOr(self):
  """
  and_or : and_or ( AND_IF | OR_IF ) newline_ok pipeline
         | pipeline

  Note that it is left recursive and left associative. We parse it
  iteratively with a token of lookahead.
  """
  child = self.ParsePipeline()
  assert child is not None

  self._Peek()
  if self.c_id not in (Id.Op_DPipe, Id.Op_DAmp):
    return child  # a plain pipeline; no AndOr wrapper

  ops = []
  children = [child]
  while True:
    ops.append(self.c_id)

    self._Next()  # skip past || or &&
    self._NewlineOk()

    child = self.ParsePipeline()
    assert child is not None
    children.append(child)

    self._Peek()
    if self.c_id not in (Id.Op_DPipe, Id.Op_DAmp):
      break

  return command.AndOr(ops, children)
# NOTE: _ParseCommandLine and _ParseCommandTerm are similar, but different.
# At the top level, We want to execute after every line:
# - to process alias
# - to process 'exit', because invalid syntax might appear after it
# But for say a while loop body, we want to parse the whole thing at once, and
# then execute it. We don't want to parse it over and over again!
# COMPARE
# command_line : and_or (sync_op and_or)* trailer? ; # TOP LEVEL
# command_term : and_or (trailer and_or)* ; # CHILDREN
def _ParseCommandLine(self):
  """Parse a single top-level line; executed after every line (see below).

  command_line : and_or (sync_op and_or)* trailer? ;
  trailer      : sync_op newline_ok
               | NEWLINES;
  sync_op      : '&' | ';';

  NOTE: This rule causes LL(k > 1) behavior. We would have to peek to see
  if there is another command word after the sync op.  It's easier to
  express imperatively.  Do the following in a loop:
    1. ParseAndOr
    2. Peek.
       a. If there's a newline, return.  (We're only parsing a single line.)
       b. If there's a sync_op, process it.  Then look for a newline and
          return.  Otherwise, parse another AndOr.
  """
  # NOTE: This is slightly different than END_LIST in _ParseCommandTerm,
  # and unfortunately somewhat ad hoc.
  END_LIST = (Id.Op_Newline, Id.Eof_Real, Id.Op_RParen)

  children = []
  done = False
  while not done:
    child = self.ParseAndOr()
    assert child is not None

    self._Peek()
    if self.c_id in (Id.Op_Semi, Id.Op_Amp):
      # Wrap the command in a Sentence that records the terminator token.
      child = command.Sentence(child, self.cur_word.token)
      self._Next()
      self._Peek()
      if self.c_id in END_LIST:
        done = True
    elif self.c_id in END_LIST:
      done = True
    else:
      # e.g. echo a(b)
      p_die('Unexpected word while parsing command line',
            word=self.cur_word)

    children.append(child)

  # Simplify the AST: a single child needs no CommandList wrapper.
  if len(children) > 1:
    return command.CommandList(children)
  else:
    return children[0]
def _ParseCommandTerm(self):
  """Parse a whole command block at once (unlike _ParseCommandLine).

  command_term : and_or (trailer and_or)* ;
  trailer      : sync_op newline_ok
               | NEWLINES;
  sync_op      : '&' | ';';

  This is handled in imperative style, like _ParseCommandLine.  Called by
  _ParseCommandList for all blocks, and also for ParseCaseItem, which is
  slightly different.  (HOW? Is it the DSEMI?)

  Returns:
    syntax_asdl.command
  """
  # Token types that will end the command term.
  END_LIST = (self.eof_id, Id.Right_Subshell, Id.Lit_RBrace, Id.Op_DSemi)

  # NOTE: This is similar to _ParseCommandLine.
  #
  # - Why aren't we doing END_LIST in _ParseCommandLine?
  #   - Because you will never be inside $() at the top level.
  #   - We also know it will end in a newline.  It can't end in "fi"!
  #     - example: if true; then { echo hi; } fi

  children = []
  done = False
  while not done:
    self._Peek()

    # Most keywords are valid "first words".  But do/done/then do not
    # BEGIN commands, so they are not valid.
    if self.c_id in NOT_FIRST_WORDS:
      break

    child = self.ParseAndOr()
    assert child is not None

    self._Peek()
    if self.c_id == Id.Op_Newline:
      self._Next()
      self._Peek()
      if self.c_id in END_LIST:
        done = True

    elif self.c_id in (Id.Op_Semi, Id.Op_Amp):
      child = command.Sentence(child, self.cur_word.token)
      self._Next()
      self._Peek()
      if self.c_id == Id.Op_Newline:
        self._Next()  # skip over newline

        # Test if we should keep going.  There might be another command
        # after the semi and newline.
        self._Peek()
        if self.c_id in END_LIST:  # \n EOF
          done = True
      elif self.c_id in END_LIST:  # ; EOF
        done = True

    elif self.c_id in END_LIST:  # EOF
      done = True

    else:
      pass  # e.g. "} done", "fi fi", ") fi", etc. is OK

    children.append(child)

  self._Peek()
  return command.CommandList(children)
# TODO: Make this private.
def _ParseCommandList(self):
  """Parse a command block; called by all the compound commands.

  command_list : newline_ok command_term trailer? ;

  NOTE: Rather than translating the CFG directly, the code follows a style
  more like (and_or trailer)+.  It makes capture easier.
  """
  self._NewlineOk()
  node = self._ParseCommandTerm()
  assert node is not None
  return node
def ParseLogicalLine(self):
  """Parse a single line for main_loop.

  A wrapper around _ParseCommandLine().  Similar but not identical to
  _ParseCommandList() and ParseCommandSub().

  Returns:
    A command node, or None at real EOF.

  Raises:
    util.ParseError, so callers can catch it in one place.
  """
  self._NewlineOk()
  self._Peek()
  if self.c_id == Id.Eof_Real:
    return None  # nothing left to parse

  node = self._ParseCommandLine()
  assert node is not None
  return node
def ParseCommandSub(self):
  """Parse $(echo hi) and `echo hi` for word_parse.py.

  They can span multiple lines, like this:
    echo $(
      echo one
      echo two
    )
  """
  self._NewlineOk()

  if self.c_kind == Kind.Eof:  # e.g. $()
    return command.NoOp()

  # This calls ParseAndOr(), but I think it should be a loop that calls
  # _ParseCommandLine(), like oil.InteractiveLoop.
  node = self._ParseCommandTerm()
  assert node is not None
  return node
| 29.227273 | 82 | 0.647755 | #!/usr/bin/env python
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_parse.py - Parse high level shell commands.
"""
from __future__ import print_function
from asdl import const
from core import alloc
from core import util
from core.meta import syntax_asdl, types_asdl, Id, Kind
from frontend import match
from frontend import reader
from osh import braces
from osh import bool_parse
from osh import word
log = util.log
p_die = util.p_die
assign_op_e = syntax_asdl.assign_op_e
command = syntax_asdl.command
command_e = syntax_asdl.command_e
lhs_expr = syntax_asdl.lhs_expr
redir = syntax_asdl.redir
word_part = syntax_asdl.word_part
word_e = syntax_asdl.word_e
osh_word = syntax_asdl.word # TODO: rename
lex_mode_e = types_asdl.lex_mode_e
def _ReadHereLines(line_reader, h, delimiter):
# NOTE: We read all lines at once, instead of parsing line-by-line,
# because of cases like this:
# cat <<EOF
# 1 $(echo 2
# echo 3) 4
# EOF
here_lines = []
last_line = None
while True:
line_id, line, unused_offset = line_reader.GetLine()
if not line: # EOF
# An unterminated here doc is just a warning in bash. We make it
# fatal because we want to be strict, and because it causes problems
# reporting other errors.
# Attribute it to the << in <<EOF for now.
p_die("Couldn't find terminator for here doc that starts here",
token=h.op)
# If op is <<-, strip off ALL leading tabs -- not spaces, and not just
# the first tab.
start_offset = 0
if h.op.id == Id.Redir_DLessDash:
n = len(line)
i = 0
while i < n:
if line[i] != '\t':
break
i += 1
start_offset = i
if line[start_offset:].rstrip() == delimiter:
last_line = (line_id, line, start_offset)
break
here_lines.append((line_id, line, start_offset))
return here_lines, last_line
def _MakeLiteralHereLines(here_lines, arena):
"""Create a line_span and a token for each line."""
tokens = []
for line_id, line, start_offset in here_lines:
line_span = syntax_asdl.line_span(line_id, start_offset, len(line))
span_id = arena.AddLineSpan(line_span)
t = syntax_asdl.token(Id.Lit_Chars, line[start_offset:], span_id)
tokens.append(t)
return [word_part.LiteralPart(t) for t in tokens]
def _ParseHereDocBody(parse_ctx, h, line_reader, arena):
"""Fill in attributes of a pending here doc node."""
# "If any character in word is quoted, the delimiter shall be formed by
# performing quote removal on word, and the here-document lines shall not
# be expanded. Otherwise, the delimiter shall be the word itself."
# NOTE: \EOF counts, or even E\OF
ok, delimiter, delim_quoted = word.StaticEval(h.here_begin)
if not ok:
p_die('Invalid here doc delimiter', word=h.here_begin)
here_lines, last_line = _ReadHereLines(line_reader, h, delimiter)
if delim_quoted: # << 'EOF'
# LiteralPart for each line.
h.stdin_parts = _MakeLiteralHereLines(here_lines, arena)
else:
line_reader = reader.VirtualLineReader(here_lines, arena)
w_parser = parse_ctx.MakeWordParserForHereDoc(line_reader)
w_parser.ReadHereDocBody(h.stdin_parts) # fills this in
end_line_id, end_line, end_pos = last_line
# Create a span with the end terminator. Maintains the invariant that
# the spans "add up".
line_span = syntax_asdl.line_span(end_line_id, end_pos, len(end_line))
h.here_end_span_id = arena.AddLineSpan(line_span)
def _MakeAssignPair(parse_ctx, preparsed):
  """Create an assign_pair from a 4-tuple produced by DetectAssignment.

  Handles both scalar LHS (s=1, s+=1) and indexed LHS (a[x+1]=1), reparsing
  the text between the brackets as an arithmetic expression in the latter
  case.
  """
  left_token, close_token, part_offset, w = preparsed
  if left_token.id == Id.Lit_VarLike:  # s=1
    # Lit_VarLike ends in '=' or '+='; strip the operator to get the name.
    if left_token.val[-2] == '+':
      var_name = left_token.val[:-2]
      op = assign_op_e.PlusEqual
    else:
      var_name = left_token.val[:-1]
      op = assign_op_e.Equal
    lhs = lhs_expr.LhsName(var_name)
    lhs.spids.append(left_token.span_id)
  elif left_token.id == Id.Lit_ArrayLhsOpen:  # a[x++]=1
    var_name = left_token.val[:-1]  # strip the '[' that opens the index
    # The '+' of '+=' lands on the closing token for array assignments.
    if close_token.val[-2] == '+':
      op = assign_op_e.PlusEqual
    else:
      op = assign_op_e.Equal
    # Adapted from tools/osh2oil.py Cursor.PrintUntil
    # TODO: Make a method like arena.AppendPieces(start, end, []), and share
    # with alias.
    # Recover the raw source text between '[' and ']' from the arena.
    pieces = []
    for span_id in xrange(left_token.span_id + 1, close_token.span_id):
      span = parse_ctx.arena.GetLineSpan(span_id)
      line = parse_ctx.arena.GetLine(span.line_id)
      piece = line[span.col : span.col + span.length]
      pieces.append(piece)
    # Now reparse everything between here
    code_str = ''.join(pieces)
    # NOTE: It's possible that an alias expansion underlies this, not a real file!
    # We have to use a SideArena since this will happen during translation.
    # line_num/source_name are placeholders — real location info is a TODO.
    line_num = 99
    source_name = 'TODO'
    arena = alloc.SideArena('<LHS array index at line %d of %s>' %
                            (line_num, source_name))
    a_parser = parse_ctx.MakeArithParser(code_str, arena)
    expr = a_parser.Parse()  # raises util.ParseError
    # TODO: It reports from the wrong arena!
    lhs = lhs_expr.LhsIndexedName(var_name, expr)
    lhs.spids.append(left_token.span_id)
  else:
    raise AssertionError
  # TODO: Should we also create a rhs_exp.ArrayLiteral here?
  # The RHS is everything after the detected assignment prefix; an empty
  # RHS (FOO=) becomes EmptyWord.
  n = len(w.parts)
  if part_offset == n:
    val = osh_word.EmptyWord()
  else:
    val = osh_word.CompoundWord(w.parts[part_offset:])
    val = word.TildeDetect(val) or val  # FOO=~/bar needs tilde detection
  pair = syntax_asdl.assign_pair(lhs, op, val)
  pair.spids.append(left_token.span_id)  # Do we need this?
  return pair
def _AppendMoreEnv(preparsed_list, more_env):
  """Convert preparsed assignment words into env_pairs on a SimpleCommand.

  Args:
    preparsed_list: a list of 4-tuples from DetectAssignment.
    more_env: output list that env_pairs are appended to.
  """
  for tok, _, offset, w in preparsed_list:
    # a[x]=1 is not a valid environment binding.
    if tok.id != Id.Lit_VarLike:
      p_die("Environment binding shouldn't look like an array assignment",
            token=tok)
    # FOO+=bar is likewise rejected in env-binding position.
    if tok.val[-2] == '+':
      p_die('Expected = in environment binding, got +=', token=tok)
    name = tok.val[:-1]  # strip the trailing '='
    if offset == len(w.parts):
      rhs = osh_word.EmptyWord()  # e.g. FOO= cmd
    else:
      rhs = osh_word.CompoundWord(w.parts[offset:])
    pair = syntax_asdl.env_pair(name, rhs)
    pair.spids.append(tok.span_id)  # Do we need this?
    more_env.append(pair)
def _MakeAssignment(parse_ctx, assign_kw, suffix_words):
  """Create a command.Assignment node from a keyword and a list of words.

  Args:
    parse_ctx: ParseContext, passed through to _MakeAssignPair.
    assign_kw: the assignment keyword Id (declare, local, export, ...).
    suffix_words: the keyword word followed by flags/bindings.

  NOTE: We don't allow dynamic assignments like:
  local $1
  This can be replaced with eval 'local $1'
  """
  # First parse flags, e.g. -r -x -a -A.  None of the flags have arguments.
  # Flag parsing stops at the first word that isn't a static '-...' constant.
  flags = []
  n = len(suffix_words)
  i = 1  # suffix_words[0] is the keyword itself
  while i < n:
    w = suffix_words[i]
    ok, static_val, quoted = word.StaticEval(w)
    if not ok or quoted:
      break  # can't statically evaluate
    if static_val.startswith('-'):
      flags.append(static_val)
    else:
      break  # not a flag, rest are args
    i += 1
  # Now parse bindings or variable names
  pairs = []
  while i < n:
    w = suffix_words[i]
    # declare x[y]=1 is valid
    left_token, close_token, part_offset = word.DetectAssignment(w)
    if left_token:
      pair = _MakeAssignPair(parse_ctx, (left_token, close_token, part_offset, w))
    else:
      # A bare name like 'local x' — no '=' detected.
      # In aboriginal in variables/sources: export_if_blank does export "$1".
      # We should allow that.
      # Parse this differently then?  # dynamic-export?  It sets global
      # variables.
      ok, static_val, quoted = word.StaticEval(w)
      if not ok or quoted:
        p_die("Variable names must be unquoted constants", word=w)
      # No value is equivalent to ''
      if not match.IsValidVarName(static_val):
        p_die('Invalid variable name %r', static_val, word=w)
      lhs = lhs_expr.LhsName(static_val)
      lhs.spids.append(word.LeftMostSpanForWord(w))
      # None value distinguishes 'local x' from 'local x=' downstream.
      pair = syntax_asdl.assign_pair(lhs, assign_op_e.Equal, None)
      left_spid = word.LeftMostSpanForWord(w)
      pair.spids.append(left_spid)
    pairs.append(pair)
    i += 1
  node = command.Assignment(assign_kw, flags, pairs)
  return node
def _SplitSimpleCommandPrefix(words):
  """Split words into a leading run of assignments and the remaining words.

  Second pass of SimpleCommand parsing.

  Returns:
    (preparsed_list, suffix_words): preparsed_list holds 4-tuples
    (left_token, close_token, part_offset, word) for each prefix binding;
    suffix_words is everything from the first non-assignment word onward.
  """
  prefix = []
  # Consume assignment-looking words until the first word that isn't one.
  for i, w in enumerate(words):
    left_token, close_token, part_offset = word.DetectAssignment(w)
    if not left_token:
      return prefix, words[i:]
    prefix.append((left_token, close_token, part_offset, w))
  return prefix, []  # every word was an assignment
def _MakeSimpleCommand(preparsed_list, suffix_words, redirects):
  """Build a command.SimpleCommand from bindings, words, and redirects."""
  # Array literals are rejected in both positions:
  #   FOO=(1 2 3) ls     -- not allowed as an environment binding
  #   echo FOO=(1 2 3)   -- not allowed as a command word
  # (but we should NOT fail on echo FOO[x]=1).
  for _, _, _, w in preparsed_list:
    if word.HasArrayPart(w):
      p_die("Environment bindings can't contain array literals", word=w)
  for w in suffix_words:
    if word.HasArrayPart(w):
      p_die("Commands can't contain array literals", word=w)
  # NOTE: In bash, {~bob,~jane}/src works even though ~ isn't the leading
  # character of the initial word, which means tilde detection should really
  # happen AFTER brace EXPANSION, not just after brace DETECTION as here.
  # The BracedWordTree instances have to be expanded into CompoundWord
  # instances for the tilde detection to work.
  detected = braces.BraceDetectAll(suffix_words)
  node = command.SimpleCommand()
  node.words = word.TildeDetectAll(detected)
  node.redirects = redirects
  _AppendMoreEnv(preparsed_list, node.more_env)
  return node
# Keywords that only appear in the middle or at the end of a compound
# command, and therefore can never begin a new command.  ParseCommand()
# rejects them up front with a parse error.
NOT_FIRST_WORDS = (
    Id.KW_Do, Id.KW_Done, Id.KW_Then, Id.KW_Fi, Id.KW_Elif,
    Id.KW_Else, Id.KW_Esac
)
class CommandParser(object):
"""
Args:
word_parse: to get a stream of words
lexer: for lookahead in function def, PushHint of ()
line_reader: for here doc
"""
  def __init__(self, parse_ctx, w_parser, lexer_, line_reader, arena=None,
               eof_id=Id.Eof_Real):
    """
    Args:
      parse_ctx: ParseContext; also supplies aliases and the default arena.
      w_parser: word parser, for normal parsing.
      lexer_: for pushing hints and lookahead to '('.
      line_reader: for reading here doc bodies.
      arena: overrides parse_ctx.arena when given (e.g. a side arena).
      eof_id: the token Id that terminates parsing (default Eof_Real).
    """
    self.parse_ctx = parse_ctx
    self.aliases = parse_ctx.aliases  # aliases to expand at parse time
    self.w_parser = w_parser  # for normal parsing
    self.lexer = lexer_  # for pushing hints, lookahead to (
    self.line_reader = line_reader  # for here docs
    self.arena = arena or parse_ctx.arena  # for adding here doc and alias spans
    self.eof_id = eof_id
    self.Reset()
  def Reset(self):
    """Reset our own internal state.

    Called by the interactive loop.
    """
    # Cursor state set by _Peek()
    self.next_lex_mode = lex_mode_e.Outer
    self.cur_word = None  # current word
    self.c_kind = Kind.Undefined
    self.c_id = Id.Undefined_Tok
    # Here doc nodes waiting for their bodies; drained by _Peek() when it
    # sees a newline.
    self.pending_here_docs = []
  def Error(self):
    """Placeholder error description; real implementation is a TODO."""
    return 'TODO: for completion'
  def ResetInputObjects(self):
    """Reset the internal state of our inputs.

    Called by the interactive loop.
    """
    # Resets propagate to the word parser, lexer, and line reader.
    self.w_parser.Reset()
    self.lexer.ResetInputObjects()
    self.line_reader.Reset()
# NOTE: If our approach to _MaybeExpandAliases isn't sufficient, we could
# have an expand_alias=True flag here? We would litter the parser with calls
# to this like dash and bash.
#
# Although it might be possible that you really need to mutate the parser
# state, and not just provide a parameter to _Next().
# You might also need a flag to indicate whether the previous expansion ends
# with ' '. I didn't see that in dash or bash code.
  def _Next(self, lex_mode=lex_mode_e.Outer):
    """Advance the cursor: request that the NEXT _Peek() read a new word.

    The read itself is deferred; cur_word/c_id/c_kind are unchanged until
    _Peek() runs.
    """
    self.next_lex_mode = lex_mode
  def Peek(self):
    """Public method for the REPL: return the current word after peeking."""
    self._Peek()
    return self.cur_word
  def _Peek(self):
    """Read the next word into cur_word if _Next() requested a new lex mode.

    Also updates c_kind and c_id, and fills in any pending here doc bodies
    when a newline is seen.  (May raise on a bad command sub word,
    unterminated quoted string, etc.)
    """
    if self.next_lex_mode != lex_mode_e.Undefined:
      w = self.w_parser.ReadWord(self.next_lex_mode)
      assert w is not None
      # Here docs only happen in command mode, so other kinds of newlines don't
      # count.
      if w.tag == word_e.TokenWord and w.token.id == Id.Op_Newline:
        for h in self.pending_here_docs:
          _ParseHereDocBody(self.parse_ctx, h, self.line_reader, self.arena)
        del self.pending_here_docs[:]  # No .clear() until Python 3.3.
      self.cur_word = w
      self.c_kind = word.CommandKind(self.cur_word)
      self.c_id = word.CommandId(self.cur_word)
      # Mark the request as satisfied so repeated _Peek() calls are no-ops.
      self.next_lex_mode = lex_mode_e.Undefined
  def _Eat(self, c_id):
    """Consume a word of the given type, or raise a parse error (p_die).

    Args:
      c_id: either EKeyword.* or a token type like Id.Right_Subshell.
        TODO: Rationalize / type check this.
    """
    self._Peek()
    # TODO: Printing something like KW_Do is not friendly.  We can map
    # backwards using the _KEYWORDS list in osh/lex.py.
    if self.c_id != c_id:
      p_die('Expected word type %s, got %s', c_id,
            word.CommandId(self.cur_word), word=self.cur_word)
    self._Next()
  def _NewlineOk(self):
    """Check for an optional newline and consume it, leaving cur_word fresh."""
    self._Peek()
    if self.c_id == Id.Op_Newline:
      self._Next()
      self._Peek()
  def ParseRedirect(self):
    """Parse one redirect: either a here doc (<<, <<-) or a file redirect.

    Precondition: the cursor is on a Redir-kind word.
    Returns a redir.HereDoc or redir.Redir node.

    Problem: You don't know which kind of redir_node to instantiate before
    this?  You could stuff them all in one node, and then have a switch() on
    the type.
    You need different types.
    """
    self._Peek()
    assert self.c_kind == Kind.Redir, self.cur_word
    op = self.cur_word.token
    # For now only supporting single digit descriptor
    first_char = self.cur_word.token.val[0]
    if first_char.isdigit():
      fd = int(first_char)
    else:
      fd = const.NO_INTEGER  # no explicit descriptor, e.g. plain '>'
    if op.id in (Id.Redir_DLess, Id.Redir_DLessDash):  # here doc
      node = redir.HereDoc()
      node.op = op
      node.fd = fd
      self._Next()
      self._Peek()
      node.here_begin = self.cur_word  # the delimiter word, e.g. EOF
      self._Next()
      # The body can't be read until the enclosing command's newline.
      self.pending_here_docs.append(node)  # will be filled on next newline.
    else:
      node = redir.Redir()
      node.op = op
      node.fd = fd
      self._Next()
      self._Peek()
      if self.c_kind != Kind.Word:
        p_die('Invalid token after redirect operator', word=self.cur_word)
      # > ~/out.txt needs tilde detection on the target word.
      new_word = word.TildeDetect(self.cur_word)
      node.arg_word = new_word or self.cur_word
      self._Next()
    return node
  def _ParseRedirectList(self):
    """Try parsing any redirects at the cursor.

    This is used for blocks only, not commands.

    Returns:
      A (possibly empty) list of redirect nodes.
    """
    redirects = []
    while True:
      self._Peek()
      # This prediction needs to ONLY accept redirect operators.  Should we
      # make them a separate TokenKind?
      if self.c_kind != Kind.Redir:
        break
      node = self.ParseRedirect()
      redirects.append(node)
      self._Next()
    return redirects
  def _ScanSimpleCommand(self):
    """First pass of simple command parsing: split into redirects and words.

    Scans until the first word that is neither a redirect nor a plain word.

    Returns:
      (redirects, words) — two lists, in source order.
    """
    redirects = []
    words = []
    # Set a reference so we can inspect state after a failed parse!
    self.parse_ctx.trail.SetLatestWords(words, redirects)
    while True:
      self._Peek()
      if self.c_kind == Kind.Redir:
        node = self.ParseRedirect()
        redirects.append(node)
      elif self.c_kind == Kind.Word:
        words.append(self.cur_word)
      else:
        break  # operator, keyword, EOF, etc. ends the command
      self._Next()
    return redirects, words
  def _MaybeExpandAliases(self, words, cur_aliases):
    """Try to expand aliases.

    Our implementation of alias has two design choices:
    - Where to insert it in parsing.  We do it at the end of
      ParseSimpleCommand.
    - What grammar rule to parse the expanded alias buffer with.  In our case
      it's ParseCommand().

    This doesn't quite match what other shells do, but I can't figure out a
    better place.

    Most test cases pass, except for ones like:

    alias LBRACE='{'
    LBRACE echo one; echo two; }

    alias MULTILINE='echo 1
    echo 2
    echo 3'
    MULTILINE

    NOTE: dash handles aliases in a totally different way.  It has a global
    variable checkkwd in parser.c.  It assigns it all over the grammar, like
    this:

    checkkwd = CHKNL | CHKKWD | CHKALIAS;

    The readtoken() function checks (checkkwd & CHKALIAS) and then calls
    lookupalias().  This seems to provide a consistent behavior among shells,
    but it's less modular and testable.

    Bash also uses a global 'parser_state & PST_ALEXPNEXT'.

    Args:
      words: the simple command's words (non-redirects).
      cur_aliases: (name, index) pairs already expanded, for cycle detection.

    Returns:
      A command node if any aliases were expanded, or None otherwise.
    """
    # The last char that we might parse.
    # NOTE(review): this initial right_spid appears unused before it's
    # reassigned in the copy loop below — possibly vestigial.
    right_spid = word.RightMostSpanForWord(words[-1])
    first_word_str = None  # for error message
    expanded = []
    i = 0
    n = len(words)
    # Phase 1: expand a leading run of alias names into source text.
    while i < n:
      w = words[i]
      ok, word_str, quoted = word.StaticEval(w)
      if not ok or quoted:
        break
      alias_exp = self.aliases.get(word_str)
      if alias_exp is None:
        break
      # Prevent infinite loops.  This is subtle: we want to prevent infinite
      # expansion of alias echo='echo x'.  But we don't want to prevent
      # expansion of the second word in 'echo echo', so we add 'i' to
      # "cur_aliases".
      if (word_str, i) in cur_aliases:
        break
      if i == 0:
        first_word_str = word_str  # for error message
      #log('%r -> %r', word_str, alias_exp)
      cur_aliases.append((word_str, i))
      expanded.append(alias_exp)
      i += 1
      if not alias_exp.endswith(' '):
        # alias e='echo [ ' is the same expansion as
        # alias e='echo ['
        # The trailing space indicates whether we should continue to expand
        # aliases; it's not part of it.
        expanded.append(' ')
        break  # No more expansions
    if not expanded:  # No expansions; caller does parsing.
      return None
    # Phase 2: we got some expansion.  Now copy the rest of the words.
    # We need each NON-REDIRECT word separately!  For example:
    # $ echo one >out two
    # dash/mksh/zsh go beyond the first redirect!
    while i < n:
      w = words[i]
      left_spid = word.LeftMostSpanForWord(w)
      right_spid = word.RightMostSpanForWord(w)
      # Adapted from tools/osh2oil.py Cursor.PrintUntil
      for span_id in xrange(left_spid, right_spid + 1):
        span = self.arena.GetLineSpan(span_id)
        line = self.arena.GetLine(span.line_id)
        piece = line[span.col : span.col + span.length]
        expanded.append(piece)
      expanded.append(' ')  # Put space back between words.
      i += 1
    # Phase 3: reparse the assembled buffer as a command.
    code_str = ''.join(expanded)
    lines = code_str.splitlines(True)  # Keep newlines
    line_info = []
    # TODO: Add location information
    self.arena.PushSource(
        '<expansion of alias %r at line %d of %s>' %
        (first_word_str, -1, 'TODO'))
    try:
      for i, line in enumerate(lines):
        line_id = self.arena.AddLine(line, i+1)
        line_info.append((line_id, line, 0))
    finally:
      self.arena.PopSource()
    line_reader = reader.VirtualLineReader(line_info, self.arena)
    cp = self.parse_ctx.MakeOshParser(line_reader)
    try:
      node = cp.ParseCommand(cur_aliases=cur_aliases)
    except util.ParseError as e:
      # Failure to parse alias expansion is a fatal error.
      # NOTE(review): this catch-and-reraise is currently a no-op; kept to
      # mark where dedicated handling could go.
      raise
    if 0:  # debugging aid, normally disabled
      log('AFTER expansion:')
      from osh import ast_lib
      ast_lib.PrettyPrint(node)
    return node
# Flags that indicate an assignment should be parsed like a command.
_ASSIGN_COMMANDS = set([
(Id.Assign_Declare, '-f'), # function defs
(Id.Assign_Declare, '-F'), # function names
(Id.Assign_Declare, '-p'), # print
(Id.Assign_Typeset, '-f'),
(Id.Assign_Typeset, '-F'),
(Id.Assign_Typeset, '-p'),
(Id.Assign_Local, '-p'),
(Id.Assign_Readonly, '-p'),
# Hm 'export -p' is more like a command. But we're parsing it
# dynamically now because of some wrappers.
# Maybe we could change this.
#(Id.Assign_Export, '-p'),
])
# Flags to parse like assignments: -a -r -x (and maybe -i)
  def ParseSimpleCommand(self, cur_aliases):
    """
    Fixed transcription of the POSIX grammar (TODO: port to grammar/Shell.g)

    io_file        : '<'       filename
                   | LESSAND   filename
                     ...

    io_here        : DLESS     here_end
                   | DLESSDASH here_end

    redirect       : IO_NUMBER (io_redirect | io_here)

    prefix_part    : ASSIGNMENT_WORD | redirect
    cmd_part       : WORD | redirect

    assign_kw      : Declare | Export | Local | Readonly

    # Without any words it is parsed as a command, not an assignment
    assign_listing : assign_kw

    # Now we have something to do (might be changing assignment flags too)
    # NOTE: any prefixes should be a warning, but they are allowed in shell.
    assignment     : prefix_part* assign_kw (WORD | ASSIGNMENT_WORD)+

    # an external command, a function call, or a builtin -- a "word_command"
    word_command   : prefix_part* cmd_part+

    simple_command : assign_listing
                   | assignment
                   | proc_command

    Simple imperative algorithm:

    1) Read a list of words and redirects.  Append them to separate lists.
    2) Look for the first non-assignment word.  If it's declare, etc., then
    keep parsing words AND assign words.  Otherwise, just parse words.
    3) If there are no non-assignment words, then it's a global assignment.

    { redirects, global assignments } OR
    { redirects, prefix_bindings, words } OR
    { redirects, ERROR_prefix_bindings, keyword, assignments, words }

    THEN CHECK that prefix bindings don't have any array literal parts!
    global assignment and keyword assignments can have the of course.
    well actually EXPORT shouldn't have them either -- WARNING

    3 cases we want to warn: prefix_bindings for assignment, and array literal
    in prefix bindings, or export

    A command can be an assignment word, word, or redirect on its own.

        ls
        >out.txt

        >out.txt FOO=bar   # this touches the file, and then

    Or any sequence:
        ls foo bar
        <in.txt ls foo bar >out.txt
        <in.txt ls >out.txt foo bar

    Or add one or more environment bindings:
        VAR=val env
        >out.txt VAR=val env

    here_end vs filename is a matter of whether we test that it's quoted.  e.g.
    <<EOF vs <<'EOF'.
    """
    result = self._ScanSimpleCommand()
    redirects, words = result
    # Case 1: redirect(s) with no words at all, e.g. >out.txt
    if not words:  # e.g. >out.txt  # redirect without words
      node = command.SimpleCommand()
      node.redirects = redirects
      return node
    preparsed_list, suffix_words = _SplitSimpleCommandPrefix(words)
    # Case 2: only assignments, e.g. ONE=1 a[x]=1 TWO=2 — a global Assignment.
    if not suffix_words:  # ONE=1 a[x]=1 TWO=2  (with no other words)
      if redirects:
        left_token, _, _, _ = preparsed_list[0]
        p_die("Global assignment shouldn't have redirects", token=left_token)
      pairs = []
      for preparsed in preparsed_list:
        pairs.append(_MakeAssignPair(self.parse_ctx, preparsed))
      node = command.Assignment(Id.Assign_None, [], pairs)
      left_spid = word.LeftMostSpanForWord(words[0])
      node.spids.append(left_spid)  # no keyword spid to skip past
      return node
    kind, kw_token = word.KeywordToken(suffix_words[0])
    # Case 3: assignment keyword, e.g. declare/local/export/readonly.
    if kind == Kind.Assign:
      # Here we StaticEval suffix_words[1] to see if we have an ASSIGNMENT
      # COMMAND like 'typeset -p', which lists variables -- a SimpleCommand
      # rather than an Assignment.
      #
      # Note we're not handling duplicate flags like 'typeset -pf'.  I see
      # this in bashdb (bash debugger) but it can just be changed to
      # 'typeset -p -f'.
      is_command = False
      if len(suffix_words) > 1:
        ok, val, _ = word.StaticEval(suffix_words[1])
        if ok and (kw_token.id, val) in self._ASSIGN_COMMANDS:
          is_command = True
      if is_command:  # declare -f, declare -p, typeset -p, etc.
        node = _MakeSimpleCommand(preparsed_list, suffix_words, redirects)
        return node
      if redirects:
        # Attach the error location to the keyword.  It would be more precise
        # to attach it to the redirect itself.
        p_die("Assignments shouldn't have redirects", token=kw_token)
      if preparsed_list:  # FOO=bar local spam=eggs not allowed
        # Use the location of the first value.  TODO: Use the whole word
        # before splitting.
        left_token, _, _, _ = preparsed_list[0]
        p_die("Assignments shouldn't have environment bindings", token=left_token)
      # declare str='', declare -a array=()
      node = _MakeAssignment(self.parse_ctx, kw_token.id, suffix_words)
      node.spids.append(kw_token.span_id)
      return node
    # Case 4: control flow keyword: break/continue/return/exit.
    if kind == Kind.ControlFlow:
      if redirects:
        p_die("Control flow shouldn't have redirects", token=kw_token)
      if preparsed_list:  # FOO=bar local spam=eggs not allowed
        # TODO: Change location as above
        left_token, _, _, _ = preparsed_list[0]
        p_die("Control flow shouldn't have environment bindings",
              token=left_token)
      # Attach the token for errors.  (Assignment may not need it.)
      # At most one argument is allowed, e.g. 'return 1'.
      if len(suffix_words) == 1:
        arg_word = None
      elif len(suffix_words) == 2:
        arg_word = suffix_words[1]
      else:
        p_die('Unexpected argument to %r', kw_token.val, word=suffix_words[2])
      return command.ControlFlow(kw_token, arg_word)
    # Case 5: alias expansion.  If any expansions were detected, parse again.
    node = self._MaybeExpandAliases(suffix_words, cur_aliases)
    if node:
      # NOTE: There are other types of nodes with redirects.  Do they matter?
      if node.tag == command_e.SimpleCommand:
        node.redirects = redirects
        _AppendMoreEnv(preparsed_list, node.more_env)
      return node
    # Case 6: a plain simple command.
    # TODO check that we don't have env1=x x[1]=y env2=z here.
    # FOO=bar printenv.py FOO
    node = _MakeSimpleCommand(preparsed_list, suffix_words, redirects)
    return node
  def ParseBraceGroup(self):
    """
    brace_group : LBrace command_list RBrace ;
    """
    left_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.Lit_LBrace)
    c_list = self._ParseCommandList()
    assert c_list is not None
    # Not needed
    #right_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.Lit_RBrace)
    # The children are hoisted directly into the BraceGroup node.
    node = command.BraceGroup(c_list.children)
    node.spids.append(left_spid)
    return node
  def ParseDoGroup(self):
    """
    Used by ForEach, ForExpr, While, Until.  Should this be a Do node?

    do_group : Do command_list Done ;          /* Apply rule 6 */
    """
    self._Eat(Id.KW_Do)
    # _Eat defers the read, so cur_word here is still the 'do' keyword.
    do_spid = word.LeftMostSpanForWord(self.cur_word)  # after _Eat
    c_list = self._ParseCommandList()  # could be any thing
    assert c_list is not None
    self._Eat(Id.KW_Done)
    done_spid = word.LeftMostSpanForWord(self.cur_word)  # after _Eat
    node = command.DoGroup(c_list.children)
    node.spids.extend((do_spid, done_spid))
    return node
  def ParseForWords(self):
    """Parse the word list after 'for x in', up to ';' or a newline.

    for_words : WORD* for_sep
              ;
    for_sep   : ';' newline_ok
              | NEWLINES
              ;

    Returns:
      (words, semi_spid) — semi_spid is NO_INTEGER when no ';' was seen.
    """
    words = []
    # The span_id of any semi-colon, so we can remove it.
    semi_spid = const.NO_INTEGER
    while True:
      self._Peek()
      if self.c_id == Id.Op_Semi:
        semi_spid = self.cur_word.token.span_id  # TokenWord
        self._Next()
        self._NewlineOk()
        break
      elif self.c_id == Id.Op_Newline:
        self._Next()
        break
      # Anything else must be a plain compound word.
      if self.cur_word.tag != word_e.CompoundWord:
        # TODO: Can we also show a pointer to the 'for' keyword?
        p_die('Invalid word in for loop', word=self.cur_word)
      words.append(self.cur_word)
      self._Next()
    return words, semi_spid
  def _ParseForExprLoop(self):
    """Parse a C-style arithmetic for loop.

    for (( init; cond; update )) for_sep? do_group
    """
    # The word parser handles the (( init; cond; update )) header.
    node = self.w_parser.ReadForExpression()
    assert node is not None
    self._Next()
    self._Peek()
    # Optional separator between the header and the do group.
    if self.c_id == Id.Op_Semi:
      self._Next()
      self._NewlineOk()
    elif self.c_id == Id.Op_Newline:
      self._Next()
    elif self.c_id == Id.KW_Do:  # missing semicolon/newline allowed
      pass
    else:
      p_die('Invalid word after for expression', word=self.cur_word)
    body_node = self.ParseDoGroup()
    assert body_node is not None
    node.body = body_node
    return node
  def _ParseForEachLoop(self):
    """Parse 'for name [in words...] ; do ... done'.

    Precondition: the cursor is on the loop variable name.
    Without an 'in' clause, the loop implicitly iterates over "$@"
    (do_arg_iter).
    """
    node = command.ForEach()
    node.do_arg_iter = False
    ok, iter_name, quoted = word.StaticEval(self.cur_word)
    if not ok or quoted:
      p_die("Loop variable name should be a constant", word=self.cur_word)
    if not match.IsValidVarName(iter_name):
      p_die("Invalid loop variable name", word=self.cur_word)
    node.iter_name = iter_name
    self._Next()  # skip past name
    self._NewlineOk()
    in_spid = const.NO_INTEGER
    semi_spid = const.NO_INTEGER
    self._Peek()
    if self.c_id == Id.KW_In:
      self._Next()  # skip in
      in_spid = word.LeftMostSpanForWord(self.cur_word) + 1
      iter_words, semi_spid = self.ParseForWords()
      assert iter_words is not None
      # Brace detection before tilde detection, as in _MakeSimpleCommand.
      words2 = braces.BraceDetectAll(iter_words)
      words3 = word.TildeDetectAll(words2)
      node.iter_words = words3
    elif self.c_id == Id.Op_Semi:
      node.do_arg_iter = True  # implicit for loop
      self._Next()
    elif self.c_id == Id.KW_Do:
      node.do_arg_iter = True  # implicit for loop
      # do not advance
    else:  # for foo BAD
      p_die('Unexpected word after for loop variable', word=self.cur_word)
    node.spids.extend((in_spid, semi_spid))
    body_node = self.ParseDoGroup()
    assert body_node is not None
    node.body = body_node
    return node
def ParseFor(self):
"""
for_clause : For for_name newline_ok (in for_words? for_sep)? do_group ;
| For '((' ... TODO
"""
self._Eat(Id.KW_For)
self._Peek()
if self.c_id == Id.Op_DLeftParen:
node = self._ParseForExprLoop()
else:
node = self._ParseForEachLoop()
return node
  def ParseWhileUntil(self):
    """
    while_clause : While command_list do_group ;
    until_clause : Until command_list do_group ;
    """
    # Grab the keyword token so evaluation knows while vs until.
    keyword = self.cur_word.parts[0].token
    # This is ensured by the caller
    assert keyword.id in (Id.KW_While, Id.KW_Until), keyword
    self._Next()  # skip while
    cond_node = self._ParseCommandList()
    assert cond_node is not None
    body_node = self.ParseDoGroup()
    assert body_node is not None
    return command.WhileUntil(keyword, cond_node.children, body_node)
  def ParseCaseItem(self):
    """Parse one arm of a case statement.

    case_item: '('? pattern ('|' pattern)* ')'
               newline_ok command_term? trailer? ;

    Returns a syntax_asdl.case_arm.
    """
    # ')' closes the pattern list here, not a subshell.
    self.lexer.PushHint(Id.Op_RParen, Id.Right_CasePat)
    left_spid = word.LeftMostSpanForWord(self.cur_word)
    if self.c_id == Id.Op_LParen:  # the leading '(' is optional
      self._Next()
    # Collect '|'-separated patterns.
    pat_words = []
    while True:
      self._Peek()
      pat_words.append(self.cur_word)
      self._Next()
      self._Peek()
      if self.c_id == Id.Op_Pipe:
        self._Next()
      else:
        break
    rparen_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.Right_CasePat)
    self._NewlineOk()
    # The action body is optional: ';;' or 'esac' may follow immediately.
    if self.c_id not in (Id.Op_DSemi, Id.KW_Esac):
      c_list = self._ParseCommandTerm()
      assert c_list is not None
      action_children = c_list.children
    else:
      action_children = []
    dsemi_spid = const.NO_INTEGER
    last_spid = const.NO_INTEGER
    self._Peek()
    if self.c_id == Id.KW_Esac:
      # The last arm may omit ';;'; don't consume 'esac' here.
      last_spid = word.LeftMostSpanForWord(self.cur_word)
    elif self.c_id == Id.Op_DSemi:
      dsemi_spid = word.LeftMostSpanForWord(self.cur_word)
      self._Next()
    else:
      # Happens on EOF
      p_die('Expected ;; or esac', word=self.cur_word)
    self._NewlineOk()
    arm = syntax_asdl.case_arm(pat_words, action_children)
    arm.spids.extend((left_spid, rparen_spid, dsemi_spid, last_spid))
    return arm
  def ParseCaseList(self, arms):
    """Parse case arms until 'esac', appending each to the given list.

    case_list: case_item (DSEMI newline_ok case_item)* DSEMI? newline_ok;
    """
    self._Peek()
    while True:
      # case item begins with a command word or (
      if self.c_id == Id.KW_Esac:
        break
      if self.c_kind != Kind.Word and self.c_id != Id.Op_LParen:
        break
      arm = self.ParseCaseItem()
      assert arm is not None
      arms.append(arm)
      self._Peek()
      # Now look for DSEMI or ESAC
  def ParseCase(self):
    """
    case_clause : Case WORD newline_ok in newline_ok case_list? Esac ;
    """
    case_node = command.Case()
    case_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()  # skip case
    self._Peek()
    case_node.to_match = self.cur_word  # the word being matched
    self._Next()
    self._NewlineOk()
    in_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.KW_In)
    self._NewlineOk()
    if self.c_id != Id.KW_Esac:  # empty case list
      self.ParseCaseList(case_node.arms)
      # TODO: should it return a list of nodes, and extend?
      self._Peek()
    esac_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.KW_Esac)
    self._Next()
    case_node.spids.extend((case_spid, in_spid, esac_spid))
    return case_node
  def _ParseElifElse(self, if_node):
    """Parse any elif arms and an optional else clause into if_node.

    else_part: (Elif command_list Then command_list)* Else command_list ;
    """
    arms = if_node.arms
    self._Peek()
    # Each elif becomes another if_arm appended to the existing list.
    while self.c_id == Id.KW_Elif:
      elif_spid = word.LeftMostSpanForWord(self.cur_word)
      self._Next()  # skip elif
      cond = self._ParseCommandList()
      assert cond is not None
      then_spid = word.LeftMostSpanForWord(self.cur_word)
      self._Eat(Id.KW_Then)
      body = self._ParseCommandList()
      assert body is not None
      arm = syntax_asdl.if_arm(cond.children, body.children)
      arm.spids.extend((elif_spid, then_spid))
      arms.append(arm)
    if self.c_id == Id.KW_Else:
      else_spid = word.LeftMostSpanForWord(self.cur_word)
      self._Next()
      body = self._ParseCommandList()
      assert body is not None
      if_node.else_action = body.children
    else:
      else_spid = const.NO_INTEGER  # no else clause
    if_node.spids.append(else_spid)
  def ParseIf(self):
    """
    if_clause : If command_list Then command_list else_part? Fi ;
    """
    if_node = command.If()
    self._Next()  # skip if
    cond = self._ParseCommandList()
    assert cond is not None
    then_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.KW_Then)
    body = self._ParseCommandList()
    assert body is not None
    arm = syntax_asdl.if_arm(cond.children, body.children)
    arm.spids.extend((const.NO_INTEGER, then_spid))  # no if spid at first?
    if_node.arms.append(arm)
    # _ParseElifElse appends the else spid; mirror that when there is none.
    if self.c_id in (Id.KW_Elif, Id.KW_Else):
      self._ParseElifElse(if_node)
    else:
      if_node.spids.append(const.NO_INTEGER)  # no else spid
    fi_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.KW_Fi)
    if_node.spids.append(fi_spid)
    return if_node
def ParseTime(self):
"""
time [-p] pipeline
According to bash help.
"""
self._Next() # skip time
pipeline = self.ParsePipeline()
assert pipeline is not None
return command.TimeBlock(pipeline)
def ParseCompoundCommand(self):
"""
compound_command : brace_group
| subshell
| for_clause
| while_clause
| until_clause
| if_clause
| case_clause
| time_clause
| [[ BoolExpr ]]
| (( ArithExpr ))
;
"""
if self.c_id == Id.Lit_LBrace:
return self.ParseBraceGroup()
if self.c_id == Id.Op_LParen:
return self.ParseSubshell()
if self.c_id == Id.KW_For:
return self.ParseFor()
if self.c_id in (Id.KW_While, Id.KW_Until):
return self.ParseWhileUntil()
if self.c_id == Id.KW_If:
return self.ParseIf()
if self.c_id == Id.KW_Case:
return self.ParseCase()
if self.c_id == Id.KW_Time:
return self.ParseTime()
# Example of redirect that is observable:
# $ (( $(echo one 1>&2; echo 2) > 0 )) 2> out.txt
if self.c_id == Id.KW_DLeftBracket:
return self.ParseDBracket()
if self.c_id == Id.Op_DLeftParen:
return self.ParseDParen()
# This never happens?
p_die('Unexpected word while parsing compound command', word=self.cur_word)
def ParseFunctionBody(self, func):
"""
function_body : compound_command io_redirect* ; /* Apply rule 9 */
"""
body = self.ParseCompoundCommand()
assert body is not None
redirects = self._ParseRedirectList()
assert redirects is not None
func.body = body
func.redirects = redirects
  def ParseFunctionDef(self):
    """Parse a POSIX-style function definition: name() body.

    function_header : fname '(' ')'
    function_def    : function_header newline_ok function_body ;

    Precondition: Looking at the function name.
    Post condition:

    NOTE: There is an ambiguity with:

    function foo ( echo hi ) and
    function foo () ( echo hi )

    Bash only accepts the latter, though it doesn't really follow a grammar.
    """
    left_spid = word.LeftMostSpanForWord(self.cur_word)
    ok, name = word.AsFuncName(self.cur_word)
    if not ok:
      p_die('Invalid function name', word=self.cur_word)
    self._Next()  # skip function name
    # Must be true because of lookahead
    self._Peek()
    assert self.c_id == Id.Op_LParen, self.cur_word
    # ')' here closes the (empty) parameter list, not a subshell.
    self.lexer.PushHint(Id.Op_RParen, Id.Right_FuncDef)
    self._Next()
    self._Eat(Id.Right_FuncDef)
    after_name_spid = word.LeftMostSpanForWord(self.cur_word) + 1
    self._NewlineOk()
    func = command.FuncDef()
    func.name = name
    self.ParseFunctionBody(func)
    func.spids.append(left_spid)
    func.spids.append(after_name_spid)
    return func
  def ParseKshFunctionDef(self):
    """Parse a ksh-style function definition: 'function' name [()] body.

    ksh_function_def : 'function' fname ( '(' ')' )? newline_ok function_body
    """
    left_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()  # skip past 'function'
    self._Peek()
    ok, name = word.AsFuncName(self.cur_word)
    if not ok:
      p_die('Invalid KSH-style function name', word=self.cur_word)
    after_name_spid = word.LeftMostSpanForWord(self.cur_word) + 1
    self._Next()  # skip past the function name
    self._Peek()
    # The '()' after the name is optional in the ksh form.
    if self.c_id == Id.Op_LParen:
      self.lexer.PushHint(Id.Op_RParen, Id.Right_FuncDef)
      self._Next()
      self._Eat(Id.Right_FuncDef)
      # Change it: after )
      after_name_spid = word.LeftMostSpanForWord(self.cur_word) + 1
    self._NewlineOk()
    func = command.FuncDef()
    func.name = name
    self.ParseFunctionBody(func)
    func.spids.append(left_spid)
    func.spids.append(after_name_spid)
    return func
  def ParseCoproc(self):
    """Parse the bash 'coproc' construct.  Not implemented yet (TODO)."""
    raise NotImplementedError
  def ParseSubshell(self):
    """Parse a subshell: '(' command_list ')'.

    Precondition: the cursor is on the '('.
    """
    left_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()  # skip past (
    # Ensure that something $( (cd / && pwd) ) works.  If ) is already on the
    # translation stack, we want to delay it.
    self.lexer.PushHint(Id.Op_RParen, Id.Right_Subshell)
    c_list = self._ParseCommandList()
    assert c_list is not None
    # Remove singleton CommandList as an optimization.
    if len(c_list.children) == 1:
      child = c_list.children[0]
    else:
      child = c_list
    node = command.Subshell(child)
    right_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Eat(Id.Right_Subshell)
    node.spids.extend((left_spid, right_spid))
    return node
  def ParseDBracket(self):
    """
    Pass the underlying word parser off to the boolean expression parser.
    """
    # NOTE(review): maybe_error_word is currently unused — presumably kept
    # for future error reporting; confirm before removing.
    maybe_error_word = self.cur_word
    # TODO: Test interactive.  Without closing ]], you should get > prompt
    # (PS2)
    self._Next()  # skip [[
    b_parser = bool_parse.BoolParser(self.w_parser)
    bnode = b_parser.Parse()  # May raise
    return command.DBracket(bnode)
  def ParseDParen(self):
    """Parse an arithmetic command: (( ArithExpr )).

    The word parser handles the expression and the closing '))'.
    """
    # NOTE(review): maybe_error_word is currently unused — presumably kept
    # for future error reporting; confirm before removing.
    maybe_error_word = self.cur_word
    left_spid = word.LeftMostSpanForWord(self.cur_word)
    self._Next()  # skip ((
    anode, right_spid = self.w_parser.ReadDParen()
    assert anode is not None
    node = command.DParen(anode)
    node.spids.append(left_spid)
    node.spids.append(right_spid)
    return node
def ParseCommand(self, cur_aliases=None):
    """
    command          : simple_command
                     | compound_command io_redirect*
                     | function_def
                     | ksh_function_def
                     ;

    Args:
        cur_aliases: aliases already being expanded (cycle prevention);
            defaults to an empty list.
    """
    cur_aliases = cur_aliases or []

    self._Peek()

    if self.c_id in NOT_FIRST_WORDS:
        p_die('Unexpected word when parsing command', word=self.cur_word)

    if self.c_id == Id.KW_Function:
        return self.ParseKshFunctionDef()

    # NOTE: We should have another Kind for "initial keywords".  And then
    # NOT_FIRST_WORDS are "secondary keywords".
    if self.c_id in (
        Id.KW_DLeftBracket, Id.Op_DLeftParen, Id.Op_LParen, Id.Lit_LBrace,
        Id.KW_For, Id.KW_While, Id.KW_Until, Id.KW_If, Id.KW_Case, Id.KW_Time):
        node = self.ParseCompoundCommand()
        assert node is not None
        if node.tag != command_e.TimeBlock:  # The only one without redirects
            node.redirects = self._ParseRedirectList()
            assert node.redirects is not None
        return node

    # NOTE: I added this to fix cases in parse-errors.test.sh, but it doesn't
    # work because Lit_RBrace is in END_LIST below.
    # TODO: KW_Do is also invalid here.
    if self.c_id == Id.Lit_RBrace:
        p_die('Unexpected right brace', word=self.cur_word)

    if self.c_kind == Kind.Redir:  # Leading redirect
        return self.ParseSimpleCommand(cur_aliases)

    if self.c_kind == Kind.Word:
        if (self.w_parser.LookAhead() == Id.Op_LParen and
            not word.IsVarLike(self.cur_word)):
            return self.ParseFunctionDef()  # f() { echo; }  # function

        # echo foo
        # f=(a b c)  # array
        # array[1+2]+=1
        return self.ParseSimpleCommand(cur_aliases)

    if self.c_kind == Kind.Eof:
        p_die("Unexpected EOF while parsing command", word=self.cur_word)

    # e.g. )
    p_die("Invalid word while parsing command", word=self.cur_word)
def ParsePipeline(self):
    """
    pipeline         : Bang? command ( '|' newline_ok command )* ;
    """
    negated = False

    self._Peek()
    if self.c_id == Id.KW_Bang:
        negated = True
        self._Next()

    child = self.ParseCommand()
    assert child is not None

    children = [child]

    self._Peek()
    if self.c_id not in (Id.Op_Pipe, Id.Op_PipeAmp):
        if negated:
            # A bare `! cmd` still needs a Pipeline node to carry the negation.
            node = command.Pipeline(children, negated)
            return node
        else:
            return child

    pipe_index = 0
    stderr_indices = []  # indices of |& (stderr piped too)

    if self.c_id == Id.Op_PipeAmp:
        stderr_indices.append(pipe_index)
    pipe_index += 1

    while True:
        self._Next()  # skip past Id.Op_Pipe or Id.Op_PipeAmp
        self._NewlineOk()

        child = self.ParseCommand()
        assert child is not None
        children.append(child)

        self._Peek()
        if self.c_id not in (Id.Op_Pipe, Id.Op_PipeAmp):
            break

        if self.c_id == Id.Op_PipeAmp:
            stderr_indices.append(pipe_index)
        pipe_index += 1

    node = command.Pipeline(children, negated)
    node.stderr_indices = stderr_indices
    return node
def ParseAndOr(self):
    """
    and_or           : and_or ( AND_IF | OR_IF ) newline_ok pipeline
                     | pipeline

    Note that it is left recursive and left associative.  We parse it
    iteratively with a token of lookahead.
    """
    child = self.ParsePipeline()
    assert child is not None

    self._Peek()
    if self.c_id not in (Id.Op_DPipe, Id.Op_DAmp):
        return child

    ops = []
    children = [child]

    while True:
        ops.append(self.c_id)

        self._Next()  # skip past || &&
        self._NewlineOk()

        child = self.ParsePipeline()
        assert child is not None

        children.append(child)

        self._Peek()
        if self.c_id not in (Id.Op_DPipe, Id.Op_DAmp):
            break

    node = command.AndOr(ops, children)
    return node
# NOTE: _ParseCommandLine and _ParseCommandTerm are similar, but different.
# At the top level, We want to execute after every line:
# - to process alias
# - to process 'exit', because invalid syntax might appear after it
# But for say a while loop body, we want to parse the whole thing at once, and
# then execute it. We don't want to parse it over and over again!
# COMPARE
# command_line : and_or (sync_op and_or)* trailer? ; # TOP LEVEL
# command_term : and_or (trailer and_or)* ; # CHILDREN
def _ParseCommandLine(self):
    """
    command_line     : and_or (sync_op and_or)* trailer? ;
    trailer          : sync_op newline_ok
                     | NEWLINES;
    sync_op          : '&' | ';';

    NOTE: This rule causes LL(k > 1) behavior.  We would have to peek to see if
    there is another command word after the sync op.

    But it's easier to express imperatively.  Do the following in a loop:
    1. ParseAndOr
    2. Peek.
       a. If there's a newline, then return.  (We're only parsing a single
          line.)
       b. If there's a sync_op, process it.  Then look for a newline and
          return.  Otherwise, parse another AndOr.
    """
    # NOTE: This is slightly different than END_LIST in _ParseCommandTerm, and
    # unfortunately somewhat ad hoc.
    END_LIST = (Id.Op_Newline, Id.Eof_Real, Id.Op_RParen)

    children = []
    done = False
    while not done:
        child = self.ParseAndOr()
        assert child is not None

        self._Peek()
        if self.c_id in (Id.Op_Semi, Id.Op_Amp):  # also Id.Op_Amp.
            child = command.Sentence(child, self.cur_word.token)
            self._Next()

            self._Peek()
            if self.c_id in END_LIST:
                done = True

        elif self.c_id in END_LIST:
            done = True

        else:
            # e.g. echo a(b)
            p_die('Unexpected word while parsing command line',
                  word=self.cur_word)

        children.append(child)

    # Simplify the AST.
    if len(children) > 1:
        return command.CommandList(children)
    else:
        return children[0]
def _ParseCommandTerm(self):
    """
    command_term     : and_or (trailer and_or)* ;
    trailer          : sync_op newline_ok
                     | NEWLINES;
    sync_op          : '&' | ';';

    This is handled in imperative style, like _ParseCommandLine.
    Called by _ParseCommandList for all blocks, and also for ParseCaseItem,
    which is slightly different.  (HOW?  Is it the DSEMI?)

    Returns:
        syntax_asdl.command
    """
    # Token types that will end the command term.
    END_LIST = (self.eof_id, Id.Right_Subshell, Id.Lit_RBrace, Id.Op_DSemi)

    # NOTE: This is similar to _ParseCommandLine.
    #
    # - Why aren't we doing END_LIST in _ParseCommandLine?
    #   - Because you will never be inside $() at the top level.
    #   - We also know it will end in a newline.  It can't end in "fi"!
    #     - example: if true; then { echo hi; } fi

    children = []
    done = False
    while not done:
        self._Peek()

        # Most keywords are valid "first words".  But do/done/then do not BEGIN
        # commands, so they are not valid.
        if self.c_id in NOT_FIRST_WORDS:
            break

        child = self.ParseAndOr()
        assert child is not None

        self._Peek()
        if self.c_id == Id.Op_Newline:
            self._Next()

            self._Peek()
            if self.c_id in END_LIST:
                done = True

        elif self.c_id in (Id.Op_Semi, Id.Op_Amp):
            child = command.Sentence(child, self.cur_word.token)
            self._Next()

            self._Peek()
            if self.c_id == Id.Op_Newline:
                self._Next()  # skip over newline

                # Test if we should keep going.  There might be another command after
                # the semi and newline.
                self._Peek()
                if self.c_id in END_LIST:  # \n EOF
                    done = True

            elif self.c_id in END_LIST:  # ; EOF
                done = True

        elif self.c_id in END_LIST:  # EOF
            done = True

        else:
            pass  # e.g. "} done", "fi fi", ") fi", etc. is OK

        children.append(child)

    self._Peek()

    return command.CommandList(children)
# TODO: Make this private.
def _ParseCommandList(self):
    """
    command_list     : newline_ok command_term trailer? ;

    This one is called by all the compound commands.  It's basically a command
    block.

    NOTE: Rather than translating the CFG directly, the code follows a style
    more like this: (and_or trailer)+.  It makes capture easier.
    """
    self._NewlineOk()
    node = self._ParseCommandTerm()
    assert node is not None
    return node
def ParseLogicalLine(self):
    """Parse a single line for main_loop.

    A wrapper around _ParseCommandLine().  Similar but not identical to
    _ParseCommandList() and ParseCommandSub().

    Returns:
        A command node, or None at real EOF.

    Raises:
        ParseError

    We want to be able catch ParseError all in one place.
    """
    self._NewlineOk()
    self._Peek()
    if self.c_id == Id.Eof_Real:
        return None
    node = self._ParseCommandLine()
    assert node is not None
    return node
def ParseCommandSub(self):
    """Parse $(echo hi) and `echo hi` for word_parse.py.

    They can have multiple lines, like this:
    echo $(
      echo one
      echo two
    )
    """
    self._NewlineOk()

    if self.c_kind == Kind.Eof:  # e.g. $()
        return command.NoOp()

    # This calls ParseAndOr(), but I think it should be a loop that calls
    # _ParseCommandLine(), like oil.InteractiveLoop.
    node = self._ParseCommandTerm()
    assert node is not None
    return node
def CheckForPendingHereDocs(self):
    """Die if any here doc was opened but never terminated.

    NOTE: This happens when there is no newline at the end of a file, like
    osh -c 'cat <<EOF'
    """
    if self.pending_here_docs:
        node = self.pending_here_docs[0]  # Just show the first one?
        p_die('Unterminated here doc began here', word=node.here_begin)
| 4,225 | 0 | 172 |
f4e69fb510368b9bfea693ac6496cf38c17f73ce | 26,178 | py | Python | desdeo_tools/scalarization/GLIDE_II.py | giomara-larraga/desdeo-tools | be3c96933c4db3180560617e12578851d5958aa3 | [
"MIT"
] | null | null | null | desdeo_tools/scalarization/GLIDE_II.py | giomara-larraga/desdeo-tools | be3c96933c4db3180560617e12578851d5958aa3 | [
"MIT"
] | null | null | null | desdeo_tools/scalarization/GLIDE_II.py | giomara-larraga/desdeo-tools | be3c96933c4db3180560617e12578851d5958aa3 | [
"MIT"
] | null | null | null | from abc import abstractmethod
from typing import Union
import numpy as np
class GLIDEError(Exception):
    """Raised when an error related to the GLIDE scalarization classes is encountered.
    """
class GLIDEBase:
    """
    Implements the non-differentiable variant of GLIDE-II as proposed in

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Note:
        Additional contraints produced by the GLIDE-II formulation are implemented
        such that if the returned values are negative, the corresponding constraint is
        violated. The returned value may be positive. In such cases, the returned value
        is a measure of how close or far the corresponding feasible solution is from
        violating the constraint.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): __init__ and the abstract property stubs below were lost
    # during extraction (only dangling decorator lines survived); they are
    # restored here from the intact copy of this module.
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        # Subclasses that generate extra constraints set this to True.
        self.has_additional_constraints = False
        self.utopian = utopian
        self.nadir = nadir
        self.rho = rho
        # Maps preference-dict keys to human-readable explanations.
        self.required_keys: dict = {}
        self.extras = kwargs

    def __call__(self, objective_vector: np.ndarray, preference: dict) -> np.ndarray:
        """Evaluate the scalarization function value based on objective vectors and
        DM preference.

        Args:
            objective_vector (np.ndarray): 2-dimensional array of objective values of solutions.
            preference (dict): The preference given by the decision maker. The required
                dictionary keys and their meanings can be found in self.required_keys variable.

        Returns:
            np.ndarray: The scalarized value obtained by using GLIDE-II over
                objective_vector.
        """
        self.preference = preference
        self.objective_vector = objective_vector
        f_minus_q = np.atleast_2d(objective_vector - self.q)
        mu = np.atleast_2d(self.mu)
        I_alpha = self.I_alpha
        # Achievement (max) term over the alpha-indexed objectives, plus the
        # rho-weighted augmentation sum term.
        max_term = np.max(mu[:, I_alpha] * f_minus_q[:, I_alpha], axis=1)
        sum_term = self.rho * np.sum(self.w * f_minus_q, axis=1)
        return max_term + sum_term

    def evaluate_constraints(
        self, objective_vector: np.ndarray, preference: dict
    ) -> Union[None, np.ndarray]:
        """Evaluate the additional contraints generated by the GLIDE-II formulation.

        Note:
            Negative returned values mean the corresponding constraint is
            violated; positive values measure slack from violation.

        Args:
            objective_vector (np.ndarray): 2-dimensional array of objective values.
            preference (dict): The DM preference (see self.required_keys).

        Returns:
            Union[None, np.ndarray]: None if the subclass produces no extra
                constraints, otherwise the constraint values.
        """
        if not self.has_additional_constraints:
            return None
        self.preference = preference
        self.objective_vector = objective_vector
        constraints = (
            self.epsilon[self.I_epsilon]
            + self.s_epsilon * self.delta_epsilon[self.I_epsilon]
            - objective_vector[:, self.I_epsilon]
        )
        return constraints

    # Abstract interface each concrete GLIDE method must provide.
    @property
    @abstractmethod
    def I_alpha(self):
        pass

    @property
    @abstractmethod
    def I_epsilon(self):
        pass

    @property
    @abstractmethod
    def mu(self):
        pass

    @property
    @abstractmethod
    def q(self):
        pass

    @property
    @abstractmethod
    def w(self):
        pass

    @property
    @abstractmethod
    def epsilon(self):
        pass

    @property
    @abstractmethod
    def s_epsilon(self):
        pass

    @property
    @abstractmethod
    def delta_epsilon(self):
        pass
class reference_point_method_GLIDE(GLIDEBase):
    """
    Implements the reference point method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): bodies below were lost during extraction and are restored
    # from the intact copy of this module.
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.has_additional_constraints = False
        # Every objective participates in the max term; none are epsilon-bound.
        self.__I_alpha = np.full_like(
            utopian, dtype=np.bool_, fill_value=True
        ).flatten()
        self.__I_epsilon = np.full_like(
            utopian, dtype=np.bool_, fill_value=False
        ).flatten()
        self.__w = 1
        # Direction of improvement: parallel to the nadir-utopian vector.
        self.__mu = 1 / (nadir - utopian)
        self.required_keys = {
            "reference point": (
                "Used to calculate the direction of improvement: "
                "a line parallel to the nadir-utopian vector "
                "and passing through the reference point. "
                "(type: numpy.ndarray)"
            )
        }

    @property
    def I_epsilon(self):
        return self.__I_epsilon

    @property
    def I_alpha(self):
        return self.__I_alpha

    @property
    def mu(self):
        return self.__mu

    @property
    def w(self):
        return self.__w

    @property
    def q(self):
        return self.preference["reference point"]

    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
class GUESS_GLIDE(GLIDEBase):
    """
    Implements the GUESS method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): bodies below were lost during extraction and are restored
    # from the intact copy of this module.
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.has_additional_constraints = False
        self.__I_alpha = np.full_like(
            utopian, dtype=np.bool_, fill_value=True
        ).flatten()
        self.__I_epsilon = np.full_like(
            utopian, dtype=np.bool_, fill_value=False
        ).flatten()
        # Plain GUESS has no augmentation term (w = 0).
        self.__w = 0
        self.required_keys = {
            "reference point": (
                "Used to calculate the direction of improvement: "
                "a line going from the nadir point to the reference point. "
                "(type: numpy.ndarray)"
            )
        }

    @property
    def I_epsilon(self):
        return self.__I_epsilon

    @property
    def I_alpha(self):
        return self.__I_alpha

    @property
    def mu(self):
        # Weights depend on the current reference point, hence computed lazily.
        return 1 / (self.nadir - self.preference["reference point"])

    @property
    def w(self):
        return self.__w

    @property
    def q(self):
        return self.preference["reference point"]

    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
class AUG_GUESS_GLIDE(GUESS_GLIDE):
    """
    Implements the Augmented GUESS method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        # BUG FIX: the original assigned `self.__w = 1`, which name-mangles to
        # `_AUG_GUESS_GLIDE__w` while the inherited GUESS_GLIDE.w property
        # reads `_GUESS_GLIDE__w` -- so the augmentation weight silently
        # stayed 0.  Assign the parent's mangled attribute so `w` returns 1.
        self._GUESS_GLIDE__w = 1
class NIMBUS_GLIDE(GLIDEBase):
    """
    Implements the NIMBUS method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): bodies below were lost during extraction and are restored
    # from the intact copy of this module.
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.__mu = self.__w = 1 / (self.nadir - self.utopian)
        self.has_additional_constraints = True
        self.required_keys = {
            "current solution": (
                "A solution preferred by the DM currently. " "(type: numpy.ndarray)"
            ),
            "classifications": (
                "A list of same length as the number of objectives. Elements can only "
                "include some or all of ['<', '<=', '=', '>=', '0']. These classify "
                "the different objectives as defined in the NIMBUS or GLIDE-II paper. "
                "(type: list)"
            ),
            "levels": (
                "A vector containing desirable levels of objectives or constraining bounds "
                "depending on the classification. Same length as the number of objectives. "
                "(type: numpy.ndarray)"
            ),
        }

    # Boolean masks derived from the DM's per-objective classifications.
    @property
    def improve_unconstrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "<")[0]
        indices[relevant] = True
        return indices

    @property
    def improve_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
        indices[relevant] = True
        return indices

    @property
    def satisfactory(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
        indices[relevant] = True
        return indices

    @property
    def relax_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
        indices[relevant] = True
        return indices

    @property
    def relax_unconstrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "0")[0]
        indices[relevant] = True
        return indices

    @property
    def I_alpha(self):
        return self.improve_unconstrained + self.improve_constrained

    @property
    def I_epsilon(self):
        return (
            self.improve_unconstrained
            + self.improve_constrained
            + self.satisfactory
            + self.relax_constrained
        )

    @property
    def w(self):
        # This was in the paper
        return self.__w
        # This is what I think it should be. There may be division by zero errors here.
        """return (self.objective_vector / (self.objective_vector - self.q)) / (
            self.nadir - self.utopian
        )"""

    @property
    def mu(self):
        return self.__mu

    @property
    def q(self):
        q = np.full_like(self.utopian, fill_value=0, dtype=float)
        q[self.improve_unconstrained] = self.utopian[self.improve_unconstrained]
        q[self.improve_constrained] = self.preference["levels"][
            self.improve_constrained
        ]
        return q

    @property
    def epsilon(self):
        e = np.full_like(self.utopian, fill_value=np.nan, dtype=float)
        case1 = (
            self.improve_constrained + self.improve_unconstrained + self.satisfactory
        )
        case2 = self.relax_constrained
        e[case1] = self.preference["current solution"][case1]
        e[case2] = self.preference["levels"][case2]
        return e

    @property
    def s_epsilon(self):
        return 0

    @property
    def delta_epsilon(self):
        return np.zeros_like(self.utopian)
class STEP_GLIDE(GLIDEBase):
    """
    Implements the STEP method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): bodies below were lost during extraction and are restored
    # from the intact copy of this module (with the I_epsilon fix).
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.__mu = (self.nadir - self.utopian) / np.max(
            np.abs(np.vstack((utopian, nadir))), axis=0
        )
        self.__w = 0
        # BUG FIX: assigning `self.I_epsilon = ...` hits the setter-less
        # I_epsilon property inherited from GLIDEBase and raises
        # AttributeError.  Store privately and expose via a property instead.
        self.__I_epsilon = np.full_like(self.utopian, dtype=np.bool_, fill_value=True)
        self.has_additional_constraints = True
        self.required_keys = {
            "current solution": (
                "A solution preferred by the DM currently. " "(type: numpy.ndarray)"
            ),
            "classifications": (
                "A list of same length as the number of objectives. Elements can only "
                "include some or all of [<=', '=', '>=']. These classify "
                "the different objectives as defined in the GLIDE-II paper. "
                "(type: list)"
            ),
            "levels": (
                "A vector containing desirable levels of objectives or constraining bounds "
                "depending on the classification. Same length as the number of objectives. "
                "(type: numpy.ndarray)"
            ),
        }

    @property
    def I_epsilon(self):
        return self.__I_epsilon

    @property
    def improve_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
        indices[relevant] = True
        return indices

    @property
    def satisfactory(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
        indices[relevant] = True
        return indices

    @property
    def relax_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
        indices[relevant] = True
        return indices

    @property
    def I_alpha(self):
        return self.improve_constrained

    @property
    def w(self):
        # This was in the paper
        return self.__w

    @property
    def mu(self):
        return self.__mu

    @property
    def q(self):
        q = np.full_like(self.utopian, fill_value=0, dtype=float)
        q[self.improve_constrained] = self.utopian[self.improve_constrained]
        return q

    @property
    def epsilon(self):
        e = np.full_like(self.utopian, fill_value=np.nan, dtype=float)
        case1 = self.improve_constrained + self.satisfactory
        case2 = self.relax_constrained
        e[case1] = self.preference["current solution"][case1]
        e[case2] = self.preference["levels"][case2]
        return e

    @property
    def s_epsilon(self):
        return 0

    @property
    def delta_epsilon(self):
        return np.zeros_like(self.utopian)
class STOM_GLIDE(GLIDEBase):
    """
    Implements the STOM method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Has no effect on STOM calculation. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): the concrete property bodies of this class were lost when
    # this file was extracted (only decorator lines survived).  The stubs
    # below make the class parse again; restore the real implementations from
    # version control before use.
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        # STOM ignores the nadir point (passed as None to the base class).
        super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
        self.has_additional_constraints = False

    @property
    def I_epsilon(self):
        raise NotImplementedError("STOM_GLIDE.I_epsilon lost in extraction; restore from VCS")

    @property
    def I_alpha(self):
        raise NotImplementedError("STOM_GLIDE.I_alpha lost in extraction; restore from VCS")

    @property
    def mu(self):
        raise NotImplementedError("STOM_GLIDE.mu lost in extraction; restore from VCS")

    @property
    def w(self):
        raise NotImplementedError("STOM_GLIDE.w lost in extraction; restore from VCS")

    @property
    def q(self):
        raise NotImplementedError("STOM_GLIDE.q lost in extraction; restore from VCS")

    @property
    def epsilon(self):
        raise NotImplementedError("STOM_GLIDE.epsilon lost in extraction; restore from VCS")

    @property
    def s_epsilon(self):
        raise NotImplementedError("STOM_GLIDE.s_epsilon lost in extraction; restore from VCS")

    @property
    def delta_epsilon(self):
        raise NotImplementedError("STOM_GLIDE.delta_epsilon lost in extraction; restore from VCS")
class AUG_STOM_GLIDE(STOM_GLIDE):
"""
Implements the Augmented STOM method of preference elicitation and scalarization
using the non-differentiable variant of GLIDE-II as proposed in:
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Has no effect on STOM calculation. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
# NOTE(review): the original class body (presumably an __init__ enabling the
# augmentation weight) was stripped during extraction; as written this class
# behaves identically to STOM_GLIDE.  Restore the lost body from version
# control.
class Tchebycheff_GLIDE(GLIDEBase):
    """
    Implements the Tchebycheff method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): the concrete property bodies of this class were lost when
    # this file was extracted (only decorator lines survived).  The stubs
    # below make the class parse again; restore the real implementations from
    # version control before use.
    @property
    def I_epsilon(self):
        raise NotImplementedError("Tchebycheff_GLIDE.I_epsilon lost in extraction; restore from VCS")

    @property
    def I_alpha(self):
        raise NotImplementedError("Tchebycheff_GLIDE.I_alpha lost in extraction; restore from VCS")

    @property
    def mu(self):
        raise NotImplementedError("Tchebycheff_GLIDE.mu lost in extraction; restore from VCS")

    @property
    def w(self):
        raise NotImplementedError("Tchebycheff_GLIDE.w lost in extraction; restore from VCS")

    @property
    def q(self):
        raise NotImplementedError("Tchebycheff_GLIDE.q lost in extraction; restore from VCS")

    @property
    def epsilon(self):
        raise NotImplementedError("Tchebycheff_GLIDE.epsilon lost in extraction; restore from VCS")

    @property
    def s_epsilon(self):
        raise NotImplementedError("Tchebycheff_GLIDE.s_epsilon lost in extraction; restore from VCS")

    @property
    def delta_epsilon(self):
        raise NotImplementedError("Tchebycheff_GLIDE.delta_epsilon lost in extraction; restore from VCS")
class PROJECT_GLIDE(GLIDEBase):
    """
    Implements the PROJECT method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    # NOTE(review): the concrete property bodies of this class were lost when
    # this file was extracted (only decorator lines survived).  The stubs
    # below make the class parse again; restore the real implementations from
    # version control before use.
    @property
    def I_epsilon(self):
        raise NotImplementedError("PROJECT_GLIDE.I_epsilon lost in extraction; restore from VCS")

    @property
    def I_alpha(self):
        raise NotImplementedError("PROJECT_GLIDE.I_alpha lost in extraction; restore from VCS")

    @property
    def mu(self):
        raise NotImplementedError("PROJECT_GLIDE.mu lost in extraction; restore from VCS")

    @property
    def w(self):
        raise NotImplementedError("PROJECT_GLIDE.w lost in extraction; restore from VCS")

    @property
    def q(self):
        raise NotImplementedError("PROJECT_GLIDE.q lost in extraction; restore from VCS")

    @property
    def epsilon(self):
        raise NotImplementedError("PROJECT_GLIDE.epsilon lost in extraction; restore from VCS")

    @property
    def s_epsilon(self):
        raise NotImplementedError("PROJECT_GLIDE.s_epsilon lost in extraction; restore from VCS")

    @property
    def delta_epsilon(self):
        raise NotImplementedError("PROJECT_GLIDE.delta_epsilon lost in extraction; restore from VCS")
| 31.92439 | 107 | 0.626404 | from abc import abstractmethod
from typing import Union
import numpy as np
class GLIDEError(Exception):
"""Raised when an error related to the ASF classes is encountered.
"""
class GLIDEBase:
"""
Implements the non-differentiable variant of GLIDE-II as proposed in
Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
"Improving the computational efficiency in a global formulation (GLIDE)
for interactive multiobjective optimization."
Annals of Operations Research 197.1 (2012): 47-70.
Note:
Additional contraints produced by the GLIDE-II formulation are implemented
such that if the returned values are negative, the corresponding constraint is
violated. The returned value may be positive. In such cases, the returned value
is a measure of how close or far the corresponding feasible solution is from
violating the constraint.
Args:
utopian (np.ndarray, optional): The utopian point. Defaults to None.
nadir (np.ndarray, optional): The nadir point. Defaults to None.
rho (float, optional): The augmentation term for the scalarization function.
Defaults to 1e-6.
"""
def __init__(
self,
utopian: np.ndarray = None,
nadir: np.ndarray = None,
rho: float = 1e-6,
**kwargs
):
self.has_additional_constraints = False
self.utopian = utopian
self.nadir = nadir
self.rho = rho
self.required_keys: dict = {}
self.extras = kwargs
def __call__(self, objective_vector: np.ndarray, preference: dict) -> np.ndarray:
"""Evaluate the scalarization function value based on objective vectors and
DM preference.
Args:
objective_vector (np.ndarray): 2-dimensional array of objective values of solutions.
preference (dict): The preference given by the decision maker. The required
dictionary keys and their meanings can be found in self.required_keys variable.
Returns:
np.ndarray: The scalarized value obtained by using GLIDE-II over
objective_vector.
"""
self.preference = preference
self.objective_vector = objective_vector
f_minus_q = np.atleast_2d(objective_vector - self.q)
mu = np.atleast_2d(self.mu)
I_alpha = self.I_alpha
max_term = np.max(mu[:, I_alpha] * f_minus_q[:, I_alpha], axis=1)
sum_term = self.rho * np.sum(self.w * f_minus_q, axis=1)
return max_term + sum_term
def evaluate_constraints(
self, objective_vector: np.ndarray, preference: dict
) -> Union[None, np.ndarray]:
"""Evaluate the additional contraints generated by the GLIDE-II formulation.
Note:
Additional contraints produced by the GLIDE-II formulation are implemented
such that if the returned values are negative, the corresponding constraint is
violated. The returned value may be positive. In such cases, the returned value
is a measure of how close or far the corresponding feasible solution is from
violating the constraint.
Args:
objective_vector (np.ndarray): [description]
preference (dict): [description]
Returns:
Union[None, np.ndarray]: [description]
"""
if not self.has_additional_constraints:
return None
self.preference = preference
self.objective_vector = objective_vector
constraints = (
self.epsilon[self.I_epsilon]
+ self.s_epsilon * self.delta_epsilon[self.I_epsilon]
- objective_vector[:, self.I_epsilon]
)
return constraints
@property
@abstractmethod
def I_alpha(self):
pass
@property
@abstractmethod
def I_epsilon(self):
pass
@property
@abstractmethod
def mu(self):
pass
@property
@abstractmethod
def q(self):
pass
@property
@abstractmethod
def w(self):
pass
@property
@abstractmethod
def epsilon(self):
pass
@property
@abstractmethod
def s_epsilon(self):
pass
@property
@abstractmethod
def delta_epsilon(self):
pass
class reference_point_method_GLIDE(GLIDEBase):
    """
    Implements the reference point method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.has_additional_constraints = False
        # Every objective participates in the max term; none are epsilon-bound.
        self.__I_alpha = np.full_like(
            utopian, dtype=np.bool_, fill_value=True
        ).flatten()
        self.__I_epsilon = np.full_like(
            utopian, dtype=np.bool_, fill_value=False
        ).flatten()
        self.__w = 1
        # Direction of improvement: parallel to the nadir-utopian vector.
        self.__mu = 1 / (nadir - utopian)
        self.required_keys = {
            "reference point": (
                "Used to calculate the direction of improvement: "
                "a line parallel to the nadir-utopian vector "
                "and passing through the reference point. "
                "(type: numpy.ndarray)"
            )
        }

    @property
    def I_epsilon(self):
        return self.__I_epsilon

    @property
    def I_alpha(self):
        return self.__I_alpha

    @property
    def mu(self):
        return self.__mu

    @property
    def w(self):
        return self.__w

    @property
    def q(self):
        return self.preference["reference point"]

    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
class GUESS_GLIDE(GLIDEBase):
    """
    Implements the GUESS method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.has_additional_constraints = False
        self.__I_alpha = np.full_like(
            utopian, dtype=np.bool_, fill_value=True
        ).flatten()
        self.__I_epsilon = np.full_like(
            utopian, dtype=np.bool_, fill_value=False
        ).flatten()
        # Plain GUESS has no augmentation term (w = 0).
        self.__w = 0
        self.required_keys = {
            "reference point": (
                "Used to calculate the direction of improvement: "
                "a line going from the nadir point to the reference point. "
                "(type: numpy.ndarray)"
            )
        }

    @property
    def I_epsilon(self):
        return self.__I_epsilon

    @property
    def I_alpha(self):
        return self.__I_alpha

    @property
    def mu(self):
        # Weights depend on the current reference point, hence computed lazily.
        return 1 / (self.nadir - self.preference["reference point"])

    @property
    def w(self):
        return self.__w

    @property
    def q(self):
        return self.preference["reference point"]

    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)

    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
class AUG_GUESS_GLIDE(GUESS_GLIDE):
    """
    Implements the Augmented GUESS method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        # BUG FIX: the original assigned `self.__w = 1`, which name-mangles to
        # `_AUG_GUESS_GLIDE__w` while the inherited GUESS_GLIDE.w property
        # reads `_GUESS_GLIDE__w` -- so the augmentation weight silently
        # stayed 0.  Assign the parent's mangled attribute so `w` returns 1.
        self._GUESS_GLIDE__w = 1
class NIMBUS_GLIDE(GLIDEBase):
    """
    Implements the NIMBUS method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.__mu = self.__w = 1 / (self.nadir - self.utopian)
        self.has_additional_constraints = True
        self.required_keys = {
            "current solution": (
                "A solution preferred by the DM currently. " "(type: numpy.ndarray)"
            ),
            "classifications": (
                "A list of same length as the number of objectives. Elements can only "
                "include some or all of ['<', '<=', '=', '>=', '0']. These classify "
                "the different objectives as defined in the NIMBUS or GLIDE-II paper. "
                "(type: list)"
            ),
            "levels": (
                "A vector containing desirable levels of objectives or constraining bounds "
                "depending on the classification. Same length as the number of objectives. "
                "(type: numpy.ndarray)"
            ),
        }

    # Boolean masks derived from the DM's per-objective classifications.
    @property
    def improve_unconstrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "<")[0]
        indices[relevant] = True
        return indices

    @property
    def improve_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
        indices[relevant] = True
        return indices

    @property
    def satisfactory(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
        indices[relevant] = True
        return indices

    @property
    def relax_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
        indices[relevant] = True
        return indices

    @property
    def relax_unconstrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "0")[0]
        indices[relevant] = True
        return indices

    @property
    def I_alpha(self):
        return self.improve_unconstrained + self.improve_constrained

    @property
    def I_epsilon(self):
        return (
            self.improve_unconstrained
            + self.improve_constrained
            + self.satisfactory
            + self.relax_constrained
        )

    @property
    def w(self):
        # This was in the paper
        return self.__w
        # This is what I think it should be. There may be division by zero errors here.
        """return (self.objective_vector / (self.objective_vector - self.q)) / (
            self.nadir - self.utopian
        )"""

    @property
    def mu(self):
        return self.__mu

    @property
    def q(self):
        q = np.full_like(self.utopian, fill_value=0, dtype=float)
        q[self.improve_unconstrained] = self.utopian[self.improve_unconstrained]
        q[self.improve_constrained] = self.preference["levels"][
            self.improve_constrained
        ]
        return q

    @property
    def epsilon(self):
        e = np.full_like(self.utopian, fill_value=np.nan, dtype=float)
        case1 = (
            self.improve_constrained + self.improve_unconstrained + self.satisfactory
        )
        case2 = self.relax_constrained
        e[case1] = self.preference["current solution"][case1]
        e[case2] = self.preference["levels"][case2]
        return e

    @property
    def s_epsilon(self):
        return 0

    @property
    def delta_epsilon(self):
        return np.zeros_like(self.utopian)
class STEP_GLIDE(GLIDEBase):
    """
    Implements the STEP method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:

    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.

    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=nadir, rho=rho, **kwargs)
        self.__mu = (self.nadir - self.utopian) / np.max(
            np.abs(np.vstack((utopian, nadir))), axis=0
        )
        self.__w = 0
        # BUG FIX: the original did `self.I_epsilon = ...`, which assigns
        # through the setter-less I_epsilon property inherited from GLIDEBase
        # and raises AttributeError at construction time.  Store privately and
        # expose via a read-only property instead (same external interface).
        self.__I_epsilon = np.full_like(self.utopian, dtype=np.bool_, fill_value=True)
        self.has_additional_constraints = True
        self.required_keys = {
            "current solution": (
                "A solution preferred by the DM currently. " "(type: numpy.ndarray)"
            ),
            "classifications": (
                "A list of same length as the number of objectives. Elements can only "
                "include some or all of [<=', '=', '>=']. These classify "
                "the different objectives as defined in the GLIDE-II paper. "
                "(type: list)"
            ),
            "levels": (
                "A vector containing desirable levels of objectives or constraining bounds "
                "depending on the classification. Same length as the number of objectives. "
                "(type: numpy.ndarray)"
            ),
        }

    @property
    def I_epsilon(self):
        return self.__I_epsilon

    @property
    def improve_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "<=")[0]
        indices[relevant] = True
        return indices

    @property
    def satisfactory(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == "=")[0]
        indices[relevant] = True
        return indices

    @property
    def relax_constrained(self):
        indices = np.full_like(self.utopian, dtype=np.bool_, fill_value=False)
        relevant = np.where(np.array(self.preference["classifications"]) == ">=")[0]
        indices[relevant] = True
        return indices

    @property
    def I_alpha(self):
        return self.improve_constrained

    @property
    def w(self):
        # This was in the paper
        return self.__w

    @property
    def mu(self):
        return self.__mu

    @property
    def q(self):
        q = np.full_like(self.utopian, fill_value=0, dtype=float)
        q[self.improve_constrained] = self.utopian[self.improve_constrained]
        return q

    @property
    def epsilon(self):
        e = np.full_like(self.utopian, fill_value=np.nan, dtype=float)
        case1 = self.improve_constrained + self.satisfactory
        case2 = self.relax_constrained
        e[case1] = self.preference["current solution"][case1]
        e[case2] = self.preference["levels"][case2]
        return e

    @property
    def s_epsilon(self):
        return 0

    @property
    def delta_epsilon(self):
        return np.zeros_like(self.utopian)
class STOM_GLIDE(GLIDEBase):
    """
    Implements the STOM method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:
    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.
    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Has no effect on STOM calculation. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        # ``nadir`` is accepted for interface symmetry but deliberately not
        # forwarded: STOM only needs the utopian point.
        super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
        self.has_additional_constraints = False
        # Every objective contributes to the alpha term ...
        self.__I_alpha = np.full_like(
            utopian, dtype=np.bool_, fill_value=True
        ).flatten()
        # ... and none to the epsilon constraints.
        self.__I_epsilon = np.full_like(
            utopian, dtype=np.bool_, fill_value=False
        ).flatten()
        # Plain STOM uses no augmentation term.
        self.__w = 0
        self.required_keys = {
            "reference point": (
                "Used to calculate the direction of improvement: "
                "a line going from the reference point to the utopian point. "
                "(type: numpy.ndarray)"
            )
        }
    @property
    def I_epsilon(self):
        return self.__I_epsilon
    @property
    def I_alpha(self):
        return self.__I_alpha
    @property
    def mu(self):
        # Direction of improvement: reciprocal of the distance from the
        # DM's reference point to the utopian point, per objective.
        return 1 / (self.preference["reference point"] - self.utopian)
    @property
    def w(self):
        return self.__w
    @property
    def q(self):
        return self.utopian
    # STOM has no epsilon constraints (has_additional_constraints is False),
    # so the three accessors below must never be invoked.
    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
class AUG_STOM_GLIDE(STOM_GLIDE):
    """
    Implements the Augmented STOM method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:
    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.
    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Has no effect on STOM calculation. Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """

    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
        self.has_additional_constraints = False
        # Augmented STOM turns the augmentation term of the scalarizer on.
        self.__w = 1

    @property
    def w(self):
        # Bug fix: the inherited ``w`` property reads the name-mangled
        # attribute ``_STOM_GLIDE__w`` (always 0), so the ``self.__w = 1``
        # assignment above (mangled to ``_AUG_STOM_GLIDE__w``) was silently
        # ignored and the augmentation weight never took effect.  Overriding
        # the property here makes the class read its own mangled attribute.
        return self.__w
class Tchebycheff_GLIDE(GLIDEBase):
    """
    Implements the Tchebycheff method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:
    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.
    Args:
        utopian (np.ndarray, optional): The utopian point. Defaults to None.
        nadir (np.ndarray, optional): The nadir point. Has no effect on the
            Tchebycheff calculation (it is discarded in __init__). Defaults to None.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """
    def __init__(
        self,
        utopian: np.ndarray = None,
        nadir: np.ndarray = None,
        rho: float = 1e-6,
        **kwargs
    ):
        # ``nadir`` is accepted for interface symmetry but not forwarded.
        super().__init__(utopian=utopian, nadir=None, rho=rho, **kwargs)
        self.has_additional_constraints = False
        # Every objective contributes to the alpha term ...
        self.__I_alpha = np.full_like(
            utopian, dtype=np.bool_, fill_value=True
        ).flatten()
        # ... and none to the epsilon constraints.
        self.__I_epsilon = np.full_like(
            utopian, dtype=np.bool_, fill_value=False
        ).flatten()
        # The augmentation term is enabled for this method.
        self.__w = 1
        self.required_keys = {
            "mu": (
                "Vector defining the direction of improvement of the scalarizer. "
                "(type: numpy.ndarray)"
            )
        }
    @property
    def I_epsilon(self):
        return self.__I_epsilon
    @property
    def I_alpha(self):
        return self.__I_alpha
    @property
    def mu(self):
        # The direction of improvement is supplied directly by the DM.
        return self.preference["mu"]
    @property
    def w(self):
        return self.__w
    @property
    def q(self):
        return self.utopian
    # No epsilon constraints exist for this method (has_additional_constraints
    # is False), so the accessors below must never be invoked.
    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
class PROJECT_GLIDE(GLIDEBase):
    """
    Implements the PROJECT method of preference elicitation and scalarization
    using the non-differentiable variant of GLIDE-II as proposed in:
    Ruiz, Francisco, Mariano Luque, and Kaisa Miettinen.
    "Improving the computational efficiency in a global formulation (GLIDE)
    for interactive multiobjective optimization."
    Annals of Operations Research 197.1 (2012): 47-70.
    Args:
        current_objective_vector (np.ndarray): The objective vector the
            projection is anchored at; the direction of improvement points
            from it towards the DM's reference point.
        rho (float, optional): The augmentation term for the scalarization function.
            Defaults to 1e-6.
    """
    def __init__(
        self, current_objective_vector: np.ndarray, rho: float = 1e-6, **kwargs
    ):
        # No utopian/nadir information is used by this method.
        super().__init__(utopian=None, nadir=None, rho=rho, **kwargs)
        self.current_objective_vector = current_objective_vector
        self.has_additional_constraints = False
        # Every objective contributes to the alpha term ...
        self.__I_alpha = np.full_like(
            current_objective_vector, dtype=np.bool_, fill_value=True
        ).flatten()
        # ... and none to the epsilon constraints.
        self.__I_epsilon = np.full_like(
            current_objective_vector, dtype=np.bool_, fill_value=False
        ).flatten()
        self.__w = 0
    @property
    def I_epsilon(self):
        return self.__I_epsilon
    @property
    def I_alpha(self):
        return self.__I_alpha
    @property
    def mu(self):
        # NOTE(review): relies on preference["reference point"] although no
        # ``required_keys`` entry advertises it for this class -- confirm.
        return 1 / np.abs(
            self.preference["reference point"] - self.current_objective_vector
        )
    @property
    def w(self):
        return self.__w
    @property
    def q(self):
        # NOTE(review): __init__ passes utopian=None to the base class, so
        # this presumably returns None -- confirm intended.
        return self.utopian
    # No epsilon constraints exist for this method, so the accessors below
    # must never be invoked.
    @property
    def epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
    @property
    def s_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
    @property
    def delta_epsilon(self):
        msg = "This part of the code should not be reached. Contact maintaner."
        raise GLIDEError(msg)
| 12,916 | 0 | 2,116 |
d4adcc8efbf8e3a1a5d6c3fdf0e7eff6f06a8b13 | 2,037 | py | Python | tests/simulation.py | ajansen-tub/pyoma | 83d4a74a54ecf7456f715b5d1c429b801bd5fd07 | [
"MIT"
] | 5 | 2021-06-15T10:03:17.000Z | 2022-03-06T23:11:13.000Z | tests/simulation.py | ajansen-tub/pyoma | 83d4a74a54ecf7456f715b5d1c429b801bd5fd07 | [
"MIT"
] | 1 | 2022-01-31T00:39:32.000Z | 2022-02-23T19:19:56.000Z | tests/simulation.py | ajansen-tub/pyoma | 83d4a74a54ecf7456f715b5d1c429b801bd5fd07 | [
"MIT"
] | 3 | 2021-06-29T08:24:33.000Z | 2021-12-21T04:18:22.000Z | import numpy as np
from statespace_model import solver
from scipy.spatial.distance import cdist
from pyomac.misc import mac_value, err_rel
| 30.402985 | 117 | 0.635739 | import numpy as np
from statespace_model import solver
from scipy.spatial.distance import cdist
from pyomac.misc import mac_value, err_rel
def generate_impulse_data(m, k, c_damp):
    """Simulate the acceleration response of an (m, k, c) system to an impulse.

    The load is a 200-sample rectangular force pulse on the third degree of
    freedom; the response is computed by ``solver``.

    Returns:
        (t, acc, fs): time vector, acceleration history and sampling rate in Hz.
    """
    # simulation parameters
    fs = 300
    time_step = 1 / fs
    t_end = 60
    t = np.arange(0, t_end, time_step)
    # force vector
    force = np.zeros((t.shape[0], m.shape[0]))
    force[:200, 2] = 200.
    t_impulse, acc_impulse = solver(m, k, c_damp, force, t)
    return t_impulse, acc_impulse, fs
def generate_random_data(m, k, c_damp):
    """Simulate the acceleration response of an (m, k, c) system to white noise.

    A fixed-seed Gaussian force is applied to every degree of freedom for
    five minutes; the response is computed by ``solver``.

    Returns:
        (t, acc, fs): time vector, acceleration history and sampling rate in Hz.
    """
    # simulation parameters
    fs = 300
    time_step = 1 / fs
    t_end = 5 * 60  # 5 min
    t = np.arange(0, t_end, time_step)
    # force vector
    # Fixed seed keeps the generated excitation reproducible across test runs.
    np.random.seed(43)
    force = np.random.normal(size=(t.shape[0], m.shape[0]), ) * 100
    t_random, acc_random = solver(m, k, c_damp, force, t)
    return t_random, acc_random, fs
def assert_modal_identification(test_obj, test_title: str,
                                f_e: np.ndarray, ms_e: np.ndarray, zeta: np.ndarray = None,
                                threshold_f: float = 0.025, threshold_ms: float = 0.95, threshold_zeta: float = 0.1):
    """Assert that identified modal parameters match the analytical solution.

    Compares estimated natural frequencies ``f_e``, mode shapes ``ms_e`` and
    (optionally) damping ratios ``zeta`` against the analytical values that
    ``test_obj`` exposes as ``f_a``, ``ms_a`` and ``zeta_a``, using relative
    error / MAC thresholds.  ``test_obj`` is a unittest.TestCase-like object
    (it must provide ``assertTrue``).
    """
    print('---------')
    print(test_title)
    print('Identified natural frequencies: ' + str(f_e))
    print('Analytical (undamped) natural frequencies: ' + str(test_obj.f_a))
    # pairwise distance between estimated (e) and analytical (a) solution
    d_f = cdist(f_e.reshape(-1, 1), test_obj.f_a.reshape(-1, 1), err_rel).min(axis=1)
    d_ms = cdist(ms_e, test_obj.ms_a.T, mac_value).max(axis=1)
    if zeta is not None:
        d_zeta = cdist(zeta.reshape(-1, 1), test_obj.zeta_a.reshape(-1, 1), err_rel).min(axis=1)
    print('Rel. error natural frequencies: ' + str(d_f))
    print('MAC: ' + str(d_ms))
    if zeta is not None:
        print('Rel. error modal damping: ' + str(d_zeta))
    test_obj.assertTrue((d_f < threshold_f).all())
    test_obj.assertTrue((d_ms > threshold_ms).all())
    if zeta is not None:
        test_obj.assertTrue((d_zeta < threshold_zeta).all())
| 1,826 | 0 | 69 |
ad4979c86f4b8879362215c1a922cce4b2191e12 | 622 | py | Python | provisional_test.py | lleene/pybooru | 72a539c94b8326478cf2491e1d954b80b709cda8 | [
"MIT"
] | 79 | 2015-02-04T12:32:03.000Z | 2022-03-22T09:17:15.000Z | provisional_test.py | lleene/pybooru | 72a539c94b8326478cf2491e1d954b80b709cda8 | [
"MIT"
] | 38 | 2016-05-18T19:20:24.000Z | 2022-03-30T20:02:08.000Z | provisional_test.py | lleene/pybooru | 72a539c94b8326478cf2491e1d954b80b709cda8 | [
"MIT"
] | 30 | 2015-02-04T13:09:19.000Z | 2022-03-21T15:52:50.000Z | # encoding: utf-8
from __future__ import print_function
from pybooru import Danbooru
from pybooru import Moebooru
# Manual smoke test: exercises the Moebooru and Danbooru clients against the
# live sites and prints the URL of each performed request (``last_call``).
# Requires network access; not an automated test.
konachan = Moebooru("konachan")
kona_tags = konachan.tag_list(order='date')
print(konachan.last_call)
kona_post = konachan.post_list()
print(konachan.last_call)
# NOTE(review): the ``kona_*`` names are reused for the lolibooru results.
lolibooru = Moebooru("lolibooru")
kona_tags = lolibooru.tag_list(order='date')
print(lolibooru.last_call)
kona_post = lolibooru.post_list()
print(lolibooru.last_call)
danbooru = Danbooru('danbooru')
dan_tags = danbooru.tag_list(order='name')
print(danbooru.last_call)
dan_post = danbooru.post_list(tags="computer")
print(danbooru.last_call)
| 23.923077 | 46 | 0.797428 | # encoding: utf-8
from __future__ import print_function
from pybooru import Danbooru
from pybooru import Moebooru
konachan = Moebooru("konachan")
kona_tags = konachan.tag_list(order='date')
print(konachan.last_call)
kona_post = konachan.post_list()
print(konachan.last_call)
lolibooru = Moebooru("lolibooru")
kona_tags = lolibooru.tag_list(order='date')
print(lolibooru.last_call)
kona_post = lolibooru.post_list()
print(lolibooru.last_call)
danbooru = Danbooru('danbooru')
dan_tags = danbooru.tag_list(order='name')
print(danbooru.last_call)
dan_post = danbooru.post_list(tags="computer")
print(danbooru.last_call)
| 0 | 0 | 0 |
a8d18cd1b0879ac04d924bee0cdf72d87be5cdf6 | 124 | py | Python | 2020/examples-in-class-2020-10-29/input_example_birth_year.py | ati-ozgur/course-python | 38237d120043c07230658b56dc3aeb01c3364933 | [
"Apache-2.0"
] | 1 | 2021-02-04T16:59:11.000Z | 2021-02-04T16:59:11.000Z | 2020/examples-in-class-2020-10-29/input_example_birth_year.py | ati-ozgur/course-python | 38237d120043c07230658b56dc3aeb01c3364933 | [
"Apache-2.0"
] | null | null | null | 2020/examples-in-class-2020-10-29/input_example_birth_year.py | ati-ozgur/course-python | 38237d120043c07230658b56dc3aeb01c3364933 | [
"Apache-2.0"
from datetime import date

str_year = input("what is your birth year:")
year = int(str_year)
# Use the actual current year rather than the hard-coded 2020 so the script
# keeps printing the right age after 2020.  The extra "- 1" assumes the
# birthday has not yet occurred in the current year.
age = date.today().year - year - 1
print(f"hello, your age is {age}")
| 12.4 | 44 | 0.66129 | str_year = input("what is your birth year:")
year = int(str_year)
age = 2020-year-1
print(f"hello, your age is {age}")
| 0 | 0 | 0 |
a8da378b3730c38346625872662739f1fb28f6fa | 2,730 | py | Python | neurostore/auth.py | neurostuff/python-neurostore | 0859c92a58e7d468c2268cff663814e4251aec15 | [
"MIT"
] | null | null | null | neurostore/auth.py | neurostuff/python-neurostore | 0859c92a58e7d468c2268cff663814e4251aec15 | [
"MIT"
] | 2 | 2022-02-10T01:35:38.000Z | 2022-02-10T01:58:44.000Z | neurostore/auth.py | neurostuff/python-neurostore | 0859c92a58e7d468c2268cff663814e4251aec15 | [
"MIT"
] | null | null | null | import threading
import base64
import hashlib
import webbrowser
import secrets
from time import sleep
from werkzeug.serving import make_server
from flask import Flask, request
# Shared mutable state used to hand the OAuth callback results from the
# Flask server thread over to the thread polling ``received_callback``.
global_dict = {
    'received_callback': False,
    'received_state': None,
    'authorization_code': None,
    'error_message': None
}
# Minimal local web app that receives the login redirect.
app = Flask(__name__)
@app.route("/callback")
def callback():
    """
    The callback is invoked after a completed login attempt (successful or otherwise).
    It sets global variables with the auth code or error messages, then sets the
    polling flag received_callback.
    :return: a short message asking the user to close the browser window
    """
    if 'error' in request.args:
        # Failures arrive as ``error``/``error_description`` query arguments.
        global_dict['error_message'] = request.args['error'] + ': ' + request.args['error_description']
    else:
        global_dict['authorization_code'] = request.args['code']
        # The echoed ``state`` is compared by the caller to the value it sent.
        global_dict['received_state'] = request.args['state']
    # Signal the polling loop that the flow has finished.
    global_dict['received_callback'] = True
    return "Please close this window and return to python-neurostore."
class ServerThread(threading.Thread):
"""
The Flask server is done this way to allow shutting down after a single request has been received.
"""
def auth0_url_encode(byte_data):
    """Base64url-encode *byte_data* and drop the ``=`` padding.

    The URL-safe alphabet avoids ``+`` and ``/``, and removing the padding
    yields a string that can be embedded directly in a URL.
    """
    encoded = base64.urlsafe_b64encode(byte_data)
    text = encoded.decode('utf-8')
    return text.replace('=', '')
return base64.urlsafe_b64encode(byte_data).decode('utf-8').replace('=', '') | 29.354839 | 104 | 0.682418 | import threading
import base64
import hashlib
import webbrowser
import secrets
from time import sleep
from werkzeug.serving import make_server
from flask import Flask, request
global_dict = {
'received_callback': False,
'received_state': None,
'authorization_code': None,
'error_message': None
}
app = Flask(__name__)
@app.route("/callback")
def callback():
"""
The callback is invoked after a completed login attempt (succesful or otherwise).
It sets global variables with the auth code or error messages, then sets the
polling flag received_callback.
:return:
"""
if 'error' in request.args:
global_dict['error_message'] = request.args['error'] + ': ' + request.args['error_description']
else:
global_dict['authorization_code'] = request.args['code']
global_dict['received_state'] = request.args['state']
global_dict['received_callback'] = True
return "Please close this window and return to python-neurostore."
class ServerThread(threading.Thread):
    """
    The Flask server is done this way to allow shutting down after a single request has been received.
    """
    def __init__(self, app):
        threading.Thread.__init__(self)
        # Bind the WSGI server up front so the port is claimed before start().
        self.srv = make_server('127.0.0.1', 5000, app)
        self.ctx = app.app_context()
        self.ctx.push()
    def run(self):
        print('starting server')
        # Blocks until shutdown() is called from another thread.
        self.srv.serve_forever()
    def shutdown(self):
        # Stops serve_forever(); safe to call from the polling thread.
        self.srv.shutdown()
def open_login(authorization_url, state):
    """Drive an interactive browser login and return the authorization code.

    Opens ``authorization_url`` in the default browser, serves the redirect
    callback on 127.0.0.1:5000 and polls once a second until the callback
    fires.  Exits the process on a ``state`` mismatch or a reported error.
    """
    # Open the browser window to the login url
    # Start the server
    # Poll til the callback has been invoked
    global_dict['received_callback'] = False
    webbrowser.open_new(authorization_url)
    server = ServerThread(app)
    server.start()
    while not global_dict['received_callback']:
        sleep(1)
    server.shutdown()
    # The state echoed back by the provider must match the one we sent.
    if state != global_dict['received_state']:
        print("Error: session replay or similar attack in progress. Please log out of all connections.")
        exit(-1)
    if global_dict['error_message']:
        print("An error occurred:")
        print(global_dict['error_message'])
        exit(-1)
    return global_dict['authorization_code']
def auth0_url_encode(byte_data):
    """
    URL-safe base64 encoding: the URL-safe alphabet handles + and /, and the
    = padding is stripped so the result can be embedded directly in a URL.
    :param byte_data: raw bytes to encode
    :return: padding-free base64url string
    """
    return base64.urlsafe_b64encode(byte_data).decode('utf-8').replace('=', '')
def generate_challenge(a_verifier):
    """PKCE code challenge: unpadded base64url of the SHA-256 of the verifier."""
    return auth0_url_encode(hashlib.sha256(a_verifier.encode()).digest())
def generate_pkse():
    """Create a PKCE (verifier, challenge) pair from 32 random bytes."""
    # NOTE(review): the name is presumably a typo for "pkce"; kept as-is for
    # backward compatibility with existing callers.
    verifier = auth0_url_encode(secrets.token_bytes(32))
    challenge = generate_challenge(verifier)
    return verifier, challenge
b80acfb2cbd14eab86a240f780997c286516eb72 | 1,628 | py | Python | src/pas/plugins/ldap/setuphandlers.py | b-freitas/pas.plugins.ldap | 10c6e82bfd6704a5032594afa87df61a73d4c636 | [
"BSD-3-Clause"
] | 8 | 2016-07-01T21:28:18.000Z | 2020-04-03T07:14:29.000Z | src/pas/plugins/ldap/setuphandlers.py | b-freitas/pas.plugins.ldap | 10c6e82bfd6704a5032594afa87df61a73d4c636 | [
"BSD-3-Clause"
] | 94 | 2015-02-10T11:14:08.000Z | 2022-02-14T21:56:57.000Z | src/pas/plugins/ldap/setuphandlers.py | b-freitas/pas.plugins.ldap | 10c6e82bfd6704a5032594afa87df61a73d4c636 | [
"BSD-3-Clause"
] | 18 | 2015-08-17T14:06:36.000Z | 2022-03-01T02:29:36.000Z | # -*- coding: utf-8 -*-
from pas.plugins.ldap.plugin import LDAPPlugin
from zope.component.hooks import getSite
TITLE = "LDAP plugin (pas.plugins.ldap)"
def remove_persistent_import_step(context):
    """Remove broken persistent import step.
    profile/import_steps.xml defined an import step with id
    "pas.plugins.ldap.setup" which pointed to
    pas.plugins.ldap.setuphandlers.setupPlugin.
    This function no longer exists, and the import step is not needed,
    because a post_install handler is now used for this.
    But you get an error in the log whenever you import a profile:
    GenericSetup Step pas.plugins.ldap.setup has an invalid import handler
    So we remove the step.
    """
    registry = context.getImportStepRegistry()
    import_step = "pas.plugins.ldap.setup"
    # Only unregister when the stale step is actually present (idempotent).
    if import_step in registry._registered:
        registry.unregisterStep(import_step)
| 32.56 | 78 | 0.701474 | # -*- coding: utf-8 -*-
from pas.plugins.ldap.plugin import LDAPPlugin
from zope.component.hooks import getSite
TITLE = "LDAP plugin (pas.plugins.ldap)"
def remove_persistent_import_step(context):
"""Remove broken persistent import step.
profile/import_steps.xml defined an import step with id
"pas.plugins.ldap.setup" which pointed to
pas.plugins.ldap.setuphandlers.setupPlugin.
This function no longer exists, and the import step is not needed,
because a post_install handler is now used for this.
But you get an error in the log whenever you import a profile:
GenericSetup Step pas.plugins.ldap.setup has an invalid import handler
So we remove the step.
"""
registry = context.getImportStepRegistry()
import_step = "pas.plugins.ldap.setup"
if import_step in registry._registered:
registry.unregisterStep(import_step)
def _addPlugin(pas, pluginid="pasldap"):
    """Instantiate the LDAP PAS plugin inside ``pas`` and activate it for
    every plugin-type interface it provides, moving all other plugins of that
    type down so the LDAP plugin is queried first.  No-op if already installed.
    """
    installed = pas.objectIds()
    if pluginid in installed:
        return TITLE + " already installed."
    plugin = LDAPPlugin(pluginid, title=TITLE)
    pas._setObject(pluginid, plugin)
    plugin = pas[plugin.getId()]  # get plugin acquisition wrapped!
    for info in pas.plugins.listPluginTypeInfo():
        interface = info["interface"]
        if not interface.providedBy(plugin):
            continue
        pas.plugins.activatePlugin(interface, plugin.getId())
        # Our plugin was appended last; pushing every other plugin down one
        # slot effectively puts it at the top of the listing.
        pas.plugins.movePluginsDown(
            interface, [x[0] for x in pas.plugins.listPlugins(interface)[:-1]]
        )
def post_install(context):
    """GenericSetup post-install handler: add the LDAP plugin to acl_users."""
    site = getSite()
    pas = site.acl_users
    _addPlugin(pas)
| 691 | 0 | 46 |
8189aa7203e800f8a1c5d1651c18c894affa0f7b | 3,744 | py | Python | lcm/pub/nfvi/vim/api/openstack/image.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 4 | 2018-08-29T02:51:38.000Z | 2021-11-16T11:36:11.000Z | lcm/pub/nfvi/vim/api/openstack/image.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | null | null | null | lcm/pub/nfvi/vim/api/openstack/image.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 1 | 2019-05-12T08:21:19.000Z | 2019-05-12T08:21:19.000Z | # Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import traceback
import threading
from lcm.pub.nfvi.vim.api.openstack import glancebase
from lcm.pub.nfvi.vim.lib.syscomm import fun_name
from lcm.pub.nfvi.vim import const
logger = logging.getLogger(__name__)
| 34.990654 | 112 | 0.662126 | # Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import traceback
import threading
from lcm.pub.nfvi.vim.api.openstack import glancebase
from lcm.pub.nfvi.vim.lib.syscomm import fun_name
from lcm.pub.nfvi.vim import const
logger = logging.getLogger(__name__)
class ImageUploadThread(threading.Thread):
    """Uploads an image file to glance in the background, logging failures."""
    def __init__(self, glance, image_id, image_path):
        threading.Thread.__init__(self)
        self.glance = glance
        self.image_id = image_id
        self.image_path = image_path
    def run(self):
        try:
            # NOTE(review): the file handle opened here is never explicitly
            # closed; confirm the glance client consumes and releases it.
            self.glance.images.upload(self.image_id, open(self.image_path, 'rb'))
        except Exception as ex:
            logger.error(traceback.format_exc())
            # NOTE(review): ex.args[0] raises IndexError when args is empty.
            err_msg = ex.args[0] if ex.args[0] else str(sys.exc_info())
            logger.error("Failed to upload image(%s): %s", self.image_id, err_msg)
        except:
            # Bare except catches non-Exception exits as a last resort.
            logger.error(traceback.format_exc())
            logger.error("Failed to upload image(%s): [%s]", self.image_id, str(sys.exc_info()))
def create_image(auth_info, data):
    """Create a glance image and start uploading its file in the background.

    If an image with the same name already exists it is reused instead.
    Returns ``[0, {"id", "name", <res-type flag>}]`` where the flag marks
    the image as pre-existing or newly created.
    """
    ret = None
    glance = glancebase.get_glance(fun_name(), auth_info)
    exist_img = [img for img in glance.images.list() if img.name == data["image_name"]]
    if exist_img:
        ret = [0, {"id": exist_img[0].id, "name": data["image_name"], const.RES_TYPE_KEY: const.RES_TYPE_EXIST}]
    else:
        img = glance.images.create(
            name=data["image_name"],
            disk_format=data["image_type"],
            visibility='public',
            container_format='bare')
        ret = [0, {"id": img.id, "name": data["image_name"], const.RES_TYPE_KEY: const.RES_TYPE_NEW}]
        try:
            # Fire-and-forget upload; failures are only logged, not reported.
            ImageUploadThread(glance, img.id, data["image_path"]).start()
        except:
            logger.error(traceback.format_exc())
            logger.error(str(sys.exc_info()))
    return ret
def get_image(auth_info, image_id):
    """Look up one image; returns [0, info] or [2, reason] if absent/deleted."""
    # Local import of the glance client's exception type.
    from glanceclient.exc import HTTPNotFound
    glance = glancebase.get_glance(fun_name(), auth_info)
    img = None
    try:
        img = glance.images.get(image_id)
    except HTTPNotFound:
        logger.warn("Exception: %s" % str(sys.exc_info()))
        return [2, "Image(%s) does not exist" % image_id]
    ret_img_info = get_single_image(img)
    if 'status' in ret_img_info and 'deleted' == ret_img_info["status"]:
        return [2, "Image(%s) is deleted" % image_id]
    return [0, ret_img_info]
def delete_image(auth_info, image_id):
    """Delete an image; a missing image is reported as success (idempotent)."""
    from glanceclient.exc import HTTPNotFound
    glance = glancebase.get_glance(fun_name(), auth_info)
    try:
        glance.images.delete(image_id)
    except HTTPNotFound:
        logger.warn("Exception: %s" % str(sys.exc_info()))
        return [0, "Image(%s) does not exist" % image_id]
    return [0, "Image(%s) has been deleted" % image_id]
def get_images(auth_info):
    """Return [0, {"image_list": [...]}] describing every image in glance."""
    glance = glancebase.get_glance(fun_name(), auth_info)
    imgs = glance.images.list()
    return [0, {"image_list": [get_single_image(img) for img in imgs]}]
def get_single_image(img):
    """Map a glance image object onto a plain info dict.

    Args:
        img: any object exposing ``id``, ``name``, ``size`` and ``status``
            attributes (typically a glance image).

    Returns:
        dict: ``{"id", "name", "size", "status"}`` where ``size`` is the
        image size in KiB, or 0 when the size is unavailable (e.g. ``None``
        for images whose data has not been uploaded yet).
    """
    img_size = 0
    try:
        img_size = img.size / 1024
    except Exception:
        # ``size`` may be None or missing; fall back to 0.  The previous bare
        # ``except:`` also swallowed SystemExit/KeyboardInterrupt -- narrowed.
        pass
    return {"id": img.id, "name": img.name, "size": img_size, "status": img.status}
| 2,703 | 21 | 191 |
69ae863712a666e0148aa5138903bddda6f729ce | 13,934 | py | Python | yamp/__init__.py | MaxHalford/yamp | 6cdcbedb089fb0fe977807de27835d55d774d1af | [
"MIT"
] | 9 | 2021-08-21T16:25:52.000Z | 2021-11-05T17:29:51.000Z | yamp/__init__.py | MaxHalford/yamp | 6cdcbedb089fb0fe977807de27835d55d774d1af | [
"MIT"
] | null | null | null | yamp/__init__.py | MaxHalford/yamp | 6cdcbedb089fb0fe977807de27835d55d774d1af | [
"MIT"
] | null | null | null | """This script is responsible for building the API reference. The API reference is located in
docs/api. The script scans through all the modules, classes, and functions. It processes
the __doc__ of each object and formats it so that MkDocs can process it in turn.
"""
import argparse
import functools
import importlib
import inspect
import os
import pathlib
import re
import shutil
from numpydoc.docscrape import ClassDoc, FunctionDoc
from yamp import md
from yamp import utils
def print_docstring(obj, file, depth, linkifier):
    """Prints a classes's docstring to a file.

    Renders the markdown API-reference page for *obj* (a class or function)
    into *file*.  ``depth`` is how many directory levels the page sits below
    the docs root (used for relative links) and ``linkifier`` rewrites
    dotted names into markdown links.
    """
    doc = ClassDoc(obj) if inspect.isclass(obj) else FunctionDoc(obj)
    printf = functools.partial(print, file=file)
    printf(md.h1(obj.__name__))
    printf(linkifier.linkify_fences(md.line(concat_lines(doc["Summary"])), depth))
    printf(
        linkifier.linkify_fences(md.line(concat_lines(doc["Extended Summary"])), depth)
    )
    # We infer the type annotations from the signatures, and therefore rely on the signature
    # instead of the docstring for documenting parameters
    try:
        signature = inspect.signature(obj)
    except ValueError:
        signature = (
            inspect.Signature()
        )  # TODO: this is necessary for Cython classes, but it's not correct
    params_desc = {param.name: " ".join(param.desc) for param in doc["Parameters"]}
    # Parameters
    if signature.parameters:
        printf(md.h2("Parameters"))
        for param in signature.parameters.values():
            # Name
            printf(f"- **{param.name}**", end="")
            # Type annotation
            if param.annotation is not param.empty:
                anno = inspect.formatannotation(param.annotation)
                anno = linkifier.linkify_dotted(anno, depth)
                printf(f" (*{anno}*)", end="")
            # Default value
            if param.default is not param.empty:
                printf(f" – defaults to `{param.default}`", end="")
            # ``file=`` is redundant here (printf is already bound to it).
            printf("\n", file=file)
            # Description
            desc = params_desc[param.name]
            if desc:
                printf(f"    {desc}\n")
        printf("")
    # Attributes
    if doc["Attributes"]:
        printf(md.h2("Attributes"))
        for attr in doc["Attributes"]:
            # Name
            printf(f"- **{attr.name}**", end="")
            # Type annotation
            if attr.type:
                printf(f" (*{attr.type}*)", end="")
            printf("\n", file=file)
            # Description
            desc = " ".join(attr.desc)
            if desc:
                printf(f"    {desc}\n")
        printf("")
    # Examples
    if doc["Examples"]:
        printf(md.h2("Examples"))
        # Small state machine: wrap ``>>>``/``...`` runs in ```python fences,
        # closing a fence when prose follows a blank line inside a code run.
        in_code = False
        after_space = False
        for line in inspect.cleandoc("\n".join(doc["Examples"])).splitlines():
            if (
                in_code
                and after_space
                and line
                and not line.startswith(">>>")
                and not line.startswith("...")
            ):
                printf("```\n")
                in_code = False
                after_space = False
            if not in_code and line.startswith(">>>"):
                printf("```python")
                in_code = True
                after_space = False
            if not line:
                after_space = True
            printf(line)
        if in_code:
            printf("```")
        printf("")
    # Methods
    if inspect.isclass(obj) and doc["Methods"]:
        printf(md.h2("Methods"))
        # Method docs are indented four spaces to sit inside the admonition.
        printf_indent = lambda x, **kwargs: printf(f"    {x}", **kwargs)
        for meth in doc["Methods"]:
            printf(md.line(f'???- note "{meth.name}"'))
            # Parse method docstring
            docstring = utils.find_method_docstring(klass=obj, method=meth.name)
            if not docstring:
                continue
            meth_doc = FunctionDoc(func=None, doc=docstring)
            printf_indent(md.line(" ".join(meth_doc["Summary"])))
            if meth_doc["Extended Summary"]:
                printf_indent(md.line(" ".join(meth_doc["Extended Summary"])))
            # We infer the type annotations from the signatures, and therefore rely on the signature
            # instead of the docstring for documenting parameters
            signature = utils.find_method_signature(obj, meth.name)
            # NOTE(review): this reads the *class* doc's Parameters (``doc``),
            # not ``meth_doc["Parameters"]`` -- looks like a copy-paste slip,
            # so method parameter descriptions come from the class docstring.
            # Confirm intended before changing.
            params_desc = {
                param.name: " ".join(param.desc) for param in doc["Parameters"]
            }
            # Parameters
            if (
                len(signature.parameters) > 1
            ):  # signature is never empty, but self doesn't count
                printf_indent("**Parameters**\n")
                for param in signature.parameters.values():
                    if param.name == "self":
                        continue
                    # Name
                    printf_indent(f"- **{param.name}**", end="")
                    # Type annotation
                    if param.annotation is not param.empty:
                        printf_indent(
                            f" (*{inspect.formatannotation(param.annotation)}*)", end=""
                        )
                    # Default value
                    if param.default is not param.empty:
                        printf_indent(f" – defaults to `{param.default}`", end="")
                    printf_indent("", file=file)
                    # Description
                    desc = params_desc.get(param.name)
                    if desc:
                        printf_indent(f"    {desc}")
                printf_indent("")
            # Returns
            if meth_doc["Returns"]:
                printf_indent("**Returns**\n")
                return_val = meth_doc["Returns"][0]
                if signature.return_annotation is not inspect._empty:
                    if inspect.isclass(signature.return_annotation):
                        printf_indent(
                            f"*{signature.return_annotation.__name__}*: ", end=""
                        )
                    else:
                        printf_indent(f"*{signature.return_annotation}*: ", end="")
                printf_indent(return_val.type)
                printf_indent("")
    # Notes
    if doc["Notes"]:
        printf(md.h2("Notes"))
        printf(md.line("\n".join(doc["Notes"])))
    # References
    if doc["References"]:
        printf(md.h2("References"))
        printf(md.line("\n".join(doc["References"])))
def cli_hook():
    """Command-line interface.

    Parses ``library``, ``--out`` and ``--verbose`` from the command line and
    hands them to ``print_library`` (defined elsewhere in this package).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "library",
        nargs="?",
        help="the library to document",
    )
    parser.add_argument("--out", default="docs/api", help="where to dump the docs")
    parser.add_argument("--verbose", dest="verbose", action="store_true")
    parser.set_defaults(verbose=False)
    args = parser.parse_args()
    print_library(
        library=args.library, output_dir=pathlib.Path(args.out), verbose=args.verbose
    )
| 33.17619 | 100 | 0.551672 | """This script is responsible for building the API reference. The API reference is located in
docs/api. The script scans through all the modules, classes, and functions. It processes
the __doc__ of each object and formats it so that MkDocs can process it in turn.
"""
import argparse
import functools
import importlib
import inspect
import os
import pathlib
import re
import shutil
from numpydoc.docscrape import ClassDoc, FunctionDoc
from yamp import md
from yamp import utils
class Linkifier:
    """Indexes the ``river`` package and rewrites dotted names into links.

    ``path_index`` maps every way of spelling an object's dotted name to the
    documentation page path; ``name_index`` maps it to the canonical dotted
    name used as the link text.
    """
    def __init__(self):
        path_index = {}
        name_index = {}
        modules = dict(
            inspect.getmembers(importlib.import_module("river"), inspect.ismodule)
        )
        # NOTE(review): only this hard-coded subset of river's modules is
        # indexed -- presumably a temporary restriction; confirm.
        modules = {
            "base": modules["base"],
            "linear_model": modules["linear_model"],
            "stream": modules["stream"],
            "optim": modules["optim"],
        }
        def index_module(mod_name, mod, path):
            # Register every function, class and public submodule of ``mod``
            # under each spelling (short, dotted-path, and __module__-based).
            path = os.path.join(path, mod_name)
            dotted_path = path.replace("/", ".")
            for func_name, func in inspect.getmembers(mod, inspect.isfunction):
                for e in (
                    f"{mod_name}.{func_name}",
                    f"{dotted_path}.{func_name}",
                    f"{func.__module__}.{func_name}",
                ):
                    path_index[e] = os.path.join(path, utils.snake_to_kebab(func_name))
                    name_index[e] = f"{dotted_path}.{func_name}"
            for klass_name, klass in inspect.getmembers(mod, inspect.isclass):
                for e in (
                    f"{mod_name}.{klass_name}",
                    f"{dotted_path}.{klass_name}",
                    f"{klass.__module__}.{klass_name}",
                ):
                    path_index[e] = os.path.join(path, klass_name)
                    name_index[e] = f"{dotted_path}.{klass_name}"
            for submod_name, submod in inspect.getmembers(mod, inspect.ismodule):
                if submod_name not in mod.__all__ or submod_name == "typing":
                    continue
                for e in (f"{mod_name}.{submod_name}", f"{dotted_path}.{submod_name}"):
                    path_index[e] = os.path.join(
                        path, utils.snake_to_kebab(submod_name)
                    )
                # Recurse
                index_module(submod_name, submod, path=path)
        for mod_name, mod in modules.items():
            index_module(mod_name, mod, path="")
        # Prepend river to each index entry
        for k in list(path_index.keys()):
            path_index[f"river.{k}"] = path_index[k]
        for k in list(name_index.keys()):
            name_index[f"river.{k}"] = name_index[k]
        self.path_index = path_index
        self.name_index = name_index
    def linkify(self, text, use_fences, depth):
        """Return a markdown link for *text*, or None if it is not indexed."""
        path = self.path_index.get(text)
        name = self.name_index.get(text)
        if path and name:
            # Relative prefix to climb from the page back to the docs root.
            backwards = "../" * (depth + 1)
            if use_fences:
                return f"[`{name}`]({backwards}{path})"
            return f"[{name}]({backwards}{path})"
        return None
    def linkify_fences(self, text, depth):
        """Linkify dotted names written between backticks in *text*."""
        between_fences = re.compile(r"`[\w\.]+\.\w+`")
        return between_fences.sub(
            lambda x: self.linkify(x.group().strip("`"), True, depth) or x.group(), text
        )
    def linkify_dotted(self, text, depth):
        """Linkify bare dotted names in *text* (no backticks)."""
        dotted = re.compile(r"\w+\.[\.\w]+")
        return dotted.sub(
            lambda x: self.linkify(x.group(), False, depth) or x.group(), text
        )
def concat_lines(lines):
    """Join docstring lines into one block, turning blank lines into paragraph breaks."""
    pieces = ["\n\n" if not chunk else chunk for chunk in lines]
    return inspect.cleandoc(" ".join(pieces))
def print_docstring(obj, file, depth, linkifier):
    """Render the numpydoc docstring of a class or function as Markdown into *file*.

    *depth* is how many directories deep the output page lives and is used by
    *linkifier* to build the relative ("../") prefixes of cross-reference links.
    """
    # numpydoc parses classes and plain functions with different parsers.
    doc = ClassDoc(obj) if inspect.isclass(obj) else FunctionDoc(obj)
    # All output goes to the given file.
    printf = functools.partial(print, file=file)
    printf(md.h1(obj.__name__))
    printf(linkifier.linkify_fences(md.line(concat_lines(doc["Summary"])), depth))
    printf(
        linkifier.linkify_fences(md.line(concat_lines(doc["Extended Summary"])), depth)
    )
    # We infer the type annotations from the signatures, and therefore rely on the signature
    # instead of the docstring for documenting parameters
    try:
        signature = inspect.signature(obj)
    except ValueError:
        signature = (
            inspect.Signature()
        )  # TODO: this is necessary for Cython classes, but it's not correct
    params_desc = {param.name: " ".join(param.desc) for param in doc["Parameters"]}
    # Parameters
    if signature.parameters:
        printf(md.h2("Parameters"))
        for param in signature.parameters.values():
            # Name
            printf(f"- **{param.name}**", end="")
            # Type annotation
            if param.annotation is not param.empty:
                anno = inspect.formatannotation(param.annotation)
                anno = linkifier.linkify_dotted(anno, depth)
                printf(f" (*{anno}*)", end="")
            # Default value
            if param.default is not param.empty:
                printf(f" – defaults to `{param.default}`", end="")
            printf("\n", file=file)
            # Description
            desc = params_desc[param.name]
            if desc:
                printf(f"    {desc}\n")
        printf("")
    # Attributes
    if doc["Attributes"]:
        printf(md.h2("Attributes"))
        for attr in doc["Attributes"]:
            # Name
            printf(f"- **{attr.name}**", end="")
            # Type annotation
            if attr.type:
                printf(f" (*{attr.type}*)", end="")
            printf("\n", file=file)
            # Description
            desc = " ".join(attr.desc)
            if desc:
                printf(f"    {desc}\n")
        printf("")
    # Examples
    if doc["Examples"]:
        printf(md.h2("Examples"))
        in_code = False
        after_space = False
        for line in inspect.cleandoc("\n".join(doc["Examples"])).splitlines():
            # A prose line following a blank line closes the current code fence.
            if (
                in_code
                and after_space
                and line
                and not line.startswith(">>>")
                and not line.startswith("...")
            ):
                printf("```\n")
                in_code = False
                after_space = False
            # A ">>>" prompt line opens a new Python code fence.
            if not in_code and line.startswith(">>>"):
                printf("```python")
                in_code = True
            after_space = False
            if not line:
                after_space = True
            printf(line)
        if in_code:
            printf("```")
        printf("")
    # Methods
    if inspect.isclass(obj) and doc["Methods"]:
        printf(md.h2("Methods"))
        # Method docs are rendered indented inside a "???- note" admonition.
        printf_indent = lambda x, **kwargs: printf(f"    {x}", **kwargs)
        for meth in doc["Methods"]:
            printf(md.line(f'???- note "{meth.name}"'))
            # Parse method docstring
            docstring = utils.find_method_docstring(klass=obj, method=meth.name)
            if not docstring:
                continue
            meth_doc = FunctionDoc(func=None, doc=docstring)
            printf_indent(md.line(" ".join(meth_doc["Summary"])))
            if meth_doc["Extended Summary"]:
                printf_indent(md.line(" ".join(meth_doc["Extended Summary"])))
            # We infer the type annotations from the signatures, and therefore rely on the signature
            # instead of the docstring for documenting parameters
            signature = utils.find_method_signature(obj, meth.name)
            params_desc = {
                param.name: " ".join(param.desc) for param in doc["Parameters"]
            }
            # Parameters
            if (
                len(signature.parameters) > 1
            ):  # signature is never empty, but self doesn't count
                printf_indent("**Parameters**\n")
            for param in signature.parameters.values():
                if param.name == "self":
                    continue
                # Name
                printf_indent(f"- **{param.name}**", end="")
                # Type annotation
                if param.annotation is not param.empty:
                    printf_indent(
                        f" (*{inspect.formatannotation(param.annotation)}*)", end=""
                    )
                # Default value
                if param.default is not param.empty:
                    printf_indent(f" – defaults to `{param.default}`", end="")
                printf_indent("", file=file)
                # Description
                desc = params_desc.get(param.name)
                if desc:
                    printf_indent(f"    {desc}")
                printf_indent("")
            # Returns
            if meth_doc["Returns"]:
                printf_indent("**Returns**\n")
                return_val = meth_doc["Returns"][0]
                if signature.return_annotation is not inspect._empty:
                    if inspect.isclass(signature.return_annotation):
                        printf_indent(
                            f"*{signature.return_annotation.__name__}*: ", end=""
                        )
                    else:
                        printf_indent(f"*{signature.return_annotation}*: ", end="")
                printf_indent(return_val.type)
                printf_indent("")
    # Notes
    if doc["Notes"]:
        printf(md.h2("Notes"))
        printf(md.line("\n".join(doc["Notes"])))
    # References
    if doc["References"]:
        printf(md.h2("References"))
        printf(md.line("\n".join(doc["References"])))
def print_module(mod, path, overview, linkifier, is_submodule=False, verbose=False):
    """Render the API reference pages for *mod* and all of its public submodules.

    One directory is created per module under *path*; every public class and
    function gets its own Markdown page, and an entry is appended to the open
    *overview* file. *linkifier* resolves cross-references, *is_submodule*
    controls the heading level used in the overview, and *verbose* echoes
    each documented object's name to stdout.
    """
    mod_name = mod.__name__.split(".")[-1]
    # Create a directory for the module
    mod_slug = utils.snake_to_kebab(mod_name)
    mod_path = path.joinpath(mod_slug)
    mod_short_path = str(mod_path).replace("docs/api/", "")
    os.makedirs(mod_path, exist_ok=True)
    with open(mod_path.joinpath(".pages"), "w") as f:
        f.write(f"title: {mod_name}")
    # Add the module to the overview
    if is_submodule:
        print(md.h3(mod_name), file=overview)
    else:
        print(md.h2(mod_name), file=overview)
    if mod.__doc__:
        print(md.line(mod.__doc__), file=overview)
    # Extract all public classes and functions
    ispublic = lambda x: x.__name__ in mod.__all__ and not x.__name__.startswith("_")
    classes = inspect.getmembers(mod, lambda x: inspect.isclass(x) and ispublic(x))
    funcs = inspect.getmembers(mod, lambda x: inspect.isfunction(x) and ispublic(x))
    # Classes (the section header is only needed when functions are also listed)
    if classes and funcs:
        print("\n**Classes**\n", file=overview)
    for _, c in classes:
        if verbose:
            print(f"{mod_name}.{c.__name__}")
        # Add the class to the overview
        slug = utils.snake_to_kebab(c.__name__)
        print(
            md.li(md.link(c.__name__, f"../{mod_short_path}/{slug}")),
            end="",
            file=overview,
        )
        # Write down the class' docstring
        with open(mod_path.joinpath(slug).with_suffix(".md"), "w") as file:
            print_docstring(
                obj=c,
                file=file,
                linkifier=linkifier,
                depth=mod_short_path.count("/") + 1,
            )
    # Functions
    if classes and funcs:
        print("\n**Functions**\n", file=overview)
    for _, f in funcs:
        if verbose:
            print(f"{mod_name}.{f.__name__}")
        # Add the function to the overview
        slug = utils.snake_to_kebab(f.__name__)
        print(
            md.li(md.link(f.__name__, f"../{mod_short_path}/{slug}")),
            end="",
            file=overview,
        )
        # Write down the function's docstring
        with open(mod_path.joinpath(slug).with_suffix(".md"), "w") as file:
            print_docstring(
                obj=f,
                file=file,
                linkifier=linkifier,
                # Fix: depth is the number of path separators, exactly as in
                # the class branch above. Counting "." on a slash-separated
                # path gave submodule functions too few "../" components in
                # their cross-reference links.
                depth=mod_short_path.count("/") + 1,
            )
    # Sub-modules
    for name, submod in inspect.getmembers(mod, inspect.ismodule):
        # We only want to go through the public submodules, such as optim.schedulers
        if (
            name in ("tags", "typing", "inspect", "skmultiflow_utils")
            or name not in mod.__all__
            or name.startswith("_")
        ):
            continue
        print_module(
            mod=submod,
            path=mod_path,
            overview=overview,
            linkifier=linkifier,
            is_submodule=True,
            # Fix: propagate verbosity into recursive calls so submodule
            # members are also echoed when --verbose is on.
            verbose=verbose,
        )
    print("", file=overview)
def print_library(library: str, output_dir: pathlib.Path, verbose=False):
    """Generate the full API reference for *library* under *output_dir*.

    Any existing output directory is wiped first. An overview page listing
    every documented module is written alongside the per-module pages.
    """
    # Create a directory for the API reference
    shutil.rmtree(output_dir, ignore_errors=True)
    os.makedirs(output_dir, exist_ok=True)
    with open(output_dir.joinpath(".pages"), "w") as f:
        f.write("title: API reference\narrange:\n  - overview.md\n  - ...\n")
    # Fix: the overview file used to be opened without ever being closed; the
    # context manager guarantees it is flushed and closed even on errors.
    with open(output_dir.joinpath("overview.md"), "w") as overview:
        print(md.h1("Overview"), file=overview)
        linkifier = Linkifier()
        for mod_name, mod in inspect.getmembers(
            importlib.import_module(library), inspect.ismodule
        ):
            if mod_name.startswith("_"):
                continue
            if verbose:
                print(mod_name)
            print_module(
                mod,
                path=output_dir,
                overview=overview,
                linkifier=linkifier,
                verbose=verbose,
            )
def cli_hook():
    """Command-line entry point: parse arguments and build the docs."""
    parser = argparse.ArgumentParser()
    parser.add_argument("library", nargs="?", help="the library to document")
    parser.add_argument("--out", default="docs/api", help="where to dump the docs")
    # store_true already defaults the flag to False, so no set_defaults is needed.
    parser.add_argument("--verbose", dest="verbose", action="store_true")
    args = parser.parse_args()
    out_dir = pathlib.Path(args.out)
    print_library(library=args.library, output_dir=out_dir, verbose=args.verbose)
| 6,887 | -5 | 199 |
0c0584c26d480ad99ffc72964cfc768284862ef6 | 510 | py | Python | run_several.py | davidmalmstrom/neural_collaborative_filtering | d3d93100d990381f9ef1a10ec37b455b31d38de7 | [
"Apache-2.0"
] | null | null | null | run_several.py | davidmalmstrom/neural_collaborative_filtering | d3d93100d990381f9ef1a10ec37b455b31d38de7 | [
"Apache-2.0"
] | null | null | null | run_several.py | davidmalmstrom/neural_collaborative_filtering | d3d93100d990381f9ef1a10ec37b455b31d38de7 | [
"Apache-2.0"
] | null | null | null | # Script to run several runs after each other. The runs are specified by the run-configurations
# located in the folder runs/several_runs/
import sys
import os
sys.path.append('..')
import run_script
run_files = os.listdir("runs/several_runs/")
run_files.sort()
for run_file_name in run_files:
try:
run_script.main(["run_several.py", run_file_name], "runs/several_runs/")
except Exception as e:
print((str(type(e)) + ": " + str(e)).replace('\n', ' '))
#os.system("shutdown now -h") | 28.333333 | 96 | 0.692157 | # Script to run several runs after each other. The runs are specified by the run-configurations
# located in the folder runs/several_runs/
import sys
import os
sys.path.append('..')
import run_script
# Execute every run configuration in alphabetical order.
for run_file_name in sorted(os.listdir("runs/several_runs/")):
    try:
        run_script.main(["run_several.py", run_file_name], "runs/several_runs/")
    except Exception as e:
        # A failed run must not stop the remaining ones; log the error on one line.
        print((str(type(e)) + ": " + str(e)).replace('\n', ' '))
#os.system("shutdown now -h") | 0 | 0 | 0 |
99fb5558318029de7c4cecca9289a377a4ffa18d | 1,209 | py | Python | Questions/models.py | shubh242/E-Learning-App | 07320b1f1aba31602c3056398ffae16f2fa6f9ae | [
"MIT"
] | null | null | null | Questions/models.py | shubh242/E-Learning-App | 07320b1f1aba31602c3056398ffae16f2fa6f9ae | [
"MIT"
] | null | null | null | Questions/models.py | shubh242/E-Learning-App | 07320b1f1aba31602c3056398ffae16f2fa6f9ae | [
"MIT"
] | null | null | null | from django.db import models
from Client.models import Student, Parent, Teacher, Subject, Course
from django.contrib.auth.models import User
| 37.78125 | 80 | 0.757651 | from django.db import models
from Client.models import Student, Parent, Teacher, Subject, Course
from django.contrib.auth.models import User
class Post(models.Model):
    """A question posted by a user.

    A post can be tagged with any number of subjects and courses, and can be
    liked by any number of users.
    """
    # The question text itself.
    question = models.TextField(max_length=100)
    # The author; deleting the user cascades to their posts.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    subject = models.ManyToManyField(Subject, blank=True)
    course = models.ManyToManyField(Course, blank=True)
    # Users who liked this post.
    likes = models.ManyToManyField(User, blank=True, related_name='postlike')
    # NOTE(review): presumably marks the post as answered/resolved — confirm
    # against the views that toggle it.
    status = models.BooleanField(default=False)
    def __str__(self):
        return self.question
class PostImage(models.Model):
    """An image attached to a post."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='PostImages')
class Comment(models.Model):
    """A user's comment on a post."""
    content = models.TextField(max_length=300)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    # Users who liked this comment.
    likes = models.ManyToManyField(User, blank=True, related_name='commentlike')
    # Set once when the comment is created.
    timestamp = models.DateTimeField(auto_now_add=True)
class Notification(models.Model):
    """Associates a post with teachers (presumably those to be notified about
    it — confirm against the code that creates these records)."""
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
    teacher = models.ManyToManyField(Teacher, blank=True)
| 26 | 948 | 92 |
4a93d368e16d2a2855df80d717de7d6c6e8d7596 | 789 | py | Python | src/syft/core/common/storeable_object.py | dnabanita7/PySyft | ce2510e65f5bad382e88806bcde30fa38c3c76c4 | [
"Apache-2.0"
] | 2 | 2018-07-23T20:34:10.000Z | 2020-08-01T09:09:09.000Z | packages/syft/src/syft/core/common/storeable_object.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 5 | 2020-09-11T05:47:12.000Z | 2020-10-13T08:36:17.000Z | packages/syft/src/syft/core/common/storeable_object.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 1 | 2021-05-22T17:11:42.000Z | 2021-05-22T17:11:42.000Z | # stdlib
from typing import List
from typing import Optional
# syft relative
from .serde.serializable import Serializable
from .uid import UID
| 19.725 | 51 | 0.653992 | # stdlib
from typing import List
from typing import Optional
# syft relative
from .serde.serializable import Serializable
from .uid import UID
class AbstractStorableObject(Serializable):
data: object
id: UID
search_permissions: dict
@property
def icon(self) -> str:
return "🗂️"
@property
def pprint(self) -> str:
output = f"{self.icon} ({self.class_name})"
return output
@property
def class_name(self) -> str:
return str(self.__class__.__name__)
@property
def object_type(self) -> str:
raise NotImplementedError
@property
def tags(self) -> Optional[List[str]]:
raise NotImplementedError
@property
def description(self) -> Optional[str]:
raise NotImplementedError
| 299 | 327 | 23 |
ba644b5b04f381a01d7ff2350879bad2562214c3 | 19,384 | py | Python | sdk/python/pulumi_gcp/bigquery/data_transfer_config.py | dimpu47/pulumi-gcp | 38355de300a5768e11c49d344a8165ba0735deed | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/bigquery/data_transfer_config.py | dimpu47/pulumi-gcp | 38355de300a5768e11c49d344a8165ba0735deed | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_gcp/bigquery/data_transfer_config.py | dimpu47/pulumi-gcp | 38355de300a5768e11c49d344a8165ba0735deed | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['DataTransferConfig']
| 56.023121 | 187 | 0.672255 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['DataTransferConfig']
class DataTransferConfig(pulumi.CustomResource):
    """A BigQuery Data Transfer Service transfer configuration.

    A transfer configuration contains all metadata needed to perform a data
    transfer. This class is machine-generated by the Pulumi Terraform Bridge
    (tfgen); do not edit it by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 data_refresh_window_days: Optional[pulumi.Input[float]] = None,
                 data_source_id: Optional[pulumi.Input[str]] = None,
                 destination_dataset_id: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 notification_pubsub_topic: Optional[pulumi.Input[str]] = None,
                 params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 schedule: Optional[pulumi.Input[str]] = None,
                 sensitive_params: Optional[pulumi.Input[pulumi.InputType['DataTransferConfigSensitiveParamsArgs']]] = None,
                 service_account_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Represents a data transfer configuration. A transfer configuration
        contains all metadata needed to perform a data transfer.

        To get more information about Config, see:

        * [API documentation](https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/v1/projects.locations.transferConfigs/create)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/bigquery/docs/reference/datatransfer/rest/)

        > **Warning:** All arguments including `sensitive_params.secret_access_key` will be stored in the raw
        state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/docs/state/sensitive-data.html).

        ## Example Usage

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] data_refresh_window_days: The number of days to look back to automatically refresh the data.
               For example, if dataRefreshWindowDays = 10, then every day BigQuery
               reingests data for [today-10, today-1], rather than ingesting data for
               just [today-1]. Only valid if the data source supports the feature.
               Set the value to 0 to use the default value.
        :param pulumi.Input[str] data_source_id: The data source id. Cannot be changed once the transfer config is created.
        :param pulumi.Input[str] destination_dataset_id: The BigQuery target dataset id.
        :param pulumi.Input[bool] disabled: When set to true, no runs are scheduled for a given transfer.
        :param pulumi.Input[str] display_name: The user specified display name for the transfer config.
        :param pulumi.Input[str] location: The geographic location where the transfer config should reside.
               Examples: US, EU, asia-northeast1. The default value is US.
        :param pulumi.Input[str] notification_pubsub_topic: Pub/Sub topic where notifications will be sent after transfer runs
               associated with this transfer config finish.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] params: These parameters are specific to each data source.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] schedule: Data transfer schedule. If the data source does not support a custom
               schedule, this should be empty. If it is empty, the default value for
               the data source will be used. The specified times are in UTC. Examples
               of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan,
               jun 13:15, and first sunday of quarter 00:00. See more explanation
               about the format here:
               https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format
               NOTE: the granularity should be at least 8 hours, or less frequent.
        :param pulumi.Input[pulumi.InputType['DataTransferConfigSensitiveParamsArgs']] sensitive_params: Different parameters are configured primarily using the the `params` field on this
               resource. This block contains the parameters which contain secrets or passwords so that they can be marked
               sensitive and hidden from plan output. The name of the field, eg: secret_access_key, will be the key
               in the `params` map in the api request.
               Credentials may not be specified in both locations and will cause an error. Changing from one location
               to a different credential configuration in the config will require an apply to update state.
               Structure is documented below.
        :param pulumi.Input[str] service_account_name: Optional service account name. If this field is set, transfer config will
               be created with this service account credentials. It requires that
               requesting user calling this API has permissions to act as this service account.
        """
        # Legacy positional-name/opts spellings are still accepted but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # Without an opts.id this is a create, so required properties must be present.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['data_refresh_window_days'] = data_refresh_window_days
            if data_source_id is None:
                raise TypeError("Missing required property 'data_source_id'")
            __props__['data_source_id'] = data_source_id
            if destination_dataset_id is None:
                raise TypeError("Missing required property 'destination_dataset_id'")
            __props__['destination_dataset_id'] = destination_dataset_id
            __props__['disabled'] = disabled
            if display_name is None:
                raise TypeError("Missing required property 'display_name'")
            __props__['display_name'] = display_name
            __props__['location'] = location
            __props__['notification_pubsub_topic'] = notification_pubsub_topic
            if params is None:
                raise TypeError("Missing required property 'params'")
            __props__['params'] = params
            __props__['project'] = project
            __props__['schedule'] = schedule
            __props__['sensitive_params'] = sensitive_params
            __props__['service_account_name'] = service_account_name
            __props__['name'] = None
        super(DataTransferConfig, __self__).__init__(
            'gcp:bigquery/dataTransferConfig:DataTransferConfig',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            data_refresh_window_days: Optional[pulumi.Input[float]] = None,
            data_source_id: Optional[pulumi.Input[str]] = None,
            destination_dataset_id: Optional[pulumi.Input[str]] = None,
            disabled: Optional[pulumi.Input[bool]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            notification_pubsub_topic: Optional[pulumi.Input[str]] = None,
            params: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            project: Optional[pulumi.Input[str]] = None,
            schedule: Optional[pulumi.Input[str]] = None,
            sensitive_params: Optional[pulumi.Input[pulumi.InputType['DataTransferConfigSensitiveParamsArgs']]] = None,
            service_account_name: Optional[pulumi.Input[str]] = None) -> 'DataTransferConfig':
        """
        Get an existing DataTransferConfig resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[float] data_refresh_window_days: The number of days to look back to automatically refresh the data.
               For example, if dataRefreshWindowDays = 10, then every day BigQuery
               reingests data for [today-10, today-1], rather than ingesting data for
               just [today-1]. Only valid if the data source supports the feature.
               Set the value to 0 to use the default value.
        :param pulumi.Input[str] data_source_id: The data source id. Cannot be changed once the transfer config is created.
        :param pulumi.Input[str] destination_dataset_id: The BigQuery target dataset id.
        :param pulumi.Input[bool] disabled: When set to true, no runs are scheduled for a given transfer.
        :param pulumi.Input[str] display_name: The user specified display name for the transfer config.
        :param pulumi.Input[str] location: The geographic location where the transfer config should reside.
               Examples: US, EU, asia-northeast1. The default value is US.
        :param pulumi.Input[str] name: The resource name of the transfer config. Transfer config names have the form
               projects/{projectId}/locations/{location}/transferConfigs/{configId}. Where configId is usually a uuid, but this is not
               required. The name is ignored when creating a transfer config.
        :param pulumi.Input[str] notification_pubsub_topic: Pub/Sub topic where notifications will be sent after transfer runs
               associated with this transfer config finish.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] params: These parameters are specific to each data source.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] schedule: Data transfer schedule. If the data source does not support a custom
               schedule, this should be empty. If it is empty, the default value for
               the data source will be used. The specified times are in UTC. Examples
               of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan,
               jun 13:15, and first sunday of quarter 00:00. See more explanation
               about the format here:
               https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format
               NOTE: the granularity should be at least 8 hours, or less frequent.
        :param pulumi.Input[pulumi.InputType['DataTransferConfigSensitiveParamsArgs']] sensitive_params: Different parameters are configured primarily using the the `params` field on this
               resource. This block contains the parameters which contain secrets or passwords so that they can be marked
               sensitive and hidden from plan output. The name of the field, eg: secret_access_key, will be the key
               in the `params` map in the api request.
               Credentials may not be specified in both locations and will cause an error. Changing from one location
               to a different credential configuration in the config will require an apply to update state.
               Structure is documented below.
        :param pulumi.Input[str] service_account_name: Optional service account name. If this field is set, transfer config will
               be created with this service account credentials. It requires that
               requesting user calling this API has permissions to act as this service account.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["data_refresh_window_days"] = data_refresh_window_days
        __props__["data_source_id"] = data_source_id
        __props__["destination_dataset_id"] = destination_dataset_id
        __props__["disabled"] = disabled
        __props__["display_name"] = display_name
        __props__["location"] = location
        __props__["name"] = name
        __props__["notification_pubsub_topic"] = notification_pubsub_topic
        __props__["params"] = params
        __props__["project"] = project
        __props__["schedule"] = schedule
        __props__["sensitive_params"] = sensitive_params
        __props__["service_account_name"] = service_account_name
        return DataTransferConfig(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="dataRefreshWindowDays")
    def data_refresh_window_days(self) -> pulumi.Output[Optional[float]]:
        """
        The number of days to look back to automatically refresh the data.
        For example, if dataRefreshWindowDays = 10, then every day BigQuery
        reingests data for [today-10, today-1], rather than ingesting data for
        just [today-1]. Only valid if the data source supports the feature.
        Set the value to 0 to use the default value.
        """
        return pulumi.get(self, "data_refresh_window_days")
    @property
    @pulumi.getter(name="dataSourceId")
    def data_source_id(self) -> pulumi.Output[str]:
        """
        The data source id. Cannot be changed once the transfer config is created.
        """
        return pulumi.get(self, "data_source_id")
    @property
    @pulumi.getter(name="destinationDatasetId")
    def destination_dataset_id(self) -> pulumi.Output[str]:
        """
        The BigQuery target dataset id.
        """
        return pulumi.get(self, "destination_dataset_id")
    @property
    @pulumi.getter
    def disabled(self) -> pulumi.Output[Optional[bool]]:
        """
        When set to true, no runs are scheduled for a given transfer.
        """
        return pulumi.get(self, "disabled")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        The user specified display name for the transfer config.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The geographic location where the transfer config should reside.
        Examples: US, EU, asia-northeast1. The default value is US.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The resource name of the transfer config. Transfer config names have the form
        projects/{projectId}/locations/{location}/transferConfigs/{configId}. Where configId is usually a uuid, but this is not
        required. The name is ignored when creating a transfer config.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="notificationPubsubTopic")
    def notification_pubsub_topic(self) -> pulumi.Output[Optional[str]]:
        """
        Pub/Sub topic where notifications will be sent after transfer runs
        associated with this transfer config finish.
        """
        return pulumi.get(self, "notification_pubsub_topic")
    @property
    @pulumi.getter
    def params(self) -> pulumi.Output[Mapping[str, str]]:
        """
        These parameters are specific to each data source.
        """
        return pulumi.get(self, "params")
    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @property
    @pulumi.getter
    def schedule(self) -> pulumi.Output[Optional[str]]:
        """
        Data transfer schedule. If the data source does not support a custom
        schedule, this should be empty. If it is empty, the default value for
        the data source will be used. The specified times are in UTC. Examples
        of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan,
        jun 13:15, and first sunday of quarter 00:00. See more explanation
        about the format here:
        https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format
        NOTE: the granularity should be at least 8 hours, or less frequent.
        """
        return pulumi.get(self, "schedule")
    @property
    @pulumi.getter(name="sensitiveParams")
    def sensitive_params(self) -> pulumi.Output[Optional['outputs.DataTransferConfigSensitiveParams']]:
        """
        Different parameters are configured primarily using the the `params` field on this
        resource. This block contains the parameters which contain secrets or passwords so that they can be marked
        sensitive and hidden from plan output. The name of the field, eg: secret_access_key, will be the key
        in the `params` map in the api request.
        Credentials may not be specified in both locations and will cause an error. Changing from one location
        to a different credential configuration in the config will require an apply to update state.
        Structure is documented below.
        """
        return pulumi.get(self, "sensitive_params")
    @property
    @pulumi.getter(name="serviceAccountName")
    def service_account_name(self) -> pulumi.Output[Optional[str]]:
        """
        Optional service account name. If this field is set, transfer config will
        be created with this service account credentials. It requires that
        requesting user calling this API has permissions to act as this service account.
        """
        return pulumi.get(self, "service_account_name")
    def translate_output_property(self, prop):
        """Translate a provider output property name from camelCase to snake_case."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        """Translate an input property name from snake_case to camelCase."""
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 175 | 18,765 | 23 |
1383972698cf45b51067c65b9ab1a3a6664f2f26 | 435 | py | Python | src/ralph/ui/templatetags/di.py | vi4m/ralph | 2af767ee23d89be9e6cec0a537350a1ce8840bd1 | [
"Apache-2.0"
] | 1 | 2018-09-01T14:14:08.000Z | 2018-09-01T14:14:08.000Z | src/ralph/ui/templatetags/di.py | srikanth4372/sample | 127b5742ae464d42909a14d71e3c10c241ec3a23 | [
"Apache-2.0"
] | 1 | 2019-08-14T10:03:45.000Z | 2019-08-14T10:03:45.000Z | src/ralph/ui/templatetags/di.py | srikanth4372/sample | 127b5742ae464d42909a14d71e3c10c241ec3a23 | [
"Apache-2.0"
] | 1 | 2019-08-14T09:59:42.000Z | 2019-08-14T09:59:42.000Z | # -*- coding: utf-8 -*-
"""Tags for dependency injection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django import template
from ralph.util.di import get_extra_data
register = template.Library()
@register.simple_tag
| 22.894737 | 54 | 0.774713 | # -*- coding: utf-8 -*-
"""Tags for dependency injection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from django import template
from ralph.util.di import get_extra_data
register = template.Library()
@register.simple_tag
def extra_inclusion(name, *args, **kwargs):
    """Render the injected extra data registered under *name*.

    Returns an empty string when the dependency-injection hook yields nothing
    (or a falsy value), so the template never renders ``None``.
    """
    extra = get_extra_data(name, *args, **kwargs)
    if not extra:
        return ''
    return extra
| 77 | 0 | 22 |
e15d9bdc2db3278d0f18e8f36e5453aebac6080a | 4,110 | py | Python | data/test/python/e15d9bdc2db3278d0f18e8f36e5453aebac6080acollect.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/test/python/e15d9bdc2db3278d0f18e8f36e5453aebac6080acollect.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/test/python/e15d9bdc2db3278d0f18e8f36e5453aebac6080acollect.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | import sys
import os.path
from urlparse import urljoin
import time
import urllib2
from bs4 import BeautifulSoup
| 38.411215 | 113 | 0.513869 | import sys
import os.path
from urlparse import urljoin
import time
import urllib2
from bs4 import BeautifulSoup
class Collector:
    """Crawler for Ontario public-sector salary disclosure pages.

    Downloads the 2014 salary compendium spreadsheet, then breadth-first
    scrapes the fin.gov.on.ca salary-disclosure pages, caching each page on
    disk under ``<local_save_dir_base>/scrape/``.

    NOTE(review): this is Python 2 code (``print`` statements, ``urllib2``,
    ``urlparse``); it will not run under Python 3 without porting.
    """
    def __init__(self):
        pass
    def download_2015_excel(self, local_save_dir_base):
        """Download the 2014 compendium .xlsx into ``local_save_dir_base``."""
        uri = 'http://ontario.ca/sites/default/files/pssd_compendium_2014.xlsx'
        response = urllib2.urlopen(uri)
        excel_file = response.read()
        save_file_path = local_save_dir_base + 'pssd_compendium_2014.xlsx'
        print "Saving... " + uri + " to " + save_file_path
        # NOTE(review): binary content written with mode 'w' (not 'wb'); on
        # Python 2 / POSIX this happens to work, but it is fragile.
        with open(save_file_path, 'w') as text_file:
            text_file.write(excel_file)
    def get_relevant_links(self, html, sourceuri):
        """Extract absolute salary-disclosure links from an HTML page.

        Skips mailto links, fragments, and document downloads; keeps only
        links under the fin.gov.on.ca salarydisclosure tree.
        """
        relevant_links = []
        soup = BeautifulSoup(html)
        for link in soup.findAll('a'):
            href = link.get('href')
            if href is not None:
                # Resolve relative hrefs against the page they came from.
                href = urljoin(sourceuri, href)
                if (href[:7] != 'mailto:'):
                    if not any(x in href for x in ['#', '.pdf', '.xls', '.doc']):
                        if 'http://www.fin.gov.on.ca/en/publications/salarydisclosure/' in href:
                            relevant_links.extend([href])
        return relevant_links
    def run(self, local_save_dir_base):
        """Crawl the disclosure site, saving every visited page to disk."""
        self.download_2015_excel(local_save_dir_base)
        local_save_dir_base = local_save_dir_base + 'scrape/'
        scraped_links = []
        # Seed the scraper with a good set of what we really care about
        unscraped_links = ['http://www.fin.gov.on.ca/en/publications/salarydisclosure/pssd/',
                           'http://www.ontario.ca/government/public-sector-salary-disclosure/']
        for i in range(1997,2014):
            unscraped_links.extend(['http://www.fin.gov.on.ca/en/publications/salarydisclosure/' + str(i) + '/'])
        root_remote_dir = 'http://www.fin.gov.on.ca/en/publications/salarydisclosure/'
        # Breadth-first crawl: pop(0) treats unscraped_links as a FIFO queue.
        while len(unscraped_links) > 0:
            uri = unscraped_links.pop(0)
            scraped_links.extend([uri])
            # Local cache filename mirrors the path under the remote root;
            # '?' is not valid in filenames on some systems, so swap it.
            save_name = uri[len(root_remote_dir):]
            save_name = save_name.replace('?','&')
            if uri[-1:] == '/':
                save_name = save_name + "index.html"
            if os.path.isfile(local_save_dir_base + save_name):
                # Reuse the cached copy instead of re-fetching.
                with open(local_save_dir_base + save_name, "r") as text_file:
                    html = text_file.read()
            else:
                # Be polite to the server between fetches.
                time.sleep(5)
                try:
                    print "Getting... " + uri
                    response = urllib2.urlopen(uri)
                    html = response.read()
                    uri_new = response.geturl()
                except urllib2.HTTPError:
                    # Record the failure as the page body so we don't retry it.
                    # NOTE(review): non-HTTP failures (URLError, timeouts) are
                    # not caught and would abort the crawl.
                    print "Swallowing error: " + str(sys.exc_info()[0])
                    uri_new = uri
                    html = str(sys.exc_info()[0])
                if uri_new != uri:
                    # Follow redirects: save under the final URL and mark it
                    # visited so it is not fetched again.
                    save_name = uri_new[len(root_remote_dir):]
                    if uri_new[-1:] == '/':
                        save_name = save_name + "index.html"
                    save_name = save_name.replace('?','&')
                    scraped_links.extend([uri_new])
                    print "Redirects to... " + uri_new
                    uri = uri_new
            save_dir = save_name[:save_name.rfind('/')]
            if not os.path.exists(local_save_dir_base + save_dir):
                os.makedirs(local_save_dir_base + save_dir)
            print "Saving... " + local_save_dir_base + save_name
            with open(local_save_dir_base + save_name, "w") as text_file:
                text_file.write(html)
            links = self.get_relevant_links(html, uri)
            for link in links:
                if link not in scraped_links:
                    if link not in unscraped_links:
                        unscraped_links.extend([link])
ab3a14612ad0e1a79bdbfb12fbc3a9ccec664355 | 2,827 | py | Python | sdk/python/pulumi_azure_native/mixedreality/v20210301preview/list_remote_rendering_account_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/mixedreality/v20210301preview/list_remote_rendering_account_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/mixedreality/v20210301preview/list_remote_rendering_account_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListRemoteRenderingAccountKeysResult',
'AwaitableListRemoteRenderingAccountKeysResult',
'list_remote_rendering_account_keys',
]
@pulumi.output_type
class ListRemoteRenderingAccountKeysResult:
    """
    Developer Keys of account
    """
    def __init__(__self__, primary_key=None, secondary_key=None):
        # Restored constructor: pulumi.runtime.invoke(..., typ=...) builds this
        # result object by passing each field as a keyword argument, so the
        # class must validate and store them via pulumi.set.
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)
    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> str:
        """
        value of primary key.
        """
        return pulumi.get(self, "primary_key")
    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """
        value of secondary key.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListRemoteRenderingAccountKeysResult(ListRemoteRenderingAccountKeysResult):
    # Restored wrapper class: it is exported in __all__ and returned by
    # list_remote_rendering_account_keys below, so it must be defined.
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this method a generator function, which
        # satisfies the awaitable protocol; the generator's return value (a
        # plain result object) becomes the value of `await`.
        if False:
            yield self
        return ListRemoteRenderingAccountKeysResult(
            primary_key=self.primary_key,
            secondary_key=self.secondary_key)
def list_remote_rendering_account_keys(account_name: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListRemoteRenderingAccountKeysResult:
    """
    Developer Keys of account
    :param str account_name: Name of an Mixed Reality Account.
    :param str resource_group_name: Name of an Azure resource group.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:mixedreality/v20210301preview:listRemoteRenderingAccountKeys', __args__, opts=opts, typ=ListRemoteRenderingAccountKeysResult).value
    return AwaitableListRemoteRenderingAccountKeysResult(
        primary_key=__ret__.primary_key,
        secondary_key=__ret__.secondary_key)
| 35.78481 | 181 | 0.692961 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListRemoteRenderingAccountKeysResult',
'AwaitableListRemoteRenderingAccountKeysResult',
'list_remote_rendering_account_keys',
]
@pulumi.output_type
class ListRemoteRenderingAccountKeysResult:
"""
Developer Keys of account
"""
def __init__(__self__, primary_key=None, secondary_key=None):
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
value of primary key.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
value of secondary key.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListRemoteRenderingAccountKeysResult(ListRemoteRenderingAccountKeysResult):
    # Adapter that also makes the eagerly-resolved invoke result awaitable,
    # so it works from both sync and async Pulumi programs.
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns this method into a generator function,
        # which is what the awaitable protocol requires; the generator's
        # return value (a plain result object) becomes the value of `await`.
        if False:
            yield self
        return ListRemoteRenderingAccountKeysResult(
            primary_key=self.primary_key,
            secondary_key=self.secondary_key)
def list_remote_rendering_account_keys(account_name: Optional[str] = None,
                                       resource_group_name: Optional[str] = None,
                                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListRemoteRenderingAccountKeysResult:
    """
    Developer Keys of account
    :param str account_name: Name of an Mixed Reality Account.
    :param str resource_group_name: Name of an Azure resource group.
    """
    invoke_args = {
        'accountName': account_name,
        'resourceGroupName': resource_group_name,
    }
    # Fill in default invoke options and the SDK version when not supplied.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke(
        'azure-native:mixedreality/v20210301preview:listRemoteRenderingAccountKeys',
        invoke_args,
        opts=opts,
        typ=ListRemoteRenderingAccountKeysResult).value
    return AwaitableListRemoteRenderingAccountKeysResult(
        primary_key=result.primary_key,
        secondary_key=result.secondary_key)
| 617 | 69 | 75 |
6f45b5dedd82dae3a3e46637a927226c8b7c8cfa | 2,081 | py | Python | gea/migrations/0008_auto_20210324_1808.py | quijot/gea-package | 1b75578cbf72bc1823e5190c30b2f47f6b4e4e8e | [
"MIT"
] | null | null | null | gea/migrations/0008_auto_20210324_1808.py | quijot/gea-package | 1b75578cbf72bc1823e5190c30b2f47f6b4e4e8e | [
"MIT"
] | 3 | 2020-02-11T21:26:49.000Z | 2021-01-26T17:14:17.000Z | gea/migrations/0008_auto_20210324_1808.py | quijot/gea-package | 1b75578cbf72bc1823e5190c30b2f47f6b4e4e8e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-03-24 21:08
from django.db import migrations, models
import django.db.models.deletion
| 33.031746 | 122 | 0.582412 | # Generated by Django 3.1.5 on 2021-03-24 21:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2021-03-24): drops the ``celular``
    fields and adjusts several CharField/ForeignKey definitions."""
    dependencies = [
        ('gea', '0007_auto_20210324_1222'),
    ]
    # Generated operations — do not edit by hand; verbose_name values are
    # intentionally Spanish and must keep their accented characters.
    operations = [
        migrations.RemoveField(
            model_name='persona',
            name='celular',
        ),
        migrations.RemoveField(
            model_name='profesional',
            name='celular',
        ),
        migrations.AlterField(
            model_name='catastro',
            name='poligono',
            field=models.CharField(blank=True, max_length=10, verbose_name='polígono'),
        ),
        migrations.AlterField(
            model_name='catastro',
            name='seccion',
            field=models.CharField(blank=True, max_length=10, verbose_name='sección'),
        ),
        migrations.AlterField(
            model_name='catastrolocal',
            name='poligono',
            field=models.CharField(blank=True, max_length=20, verbose_name='polígono'),
        ),
        migrations.AlterField(
            model_name='catastrolocal',
            name='seccion',
            field=models.CharField(blank=True, max_length=20, verbose_name='sección'),
        ),
        migrations.AlterField(
            model_name='expediente',
            name='id',
            field=models.IntegerField(primary_key=True, serialize=False, verbose_name='Expediente'),
        ),
        migrations.AlterField(
            model_name='persona',
            name='telefono',
            field=models.CharField(blank=True, max_length=15, verbose_name='teléfono'),
        ),
        migrations.AlterField(
            model_name='profesional',
            name='telefono',
            field=models.CharField(blank=True, max_length=15, verbose_name='teléfono'),
        ),
        migrations.AlterField(
            model_name='profesional',
            name='titulo',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='gea.titulo'),
        ),
    ]
| 0 | 1,940 | 23 |
2bd6a02145154ea659db2ea27fe0a3202a37d8e1 | 2,303 | py | Python | app/core/admin.py | nguyenlinh171/recipe-app-api | 339feb3df6aed6d424a19ca0748a49cd6ed173be | [
"MIT"
] | null | null | null | app/core/admin.py | nguyenlinh171/recipe-app-api | 339feb3df6aed6d424a19ca0748a49cd6ed173be | [
"MIT"
] | null | null | null | app/core/admin.py | nguyenlinh171/recipe-app-api | 339feb3df6aed6d424a19ca0748a49cd6ed173be | [
"MIT"
] | null | null | null | from core import models
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
"""Need to import the default Django user admin, need to change some
of the class variables to support our custom user admin using email
instead of username"""
from django.utils.translation import gettext as _
"""Import gettext function to convert strings in python to human
readable text, in this context, the strings get passed through the
transalation engine so we're not doing anything with translation.
If you want to extend the code to support multiple languages then this
would make easier for you to do that bcoz you just set up the transalation
files and then it'll convert the text appropriately"""
class UserAdmin(BaseUserAdmin):
    """Create our custom user admin by extending the BaseUserAdmin"""
    ordering = ['id']
    list_display = ['email', 'name']
    # NOTE(review): the bare triple-quoted strings below are no-op expression
    # statements used as commentary; they are kept as-is.
    """Define the field set for test 2, each bracket is a section,
    1st section: no title, contains 2 fields email, pw
    2nd section: title: personal info, contains 1 field, needs to
    add a comma after the only field otherwise it'll be recognised as
    a string and won't work
    3rd section: permission, contains 3 fields
    4th section: Important dates, contains 1 field"""
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {'fields': ('is_active', 'is_staff', 'is_superuser')}
        ),
        (_('Important dates'), {'fields': ('last_login',)}),
    )
    """Define the additional field set for test 3 to include email,
    password, password 2 to create a new user. The user admin by
    default takes an add field sets which defines the fields that you
    include on the add page which is the same as the create user page,
    remember to add the comma at the end of the first item as it's the
    only item, w/o the comma, python will be confused it as a string.
    Classes assigned to the form: default option"""
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )
admin.site.register(models.User, UserAdmin)
"""Register the site in the Django admin"""
| 42.648148 | 74 | 0.684325 | from core import models
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
"""Need to import the default Django user admin, need to change some
of the class variables to support our custom user admin using email
instead of username"""
from django.utils.translation import gettext as _
"""Import gettext function to convert strings in python to human
readable text, in this context, the strings get passed through the
transalation engine so we're not doing anything with translation.
If you want to extend the code to support multiple languages then this
would make easier for you to do that bcoz you just set up the transalation
files and then it'll convert the text appropriately"""
class UserAdmin(BaseUserAdmin):
    """Create our custom user admin by extending the BaseUserAdmin"""
    ordering = ['id']
    list_display = ['email', 'name']
    # NOTE(review): the bare triple-quoted strings below are no-op expression
    # statements used as commentary; they are kept as-is.
    """Define the field set for test 2, each bracket is a section,
    1st section: no title, contains 2 fields email, pw
    2nd section: title: personal info, contains 1 field, needs to
    add a comma after the only field otherwise it'll be recognised as
    a string and won't work
    3rd section: permission, contains 3 fields
    4th section: Important dates, contains 1 field"""
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal Info'), {'fields': ('name',)}),
        (
            _('Permissions'),
            {'fields': ('is_active', 'is_staff', 'is_superuser')}
        ),
        (_('Important dates'), {'fields': ('last_login',)}),
    )
    """Define the additional field set for test 3 to include email,
    password, password 2 to create a new user. The user admin by
    default takes an add field sets which defines the fields that you
    include on the add page which is the same as the create user page,
    remember to add the comma at the end of the first item as it's the
    only item, w/o the comma, python will be confused it as a string.
    Classes assigned to the form: default option"""
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')
        }),
    )
admin.site.register(models.User, UserAdmin)
"""Register the site in the Django admin"""
| 0 | 0 | 0 |
1c56cafe82e08f64b415f37d537e652b5bfed036 | 13,271 | py | Python | src/compas_slicer/pre_processing/preprocessing_utils/compound_target.py | Spiritdude/compas_slicer | 58dedfbebd123258506174453d5f73d18d745819 | [
"MIT"
] | 14 | 2021-01-02T10:48:15.000Z | 2022-03-18T17:18:31.000Z | src/compas_slicer/pre_processing/preprocessing_utils/compound_target.py | Spiritdude/compas_slicer | 58dedfbebd123258506174453d5f73d18d745819 | [
"MIT"
] | 34 | 2021-01-19T10:33:09.000Z | 2022-03-14T07:37:45.000Z | src/compas_slicer/pre_processing/preprocessing_utils/compound_target.py | compas-dev/compas_slicer | 58dedfbebd123258506174453d5f73d18d745819 | [
"MIT"
] | 9 | 2021-01-02T10:49:09.000Z | 2022-03-22T18:35:55.000Z | import numpy as np
import math
from compas.datastructures import Mesh
import compas_slicer.utilities as utils
import logging
import networkx as nx
from compas_slicer.slicers.slice_utilities import create_graph_from_mesh_vkeys
from compas_slicer.pre_processing.preprocessing_utils.geodesics import get_igl_EXACT_geodesic_distances, \
get_custom_HEAT_geodesic_distances
import statistics
logger = logging.getLogger('logger')
__all__ = ['CompoundTarget',
'blend_union_list',
'stairs_union_list',
'chamfer_union_list']
class CompoundTarget:
    """
    Represents a desired user-provided target. It acts as a key-frame that controls the print paths
    orientations. After the curved slicing , the print paths will be aligned to the compound target close to
    its area. The vertices that belong to the target are marked with their vertex attributes; they have
    data['v_attr'] = value.
    Attributes
    ----------
    mesh: :class:`compas.datastructures.Mesh`
    v_attr : str
        The key of the attribute dict to be checked.
    value: int
        The value of the attribute dict with key=v_attr. If in a vertex data[v_attr]==value then the vertex is part of
        this target.
    DATA_PATH: str
    union_method: str
        'min', 'smooth', 'chamfer' or 'stairs' (see get_distance).
    union_params: list
        Parameters of the union method (e.g. blend radius, stairs step count).
    geodesics_method: str
        'exact_igl' exact igl geodesic distances
        'heat' custom heat geodesic distances
    anisotropic_scaling: bool
        This is not yet implemented
    """
    # --- Neighborhoods clustering
    def find_targets_connected_components(self):
        """
        Clusters all the vertices that belong to the target into neighborhoods using a graph.
        Each target can have an arbitrary number of neighborhoods/clusters.
        Fills in the attributes: self.all_target_vkeys, self.clustered_vkeys, self.number_of_boundaries
        """
        self.all_target_vkeys = [vkey for vkey, data in self.mesh.vertices(data=True) if
                                 data[self.v_attr] == self.value]
        assert len(self.all_target_vkeys) > 0, "There are no vertices in the mesh with the attribute : " \
                                               + self.v_attr + ", value : %d" % self.value + " .Probably you made a " \
                                                                                             "mistake while creating the targets. "
        G = create_graph_from_mesh_vkeys(self.mesh, self.all_target_vkeys)
        assert len(list(G.nodes())) == len(self.all_target_vkeys)
        self.number_of_boundaries = len(list(nx.connected_components(G)))
        for i, cp in enumerate(nx.connected_components(G)):
            self.clustered_vkeys.append(list(cp))
        logger.info("Compound target with 'boundary'=%d. Number of connected_components : %d" % (
            self.value, len(list(nx.connected_components(G)))))
    # --- Geodesic distances
    def compute_geodesic_distances(self):
        """
        Computes the geodesic distances from each of the target's neighborhoods to all the mesh vertices.
        Fills in the distances attributes.
        """
        if self.geodesics_method == 'exact_igl':
            distances_lists = [get_igl_EXACT_geodesic_distances(self.mesh, vstarts) for vstarts in
                               self.clustered_vkeys]
        elif self.geodesics_method == 'heat':
            distances_lists = [get_custom_HEAT_geodesic_distances(self.mesh, vstarts, self.OUTPUT_PATH) for vstarts in
                               self.clustered_vkeys]
        else:
            raise ValueError('Unknown geodesics method : ' + self.geodesics_method)
        distances_lists = [list(dl) for dl in distances_lists]  # number_of_boundaries x #V
        self.update_distances_lists(distances_lists)
    def update_distances_lists(self, distances_lists):
        """
        Fills in the distances attributes.
        """
        self._distances_lists = distances_lists
        self._distances_lists_flipped = []  # empty
        # Transpose: per-boundary lists -> per-vertex lists of boundary distances.
        for i in range(self.VN):
            current_values = [self._distances_lists[list_index][i] for list_index in range(self.number_of_boundaries)]
            self._distances_lists_flipped.append(current_values)
        self._np_distances_lists_flipped = np.array(self._distances_lists_flipped)
        self._max_dist = np.max(self._np_distances_lists_flipped)
    # --- Uneven weights
    @property
    def has_uneven_weights(self):
        """ Returns True if the target has uneven_weights calculated, False otherwise. """
        return len(self.weight_max_per_cluster) > 0
    def compute_uneven_boundaries_weight_max(self, other_target):
        """
        If the target has multiple neighborhoods/clusters of vertices, then it computes their maximum distance from
        the other_target. Based on that it calculates their weight_max for the interpolation process
        """
        if self.number_of_boundaries > 1:
            ds_avg_HIGH = self.get_boundaries_rel_dist_from_other_target(other_target)
            max_param = max(ds_avg_HIGH)
            for i, d in enumerate(ds_avg_HIGH):  # offset all distances except the maximum one
                if abs(d - max_param) > 0.01:  # if it isn't the max value
                    ds_avg_HIGH[i] = d + self.offset
            self.weight_max_per_cluster = [d / max_param for d in ds_avg_HIGH]
            logger.info('weight_max_per_cluster : ' + str(self.weight_max_per_cluster))
        else:
            logger.info("Did not compute_norm_of_gradient uneven boundaries, target consists of single component")
    # --- Relation to other target
    def get_boundaries_rel_dist_from_other_target(self, other_target, avg_type='median'):
        """
        Returns a list, one relative distance value per connected boundary neighborhood.
        That is the average of the distances of the vertices of that boundary neighborhood from the other_target.
        """
        distances = []
        for vi_starts in self.clustered_vkeys:
            ds = [other_target.get_distance(vi) for vi in vi_starts]
            if avg_type == 'mean':
                distances.append(statistics.mean(ds))
            else:  # 'median'
                distances.append(statistics.median(ds))
        return distances
    def get_avg_distances_from_other_target(self, other_target):
        """
        Returns the average distance of the vertices of the other_target from this target.
        """
        # NOTE(review): self.get_all_distances() is recomputed on every loop
        # iteration; hoisting it out of the loop would avoid O(V*T) work.
        extreme_distances = []
        for v_index in other_target.all_target_vkeys:
            extreme_distances.append(self.get_all_distances()[v_index])
        return np.average(np.array(extreme_distances))
    #############################
    # --- get all distances
    # All distances
    def get_all_distances(self):
        """ Returns the resulting distances per every vertex. """
        return [self.get_distance(i) for i in range(self.VN)]
    def get_all_clusters_distances_dict(self):
        """ Returns dict. keys: index of connected target neighborhood, value: list, distances (one per vertex). """
        return {i: self._distances_lists[i] for i in range(self.number_of_boundaries)}
    def get_max_dist(self):
        """ Returns the maximum distance that the target has on a mesh vertex. """
        return self._max_dist
    #############################
    # --- per vkey distances
    def get_all_distances_for_vkey(self, i):
        """ Returns distances from each cluster separately for vertex i. Smooth union doesn't play here any role. """
        return [self._distances_lists[list_index][i] for list_index in range(self.number_of_boundaries)]
    def get_distance(self, i):
        """ Return get_distance for vertex with vkey i. """
        if self.union_method == 'min':
            # --- simple union
            return np.min(self._np_distances_lists_flipped[i])
        elif self.union_method == 'smooth':
            # --- blend (smooth) union
            return blend_union_list(values=self._np_distances_lists_flipped[i], r=self.union_params[0])
        elif self.union_method == 'chamfer':
            # --- chamfer union
            return chamfer_union_list(values=self._np_distances_lists_flipped[i], r=self.union_params[0])
        elif self.union_method == 'stairs':
            # --- stairs union
            return stairs_union_list(values=self._np_distances_lists_flipped[i], r=self.union_params[0],
                                     n=self.union_params[1])
        else:
            raise ValueError("Unknown Union method : ", self.union_method)
    #############################
    # --- scalar field smoothing
    def laplacian_smoothing(self, iterations, strength):
        """ Smooth the distances on the mesh, using iterative laplacian smoothing. """
        L = utils.get_mesh_cotmatrix_igl(self.mesh, fix_boundaries=True)
        new_distances_lists = []
        logger.info('Laplacian smoothing of all distances')
        for i, a in enumerate(self._distances_lists):
            a = np.array(a)  # a: numpy array containing the attribute to be smoothed
            for _ in range(iterations):  # iterative smoothing
                a_prime = a + strength * L * a
                a = a_prime
            new_distances_lists.append(list(a))
        self.update_distances_lists(new_distances_lists)
    #############################
    # ------ output
    def save_distances(self, name):
        """
        Save distances to json.
        Saves one list with distance values (one per vertex).
        Parameters
        ----------
        name: str, name of json to be saved
        """
        utils.save_to_json(self.get_all_distances(), self.OUTPUT_PATH, name)
    # ------ assign new Mesh
    def assign_new_mesh(self, mesh):
        """ When the base mesh changes, a new mesh needs to be assigned. """
        # Round-trip through json so the stored mesh is an independent copy.
        # NOTE(review): the file is written with a '.obj' extension although
        # its content is json — confirm this is intentional.
        mesh.to_json(self.OUTPUT_PATH + "/temp.obj")
        mesh = Mesh.from_json(self.OUTPUT_PATH + "/temp.obj")
        self.mesh = mesh
        self.VN = len(list(self.mesh.vertices()))
####################
# unions on lists
def blend_union_list(values, r):
    """
    Return the smooth (blend) union of all distance values in `values`.

    Parameters
    ----------
    values: iterable of float
        Distance values to be combined.
    r: float
        Blend radius; minima closer than `r` are merged smoothly.

    Returns
    -------
    float
        The blended minimum of `values` (float('inf') for an empty iterable).
    """
    # Seed with +inf instead of the previous magic constant 9999999, which
    # silently distorted the result whenever a distance approached or exceeded
    # it. blend_union(inf, d, r) reduces exactly to d, so the fold is correct.
    d_result = float('inf')
    for d in values:
        d_result = blend_union(d_result, d, r)
    return d_result
def stairs_union_list(values, r, n):
    """
    Return the stairs union of all distance values in `values`.

    Parameters
    ----------
    values: iterable of float
        Distance values to be combined.
    r: float
        Radius of the stepped transition region.
    n: int
        Step count; the transition gets n - 1 peaks.

    Returns
    -------
    float
        The stepped minimum of `values` (float('inf') for an empty iterable).
    """
    # Seed with +inf instead of the previous magic constant 9999999, which was
    # wrong for distances near or above it. stairs_union is undefined for an
    # infinite operand (its modulo yields NaN), so the first value is taken
    # as-is — exactly what the huge seed effectively did. The unused
    # enumerate() index of the original loop is also dropped.
    d_result = float('inf')
    for d in values:
        d_result = d if math.isinf(d_result) else stairs_union(d_result, d, r, n)
    return d_result
####################
# unions on pairs
def blend_union(da, db, r):
    """Smoothly combine two distance values with blend radius ``r``.

    Behaves exactly like ``min(da, db)`` when the two values differ by more
    than ``r``; inside that band the transition is rounded off quadratically.
    """
    overlap = r - abs(da - db)
    if overlap < 0:
        overlap = 0
    return min(da, db) - overlap * overlap * 0.25 / r
def chamfer_union(a, b, r):
    """Chamfer union of the two distance values ``a`` and ``b`` with radius ``r``.

    Takes the plain minimum, capped by a 45-degree chamfer plane offset by ``r``.
    """
    chamfer_plane = (a - r + b) * math.sqrt(0.5)
    smaller = a if a < b else b
    return min(smaller, chamfer_plane)
def stairs_union(a, b, r, n):
    """Stairs union of the two distance values ``a`` and ``b``.

    A minimum with ``n`` steps of width ``r / n`` carved into the transition
    region of radius ``r`` (triangle-wave modulation of the chamfer plane).
    """
    step = r / n
    shifted = b - r
    sawtooth = abs((shifted - a + step) % (2 * step) - step)
    stepped = 0.5 * (shifted + a + sawtooth)
    return min(a, b, stepped)
if __name__ == "__main__":
pass
| 42.672026 | 131 | 0.644789 | import numpy as np
import math
from compas.datastructures import Mesh
import compas_slicer.utilities as utils
import logging
import networkx as nx
from compas_slicer.slicers.slice_utilities import create_graph_from_mesh_vkeys
from compas_slicer.pre_processing.preprocessing_utils.geodesics import get_igl_EXACT_geodesic_distances, \
get_custom_HEAT_geodesic_distances
import statistics
logger = logging.getLogger('logger')
__all__ = ['CompoundTarget',
'blend_union_list',
'stairs_union_list',
'chamfer_union_list']
class CompoundTarget:
"""
Represents a desired user-provided target. It acts as a key-frame that controls the print paths
orientations. After the curved slicing , the print paths will be aligned to the compound target close to
its area. The vertices that belong to the target are marked with their vertex attributes; they have
data['v_attr'] = value.
Attributes
----------
mesh: :class:`compas.datastructures.Mesh`
v_attr : str
The key of the attribute dict to be checked.
value: int
The value of the attribute dict with key=v_attr. If in a vertex data[v_attr]==value then the vertex is part of
this target.
DATA_PATH: str
has_blend_union: bool
blend_radius : float
geodesics_method: str
'exact_igl' exact igl geodesic distances
'heat' custom heat geodesic distances
anisotropic_scaling: bool
This is not yet implemented
"""
def __init__(self, mesh, v_attr, value, DATA_PATH, union_method='min', union_params=[],
geodesics_method='exact_igl', anisotropic_scaling=False):
logger.info('Creating target with attribute : ' + v_attr + '=%d' % value)
logger.info('union_method : ' + union_method + ', union_params = ' + str(union_params))
self.mesh = mesh
self.v_attr = v_attr
self.value = value
self.DATA_PATH = DATA_PATH
self.OUTPUT_PATH = utils.get_output_directory(DATA_PATH)
self.union_method = union_method
self.union_params = union_params
self.geodesics_method = geodesics_method
self.anisotropic_scaling = anisotropic_scaling # Anisotropic scaling not yet implemented
self.offset = 0
self.VN = len(list(self.mesh.vertices()))
# filled in by function 'self.find_targets_connected_components()'
self.all_target_vkeys = [] # flattened list with all vi_starts
self.clustered_vkeys = [] # nested list with all vi_starts
self.number_of_boundaries = None # int
self.weight_max_per_cluster = []
# geodesic distances
# filled in by function 'self.update_distances_lists()'
self._distances_lists = [] # nested list. Shape: number_of_boundaries x number_of_vertices
self._distances_lists_flipped = [] # nested list. Shape: number_of_vertices x number_of_boundaries
self._np_distances_lists_flipped = np.array([]) # numpy array of self._distances_lists_flipped
self._max_dist = None # maximum get_distance value from the target on any vertex of the mesh
# compute
self.find_targets_connected_components()
self.compute_geodesic_distances()
# --- Neighborhoods clustering
def find_targets_connected_components(self):
"""
Clusters all the vertices that belong to the target into neighborhoods using a graph.
Each target can have an arbitrary number of neighborhoods/clusters.
Fills in the attributes: self.all_target_vkeys, self.clustered_vkeys, self.number_of_boundaries
"""
self.all_target_vkeys = [vkey for vkey, data in self.mesh.vertices(data=True) if
data[self.v_attr] == self.value]
assert len(self.all_target_vkeys) > 0, "There are no vertices in the mesh with the attribute : " \
+ self.v_attr + ", value : %d" % self.value + " .Probably you made a " \
"mistake while creating the targets. "
G = create_graph_from_mesh_vkeys(self.mesh, self.all_target_vkeys)
assert len(list(G.nodes())) == len(self.all_target_vkeys)
self.number_of_boundaries = len(list(nx.connected_components(G)))
for i, cp in enumerate(nx.connected_components(G)):
self.clustered_vkeys.append(list(cp))
logger.info("Compound target with 'boundary'=%d. Number of connected_components : %d" % (
self.value, len(list(nx.connected_components(G)))))
# --- Geodesic distances
def compute_geodesic_distances(self):
"""
Computes the geodesic distances from each of the target's neighborhoods to all the mesh vertices.
Fills in the distances attributes.
"""
if self.geodesics_method == 'exact_igl':
distances_lists = [get_igl_EXACT_geodesic_distances(self.mesh, vstarts) for vstarts in
self.clustered_vkeys]
elif self.geodesics_method == 'heat':
distances_lists = [get_custom_HEAT_geodesic_distances(self.mesh, vstarts, self.OUTPUT_PATH) for vstarts in
self.clustered_vkeys]
else:
raise ValueError('Unknown geodesics method : ' + self.geodesics_method)
distances_lists = [list(dl) for dl in distances_lists] # number_of_boundaries x #V
self.update_distances_lists(distances_lists)
def update_distances_lists(self, distances_lists):
"""
Fills in the distances attributes.
"""
self._distances_lists = distances_lists
self._distances_lists_flipped = [] # empty
for i in range(self.VN):
current_values = [self._distances_lists[list_index][i] for list_index in range(self.number_of_boundaries)]
self._distances_lists_flipped.append(current_values)
self._np_distances_lists_flipped = np.array(self._distances_lists_flipped)
self._max_dist = np.max(self._np_distances_lists_flipped)
# --- Uneven weights
@property
def has_uneven_weights(self):
""" Returns True if the target has uneven_weights calculated, False otherwise. """
return len(self.weight_max_per_cluster) > 0
def compute_uneven_boundaries_weight_max(self, other_target):
"""
If the target has multiple neighborhoods/clusters of vertices, then it computes their maximum distance from
the other_target. Based on that it calculates their weight_max for the interpolation process
"""
if self.number_of_boundaries > 1:
ds_avg_HIGH = self.get_boundaries_rel_dist_from_other_target(other_target)
max_param = max(ds_avg_HIGH)
for i, d in enumerate(ds_avg_HIGH): # offset all distances except the maximum one
if abs(d - max_param) > 0.01: # if it isn't the max value
ds_avg_HIGH[i] = d + self.offset
self.weight_max_per_cluster = [d / max_param for d in ds_avg_HIGH]
logger.info('weight_max_per_cluster : ' + str(self.weight_max_per_cluster))
else:
logger.info("Did not compute_norm_of_gradient uneven boundaries, target consists of single component")
# --- Relation to other target
def get_boundaries_rel_dist_from_other_target(self, other_target, avg_type='median'):
    """
    Return one representative distance per connected boundary neighborhood:
    the mean or median (default) of that neighborhood's vertex distances,
    measured in the other_target's distance field.
    """
    averager = statistics.mean if avg_type == 'mean' else statistics.median
    return [averager([other_target.get_distance(vkey) for vkey in cluster])
            for cluster in self.clustered_vkeys]
def get_avg_distances_from_other_target(self, other_target):
    """
    Return the average distance of the other_target's vertices, measured
    in this target's distance field.

    Parameters
    ----------
    other_target: target whose `all_target_vkeys` are sampled.

    Returns
    -------
    numpy float: average of the sampled distances.
    """
    # Bug fix (performance): get_all_distances() was re-evaluated once per
    # vertex inside the loop, making this accidentally quadratic. Hoist it.
    all_distances = self.get_all_distances()
    sampled = [all_distances[v_index] for v_index in other_target.all_target_vkeys]
    return np.average(np.array(sampled))
#############################
# --- get all distances
# All distances
def get_all_distances(self):
    """Return the blended distance value for every vertex of the mesh."""
    return list(map(self.get_distance, range(self.VN)))
def get_all_clusters_distances_dict(self):
    """Return {cluster index: list of that cluster's per-vertex distances}."""
    result = {}
    for cluster_index in range(self.number_of_boundaries):
        result[cluster_index] = self._distances_lists[cluster_index]
    return result
def get_max_dist(self):
    """ Returns the maximum distance that the target has on a mesh vertex. """
    # Cached by update_distances_lists; no recomputation here.
    return self._max_dist
#############################
# --- per vkey distances
def get_all_distances_for_vkey(self, i):
    """Return vertex i's distance to each cluster separately (no union/blending)."""
    values = []
    for list_index in range(self.number_of_boundaries):
        values.append(self._distances_lists[list_index][i])
    return values
def get_distance(self, i):
    """
    Return the distance for vertex i, combining its per-cluster values with
    the configured union operator: 'min', 'smooth', 'chamfer' or 'stairs'.
    """
    per_cluster = self._np_distances_lists_flipped[i]
    method = self.union_method
    if method == 'min':
        # plain union: nearest cluster wins
        return np.min(per_cluster)
    if method == 'smooth':
        return blend_union_list(values=per_cluster, r=self.union_params[0])
    if method == 'chamfer':
        return chamfer_union_list(values=per_cluster, r=self.union_params[0])
    if method == 'stairs':
        return stairs_union_list(values=per_cluster, r=self.union_params[0],
                                 n=self.union_params[1])
    raise ValueError("Unknown Union method : ", method)
#############################
# --- scalar field smoothing
def laplacian_smoothing(self, iterations, strength):
    """Smooth every per-cluster distance field by iterative Laplacian smoothing."""
    L = utils.get_mesh_cotmatrix_igl(self.mesh, fix_boundaries=True)
    logger.info('Laplacian smoothing of all distances')
    smoothed_lists = []
    for values in self._distances_lists:
        field = np.array(values)
        for _ in range(iterations):  # repeated diffusion steps: field += strength * L * field
            field = field + strength * L * field
        smoothed_lists.append(list(field))
    self.update_distances_lists(smoothed_lists)
#############################
# ------ output
def save_distances(self, name):
    """
    Save distances to json.
    Saves one list with distance values (one per vertex).

    Parameters
    ----------
    name: str, name of json to be saved
    """
    # Evaluates the blended field (get_all_distances) before writing.
    utils.save_to_json(self.get_all_distances(), self.OUTPUT_PATH, name)
# ------ assign new Mesh
def assign_new_mesh(self, mesh):
    """ When the base mesh changes, a new mesh needs to be assigned. """
    # Round-trips the mesh through a temp file so this target holds an
    # independent copy rather than aliasing the caller's mesh object.
    # NOTE(review): writes json content to a ".obj" path — confirm intended.
    mesh.to_json(self.OUTPUT_PATH + "/temp.obj")
    mesh = Mesh.from_json(self.OUTPUT_PATH + "/temp.obj")
    self.mesh = mesh
    # Cache the vertex count used by the distance bookkeeping.
    self.VN = len(list(self.mesh.vertices()))
####################
# unions on lists
def blend_union_list(values, r):
    """Fold every element of *values* into one smooth union with blend radius r."""
    # Sentinel acts as "+infinity"; valid while real distances stay well below it.
    accumulated = 9999999
    for value in values:
        accumulated = blend_union(accumulated, value, r)
    return accumulated
def stairs_union_list(values, r, n):
    """ Returns a stairs union of all the elements in the list, with blend radius r and number of peaks n-1."""
    d_result = 9999999  # sentinel: effectively +infinity for expected distance ranges
    for d in values:  # idiom fix: enumerate removed — the index was never used
        d_result = stairs_union(d_result, d, r, n)
    return d_result
def chamfer_union_list(values, r):
    """Return a chamfer union of all the elements in the list, with radius r."""
    d_result = 9999999  # sentinel: effectively +infinity for expected distance ranges
    for d in values:  # idiom fix: enumerate removed — the index was never used
        d_result = chamfer_union(d_result, d, r)
    return d_result
####################
# unions on pairs
def blend_union(da, db, r):
    """Smooth minimum of da and db; r controls the width of the blend region."""
    # Quadratic polynomial smooth-min: only kicks in when |da - db| < r.
    overlap = max(r - abs(da - db), 0)
    return min(da, db) - overlap * overlap * 0.25 / r
def chamfer_union(a, b, r):
    """Chamfer (45-degree bevel) union of a and b with radius r."""
    bevel = (a - r + b) * math.sqrt(0.5)
    return min(a, b, bevel)
def stairs_union(a, b, r, n):
    """Stairs union of a and b: the blend radius r is split into n steps."""
    step = r / n
    shifted = b - r
    # Triangle-wave term produces the repeated "stair" ridges inside the blend.
    stair = 0.5 * (shifted + a + abs((shifted - a + step) % (2 * step) - step))
    return min(min(a, b), stair)
if __name__ == "__main__":
pass
| 1,891 | 0 | 50 |
5df8cfab48c8a542b687ba3e02010cc5e9fd9c28 | 1,134 | py | Python | new_app/models.py | 2141040025/Aplicacion-CRUD-Django | e760e7e02db7a2a4841d899af0e199db60d804b7 | [
"MIT"
] | null | null | null | new_app/models.py | 2141040025/Aplicacion-CRUD-Django | e760e7e02db7a2a4841d899af0e199db60d804b7 | [
"MIT"
] | null | null | null | new_app/models.py | 2141040025/Aplicacion-CRUD-Django | e760e7e02db7a2a4841d899af0e199db60d804b7 | [
"MIT"
] | null | null | null | #----------------------------------
# Models / new_app
#----------------------------------
from django.db import models
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#------------------------------------------------------------------------- | 28.35 | 74 | 0.561728 | #----------------------------------
# Models / new_app
#----------------------------------
from django.db import models
#-------------------------------------------------------------------------
class Profesor(models.Model):
    """A teacher, assigned to exactly one Departamento."""
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0 —
    # confirm the project's Django version before upgrading.
    departamento = models.ForeignKey('Departamento')
    nombre = models.CharField(max_length=200)
    direccion = models.CharField(max_length=200, blank=True)  # optional address
    tel = models.CharField(max_length=30, blank=True)  # optional phone number
    imagen = models.ImageField(upload_to='profesores/img', blank=True)  # optional photo

    class Meta:
        ordering = ['nombre']
        verbose_name_plural = "profesores"

    def __unicode__(self):
        # Python 2-style display name (used by Django 1.x admin listings).
        return self.nombre
#-------------------------------------------------------------------------
class Departamento(models.Model):
    """A department that groups teachers (see Profesor.departamento)."""
    nombre = models.CharField(max_length=200)
    desc = models.TextField(blank=True)  # optional free-text description

    def __unicode__(self):
        # Python 2-style display name (used by Django 1.x admin listings).
        return self.nombre
#-------------------------------------------------------------------------
class Curso(models.Model):
    """A course, taught by one or more teachers (many-to-many)."""
    profesor = models.ManyToManyField(Profesor)
    nombre = models.CharField(max_length=200)
    desc = models.TextField(blank=True)  # optional free-text description

    def __unicode__(self):
        # Python 2-style display name (used by Django 1.x admin listings).
        return self.nombre
0a04442bce0b91eab5c47f08077b28c7f389a3b9 | 1,152 | py | Python | mars/optimization/logical/chunk/core.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | 1 | 2022-02-02T03:03:48.000Z | 2022-02-02T03:03:48.000Z | mars/optimization/logical/chunk/core.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | mars/optimization/logical/chunk/core.py | wjsi/mars | a69fb19edfe748d4393b90ff2c4941a76c084596 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Type
from ....core import OperandType, ChunkGraph
from ..core import Optimizer, OptimizationRule, OptimizationRecords
class ChunkOptimizer(Optimizer):
"""
Tileable Optimizer
"""
| 30.315789 | 74 | 0.758681 | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Type
from ....core import OperandType, ChunkGraph
from ..core import Optimizer, OptimizationRule, OptimizationRecords
class ChunkOptimizer(Optimizer):
    """
    Chunk-graph optimizer.

    NOTE(review): the docstring previously read "Tileable Optimizer" — an
    apparent copy-paste from the tileable counterpart; the module-level
    ``optimize`` applies this class to a ChunkGraph.
    """
def register_chunk_optimization_rule(op_types: List[Type[OperandType]]):
    """Decorator factory: register a rule class for *op_types* on ChunkOptimizer."""
    def decorator(rule_type: Type[OptimizationRule]):
        ChunkOptimizer.register_rule(op_types, rule_type)
        return rule_type
    return decorator
def optimize(chunk_graph: ChunkGraph) -> OptimizationRecords:
    """Apply the ChunkOptimizer to *chunk_graph* and return the resulting records."""
    return ChunkOptimizer.optimize(chunk_graph)
| 288 | 0 | 46 |
55a1c2e8e33a01773c12f7d7af11c55f7b8e5f83 | 2,065 | py | Python | caterpillar_log.py | redrose2100/caterpillar_log | c2782c4836cd817e58a9a3ff1548067453604986 | [
"MulanPSL-1.0"
] | 1 | 2021-12-02T05:34:43.000Z | 2021-12-02T05:34:43.000Z | caterpillar_log.py | redrose2100/caterpillar_log | c2782c4836cd817e58a9a3ff1548067453604986 | [
"MulanPSL-1.0"
] | null | null | null | caterpillar_log.py | redrose2100/caterpillar_log | c2782c4836cd817e58a9a3ff1548067453604986 | [
"MulanPSL-1.0"
] | null | null | null | import logging
import platform
from pathlib import Path
logger = Log("caterpillar_log")
| 42.142857 | 108 | 0.651332 | import logging
import platform
from pathlib import Path
class Log(object):
    """Configure a named stdlib logger with one console and one file handler."""

    def __init__(self, name="logger", console_level=logging.INFO, file_level=logging.INFO,
                 log_fmt="%(asctime)s | %(levelname)s | %(pathname)s:%(lineno)s | %(message)s", log_dir=""):
        """
        Initialize the Log wrapper.

        :param name: logger name, default "logger"
        :param console_level: level for the console handler, default logging.INFO
        :param file_level: level for the file handler, default logging.INFO
        :param log_fmt: record format, default
            "%(asctime)s | %(levelname)s | %(pathname)s:%(lineno)s | %(message)s"
        :param log_dir: directory for the log file; when empty a platform
            default is used (/var/log/<name> on Linux, ../../logs otherwise)
        """
        self.__name = name
        self.__console_level = console_level
        self.__file_level = file_level
        self.__log_fmt = log_fmt
        self.__log_dir, self.__log_file = self.__resolve_paths(log_dir)
        if not Path(self.__log_dir).exists():
            Path(self.__log_dir).mkdir(parents=True, exist_ok=True)
        self.__logger = logging.getLogger(self.__name)
        self.__logger.setLevel(logging.DEBUG)
        # Bug fix: logging.getLogger caches loggers by name, so instantiating
        # Log twice with the same name used to attach duplicate handlers and
        # emit every record multiple times. Attach handlers only once.
        if not self.__logger.handlers:
            formatter = logging.Formatter(fmt=self.__log_fmt)
            console_handler = logging.StreamHandler()
            console_handler.setLevel(self.__console_level)
            console_handler.setFormatter(formatter)
            file_handler = logging.FileHandler(filename=self.__log_file)
            file_handler.setLevel(self.__file_level)
            file_handler.setFormatter(formatter)
            self.__logger.addHandler(console_handler)
            self.__logger.addHandler(file_handler)

    def __resolve_paths(self, log_dir):
        """Return (log directory, log file path) for the configured name."""
        if log_dir:
            return log_dir, f"{log_dir}/{self.__name}.log"
        if platform.system() == "Linux":
            return f"/var/log/{self.__name}", f"/var/log/{self.__name}/{self.__name}.log"
        base = Path(__file__).resolve().parent.parent.parent
        return base / "logs", base / f"logs/{self.__name}.log"

    def get_logger(self):
        """Return the configured stdlib logger (new, backward-compatible accessor)."""
        return self.__logger
# Module-level singleton configured at import time.
# NOTE(review): with no log_dir, Linux imports write under /var/log/caterpillar_log,
# which requires write permission there — confirm this is intended.
logger = Log("caterpillar_log")
| 0 | 2,053 | 23 |
fd38435ffe0a6ffb640283b85de483604576f371 | 613 | py | Python | labs/sort04/sorted04.py | queenskid/MyCode | 20756ebd1826ba7cb1cf9b06374e6aa568996bd1 | [
"MIT"
] | 3 | 2018-05-21T18:16:47.000Z | 2018-05-23T16:45:04.000Z | labs/sort04/sorted04.py | queenskid/MyCode | 20756ebd1826ba7cb1cf9b06374e6aa568996bd1 | [
"MIT"
] | null | null | null | labs/sort04/sorted04.py | queenskid/MyCode | 20756ebd1826ba7cb1cf9b06374e6aa568996bd1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
us_invasion = [{'ip':'10.10.1.2', 'un':'john', 'pw':'allstar'}, {'ip':'10.10.1.3', 'un':'paul', 'pw':'iils20s3'}, {'ip':'10.10.1.4', 'un':'george', 'pw':'hunkydoryzory'}, {'ip':'10.10.1.5', 'un':'stuart', 'pw':'alta3'}, {'ip':'10.10.1.6', 'un':'pete', 'pw':'a8dd827z3'}]
listbyusername = sorted(us_invasion, key=byUserName)
print('\nThe list us_invasion looks like: ', us_invasion)
print('\nResult of sorted(us_invasion, key=byUserName): ', listbyusername)
print('\nBut the value of the list us_invasion hasn\'t actually changed: ', us_invasion)
| 40.866667 | 270 | 0.636215 | #!/usr/bin/env python3
us_invasion = [{'ip':'10.10.1.2', 'un':'john', 'pw':'allstar'}, {'ip':'10.10.1.3', 'un':'paul', 'pw':'iils20s3'}, {'ip':'10.10.1.4', 'un':'george', 'pw':'hunkydoryzory'}, {'ip':'10.10.1.5', 'un':'stuart', 'pw':'alta3'}, {'ip':'10.10.1.6', 'un':'pete', 'pw':'a8dd827z3'}]
def byUserName(x):
    """sorted() key function: pull the 'un' (username) field out of a login record."""
    username = x['un']
    return username
# sorted() returns a new list ordered by the key function; the input is untouched.
listbyusername = sorted(us_invasion, key=byUserName)
print('\nThe list us_invasion looks like: ', us_invasion)
print('\nResult of sorted(us_invasion, key=byUserName): ', listbyusername)
print('\nBut the value of the list us_invasion hasn\'t actually changed: ', us_invasion)
| 16 | 0 | 23 |
b210ee8b14926078d8a74fa81fafd9e32ebef3a1 | 1,104 | py | Python | flaskapp/src/forms.py | manojnuvvala/smile_maker_proposal | 4b5ab60ad229a7829f929da380e3707e74963c0c | [
"MIT"
] | null | null | null | flaskapp/src/forms.py | manojnuvvala/smile_maker_proposal | 4b5ab60ad229a7829f929da380e3707e74963c0c | [
"MIT"
] | 6 | 2021-11-08T14:15:29.000Z | 2021-11-10T14:46:06.000Z | flaskapp/src/forms.py | manojnuvvala/smile_maker_proposal | 4b5ab60ad229a7829f929da380e3707e74963c0c | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.validators import Required, Email, EqualTo
from wtforms import StringField, PasswordField, SubmitField
from .models import User
| 32.470588 | 81 | 0.702899 | from flask_wtf import FlaskForm
from wtforms import ValidationError
from wtforms.validators import Required, Email, EqualTo
from wtforms import StringField, PasswordField, SubmitField
from .models import User
class RegistrationForm(FlaskForm):
    """Sign-up form: email plus password with confirmation."""
    email = StringField("Your Email Address", validators=[Required(), Email()])
    # NOTE(review): `Required` is the pre-3.0 WTForms name of DataRequired —
    # confirm the pinned wtforms version still exports it.
    password = PasswordField(
        "Password",
        validators=[
            Required(),
            EqualTo("confirm_password", message="Passwords must match"),
        ],
    )
    confirm_password = PasswordField("Confirm Password", validators=[Required()])
    submit = SubmitField("Sign Up")

    def validate_email(self, data_field):
        # WTForms calls validate_<fieldname> automatically during form.validate();
        # rejects addresses that already exist in the User table.
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError("There is already an account with this email")
class LoginForm(FlaskForm):
    """Sign-in form: email and password."""
    email = StringField("Your Email Address", validators=[Required(), Email()])
    password = PasswordField("Password", validators=[Required()])
    submit = SubmitField("Sign In")
class JokeForm(FlaskForm):
    """Single-button form used to request another joke."""
    submit = SubmitField("Get Another Joke")
| 161 | 661 | 69 |
31053edd052434562fd40c778b05bed6ad5788bd | 1,068 | py | Python | Python3/362.design-hit-counter.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/362.design-hit-counter.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/362.design-hit-counter.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=362 lang=python3
#
# [362] Design Hit Counter
#
import collections
# @lc code=start
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# ls = [[1],[2],[3],[300],[301]]
# for param in ls:
# obj.hit(param[0])
# print(obj.q)
# # param_2 = obj.getHits(timestamp)
# @lc code=end
| 23.733333 | 74 | 0.568352 | #
# @lc app=leetcode id=362 lang=python3
#
# [362] Design Hit Counter
#
import collections
# @lc code=start
class HitCounter:
    """
    Count hits in the trailing 300-second (5-minute) window.

    Timestamps are assumed to arrive in non-decreasing order (per the
    problem statement), so a time-ordered deque suffices.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.data = collections.deque()

    def hit(self, timestamp: 'int') -> 'None':
        """
        Record a hit.
        @param timestamp - The current timestamp (in seconds granularity).
        """
        # Improvement: also evict on write so memory stays bounded by the
        # number of hits in the current window (getHits results unchanged).
        self._evict(timestamp)
        self.data.append(timestamp)

    def getHits(self, timestamp: 'int') -> 'int':
        """
        Return the number of hits in the past 5 minutes.
        @param timestamp - The current timestamp (in seconds granularity).
        """
        self._evict(timestamp)
        return len(self.data)

    def _evict(self, timestamp):
        """Drop every recorded hit that is 300 or more seconds old."""
        while self.data and timestamp - self.data[0] >= 300:
            self.data.popleft()
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# ls = [[1],[2],[3],[300],[301]]
# for param in ls:
# obj.hit(param[0])
# print(obj.q)
# # param_2 = obj.getHits(timestamp)
# @lc code=end
| 0 | 700 | 22 |
ded2c09c9e6d9c88f165bbf17b8eb3d06f047025 | 1,080 | py | Python | tkinter/thread-is_alive/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | tkinter/thread-is_alive/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | tkinter/thread-is_alive/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z |
# date: 2019.07.09
# https://stackoverflow.com/questions/56951383/tkinter-disable-buttons-while-thread-is-running/56953613#56953613
import tkinter as tk
from threading import Thread
import time
#-----------------------------------------------------
# counter displayed when thread is running
counter = 0
root = tk.Tk()
l = tk.Label(root)
l.pack()
b = tk.Button(root, text="Start", command=start_thread)
b.pack()
root.mainloop()
| 19.636364 | 112 | 0.585185 |
# date: 2019.07.09
# https://stackoverflow.com/questions/56951383/tkinter-disable-buttons-while-thread-is-running/56953613#56953613
import tkinter as tk
from threading import Thread
import time
def long_running_function():
    """Simulated slow task: blocks its worker thread for three seconds."""
    print('start sleep')
    time.sleep(3)
    print('end sleep')
def start_thread():
    """Disable the button, launch the worker thread, and begin polling it."""
    global t
    global counter
    b['state'] = 'disable'  # prevent launching a second thread while one runs
    counter = 0  # elapsed-time display restarts from zero
    t = Thread(target=long_running_function)
    t.start()
    check_thread()
    # or check after 100ms
    # root.after(100, check_thread)
def check_thread():
    """Poll the worker thread from the Tk event loop; re-enable the UI when done."""
    global counter
    if not t.is_alive():
        b['state'] = 'normal'
        l['text'] = ''
    else:
        l['text'] = '{:.1f}'.format(counter)
        counter += 0.1  # approximate elapsed seconds: one tick per 100 ms poll
        # check again after 100ms
        root.after(100, check_thread)
#-----------------------------------------------------
# counter displayed when thread is running
counter = 0
root = tk.Tk()
l = tk.Label(root)  # shows the elapsed-time counter while the thread runs
l.pack()
b = tk.Button(root, text="Start", command=start_thread)
b.pack()
root.mainloop()  # blocks until the window is closed
| 558 | 0 | 70 |
5951381581d5be53a894913789659792e0670778 | 794 | py | Python | libdoc_sample/MyLibrary.py | thinkAmi-sandbox/RobotFramework-sample | 86455e912ac132350b8aacb430de9f060b9d722a | [
"Unlicense"
] | 9 | 2017-12-20T23:10:33.000Z | 2020-11-27T06:22:05.000Z | libdoc_sample/MyLibrary.py | thinkAmi-sandbox/RobotFramework-sample | 86455e912ac132350b8aacb430de9f060b9d722a | [
"Unlicense"
] | 1 | 2021-06-01T22:03:43.000Z | 2021-06-01T22:03:43.000Z | libdoc_sample/MyLibrary.py | thinkAmi-sandbox/RobotFramework-sample | 86455e912ac132350b8aacb430de9f060b9d722a | [
"Unlicense"
] | 2 | 2018-05-21T00:16:11.000Z | 2019-11-04T13:43:57.000Z | from robot.api import logger
class MyLibrary:
"""マイライブラリ
| =タイトル= | =もう一つタイトル= |
| 1行1列目 | 1行2列目 |
| | 1列目が空白 |
| 2列目が空白 | |
= カスタムセクション =
ここがカスタムセクション
= 次のセクション =
`カスタムセクション` へのリンク
セクションへのリンク
- `introduction`
- `importing`
- `shortcuts`
- `keywords`
*太字です*
_イタリックです_
普通です
- リスト1
- リスト2
Googleへ https://google.co.jp
こちらも [https://google.co.jp|Googleへ]
`Hello World` へ
``インラインコードスタイル``
複数行の *bold\n
try* みる
"""
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
def hello_world(self, name='foo'):
"""ハローワールドを出力します"""
logger.console(f'hello, world {name} !')
| 14.178571 | 56 | 0.540302 | from robot.api import logger
class MyLibrary:
    """My library (libdoc markup sample).

    | =Title=           | =Another title=   |
    | row 1, column 1   | row 1, column 2   |
    |                   | column 1 is blank |
    | column 2 is blank |                   |

    = Custom section =

    This is the custom section.

    = Next section =

    Link to the `Custom section`

    Links to sections:
    - `introduction`
    - `importing`
    - `shortcuts`
    - `keywords`

    *This is bold*

    _This is italic_

    This is plain

    - list item 1
    - list item 2

    To Google: https://google.co.jp

    Also [https://google.co.jp|to Google]

    To `Hello World`

    ``inline code style``

    Multi-line *bold\n
    try* sample
    """
    # Library instance is shared across one test suite.
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'

    def hello_world(self, name='foo'):
        """Prints hello world."""
        logger.console(f'hello, world {name} !')

    def no_args(self):
        """Keyword that takes no arguments (libdoc signature sample)."""
        pass

    def multi_args(self, one, two='2', *args, **kwargs):
        """Keyword with positional, default, varargs and kwargs (libdoc sample)."""
        pass
| 54 | 0 | 54 |
9537bc4d62126af3c4dd206e1dfaa74a82c40530 | 4,284 | py | Python | Turnauswertung-py3/Turnauswertung/settings.py | naechtner/turn-events | 2c71f8ceb5d6f8280c6c19a6467922a834ae0b02 | [
"MIT"
] | null | null | null | Turnauswertung-py3/Turnauswertung/settings.py | naechtner/turn-events | 2c71f8ceb5d6f8280c6c19a6467922a834ae0b02 | [
"MIT"
] | 11 | 2015-06-20T11:51:10.000Z | 2017-01-09T07:09:04.000Z | Turnauswertung-py3/Turnauswertung/settings.py | naechtner/turn-events | 2c71f8ceb5d6f8280c6c19a6467922a834ae0b02 | [
"MIT"
] | null | null | null | """
Django settings for auswertung project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.contrib.messages import constants as messages
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ys-#t((t5g8^p-@9sn3@artu2_5my==hvd&vgmc1ho_@$nu(gw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'widget_tweaks',
'common',
'athletes',
'clubs',
'utils',
'squads',
'streams',
'teams',
'tournaments',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'Turnauswertung.urls'
WSGI_APPLICATION = 'Turnauswertung.wsgi.application'
MEDIA_ROOT = 'static/'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'TURNAUSWERTUNG',
'USER': 'root',
'PASSWORD': 'root',
},
'sqlite_fallback': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# USE_THOUSAND_SEPARATOR = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Multi-language support
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
)
LANGUAGES = (
('en', _('English')),
('de', _('German')),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
],
'debug': DEBUG,
},
},
]
| 26.775 | 73 | 0.704248 | """
Django settings for auswertung project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.contrib.messages import constants as messages
from django.utils.translation import ugettext_lazy as _
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ys-#t((t5g8^p-@9sn3@artu2_5my==hvd&vgmc1ho_@$nu(gw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'widget_tweaks',
'common',
'athletes',
'clubs',
'utils',
'squads',
'streams',
'teams',
'tournaments',
'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'Turnauswertung.urls'
WSGI_APPLICATION = 'Turnauswertung.wsgi.application'
MEDIA_ROOT = 'static/'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# SECURITY WARNING: real database credentials belong in the environment,
# not in source control — 'root'/'root' is a development-only setup.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'TURNAUSWERTUNG',
        'USER': 'root',
        'PASSWORD': 'root',
    },
    # NOTE(review): Django only selects the 'default' alias automatically —
    # confirm how 'sqlite_fallback' is meant to be activated.
    'sqlite_fallback': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# USE_THOUSAND_SEPARATOR = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Multi-language support
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'locale/'),
)
LANGUAGES = (
('en', _('English')),
('de', _('German')),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
],
'debug': DEBUG,
},
},
]
| 0 | 0 | 0 |
4c0de6ade1ec780ff6d7e01a203723368aba4aac | 2,026 | py | Python | homeassistant/components/telegram_bot/polling.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/telegram_bot/polling.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/telegram_bot/polling.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for Telegram bot using polling."""
import logging
from telegram import Update
from telegram.error import NetworkError, RetryAfter, TelegramError, TimedOut
from telegram.ext import CallbackContext, TypeHandler, Updater
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from . import BaseTelegramBotEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, bot, config):
"""Set up the Telegram polling platform."""
pollbot = PollBot(hass, bot, config)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, pollbot.start_polling)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, pollbot.stop_polling)
return True
def process_error(update: Update, context: CallbackContext):
"""Telegram bot error handler."""
try:
raise context.error
except (TimedOut, NetworkError, RetryAfter):
# Long polling timeout or connection problem. Nothing serious.
pass
except TelegramError:
_LOGGER.error('Update "%s" caused error: "%s"', update, context.error)
class PollBot(BaseTelegramBotEntity):
"""
Controls the Updater object that holds the bot and a dispatcher.
The dispatcher is set up by the super class to pass telegram updates to `self.handle_update`
"""
def __init__(self, hass, bot, config):
"""Create Updater and Dispatcher before calling super()."""
self.bot = bot
self.updater = Updater(bot=bot, workers=4)
self.dispatcher = self.updater.dispatcher
self.dispatcher.add_handler(TypeHandler(Update, self.handle_update))
self.dispatcher.add_error_handler(process_error)
super().__init__(hass, config)
def start_polling(self, event=None):
"""Start the polling task."""
_LOGGER.debug("Starting polling")
self.updater.start_polling()
def stop_polling(self, event=None):
"""Stop the polling task."""
_LOGGER.debug("Stopping polling")
self.updater.stop()
| 33.213115 | 96 | 0.715202 | """Support for Telegram bot using polling."""
import logging
from telegram import Update
from telegram.error import NetworkError, RetryAfter, TelegramError, TimedOut
from telegram.ext import CallbackContext, TypeHandler, Updater
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from . import BaseTelegramBotEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, bot, config):
    """Set up the Telegram polling platform."""
    poll_bot = PollBot(hass, bot, config)
    # Start/stop the poller in lockstep with the Home Assistant lifecycle.
    for event, listener in (
        (EVENT_HOMEASSISTANT_START, poll_bot.start_polling),
        (EVENT_HOMEASSISTANT_STOP, poll_bot.stop_polling),
    ):
        hass.bus.async_listen_once(event, listener)
    return True
def process_error(update: Update, context: CallbackContext):
    """Telegram bot error handler."""
    # Re-raise the stored exception so we can branch on its type.
    # Any exception not matched by a clause below propagates to the caller.
    try:
        raise context.error
    except (TimedOut, NetworkError, RetryAfter):
        # Long polling timeout or connection problem. Nothing serious.
        pass
    except TelegramError:
        # Matched last because TelegramError is the broader telegram error type.
        _LOGGER.error('Update "%s" caused error: "%s"', update, context.error)
class PollBot(BaseTelegramBotEntity):
    """
    Controls the Updater object that holds the bot and a dispatcher.
    The dispatcher is set up by the super class to pass telegram updates to `self.handle_update`
    """
    def __init__(self, hass, bot, config):
        """Create Updater and Dispatcher before calling super()."""
        self.bot = bot
        # Per the docstring above: the Updater and its dispatcher must exist
        # before super().__init__ runs.
        self.updater = Updater(bot=bot, workers=4)
        self.dispatcher = self.updater.dispatcher
        # Route every incoming Update to self.handle_update; process_error
        # (module-level) handles errors raised while dispatching.
        self.dispatcher.add_handler(TypeHandler(Update, self.handle_update))
        self.dispatcher.add_error_handler(process_error)
        super().__init__(hass, config)
    def start_polling(self, event=None):
        """Start the polling task."""
        # `event` is accepted (unused) so this can be an event-bus listener.
        _LOGGER.debug("Starting polling")
        self.updater.start_polling()
    def stop_polling(self, event=None):
        """Stop the polling task."""
        _LOGGER.debug("Stopping polling")
        self.updater.stop()
| 0 | 0 | 0 |
93f42d3bd93aa26629da14f358f8e057c18aa7ff | 2,972 | py | Python | HackerEarth_problems/Xenny and Partially Sorted Strings/solution.py | gbrls/CompetitiveCode | b6f1b817a655635c3c843d40bd05793406fea9c6 | [
"MIT"
] | 165 | 2020-10-03T08:01:11.000Z | 2022-03-31T02:42:08.000Z | HackerEarth_problems/Xenny and Partially Sorted Strings/solution.py | gbrls/CompetitiveCode | b6f1b817a655635c3c843d40bd05793406fea9c6 | [
"MIT"
] | 383 | 2020-10-03T07:39:11.000Z | 2021-11-20T07:06:35.000Z | HackerEarth_problems/Xenny and Partially Sorted Strings/solution.py | gbrls/CompetitiveCode | b6f1b817a655635c3c843d40bd05793406fea9c6 | [
"MIT"
] | 380 | 2020-10-03T08:05:04.000Z | 2022-03-19T06:56:59.000Z | '''
Problem: Xenny and Partially Sorted Strings
Accept number of testcases as t
for each test case:
Given 3 integers N, K, M for each test case. Do the following steps-
Accept all N number of strings and store them in a list
sort the list by only considering first m charcaters in every element of list
output the Kth element from the above sorted list
'''
# Xenny and Partially Sorted Strings.
# For each test case: read N strings, order them by their first M characters
# only, and print the K-th string of that ordering.
test_cases = int(input())
for _ in range(test_cases):
    n, k, m = map(int, input().split())
    words = [input() for _ in range(n)]
    # Sort on the first m characters only; Python's sort is stable, so
    # strings that tie on that prefix keep their original relative order.
    words.sort(key=lambda word: word[:m])
    # K is 1-based, list indexing is 0-based.
    print(words[k - 1])
| 51.241379 | 128 | 0.711642 | '''
Problem: Xenny and Partially Sorted Strings
Accept number of testcases as t
for each test case:
Given 3 integers N, K, M for each test case. Do the following steps-
Accept all N number of strings and store them in a list
sort the list by only considering first m charcaters in every element of list
output the Kth element from the above sorted list
'''
# Xenny and Partially Sorted Strings.
# For each test case: read N strings, order them by their first M characters
# only, and print the K-th string of that ordering.
test_cases = int(input())
for _ in range(test_cases):
    n, k, m = map(int, input().split())
    words = [input() for _ in range(n)]
    # Sort on the first m characters only; Python's sort is stable, so
    # strings that tie on that prefix keep their original relative order.
    words.sort(key=lambda word: word[:m])
    # K is 1-based, list indexing is 0-based.
    print(words[k - 1])
| 0 | 0 | 0 |
f5e0368f160b8e1c7d9533f93c2f4c2fa8013e01 | 3,562 | py | Python | store/api/handlers/query/available_orders.py | alexadastra/backendschool2021 | 9b51c70f1f7a9b5bec032c8b62ff35a33f614c67 | [
"MIT"
] | null | null | null | store/api/handlers/query/available_orders.py | alexadastra/backendschool2021 | 9b51c70f1f7a9b5bec032c8b62ff35a33f614c67 | [
"MIT"
] | null | null | null | store/api/handlers/query/available_orders.py | alexadastra/backendschool2021 | 9b51c70f1f7a9b5bec032c8b62ff35a33f614c67 | [
"MIT"
] | null | null | null | from store.api.handlers.base import BaseView
from http import HTTPStatus
from typing import Generator
from datetime import datetime
from aiohttp.web_response import Response
from aiohttp.web_exceptions import HTTPNotFound
from aiohttp_apispec import docs, request_schema, response_schema
from sqlalchemy import and_, or_
from store.api.schema import OrdersAssignPostRequestSchema, OrdersAssignPostResponseSchema
from store.db.schema import orders_table, couriers_table, orders_delivery_hours_table, delivery_hours_table, \
working_hours_table, couriers_working_hours_table
from ..query import AVAILABLE_ORDERS_QUERY
from ...domain import CouriersOrdersResolver, CourierConfigurator
| 45.088608 | 119 | 0.686412 | from store.api.handlers.base import BaseView
from http import HTTPStatus
from typing import Generator
from datetime import datetime
from aiohttp.web_response import Response
from aiohttp.web_exceptions import HTTPNotFound
from aiohttp_apispec import docs, request_schema, response_schema
from sqlalchemy import and_, or_
from store.api.schema import OrdersAssignPostRequestSchema, OrdersAssignPostResponseSchema
from store.db.schema import orders_table, couriers_table, orders_delivery_hours_table, delivery_hours_table, \
working_hours_table, couriers_working_hours_table
from ..query import AVAILABLE_ORDERS_QUERY
from ...domain import CouriersOrdersResolver, CourierConfigurator
class AvailableOrdersDefiner:
    """Selects the orders a courier can take and resolves which to assign."""
    @staticmethod
    async def get_available_orders(conn, courier, courier_id=None):
        """Fetch orders matching the courier's regions, hours and capacity.

        NOTE(review): returns None for a falsy courier but [] for a courier
        with no working hours/regions — callers should treat both as empty.
        Assumes courier['carrying_capacity'] is already set (see get_orders).
        """
        if not courier:
            return
        if not courier['working_hours'] or not courier['regions']:
            return []
        # Order's region must be one of the courier's regions.
        region_conditions = or_(*list([orders_table.c.region == region for region in courier['regions']]))
        # according to the task, ends of interval are not counted,
        # so working_hours and delivery_hours are intersected if
        # min(working_finish, delivery_finish) - max(working_start, delivery_start) > 0
        # The four appended clauses below enumerate the four relative layouts
        # of a working interval vs. a delivery interval.
        hours_conditions = []
        for working_hours in courier['working_hours']:
            hours_conditions.append(and_(
                working_hours['time_start'] > delivery_hours_table.c.time_start,
                working_hours['time_finish'] > delivery_hours_table.c.time_finish,
                delivery_hours_table.c.time_finish - working_hours['time_start'] > 0
            ))
            hours_conditions.append(and_(
                working_hours['time_start'] > delivery_hours_table.c.time_start,
                working_hours['time_finish'] <= delivery_hours_table.c.time_finish,
                # condition delivery_time_finish - delivery_time_start > 0,
                # as it is checked in POST /orders request validation
            ))
            hours_conditions.append(and_(
                working_hours['time_start'] <= delivery_hours_table.c.time_start,
                working_hours['time_finish'] > delivery_hours_table.c.time_finish
                # condition working_time_finish - working_time_start > 0,
                # as it is checked in POST /couriers request validation
            ))
            hours_conditions.append(and_(
                working_hours['time_start'] <= delivery_hours_table.c.time_start,
                working_hours['time_finish'] <= delivery_hours_table.c.time_finish,
                working_hours['time_finish'] - delivery_hours_table.c.time_start > 0
            ))
        # All filters combined: region AND any-hours-overlap AND courier AND weight.
        query = AVAILABLE_ORDERS_QUERY.where(and_(
            region_conditions,
            or_(*hours_conditions),
            orders_table.c.courier_id == courier_id,
            orders_table.c.weight <= courier['carrying_capacity']
        ))
        return await conn.fetch(query)
    async def get_orders(self, conn, courier, courier_id=None):
        """Return the order ids to assign, fitting the courier's capacity.

        Side effect: mutates the passed courier dict by setting
        'carrying_capacity' from the courier type.
        """
        courier['carrying_capacity'] = await CourierConfigurator.get_courier_carrying_capacity(courier['courier_type'])
        orders = await self.get_available_orders(conn, courier, courier_id)
        if not orders:
            return []
        # Knapsack-style resolution: pick a subset within max_weight.
        orders_to_assign_ids = await CouriersOrdersResolver(
            orders_={orders[i]['order_id']: orders[i]['weight'] for i in range(len(orders))},
            max_weight=courier['carrying_capacity']).resolve_orders()
        return orders_to_assign_ids
| 2,770 | 79 | 23 |
5535fd9792414199f146a72f014a74bda5dbffd2 | 10,212 | py | Python | NEST/second_level/src/topologies/extensor_cpg_concept_sbs.py | late-goodbye/memristive-spinal-cord | f6d7bca154fb4cab503f416fa01b8dc9b0a7d046 | [
"MIT"
] | null | null | null | NEST/second_level/src/topologies/extensor_cpg_concept_sbs.py | late-goodbye/memristive-spinal-cord | f6d7bca154fb4cab503f416fa01b8dc9b0a7d046 | [
"MIT"
] | null | null | null | NEST/second_level/src/topologies/extensor_cpg_concept_sbs.py | late-goodbye/memristive-spinal-cord | f6d7bca154fb4cab503f416fa01b8dc9b0a7d046 | [
"MIT"
] | null | null | null | from enum import Enum
from nest import Create, Connect
from the_second_level.src.tools.multimeter import add_multimeter
| 34.734694 | 119 | 0.546906 | from enum import Enum
from nest import Create, Connect
from the_second_level.src.tools.multimeter import add_multimeter
class Params(Enum):
    """Simulation constants and plotting configuration."""
    NUM_SUBLEVELS = 6
    NUM_SPIKES = 6
    RATE = 40
    SIMULATION_TIME = 175.
    INH_COEF = 1.
    PLOT_SLICES_SHIFT = 8.  # ms
    # Nucleus name -> plot label. Built programmatically, but the resulting
    # mapping (keys, labels, and insertion order) is exactly the original one:
    # node1.1-1.3, hidden_1, node2.*-node5.* (six each), node6.1-6.5,
    # pool1-pool6, moto.
    TO_PLOT = dict(
        [('node1.{0}'.format(i), 'Node 1.{0}'.format(i)) for i in (1, 2, 3)]
        + [('hidden_1', 'Hidden Nuclei 1')]
        + [('node{0}.{1}'.format(level, i), 'Node {0}.{1}'.format(level, i))
           for level in (2, 3, 4, 5) for i in (1, 2, 3, 4, 5, 6)]
        + [('node6.{0}'.format(i), 'Node 6.{0}'.format(i)) for i in (1, 2, 3, 4, 5)]
        + [('pool{0}'.format(i), 'Pool{0}'.format(i)) for i in (1, 2, 3, 4, 5, 6)]
        + [('moto', 'Moto')]
    )
    TO_PLOT_WITH_SLICES = {
        'moto': 6
    }
def create(n: int):
    """Create *n* 'hh_cond_exp_traub' neurons with the model's standard parameters."""
    neuron_params = {
        't_ref': 2.,
        'V_m': -70.0,
        'E_L': -70.0,
        'g_L': 75.0,
        'tau_syn_ex': .2,
        'tau_syn_in': 3.,
    }
    return Create(model='hh_cond_exp_traub', n=n, params=neuron_params)
def create_with_mmeter(n: int, name: str):
    """Create *n* neurons (see ``create``) and attach a multimeter named *name*."""
    neurons = create(n)
    Connect(pre=add_multimeter(name), post=neurons)
    return neurons
def connect(pre, post, weight, degree, delay=1.):
    """Connect *pre* to *post* with static synapses using a fixed outdegree."""
    synapse_spec = {
        'model': 'static_synapse',
        'delay': delay,
        'weight': weight,
    }
    connection_spec = {
        'rule': 'fixed_outdegree',
        'outdegree': degree,
        'multapses': True,
        'autapses': True,
    }
    Connect(pre=pre, post=post, syn_spec=synapse_spec, conn_spec=connection_spec)
class EES:
    """Epidural electrical stimulation: a spike generator wired to target nuclei."""

    def __init__(self):
        """Create the generator: NUM_SPIKES spikes at RATE Hz, starting at t=10 ms."""
        self.ees = Create(
            model='spike_generator',
            params={
                'spike_times': [10. + i * round(1000. / Params.RATE.value, 1) for i in range(Params.NUM_SPIKES.value)],
                'spike_weights': [500. for i in range(Params.NUM_SPIKES.value)]})

    def connect_ees(self, post):
        """Connect the stimulation generator to every neuron in *post*.

        NOTE(review): the first parameter was previously named ``cls``
        although this is a plain instance method (no @classmethod);
        renamed to ``self`` for clarity — call sites are unaffected.
        """
        Connect(
            pre=self.ees,
            post=post,
            syn_spec={
                'model': 'static_synapse',
                'weight': 1.,
                'delay': .1
            },
            conn_spec={
                'rule': 'fixed_outdegree',
                # One connection per target neuron.
                'outdegree': len(post),
                'autapses': False,
                'multapses': False
            })
class Node:
    """One CPG sublevel: six 40-neuron groups wired as a small circuit."""
    def __init__(self, index):
        """Create groups 'node{index}.1'..'node{index}.6' and connect them."""
        self.node1 = create_with_mmeter(40, 'node{}.1'.format(index))
        self.node2 = create_with_mmeter(40, 'node{}.2'.format(index))
        self.node3 = create_with_mmeter(40, 'node{}.3'.format(index))
        self.node4 = create_with_mmeter(40, 'node{}.4'.format(index))
        self.node5 = create_with_mmeter(40, 'node{}.5'.format(index))
        self.node6 = create_with_mmeter(40, 'node{}.6'.format(index))
        # node1 <-> node2 excite each other; node2 inhibits node3
        # (negative weight), while node1 also excites node3.
        connect(self.node1, self.node2, 15., 30)
        connect(self.node2, self.node1, 15., 30)
        connect(self.node1, self.node3, 10., 30)
        connect(self.node2, self.node3, -10., 40)
        # Feed-forward chain node3 -> node4 -> node5 -> node6.
        connect(self.node3, self.node4, 10., 30)
        connect(self.node4, self.node5, 10., 30)
        connect(self.node5, self.node6, 10., 30)
class Topology:
    """Full extensor CPG network: sensory input, six levels, pools, motoneurons.

    NOTE(review): all weights/degrees/delays below are hand-tuned model
    constants taken from the concept schema; do not reorder the calls.
    """
    def __init__(self):
        """Create every nucleus and wire the whole network."""
        # Input/output populations and one pool per level.
        sensory = create_with_mmeter(60, 'sensory')
        ia_aff = create_with_mmeter(169, 'ia_aff')
        pool = [create_with_mmeter(40, 'pool{}'.format(i)) for i in range(1, 7)]
        moto = create_with_mmeter(169, 'moto')
        # Epidural stimulation drives both the sensory fibers and the motoneurons.
        ees = EES()
        ees.connect_ees(sensory)
        ees.connect_ees(moto)
        # Every pool converges onto the motoneurons; Ia afferents do too.
        for pool_nucleus in pool:
            connect(pool_nucleus, moto, 25, 20)
        connect(ia_aff, moto, 25, 20)
        # --- Level 1: relay chain fed by the sensory input ---
        node11 = create_with_mmeter(40, 'node1.1')
        node12 = create_with_mmeter(40, 'node1.2')
        node13 = create_with_mmeter(40, 'node1.3')
        node14 = create_with_mmeter(40, 'node1.4')
        node15 = create_with_mmeter(40, 'node1.5')
        node16 = create_with_mmeter(40, 'node1.6')
        node17 = create_with_mmeter(40, 'node1.7')
        node18 = create_with_mmeter(40, 'node1.8')
        node19 = create_with_mmeter(40, 'node1.9')
        # NOTE(review): odd names/labels ('node1.010'/'node1.011') —
        # presumably nodes 1.10 and 1.11; confirm against the plotting code.
        node0110 = create_with_mmeter(40, 'node1.010')
        node0111 = create_with_mmeter(40, 'node1.011')
        # Level-1 chain members 1.3-1.9 feed pool 1.
        connect(node13, pool[0], 40., 30)
        connect(node14, pool[0], 40., 30)
        connect(node15, pool[0], 40., 30)
        connect(node16, pool[0], 40., 30)
        connect(node17, pool[0], 40., 30)
        connect(node18, pool[0], 40., 30)
        connect(node19, pool[0], 40., 30)
        # Sensory input enters at node 1.1; the chain relays with 2 ms delays.
        connect(sensory, node11, 10., 40)
        connect(node11, node12, 15., 40, 2.)
        connect(node12, node13, 15., 40, 2.)
        connect(node13, node14, 15., 40, 2.)
        connect(node14, node15, 15., 40, 2.)
        connect(node15, node16, 15., 40, 2.)
        connect(node16, node17, 15., 40, 2.)
        connect(node17, node18, 15., 40, 2.)
        connect(node18, node19, 15., 40, 2.)
        connect(node19, node0110, 15., 40, 2.)
        connect(node0110, node0111, 15., 40, 2.)
        # --- Level 2: 2.1<->2.2 loop, hidden relay, 2.4-2.6 feed pool 2 ---
        hidden_nuclei_1 = create_with_mmeter(40, 'hidden_1')
        node21 = create_with_mmeter(40, 'node2.1')
        node22 = create_with_mmeter(40, 'node2.2')
        node23 = create_with_mmeter(40, 'node2.3')
        node24 = create_with_mmeter(40, 'node2.4')
        node25 = create_with_mmeter(40, 'node2.5')
        node26 = create_with_mmeter(40, 'node2.6')
        connect(node11, node21, 15., 40, 2.)
        connect(node21, node22, 20., 40)
        connect(node22, node21, 20., 40)
        connect(node21, node23, 4., 40)
        connect(node23, hidden_nuclei_1, 15., 40, 2.)
        connect(hidden_nuclei_1, node24, 15., 40, 2.)
        connect(node24, node25, 15., 40)
        connect(node25, node26, 15., 40)
        connect(node11, node23, 7., 40, 0.1)
        connect(node24, pool[1], 80., 40)
        connect(node25, pool[1], 80., 40)
        connect(node26, pool[1], 80., 40)
        # --- Level 3: same motif; 3.3 also inhibits level-1 node 1.3 ---
        hidden_nuclei_2 = create_with_mmeter(40, 'hidden_2')
        node31 = create_with_mmeter(40, 'node3.1') # Why?
        node32 = create_with_mmeter(40, 'node3.2') # Why?
        node33 = create_with_mmeter(40, 'node3.3')
        node34 = create_with_mmeter(40, 'node3.4')
        node35 = create_with_mmeter(40, 'node3.5')
        node36 = create_with_mmeter(40, 'node3.6')
        connect(node23, node31, 15., 40)
        connect(node23, node33, 6., 40, .1)
        connect(node31, node32, 17., 40)
        connect(node32, node31, 17., 40)
        connect(node31, node33, 4., 40, 1.5)
        connect(node33, hidden_nuclei_2, 15., 40, 2.)
        connect(hidden_nuclei_2, node34, 15., 40)
        connect(node34, node35, 15., 40)
        connect(node35, node36, 15., 40)
        connect(node34, pool[2], 80., 40)
        connect(node35, pool[2], 80., 40)
        connect(node36, pool[2], 80., 40)
        # Inhibitory feedback to level 1 (negative weight, scaled by INH_COEF).
        connect(node33, node13, Params.INH_COEF.value * -40, 80, .1)
        # --- Level 4: same motif; 4.3 inhibits level-2 node 2.4 ---
        hidden_nuclei_3 = create_with_mmeter(40, 'hidden_3')
        node41 = create_with_mmeter(40, 'node4.1')
        node42 = create_with_mmeter(40, 'node4.2')
        node43 = create_with_mmeter(40, 'node4.3')
        node44 = create_with_mmeter(40, 'node4.4')
        node45 = create_with_mmeter(40, 'node4.5')
        node46 = create_with_mmeter(40, 'node4.6')
        connect(node33, node41, 15., 40)
        connect(node33, node43, 6., 40, .1)
        connect(node41, node42, 17., 40)
        connect(node42, node41, 17., 40)
        connect(node41, node43, 4., 40)
        connect(node43, hidden_nuclei_3, 15., 40, 2.)
        connect(hidden_nuclei_3, node44, 15., 40, 2.)
        connect(node44, node45, 15., 40)
        connect(node45, node46, 15., 40)
        connect(node44, pool[3], 40., 60)
        connect(node45, pool[3], 60., 60)
        connect(node46, pool[3], 80., 60)
        connect(node43, node24, Params.INH_COEF.value * -30, 60, .1)
        # --- Level 5: same motif without a hidden relay; 5.3 inhibits 3.4 ---
        node51 = create_with_mmeter(40, 'node5.1')
        node52 = create_with_mmeter(40, 'node5.2')
        node53 = create_with_mmeter(40, 'node5.3')
        node54 = create_with_mmeter(40, 'node5.4')
        node55 = create_with_mmeter(40, 'node5.5')
        node56 = create_with_mmeter(40, 'node5.6')
        connect(node43, node51, 15., 40)
        connect(node43, node53, 9., 40, .1)
        connect(node51, node52, 17., 40)
        connect(node52, node51, 17., 40)
        connect(node51, node53, 4., 40)
        connect(node53, node54, 15., 40, 2.)
        connect(node54, node55, 15., 40)
        connect(node55, node56, 15., 40)
        connect(node54, pool[4], 40., 40)
        connect(node55, pool[4], 60., 40)
        connect(node56, pool[4], 80., 40)
        connect(node53, node34, Params.INH_COEF.value * -30, 60, .1)
        # --- Level 6: five groups; 6.3 inhibits both 4.5 and 5.4 ---
        node61 = create_with_mmeter(40, 'node6.1')
        node62 = create_with_mmeter(40, 'node6.2')
        node63 = create_with_mmeter(40, 'node6.3')
        node64 = create_with_mmeter(40, 'node6.4')
        node65 = create_with_mmeter(40, 'node6.5')
        connect(node53, node61, 15., 40)
        connect(node53, node63, 6., 40, .1)
        connect(node61, node62, 17., 40)
        connect(node62, node61, 17., 40)
        connect(node61, node63, 4., 40)
        connect(node63, node64, 15., 40, 2.)
        connect(node64, node65, 15., 40, 2.)
        connect(node64, pool[5], 60., 40)
        connect(node65, pool[5], 80., 40)
        connect(node63, node45, Params.INH_COEF.value * -25, 60, .1)
        connect(node63, node54, Params.INH_COEF.value * -25, 60, .1)
c5c8a2aac624541b391bc84ed88422229fdafc2d | 2,109 | py | Python | demisto_sdk/commands/common/content/tests/objects/abstract_objects/yaml_object_test.py | nericksen/demisto-sdk | 27c870997597209f196a36358e30e896ca69fbb9 | [
"MIT"
] | null | null | null | demisto_sdk/commands/common/content/tests/objects/abstract_objects/yaml_object_test.py | nericksen/demisto-sdk | 27c870997597209f196a36358e30e896ca69fbb9 | [
"MIT"
] | null | null | null | demisto_sdk/commands/common/content/tests/objects/abstract_objects/yaml_object_test.py | nericksen/demisto-sdk | 27c870997597209f196a36358e30e896ca69fbb9 | [
"MIT"
] | null | null | null | import pytest
from demisto_sdk.commands.common.constants import PACKS_DIR, PLAYBOOKS_DIR
from demisto_sdk.commands.common.content.errors import (ContentInitializeError,
ContentSerializeError)
from demisto_sdk.commands.common.content.objects.abstract_objects import \
YAMLObject
from demisto_sdk.commands.common.handlers import YAML_Handler
from demisto_sdk.commands.common.tools import src_root
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_VALID_YAML = TEST_CONTENT_REPO / PACKS_DIR / 'Sample01' / PLAYBOOKS_DIR / 'playbook-sample_new.yml'
TEST_NOT_VALID_YAML = TEST_DATA / 'malformed.yaml'
yaml = YAML_Handler(width=50000)
| 39.055556 | 104 | 0.705073 | import pytest
from demisto_sdk.commands.common.constants import PACKS_DIR, PLAYBOOKS_DIR
from demisto_sdk.commands.common.content.errors import (ContentInitializeError,
ContentSerializeError)
from demisto_sdk.commands.common.content.objects.abstract_objects import \
YAMLObject
from demisto_sdk.commands.common.handlers import YAML_Handler
from demisto_sdk.commands.common.tools import src_root
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_VALID_YAML = TEST_CONTENT_REPO / PACKS_DIR / 'Sample01' / PLAYBOOKS_DIR / 'playbook-sample_new.yml'
TEST_NOT_VALID_YAML = TEST_DATA / 'malformed.yaml'
yaml = YAML_Handler(width=50000)
class TestValidYAML:
    """Tests for YAMLObject against a well-formed YAML file."""
    def test_valid_yaml_file_path(self):
        """to_dict() should match what ruamel/yaml parses from the same file."""
        obj = YAMLObject(TEST_VALID_YAML)
        assert obj.to_dict() == yaml.load(TEST_VALID_YAML.open())
    def test_get_item(self):
        """__getitem__ should expose top-level keys of the parsed YAML."""
        obj = YAMLObject(TEST_VALID_YAML)
        assert obj["fromversion"] == yaml.load(TEST_VALID_YAML.open())["fromversion"]
    @pytest.mark.parametrize(argnames="default_value", argvalues=["test_value", ""])
    def test_get(self, default_value: str):
        """get() returns the default for a missing key.

        NOTE(review): the empty-default branch repeats test_get_item and
        never exercises .get() — consider asserting obj.get("no such key", "")
        instead.
        """
        obj = YAMLObject(TEST_VALID_YAML)
        if default_value:
            assert obj.get("no such key", default_value) == default_value
        else:
            assert obj["fromversion"] == yaml.load(TEST_VALID_YAML.open())["fromversion"]
    def test_dump(self, datadir):
        """dump() writes a 'prefix-' file whose content round-trips; clean up after."""
        expected_file = TEST_VALID_YAML.parent / f'prefix-{TEST_VALID_YAML.name}'
        obj = YAMLObject(TEST_VALID_YAML, "prefix")
        assert obj.dump()[0] == expected_file
        assert obj.to_dict() == yaml.load(expected_file.open())
        expected_file.unlink()
class TestInvalidYAML:
    """Tests for YAMLObject error paths."""
    def test_malformed_yaml_data_file_path(self, datadir):
        """Malformed YAML content should fail at to_dict(), not at construction."""
        obj = YAMLObject(TEST_NOT_VALID_YAML)
        with pytest.raises(ContentSerializeError):
            obj.to_dict()
    def test_malformed_yaml_path(self, datadir):
        """A nonexistent path should be rejected at construction time."""
        with pytest.raises(ContentInitializeError):
            YAMLObject('Not valid path')
| 1,074 | 192 | 99 |
3e4d395c491972acbf25456942062fe1ab0efbe6 | 3,018 | py | Python | tools/sparrow/binfiletool.py | sics-iot/sparrow | 3ab386ccd80293928da7d5a98c2b7a9ef5fb96f9 | [
"BSD-3-Clause"
] | 26 | 2016-07-15T06:18:27.000Z | 2021-12-15T19:54:49.000Z | tools/sparrow/binfiletool.py | sics-iot/sparrow | 3ab386ccd80293928da7d5a98c2b7a9ef5fb96f9 | [
"BSD-3-Clause"
] | 34 | 2016-08-16T09:55:55.000Z | 2018-02-19T09:53:47.000Z | tools/sparrow/binfiletool.py | sics-iot/sparrow | 3ab386ccd80293928da7d5a98c2b7a9ef5fb96f9 | [
"BSD-3-Clause"
] | 16 | 2016-08-21T06:38:29.000Z | 2021-11-28T22:02:44.000Z | #!/usr/bin/env python
#
# Copyright (c) 2016, SICS, Swedish ICT
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Joakim Eriksson, joakime@sics.se
#
#
import sys, binascii, argparse, re
parser = argparse.ArgumentParser(description='convert hex to binary - and add padding.')
parser.add_argument("-i", help="input file - stdin is default.")
parser.add_argument("-o", help="input file - stdout is default.")
parser.add_argument("-c", help="byte value for padding - 0xff is default.")
parser.add_argument("-p", help="number of pad bytes AFTER infile or with minus - the total size of the file to pad.")
parser.add_argument("-P", help="number of pad bytes BEFORE infile.")
parser.add_argument("-B", action="store_true", help="file is binary - no hex to bin conversion.")
parser.add_argument("-V", action="store_true", help="print version and exit.")
args = parser.parse_args()
if args.V:
print "Sparrow binfile tool - Version 1.0"
exit()
# setup the in and out files
infile = open(args.i, 'r') if args.i else sys.stdin
outfile = open(args.o, 'w') if args.o else sys.stdout
padc = chr(int(args.c if args.c else "0xff", 16))
pad_after = int(args.p if args.p else 0)
pad_before = int(args.P if args.P else 0)
data = infile.read()
if not args.B:
data = re.sub(r'(?m)^#.*\n?', '', data)
data = binascii.unhexlify(''.join(data.split()))
# pad at the end
if pad_after < 0:
data = data + padc * ((-pad_after) - len(data))
else:
data = data + padc * pad_after
# pad at start
data = padc * pad_before + data
# write the file
outfile.write(data)
| 40.24 | 117 | 0.733267 | #!/usr/bin/env python
#
# Copyright (c) 2016, SICS, Swedish ICT
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Institute nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Joakim Eriksson, joakime@sics.se
#
#
import sys, binascii, argparse, re
parser = argparse.ArgumentParser(description='convert hex to binary - and add padding.')
parser.add_argument("-i", help="input file - stdin is default.")
parser.add_argument("-o", help="input file - stdout is default.")
parser.add_argument("-c", help="byte value for padding - 0xff is default.")
parser.add_argument("-p", help="number of pad bytes AFTER infile or with minus - the total size of the file to pad.")
parser.add_argument("-P", help="number of pad bytes BEFORE infile.")
parser.add_argument("-B", action="store_true", help="file is binary - no hex to bin conversion.")
parser.add_argument("-V", action="store_true", help="print version and exit.")
args = parser.parse_args()
if args.V:
print "Sparrow binfile tool - Version 1.0"
exit()
# setup the in and out files
infile = open(args.i, 'r') if args.i else sys.stdin
outfile = open(args.o, 'w') if args.o else sys.stdout
padc = chr(int(args.c if args.c else "0xff", 16))
pad_after = int(args.p if args.p else 0)
pad_before = int(args.P if args.P else 0)
data = infile.read()
if not args.B:
data = re.sub(r'(?m)^#.*\n?', '', data)
data = binascii.unhexlify(''.join(data.split()))
# pad at the end
if pad_after < 0:
data = data + padc * ((-pad_after) - len(data))
else:
data = data + padc * pad_after
# pad at start
data = padc * pad_before + data
# write the file
outfile.write(data)
| 0 | 0 | 0 |
e161dd801ed9c09d1d2f4a17b19cd271ea31abb7 | 1,262 | py | Python | libutils/primes.py | nicknaym530/android_system_core | 9f6b99e7aa5eda0391f973a69e97921824de7502 | [
"MIT"
] | 8,865 | 2017-03-13T03:27:32.000Z | 2022-03-31T12:57:44.000Z | libutils/primes.py | nicknaym530/android_system_core | 9f6b99e7aa5eda0391f973a69e97921824de7502 | [
"MIT"
] | 359 | 2017-03-13T06:37:22.000Z | 2022-01-27T14:31:43.000Z | libutils/primes.py | nicknaym530/android_system_core | 9f6b99e7aa5eda0391f973a69e97921824de7502 | [
"MIT"
] | 1,709 | 2017-03-13T02:29:13.000Z | 2022-03-31T12:57:48.000Z | #!/usr/bin/env python2.6
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Generates a table of prime numbers for use in BasicHashtable.cpp.
#
# Each prime is chosen such that it is a little more than twice as large as
# the previous prime in the table. This makes it easier to choose a new
# hashtable size when the underlying array is grown by as nominal factor
# of two each time.
#
print "static size_t PRIMES[] = {"
n = 5
max = 2**31 - 1
while n < max:
print " %d," % (n)
n = n * 2 + 1
while not is_odd_prime(n):
n += 2
print " 0,"
print "};"
| 26.291667 | 75 | 0.684628 | #!/usr/bin/env python2.6
#
# Copyright (C) 2011 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Generates a table of prime numbers for use in BasicHashtable.cpp.
#
# Each prime is chosen such that it is a little more than twice as large as
# the previous prime in the table. This makes it easier to choose a new
# hashtable size when the underlying array is grown by as nominal factor
# of two each time.
#
def is_odd_prime(n):
limit = (n - 1) / 2
d = 3
while d <= limit:
if n % d == 0:
return False
d += 2
return True
print "static size_t PRIMES[] = {"
n = 5
max = 2**31 - 1
while n < max:
print " %d," % (n)
n = n * 2 + 1
while not is_odd_prime(n):
n += 2
print " 0,"
print "};"
| 112 | 0 | 23 |
78104853e3b07d47b418bdf8759412a8ee2b5fa5 | 2,175 | py | Python | samples/patterns.py | ShiJbey/neighborly | 5af1e3211f1ef0e25803790850e7cd3d3a49be69 | [
"MIT"
] | null | null | null | samples/patterns.py | ShiJbey/neighborly | 5af1e3211f1ef0e25803790850e7cd3d3a49be69 | [
"MIT"
] | null | null | null | samples/patterns.py | ShiJbey/neighborly | 5af1e3211f1ef0e25803790850e7cd3d3a49be69 | [
"MIT"
] | null | null | null | """
In this file, I am messing around with using generator functions
to handle things like pattern matching in LifeEventRules and
timestep sizes when handling level-of-detail changes
"""
from abc import abstractmethod
from typing import Generator, List, Protocol, Tuple
from dataclasses import dataclass
from neighborly.core.ecs import GameObject, Component
from neighborly.core.life_event import (
ILifeEventListener,
LifeEvent,
check_gameobject_preconditions,
handle_gameobject_effects,
)
strength_greater_than_10 = strength_greater_than(10)
@dataclass
@dataclass
if __name__ == "__main__":
main()
| 24.438202 | 76 | 0.664368 | """
In this file, I am messing around with using generator functions
to handle things like pattern matching in LifeEventRules and
timestep sizes when handling level-of-detail changes
"""
from abc import abstractmethod
from typing import Generator, List, Protocol, Tuple
from dataclasses import dataclass
from neighborly.core.ecs import GameObject, Component
from neighborly.core.life_event import (
ILifeEventListener,
LifeEvent,
check_gameobject_preconditions,
handle_gameobject_effects,
)
class IPatternFn(Protocol):
@abstractmethod
def __call__(
self, world: List[GameObject]
) -> Generator[Tuple[LifeEvent, Tuple[GameObject, ...]], None, None]:
raise NotImplementedError()
class TestEvent(LifeEvent):
event_type: str = "test-event"
def __init__(self, timestamp: str) -> None:
super().__init__(timestamp)
@classmethod
def get_type(cls) -> str:
return cls.event_type
def strength_greater_than(value: int) -> IPatternFn:
def pattern(
world: List[GameObject],
):
for g in world:
if g.has_component(A) and g.get_component(A).strength > value:
yield TestEvent("now"), (g,)
return pattern
strength_greater_than_10 = strength_greater_than(10)
@dataclass
class A(Component, ILifeEventListener):
strength: int = 0
def handle_event(self, event: LifeEvent) -> bool:
if event.get_type() == "test-event":
print("Handling works")
return True
def check_preconditions(self, event: LifeEvent) -> bool:
print("Pizza")
return True
@dataclass
class B(Component):
health: int = 0
def main():
world = [
GameObject(components=[A()]),
GameObject(components=[A(11), B(10)]),
GameObject(components=[A(25)]),
]
for event, participants in strength_greater_than_10(world):
preconditions_pass = all(
[check_gameobject_preconditions(g, event) for g in participants]
)
if preconditions_pass:
for g in participants:
handle_gameobject_effects(g, event)
if __name__ == "__main__":
main()
| 1,130 | 277 | 136 |
42d22cceabcb1eaaaa88e412012bd0ae040d3ad4 | 3,228 | py | Python | app/preprocessing_utils.py | Sylvariane/ncc-sezam | b0b24c14e07a774cc95e52a09d4a833fc68b955d | [
"MIT"
] | 2 | 2021-10-16T08:16:01.000Z | 2021-10-16T08:18:26.000Z | app/preprocessing_utils.py | Sylvariane/ncc-sezam | b0b24c14e07a774cc95e52a09d4a833fc68b955d | [
"MIT"
] | null | null | null | app/preprocessing_utils.py | Sylvariane/ncc-sezam | b0b24c14e07a774cc95e52a09d4a833fc68b955d | [
"MIT"
] | 1 | 2021-12-20T19:16:48.000Z | 2021-12-20T19:16:48.000Z | """
This file implements different preprocessing utils functions.
"""
import pandas as pd
import re
from stopwords import stopwords_nltk, stopwords_specific
import spacy
import unicodedata
def preprocess_string(nlp, input_string):
"""
Wraps all operations to ensure common normalization.
"""
processed_string = input_string.lower()
processed_string = special_structures(processed_string)
#NA for match processed_string = remove_stopwords(processed_string)
processed_string = replace_digit(processed_string)
processed_string = remove_trailings(processed_string)
processed_string = strip_accents(processed_string)
#processed_string = lemmatize(nlp, processed_string)
return processed_string
"""#############"""
"""Sub-functions"""
"""#############"""
def lemmatize(nlp, input_string):
"""
Lemmatizes an input string
"""
# Can use spacy pipeline to increase speed
doc = nlp(input_string)
return " ".join([token.lemma_ for token in doc])
def remove_stopwords(input_string):
"""
This function removes stopwords from an input string.
"""
stopwords = stopwords_nltk + stopwords_specific
output_string = " ".join(
[word for word in input_string.split() if word not in stopwords]
)
return output_string
def replace_digit(input_string):
"""
Remove all digits from an input string (Slow on large corpuses).
"""
output_string = "".join([i for i in input_string if not i.isdigit()])
return output_string
def remove_trailings(input_string):
"""
Remove duplicated spaces.
"""
output_string = " ".join(input_string.split())
return output_string
def special_structures(input_string):
"""
Replace some special structures by space.
"""
input_string = input_string.replace("'", " ")
input_string = input_string.replace("(", " ")
input_string = input_string.replace(")", " ")
input_string = input_string.replace("1er ", " ")
input_string = input_string.replace(",", " ")
input_string = input_string.replace("«", " ")
input_string = input_string.replace("»", " ")
output_string = input_string.replace("n°", " ")
return output_string
def strip_accents(text):
"""
Removes accents.
"""
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
# UNUSED
def regex_loi(input_series: pd.Series) -> pd.Series:
"""
Finds the law patterns that match a given token.
"""
token = r"(\w+).*\s\d{4}\s"
replace_by = "<LOI> "
# Finds patterns that match tokens
types = input_series.apply(
lambda s: m.group(1).lower() if (m := re.match(token, s)) else None
)
types = set(types)
types.discard(None)
# Change "arrêté du ... 2021" en <LOI>
patterns = [rf"{type_loi}(.*?)\s\d{{4}}\s" for type_loi in types]
for pattern in patterns:
output_series = input_series.apply(
lambda s: re.sub(pattern, replace_by, s, flags=re.IGNORECASE)
)
return output_series
| 27.12605 | 75 | 0.653036 | """
This file implements different preprocessing utils functions.
"""
import pandas as pd
import re
from stopwords import stopwords_nltk, stopwords_specific
import spacy
import unicodedata
def preprocess_string(nlp, input_string):
"""
Wraps all operations to ensure common normalization.
"""
processed_string = input_string.lower()
processed_string = special_structures(processed_string)
#NA for match processed_string = remove_stopwords(processed_string)
processed_string = replace_digit(processed_string)
processed_string = remove_trailings(processed_string)
processed_string = strip_accents(processed_string)
#processed_string = lemmatize(nlp, processed_string)
return processed_string
"""#############"""
"""Sub-functions"""
"""#############"""
def lemmatize(nlp, input_string):
"""
Lemmatizes an input string
"""
# Can use spacy pipeline to increase speed
doc = nlp(input_string)
return " ".join([token.lemma_ for token in doc])
def remove_stopwords(input_string):
"""
This function removes stopwords from an input string.
"""
stopwords = stopwords_nltk + stopwords_specific
output_string = " ".join(
[word for word in input_string.split() if word not in stopwords]
)
return output_string
def replace_digit(input_string):
"""
Remove all digits from an input string (Slow on large corpuses).
"""
output_string = "".join([i for i in input_string if not i.isdigit()])
return output_string
def remove_trailings(input_string):
"""
Remove duplicated spaces.
"""
output_string = " ".join(input_string.split())
return output_string
def special_structures(input_string):
"""
Replace some special structures by space.
"""
input_string = input_string.replace("'", " ")
input_string = input_string.replace("(", " ")
input_string = input_string.replace(")", " ")
input_string = input_string.replace("1er ", " ")
input_string = input_string.replace(",", " ")
input_string = input_string.replace("«", " ")
input_string = input_string.replace("»", " ")
output_string = input_string.replace("n°", " ")
return output_string
def strip_accents(text):
"""
Removes accents.
"""
try:
text = unicode(text, 'utf-8')
except NameError: # unicode is a default on python 3
pass
text = unicodedata.normalize('NFD', text)\
.encode('ascii', 'ignore')\
.decode("utf-8")
return str(text)
# UNUSED
def regex_loi(input_series: pd.Series) -> pd.Series:
"""
Finds the law patterns that match a given token.
"""
token = r"(\w+).*\s\d{4}\s"
replace_by = "<LOI> "
# Finds patterns that match tokens
types = input_series.apply(
lambda s: m.group(1).lower() if (m := re.match(token, s)) else None
)
types = set(types)
types.discard(None)
# Change "arrêté du ... 2021" en <LOI>
patterns = [rf"{type_loi}(.*?)\s\d{{4}}\s" for type_loi in types]
for pattern in patterns:
output_series = input_series.apply(
lambda s: re.sub(pattern, replace_by, s, flags=re.IGNORECASE)
)
return output_series
| 0 | 0 | 0 |
6a82091195b6c3c72d4d13558f0e7dd10bb2ea34 | 7,261 | py | Python | model/modules/checkpoint_activations.py | RenShuhuai-Andy/WMT18-English-Chinese-Machine-Translation | 509eb0ab423160784baaeb449033a7933dc58ced | [
"MIT"
] | 8 | 2021-06-08T10:53:16.000Z | 2022-03-30T11:13:48.000Z | hippop_transformer/model/modules/checkpoint_activations.py | RenShuhuai-Andy/HippopTransformer | 1b8397b50b4ef35893c8dfa4e9f1efd9e7270540 | [
"MIT"
] | null | null | null | hippop_transformer/model/modules/checkpoint_activations.py | RenShuhuai-Andy/HippopTransformer | 1b8397b50b4ef35893c8dfa4e9f1efd9e7270540 | [
"MIT"
] | 1 | 2021-03-10T07:38:53.000Z | 2021-03-10T07:38:53.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Tuple, Union
import torch
import torch.utils.checkpoint as checkpoint
from fairseq import utils
def checkpoint_wrapper(m):
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
Usage::
checkpointed_module = checkpoint_wrapper(my_module)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
"""
original_forward = m.forward
m.forward = _checkpointed_forward
return m
def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]:
"""
Usage::
kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4)
args, kwargs = unpack_kwargs(kwarg_keys, flat_args)
assert args == [1, 2]
assert kwargs == {"a": 3, "b": 4}
"""
kwarg_keys = []
flat_args = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return kwarg_keys, flat_args
def split_non_tensors(
mixed: Union[torch.Tensor, Tuple[Any]]
) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]:
"""
Usage::
x = torch.Tensor([1])
y = torch.Tensor([2])
tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y, None, 3)
"""
if isinstance(mixed, torch.Tensor):
return (mixed,), None
tensors = []
packed_non_tensors = {"is_tensor": [], "objects": []}
for o in mixed:
if isinstance(o, torch.Tensor):
packed_non_tensors["is_tensor"].append(True)
tensors.append(o)
else:
packed_non_tensors["is_tensor"].append(False)
packed_non_tensors["objects"].append(o)
return tuple(tensors), packed_non_tensors
class CheckpointFunction(torch.autograd.Function):
"""Similar to the torch version, but support non-Tensor outputs.
The caller is expected to provide a dict (*parent_ctx_dict*) that will hold
the non-Tensor outputs. These should be combined with the Tensor *outputs*
by calling ``unpack_non_tensors``.
"""
@staticmethod
@staticmethod
| 36.487437 | 98 | 0.652665 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Tuple, Union
import torch
import torch.utils.checkpoint as checkpoint
from fairseq import utils
def checkpoint_wrapper(m):
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
Usage::
checkpointed_module = checkpoint_wrapper(my_module)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
"""
original_forward = m.forward
def _checkpointed_forward(*args, **kwargs):
# Autograd Functions in PyTorch work best with positional args, since
# the backward must return gradients (or None) for every input argument.
# We can flatten keyword arguments to make this easier.
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs)
parent_ctx_dict = {}
output = CheckpointFunction.apply(
original_forward, parent_ctx_dict, kwarg_keys, *flat_args
)
if isinstance(output, torch.Tensor):
return output
else:
packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"]
if packed_non_tensor_outputs:
output = unpack_non_tensors(output, packed_non_tensor_outputs)
return output
m.forward = _checkpointed_forward
return m
def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]:
"""
Usage::
kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4)
args, kwargs = unpack_kwargs(kwarg_keys, flat_args)
assert args == [1, 2]
assert kwargs == {"a": 3, "b": 4}
"""
kwarg_keys = []
flat_args = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return kwarg_keys, flat_args
def unpack_kwargs(
kwarg_keys: List[str], flat_args: List[Any]
) -> Tuple[List[Any], Dict[str, Any]]:
if len(kwarg_keys) == 0:
return flat_args, {}
args = flat_args[: -len(kwarg_keys)]
kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])}
return args, kwargs
def split_non_tensors(
mixed: Union[torch.Tensor, Tuple[Any]]
) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]:
"""
Usage::
x = torch.Tensor([1])
y = torch.Tensor([2])
tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y, None, 3)
"""
if isinstance(mixed, torch.Tensor):
return (mixed,), None
tensors = []
packed_non_tensors = {"is_tensor": [], "objects": []}
for o in mixed:
if isinstance(o, torch.Tensor):
packed_non_tensors["is_tensor"].append(True)
tensors.append(o)
else:
packed_non_tensors["is_tensor"].append(False)
packed_non_tensors["objects"].append(o)
return tuple(tensors), packed_non_tensors
def unpack_non_tensors(
tensors: Tuple[torch.Tensor],
packed_non_tensors: Dict[str, List[Any]],
) -> Tuple[Any]:
if packed_non_tensors is None:
return tensors
assert isinstance(packed_non_tensors, dict)
mixed = []
is_tensor_list = packed_non_tensors["is_tensor"]
objects = packed_non_tensors["objects"]
assert len(tensors) + len(objects) == len(is_tensor_list)
obj_i = tnsr_i = 0
for is_tensor in is_tensor_list:
if is_tensor:
mixed.append(tensors[tnsr_i])
tnsr_i += 1
else:
mixed.append(objects[obj_i])
obj_i += 1
return tuple(mixed)
class CheckpointFunction(torch.autograd.Function):
"""Similar to the torch version, but support non-Tensor outputs.
The caller is expected to provide a dict (*parent_ctx_dict*) that will hold
the non-Tensor outputs. These should be combined with the Tensor *outputs*
by calling ``unpack_non_tensors``.
"""
@staticmethod
def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args):
if torch.is_grad_enabled(): # grad may be disabled, e.g., during validation
checkpoint.check_backward_validity(args)
ctx.run_function = run_function
ctx.kwarg_keys = kwarg_keys
ctx.fwd_rng_state = utils.get_rng_state()
tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args)
ctx.save_for_backward(*tensor_inputs)
ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
with torch.no_grad():
unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args)
outputs = run_function(*unpacked_args, **unpacked_kwargs)
if isinstance(outputs, torch.Tensor):
return outputs
else:
# Autograd Functions don't like non-Tensor outputs. We can split the
# non-Tensor and Tensor outputs, returning the former by reference
# through *parent_ctx_dict* and returning the latter directly.
outputs, packed_non_tensor_outputs = split_non_tensors(outputs)
parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), please use .backward() if possible"
)
tensor_inputs = ctx.saved_tensors
tensor_inputs = checkpoint.detach_variable(tensor_inputs)
inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
# Store the current states.
bwd_rng_state = utils.get_rng_state()
# Set the states to what it used to be before the forward pass.
utils.set_rng_state(ctx.fwd_rng_state)
with torch.enable_grad():
unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs)
outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
tensor_outputs, _ = split_non_tensors(outputs)
# Set the states back to what it was at the start of this function.
utils.set_rng_state(bwd_rng_state)
# Run backward() with only Tensors that require grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(tensor_outputs)):
if tensor_outputs[i].requires_grad:
outputs_with_grad.append(tensor_outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError(
"None of the outputs have requires_grad=True, "
"this checkpoint() is not necessary"
)
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs
)
return (None, None, None) + grads
| 4,592 | 0 | 125 |
9a010192a9dd0a5c6d8fdfbb670fcd51a1b7877b | 574 | py | Python | civic_jabber_ingest/models/contact.py | civic-jabber/data-ingest | bf44c6041ad947547ceede535124c5db004d2f43 | [
"MIT"
] | null | null | null | civic_jabber_ingest/models/contact.py | civic-jabber/data-ingest | bf44c6041ad947547ceede535124c5db004d2f43 | [
"MIT"
] | 26 | 2020-10-03T21:08:11.000Z | 2020-12-22T22:39:35.000Z | civic_jabber_ingest/models/contact.py | civic-jabber/data-ingest | bf44c6041ad947547ceede535124c5db004d2f43 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from civic_jabber_ingest.models.base import DataModel
from civic_jabber_ingest.utils.xml import get_jinja_template
@dataclass
| 24.956522 | 60 | 0.695122 | from dataclasses import dataclass
from civic_jabber_ingest.models.base import DataModel
from civic_jabber_ingest.utils.xml import get_jinja_template
@dataclass
class Contact(DataModel):
first_name: str = None
last_name: str = None
agency: str = None
address: str = None
city: str = None
state: str = None
zip_code: str = None
phone: str = None
email: str = None
def xml_template(self):
data = self.to_dict(drop_empty=True)
template = get_jinja_template("contact")
return template.render(data=data).strip()
| 146 | 243 | 22 |
d25229d1a0cc8a230825096b11afe272fdca3534 | 6,632 | py | Python | tests/fixtures/doaj/e65469_doaj_json.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 17 | 2015-02-10T07:10:29.000Z | 2021-05-14T22:24:45.000Z | tests/fixtures/doaj/e65469_doaj_json.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 459 | 2015-03-31T18:24:23.000Z | 2022-03-30T19:44:40.000Z | tests/fixtures/doaj/e65469_doaj_json.py | elifesciences/elife-bot | d3a102c8030e4b7ec83cbd45e5f839dba4f9ffd9 | [
"MIT"
] | 9 | 2015-04-18T16:57:31.000Z | 2020-10-30T11:49:13.000Z | # coding=utf-8
from collections import OrderedDict
EXPECTED = OrderedDict(
[
(
"bibjson",
OrderedDict(
[
(
"abstract",
"The inflammatory environment of demyelinated lesions in multiple sclerosis (MS) patients contributes to remyelination failure. Inflammation activates a cytoprotective pathway, the integrated stress response (ISR), but it remains unclear whether enhancing the ISR can improve remyelination in an inflammatory environment. To examine this possibility, the remyelination stage of experimental autoimmune encephalomyelitis (EAE), as well as a mouse model that incorporates cuprizone-induced demyelination along with CNS delivery of the proinflammatory cytokine IFN-γ were used here. We demonstrate that either genetic or pharmacological ISR enhancement significantly increased the number of remyelinating oligodendrocytes and remyelinated axons in the inflammatory lesions. Moreover, the combined treatment of the ISR modulator Sephin1 with the oligodendrocyte differentiation enhancing reagent bazedoxifene increased myelin thickness of remyelinated axons to pre-lesion levels. Taken together, our findings indicate that prolonging the ISR protects remyelinating oligodendrocytes and promotes remyelination in the presence of inflammation, suggesting that ISR enhancement may provide reparative benefit to MS patients.",
),
(
"author",
[
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Yanan Chen"),
(
"orcid_id",
"https://orcid.org/0000-0001-5510-231X",
),
]
),
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Rejani B Kunjamma"),
]
),
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Molly Weiner"),
]
),
OrderedDict(
[
(
"affiliation",
"Weill Institute for Neuroscience, Department of Neurology, University of California, San Francisco, San Francisco, United States",
),
("name", "Jonah R Chan"),
(
"orcid_id",
"https://orcid.org/0000-0002-2176-1242",
),
]
),
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Brian Popko"),
(
"orcid_id",
"https://orcid.org/0000-0001-9948-2553",
),
]
),
],
),
(
"identifier",
[
OrderedDict(
[("id", "10.7554/eLife.65469"), ("type", "doi")]
),
OrderedDict([("id", "2050-084X"), ("type", "eissn")]),
OrderedDict([("id", "e65469"), ("type", "elocationid")]),
],
),
("journal", OrderedDict([("volume", "10")])),
(
"keywords",
[
"integrated stress response",
"remyelination",
"interferon gamma",
"oligodendrocyte",
"cuprizone",
"multiple sclerosis",
],
),
(
"link",
[
OrderedDict(
[
("content_type", "text/html"),
("type", "fulltext"),
("url", "https://elifesciences.org/articles/65469"),
]
)
],
),
("month", "3"),
(
"title",
"Prolonging the integrated stress response enhances CNS remyelination in an inflammatory environment",
),
("year", "2021"),
]
),
)
]
)
| 54.809917 | 1,242 | 0.375452 | # coding=utf-8
from collections import OrderedDict
EXPECTED = OrderedDict(
[
(
"bibjson",
OrderedDict(
[
(
"abstract",
"The inflammatory environment of demyelinated lesions in multiple sclerosis (MS) patients contributes to remyelination failure. Inflammation activates a cytoprotective pathway, the integrated stress response (ISR), but it remains unclear whether enhancing the ISR can improve remyelination in an inflammatory environment. To examine this possibility, the remyelination stage of experimental autoimmune encephalomyelitis (EAE), as well as a mouse model that incorporates cuprizone-induced demyelination along with CNS delivery of the proinflammatory cytokine IFN-γ were used here. We demonstrate that either genetic or pharmacological ISR enhancement significantly increased the number of remyelinating oligodendrocytes and remyelinated axons in the inflammatory lesions. Moreover, the combined treatment of the ISR modulator Sephin1 with the oligodendrocyte differentiation enhancing reagent bazedoxifene increased myelin thickness of remyelinated axons to pre-lesion levels. Taken together, our findings indicate that prolonging the ISR protects remyelinating oligodendrocytes and promotes remyelination in the presence of inflammation, suggesting that ISR enhancement may provide reparative benefit to MS patients.",
),
(
"author",
[
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Yanan Chen"),
(
"orcid_id",
"https://orcid.org/0000-0001-5510-231X",
),
]
),
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Rejani B Kunjamma"),
]
),
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Molly Weiner"),
]
),
OrderedDict(
[
(
"affiliation",
"Weill Institute for Neuroscience, Department of Neurology, University of California, San Francisco, San Francisco, United States",
),
("name", "Jonah R Chan"),
(
"orcid_id",
"https://orcid.org/0000-0002-2176-1242",
),
]
),
OrderedDict(
[
(
"affiliation",
"Department of Neurology, Division of Multiple Sclerosis and Neuroimmunology, Northwestern University Feinberg School of Medicine, Chicago, United States",
),
("name", "Brian Popko"),
(
"orcid_id",
"https://orcid.org/0000-0001-9948-2553",
),
]
),
],
),
(
"identifier",
[
OrderedDict(
[("id", "10.7554/eLife.65469"), ("type", "doi")]
),
OrderedDict([("id", "2050-084X"), ("type", "eissn")]),
OrderedDict([("id", "e65469"), ("type", "elocationid")]),
],
),
("journal", OrderedDict([("volume", "10")])),
(
"keywords",
[
"integrated stress response",
"remyelination",
"interferon gamma",
"oligodendrocyte",
"cuprizone",
"multiple sclerosis",
],
),
(
"link",
[
OrderedDict(
[
("content_type", "text/html"),
("type", "fulltext"),
("url", "https://elifesciences.org/articles/65469"),
]
)
],
),
("month", "3"),
(
"title",
"Prolonging the integrated stress response enhances CNS remyelination in an inflammatory environment",
),
("year", "2021"),
]
),
)
]
)
| 0 | 0 | 0 |
a8e3224b84936329ff6db86c5a7e78635d188e74 | 869 | py | Python | tensorflow_blade/tf_blade/util/tf2onnx_import_helper.py | JamesTheZ/BladeDISC | e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | [
"Apache-2.0"
] | 328 | 2021-12-20T03:29:35.000Z | 2022-03-31T14:27:23.000Z | tensorflow_blade/tf_blade/util/tf2onnx_import_helper.py | JamesTheZ/BladeDISC | e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | [
"Apache-2.0"
] | 82 | 2021-12-20T09:15:16.000Z | 2022-03-31T09:33:48.000Z | tensorflow_blade/tf_blade/util/tf2onnx_import_helper.py | JamesTheZ/BladeDISC | e6c76ee557ebfccd560d44f6b6276bbc4e0a8a34 | [
"Apache-2.0"
] | 66 | 2021-12-21T17:28:27.000Z | 2022-03-29T12:08:34.000Z | # Copyright 2022 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
__TF_LOG_ENV = 'TF_CPP_MIN_LOG_LEVEL'
__ENV_VALUE = os.environ.get(__TF_LOG_ENV, None)
try:
import tf2onnx # noqa: F401
finally:
if __ENV_VALUE is not None:
os.environ[__TF_LOG_ENV] = __ENV_VALUE
else:
del os.environ[__TF_LOG_ENV]
| 37.782609 | 74 | 0.756041 | # Copyright 2022 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
__TF_LOG_ENV = 'TF_CPP_MIN_LOG_LEVEL'
__ENV_VALUE = os.environ.get(__TF_LOG_ENV, None)
try:
import tf2onnx # noqa: F401
finally:
if __ENV_VALUE is not None:
os.environ[__TF_LOG_ENV] = __ENV_VALUE
else:
del os.environ[__TF_LOG_ENV]
| 0 | 0 | 0 |
782d2d0fd1174706f95dd685af843c816e06171a | 803 | py | Python | prophet_gcp/ml_engine_utils.py | SpikeLab-CL/paralell_prophet | c04b069ae27eb8645dd10e0cf9992415e585ba62 | [
"WTFPL"
] | 7 | 2018-10-18T18:06:27.000Z | 2021-11-02T19:53:31.000Z | prophet_gcp/ml_engine_utils.py | SpikeLab-CL/paralell_prophet | c04b069ae27eb8645dd10e0cf9992415e585ba62 | [
"WTFPL"
] | null | null | null | prophet_gcp/ml_engine_utils.py | SpikeLab-CL/paralell_prophet | c04b069ae27eb8645dd10e0cf9992415e585ba62 | [
"WTFPL"
] | 5 | 2020-01-23T22:03:00.000Z | 2022-02-17T08:28:51.000Z | import subprocess
import os
import sys
def download_file_from_storage(gs_file_path):
"""download a file from GCS into ml-engine running container
Arguments:
gs_ile_path: string path to the MOJO file.
Returns:
string: path of the downloaded file
"""
file_name = "{0}".format(gs_file_path.split("/")[-1])
subprocess.check_call(['gsutil','-q', 'cp', gs_file_path, file_name], stderr=sys.stdout)
path = "{0}/{1}".format(os.getcwd(),file_name)
return path
def save_in_gcs(file_path, gcs_path):
"""Store a file into GCS
Arguments:
file_path: string with the file path.
gcs_path: string path to the GCS folder
"""
subprocess.check_call(['gsutil','-q', 'cp', file_path, gcs_path], stderr=sys.stdout) | 32.12 | 92 | 0.646326 | import subprocess
import os
import sys
def download_file_from_storage(gs_file_path):
"""download a file from GCS into ml-engine running container
Arguments:
gs_ile_path: string path to the MOJO file.
Returns:
string: path of the downloaded file
"""
file_name = "{0}".format(gs_file_path.split("/")[-1])
subprocess.check_call(['gsutil','-q', 'cp', gs_file_path, file_name], stderr=sys.stdout)
path = "{0}/{1}".format(os.getcwd(),file_name)
return path
def save_in_gcs(file_path, gcs_path):
"""Store a file into GCS
Arguments:
file_path: string with the file path.
gcs_path: string path to the GCS folder
"""
subprocess.check_call(['gsutil','-q', 'cp', file_path, gcs_path], stderr=sys.stdout) | 0 | 0 | 0 |
a6f540a2b4b02a1e13ec399b8ca55d39e0adf3a7 | 1,506 | py | Python | 2017-04-06 - Pythonic Code Through 5 Examples/tip4_generators/gen.py | skolbin-ssi/WintellectWebinars | 63612580c7c2f7d0c6dca930abba5696b2f40286 | [
"Apache-2.0"
] | 8 | 2019-01-24T19:22:29.000Z | 2019-06-11T17:00:56.000Z | 2017-04-06 - Pythonic Code Through 5 Examples/tip4_generators/gen.py | skolbin-ssi/WintellectWebinars | 63612580c7c2f7d0c6dca930abba5696b2f40286 | [
"Apache-2.0"
] | 28 | 2021-03-10T08:24:07.000Z | 2022-03-02T07:26:39.000Z | 2017-04-06 - Pythonic Code Through 5 Examples/tip4_generators/gen.py | skolbin-ssi/WintellectWebinars | 63612580c7c2f7d0c6dca930abba5696b2f40286 | [
"Apache-2.0"
] | 2 | 2018-01-23T11:24:44.000Z | 2019-06-06T18:27:32.000Z | import csv
import os
from purchases import Purchase
if __name__ == '__main__':
main()
| 18.825 | 71 | 0.549137 | import csv
import os
from purchases import Purchase
def fibonacci_blocking(limit):
    """Eagerly build and return the first ``limit - 1`` Fibonacci numbers.

    Starts the series at 1 (1, 1, 2, 3, ...).  "Blocking" because the
    whole list is materialized up front, unlike the generator variant.
    """
    a, b = 0, 1
    series = []
    step = 1
    while step < limit:
        a, b = b, a + b
        series.append(a)
        step += 1
    return series
def fibonacci():
    """Yield Fibonacci numbers forever, starting from 1."""
    previous, value = 0, 1
    while True:
        yield value
        previous, value = value, previous + value
def main():
    """Demo driver: print Fibonacci terms up to the first one above 100,
    then lazily stream the first six (price, beds) pairs for homes with
    2+ bedrooms priced over $100k."""
    # Fibonacci demo: stop after printing the first term greater than 100.
    for term in fibonacci():
        print(term, end=', ')
        if term > 100:
            break
    print()

    homes = get_data()
    # Lazy pipeline: filter matching homes, then project to tuples.
    matches = (
        home
        for home in homes
        if home.beds >= 2 and home.price > 100_000
    )
    price_beds = (
        (home.price, home.beds)
        for home in matches
    )
    # Printing the generator object shows it is unevaluated so far.
    print(price_beds)

    shown = 0
    for price, beds in price_beds:
        shown += 1
        print(price, beds)
        if shown > 5:
            break
def get_data():
    """Load the Sacramento 2008 real-estate CSV shipped next to this
    module and return one Purchase object per row."""
    csv_path = os.path.join(
        os.path.dirname(__file__),
        'data',
        'SacramentoRealEstateTransactions2008.csv')
    with open(csv_path, 'r', encoding='utf-8') as handle:
        rows = csv.DictReader(handle)
        return [Purchase.create_from_dict(row) for row in rows]
if __name__ == '__main__':
main()
| 1,317 | 0 | 92 |
f204c48ff95fbc3bd96c2c4fa5b53379356e0442 | 4,632 | py | Python | models/gpu_model.py | jdp527/DL_Text_Classification | b354c3523f39deab64ad6d574e5847332ad8390c | [
"MIT"
] | 24 | 2018-04-14T20:08:46.000Z | 2021-08-01T09:06:47.000Z | models/gpu_model.py | jdp527/DL_Text_Classification | b354c3523f39deab64ad6d574e5847332ad8390c | [
"MIT"
] | 2 | 2018-04-21T19:36:18.000Z | 2018-04-28T22:31:20.000Z | models/gpu_model.py | jdp527/DL_Text_Classification | b354c3523f39deab64ad6d574e5847332ad8390c | [
"MIT"
] | 2 | 2018-05-15T05:38:29.000Z | 2019-09-03T10:08:12.000Z | '''
Collection of Text Classification Keras Algorithms for Toxic Comment Classification Challenge.
https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge
'''
# Keras
from keras.layers import (Dense, Input, Bidirectional, Activation, Dropout, Embedding, Flatten, CuDNNLSTM, CuDNNGRU,
Conv2D, MaxPool2D, concatenate, K, Reshape, LSTM)
from keras.models import Model
from keras import regularizers
from keras.utils import multi_gpu_model
def blstm_2dcnn(maxlen, max_features, embed_size, embedding_matrix,
                embedding_dropout = .5,
                blstm_units = 300,
                blstm_dropout = .2,
                cnn_filters = 100,
                cnn_kernel_size = (5,5),
                max_pool_size = (5,5),
                dense_dropout = .4,
                l2_reg = .00001,
                gpus = 1):
    '''
    Bidirectional LSTM followed by a 2D convolution with max pooling.
    :param maxlen: max length of sequence
    :param max_features: max number of word embeddings
    :param embed_size: dimension of word embeddings
    :param embedding_matrix: embedding matrix created from embed file
    :param embedding_dropout: dropout after embedding layer
    :param blstm_units: number of lstm units for the bidirectional lstm
    :param blstm_dropout: dropout after the blstm layer
    :param cnn_filters: number of CNN filters
    :param cnn_kernel_size: kernel size of the convolution
    :param max_pool_size: max pool size
    :param dense_dropout: dropout before dense layer
    :param l2_reg: l2 kernel regularizer parameter
    :gpus: number of gpus
    :returns: compiled multi-GPU Keras model (6 sigmoid outputs)
    '''
    inputs = Input(shape=(maxlen, ))
    net = Embedding(max_features, embed_size, weights=[embedding_matrix],
                    input_length=maxlen)(inputs)
    net = Dropout(embedding_dropout)(net)
    # Summed forward/backward outputs keep the feature size at blstm_units.
    net = Bidirectional(CuDNNLSTM(blstm_units, return_sequences=True),
                        merge_mode='sum')(net)
    net = Dropout(blstm_dropout)(net)
    # Add a channel axis so the sequence can be convolved like an image.
    net = Reshape((maxlen, blstm_units, 1))(net)
    net = Conv2D(cnn_filters, kernel_size=cnn_kernel_size, padding='valid',
                 kernel_initializer='glorot_uniform')(net)
    net = MaxPool2D(pool_size=max_pool_size)(net)
    net = Flatten()(net)
    net = Dropout(dense_dropout)(net)
    net = Dense(6, activation = "sigmoid",
                kernel_regularizer=regularizers.l2(l2_reg))(net)
    model = Model(inputs = inputs, outputs = net)
    model = multi_gpu_model(model, gpus=gpus)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def bgru_2dcnn(maxlen, max_features, embed_size, embedding_matrix,
               embedding_dropout = .5,
               bgru_units = 300,
               bgru_dropout = .2,
               cnn_filters = 100,
               cnn_kernel_size = (5,5),
               max_pool_size = (5,5),
               dense_dropout = .4,
               l2_reg = .00001,
               gpus = 1):
    '''
    Bidirectional GRU followed by a 2D convolution with max pooling.
    :param maxlen: max length of sequence
    :param max_features: max number of word embeddings
    :param embed_size: dimension of word embeddings
    :param embedding_matrix: embedding matrix created from embed file
    :param embedding_dropout: dropout after embedding layer
    :param bgru_units: number of gru units for the bidirectional gru
    :param bgru_dropout: dropout after the bgru layer
    :param cnn_filters: number of cnn filters
    :param cnn_kernel_size: kernel size of the convolution
    :param max_pool_size: max pool size
    :param dense_dropout: dropout before dense layer
    :param l2_reg: l2 kernel regularizer parameter
    :gpus: number of gpus
    :returns: compiled multi-GPU Keras model (6 sigmoid outputs)
    '''
    inputs = Input(shape=(maxlen, ))
    net = Embedding(max_features, embed_size, weights=[embedding_matrix],
                    input_length=maxlen)(inputs)
    net = Dropout(embedding_dropout)(net)
    # Summed forward/backward outputs keep the feature size at bgru_units.
    net = Bidirectional(CuDNNGRU(bgru_units, return_sequences=True),
                        merge_mode='sum')(net)
    net = Dropout(bgru_dropout)(net)
    # Add a channel axis so the sequence can be convolved like an image.
    net = Reshape((maxlen, bgru_units, 1))(net)
    net = Conv2D(cnn_filters, kernel_size=cnn_kernel_size, padding='valid',
                 kernel_initializer='glorot_uniform')(net)
    net = MaxPool2D(pool_size=max_pool_size)(net)
    net = Flatten()(net)
    net = Dropout(dense_dropout)(net)
    net = Dense(6, activation = "sigmoid",
                kernel_regularizer=regularizers.l2(l2_reg))(net)
    model = Model(inputs = inputs, outputs = net)
    model = multi_gpu_model(model, gpus=gpus)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
| 38.92437 | 116 | 0.682211 | '''
Collection of Text Classification Keras Algorithms for Toxic Comment Classification Challenge.
https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge
'''
# Keras
from keras.layers import (Dense, Input, Bidirectional, Activation, Dropout, Embedding, Flatten, CuDNNLSTM, CuDNNGRU,
Conv2D, MaxPool2D, concatenate, K, Reshape, LSTM)
from keras.models import Model
from keras import regularizers
from keras.utils import multi_gpu_model
def blstm_2dcnn(maxlen, max_features, embed_size, embedding_matrix,
                embedding_dropout = .5,
                blstm_units = 300,
                blstm_dropout = .2,
                cnn_filters = 100,
                cnn_kernel_size = (5,5),
                max_pool_size = (5,5),
                dense_dropout = .4,
                l2_reg = .00001,
                gpus = 1):
    '''
    Bidirectional LSTM followed by a 2D convolution with max pooling.
    :param maxlen: max length of sequence
    :param max_features: max number of word embeddings
    :param embed_size: dimension of word embeddings
    :param embedding_matrix: embedding matrix created from embed file
    :param embedding_dropout: dropout after embedding layer
    :param blstm_units: number of lstm units for the bidirectional lstm
    :param blstm_dropout: dropout after the blstm layer
    :param cnn_filters: number of CNN filters
    :param cnn_kernel_size: kernel size of the convolution
    :param max_pool_size: max pool size
    :param dense_dropout: dropout before dense layer
    :param l2_reg: l2 kernel regularizer parameter
    :gpus: number of gpus
    :returns: compiled multi-GPU Keras model (6 sigmoid outputs)
    '''
    inputs = Input(shape=(maxlen, ))
    net = Embedding(max_features, embed_size, weights=[embedding_matrix],
                    input_length=maxlen)(inputs)
    net = Dropout(embedding_dropout)(net)
    # Summed forward/backward outputs keep the feature size at blstm_units.
    net = Bidirectional(CuDNNLSTM(blstm_units, return_sequences=True),
                        merge_mode='sum')(net)
    net = Dropout(blstm_dropout)(net)
    # Add a channel axis so the sequence can be convolved like an image.
    net = Reshape((maxlen, blstm_units, 1))(net)
    net = Conv2D(cnn_filters, kernel_size=cnn_kernel_size, padding='valid',
                 kernel_initializer='glorot_uniform')(net)
    net = MaxPool2D(pool_size=max_pool_size)(net)
    net = Flatten()(net)
    net = Dropout(dense_dropout)(net)
    net = Dense(6, activation = "sigmoid",
                kernel_regularizer=regularizers.l2(l2_reg))(net)
    model = Model(inputs = inputs, outputs = net)
    model = multi_gpu_model(model, gpus=gpus)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
def bgru_2dcnn(maxlen, max_features, embed_size, embedding_matrix,
               embedding_dropout = .5,
               bgru_units = 300,
               bgru_dropout = .2,
               cnn_filters = 100,
               cnn_kernel_size = (5,5),
               max_pool_size = (5,5),
               dense_dropout = .4,
               l2_reg = .00001,
               gpus = 1):
    '''
    Bidirectional GRU followed by a 2D convolution with max pooling.
    :param maxlen: max length of sequence
    :param max_features: max number of word embeddings
    :param embed_size: dimension of word embeddings
    :param embedding_matrix: embedding matrix created from embed file
    :param embedding_dropout: dropout after embedding layer
    :param bgru_units: number of gru units for the bidirectional gru
    :param bgru_dropout: dropout after the bgru layer
    :param cnn_filters: number of cnn filters
    :param cnn_kernel_size: kernel size of the convolution
    :param max_pool_size: max pool size
    :param dense_dropout: dropout before dense layer
    :param l2_reg: l2 kernel regularizer parameter
    :gpus: number of gpus
    :returns: compiled multi-GPU Keras model (6 sigmoid outputs)
    '''
    inputs = Input(shape=(maxlen, ))
    net = Embedding(max_features, embed_size, weights=[embedding_matrix],
                    input_length=maxlen)(inputs)
    net = Dropout(embedding_dropout)(net)
    # Summed forward/backward outputs keep the feature size at bgru_units.
    net = Bidirectional(CuDNNGRU(bgru_units, return_sequences=True),
                        merge_mode='sum')(net)
    net = Dropout(bgru_dropout)(net)
    # Add a channel axis so the sequence can be convolved like an image.
    net = Reshape((maxlen, bgru_units, 1))(net)
    net = Conv2D(cnn_filters, kernel_size=cnn_kernel_size, padding='valid',
                 kernel_initializer='glorot_uniform')(net)
    net = MaxPool2D(pool_size=max_pool_size)(net)
    net = Flatten()(net)
    net = Dropout(dense_dropout)(net)
    net = Dense(6, activation = "sigmoid",
                kernel_regularizer=regularizers.l2(l2_reg))(net)
    model = Model(inputs = inputs, outputs = net)
    model = multi_gpu_model(model, gpus=gpus)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model
| 0 | 0 | 0 |
10fc1af9e52081f2aa8dc9407d8ad7c4a5b2e7c5 | 9,415 | py | Python | chat_server.py | nixpal/Terminal-ChatServer | 2447ba2db220c7fd3092fee957de264d20b89124 | [
"MIT"
] | null | null | null | chat_server.py | nixpal/Terminal-ChatServer | 2447ba2db220c7fd3092fee957de264d20b89124 | [
"MIT"
] | null | null | null | chat_server.py | nixpal/Terminal-ChatServer | 2447ba2db220c7fd3092fee957de264d20b89124 | [
"MIT"
] | 1 | 2021-07-23T02:57:41.000Z | 2021-07-23T02:57:41.000Z | #!/usr/bin/python
import socket
from thread import *
import threading
from terminaltables import AsciiTable
from chat_module import *
reset="\033[0;0m"
red="\033[38;5;9m"
byellow="\033[38;5;3m"
yellowb="\033[38;5;11m"
blue="\033[38;5;27m"
purple="\033[1;33;35m"
cyan="\033[38;5;6m"
white="\033[38;5;7m"
orange="\033[38;5;202m"
lblue="\033[38;5;117m"
green="\033[38;5;2m"
host = ''
port = 8888
TR_num = []
clients_lists = []
TR_ip = []
TR_port = []
# --- Server bootstrap (Python 2): bind, listen, spawn one worker per client ---
thread_num = 0
# SO_REUSEADDR lets the server rebind quickly after a restart.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print "[+] Binded successfully."
s.listen(5)
print "[+] Listening on port {}".format(green + str(port) + reset)
# Admin console runs on its own thread (CMD is defined elsewhere in the file).
start_new_thread(CMD, ())
# Accept loop: one 'threaded' worker per client, until Ctrl-C.
while 1:
    try:
        c, addr = s.accept()
        clients_lists.append(c)
        thread_num += 1
        print "[+] Client%s connected. IP : %s PORT : %s" % (red + str(thread_num) + reset, red + str(addr[0]) + reset, red + str(addr[1]) + reset)
        conn_data = {'IP':str(addr[0]), 'PORT':str(addr[1])}
        start_new_thread(threaded, (c,thread_num, conn_data))
    except KeyboardInterrupt:
        print "[+] Closing.."
        break
s.close()
| 32.91958 | 149 | 0.481041 | #!/usr/bin/python
import socket
from thread import *
import threading
from terminaltables import AsciiTable
from chat_module import *
reset="\033[0;0m"
red="\033[38;5;9m"
byellow="\033[38;5;3m"
yellowb="\033[38;5;11m"
blue="\033[38;5;27m"
purple="\033[1;33;35m"
cyan="\033[38;5;6m"
white="\033[38;5;7m"
orange="\033[38;5;202m"
lblue="\033[38;5;117m"
green="\033[38;5;2m"
host = ''
port = 8888
TR_num = []
clients_lists = []
TR_ip = []
TR_port = []
def Help():
    """Print the admin console command reference (Python 2)."""
    print """
    +------------+
    | Help: Menu |
    +------------+
    clients                             List all sessions
    clients -i <session number>         Interact with specific session
    kill client <session number>        Kill specific session
    exit                                Exit the main room
    """
def threaded(c, thread_num, conn_data):
    """Per-client worker thread (Python 2).

    Registers the session in the shared global lists, then services the
    text protocol on socket ``c`` until the peer disconnects or sends
    'exit'.  Commands handled: ``set nickname <name>``,
    ``create <room>``, ``join <room>``; anything else gets an error reply.
    """
    IP = conn_data['IP']
    PORT = conn_data['PORT']
    mydata = threading.local()
    mydata.x = thread_num
    # Session bookkeeping shared with the admin console (no locking).
    TR_ip.append(str(IP))
    TR_port.append(str(PORT))
    TR_num.append(str(mydata.x))
    while 1:
        data = c.recv(1024)
        if not data or data.rstrip() == 'exit':
            # Peer closed the connection (or asked to): deregister session.
            clients_lists.remove(c)
            # NOTE(review): pop(0) drops the *first* registered session, which
            # only matches this one if clients disconnect in connect order --
            # confirm intended.
            TR_num.pop(0)
            print "[+] Client closed connection"
            print "[+] Removing his nickname from data"
            if nicknames == []:
                print '[+] User %s hasn\'t chosen nickname yet. No need for removal' % (green + IP + reset)
                c.close()
                break
            else:
                print '[+] Removing User.'
                # nicknames is a list of one-entry {name: socket} dicts.
                for name in nicknames:
                    for key, value in name.iteritems():
                        if value == c:
                            nicknames.remove(name)
                            print '[+] User %s has been removed' % (orange + key + reset)
                c.close()
                break
        if "nickname" in data:
            # "set nickname <name>" -- register or change this client's name.
            if data.split(" ")[0] == "set" and data.split(" ")[1] == "nickname" and len(data.split(" ")) == 3:
                nickname = str(data.rstrip().split(" ")[2])
                if nicknames == []:
                    nicknames.append({nickname:c})
                    print '[+] Client %s nickname is now => %s' % (orange + IP + reset, orange + nickname + reset)
                    c.send('Nickname set -> ' + lblue + nickname + reset)
                else:
                    # Delegate conflict/change detection to the Admin helper.
                    name = Admin(c, nickname=nickname)
                    nickname_result = name.check_nickname()
                    if nickname_result == 'already set':
                        c.send('You have chosen this nickname already.')
                        continue
                    elif nickname_result == 'different user':
                        c.send('This nickname has been chosen by a different User.')
                    elif 'changed' in nickname_result:
                        old_name = nickname_result.split(':')[1]
                        print '[+] Client %s changed nickname %s <=> %s ' % (green + IP + reset, green + old_name + reset, green + nickname + reset)
                        c.send("Nickname has been changed -> " + nickname )
                        continue
                    elif nickname_result == 'added':
                        print '[+] Client %s nickname is now => %s' % (orange + IP + reset, orange + nickname + reset)
                        c.send('Nickname set -> ' + lblue + nickname + reset )
        elif "create" in data or 'join' in data:
            # Room commands require a nickname first.
            check_name = Admin(c)
            nickname_value = check_name.check_nickname()
            if nickname_value == "not found":
                c.send("Choose nickname first before joining or creating rooms. Ex. set nickname <test1>")
                continue
            else:
                if data.split(" ")[0] == "create" and len(data.split(" ")) == 2:
                    room2create = str(data.rstrip().split(" ")[1])
                    roomData = Admin(c, nickname_value, room2create)
                    room_exist = roomData.check_room()
                    if room_exist == 'empty' or room_exist == 'nothing':
                        #print 'Room name doesnt exist'
                        roomData.create_Room()
                        # inside_Room() blocks while the user chats in the room.
                        user_status = roomData.inside_Room()
                        if user_status == 'kicked':
                            c.send( red + 'You have been kicked from the room' + reset)
                    elif room_exist == "exist":
                        c.send('The room %s already exist. Choose a different name' %(room2create))
                        continue
                elif data.split(" ")[0] == "join" and len(data.split(" ")) == 2:
                    room2join = str(data.rstrip().split(" ")[1])
                    roomData = Admin(c, nickname_value, room2join)
                    # NOTE(review): .format() on a string containing %s
                    # placeholders prints the literal '%s' -- looks like a bug;
                    # verify the intended log format.
                    print '[+] User %s wants to join room %s'.format(white + nickname_value + reset, white + room2join + reset)
                    room_exist = roomData.check_room()
                    if room_exist == 'empty' or room_exist == 'nothing':
                        c.send('Room was not found')
                    else:
                        roomData.join_Room()
                        user_status = roomData.inside_Room()
                        if user_status == 'kicked':
                            c.send(red + 'You have been kicked from the room'+reset)
        else:
            # Unknown input: prompt for a nickname or report bad command.
            if nicknames == []:
                print "[+] Client" + str(mydata.x) + " says: " + white + data + reset
                c.send("You must set a nickname first. Ex. set nickname <test1>")
            else:
                check_name = Admin(c)
                nickname_value = check_name.check_nickname()
                if nickname_value == "not found":
                    c.send("You must set a nickname first. Ex. set nickname <test1>")
                else:
                    c.send('Command not found')
def check_sessions():
    """Print a table of live client sessions (number, IP, port)
    from the shared TR_* lists.  (Python 2.)"""
    if TR_num == []:
        print red + "No clients" + reset
    else:
        print "+--------------------------------------------+"
        for tr_num, IP, PORT in zip(TR_num, TR_ip, TR_port):
            print "Client : {} IP : {} Port {}".format(green + tr_num + reset, green + IP + reset, green + PORT + reset)
        print "+--------------------------------------------+"
def remove_session(conn):
    """Deregister a client socket from the shared session lists.

    NOTE(review): pop(0) always drops the *first* thread number, not the
    one belonging to ``conn``, and TR_ip/TR_port are never trimmed --
    confirm whether this bookkeeping drift is intended.
    """
    if conn in clients_lists:
        clients_lists.remove(conn)
        TR_num.pop(0)
def kill_session(s_number):
    """Close and deregister the session whose thread number (string)
    equals ``s_number``.  (Python 2.)"""
    for tr_num, session in zip(TR_num, clients_lists):
        if tr_num == s_number:
            print "[+] Client {} killed".format(red + s_number + reset)
            session.close()
            remove_session(session)
def CMD():
    """Interactive admin console loop (Python 2, runs on its own thread).

    Reads commands from stdin and dispatches to the session/Admin
    helpers: clients listing, kill, nickname listing, sendto, list all
    users, kick.  'exit' leaves the loop.
    """
    while 1:
        cmd = raw_input(green +"> "+reset)
        if cmd == "":
            continue
        elif cmd == "exit":
            print red + "[Hit Control + C to switch to main menu]" + reset
            break
        elif cmd == "help":
            Help()
        elif "client" in cmd or "clients" in cmd:
            # "clients" lists sessions; "kill client <n>" terminates one.
            if cmd.split(" ")[0] == 'clients' and len(cmd.split(" ")) == 1:
                check_sessions()
            elif len(cmd.split(" ")) == 3 and cmd.split(" ")[0] == "kill":
                session = cmd.split(" ")[2]
                kill_session(session)
        elif cmd == 'list nicknames':
            all_names = Admin(client=None)
            all_names.list_nicknames()
        elif 'sendto' in cmd:
            # "sendto <nickname>": open an admin message to that user.
            if cmd.split(" ")[0] == 'sendto' and len(cmd.split(" ")) == 2:
                nickname = cmd.split(" ")[1]
                msg_to_send = Admin(client=None, nickname=nickname)
                check_name = msg_to_send.check_before_send()
                if check_name == 'found':
                    msg_to_send.admin_msg()
                elif check_name == 'not found':
                    print '[+] User {} not found'.format(red + nickname + reset)
                    continue
        elif cmd == 'list all users':
            admin = Admin(client=None)
            admin.list_users()
        elif 'kick' in cmd and cmd.split(" ")[0] == 'kick' and len(cmd.split(" ")) == 2:
            name_to_kick = cmd.split(" ")[1]
            admin = Admin(client=None, nickname=name_to_kick)
            admin.kick_user()
        else:
            print "[+] {}".format(red + "Invalid command" + reset)
# --- Server bootstrap (Python 2): bind, listen, spawn one worker per client ---
thread_num = 0
# SO_REUSEADDR lets the server rebind quickly after a restart.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print "[+] Binded successfully."
s.listen(5)
print "[+] Listening on port {}".format(green + str(port) + reset)
# Admin console runs on its own thread.
start_new_thread(CMD, ())
# Accept loop: one 'threaded' worker per client, until Ctrl-C.
while 1:
    try:
        c, addr = s.accept()
        clients_lists.append(c)
        thread_num += 1
        print "[+] Client%s connected. IP : %s PORT : %s" % (red + str(thread_num) + reset, red + str(addr[0]) + reset, red + str(addr[1]) + reset)
        conn_data = {'IP':str(addr[0]), 'PORT':str(addr[1])}
        start_new_thread(threaded, (c,thread_num, conn_data))
    except KeyboardInterrupt:
        print "[+] Closing.."
        break
s.close()
| 7,937 | 0 | 138 |
cea009eb8599a0f7cad6fdba7b5641d450f7c636 | 52,451 | py | Python | korok.py | spiolynn/log-processing | dbc99a97b7a1163d6e1bb3e6e47b9d47df69578b | [
"MIT"
] | 3 | 2019-07-15T00:42:07.000Z | 2019-07-29T07:32:01.000Z | korok.py | spiolynn/log-processing | dbc99a97b7a1163d6e1bb3e6e47b9d47df69578b | [
"MIT"
] | null | null | null | korok.py | spiolynn/log-processing | dbc99a97b7a1163d6e1bb3e6e47b9d47df69578b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2019/7/11 10:32
# @Author : hoo
# @Site :
# @File : korok.py
# @Software: PyCharm Community Edition
'''
2019/7/9 : 日志清理脚本
'''
import codecs
import glob
import traceback
import tarfile
import subprocess
import sys
import datetime
import time
import shutil
import os
import logging
import optparse
import shlex
import socket
import fnmatch
import re
import platform
# python2 python3 import diff
from sys import version_info
if version_info.major == 2:
import ConfigParser
else:
import configparser as ConfigParser
'''
# 数据备份
[archive-eg]
action=archive
# 备份原路径
src=/home/ap/dev/app/logs
dst=/home/ap/dev/backup
pattern=/home/ap/dev/app/logs/2018*.log
# 备份文件是否带时间戳
timestamp=y
# 是否保存原文件
reserve=y
# day-hour-minute-second
mtime=0-0-0-01
'''
'''
# 数据清理
[clear-test]
action=clear
src=./test_folder/dst
pattern=*.tar.gz
mtime=0-0-0-01
timestamp=y
recursive=y
'''
def ParseArgs():
    '''
    Parse command-line arguments.

    :return: (options, args) tuple as produced by optparse.
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        "-f",
        "--file",
        type="string",
        dest="filename",
        help="Specify the Config file",
        default="setting.ini")
    parser.add_option(
        "-n",
        "--node",
        type="string",
        dest="node",
        help="Specify the the name of Server/Node")
    parser.add_option(
        "-s",
        "--section",
        type="string",
        dest="section",
        help="Specify the Section to Run",
        default="clear-test")
    parser.add_option(
        "-l",
        "--log",
        type="string",
        dest="log",
        help="Specify the log path")
    # NOTE(review): the default is deliberately the *string* 'True' --
    # Coper.__init__ compares options.debug == 'True', and passing -d stores
    # the bool True which that comparison treats as non-debug.  Confirm
    # before normalizing.
    parser.add_option(
        "-d",
        action="store_true",
        default='True',
        dest="debug",
        help="Indicate whether to log debug info")
    (options, args) = parser.parse_args()
    if not options.filename:
        # Bug fix: optparse's Values object has no .error(); report through
        # the parser, which prints usage and exits with status 2.
        parser.error(
            'Error : Config file Missing. Use -f or --file to specify the config file')
    print('*' * 50)
    print(options)
    print('*' * 50)
    return options, args
if __name__ == '__main__':
main()
| 35.705242 | 116 | 0.480029 | # -*- coding: utf-8 -*-
# @Time : 2019/7/11 10:32
# @Author : hoo
# @Site :
# @File : korok.py
# @Software: PyCharm Community Edition
'''
2019/7/9 : 日志清理脚本
'''
import codecs
import glob
import traceback
import tarfile
import subprocess
import sys
import datetime
import time
import shutil
import os
import logging
import optparse
import shlex
import socket
import fnmatch
import re
import platform
# python2 python3 import diff
from sys import version_info
if version_info.major == 2:
import ConfigParser
else:
import configparser as ConfigParser
'''
# 数据备份
[archive-eg]
action=archive
# 备份原路径
src=/home/ap/dev/app/logs
dst=/home/ap/dev/backup
pattern=/home/ap/dev/app/logs/2018*.log
# 备份文件是否带时间戳
timestamp=y
# 是否保存原文件
reserve=y
# day-hour-minute-second
mtime=0-0-0-01
'''
'''
# 数据清理
[clear-test]
action=clear
src=./test_folder/dst
pattern=*.tar.gz
mtime=0-0-0-01
timestamp=y
recursive=y
'''
class Coper:
    def __init__(self, options, args):
        """Store run context, configure logging, and parse the ini file.

        :param options: optparse options (filename, debug, log, section, ...)
        :param args: positional command-line arguments
        Exits the process if the config file cannot be parsed.
        """
        self.ConfigFile = options.filename   # path of the ini config file
        self.NowTime = time.time()           # reference time for mtime checks
        self.ExecTime = self.nowtime()       # timestamp string for file names
        self.Options = options
        self.Args = args
        self.HostNm = socket.gethostname()
        # log setting
        try:
            # debug level
            # NOTE(review): options.debug is compared to the *string* 'True'
            # (the optparse default); passing -d stores bool True and lands in
            # the INFO branch -- confirm intended.
            if options.debug == 'True':
                loglevel = logging.DEBUG
                formatter = logging.Formatter(
                    '%(asctime)s-%(levelname)s-%(funcName)15s-%(lineno)5d : %(message)s')
            else:
                loglevel = logging.INFO
                formatter = logging.Formatter(
                    '%(asctime)s-%(levelname)s : %(message)s')
            # logfile setting
            if options.log:
                logpath = options.log
            else:
                logpath = 'korok.log'
            # fh = logging.handlers.RotatingFileHandler(logpath, maxBytes = 1024*1024, backupCount = 5)
            # NOTE(review): basicConfig already attaches a stream handler, so
            # adding ch below likely duplicates console output -- verify.
            logging.basicConfig(level=loglevel)
            fh = logging.FileHandler(logpath)
            ch = logging.StreamHandler(sys.stdout)
            fh.setFormatter(formatter)
            ch.setFormatter(formatter)
            self.Logger = logging.getLogger()
            self.Logger.addHandler(fh)
            self.Logger.addHandler(ch)
        except Exception as ex:
            print('Logging Initialization Failed')
            traceback.print_exc()
        try:
            # parse the ini config file
            self.ConParser = ConfigParser.ConfigParser()
            self.ConParser.read(self.ConfigFile)
        except Exception as ex:
            logging.error('Config File ' + self.ConfigFile + ' Error')
            logging.exception(ex)
            sys.exit()
def Run(self):
'''
启动
:return:
'''
success = 0
failure = 0
ret = 0
sections = self.ConParser.sections()
section = self.Options.section
args = self.Args
if section in sections:
try:
action = self.ConParser.get(section, 'action')
except Exception as ex:
ret = 2
logging.error('Error: No Action definition ' + section)
logging.exception(ex)
if action == 'copy':
if not self.Copy(section, args):
ret = 11
elif action == 'execute':
if not self.Execute(section):
ret = 12
# clear
elif action == 'clear':
if not self.Clear(section, args):
ret = 13
elif action == 'processmon':
if not self.ProcessMon(section):
ret = 14
elif action == 'service':
if not self.Service(section, args):
ret = 15
# backup
elif action == 'archive':
if not self.Zip(section):
ret = 16
# backup by month
elif action == 'archive_month':
if not self.Zip_Month(section):
ret = 16
else:
logging.error('Error: No such Action ' + section)
ret = 3
logging.debug(section + ' ends')
else:
logging.info(" {}.{}".format(section,"is not setting"))
return ret
    def Copy(self, section, args):
        """Copy (or tar) files selected by a config section.

        Reads from the section: src, dst, pattern (mandatory); exclude,
        mtime, timestamp, recursive, compress (optional).  Builds a list
        of entries under ``src`` matching any pattern whose mtime is at
        least ``mtime`` old, then either adds them to ``dst.tar.gz``
        (compress mode) or copies them under ``dst``.

        :return: 0 when a mandatory key is missing or the delta is
                 invalid, 1 otherwise.
        """
        # need parameter
        # src
        # dst
        # pattern
        # mtime
        # timestamp
        # reserve
        # recursive
        # compress
        try:
            src = self.ConParser.get(section, 'src')
            logging.info('Source Base Directory %s' % src)
        except Exception as ex:
            logging.error('No Source defined for Copy %s' % section)
            logging.exception(ex)
            return 0
        # confirm dst
        try:
            dest = self.ConParser.get(section, 'dst')
            logging.info('dest Directory %s' % dest)
        except Exception as ex:
            logging.error('No destination defined for Copy %s' % section)
            logging.exception(ex)
            return 0
        # confirm pattern
        try:
            patternlist = self.ConParser.get(section, 'pattern').split()
            logging.info('patternlist %s' % str(patternlist))
        except Exception as ex:
            logging.error('No pattern defined for Copy %s' % section)
            logging.exception(ex)
            return 0
        try:
            exclusionlist = self.ConParser.get(section, 'exclude').split()
            logging.info('exclusionlist %s' % str(exclusionlist))
        except Exception as ex:
            logging.info('No exclusion defined for Copy %s' % section)
            exclusionlist = None
        try:
            duration = str(self.ConParser.get(section, 'mtime'))
            delta = self.convert_to_time(duration)
            logging.info('delta %s' % str(delta))
            if delta is None:
                logging.error('Can not get time delta')
                return 0
        except Exception as ex:
            logging.warning(
                'No mtime defined for Copy %s, default value 0 used' %
                section)
            delta = 0
        # timestamp: unless explicitly 'no', suffix dest with the exec time.
        try:
            timestamp = str(self.ConParser.get(section, 'timestamp'))
            logging.info('timestamp %s' % str(timestamp))
            if timestamp.upper() == 'NO' or timestamp.upper() == 'N':
                timestamp = ''
            else:
                dest = dest + '.' + self.ExecTime
        except Exception as ex:
            dest = dest + self.ExecTime
            logging.info('Un mark time stamp for Copy %s' % section)
        try:
            recursive = str(self.ConParser.get(section, 'recursive'))
            logging.info('recursive %s' % str(recursive))
            if recursive.upper() == 'YES' or recursive.upper() == 'Y':
                recursive = True
            else:
                recursive = False
        except Exception as ex:
            recursive = False
            logging.info('Non-recursive Mode for Copy %s' % section)
        try:
            compress = str(self.ConParser.get(section, 'compress'))
            logging.info('compress %s' % str(compress))
            if compress.upper() == 'YES' or compress.upper() == 'Y':
                compress = True
            else:
                compress = False
        except Exception as ex:
            compress = False
            logging.info('Non-compress Mode for Copy %s' % section)
        # begin copy handle
        copylist = []
        if recursive:
            # recursive handling: walk src via the mycopywalk helper
            try:
                '''
                for (root,dirs,files) in os.walk(src.decode('utf-8'),topdown=True):
                for filename in files:
                abssrc=os.path.join(root,filename)
                mtime=os.stat(abssrc).st_mtime
                if (self.NowTime-mtime) < delta:
                continue
                for pattern in patternlist:
                #pattern=os.path.join(src,pattern)
                if fnmatch.fnmatch(filename,pattern):
                copylist.append(abssrc)
                break
                #os.path.walk(src.decode('utf-8'),self.scan,(patternlist,src,delta,copylist))
                '''
                for member in os.listdir(src):
                    self.mycopywalk(
                        os.path.join(
                            src,
                            member),
                        patternlist,
                        exclusionlist,
                        delta,
                        copylist)
            except Exception as ex:
                logging.exception(ex)
        else:
            # flat mode: scan only the top level of src per pattern
            for pattern in patternlist:
                # pattern=os.path.join(src,pattern)
                # srcfiles=glob.glob(pattern)
                try:
                    srcfiles = os.listdir(src)
                except Exception as ex:
                    # NOTE(review): on listdir failure 'srcfiles' may be
                    # unbound (or stale from a prior pattern) -- confirm.
                    logging.exception(ex)
                for srcfile in srcfiles:
                    abssrc = os.path.join(src, srcfile)
                    mtime = os.stat(abssrc).st_mtime
                    # skip files modified more recently than the delta
                    if (self.NowTime - mtime) < delta:
                        continue
                    if not fnmatch.fnmatch(srcfile, pattern):
                        continue
                    # if os.path.islink(os.path.join(src,srcfile)):
                    # continue
                    copylist.append(abssrc)
        # drop entries whose basename matches any exclusion pattern
        if exclusionlist:
            for expattern in exclusionlist:
                for srcfile in copylist:
                    if fnmatch.fnmatch(os.path.basename(srcfile), expattern):
                        copylist.remove(srcfile)
                        # exclude.append(srcfile)
        logging.info('Copy List:')
        for copyfile in copylist:
            logging.info(copyfile)
        logging.info('Copy List Ends')
        if compress:
            # archive the whole list into a single gzip tarball
            try:
                destgzfile = tarfile.open(dest + '.tar.gz', 'w:gz')
                # destgzfile=tarfile.TarFile(name=dest+'.tar.gz',mode='w:gz',dereference=False)
                for srcfile in copylist:
                    destgzfile.add(srcfile, recursive=(not recursive))
                destgzfile.close()
            except Exception as ex:
                logging.exception(ex)
        else:
            # plain copy, preserving the path layout relative to src
            if not os.path.exists(dest):
                os.makedirs(dest)
            for srcfile in copylist:
                try:
                    relpath = os.path.relpath(srcfile, src)
                    absdest = os.path.join(dest, relpath)
                    if os.path.isdir(srcfile):
                        if recursive:
                            shutil.copytree(srcfile, absdest, symlinks=True)
                            logging.info('copy directory %s' % srcfile)
                    elif os.path.isfile(srcfile):
                        shutil.copy2(srcfile, absdest)
                        logging.info('copy file %s' % srcfile)
                    elif os.path.islink(srcfile):
                        os.symlink(absdest, os.readlink(srcfile))
                except Exception as ex:
                    logging.exception(ex)
        return 1
# zip by month
    def Zip_Month(self, section):
        '''
        Archive the current month's files/directories into one tarball.

        Reads from the config section:
        # src    source directories (month subdirectory is appended)
        # dst    destination directory (month subdirectory is appended)
        # pattern glob patterns separated by '@@' (month dir injected)
        # mtime  minimum age of entries to archive
        # timestamp whether to prefix the tarball name with the exec time
        # reserve whether to keep the source entries after archiving

        :param section: name of the config section to process
        :return: 0 on missing mandatory keys / bad delta, 1 if the tar
                 file could not be created, otherwise None.
        '''
        print('zip_by_month')
        _month = self.nowMonth()
        try:
            srclist = self.ConParser.get(section, 'src').split()
            _srclist = []
            for src in srclist:
                _srclist.append(os.path.join(src, _month))
            srclist = _srclist
            logging.debug('{:15}{:10}'.format('srcpath lst:', str(srclist)))
        except BaseException:
            print('No Source defined for Copy', section)
            self.PrintStack()
            return 0
        try:
            dest = self.ConParser.get(section, 'dst')
            dest = os.path.join(dest, _month)
            logging.debug('{:15}{:10}'.format('dstpath str:', str(dest)))
        except BaseException:
            print('No destination defined for Copy', section)
            self.PrintStack()
            return 0
        try:
            # inject the month directory between each pattern's dir and glob
            patternlist = self.ConParser.get(section, 'pattern').split('@@')
            # logging.debug('pattern lst' + str(patternlist))
            _patternlist = []
            for i in patternlist:
                _dirpath = os.path.dirname(i)
                _pattern = os.path.basename(i)
                _patternlist.append(os.path.join(_dirpath, _month, _pattern))
            patternlist = _patternlist
            logging.debug(
                '{:15}{:10}'.format(
                    'pattern lst:',
                    str(patternlist)))
        except BaseException:
            print('No pattern defined for Copy', section)
            self.PrintStack()
            return 0
        try:
            duration = str(self.ConParser.get(section, 'mtime'))
            # logging.debug('duration ' + str(duration))
            logging.debug('{:15}{:10}'.format('duration:', str(duration)))
        except BaseException:
            duration = None
            print('No mtime defined for Copy', section)
        try:
            timestamp = str(self.ConParser.get(section, 'timestamp'))
            if timestamp == 'yes' or timestamp == 'YES' or timestamp == 'y' or timestamp == 'Y':
                dest = os.path.join(
                    dest, self.ExecTime + '-' + self.HostNm + '.tar.gz')
                # dest = dest + self.ExecTime + '-' + self.HostNm + '.tar.gz'
            else:
                # dest = dest + '-' + self.HostNm + '.tar.gz'
                dest = os.path.join(dest, self.HostNm + '.tar.gz')
            # logging.debug('dest name will: ' + str(dest))
            logging.debug('{:15}{:10}'.format('dest will:', str(dest)))
        except BaseException:
            print('Unmark time stamp for Zip', section)
        try:
            reserve = str(self.ConParser.get(section, 'reserve'))
            if reserve == 'no' or reserve == 'NO' or reserve == 'n' or reserve == 'N':
                reserve = False
            else:
                reserve = True
            # logging.debug('reserve : ' + str(reserve))
            logging.debug('{:15}{:10}'.format('reserve:', str(reserve)))
        except BaseException:
            reserve = True
            print('Un mark time stamp for Copy', section)
        # get delta
        # NOTE(review): if 'mtime' is missing, duration is None and 'delta'
        # is never bound, so the age comparison below raises NameError --
        # confirm intended behavior.
        if duration:
            delta = self.convert_to_time(duration)
            if not delta:
                logging.error("Can not get time delta")
                print('Can not get time delta')
                return 0
        # create tar file
        try:
            if not os.path.exists(os.path.dirname(dest)):
                os.makedirs(os.path.dirname(dest))
            destgzfile = tarfile.open(dest, 'w:gz')
        except BaseException:
            print('Zip File creation failed for', dest)
            self.PrintStack()
            return 1
        # get file
        for pattern in patternlist:
            srcfiles = glob.glob(pattern)
            logging.debug('{:15}{:10}'.format('pattern file:', str(srcfiles)))
            for srcfile in srcfiles:
                # chdir so entries are added with basename-relative paths
                # NOTE(review): cur_path is only restored in the except branch
                # below; on success the process stays in the last file's
                # directory -- confirm intended.
                cur_path = os.getcwd()
                os.chdir(os.path.dirname(srcfile))
                mtime = os.stat(srcfile).st_mtime
                if not (self.NowTime - mtime) > delta:
                    continue
                print('Zip from', srcfile, 'to', dest)
                logging.debug(
                    'Zip from {:15} srcfile {:10}'.format(
                        srcfile, dest))
                try:
                    srcfile = os.path.basename(srcfile)
                    if os.path.isfile(srcfile):
                        destgzfile.add(srcfile)
                        if not reserve:
                            os.remove(srcfile)
                    elif os.path.isdir(srcfile):
                        destgzfile.add(srcfile)
                        if not reserve:
                            shutil.rmtree(srcfile)
                except BaseException:
                    os.chdir(cur_path)
                    self.PrintStack()
        destgzfile.close()
def Zip(self, section):
    '''
    Archive files matched by the section's glob patterns into a gzip'd tar.

    Config keys read from ``section``:
      src       '|'-separated source dirs (read only to validate the config)
      dst       destination path prefix for the archive
      pattern   '@@'-separated glob patterns selecting entries to archive
      mtime     minimum age 'd-h-m-s'; only entries older than this are added
                (optional; when absent every match is archived)
      timestamp yes/y: embed the run timestamp in the archive name
      reserve   no/n: delete sources after archiving (default: keep them)

    Returns 0 on configuration/creation errors, None on completion.
    '''
    print('zip')
    try:
        srclist = self.ConParser.get(section, 'src').split('|')
        logging.debug('{:15}{:10}'.format('srcpath lst:', str(srclist)))
    except BaseException:
        print('No Source defined for Copy', section)
        self.PrintStack()
        return 0
    try:
        dest = self.ConParser.get(section, 'dst')
        logging.debug('{:15}{:10}'.format('dstpath str:', str(dest)))
    except BaseException:
        print('No destination defined for Copy', section)
        self.PrintStack()
        return 0
    try:
        patternlist = self.ConParser.get(section, 'pattern').split('@@')
        logging.debug(
            '{:15}{:10}'.format(
                'pattern lst:',
                str(patternlist)))
    except BaseException:
        print('No pattern defined for Copy', section)
        self.PrintStack()
        return 0
    try:
        duration = str(self.ConParser.get(section, 'mtime'))
        logging.debug('{:15}{:10}'.format('duration:', str(duration)))
    except BaseException:
        duration = None
        print('No mtime defined for Copy', section)
    try:
        timestamp = str(self.ConParser.get(section, 'timestamp'))
        if timestamp == 'yes' or timestamp == 'YES' or timestamp == 'y' or timestamp == 'Y':
            dest = dest + self.ExecTime + '-' + self.HostNm + '.tar.gz'
        else:
            dest = dest + '-' + self.HostNm + '.tar.gz'
        logging.debug('{:15}{:10}'.format('dest will:', str(dest)))
    except BaseException:
        print('Unmark time stamp for Zip', section)
    try:
        reserve = str(self.ConParser.get(section, 'reserve'))
        if reserve == 'no' or reserve == 'NO' or reserve == 'n' or reserve == 'N':
            reserve = False
        else:
            reserve = True
        logging.debug('{:15}{:10}'.format('reserve:', str(reserve)))
    except BaseException:
        reserve = True
        print('Un mark time stamp for Copy', section)
    # BUGFIX: `delta` was unbound when no 'mtime' was configured, crashing
    # the age comparison below with NameError. Default to 0 (no age limit).
    delta = 0
    if duration:
        delta = self.convert_to_time(duration)
        if not delta:
            logging.error("Can not get time delta")
            print('Can not get time delta')
            return 0
    # create tar file
    try:
        destgzfile = tarfile.open(dest, 'w:gz')
    except BaseException:
        print('Zip File creation failed for', dest)
        self.PrintStack()
        # BUGFIX: previously fell through and crashed on the unbound handle.
        return 0
    try:
        # Collect matching entries and add them to the archive.
        for pattern in patternlist:
            srcfiles = glob.glob(pattern)
            logging.debug('{:15}{:10}'.format('pattern file:', str(srcfiles)))
            for srcfile in srcfiles:
                mtime = os.stat(srcfile).st_mtime
                if not (self.NowTime - mtime) > delta:
                    continue  # entry too recent to archive
                print('Zip from', srcfile, 'to', dest)
                logging.debug(
                    'Zip from {:15} srcfile {:10}'.format(
                        srcfile, dest))
                try:
                    print(os.getcwd())
                    if os.path.isfile(srcfile):
                        destgzfile.add(srcfile)
                        if not reserve:
                            os.remove(srcfile)
                    elif os.path.isdir(srcfile):
                        destgzfile.add(srcfile)
                        if not reserve:
                            shutil.rmtree(srcfile)
                except BaseException:
                    self.PrintStack()
    finally:
        # BUGFIX: the archive handle was never closed, which can leave an
        # incomplete gzip stream on disk.
        destgzfile.close()
def mycopywalk(self, root, patternlist, exclusionlist, delta, copylist):
    """Recursively collect entries under ``root`` into ``copylist``.

    An entry qualifies when it is older than ``delta`` seconds, its full
    path matches one of ``patternlist``, and its basename matches none of
    ``exclusionlist``. Excluded directories are not descended into, and
    symlinked directories are never followed.
    """
    logging.info('enter %s' % root)
    # Age gate: entry must be older than `delta` seconds.
    is_old = (self.NowTime - os.stat(root).st_mtime) > delta
    # Include gate: the full path must match at least one pattern.
    is_match = any(fnmatch.fnmatch(root, pat) for pat in patternlist)
    # Exclusion gate: basename must not match any exclusion pattern.
    is_excluded = bool(exclusionlist) and any(
        fnmatch.fnmatch(os.path.basename(root), ex) for ex in exclusionlist)
    if is_old and is_match and not is_excluded:
        copylist.append(root)
    if os.path.isdir(root) and not os.path.islink(root):
        if not is_excluded:
            logging.info('dir %s', root)
            for member in os.listdir(root):
                self.mycopywalk(
                    os.path.join(root, member),
                    patternlist,
                    exclusionlist,
                    delta,
                    copylist)
def scan(self, patternlist, src, delta, copylist, dirname, files):
    """Directory-walk visitor: append to ``copylist`` every file in
    ``files`` (under ``dirname``) that matches a pattern and is older
    than ``delta`` seconds.

    Returns 1 on completion, or 0 as soon as a stat fails.
    """
    for entry in files:
        full_path = os.path.join(dirname, entry)
        for pat in patternlist:
            if not fnmatch.fnmatch(full_path, pat):
                continue
            # Computed but unused in the original logic; kept for parity.
            os.path.relpath(full_path, src)
            try:
                age = self.NowTime - os.stat(full_path).st_mtime
                if age < delta:
                    continue  # too fresh for collection
                copylist.append(full_path)
            except Exception as ex:
                logging.exception(ex)
                return 0
    return 1
# Data cleanup operation.
def Clear(self, section, args):
    """Delete files/directories under `src` matching `pattern` and older than `mtime`.

    Config keys read from ``section``:
      src        base directory to clean (required)
      pattern    whitespace-separated fnmatch patterns (required)
      exclude    whitespace-separated basename patterns to spare (optional)
      mtime      minimum age as 'd-h-m-s' (optional; default: no age limit)
      recursive  yes/y: walk the whole tree; otherwise only src's top level

    ``args`` is accepted for interface parity with the other operations.
    Returns 0 on configuration errors, None otherwise.
    """
    try:
        src = self.ConParser.get(section, 'src')
        logging.info('Source Base Directory %s' % src)
    except Exception as ex:
        logging.error('No srcdir defined for Clear %s' % section)
        logging.exception(ex)
        return 0
    try:
        patternlist = self.ConParser.get(section, 'pattern').split()
        print(patternlist)
    except Exception:
        print('No pattern defined for Clear', section)
        self.printstack()
        return 0
    try:
        exclusionlist = self.ConParser.get(section, 'exclude').split()
    except Exception:
        logging.info('No exclusion defined for Copy %s' % section)
        exclusionlist = None
    # BUGFIX: `delta` was unbound when 'mtime' was missing, crashing the
    # age comparisons below with NameError. Default to 0 (no age limit).
    delta = 0
    try:
        duration = str(self.ConParser.get(section, 'mtime'))
        delta = self.convert_to_time(duration)
        if delta is None:
            print('Can not get time delta')
            return 0
    except Exception:
        print('No mtime defined for Clear', section)
        delta = 0
    try:
        recursive = str(self.ConParser.get(section, 'recursive')).upper() in ('YES', 'Y')
    except Exception:
        recursive = False
        print('Non-recursive Mode', section)
    deletelist = []
    if recursive:
        try:
            # Bottom-up so directories are recorded after their contents.
            for root, dirs, files in os.walk(src, topdown=False):
                for name in files + dirs:
                    # BUGFIX: an entry matching several patterns used to be
                    # appended (and later deleted) multiple times.
                    if not any(fnmatch.fnmatch(name, pattern)
                               for pattern in patternlist):
                        continue
                    abssrc = os.path.join(root, name)
                    if (self.NowTime - os.stat(abssrc).st_mtime) < delta:
                        continue  # too fresh to delete
                    deletelist.append(abssrc)
        except Exception as ex:
            logging.exception(ex)
    else:
        try:
            srcfiles = os.listdir(src)
        except Exception as ex:
            logging.exception(ex)
            # BUGFIX: the original went on to use an unbound name after a
            # listdir failure; nothing to clean in that case.
            srcfiles = []
        for srcfile in srcfiles:
            abssrc = os.path.join(src, srcfile)
            if (self.NowTime - os.stat(abssrc).st_mtime) < delta:
                continue
            if any(fnmatch.fnmatch(srcfile, pattern) for pattern in patternlist):
                deletelist.append(abssrc)
    if exclusionlist:
        # BUGFIX: the original removed items from `deletelist` while
        # iterating it, skipping the element after each removal.
        deletelist = [
            path for path in deletelist
            if not any(fnmatch.fnmatch(os.path.basename(path), expattern)
                       for expattern in exclusionlist)
        ]
    logging.info('Delete List:')
    for deletefile in deletelist:
        logging.info(deletefile)
        if os.path.isfile(deletefile) or os.path.islink(deletefile):
            try:
                os.remove(deletefile)
            except Exception as ex:
                logging.exception(ex)
        elif os.path.isdir(deletefile):
            try:
                shutil.rmtree(deletefile)
            except Exception as ex:
                logging.exception(ex)
    logging.info('Delete List Ends')
def Execute(self, section):
    """Launch the section's shell command, optionally redirecting stdout to a file.

    Config keys: ``command`` (required), ``output`` (optional stdout path).
    The child is fire-and-forget; any redirect handle intentionally stays
    open for the child's lifetime.
    Returns 1 when the process was launched, 0 on any error.
    """
    try:
        command = self.ConParser.get(section, 'command')
    except Exception:
        print('No Command defined for Execution', section)
        self.printstack()
        return 0
    try:
        output = self.ConParser.get(section, 'output')
        # BUGFIX: this was a bare `print` statement artifact (`print` and
        # `output` on separate lines) left over from a Python 2 conversion.
        print(output)
    except Exception:
        output = None
        print('No Output defined forExecution', section)
    outfile = None
    if output:
        try:
            outfile = open(output, 'w')
            print('Open file', output, 'for execution output')
        except Exception:
            print('Open file for execution output failed', section)
            return 0
    try:
        process = subprocess.Popen(command, stdout=outfile, shell=True)
    except Exception:
        # BUGFIX: close the redirect handle when the launch itself fails.
        if outfile:
            outfile.close()
        self.printstack()
        return 0
    return 1
def ProcessMon(self, section):
    """Poll for a process matching ``processkeyword`` and (re)start it when missing.

    Config keys: ``interval`` (poll seconds, default 5), ``processkeyword``
    (required regex), ``command`` (optional restart command), ``outfile``
    (restart stdout file, default '<keyword>.log').

    Loops forever; exits the interpreter only when the process search
    itself reports failure.
    """
    try:
        interval = float(self.ConParser.get(section, 'interval'))
    except Exception:
        print('No interval defined for ProcessMon, use default value', section)
        self.printstack()
        interval = 5
    try:
        processkeyword = self.ConParser.get(section, 'processkeyword')
    except Exception:
        print('No processkeyword defined for ProcessMon,exit', section)
        self.printstack()
        return 0
    try:
        command = self.ConParser.get(section, 'command')
    except Exception:
        print('No command defined for ProcessMon', section)
        command = None
        self.printstack()
    try:
        outfile = self.ConParser.get(section, 'outfile')
    except Exception:
        print('No outfile defined for ProcessMon, use default value', section)
        self.printstack()
        outfile = processkeyword + '.log'
    while True:
        (result, search) = self.find_process_by_cmd(processkeyword)
        if result:
            if not search:
                # BUGFIX: self.nowtime is a method; it was previously
                # printed unbound (a bound-method repr) instead of called.
                print(self.nowtime(), ' process ', processkeyword, 'unsearched')
                if not command:
                    print('No command defined for process to start')
                    time.sleep(interval)
                    continue
                try:
                    # NOTE: the log handle deliberately stays open for the
                    # restarted child's lifetime.
                    process = subprocess.Popen(
                        command, stdout=open(outfile, 'w'), shell=True)
                except Exception:
                    self.printstack()
                    print('Process starts Abnomal')
            else:
                print(self.nowtime(), ' process ', processkeyword, 'searched')
        else:
            print('Module find_process_by_cmd Abnomal')
            sys.exit()
        time.sleep(interval)
    # Unreachable: the loop above only exits via sys.exit().
    return 0
def PrintStack(self):
    """Dump the active exception traceback to stderr, bracketed by markers."""
    header, footer = "*** print_exc begins:", "*** print_exc ends ***"
    print(header)
    traceback.print_exc()
    print(footer)
def Service(self, section, args):
    """START/STOP/CHECK a configured service and verify its health.

    ``args[0]``'s first word selects the action (case-insensitive).
    Config keys read from ``section``:
      start / stop        shell commands (both required)
      logfile             optional stdout redirect for the start/stop command
      processes           '|'-separated 'N,regex[,regex...]' process checks
      listenports         '|'-separated 'ip:port' listen-socket checks
      connections         '|'-separated 'N,ip:port' established-connection checks
      winservice          '|'-separated Windows service names ('sc query')
      waittime            seconds to wait after start/stop (default 1)
      weblogiccheck       optional WebLogic status command
      weblogicnodes       '|'-separated WebLogic node names (with weblogiccheck)

    Each configured check records True/False in ``check``; the return value
    aggregates them: for START/CHECK all checks must pass, for STOP the
    result is True only when every check reports the service as down.
    Returns 0 for an unknown action, False on configuration errors,
    otherwise the aggregated boolean.
    """
    # startcommand
    # logfile
    # processes
    # listenports
    # connections
    # waittime
    # checkonly
    logging.debug('Enter Service')
    todoaction = args[0].split()[0].upper()
    actionlist = ['START', 'STOP', 'CHECK']
    check = {}
    if todoaction not in actionlist:
        logging.error(
            'No Action %s in Action List %s:' %
            (todoaction, actionlist))
        return 0
    # --- read configuration -------------------------------------------------
    try:
        startcommand = self.ConParser.get(section, 'start')
        logging.info('Startcommand: %s', startcommand)
    except Exception as ex:
        logging.error('No Startcommand defined for Service %s' % section)
        logging.exception(ex)
        return False
    try:
        stopcommand = self.ConParser.get(section, 'stop')
        logging.info('Stopcommand: %s', stopcommand)
    except Exception as ex:
        logging.error('No Stopcommand defined for Service %s' % section)
        logging.exception(ex)
        return False
    try:
        logfile = self.ConParser.get(section, 'logfile')
    except Exception as ex:
        logging.warning('No Log file defined for Service %s' % section)
        logfile = None
    # Each optional check seeds its slot in `check` with None so the final
    # aggregation only considers checks that are actually configured.
    try:
        processkeywords = self.ConParser.get(
            section, 'processes').split('|')
        check['processcheck'] = None
    except Exception as ex:
        logging.warning(
            'No Processes keywords defined for Service %s' %
            section)
        processkeywords = None
    try:
        listenports = self.ConParser.get(section, 'listenports').split('|')
        check['listenportcheck'] = None
    except Exception as ex:
        logging.warning('No Listenports defined for Service %s' % section)
        listenports = None
    try:
        connections = self.ConParser.get(section, 'connections').split('|')
        check['connectioncheck'] = None
    except Exception as ex:
        logging.warning('No Connnections defined for Service %s' % section)
        connections = None
    try:
        winservice = self.ConParser.get(section, 'winservice').split('|')
        check['winservice'] = None
    except Exception as ex:
        logging.warning('No Winservice defined for Service %s' % section)
        winservice = None
    try:
        waittime = self.ConParser.get(section, 'waittime')
    except Exception as ex:
        logging.warning(
            'No Waittime defined for Service %s, default value 1 second' %
            section)
        waittime = 1
    try:
        weblogiccheckcommand = self.ConParser.get(section, 'weblogiccheck')
        logging.info('Weblogic Check command %s' % weblogiccheckcommand)
        check['weblogiccheck'] = None
    except Exception as ex:
        logging.warning(
            'No Weblogic Check defined for Service %s' %
            section)
        weblogiccheckcommand = None
    if weblogiccheckcommand:
        try:
            weblogicnodes = self.ConParser.get(
                section, 'weblogicnodes').split('|')
            logging.info('Weblogic Nodes to Check %s' % weblogicnodes)
        except Exception as ex:
            logging.error(
                'No Weblogic Nodes defined for Service %s' %
                section)
            weblogicnodes = None
    # --- pick the command for the requested action --------------------------
    if todoaction == 'START':
        logging.info('To Start the Service %s' % section)
        if startcommand:
            executioncommand = startcommand
        else:
            logging.error('No Start Command,Exit')
            return False
    elif todoaction == 'STOP':
        logging.info('To Stop the Service %s' % section)
        # NOTE(review): this tests `startcommand` but assigns `stopcommand`
        # — almost certainly meant to test `stopcommand`; confirm.
        if startcommand:
            executioncommand = stopcommand
        else:
            logging.error('No Stop Command,Exit')
            return False
    elif todoaction == 'CHECK':
        logging.info('To Check the Service %s' % section)
        executioncommand = None
    # --- run the start/stop command (CHECK runs nothing) --------------------
    if executioncommand:
        '''
        if not platform.system() =='Windows':
            executioncommand=shlex.split(executioncommand)
            SHELL=False
        else:
            SHELL=True
        '''
        SHELL = True
        try:
            logging.info('Start Command: %s' % executioncommand)
            if logfile:
                outfile = open(logfile, 'w+')
                logging.info('Output to the Log file: %s' % logfile)
                process = subprocess.Popen(
                    executioncommand, stdout=outfile, shell=SHELL)
            else:
                process = subprocess.Popen(executioncommand, shell=SHELL)
            logging.info(
                'The %s process\' pid :%s' %
                (todoaction, process.pid))
        except Exception as ex:
            logging.exception(ex)
            return False
        # Give the service time to come up / go down before checking.
        time.sleep(float(waittime))
    # --- process-keyword check ----------------------------------------------
    if processkeywords:
        logging.info('Begin Process keyword Check')
        # NOTE(review): `promatch` is collected nowhere below — dead local.
        promatch = []
        for processkeyword in processkeywords:
            # index=processkeywords.index(processkeyword)
            # promatch[index]=False
            logging.info('The keywords to match: %s' % processkeyword)
            # Format: '<min-count>,<regex>[,<regex>...]'
            keywords = processkeyword.split(',')
            if not keywords:
                continue
            try:
                processnum = int(keywords[0])
            except Exception as ex:
                logging.error('Start Module:processkeyword complie ERROR')
                return False
            # Pre-compile every regex fragment in place.
            for keyword in keywords[1:]:
                keywords[keywords.index(keyword)] = re.compile(keyword)
            resultpros = []
            try:
                if platform.system() == 'Windows':
                    procheckcommand = 'tasklist /v'
                else:
                    procheckcommand = 'ps -ef'
                process = subprocess.Popen(
                    procheckcommand,
                    stdout=subprocess.PIPE,
                    shell=True,
                    universal_newlines=True)
                output, unused_err = process.communicate()
                retcode = process.poll()
                ret = output.strip()
                if retcode:
                    logging.warning('Process Check retcode: %s' % retcode)
                logging.debug('The Process Check output:\n %s' % ret)
                logging.debug('Process Check unused_err %s' % unused_err)
                # ret = self.get_execution_result(procheckcommand)
                # A listing line matches only if every regex matches it.
                for proline in ret.split('\n'):
                    matchedflag = True
                    for keyword in keywords[1:]:
                        if not keyword.search(proline):
                            matchedflag = False
                            break
                    if matchedflag:
                        # print proline,'matched'
                        resultpros.append(proline)
                # print processnum
                if len(resultpros) < processnum:
                    check['processcheck'] = False
                    logging.error(
                        'Processes Check Failed, Keyword %s Unmatched' %
                        processkeyword)
                else:
                    check['processcheck'] = True
                    logging.info(
                        'Processes Check OK, Keyword %s matched, ' %
                        processkeyword)
                    logging.info('Matched Process content %s' % resultpros)
            except Exception as ex:
                check['processcheck'] = False
                logging.exception(ex)
        logging.info('End Process keyword Check')
    # --- listen-port check ---------------------------------------------------
    if listenports:
        logging.info('Begin Listen Ports Check')
        try:
            for ipport in listenports:
                (ip, port) = ipport.split(':')
                if not (ip and port):
                    continue
                port = int(port)
                logging.info('Try to connect %s:%s' % (ip, port))
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                # connect_ex returns 0 on success, an errno otherwise.
                conn_ret = sock.connect_ex((ip, port))
                if conn_ret:
                    check['listenportcheck'] = False
                    logging.error(
                        'Listen Port Check ERROR:failed to connect to Listen_port %s:%s' %
                        (ip, port))
                    logging.error('The socket ERROR Code %s' % conn_ret)
                else:
                    check['listenportcheck'] = True
                    logging.info(
                        'Listen Port Check OK:succeed to connect to Listen_port %s:%s' %
                        (ip, port))
        except Exception as ex:
            check['listenportcheck'] = False
            logging.error(
                'Listen Port Check ERROR:failed to connect to Listen_port %s:%s' %
                (ip, port))
            logging.exception(ex)
        finally:
            # NOTE(review): only the last socket is closed, and `sock` may be
            # unbound here if the very first split()/int() raised — confirm.
            sock.close()
        logging.info('End Listen Ports Check')
    # NOTE(review): dead local — the loop below writes check['connectioncheck'].
    connectioncheck = True
    # --- established-connection count check ----------------------------------
    if connections:
        logging.info('Begin Connections Check')
        for ipports in connections:
            # Format: '<min-count>,<ip>:<port>'
            (connnum, ipport) = ipports.split(',')
            if not connnum:
                continue
            connnum = int(connnum)
            (ip, port) = ipport.split(':')
            if not (ip and port):
                continue
            port = int(port)
            try:
                if platform.system() == 'Windows':
                    logging.debug('The Check Command:')
                    # logging.debug('netstat -an | find \"ESTABLISHED\" | find \"'+ip+'\" | find \"'+str(port)+'\"')
                    command = 'netstat -an | find \"ESTABLISHED\" | find \"' + \
                        ip + '\" | find \"' + str(port) + '\"'
                else:
                    # logging.debug('netstat -an | grep ESTABLISHED | grep '+ip+' | grep '+str(port))
                    command = 'netstat -an | grep ESTABLISHED | grep ' + \
                        ip + ' | grep ' + str(port)
                logging.debug('The Check Command: %s' % command)
                process = subprocess.Popen(
                    command,
                    stdout=subprocess.PIPE,
                    shell=True,
                    universal_newlines=True)
                output, unused_err = process.communicate()
                retcode = process.poll()
                ret = output.strip()
                if retcode:
                    logging.warning('Check Process retcode: %s' % retcode)
                logging.debug('The Connection Check output:\n %s' % ret)
                logging.debug('Weblogic Check unused_err %s' % unused_err)
                # One netstat line per established connection.
                if ret == '':
                    retlen = 0
                else:
                    retlen = len(ret.split('\n'))
                logging.debug('The Connnections established: %s,' % retlen)
                logging.debug(
                    'The Number of Connnections should be: %s,' %
                    connnum)
                if retlen < connnum:
                    check['connectioncheck'] = False
                    print(
                        'Only %i Connections connected to %s:%s' %
                        (retlen, ip, port))
                else:
                    check['connectioncheck'] = True
                    print(
                        'Connection Check OK: %i Connections connected to %s:%s' %
                        (retlen, ip, port))
            except Exception as ex:
                logging.error('Connections Check failed')
                check['connectioncheck'] = False
                logging.exception(ex)
            finally:
                logging.info('End Connections Check')
    # --- Windows service-state check ('sc query') ----------------------------
    if winservice:
        logging.info('Begin Windows Service Check')
        winservicecheckcommand = 'sc query'
        svrstate = {}
        check['winservice'] = True
        for service in winservice:
            svrstate[service] = None
            try:
                process = subprocess.Popen(
                    winservicecheckcommand,
                    stdout=subprocess.PIPE,
                    shell=True,
                    universal_newlines=True)
                output, unused_err = process.communicate()
                retcode = process.poll()
                ret = output.strip()
                if retcode:
                    logging.warning(
                        'Windows Service Check Process retcode: %s' %
                        retcode)
                logging.debug(
                    'The Windows Service Check output:\n %s' %
                    ret)
                logging.debug(
                    'he Windows Service Check unused_err %s' %
                    unused_err)
                svr = re.compile('SERVICE_NAME')
                svrname = re.compile(service)
                retlines = ret.split('\n')
                for outline in retlines:
                    if svr.search(outline) and svrname.search(outline):
                        # The STATE line sits 3 lines below SERVICE_NAME in
                        # 'sc query' output.
                        index = retlines.index(outline) + 3
                        stateline = retlines[index]
                        state = stateline.split(':')[1].split()[1].strip()
                        # check['winservice']=True
                        svrstate[service] = state
                        logging.info(
                            'The Service %s State :%s' %
                            (service, state))
                        break
                if not svrstate[service] == 'RUNNING':
                    check['winservice'] = False
            except Exception as ex:
                logging.error('Windows Service %s Check failed' % service)
                # NOTE(review): failure here leaves check['winservice'] True
                # (the assignment below is commented out) — confirm intent.
                # check['winservice'] = False
                logging.exception(ex)
        logging.debug('Windows Service Check %s' % svrstate)
    # --- WebLogic node-status check ------------------------------------------
    if weblogiccheckcommand:
        try:
            process = subprocess.Popen(
                weblogiccheckcommand,
                stdout=subprocess.PIPE,
                shell=True,
                universal_newlines=True)
            output, unused_err = process.communicate()
            retcode = process.poll()
            ret = output.strip()
            if retcode:
                logging.warning('Check Process retcode: %s' % retcode)
            logging.debug('Weblogic Check Output :\n %s' % output)
            logging.debug('Weblogic Check unused_err %s' % unused_err)
            if weblogicnodes:
                nodestatus = {}
                logging.debug('Weblogic Nodes: %s' % weblogicnodes)
                for node in weblogicnodes:
                    nodestatus[node] = None
                    # WLST prints "Current state of '<node>' : <STATE>".
                    keyword = 'Current state of \'' + node + '\''
                    keyword = re.compile(keyword)
                    for line in ret.split('\n'):
                        if keyword.search(line):
                            logging.debug('Matched line %s ' % line)
                            status = line.split(':')[1].strip()
                            logging.debug('Node status %s' % status)
                            nodestatus[node] = status
                logging.info('The Weblogic Nodes Status %s' % nodestatus)
                if todoaction in ['START', 'CHECK']:
                    # All nodes must be RUNNING for a pass.
                    check['weblogiccheck'] = True
                    for node in weblogicnodes:
                        if not (nodestatus[node] == 'RUNNING'):
                            check['weblogiccheck'] = False
                            break
                elif todoaction in ['STOP']:
                    # For STOP, True here means "still up" — it is inverted
                    # by the final aggregation below.
                    check['weblogiccheck'] = False
                    for node in weblogicnodes:
                        if not (
                                nodestatus[node] == 'SHUTDOWN' or nodestatus[node] is None):
                            check['weblogiccheck'] = True
                            break
            else:
                check['weblogiccheck'] = False
                logging.error('Weblogic Nodes not defined')
        except Exception as ex:
            check['weblogiccheck'] = False
            logging.exception(ex)
        finally:
            logging.info('End Weblogic Check')
    # --- aggregate ------------------------------------------------------------
    logging.info('The Total check result: %s' % check)
    if todoaction in ['START', 'CHECK']:
        # Every configured check must be True (a leftover None fails too).
        checkret = True
        for checkterm in check.keys():
            checkret = checkret and check[checkterm]
    elif todoaction in ['STOP']:
        # True only when no check still reports the service as alive.
        checkret = False
        for checkterm in check.keys():
            checkret = checkret or check[checkterm]
        checkret = not checkret
    return checkret
def get_execution_result(self, command):
    """Run ``command`` through the shell and return its stripped stdout."""
    proc = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        shell=True,
        universal_newlines=True)
    stdout_text, err_stream = proc.communicate()
    exit_code = proc.poll()
    if exit_code:
        logging.warning('execution retcode: %s' % exit_code)
    stripped = stdout_text.strip()
    logging.debug('execution retcode: %s' % exit_code)
    logging.debug('execution Output :\n %s' % stdout_text)
    logging.debug('execution unused_err %s' % err_stream)
    return stripped
def find_process_by_cmd(self, cmdkeyword):
    """Search running processes whose command line matches regex ``cmdkeyword``.

    Returns ``(ok, matches)``: ``ok`` is False only when the process
    listing itself failed; ``matches`` holds ``(pid, name)`` tuples on
    Windows and raw ``ps -ef`` lines elsewhere.
    """
    plist = []
    matcher = re.compile(cmdkeyword)
    if platform.system() == 'Windows':
        try:
            psutil = __import__('psutil')
            # psutil renamed get_pid_list() to pids(); support both.
            pid_lister = getattr(psutil, 'pids', None) or psutil.get_pid_list
            processlist = pid_lister()
        except Exception:
            self.printstack()
            return (False, plist)
        for pid in processlist:
            try:
                pro = psutil.Process(pid)
            except Exception:
                # BUGFIX: self.nowtime is a method and was printed unbound.
                print(self.nowtime(), 'Abnormal:No process of', pid)
                self.printstack()
                continue
            # cmdline/name are methods in modern psutil, properties in old ones.
            cmdline = pro.cmdline() if callable(pro.cmdline) else pro.cmdline
            name = pro.name() if callable(pro.name) else pro.name
            for cmd in cmdline:
                if matcher.search(cmd):
                    print(cmdkeyword, 'Matched')
                    plist.append((pid, name))
    else:
        try:
            process = subprocess.Popen(
                'ps -ef', stdout=subprocess.PIPE, shell=True,
                universal_newlines=True)
            output, _ = process.communicate()
            # BUGFIX: the listing was previously read and discarded, so on
            # non-Windows hosts no process could ever be matched.
            for line in output.splitlines():
                if matcher.search(line):
                    plist.append(line)
        except Exception:
            return (False, plist)
    return (True, plist)
def printstack(self):
    """Dump the active exception traceback to stdout, bracketed by markers."""
    banner_open = "*** print_exc begins:"
    banner_close = "*** print_exc ends ***"
    print(banner_open)
    traceback.print_exc(file=sys.stdout)
    print(banner_close)
# Duration format: 'days-hours-minutes-seconds', e.g. '1-0-0-11'.
# Returns the total number of seconds.
def convert_to_time(self, duration):
    """Convert a 'd-h-m-s' duration string to seconds; None on bad input."""
    parts = duration.split('-')
    if len(parts) != 4:
        print('duration format ERROR')
        return None
    try:
        days, hours, minutes, seconds = (int(p) for p in parts)
        total = seconds + 60 * (minutes + 60 * (hours + 24 * days))
    except Exception:
        print('duration format ERROR')
        self.printstack()
        return None
    return total
def nowtime(self):
    """Current local time formatted as 'YYYYMMDD-HHMMSS'."""
    return time.strftime('%Y%m%d-%H%M%S', time.localtime())
def nowMonth(self):
    """Current local month formatted as 'YYYYMM'."""
    return time.strftime('%Y%m', time.localtime())
def ParseArgs(argv=None):
    '''
    Parse command-line arguments.

    :param argv: optional argument list; defaults to sys.argv[1:]
                 (backward-compatible generalization for testability)
    :return: (options, args)
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        "-f",
        "--file",
        type="string",
        dest="filename",
        help="Specify the Config file",
        default="setting.ini")
    parser.add_option(
        "-n",
        "--node",
        type="string",
        dest="node",
        help="Specify the the name of Server/Node")
    parser.add_option(
        "-s",
        "--section",
        type="string",
        dest="section",
        help="Specify the Section to Run",
        default="clear-test")
    parser.add_option(
        "-l",
        "--log",
        type="string",
        dest="log",
        help="Specify the log path")
    parser.add_option(
        "-d",
        action="store_true",
        # BUGFIX: the default was the string 'True', not the boolean.
        default=True,
        dest="debug",
        help="Indicate whether to log debug info")
    (options, args) = parser.parse_args(argv)
    if not options.filename:
        # BUGFIX: this called options.error(), which does not exist on the
        # optparse Values object; error() belongs to the parser.
        parser.error(
            'Error : Config file Missing. Use -f or --file to specify the config file')
    print('*' * 50)
    print(options)
    print('*' * 50)
    return options, args
def main():
    """Entry point: parse CLI options, run the configured operation, exit with its status."""
    cli_options, cli_args = ParseArgs()
    operator = Coper(cli_options, cli_args)
    sys.exit(operator.Run())


if __name__ == '__main__':
    main()
| 38,718 | 11,678 | 46 |
172e48614760c2fed05e8b7e2fafcdcdf70e7a08 | 264 | py | Python | imgaug/tests/doc_test.py | costajob/image_augmenter | a39a1b62c02526573788d691b94eee7ad13d49e9 | [
"MIT"
] | 2 | 2019-10-26T21:44:01.000Z | 2020-11-12T06:32:17.000Z | imgaug/tests/doc_test.py | costajob/image_augmenter | a39a1b62c02526573788d691b94eee7ad13d49e9 | [
"MIT"
] | 6 | 2019-10-23T08:34:55.000Z | 2022-03-11T23:42:27.000Z | imgaug/tests/doc_test.py | costajob/image_augmenter | a39a1b62c02526573788d691b94eee7ad13d49e9 | [
"MIT"
] | null | null | null | import doctest
from pkgutil import iter_modules
| 26.4 | 82 | 0.69697 | import doctest
from pkgutil import iter_modules
def load_tests(loader, tests, ignore):
    """unittest load_tests hook: attach a doctest suite for every imgaug
    submodule except the 'tests' package itself."""
    for _, mod_name, _ in iter_modules(['imgaug']):
        if mod_name == 'tests':
            continue
        tests.addTests(doctest.DocTestSuite(mod_name))
    return tests
| 191 | 0 | 23 |
d2e168284d43ab79217bafd2f060d67b9b6df3e8 | 317 | py | Python | tfpromote/version.py | billtrust/terraform-promote | bd54fc8aaef6988bc2ce0ab14ddde077f133c0b7 | [
"MIT"
] | 4 | 2020-12-17T05:47:58.000Z | 2022-03-16T13:46:34.000Z | tfpromote/version.py | billtrust/terraform-promote | bd54fc8aaef6988bc2ce0ab14ddde077f133c0b7 | [
"MIT"
] | null | null | null | tfpromote/version.py | billtrust/terraform-promote | bd54fc8aaef6988bc2ce0ab14ddde077f133c0b7 | [
"MIT"
] | null | null | null | __version__ = '0.2.9'
__title__ = 'tfpromote'
__description__ = 'Compare and promote Terraform files from dev to prod environments.'
__url__ = 'https://github.com/billtrust/terraform-promote'
__author__ = 'Doug Kerwin'
__author_email__ = 'dkerwin@billtrust.com'
__license__ = 'MIT'
__keywords__ = ['terraform'] | 39.625 | 87 | 0.753943 | __version__ = '0.2.9'
# Package metadata for tfpromote (typically consumed by setup.py).
__title__ = 'tfpromote'
__description__ = 'Compare and promote Terraform files from dev to prod environments.'
__url__ = 'https://github.com/billtrust/terraform-promote'
__author__ = 'Doug Kerwin'
__author_email__ = 'dkerwin@billtrust.com'
__license__ = 'MIT'
__keywords__ = ['terraform'] | 0 | 0 | 0 |
feaea07a171ac30bdc6ccc6ffccc971901356e5f | 5,106 | py | Python | zorg_emic/emic2.py | zorg-framework/zorg-emic | 34d49897131cf7773b2b0f46e1e0a796911144e3 | [
"MIT"
] | 3 | 2016-03-25T15:40:11.000Z | 2017-04-22T03:05:16.000Z | zorg_emic/emic2.py | zorg-framework/zorg-emic | 34d49897131cf7773b2b0f46e1e0a796911144e3 | [
"MIT"
] | null | null | null | zorg_emic/emic2.py | zorg-framework/zorg-emic | 34d49897131cf7773b2b0f46e1e0a796911144e3 | [
"MIT"
] | null | null | null | from zorg.driver import Driver
from multiprocessing import Queue
from threading import Thread
import time
| 27.75 | 75 | 0.533294 | from zorg.driver import Driver
from multiprocessing import Queue
from threading import Thread
import time
class Emic2(Driver):
    """Zorg driver for the Parallax Emic 2 text-to-speech module.

    Commands are never written to the serial link directly: each public
    method pushes a one-line Emic 2 command onto ``self.queue``, and a
    daemon thread (``watch``) drains the queue, sending a command only
    after the device has printed its ':' ready prompt.
    """

    def __init__(self, options, connection):
        super(Emic2, self).__init__(options, connection)
        # Human-readable label of the most recently requested action.
        self.currentAction = 'idle'
        # Pending serial commands; consumed by the `watch` daemon thread.
        self.queue = Queue()
        self.thread = Thread(target=self.watch, args=())
        # Daemon thread so the worker never blocks interpreter shutdown.
        self.thread.daemon = True
        self.commands += [
            "speak", "set_voice", "set_language",
            "set_volume", "set_rate", "set_parser",
            "pause", "stop"
        ]

    def watch(self):
        """Worker loop: forward queued commands once the device is ready."""
        while True:
            waiting = True
            # Wait if the queue is empty
            if self.queue.empty():
                time.sleep(0.5)
                continue
            while waiting:
                # Poke the device with a newline so it replies with its prompt.
                self.connection.serial.write("\n")
                time.sleep(0.3)
                data = self.connection.serial_read()
                # The Emic 2 transmits a ":" when ready to receive commands
                if data == ':':
                    value = self.queue.get()
                    self.connection.serial_write("%s\n" % (value))
                    waiting = False
                time.sleep(0.5)
        # NOTE(review): unreachable — the `while True` loop never breaks.
        self.connection.disconnect()

    def start(self):
        """Connect the serial link, wake the device, and start the worker thread."""
        self.connection.connect()
        # Setup involves writing a new line to initialize the board
        self.connection.serial_write('\n')
        # Brief settle delay (0.05 s; an earlier comment claimed 500 ms).
        time.sleep(0.05)
        # Start a background thread to process items in the queue
        self.thread.start()

    def is_valid_string(self, text):
        """
        The Emic 2 expects characters that conform to the ISO-8859-1 Latin
        character set. This method will return false if a string is not
        ISO-8859-1 compatible.

        NOTE(review): the implementation actually enforces 7-bit ASCII
        (ord < 128), stricter than ISO-8859-1 — confirm intended charset.
        """
        return all(ord(character) < 128 for character in text)

    def word_wrap(self, text, width=1023):
        """
        A simple word wrapping greedy algorithm that puts
        as many words into a single string as possible.

        NOTE(review): assumes every `width`-sized chunk contains whitespace;
        an unbroken run longer than `width` would walk `index` negative.
        """
        substrings = []
        string = text
        while len(string) > width:
            index = width - 1
            # Back up to the nearest whitespace so words are not split.
            while not string[index].isspace():
                index = index - 1
            line = string[0:index]
            substrings.append(line)
            string = string[index + 1:]
        substrings.append(string)
        return substrings

    def speak(self, text):
        """
        The main function to convert text into speech.

        Raises Exception when `text` fails is_valid_string; long messages
        are wrapped and queued as multiple 'S' commands.
        """
        if not self.is_valid_string(text):
            raise Exception("%s is not ISO-8859-1 compatible." % (text))
        # Maximum allowable 1023 characters per message
        if len(text) > 1023:
            lines = self.word_wrap(text, width=1023)
            for line in lines:
                self.queue.put("S%s" % (line))
        else:
            self.queue.put("S%s" % (text))

    def set_voice(self, voice):
        """
        Change between 9 available voices on the Emic2.
        0: Perfect Paul (Paulo)
        1: Huge Harry (Francisco)
        2: Beautiful Betty
        3: Uppity Ursula
        4: Doctor Dennis (Enrique)
        5: Kit the Kid
        6: Frail Frank
        7: Rough Rita
        8: Whispering Wendy (Beatriz)
        """
        self.currentAction = 'setting voice'
        self.queue.put('N%d' % (voice))

    def set_language(self, language, dialect=None):
        """
        Set the language used for TTS.
        en: English
        es: Spanish | [ lan: latino or ca: castilian ]
        """
        self.currentAction = 'setting language'
        l = 0
        if language == 'en':
            l = 0
        elif language == 'es':
            l = 1
            if dialect == 'ca':
                l = 2
        self.queue.put('l%s' % (l))

    def set_volume(self, volume):
        """
        Set the volume of the Emic 2.
        Volume range [-48 to 18]
        -48 (softest) to 18 (loudest)
        """
        self.currentAction = 'setting volume'
        self.queue.put('V%d' % (volume))

    def set_rate(self, rate):
        """
        Set the speaking rate in words per minute.
        From 75 (slowest) to 600 (fastest).
        Default value: 200.
        """
        self.currentAction = 'setting rate'
        self.queue.put('W%d' % (rate))

    def set_parser(self, parser):
        """
        Select either the Epson or DECtalk text parsing engine.
        0 DECtalk
        1 Epson (default)
        """
        self.queue.put('P%d' % (parser))

    def pause(self):
        """
        Immediately pause current message.
        """
        self.currentAction = 'paused'
        self.queue.put('Z')

    def stop(self):
        """
        Immediately stop the current message from being spoken.
        This command is only valid while a message is playing.
        """
        self.currentAction = 'stopped'
        self.queue.put('X')

    def reset(self):
        """
        Reset the current message beign spoken.
        """
        self.currentAction = 'resetting'
        self.queue.put('R')
| 1,367 | 3,609 | 23 |
c9e06b3be13469f6ff313565de884b77d100f2da | 28,760 | py | Python | jumpscale/sals/vdc/proxy.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 13 | 2020-09-02T09:05:08.000Z | 2022-03-12T02:43:24.000Z | jumpscale/sals/vdc/proxy.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 1,998 | 2020-06-15T11:46:10.000Z | 2022-03-24T22:12:41.000Z | jumpscale/sals/vdc/proxy.py | threefoldtech/js-sdk | 811f783ac34a60225175bab2d806802a87b9d5c7 | [
"Apache-2.0"
] | 8 | 2020-09-29T06:50:35.000Z | 2021-06-14T03:30:52.000Z | import random
import uuid
import os
import gevent
from jumpscale.clients.explorer.models import NextAction, WorkloadType
from jumpscale.loader import j
from jumpscale.sals.reservation_chatflow import deployer
from jumpscale.sals.reservation_chatflow.deployer import DeploymentFailed
from .base_component import VDCBaseComponent
from .scheduler import Scheduler
from textwrap import dedent
VDC_PARENT_DOMAIN = j.core.config.get("VDC_PARENT_DOMAIN", "grid.tf")
PROXY_SERVICE_TEMPLATE = """
kind: Service
apiVersion: v1
metadata:
name: {{ service_name }}
spec:
type: ClusterIP
ports:
- port: {{ port }}
"""
PROXY_ENDPOINT_TEMPLATE = """
kind: Endpoints
apiVersion: v1
metadata:
name: {{ endpoint_name }}
subsets:
- addresses:
{% for address in addresses %}
- ip: {{ address }}
{% endfor %}
ports:
- port: {{ port }}
"""
PROXY_INGRESS_TEMPLATE = """
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ ingress_name }}
{% if force_https %}
annotations:
ingress.kubernetes.io/ssl-redirect: "true"
{% endif %}
spec:
rules:
- host: {{ hostname }}
http:
paths:
- path: /
backend:
serviceName: {{ service_name }}
servicePort: {{ service_port }}
"""
| 41.500722 | 178 | 0.578999 | import random
import uuid
import os
import gevent
from jumpscale.clients.explorer.models import NextAction, WorkloadType
from jumpscale.loader import j
from jumpscale.sals.reservation_chatflow import deployer
from jumpscale.sals.reservation_chatflow.deployer import DeploymentFailed
from .base_component import VDCBaseComponent
from .scheduler import Scheduler
from textwrap import dedent
VDC_PARENT_DOMAIN = j.core.config.get("VDC_PARENT_DOMAIN", "grid.tf")
PROXY_SERVICE_TEMPLATE = """
kind: Service
apiVersion: v1
metadata:
name: {{ service_name }}
spec:
type: ClusterIP
ports:
- port: {{ port }}
"""
PROXY_ENDPOINT_TEMPLATE = """
kind: Endpoints
apiVersion: v1
metadata:
name: {{ endpoint_name }}
subsets:
- addresses:
{% for address in addresses %}
- ip: {{ address }}
{% endfor %}
ports:
- port: {{ port }}
"""
PROXY_INGRESS_TEMPLATE = """
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ ingress_name }}
{% if force_https %}
annotations:
ingress.kubernetes.io/ssl-redirect: "true"
{% endif %}
spec:
rules:
- host: {{ hostname }}
http:
paths:
- path: /
backend:
serviceName: {{ service_name }}
servicePort: {{ service_port }}
"""
class VDCProxy(VDCBaseComponent):
def __init__(self, vdc_deployer, farm_name=None):
super().__init__(vdc_deployer)
self._farm_name = farm_name
self._farm = None
@property
def farm(self):
if not self._farm:
self._farm = self.explorer.farms.get(farm_name=self.farm_name)
return self._farm
@property
def farm_name(self):
if not self._farm_name:
gateways = self.explorer.gateway.list()
random.shuffle(gateways)
for gateway in gateways:
if not self.zos.nodes_finder.filter_is_up(gateway):
continue
if not gateway.dns_nameserver:
continue
if not gateway.farm_id:
continue
farm_id = gateway.farm_id
try:
farm = self.explorer.farms.get(farm_id)
self._farm_name = farm.name
return self._farm_name
except Exception as e:
self.vdc_deployer.error(f"failed to fetch farm with id {farm_id} due to error {str(e)}")
continue
raise j.exceptions.Runtime("couldn't find any running gateway")
return self._farm_name
def fetch_myfarm_gateways(self):
farm_gateways = []
for gateway in self.explorer.gateway.list(farm_id=self.farm.id):
if not self.zos.nodes_finder.filter_is_up(gateway):
continue
if not gateway.dns_nameserver:
continue
farm_gateways.append(gateway)
return farm_gateways
def get_gateway_pool_id(self):
"""
return a pool id on my farm that has available gateway
"""
self.vdc_deployer.info(f"looking for pool with gateways within farm: {self.farm_name}")
farm_gateways = self.fetch_myfarm_gateways()
if not farm_gateways:
self.vdc_deployer.error(f"no gateways available in farm: {self.farm_name}")
return
self.vdc_deployer.info(f"looking for existing pools that contain gateways of farm: {self.farm_name}")
gateway_node_ids = [node.node_id for node in farm_gateways]
for pool in self.zos.pools.list():
if list(set(pool.node_ids) & set(gateway_node_ids)):
self.vdc_deployer.info(
f"found pool with available gateways on farm: {self.farm_name} pool_id: {pool.pool_id}"
)
return pool.pool_id
self.vdc_deployer.info(f"reserving an empty pool on farm: {self.farm_name}")
# no pool was found need to create a pool
pool_info = self.zos.pools.create(0, 0, 0, self.farm_name)
self.vdc_deployer.info(f"gateway pool: {pool_info.reservation_id}")
return pool_info.reservation_id
def get_gateway_addresses(self, gateway):
addresses = []
for nameserver in gateway.dns_nameserver:
try:
self.vdc_deployer.info(f"resolving name: {nameserver} of gateway {gateway.node_id}")
addresses.append(j.sals.nettools.get_host_by_name(nameserver))
except Exception as e:
self.vdc_deployer.error(
f"failed to resolve dns: {nameserver} of gateway {gateway.node_id} due to error {str(e)}"
)
continue
return addresses
@staticmethod
def check_domain_availability(domain):
try:
ip = j.sals.nettools.get_host_by_name(domain)
if ip:
return True
except:
return False
def wait_domain_population(self, domain, timeout=5):
end = j.data.time.now().timestamp + timeout * 60
while j.data.time.now().timestamp < end:
if self.check_domain_availability(domain):
return True
gevent.sleep(3)
return False
def check_subdomain_existence(self, subdomain, workloads=None):
self.vdc_deployer.info(f"checking the ownership of subdomain {subdomain}")
workloads = workloads or self.zos.workloads.list(self.identity.tid, NextAction.DEPLOY)
# get the latest workload that represents this domain
old_workloads = []
latest_domain_workload = None
for workload in workloads:
if workload.info.workload_type != WorkloadType.Subdomain:
continue
if workload.domain != subdomain:
continue
old_workloads.append(workload)
latest_domain_workload = workload
if len(old_workloads) > 1:
old_workloads.pop(-1)
self.vdc_deployer.info(
f"Cancelling old workloads for subdomain: {subdomain} wids: {[workload.id for workload in old_workloads]}"
)
for workload in old_workloads:
self.zos.decomission(workload.id)
for workload in old_workloads:
deployer.wait_workload_deletion(workload.id, identity_name=self.identity.instance_name)
return latest_domain_workload
def verify_subdomain(self, subdomain_workload, addresses=None):
gateway = self.explorer.gateway.get(subdomain_workload.info.node_id)
addresses = addresses or self.get_gateway_addresses(gateway)
self.vdc_deployer.info(
f"verifying subdomain workload: {subdomain_workload.id} ips: {subdomain_workload.ips} matching addresses {addresses}"
)
if set(addresses.sort()) == set(subdomain_workload.ips.sort()):
self.vdc_deployer.info(f"subdomain {subdomain_workload.id} matching addresses {addresses}")
return True
self.vdc_deployer.info(f"Cancelling subdomain workload {subdomain_workload.id}")
self.zos.workloads.decomission(subdomain_workload.id)
deployer.wait_workload_deletion(subdomain_workload.id, identity_name=self.identity.instance_name)
return False
def check_subdomain_owner(self, subdomain):
e = self.zos._explorer
users = e.users.list()
uids = [u.id for u in users]
for uid in uids:
workloads = e.workloads.list_workloads(uid, next_action="DEPLOY")
for w in workloads:
if w.info.workload_type != WorkloadType.Subdomain:
continue
if w.domain == subdomain:
return uid
def reserve_subdomain(self, gateway, prefix, vdc_uuid, pool_id=None, ip_address=None, exposed_wid=None):
"""
it will try to create a working subdomain on any of the available managed domain of the gateway
Args:
gateway: gateway to use
prefix: the prefix that will be added to the managed domain
ip_address: which the subdomain will point to. by default will point to the chosen gateway
yields:
subdomain
workload id
"""
desc = j.data.serializers.json.loads(self.vdc_deployer.description)
desc["exposed_wid"] = exposed_wid
desc = j.data.serializers.json.dumps(desc)
pool_id = pool_id or self.get_gateway_pool_id()
if not pool_id:
return None
for managed_domain in gateway.managed_domains:
self.vdc_deployer.info(f"reserving subdomain of {managed_domain}")
# vdc 3bot to be vdctest.grid.tf, solutions to be webg1test.grid.tf or alike
if not managed_domain.startswith("vdc"):
continue
subdomain = f"{prefix}.{managed_domain}"
addresses = None
# check availability of the subdomain
if self.check_domain_availability(subdomain):
self.vdc_deployer.info(f"subdomain {subdomain} already exists")
# check if the subdomain is owned by me
self.vdc_deployer.info(f"checking if subdomain {subdomain} is owned by identity {self.identity.tid}")
subdomain_workload = self.check_subdomain_existence(subdomain)
if not subdomain_workload:
# subdomain is not mine, get a new one
self.vdc_deployer.info(f"checking if subdomain {subdomain} is deployed on the explorer")
owner_id = self.check_subdomain_owner(subdomain)
if owner_id:
self.vdc_deployer.error(
f"subdomain {subdomain} exists and not owned by VDC identity {self.identity.tid}. Subdomain owner id: {owner_id}"
)
continue
# verify the subdomain is pointing to the correct address or cancel it
valid = self.verify_subdomain(subdomain_workload, addresses)
if valid:
# use the subdomain
yield subdomain, subdomain_workload.id
# check the next managed domain
continue
if ip_address:
addresses = [ip_address]
else:
addresses = self.get_gateway_addresses(gateway)
# check resolvable names of the gateway dns servers
if not addresses:
self.vdc_deployer.error(f"gateway {gateway.node_id} doesn't have any valid nameservers configured")
break
# check population of the managed domain
if not deployer.test_managed_domain(
gateway.node_id, managed_domain, pool_id, gateway, self.identity.instance_name
):
self.vdc_deployer.error(
f"population of managed domain {managed_domain} failed on gateway {gateway.node_id}"
)
continue
# reserve subdomain
wid = deployer.create_subdomain(
pool_id,
gateway.node_id,
subdomain,
addresses,
identity_name=self.identity.instance_name,
solution_uuid=vdc_uuid,
exposed_wid=exposed_wid,
description=desc,
)
try:
success = deployer.wait_workload(
wid, bot=self.bot, identity_name=self.identity.instance_name, cancel_by_uuid=False
)
if not success:
raise DeploymentFailed()
except DeploymentFailed:
self.vdc_deployer.error(f"Subdomain {subdomain} failed. wid: {wid}")
continue
populated = self.wait_domain_population(subdomain)
if populated:
self.vdc_deployer.info(f"Subdomain {subdomain} created successfully pointing to {addresses}")
yield subdomain, wid
else:
self.vdc_deployer.error(f"Subdomain {subdomain} failed to populate wid: {wid}")
self.zos.workloads.decomission(wid)
self.vdc_deployer.error(f"All attempts to reserve a subdomain failed on farm {self.farm_name}")
def _deploy_nginx_proxy(
self,
scheduler,
wid,
subdomain,
gateway,
pool_id,
secret,
ip_address,
port,
gateway_pool_id,
solution_uuid,
description,
):
# proxy the conainer
cont_id = None
proxy_id = None
for node in scheduler.nodes_by_capacity(cru=1, mru=1, sru=0.25):
try:
self.vdc_deployer.info(
f"deploying nginx proxy for wid: {wid} on node: {node.node_id} subdomain: {subdomain} gateway: {gateway.node_id}"
)
cont_id, proxy_id = deployer.expose_and_create_certificate(
pool_id=pool_id,
gateway_id=gateway.node_id,
network_name=self.vdc_name,
trc_secret=secret,
domain=subdomain,
email=self.vdc_deployer.email,
solution_ip=ip_address,
solution_port=port,
enforce_https=True,
proxy_pool_id=gateway_pool_id,
bot=self.bot,
solution_uuid=solution_uuid,
secret=secret,
node_id=node.node_id,
exposed_wid=wid,
identity_name=self.identity.instance_name,
public_key=self.vdc_deployer.ssh_key.public_key.strip(),
description=description,
)
success = deployer.wait_workload(
cont_id, self.bot, identity_name=self.identity.instance_name, cancel_by_uuid=False
)
if not success:
self.vdc_deployer.error(
f"Nginx container for wid: {wid} failed on node: {node.node_id}, nginx_wid: {cont_id}"
)
# container only failed. no need to decomission subdomain
self.zos.workloads.decomission(proxy_id)
continue
return subdomain
except DeploymentFailed:
self.vdc_deployer.error(
f"Proxy reservation for wid: {wid} failed on node: {node.node_id}, subdomain: {subdomain}, gateway: {gateway.node_id}"
)
if cont_id:
self.zos.workloads.decomission(cont_id)
if proxy_id:
self.zos.workloads.decomission(proxy_id)
continue
def _deploy_trc_proxy(
self,
scheduler,
wid,
subdomain,
gateway,
pool_id,
secret,
ip_address,
port,
tls_port,
gateway_pool_id,
solution_uuid,
description,
):
cont_id = None
proxy_id = None
for node in scheduler.nodes_by_capacity(cru=1, mru=1, sru=0.25):
try:
self.vdc_deployer.info(
f"Deploying trc proxy for wid: {wid} on node: {node.node_id} subdomain: {subdomain} gateway: {gateway.node_id}"
)
cont_id, proxy_id = deployer.expose_address(
reserve_proxy=True,
pool_id=pool_id,
gateway_id=gateway.node_id,
network_name=self.vdc_name,
trc_secret=secret,
domain_name=subdomain,
local_ip=ip_address,
port=port,
tls_port=tls_port,
proxy_pool_id=gateway_pool_id,
bot=self.bot,
solution_uuid=solution_uuid,
node_id=node.node_id,
exposed_wid=wid,
identity_name=self.identity.instance_name,
description=description,
)
success = deployer.wait_workload(
cont_id, self.bot, identity_name=self.identity.instance_name, cancel_by_uuid=False
)
if not success:
self.vdc_deployer.error(
f"Nginx container for wid: {wid} failed on node: {node.node_id}, nginx_wid: {cont_id}"
)
# container only failed. no need to decomission subdomain
self.zos.workloads.decomission(proxy_id)
continue
return subdomain
except DeploymentFailed:
self.vdc_deployer.error(
f"proxy reservation for wid: {wid} failed on node: {node.node_id}, subdomain: {subdomain}, gateway: {gateway.node_id}"
)
if cont_id:
self.zos.workloads.decomission(cont_id)
if proxy_id:
self.zos.workloads.decomission(proxy_id)
continue
def proxy_container_over_custom_domain(
self,
prefix,
wid,
port,
solution_uuid,
pool_id=None,
secret=None,
scheduler=None,
tls_port=None,
parent_domain=None,
):
"""
Args:
prefix: MUST BE UNIQUE will be appended to parent domain (vdc.grid.tf) if it already exist it will be deleted and recreated
wid: workload id of the container to expose
"""
parent_domain = parent_domain or VDC_PARENT_DOMAIN
subdomain = f"{prefix}.{parent_domain}"
nc = j.clients.name.get("VDC")
nc.username = os.environ.get("VDC_NAME_USER")
nc.token = os.environ.get("VDC_NAME_TOKEN")
secret = secret or uuid.uuid4().hex
secret = f"{self.identity.tid}:{secret}"
scheduler = scheduler or Scheduler(self.farm_name)
workload = self.zos.workloads.get(wid)
if workload.info.workload_type != WorkloadType.Container:
raise j.exceptions.Validation(f"can't expose workload {wid} of type {workload.info.workload_type}")
pool_id = pool_id or workload.info.pool_id
ip_address = workload.network_connection[0].ipaddress
self.vdc_deployer.info(f"proxy container {wid} ip: {ip_address} port: {port} pool: {pool_id}")
gateways = self.fetch_myfarm_gateways()
random.shuffle(gateways)
gateway_pool_id = self.get_gateway_pool_id()
desc = j.data.serializers.json.loads(self.vdc_deployer.description)
desc["exposed_wid"] = wid
desc = j.data.serializers.json.dumps(desc)
for gateway in gateways:
# if old records exist for this prefix clean it.
existing_records = nc.nameclient.list_records_for_host(parent_domain, prefix)
if existing_records:
for record_dict in existing_records:
nc.nameclient.delete_record(record_dict["fqdn"][:-1], record_dict["id"])
# create a subdomain in domain provider that points to the gateway
ip_addresses = self.get_gateway_addresses(gateway)
for address in ip_addresses:
nc.nameclient.create_record(parent_domain, prefix, "A", address)
if not tls_port:
result = self._deploy_nginx_proxy(
scheduler,
wid,
subdomain,
gateway,
pool_id,
secret,
ip_address,
port,
gateway_pool_id,
solution_uuid,
desc,
)
else:
result = self._deploy_trc_proxy(
scheduler,
wid,
subdomain,
gateway,
pool_id,
secret,
ip_address,
port,
tls_port,
gateway_pool_id,
solution_uuid,
desc,
)
if result:
return result
scheduler.refresh_nodes()
def proxy_container_over_managed_domain(
self, prefix, wid, port, solution_uuid, pool_id=None, secret=None, scheduler=None, tls_port=None
):
secret = secret or uuid.uuid4().hex
secret = f"{self.identity.tid}:{secret}"
scheduler = scheduler or Scheduler(self.farm_name)
workload = self.zos.workloads.get(wid)
if workload.info.workload_type != WorkloadType.Container:
raise j.exceptions.Validation(f"can't expose workload {wid} of type {workload.info.workload_type}")
pool_id = pool_id or workload.info.pool_id
ip_address = workload.network_connection[0].ipaddress
self.vdc_deployer.info(f"proxy container {wid} ip: {ip_address} port: {port} pool: {pool_id}")
gateways = self.fetch_myfarm_gateways()
random.shuffle(gateways)
gateway_pool_id = self.get_gateway_pool_id()
desc = j.data.serializers.json.loads(self.vdc_deployer.description)
desc["exposed_wid"] = wid
desc = j.data.serializers.json.dumps(desc)
for gateway in gateways:
for subdomain, subdomain_id in self.reserve_subdomain(
gateway, prefix, solution_uuid, gateway_pool_id, exposed_wid=wid
):
if not tls_port:
result = self._deploy_nginx_proxy(
scheduler,
wid,
subdomain,
gateway,
pool_id,
secret,
ip_address,
port,
gateway_pool_id,
solution_uuid,
desc,
)
else:
result = self._deploy_trc_proxy(
scheduler,
wid,
subdomain,
gateway,
pool_id,
secret,
ip_address,
port,
tls_port,
gateway_pool_id,
solution_uuid,
desc,
)
if result:
return result
self.zos.workloads.decomission(subdomain_id)
self.vdc_deployer.error(f"failed to proxy wid: {wid} on subdomain {subdomain}")
scheduler.refresh_nodes()
self.vdc_deployer.error(f"failed to expose workload {wid} on gateway {gateway.node_id}")
self.vdc_deployer.error(f"All attempts to expose wid {wid} failed")
def ingress_proxy_over_custom_domain(
self, name, prefix, port, public_ip, private_ip=None, wid=None, parent_domain=None, force_https=True
):
if not any([private_ip, wid]):
raise j.exceptions.Input(f"must pass private ip or wid")
parent_domain = parent_domain or VDC_PARENT_DOMAIN
subdomain = f"{prefix}.{parent_domain}"
nc = j.clients.name.get("VDC")
nc.username = os.environ.get("VDC_NAME_USER")
nc.token = os.environ.get("VDC_NAME_TOKEN")
if not private_ip:
workload = self.zos.workloads.get(wid)
if workload.info.workload_type != WorkloadType.Container:
raise j.exceptions.Validation(f"can't expose workload {wid} of type {workload.info.workload_type}")
ip_address = workload.network_connection[0].ipaddress
else:
ip_address = private_ip
self.vdc_deployer.info(
f"ingress proxy over custom domain: {subdomain}, name: {name}, ip_address: {ip_address}, public_ip: {public_ip}"
)
# if old records exist for this prefix clean it.
existing_records = nc.nameclient.list_records_for_host(parent_domain, prefix)
if existing_records:
for record_dict in existing_records:
nc.nameclient.delete_record(record_dict["fqdn"][:-1], record_dict["id"])
# create a subdomain in domain provider that points to the gateway
nc.nameclient.create_record(parent_domain, prefix, "A", public_ip)
self._create_ingress(name, subdomain, [ip_address], port, force_https)
return subdomain
def ingress_proxy_over_managed_domain(self, name, prefix, wid, port, public_ip, solution_uuid, force_https=True):
workload = self.zos.workloads.get(wid)
if workload.info.workload_type != WorkloadType.Container:
raise j.exceptions.Validation(f"can't expose workload {wid} of type {workload.info.workload_type}")
ip_address = workload.network_connection[0].ipaddress
gateways = self.fetch_myfarm_gateways()
gateway_pool_id = self.get_gateway_pool_id()
random.shuffle(gateways)
for gateway in gateways:
domain_generator = self.reserve_subdomain(
gateway, prefix, solution_uuid, gateway_pool_id, exposed_wid=wid, ip_address=public_ip
)
try:
subdomain, subdomain_id = next(domain_generator)
except StopIteration:
continue
self.vdc_deployer.info(
f"ingress proxy over custom domain: {subdomain}, name: {name}, ip_address: {ip_address}, public_ip: {public_ip}"
)
try:
self._create_ingress(name, subdomain, [ip_address], port, force_https)
return subdomain
except Exception as e:
self.vdc_deployer.error(f"failed to create proxy ingress config due to error {str(e)}")
self.zos.workloads.decomission(subdomain_id)
return
def _create_ingress(self, name, domain, addresses, port, force_https=True):
service_text = j.tools.jinja2.render_template(
template_text=PROXY_SERVICE_TEMPLATE, service_name=name, port=port
)
self.vdc_deployer.vdc_k8s_manager.execute_native_cmd(f"echo -e '{service_text}' | kubectl apply -f -")
endpoint_text = j.tools.jinja2.render_template(
template_text=PROXY_ENDPOINT_TEMPLATE, endpoint_name=name, addresses=addresses, port=port
)
self.vdc_deployer.vdc_k8s_manager.execute_native_cmd(f"echo -e '{endpoint_text}' | kubectl apply -f -")
ingress_text = j.tools.jinja2.render_template(
template_text=PROXY_INGRESS_TEMPLATE,
ingress_name=name,
hostname=domain,
service_name=name,
service_port=port,
force_https=force_https,
)
self.vdc_deployer.vdc_k8s_manager.execute_native_cmd(f"echo -e '{ingress_text}' | kubectl apply -f -")
def socat_proxy(self, name, src_port, dst_port, dst_ip):
public_ip = self.vdc_instance.get_public_ip()
if not public_ip:
raise j.exceptions.Runtime(f"couldn't get a public ip for vdc: {self.vdc_instance.vdc_name}")
ssh_client = self.vdc_instance.get_ssh_client(
name,
public_ip,
"rancher",
f"{self.vdc_deployer.ssh_key_path}/id_rsa" if self.vdc_deployer.ssh_key_path else None,
)
rc, out, _ = ssh_client.sshclient.run(f"sudo netstat -tulpn | grep :{src_port}", warn=True)
if rc == 0:
raise j.exceptions.Input(f"port: {src_port} is already exposed. details: {out}. choose a different port")
socat = "/var/lib/rancher/k3s/data/current/bin/socat"
cmd = f"{socat} tcp-listen:{src_port},reuseaddr,fork tcp:{dst_ip}:{dst_port}"
template = f"""#!/sbin/openrc-run
name="{name}"
command="{cmd}"
pidfile="/var/run/{name}.pid"
command_background=true
"""
template = dedent(template)
file_name = f"socat-{name}"
rc, out, err = ssh_client.sshclient.run(
f"sudo touch /etc/init.d/{file_name} && sudo chmod 777 /etc/init.d/{file_name} && echo '{template}' >> /etc/init.d/{file_name} && sudo rc-service {file_name} start",
warn=True,
)
if rc != 0:
j.exceptions.Runtime(f"failed to expose port using socat. rc: {rc}, out: {out}, err: {err}")
return public_ip
| 17,885 | 9,584 | 23 |
19dfbf03a31338c1c9570f4f0c2a4a31c04fc290 | 1,496 | py | Python | urmovie/urls.py | xuyangliu/UR | 8a3c94dd6b6f16bf233167333464c0429ad269d8 | [
"Apache-2.0"
] | null | null | null | urmovie/urls.py | xuyangliu/UR | 8a3c94dd6b6f16bf233167333464c0429ad269d8 | [
"Apache-2.0"
] | null | null | null | urmovie/urls.py | xuyangliu/UR | 8a3c94dd6b6f16bf233167333464c0429ad269d8 | [
"Apache-2.0"
] | null | null | null | """UR URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
from urmovie.views import main_view,movie_view,actor_view,contact_view
"""
URMovie应用下的路由系统
"""
urlpatterns = [
path('', main_view.index),
path('index', main_view.index),
path('movie_class', main_view.movie_class),
path('actor_class', main_view.actor_class),
path('contact', main_view.contact),
url(r'^recommend-(?P<name>\d+)',movie_view.recommend),
url(r'^queryMovieByAge-(?P<age>\d+)-(?P<pageid>\d+)',movie_view.queryMovieByAge),
url(r'^queryMovieByCate-(?P<cate>\d+)-(?P<pageid>\d+)',movie_view.queryMovieByCate),
url(r'^queryMovie-(?P<id>\d+)',movie_view.queryMovie),
url(r'^queryActorByNation-(?P<nation>\d+)-(?P<pageid>\d+)',actor_view.queryActorByNation),
url(r'^queryActor-(?P<id>\d+)',actor_view.queryActor)
]
| 40.432432 | 94 | 0.70254 | """UR URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url,include
from urmovie.views import main_view,movie_view,actor_view,contact_view
"""
URMovie应用下的路由系统
"""
urlpatterns = [
path('', main_view.index),
path('index', main_view.index),
path('movie_class', main_view.movie_class),
path('actor_class', main_view.actor_class),
path('contact', main_view.contact),
url(r'^recommend-(?P<name>\d+)',movie_view.recommend),
url(r'^queryMovieByAge-(?P<age>\d+)-(?P<pageid>\d+)',movie_view.queryMovieByAge),
url(r'^queryMovieByCate-(?P<cate>\d+)-(?P<pageid>\d+)',movie_view.queryMovieByCate),
url(r'^queryMovie-(?P<id>\d+)',movie_view.queryMovie),
url(r'^queryActorByNation-(?P<nation>\d+)-(?P<pageid>\d+)',actor_view.queryActorByNation),
url(r'^queryActor-(?P<id>\d+)',actor_view.queryActor)
]
| 0 | 0 | 0 |
660211f47fb3b9811a63ab733b4ba1b3c7297670 | 341 | py | Python | final.py | tecind/wallpaper-changer | ffe65da635f5c59548ebd1d238ac748070f3cde7 | [
"MIT"
] | null | null | null | final.py | tecind/wallpaper-changer | ffe65da635f5c59548ebd1d238ac748070f3cde7 | [
"MIT"
] | null | null | null | final.py | tecind/wallpaper-changer | ffe65da635f5c59548ebd1d238ac748070f3cde7 | [
"MIT"
] | null | null | null | import os
from os.path import isfile, join
import time
import ctypes
folderpath = r"E:\All Projects\Auto Wallpaper"
all_files = [ f for f in os.listdir(folderpath) if isfile(join(folderpath, f))]
for image in all_files:
print(image)
ctypes.windll.user32.SystemParametersInfoW(20, 0, folderpath+ "\\" + image, 0)
time.sleep(1)
| 24.357143 | 82 | 0.72434 | import os
from os.path import isfile, join
import time
import ctypes
folderpath = r"E:\All Projects\Auto Wallpaper"
all_files = [ f for f in os.listdir(folderpath) if isfile(join(folderpath, f))]
for image in all_files:
print(image)
ctypes.windll.user32.SystemParametersInfoW(20, 0, folderpath+ "\\" + image, 0)
time.sleep(1)
| 0 | 0 | 0 |
06cb52e8eb547edf7ac66c5c42b3c1cc2c4098ba | 7,779 | py | Python | data/experiments/sock-echo/post-processing/plot_results.py | di-unipi-socc/yRCA | 9c75df2fb38a8bff198885008216eb644ccaf734 | [
"Apache-2.0"
] | null | null | null | data/experiments/sock-echo/post-processing/plot_results.py | di-unipi-socc/yRCA | 9c75df2fb38a8bff198885008216eb644ccaf734 | [
"Apache-2.0"
] | null | null | null | data/experiments/sock-echo/post-processing/plot_results.py | di-unipi-socc/yRCA | 9c75df2fb38a8bff198885008216eb644ccaf734 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import os
import re
plotsDir = "plots"
# function to add an output, if experiment and value are both defined
# function to parse a csv timeFile
# with lines s.t. "experiment,logFile,elapsedTime,fileSize,timePerMB"
# function to adapt a given "experimentValue"
# - if load rate (of the form "0.x"), translated to rate of requests/s
# - if probability (of the form "x%"), unchanged
# - if name of root causing service, translated to length of corresponding cascade
# function to plot "experiment" list
# function to print experiment results
if __name__ == "__main__":
print("Generating plots...",end="",flush=True)
# create folder where to store plots (if not existing)
if not os.path.exists(plotsDir):
os.makedirs(plotsDir)
# confige plt's defaults
plt.rcParams.update({'font.size': 28})
plt.figure(figsize=(7, 4.3))
# ----------------
# plot outputs
# ----------------
outputs = parseOutputs("outputs.txt")
for o in outputs["count"]:
plot("count",outputs["count"][o],"explanations",outputs["roots"][o],"root causes",o,6)
plot("success_percentage",outputs["accuracy"][o],None,None,None,o,100)
# ----------------
# plot times
# ----------------
times = parseTimes("times.csv")
for t in times:
plot("time",times[t],None,None,None,t,501)
print("done!")
printResults("count (cascades)",outputs["count"])
printResults("count (root causes)",outputs["roots"])
printResults("success_percentage",outputs["accuracy"])
printResults("times",times)
| 33.675325 | 114 | 0.59185 | import matplotlib.pyplot as plt
import numpy as np
import os
import re
plotsDir = "plots"
def parseOutputs(outputsFile):
out = { }
out["count"] = {}
out["roots"] = {}
out["accuracy"] = {}
outputs = open(outputsFile)
experiment = None
value = None
count = None
nFailures = None
noExps = None
roots = None
for outputLine in list(outputs):
if "logs_exp" in outputLine: # case: new experiment
#addOutput(out,experiment,value,nFailures,count,noExps,roots)
experiment = adaptLabel(outputLine[:-1])
out["count"][experiment] = []
out["roots"][experiment] = []
out["accuracy"][experiment] = []
value = None
count = None
roots = None
elif outputLine[0] == ">": # case: new experiment's value
#addOutput(out,experiment,value,nFailures,count,noExps,roots)
logFileInfo = re.match(r'> all-(?P<value>.*).log \((?P<n>.*) failures\)',outputLine)
value = adaptValue(logFileInfo.group("value"))
nFailures = int(logFileInfo.group("n"))
count = 0
noExps = 0
roots = 0
rootVals = {}
elif outputLine[0] == "[": # case: new solution
count += 1
elif "no failure cascade" in outputLine: # case: no solution found
noExps += 1
elif ": unreachable" in outputLine or ": <internal error>" in outputLine: # case: root cause
rootVal = outputLine.split(":")[0].replace(" ","")[2:]
if not rootVal in rootVals: # added only if not already considered as a root cause for current failure
rootVals[rootVal] = True
roots +=1
elif outputLine[0] == "{": # case: new failure for an experiment's value
rootVals = {}
elif outputLine == "\n":
addOutput(out,experiment,value,nFailures,count,noExps,roots)
outputs.close()
# sort experiments' lists by experiment value
for experiment in out["count"]:
out["count"][experiment].sort(key=lambda pair:pair[0])
for experiment in out["count"]:
out["roots"][experiment].sort(key=lambda pair:pair[0])
for experiment in out["accuracy"]:
out["accuracy"][experiment].sort(key=lambda pair:pair[0])
return out
# function to add an output, if experiment and value are both defined
def addOutput(outputs,experiment,value,nFailures,count,noExps,roots):
if experiment and value and count:
nExplainedFailures = nFailures - noExps
outputs["count"][experiment].append([value,count/nExplainedFailures])
outputs["roots"][experiment].append([value,roots/nExplainedFailures])
accuracy = nExplainedFailures * 100 / nFailures
outputs["accuracy"][experiment].append([value,accuracy])
# function to parse a csv timeFile
# with lines s.t. "experiment,logFile,elapsedTime,fileSize,timePerMB"
def parseTimes(timesFile):
times = {}
# process each csv line separately
csvTimes = open(timesFile)
for csvTime in list(csvTimes):
if csvTime == "\n":
continue # skip empty lines (if any)
splittedTime = csvTime.split(",")
# add "experiment" to "times", if not there already
experiment = adaptLabel(splittedTime[0])
if experiment not in times:
times[experiment] = []
# add pair [n,timePerMB], where n is the value used in the experiment run
# (n excerpted from logFile name)
logFile = splittedTime[1]
logFileInfo = re.match(r'all-(?P<value>.*).log',logFile)
value = adaptValue(logFileInfo.group("value"))
millisPerMB = float(splittedTime[4][:-1]) * 1000
times[experiment].append([value,millisPerMB])
# sort experiments' lists by experiment value
for experiment in times:
times[experiment].sort(key=lambda pair:pair[0])
return times
def adaptLabel(experimentLabel):
# excerpt label from experiment name
label = experimentLabel.split("_")[2]
label = re.sub("([A-Z])"," \g<0>",label).lower()
# add unit (if needed) based on type of experiment
if "rate" in label:
label += " (req/s)"
elif "probability" in label:
label += " (%)"
return label
# function to adapt a given "experimentValue"
# - if load rate (of the form "0.x"), translated to rate of requests/s
# - if probability (of the form "x%"), unchanged
# - if name of root causing service, translated to length of corresponding cascade
def adaptValue(experimentValue):
try: # case: numeric value
num = float(experimentValue)
if num < 1: # case: load rate
return int(1/num)
else: # case: percentage
return num
except: # case: name of root causing service
if experimentValue == "frontend":
return 1
elif experimentValue == "orders":
return 2
elif experimentValue == "shipping":
return 3
elif experimentValue == "rabbitMq":
return 4
# unknown cases
return None
# function to plot "experiment" list
def plot(pdfName,coord1,label1,coord2,label2,experimentName,yTop):
# configure plot
axes = plt.gca()
# plot x/y coordinates from list of pairs "coord1"
x1 = [p[0] for p in coord1]
y1 = [p[1] for p in coord1]
if label1 is not None:
plt.plot(x1,y1,":bo",label=label1)
else:
plt.plot(x1,y1,":bo")
# plot x/y coordinates from list of pairs "coord2" (if any)
if coord2 is not None:
x2 = [p[0] for p in coord2]
y2 = [p[1] for p in coord2]
if label1 is not None:
plt.plot(x2,y2,":g^",label=label2)
else:
plt.plot(x2,y2,":g^")
# plot legend, if any
if label1 or label2:
plt.legend(loc="upper left")
# configure x ticks
if len(x1) < 6:
xs = x1
else:
xs = []
i = 1
step = 2
while i < len(x1):
xs.append(x1[i])
i += step
axes.set_xticks(xs)
# configure y ticks
axes.set_ylim([0, yTop])
if yTop < 10:
axes.set_yticks(np.arange(0, yTop, 1))
else:
axes.set_yticks(np.arange(0,yTop,100))
# store plot on PDF
pdfName = plotsDir + "/" + experimentName.split(" ")[0] + "_" + pdfName + ".pdf"
plt.savefig(pdfName)
plt.clf()
# function to print experiment results
def printResults(heading,results):
    """Pretty-print a mapping of experiment names to (value, measurement) pairs."""
    banner = "* " * 3 + heading.upper() + " *" * 3
    print()
    print(banner)
    for name, pairs in results.items():
        print(name)
        for value, measurement in pairs:
            print(f" {value}\t{measurement:.3f}")
if __name__ == "__main__":
print("Generating plots...",end="",flush=True)
# create folder where to store plots (if not existing)
if not os.path.exists(plotsDir):
os.makedirs(plotsDir)
# confige plt's defaults
plt.rcParams.update({'font.size': 28})
plt.figure(figsize=(7, 4.3))
# ----------------
# plot outputs
# ----------------
outputs = parseOutputs("outputs.txt")
for o in outputs["count"]:
plot("count",outputs["count"][o],"explanations",outputs["roots"][o],"root causes",o,6)
plot("success_percentage",outputs["accuracy"][o],None,None,None,o,100)
# ----------------
# plot times
# ----------------
times = parseTimes("times.csv")
for t in times:
plot("time",times[t],None,None,None,t,501)
print("done!")
printResults("count (cascades)",outputs["count"])
printResults("count (root causes)",outputs["roots"])
printResults("success_percentage",outputs["accuracy"])
printResults("times",times)
| 6,011 | 0 | 156 |
ed9343bfb5b3ee8263500da7447ed1563a0c1cf8 | 10,469 | py | Python | tutorials/05-dcr/plot_fwd_2_dcr2d.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | 1 | 2022-02-18T16:31:27.000Z | 2022-02-18T16:31:27.000Z | tutorials/05-dcr/plot_fwd_2_dcr2d.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | null | null | null | tutorials/05-dcr/plot_fwd_2_dcr2d.py | ElliotCheung/simpeg | ce5bde154179ca63798a62a12787a7ec3535472c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
DC Resistivity Forward Simulation in 2.5D
=========================================
Here we use the module *SimPEG.electromagnetics.static.resistivity* to predict
DC resistivity data and plot using a pseudosection. In this tutorial, we focus
on the following:
- How to define the survey
- How to define the forward simulation
- How to predict normalized voltage data for a synthetic conductivity model
- How to include surface topography
- The units of the model and resulting data
"""
#########################################################################
# Import modules
# --------------
#
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import model_builder, surface2ind_topo
from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc
from SimPEG import maps, data
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static.utils.static_utils import (
generate_dcip_sources_line,
apparent_resistivity_from_voltage,
plot_pseudosection,
)
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
write_output = False
mpl.rcParams.update({"font.size": 16})
# sphinx_gallery_thumbnail_number = 3
###############################################################
# Defining Topography
# -------------------
#
# Here we define surface topography as an (N, 3) numpy array. Topography could
# also be loaded from a file. In our case, our survey takes place within a set
# of valleys that run North-South.
#
x_topo, y_topo = np.meshgrid(
np.linspace(-3000, 3000, 601), np.linspace(-3000, 3000, 101)
)
z_topo = 40.0 * np.sin(2 * np.pi * x_topo / 800) - 40.0
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
topo_xyz = np.c_[x_topo, y_topo, z_topo]
# Create 2D topography. Since our 3D topography only changes in the x direction,
# it is easy to define the 2D topography projected along the survey line. For
# arbitrary topography and for an arbitrary survey orientation, the user must
# define the 2D topography along the survey line.
topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0)
#####################################################################
# Create Dipole-Dipole Survey
# ---------------------------
#
# Here we define a single EW survey line that uses a dipole-dipole configuration.
# For the source, we must define the AB electrode locations. For the receivers
# we must define the MN electrode locations. Instead of creating the survey
# from scratch (see 1D example), we will use the *generate_dcip_sources_line* utility.
#
# Define survey line parameters
survey_type = "dipole-dipole"
dimension_type = "2D"
data_type = "volt"
end_locations = np.r_[-400.0, 400.0]
station_separation = 40.0
num_rx_per_src = 10
# Generate source list for DC survey line
source_list = generate_dcip_sources_line(
survey_type,
data_type,
dimension_type,
end_locations,
topo_2d,
num_rx_per_src,
station_separation,
)
# Define survey
survey = dc.survey.Survey(source_list, survey_type=survey_type)
###############################################################
# Create Tree Mesh
# ------------------
#
# Here, we create the Tree mesh that will be used to predict DC data.
#
dh = 4 # base cell width
dom_width_x = 3200.0 # domain width x
dom_width_z = 2400.0 # domain width z
nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x
nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z
# Define the base mesh
hx = [(dh, nbcx)]
hz = [(dh, nbcz)]
mesh = TreeMesh([hx, hz], x0="CN")
# Mesh refinement based on topography
mesh = refine_tree_xyz(
mesh,
topo_xyz[:, [0, 2]],
octree_levels=[0, 0, 4, 4],
method="surface",
finalize=False,
)
# Mesh refinement near transmitters and receivers. First we need to obtain the
# set of unique electrode locations.
electrode_locations = np.c_[
survey.locations_a,
survey.locations_b,
survey.locations_m,
survey.locations_n,
]
unique_locations = np.unique(
np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0
)
mesh = refine_tree_xyz(
mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False
)
# Refine core mesh region
xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(zp)]
mesh = refine_tree_xyz(
mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False
)
mesh.finalize()
###############################################################
# Create Conductivity Model and Mapping for Tree Mesh
# -----------------------------------------------------
#
# It is important that electrodes are not modeled as being in the air. Even if the
# electrodes are properly located along surface topography, they may lie above
# the discretized topography. This step is carried out to ensure all electrodes
# lie on the discretized surface.
#
# Define conductivity model in S/m (or resistivity model in Ohm m)
air_conductivity = 1e-8
background_conductivity = 1e-2
conductor_conductivity = 1e-1
resistor_conductivity = 1e-3
# Find active cells in forward modeling (cell below surface)
ind_active = surface2ind_topo(mesh, topo_xyz[:, [0, 2]])
# Define mapping from model to active cells
nC = int(ind_active.sum())
conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)
# Define model
conductivity_model = background_conductivity * np.ones(nC)
ind_conductor = model_builder.getIndicesSphere(np.r_[-120.0, -160.0], 60.0, mesh.gridCC)
ind_conductor = ind_conductor[ind_active]
conductivity_model[ind_conductor] = conductor_conductivity
ind_resistor = model_builder.getIndicesSphere(np.r_[120.0, -100.0], 60.0, mesh.gridCC)
ind_resistor = ind_resistor[ind_active]
conductivity_model[ind_resistor] = resistor_conductivity
# Plot Conductivity Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
norm = LogNorm(vmin=1e-3, vmax=1e-1)
ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7])
mesh.plot_image(
plotting_map * conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm}
)
ax1.set_xlim(-600, 600)
ax1.set_ylim(-600, 0)
ax1.set_title("Conductivity Model")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7])
cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12)
plt.show()
###############################################################
# Project Survey to Discretized Topography
# ----------------------------------------
#
# It is important that electrodes are not modeled as being in the air. Even if the
# electrodes are properly located along surface topography, they may lie above
# the discretized topography. This step is carried out to ensure all electrodes
# lie on the discretized surface.
#
survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
#######################################################################
# Predict DC Resistivity Data
# ---------------------------
#
# Here we predict DC resistivity data. If the keyword argument *sigmaMap* is
# defined, the simulation will expect a conductivity model. If the keyword
# argument *rhoMap* is defined, the simulation will expect a resistivity model.
#
simulation = dc.simulation_2d.Simulation2DNodal(
mesh, survey=survey, sigmaMap=conductivity_map, solver=Solver
)
# Predict the data by running the simulation. The data are the raw voltage in
# units of volts.
dpred = simulation.dpred(conductivity_model)
#######################################################################
# Plotting in Pseudo-Section
# --------------------------
#
# Here, we demonstrate how to plot 2D data in pseudo-section.
# First, we plot the voltages in pseudo-section as a scatter plot. This
# allows us to visualize the pseudo-sensitivity locations for our survey.
# Next, we plot the apparent conductivities in pseudo-section as a filled
# contour plot.
#
# Plot voltages pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
survey,
dobs=np.abs(dpred),
plot_type="scatter",
ax=ax1,
scale="log",
cbar_label="V/A",
scatter_opts={"cmap": mpl.cm.viridis},
)
ax1.set_title("Normalized Voltages")
plt.show()
# Get apparent conductivities from volts and survey geometry
apparent_conductivities = 1 / apparent_resistivity_from_voltage(survey, dpred)
# Plot apparent conductivity pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
survey,
dobs=apparent_conductivities,
plot_type="contourf",
ax=ax1,
scale="log",
cbar_label="S/m",
mask_topography=True,
contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
)
ax1.set_title("Apparent Conductivity")
plt.show()
#######################################################################
# Optional: Write out dpred
# -------------------------
#
# Write DC resistivity data, topography and true model
#
if write_output:
dir_path = os.path.dirname(__file__).split(os.path.sep)
dir_path.extend(["outputs"])
dir_path = os.path.sep.join(dir_path) + os.path.sep
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# Add 10% Gaussian noise to each datum
np.random.seed(225)
std = 0.05 * np.abs(dpred)
dc_noise = std * np.random.rand(len(dpred))
dobs = dpred + dc_noise
# Create a survey with the original electrode locations
# and not the shifted ones
# Generate source list for DC survey line
source_list = generate_dcip_sources_line(
survey_type,
data_type,
dimension_type,
end_locations,
topo_xyz,
num_rx_per_src,
station_separation,
)
survey_original = dc.survey.Survey(source_list)
# Write out data at their original electrode locations (not shifted)
data_obj = data.Data(survey_original, dobs=dobs, standard_deviation=std)
fname = dir_path + "dc_data.obs"
write_dcip2d_ubc(fname, data_obj, "volt", "dobs")
fname = dir_path + "topo_xyz.txt"
np.savetxt(fname, topo_xyz, fmt="%.4e")
| 31.250746 | 88 | 0.66845 | # -*- coding: utf-8 -*-
"""
DC Resistivity Forward Simulation in 2.5D
=========================================
Here we use the module *SimPEG.electromagnetics.static.resistivity* to predict
DC resistivity data and plot using a pseudosection. In this tutorial, we focus
on the following:
- How to define the survey
- How to define the forward simulation
- How to predict normalized voltage data for a synthetic conductivity model
- How to include surface topography
- The units of the model and resulting data
"""
#########################################################################
# Import modules
# --------------
#
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import model_builder, surface2ind_topo
from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc
from SimPEG import maps, data
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static.utils.static_utils import (
generate_dcip_sources_line,
apparent_resistivity_from_voltage,
plot_pseudosection,
)
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
write_output = False
mpl.rcParams.update({"font.size": 16})
# sphinx_gallery_thumbnail_number = 3
###############################################################
# Defining Topography
# -------------------
#
# Here we define surface topography as an (N, 3) numpy array. Topography could
# also be loaded from a file. In our case, our survey takes place within a set
# of valleys that run North-South.
#
x_topo, y_topo = np.meshgrid(
np.linspace(-3000, 3000, 601), np.linspace(-3000, 3000, 101)
)
z_topo = 40.0 * np.sin(2 * np.pi * x_topo / 800) - 40.0
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
topo_xyz = np.c_[x_topo, y_topo, z_topo]
# Create 2D topography. Since our 3D topography only changes in the x direction,
# it is easy to define the 2D topography projected along the survey line. For
# arbitrary topography and for an arbitrary survey orientation, the user must
# define the 2D topography along the survey line.
topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0)
#####################################################################
# Create Dipole-Dipole Survey
# ---------------------------
#
# Here we define a single EW survey line that uses a dipole-dipole configuration.
# For the source, we must define the AB electrode locations. For the receivers
# we must define the MN electrode locations. Instead of creating the survey
# from scratch (see 1D example), we will use the *generate_dcip_sources_line* utility.
#
# Define survey line parameters
survey_type = "dipole-dipole"
dimension_type = "2D"
data_type = "volt"
end_locations = np.r_[-400.0, 400.0]
station_separation = 40.0
num_rx_per_src = 10
# Generate source list for DC survey line
source_list = generate_dcip_sources_line(
survey_type,
data_type,
dimension_type,
end_locations,
topo_2d,
num_rx_per_src,
station_separation,
)
# Define survey
survey = dc.survey.Survey(source_list, survey_type=survey_type)
###############################################################
# Create Tree Mesh
# ------------------
#
# Here, we create the Tree mesh that will be used to predict DC data.
#
dh = 4 # base cell width
dom_width_x = 3200.0 # domain width x
dom_width_z = 2400.0 # domain width z
nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x
nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z
# Define the base mesh
hx = [(dh, nbcx)]
hz = [(dh, nbcz)]
mesh = TreeMesh([hx, hz], x0="CN")
# Mesh refinement based on topography
mesh = refine_tree_xyz(
mesh,
topo_xyz[:, [0, 2]],
octree_levels=[0, 0, 4, 4],
method="surface",
finalize=False,
)
# Mesh refinement near transmitters and receivers. First we need to obtain the
# set of unique electrode locations.
electrode_locations = np.c_[
survey.locations_a,
survey.locations_b,
survey.locations_m,
survey.locations_n,
]
unique_locations = np.unique(
np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0
)
mesh = refine_tree_xyz(
mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False
)
# Refine core mesh region
xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(zp)]
mesh = refine_tree_xyz(
mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False
)
mesh.finalize()
###############################################################
# Create Conductivity Model and Mapping for Tree Mesh
# -----------------------------------------------------
#
# It is important that electrodes are not modeled as being in the air. Even if the
# electrodes are properly located along surface topography, they may lie above
# the discretized topography. This step is carried out to ensure all electrodes
# lie on the discretized surface.
#
# Define conductivity model in S/m (or resistivity model in Ohm m)
air_conductivity = 1e-8
background_conductivity = 1e-2
conductor_conductivity = 1e-1
resistor_conductivity = 1e-3
# Find active cells in forward modeling (cell below surface)
ind_active = surface2ind_topo(mesh, topo_xyz[:, [0, 2]])
# Define mapping from model to active cells
nC = int(ind_active.sum())
conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)
# Define model
conductivity_model = background_conductivity * np.ones(nC)
ind_conductor = model_builder.getIndicesSphere(np.r_[-120.0, -160.0], 60.0, mesh.gridCC)
ind_conductor = ind_conductor[ind_active]
conductivity_model[ind_conductor] = conductor_conductivity
ind_resistor = model_builder.getIndicesSphere(np.r_[120.0, -100.0], 60.0, mesh.gridCC)
ind_resistor = ind_resistor[ind_active]
conductivity_model[ind_resistor] = resistor_conductivity
# Plot Conductivity Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
norm = LogNorm(vmin=1e-3, vmax=1e-1)
ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7])
mesh.plot_image(
plotting_map * conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm}
)
ax1.set_xlim(-600, 600)
ax1.set_ylim(-600, 0)
ax1.set_title("Conductivity Model")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7])
cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12)
plt.show()
###############################################################
# Project Survey to Discretized Topography
# ----------------------------------------
#
# It is important that electrodes are not modeled as being in the air. Even if the
# electrodes are properly located along surface topography, they may lie above
# the discretized topography. This step is carried out to ensure all electrodes
# lie on the discretized surface.
#
survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
#######################################################################
# Predict DC Resistivity Data
# ---------------------------
#
# Here we predict DC resistivity data. If the keyword argument *sigmaMap* is
# defined, the simulation will expect a conductivity model. If the keyword
# argument *rhoMap* is defined, the simulation will expect a resistivity model.
#
simulation = dc.simulation_2d.Simulation2DNodal(
mesh, survey=survey, sigmaMap=conductivity_map, solver=Solver
)
# Predict the data by running the simulation. The data are the raw voltage in
# units of volts.
dpred = simulation.dpred(conductivity_model)
#######################################################################
# Plotting in Pseudo-Section
# --------------------------
#
# Here, we demonstrate how to plot 2D data in pseudo-section.
# First, we plot the voltages in pseudo-section as a scatter plot. This
# allows us to visualize the pseudo-sensitivity locations for our survey.
# Next, we plot the apparent conductivities in pseudo-section as a filled
# contour plot.
#
# Plot voltages pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
survey,
dobs=np.abs(dpred),
plot_type="scatter",
ax=ax1,
scale="log",
cbar_label="V/A",
scatter_opts={"cmap": mpl.cm.viridis},
)
ax1.set_title("Normalized Voltages")
plt.show()
# Get apparent conductivities from volts and survey geometry
apparent_conductivities = 1 / apparent_resistivity_from_voltage(survey, dpred)
# Plot apparent conductivity pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
survey,
dobs=apparent_conductivities,
plot_type="contourf",
ax=ax1,
scale="log",
cbar_label="S/m",
mask_topography=True,
contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
)
ax1.set_title("Apparent Conductivity")
plt.show()
#######################################################################
# Optional: Write out dpred
# -------------------------
#
# Write DC resistivity data, topography and true model
#
if write_output:
dir_path = os.path.dirname(__file__).split(os.path.sep)
dir_path.extend(["outputs"])
dir_path = os.path.sep.join(dir_path) + os.path.sep
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# Add 10% Gaussian noise to each datum
np.random.seed(225)
std = 0.05 * np.abs(dpred)
dc_noise = std * np.random.rand(len(dpred))
dobs = dpred + dc_noise
# Create a survey with the original electrode locations
# and not the shifted ones
# Generate source list for DC survey line
source_list = generate_dcip_sources_line(
survey_type,
data_type,
dimension_type,
end_locations,
topo_xyz,
num_rx_per_src,
station_separation,
)
survey_original = dc.survey.Survey(source_list)
# Write out data at their original electrode locations (not shifted)
data_obj = data.Data(survey_original, dobs=dobs, standard_deviation=std)
fname = dir_path + "dc_data.obs"
write_dcip2d_ubc(fname, data_obj, "volt", "dobs")
fname = dir_path + "topo_xyz.txt"
np.savetxt(fname, topo_xyz, fmt="%.4e")
| 0 | 0 | 0 |
e37e584958a6aca16c3f35896e29d1a83d9ee805 | 1,023 | py | Python | tools/clang-tidy.py | qftphys/Simulate-the-non-equilibrium-dynamics-of-Fermionic-systems | 48d36fecbe4bc12af90f104cdf1f9f68352c508c | [
"MIT"
] | 2 | 2021-01-18T14:35:43.000Z | 2022-03-22T15:12:49.000Z | tools/clang-tidy.py | f-koehler/ieompp | 48d36fecbe4bc12af90f104cdf1f9f68352c508c | [
"MIT"
] | null | null | null | tools/clang-tidy.py | f-koehler/ieompp | 48d36fecbe4bc12af90f104cdf1f9f68352c508c | [
"MIT"
] | null | null | null | #!/bin/env python3
from os import walk
from os.path import abspath, dirname, join, realpath, splitext
from subprocess import call
from sys import argv, stderr
# Checks passed to clang-tidy; glob patterns enable whole check families.
checks = [
    "clang-analyzer-*",
    "cppcoreguidelines-*",
    "llvm-namespace-comment",
    "modernize-*",
    "performance-*",
    "readability-*",
]
if __name__ == "__main__":
    # The build directory (holding compile_commands.json) is required so
    # clang-tidy can look up each file's compilation flags.
    if len(argv) < 2:
        print("Specify path to build directory", file=stderr)
        # `exit()` is injected by the `site` module and is not guaranteed to
        # exist in all run modes; raising SystemExit is the portable form.
        raise SystemExit(1)
    build_dir = argv[1]
    # Project root is the parent of the directory containing this script.
    project_dir = abspath(join(dirname(realpath(__file__)), ".."))
    for root, _, files in walk("src"):
        for file in files:
            path = join(root, file)
            # Only run on .cpp translation units; project headers are still
            # covered via -header-filter below.
            if splitext(path)[1] != ".cpp":
                continue
            cmd = [
                "clang-tidy",
                "-p", build_dir,
                "-header-filter=" + join(project_dir, "include", "ieompp")+".*",
                "-checks=" + ','.join(checks),
                "-fix",
                path
            ]
            print(path)
            call(cmd)
| 26.230769 | 80 | 0.514174 | #!/bin/env python3
from os import walk
from os.path import abspath, dirname, join, realpath, splitext
from subprocess import call
from sys import argv, stderr
# Checks passed to clang-tidy; glob patterns enable whole check families.
checks = [
    "clang-analyzer-*",
    "cppcoreguidelines-*",
    "llvm-namespace-comment",
    "modernize-*",
    "performance-*",
    "readability-*",
]
if __name__ == "__main__":
    # The build directory (holding compile_commands.json) is required so
    # clang-tidy can look up each file's compilation flags.
    if len(argv) < 2:
        print("Specify path to build directory", file=stderr)
        # `exit()` is injected by the `site` module and is not guaranteed to
        # exist in all run modes; raising SystemExit is the portable form.
        raise SystemExit(1)
    build_dir = argv[1]
    # Project root is the parent of the directory containing this script.
    project_dir = abspath(join(dirname(realpath(__file__)), ".."))
    for root, _, files in walk("src"):
        for file in files:
            path = join(root, file)
            # Only run on .cpp translation units; project headers are still
            # covered via -header-filter below.
            if splitext(path)[1] != ".cpp":
                continue
            cmd = [
                "clang-tidy",
                "-p", build_dir,
                "-header-filter=" + join(project_dir, "include", "ieompp")+".*",
                "-checks=" + ','.join(checks),
                "-fix",
                path
            ]
            print(path)
            call(cmd)
| 0 | 0 | 0 |
9953aba771c62a8b944af6fde0c4471fab6ddade | 2,949 | py | Python | simclr/datasets.py | k-stacke/ssl-pathology | d440ce11712a5c1b6631d698dc3cafe7c04e2786 | [
"Apache-2.0"
] | 2 | 2021-12-22T15:18:02.000Z | 2022-03-10T11:46:38.000Z | simclr/datasets.py | k-stacke/ssl-pathology | d440ce11712a5c1b6631d698dc3cafe7c04e2786 | [
"Apache-2.0"
] | null | null | null | simclr/datasets.py | k-stacke/ssl-pathology | d440ce11712a5c1b6631d698dc3cafe7c04e2786 | [
"Apache-2.0"
] | null | null | null | import random
from PIL import Image
import lmdb
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision
from torchvision.transforms import transforms
| 28.631068 | 95 | 0.596134 | import random
from PIL import Image
import lmdb
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision
from torchvision.transforms import transforms
class ImagePatchesDataset(Dataset):
    """Dataset of image patches listed in a dataframe and loaded from disk.

    Each item yields two views of the patch (for contrastive training),
    its integer label, a patch identifier and the originating slide id.
    """
    def __init__(self, opt, dataframe, image_dir, transform=None, label_enum=None):
        self.opt = opt
        self.dataframe = dataframe
        self.image_dir = image_dir
        self.transform = transform
        self.image_size = opt.image_size
        # Default label mapping unless the caller provides one.
        self.label_enum = label_enum if label_enum is not None else {'TUMOR': 1, 'NONTUMOR': 0}
        print(self.label_enum)

    def __len__(self):
        return len(self.dataframe.index)

    def get_views(self, image):
        first_view = self.transform(image)
        # Supervised training needs no second view; substitute a zero tensor.
        if self.opt.train_supervised:
            second_view = torch.zeros_like(first_view)
        else:
            second_view = self.transform(image)
        return first_view, second_view

    def __getitem__(self, index):
        row = self.dataframe.iloc[index]
        path = f"{self.image_dir}/{row.filename}"
        try:
            image = Image.open(path)  # PIL image
        except IOError:
            print(f"could not open {path}")
            return None
        view_a, view_b = self.get_views(image)
        label = self.label_enum[row.label]
        try:
            id_ = row.patch_id
        except AttributeError:
            # Dataframes without a patch_id column fall back to the filename.
            id_ = row.filename
        return view_a, view_b, label, id_, row.slide_id
class LmdbDataset(torch.utils.data.Dataset):
    """Dataset backed by an LMDB file of fixed-size (224, 224, 3) uint8 images.

    Each item returns two independently transformed views of the same image,
    as used for contrastive training.
    """
    def __init__(self, lmdb_path, transform):
        # When True, __getitem__ walks the database with a cursor instead of
        # random key lookups (sequential access only; `index` is ignored).
        self.cursor_access = False
        self.lmdb_path = lmdb_path
        self.image_dimensions = (224, 224, 3) # size of files in lmdb
        self.transform = transform
        self._init_db()
    def __len__(self):
        return self.length
    def _init_db(self):
        """Open the LMDB environment read-only and cache the list of entry keys."""
        num_readers = 999
        self.env = lmdb.open(self.lmdb_path,
                             max_readers=num_readers,
                             readonly=1,
                             lock=0,
                             readahead=0,
                             meminit=0)
        self.txn = self.env.begin(write=False)
        self.cursor = self.txn.cursor()
        self.length = self.txn.stat()['entries']
        print('Generating keys to lmdb dataset, this takes a while...')
        # Full key scan; required to support random access by integer index.
        self.keys = [key for key, _ in self.txn.cursor()] # not so fast...
    def close(self):
        """Close the underlying LMDB environment."""
        self.env.close()
    def __getitem__(self, index):
        """Return two transformed views of one image.

        Cursor iteration in LMDB is much faster than random key access, so a
        sequential mode is available via `self.cursor_access`.
        """
        if self.cursor_access:
            # Sequential read: advance the cursor, wrapping back to the first
            # record at the end; `index` is ignored in this mode.
            if not self.cursor.next():
                self.cursor.first()
            image = self.cursor.value()
        else:
            image = self.txn.get(self.keys[index])
        # Raw bytes -> HWC uint8 array -> PIL image.
        image = np.frombuffer(image, dtype=np.uint8)
        image = image.reshape(self.image_dimensions)
        image = Image.fromarray(image)
        pos_1 = self.transform(image)
        pos_2 = self.transform(image)
        return pos_1, pos_2
| 1,894 | 703 | 153 |
46359266b32939e9cfad0757def494294aa7883d | 11,105 | py | Python | datasets/augments.py | milesgray/ImageFunctions | 35e4423b94149b0ba291eafb0cd98260a70d5f31 | [
"Apache-2.0"
] | null | null | null | datasets/augments.py | milesgray/ImageFunctions | 35e4423b94149b0ba291eafb0cd98260a70d5f31 | [
"Apache-2.0"
] | null | null | null | datasets/augments.py | milesgray/ImageFunctions | 35e4423b94149b0ba291eafb0cd98260a70d5f31 | [
"Apache-2.0"
] | null | null | null | import pathlib
import numbers
import random
from typing import Any, Optional
import numpy as np
import torch
from torch import Tensor
from torch.jit.annotations import List, Tuple
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torchvision.datasets as datasets
import torchvision.transforms.functional as TF
from torchvision.transforms import InterpolationMode
from PIL import Image
import cv2
try:
import accimage
except ImportError:
accimage = None
# Custom Augment Classes that can be put into Compose
def apply_color_distortion(x: Tensor, bright: float=1., contrast: float=1., saturation: float=1., hue: float=0., gamma: float=1.) -> Tensor:
    """Deterministically apply torchvision's color adjustments to an image.

    Unlike ``transforms.ColorJitter``, which samples its factors at random,
    this applies a fixed contrast, brightness, saturation, hue shift and
    gamma correction, in that order.

    Args:
        x: image to adjust (PIL Image or Tensor).
        bright (float, optional): brightness factor; any non-negative number.
            0 gives a black image, 1 the original, 2 doubles the brightness.
            Defaults to 1..
        contrast (float, optional): contrast factor; any non-negative number.
            0 gives a solid gray image, 1 the original, 2 doubles the
            contrast. Defaults to 1..
        saturation (float, optional): saturation factor; 0 gives a black and
            white image, 1 the original, 2 doubles the saturation.
            Defaults to 1..
        hue (float, optional): hue channel shift in [-0.5, 0.5]; 0 means no
            shift, while -0.5 and 0.5 give complete hue reversal
            (complementary colors). Defaults to 0..
        gamma (float, optional): non-negative gamma correction exponent;
            values > 1 darken shadows, values < 1 lighten dark regions.
            Defaults to 1..

    Returns:
        PIL Image or Tensor: the adjusted image, same type as the input.
    """
    # Fixed adjustment pipeline, applied in the documented order.
    adjustments = (
        (TF.adjust_contrast, contrast),
        (TF.adjust_brightness, bright),
        (TF.adjust_saturation, saturation),
        (TF.adjust_hue, hue),
        (TF.adjust_gamma, gamma),
    )
    for adjust, amount in adjustments:
        x = adjust(x, amount)
    return x
| 37.140468 | 140 | 0.585592 | import pathlib
import numbers
import random
from typing import Any, Optional
import numpy as np
import torch
from torch import Tensor
from torch.jit.annotations import List, Tuple
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import torchvision.datasets as datasets
import torchvision.transforms.functional as TF
from torchvision.transforms import InterpolationMode
from PIL import Image
import cv2
try:
import accimage
except ImportError:
accimage = None
def _is_pil_image(img):
    """Return True if *img* is a PIL image (or an accimage image when available)."""
    valid_types = (Image.Image,) if accimage is None else (Image.Image, accimage.Image)
    return isinstance(img, valid_types)
def _is_numpy(img):
return isinstance(img, np.ndarray)
def _is_numpy_image(img):
return img.ndim in {2, 3}
def _get_image_size(img):
    """Return (width, height) of a PIL image or a tensor with >2 dimensions.

    Raises:
        TypeError: if *img* is neither a PIL image nor such a tensor.
    """
    if _is_pil_image(img):
        return img.size
    if isinstance(img, torch.Tensor) and img.dim() > 2:
        # Tensor shape ends with (H, W); reverse the slice to report (W, H).
        return img.shape[-2:][::-1]
    raise TypeError("Unexpected type {}".format(type(img)))
# Custom Augment Classes that can be put into Compose
class MultiCrop:
    """Produce ``count`` random crops of an image, each resized to ``resize_size``.

    ``__call__`` returns the resized crops together with the crop coordinates
    scaled to the resized frame; with ``return_original=True`` the un-resized
    crops are also returned (useful for super-resolution style training).
    Images smaller than ``crop_size`` are first upscaled (aspect ratio
    preserved) so a valid crop always exists.
    """

    def __init__(self, crop_size, resize_size,
                 count=5,
                 padding=None,
                 return_original=False,
                 pad_if_needed=False,
                 fill=0,
                 padding_mode='constant',
                 interpolation=InterpolationMode.BILINEAR):
        # Sizes are normalized to (height, width) tuples.
        if isinstance(crop_size, numbers.Number):
            self.crop_size = (int(crop_size), int(crop_size))
        else:
            self.crop_size = crop_size
        self.count = count
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode
        self.return_original = return_original
        if isinstance(resize_size, numbers.Number):
            self.resize_size = (int(resize_size), int(resize_size))
        else:
            self.resize_size = resize_size
        self.interp = interpolation
        self.resizecrop = transforms.Resize(self.resize_size, interpolation=self.interp)

    @staticmethod
    def get_params(img, output_size):
        """Get parameters for ``crop`` for a random crop.

        Args:
            img (PIL Image): Image to be cropped.
            output_size (tuple): Expected output size of the crop, (h, w).

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop``; ``i`` is the
            top (row) offset and ``j`` the left (column) offset.
        """
        w, h = _get_image_size(img)
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw

    def __call__(self, img):
        """Return ``count`` resized random crops (plus originals/coords).

        Returns:
            (results, coords) or (results, originals, coords) when
            ``return_original`` is set.
        """
        img = self._check_size(img)
        if self.return_original:
            originals = []
        results = []
        coords = []
        for _ in range(self.count):
            data, coord = self._random_crop(img)
            if self.return_original:
                originals.append(data.copy())
            data = self.resizecrop(data)
            results.append(data)
            coords.append(self._resize_coord(coord))
        if self.return_original:
            return (results, originals, coords)
        return (results, coords)

    def _check_size(self, x):
        """Upscale ``x`` (aspect ratio preserved) until a ``crop_size`` crop fits.

        Also caches the current image size in ``self.w``/``self.h``.
        """
        # BUGFIX: _get_image_size returns (width, height); the original code
        # unpacked it as (self.h, self.w), swapping the two dimensions.
        self.w, self.h = _get_image_size(x)
        total_h = self.crop_size[0]
        total_w = self.crop_size[1]
        if self.h >= total_h and self.w >= total_w:
            # Image is already large enough for a valid crop.
            return x
        # Smallest uniform scale factor that makes BOTH sides at least the
        # crop size (the original mixed height/width in this computation).
        ratio_r = max(total_h / self.h, total_w / self.w)
        # int() truncation could leave a side one pixel short of the crop
        # size; clamp up to the crop dimensions to stay safe.
        resize_width = max(int(self.w * ratio_r), total_w)
        resize_height = max(int(self.h * ratio_r), total_h)
        if _is_pil_image(x):
            # PIL's resize takes (width, height).
            x = x.resize((resize_width, resize_height))
        elif isinstance(x, torch.Tensor) and x.dim() > 2:
            # BUGFIX: the original referenced an undefined name ``img`` here
            # (NameError) and used Tensor.resize, which reinterprets storage
            # instead of interpolating. Use the functional resize, which
            # expects (h, w) order.
            x = TF.resize(x, [resize_height, resize_width], interpolation=self.interp)
        # Cache the new size.
        self.w, self.h = _get_image_size(x)
        return x

    def _random_crop(self, img):
        """Randomly crop ``img`` to ``crop_size``.

        Args:
            img (PIL Image): Image to be cropped.

        Returns:
            tuple: (cropped image, (x1, y1, x2, y2, h, w)) where ``x`` runs
            along the height axis and ``y`` along the width axis, matching
            the (i, j) = (top, left) convention of ``get_params``.
        """
        # BUGFIX: the pad calls below referenced an undefined name ``F``;
        # torchvision's functional module is imported as ``TF``.
        # NOTE(review): the pad path indexes img.size, so it assumes a PIL
        # image — confirm tensors never reach here with pad options set.
        if self.padding is not None:
            img = TF.pad(img, self.padding, self.fill, self.padding_mode)
        # Pad the width if needed.
        if self.pad_if_needed and img.size[0] < self.crop_size[1]:
            img = TF.pad(img, (self.crop_size[1] - img.size[0], 0), self.fill, self.padding_mode)
        # Pad the height if needed.
        if self.pad_if_needed and img.size[1] < self.crop_size[0]:
            img = TF.pad(img, (0, self.crop_size[0] - img.size[1]), self.fill, self.padding_mode)
        i, j, h, w = self.get_params(img, self.crop_size)
        x1 = i
        y1 = j
        x2 = x1 + h
        y2 = y1 + w
        return TF.crop(img, i, j, h, w), (x1, y1, x2, y2, h, w)

    def _resize_coord(self, coord):
        """Scale crop coordinates by the crop -> resize scaling factors.

        ``x``/``h`` run along the height axis and ``y``/``w`` along the width
        axis (see ``_random_crop``), so they are scaled by the height and
        width ratios respectively.
        """
        # BUGFIX: both ratios previously used resize_size[0] / crop_size[1],
        # mixing the height and width dimensions (only correct for squares).
        ratio_x = self.resize_size[0] / self.crop_size[0]  # height scale
        ratio_y = self.resize_size[1] / self.crop_size[1]  # width scale
        x1 = int(coord[0] * ratio_x)
        y1 = int(coord[1] * ratio_y)
        x2 = int(coord[2] * ratio_x)
        y2 = int(coord[3] * ratio_y)
        h = int(coord[4] * ratio_x)
        w = int(coord[5] * ratio_y)
        return (x1, y1, x2, y2, h, w)
class BuildOutput:
    """Convert ``MultiCrop`` output into normalized tensors plus a coord label.

    In ``super_res`` mode the un-resized original crops are normalized and
    returned as a second tensor stack.
    """

    def __init__(self, mean, std, super_res=False):
        self.mean = mean
        self.std = std
        self.super_res = super_res

    def _stack_normalized(self, crops):
        """Stack HWC crops into one normalized float tensor of shape (N, C, H, W)."""
        tensors = []
        for crop in crops:
            # HWC -> CHW, float32, without an extra copy when possible.
            chw = np.array(crop, np.float32, copy=False).transpose((2, 0, 1))
            tensor = torch.from_numpy(chw).contiguous()
            tensors.append(transforms.Normalize(self.mean, self.std, inplace=True)(tensor))
        return torch.stack(tensors)

    def __call__(self, x):
        if self.super_res:
            crops, originals, coords = x[0], x[1], x[2]
        else:
            crops, coords = x[0], x[1]
            originals = None
        data = self._stack_normalized(crops)
        label = torch.Tensor(coords)
        if originals is None:
            return data, label
        return data, self._stack_normalized(originals), label
class RandomGaussianBlur:
    """Apply an OpenCV Gaussian blur with probability ``p``.

    ``p`` may be given as a float probability or as an integer percentage
    (values >= 100 mean "always"). ``window`` is the (square, odd) kernel
    size. Sigma is sampled uniformly from [0.1, 2.0) per call. The input is
    returned unchanged (same object) when no blur is applied; otherwise an
    ndarray is returned.
    """

    def __init__(self, p=0.5, window=23):
        self.window = window
        # Normalize p to a float probability in [0, 1].
        if isinstance(p, int):
            self.p = p / 100 if p < 100 else 1.0
        elif isinstance(p, float):
            self.p = p
        else:
            self.p = float(p)

    def __call__(self, img):
        # BUGFIX: the original returned the *unblurred* image when
        # rand() < p, i.e. it blurred with probability (1 - p). Blur is now
        # applied with probability p, matching the RandomApply(p=p)
        # convention used by get_blur() in this module.
        if np.random.rand() >= self.p:
            return img
        sigma = np.random.rand() * 1.9 + 0.1
        return cv2.GaussianBlur(np.asarray(img), (self.window, self.window), sigma)
def get_blur(p=0.5, s=1.0, kernel_size=23, sigma=(0.1, 2.0)):
    """Build a transform that applies Gaussian blur with probability ``p``.

    ``s`` scales the kernel size (truncated to int); ``sigma`` is the
    (min, max) range torchvision samples sigma from.
    """
    scaled_kernel = int(kernel_size * s)
    blur_op = transforms.GaussianBlur(scaled_kernel, sigma=sigma)
    return transforms.RandomApply([blur_op], p=p)
def get_color_distortion(s=1.0, use_grayscale=True):
    """Build a SimCLR-style color-distortion transform.

    ``s`` is the strength of the color jitter; random grayscale (p=0.2) is
    appended when ``use_grayscale`` is True.
    """
    jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
    steps = [transforms.RandomApply([jitter], p=0.8)]
    if use_grayscale:
        steps.append(transforms.RandomGrayscale(p=0.2))
    return transforms.Compose(steps)
def apply_color_distortion(x: Tensor, bright: float = 1., contrast: float = 1.,
                           saturation: float = 1., hue: float = 0.,
                           gamma: float = 1.) -> Tensor:
    r"""Deterministically apply torchvision's HSV + gamma color adjustments.

    Unlike ``ColorJitter``, nothing is sampled: the exact factors given are
    applied, in the fixed order contrast -> brightness -> saturation -> hue
    -> gamma.

    Args:
        x: Image to adjust.
        bright: Brightness factor; 0 gives a black image, 1 the original,
            2 doubles the brightness. Must be non-negative.
        contrast: Contrast factor; 0 gives solid gray, 1 the original,
            2 doubles the contrast. Must be non-negative.
        saturation: Saturation factor; 0 gives black-and-white, 1 the
            original, 2 doubles the saturation.
        hue: Hue-channel shift in [-0.5, 0.5]; 0 leaves hue unchanged and
            both extremes give complete hue reversal in HSV space.
        gamma: Non-negative :math:`\gamma`; values > 1 darken shadows while
            values < 1 lighten dark regions.

    Returns:
        The adjusted image.
    """
    adjust = transforms.functional
    x = adjust.adjust_contrast(x, contrast)
    x = adjust.adjust_brightness(x, bright)
    x = adjust.adjust_saturation(x, saturation)
    x = adjust.adjust_hue(x, hue)
    return adjust.adjust_gamma(x, gamma)
| 4,263 | 3,838 | 312 |
a8405610799ce8775ba88ab0f6c2fc30b7ca9b2a | 3,389 | py | Python | examples/raw_data_processing/a_02_basic_filters.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 3 | 2022-01-23T19:33:32.000Z | 2022-03-14T10:29:36.000Z | examples/raw_data_processing/a_02_basic_filters.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 2 | 2022-03-02T20:45:30.000Z | 2022-03-22T18:49:24.000Z | examples/raw_data_processing/a_02_basic_filters.py | NREL/flasc | ac734892efc1bc7684e2393ffa1ce7a97a54efa1 | [
"Apache-2.0"
] | 4 | 2022-02-17T18:40:36.000Z | 2022-03-24T05:44:31.000Z | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
from matplotlib import pyplot as plt
import pandas as pd
from flasc.dataframe_operations import (
dataframe_filtering as dff,
dataframe_manipulations as dfm,
)
if __name__ == "__main__":
    # In this script, we do some very basic filtering steps, such as filtering
    # for negative wind speeds and power productions. We also filter the data
    # by one or multiple variables that inherently already tells us if data
    # is good or bad according to the data logger/turbine itself. In our case,
    # this self-flagged variable is "is_operational_normal_00x".
    # Load data and get properties.
    # NOTE(review): load_data() is not defined in this chunk of the file —
    # presumably it reads the common-format feather file; confirm upstream.
    df = load_data()
    num_turbines = dfm.get_num_turbines(df)
    # Output locations are relative to this script's directory.
    root_path = os.path.dirname(os.path.abspath(__file__))
    out_path = os.path.join(root_path, "data", "02_basic_filtered")
    figs_path = os.path.join(out_path, "figures")
    os.makedirs(figs_path, exist_ok=True)
    # Basic filters: address self flags and obviously wrong points.
    for ti in range(num_turbines):
        # Boolean Series marking bad rows for turbine `ti`; any True means
        # the row is faulty for this turbine.
        conds = [
            ~df["is_operation_normal_{:03d}".format(ti)],  # Self-reported fault status
            df["ws_{:03d}".format(ti)] <= 0.0,  # Non-physical (<= 0) wind speeds
            df["pow_{:03d}".format(ti)] <= 0.0,
        ]  # Non-physical (<= 0) power
        # OR all conditions into a single combined mask (conds[0] is OR'ed
        # with itself once, which is harmless).
        conds_combined = conds[0]
        for cond in conds:
            conds_combined = conds_combined | cond
        # Plot time vs filtered data and save one figure per turbine.
        fig, ax = dff.plot_highlight_data_by_conds(df, conds, ti)
        ax.legend(
            ["All data", "Bad self-status", "Negative WS", "Negative power"]
        )
        fp = os.path.join(figs_path, "basic_filtering_%03d.png" % ti)
        print("Saving figure to {:s} for turbine {:03d}.".format(fp, ti))
        fig.savefig(fp, dpi=200)
        plt.close(fig)
        # Mark the combined-condition rows as faulty in the dataframe.
        df = dff.df_mark_turbdata_as_faulty(
            df, conds_combined, ti, verbose=True
        )
    # The self-status columns are no longer needed after filtering.
    self_status_cols = [
        "is_operation_normal_%03d" % ti for ti in range(num_turbines)
    ]
    df = df.drop(columns=self_status_cols)  # Remove self status columns
    # Save the filtered dataset as a single feather file.
    # NOTE(review): "Savig" below is a typo in the runtime message; left
    # untouched here since this edit changes comments only.
    fout = os.path.join(out_path, "scada_data_60s.ftr")
    print("Savig filtered data to {:s}.".format(fout))
    os.makedirs(out_path, exist_ok=True)
    # Feather requires a default RangeIndex; drop "time" index only if it is
    # already present as a column.
    df = df.reset_index(drop=("time" in df.columns))
    df.to_feather(fout)
| 37.655556 | 79 | 0.680732 | # Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
from matplotlib import pyplot as plt
import pandas as pd
from flasc.dataframe_operations import (
dataframe_filtering as dff,
dataframe_manipulations as dfm,
)
def load_data():
    """Load the 60-second SCADA dataset from the common-format directory."""
    print("Loading .ftr data. This may take a minute or two...")
    here = os.path.dirname(os.path.abspath(__file__))
    fpath = os.path.join(here, "data", "01_in_common_df_format", "scada_data_60s.ftr")
    return pd.read_feather(fpath)
if __name__ == "__main__":
    # In this script, we do some very basic filtering steps, such as filtering
    # for negative wind speeds and power productions. We also filter the data
    # by one or multiple variables that inherently already tells us if data
    # is good or bad according to the data logger/turbine itself. In our case,
    # this self-flagged variable is "is_operational_normal_00x".
    # Load data and get properties.
    df = load_data()
    num_turbines = dfm.get_num_turbines(df)
    # Output locations are relative to this script's directory.
    root_path = os.path.dirname(os.path.abspath(__file__))
    out_path = os.path.join(root_path, "data", "02_basic_filtered")
    figs_path = os.path.join(out_path, "figures")
    os.makedirs(figs_path, exist_ok=True)
    # Basic filters: address self flags and obviously wrong points.
    for ti in range(num_turbines):
        # Boolean Series marking bad rows for turbine `ti`.
        conds = [
            ~df["is_operation_normal_{:03d}".format(ti)],  # Self-reported fault status
            df["ws_{:03d}".format(ti)] <= 0.0,  # Non-physical (<= 0) wind speeds
            df["pow_{:03d}".format(ti)] <= 0.0,  # Non-physical (<= 0) power
        ]
        # OR all conditions into one combined mask (the original also OR'ed
        # conds[0] with itself — harmless but redundant).
        conds_combined = conds[0]
        for cond in conds[1:]:
            conds_combined = conds_combined | cond
        # Plot time vs filtered data and save one figure per turbine.
        fig, ax = dff.plot_highlight_data_by_conds(df, conds, ti)
        ax.legend(
            ["All data", "Bad self-status", "Negative WS", "Negative power"]
        )
        fp = os.path.join(figs_path, "basic_filtering_%03d.png" % ti)
        print("Saving figure to {:s} for turbine {:03d}.".format(fp, ti))
        fig.savefig(fp, dpi=200)
        plt.close(fig)
        # Mark the combined-condition rows as faulty in the dataframe.
        df = dff.df_mark_turbdata_as_faulty(
            df, conds_combined, ti, verbose=True
        )
    # The self-status columns are no longer needed after filtering.
    self_status_cols = [
        "is_operation_normal_%03d" % ti for ti in range(num_turbines)
    ]
    df = df.drop(columns=self_status_cols)
    # Save the filtered dataset as a single feather file.
    fout = os.path.join(out_path, "scada_data_60s.ftr")
    # BUGFIX: message read "Savig" (typo) in the original.
    print("Saving filtered data to {:s}.".format(fout))
    os.makedirs(out_path, exist_ok=True)
    # Feather requires a default RangeIndex; keep "time" unless it already
    # exists as a column.
    df = df.reset_index(drop=("time" in df.columns))
    df.to_feather(fout)
| 287 | 0 | 23 |
4e8a26df0a152db97762d1ac8668ece5643de1eb | 4,878 | py | Python | scrappybara/langmodel/language_model.py | cd109/scrappybara | 691385e3eb18eac4c5bec58950166b4c03c75a4b | [
"MIT"
] | null | null | null | scrappybara/langmodel/language_model.py | cd109/scrappybara | 691385e3eb18eac4c5bec58950166b4c03c75a4b | [
"MIT"
] | null | null | null | scrappybara/langmodel/language_model.py | cd109/scrappybara | 691385e3eb18eac4c5bec58950166b4c03c75a4b | [
"MIT"
] | 1 | 2020-10-20T03:36:59.000Z | 2020-10-20T03:36:59.000Z | import math
from scrappybara.langmodel.token_context import TokenContext
from scrappybara.preprocessing.tokenizer import Tokenizer
from scrappybara.utils.files import txt_file_reader
from scrappybara.utils.mutables import append_to_dict_list
| 44.345455 | 111 | 0.616851 | import math
from scrappybara.langmodel.token_context import TokenContext
from scrappybara.preprocessing.tokenizer import Tokenizer
from scrappybara.utils.files import txt_file_reader
from scrappybara.utils.mutables import append_to_dict_list
class LanguageModel(object):
    """In-memory n-gram language model backed by precomputed count/probability files.

    Ngrams of order k are loaded from "k_grams.txt" (tab-separated
    text/count/probability rows) and kept only when their count reaches the
    per-order minimum passed to the constructor.
    """

    def __init__(self, *min_counts):
        """Pass the min count for each order needed.

        Examples:
            * If only unigrams are needed with min count 1, instantiate LanguageModel(1)
            * To get unigrams with mincount 10 and bigrams with mincount 5, instantiate LanguageModel(10, 5)
        """
        self.__max_order = len(min_counts)
        self.__ngrams_details = {}  # ngram => tuple (count, proba, logp)
        self.__unk = (0, 0.0, float('-inf'))  # Unknown ngram (count=0, proba=0.0, logp=-inf)
        self.__tokenize = Tokenizer()
        # Load ngrams: file for order k is "k_grams.txt"; rows below the
        # order's min count are discarded.
        for idx, min_count in enumerate(min_counts):
            order = idx + 1
            with txt_file_reader('langmodel', ('%d_grams.txt' % order)) as text_file:
                for text, count, proba in [line.split('\t') for line in text_file]:
                    count = int(count)
                    if count >= min_count:
                        proba = float(proba)
                        self.__ngrams_details[text] = (count, proba, math.log(proba))
        # Build next_token dict: maps each (n-1)-token prefix to its observed
        # continuations, used by next_word().
        self.__next_tokens = {}  # ngram => list of tuples (token, proba) ordered by descending proba
        if self.__max_order > 1:
            for ngram, details in self.__ngrams_details.items():
                tokens = ngram.split()
                if len(tokens) > 1:
                    append_to_dict_list(self.__next_tokens, ' '.join(tokens[:-1]), (tokens[-1], details[1]))
            for ngram, next_tokens in self.__next_tokens.items():
                next_tokens.sort(key=lambda x: x[1], reverse=True)

    def __len__(self):
        # Total number of stored ngrams, all orders combined.
        return len(self.__ngrams_details)

    def __contains__(self, ngram):
        # Membership test on the exact ngram text (tokens joined by spaces).
        return ngram in self.__ngrams_details

    def __logp(self, ngram):
        """Log-probability"""
        # Unknown ngrams fall back to -inf via self.__unk.
        return self.__ngrams_details.get(ngram, self.__unk)[2]

    def __logps(self, token_context):
        """Returns list of log-probabilities sorted in descending order.

        e.g. for the ngram 'a hat', it will return [logp('a hat'), logp('hat')]
        """
        # One entry per order from max_order down to 2, then the unigram;
        # orders with no candidate context contribute -inf.
        probas = []
        for i in range(self.__max_order, 1, -1):
            candidates = token_context.candidates(i)
            if candidates:
                probas.append(max([self.__logp(ngram) for ngram in candidates]))
            else:
                probas.append(float('-inf'))
        probas.append(self.__logp(token_context.token))
        return probas

    def top_ngrams(self, order, limit=None):
        """Return (ngram, count) tuples of the given order, sorted by
        descending probability; `limit=None` returns all of them."""
        ngrams = []
        for ngram, details in self.__ngrams_details.items():
            if len(ngram.split()) == order:
                ngrams.append((ngram, *details[:-1]))
        sorted_ngrams = sorted(ngrams, key=lambda x: x[2], reverse=True)
        return sorted_ngrams[:limit]

    def next_word(self, text, limit=None):
        """Return likely continuations of `text` as (token, proba) tuples,
        most probable first, backing off to shorter suffixes of the input."""
        tokens = self.__tokenize(text)
        # Try the longest usable suffix first (max_order - 1 tokens), then
        # back off one token at a time.
        for i in range(self.__max_order - 1, 0, -1):
            try:
                return self.__next_tokens[' '.join(tokens[-i:])][:limit]
            except KeyError:
                continue
        return []

    def best_token(self, *tokens, before=None, after=None):
        """Finds the best token among a list of candidates.

        E.g. if we want to know the best token between 'personnel' & 'personal', given the context:
        "hire best [personnel/personal] today"
        Call: best_token('personnel', 'personal', before='hire best', after='today')
        If none of the tokens have been found, returns None.
        """
        if not any([token in self.__ngrams_details for token in tokens]):
            return None
        # `or []` treats a None/empty context as "no surrounding tokens".
        tokens_before = self.__tokenize(before) or []
        tokens_after = self.__tokenize(after) or []
        token_contexts = [TokenContext(token, tokens_before, tokens_after) for token in tokens]
        # Key candidates by their per-order log-probability tuples; tuple
        # comparison then prefers higher-order matches first.
        tprobas = {tuple(self.__logps(token_context)): idx for idx, token_context in enumerate(token_contexts)}
        return tokens[tprobas[max(tprobas.keys())]]

    def best_ngram(self, *ngrams):
        """Returns the ngram with the highest probability among the selection.

        If none of the ngrams has been found, returns None.
        """
        if not any([ngram in self.__ngrams_details for ngram in ngrams]):
            return None
        token_logp_tuples = [(ngram, self.__logp(ngram)) for ngram in ngrams]
        token_logp_tuples.sort(key=lambda x: x[1], reverse=True)
        return token_logp_tuples[0][0]

    def has_ngram(self, ngram, min_count=1):
        """Return True if `ngram` is stored with at least `min_count`
        occurrences; `min_count` below 1 always returns False."""
        if min_count < 1:
            return False
        return self.__ngrams_details.get(ngram, self.__unk)[0] >= min_count
| 819 | 3,792 | 23 |
066f467a09fc460769496e0b2d2881cc8b22ebdf | 10,036 | py | Python | py/instalog/plugins/input_socket.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 3 | 2022-01-06T16:52:52.000Z | 2022-03-07T11:30:47.000Z | py/instalog/plugins/input_socket.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | null | null | null | py/instalog/plugins/input_socket.py | arccode/factory | a1b0fccd68987d8cd9c89710adc3c04b868347ec | [
"BSD-3-Clause"
] | 1 | 2021-10-24T01:47:22.000Z | 2021-10-24T01:47:22.000Z | #!/usr/bin/env python3
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Input socket plugin.
Waits for events from an output socket plugin running on another Instalog node.
See socket_common.py for protocol definition.
See input_socket_unittest.py for reference examples.
"""
import hashlib
import logging
import socket
import tempfile
import threading
import time
from cros.factory.instalog import datatypes
from cros.factory.instalog import log_utils
from cros.factory.instalog import plugin_base
from cros.factory.instalog.plugins import socket_common
from cros.factory.instalog.utils.arg_utils import Arg
from cros.factory.instalog.utils import file_utils
_DEFAULT_HOSTNAME = '0.0.0.0'
class ChecksumError(Exception):
  """Represents a checksum mismatch between sender and receiver data."""
class InputSocketReceiver(log_utils.LoggerMixin):
  """Receives a request from an output socket plugin.

  NOTE(review): no __init__ is visible in this copy of the class; the
  attributes read below (self._conn, self._plugin_api, self._tmp_dir, and
  the logging shortcuts from log_utils.LoggerMixin) are presumably set by an
  initializer outside this view — confirm against the full source.
  """

  def ProcessRequest(self):
    """Receives a request from an output socket plugin."""
    # Create the temporary directory for attachments.
    with file_utils.TempDirectory(prefix='input_socket_') as self._tmp_dir:
      self.debug('Temporary directory for attachments: %s', self._tmp_dir)
      try:
        events = []
        num_events = self.RecvInt()
        # A transfer of 0 events is a keep-alive ping; answer and wait for
        # a real batch.
        while num_events == 0:
          self.Pong()
          num_events = self.RecvInt()
        total_bytes = 0
        start_time = time.time()
        for event_id in range(num_events):
          event_bytes, event = self.RecvEvent()
          self.debug('Received event[%d] size: %.2f kB', event_id,
                     event_bytes / 1024)
          total_bytes += event_bytes
          events.append(event)
        receive_time = time.time() - start_time
      except socket.timeout:
        self.error('Socket timeout error, remote connection closed?')
        self.Close()
        return
      except ChecksumError:
        self.error('Checksum mismatch, abort')
        self.Close()
        return
      except Exception:
        self.exception('Unknown exception encountered')
        self.Close()
        return
      # Three-way confirmation with the sender before emitting: syn, ack,
      # then syn-ack after a successful Emit.
      self.debug('Notifying transmitting side of data-received (syn)')
      self._conn.sendall(socket_common.DATA_RECEIVED_CHAR)
      self.debug('Waiting for request-emit (ack)...')
      if self._conn.recv(1) != socket_common.REQUEST_EMIT_CHAR:
        self.error('Did not receive request-emit (ack), aborting')
        self.Close()
        return
      self.debug('Calling Emit()...')
      start_time = time.time()
      if not self._plugin_api.Emit(events):
        self.error('Unable to emit, aborting')
        self.Close()
        return
      emit_time = time.time() - start_time
      try:
        self.debug('Success; sending emit-success to transmitting side '
                   '(syn-ack)')
        self._conn.sendall(socket_common.EMIT_SUCCESS_CHAR)
      except Exception:
        self.exception('Received events were emitted successfully, but failed '
                       'to confirm success with remote side: duplicate data '
                       'may occur')
      finally:
        total_kbytes = total_bytes / 1024
        self.info('Received %d events, total %.2f kB in %.1f+%.1f sec '
                  '(%.2f kB/sec)',
                  len(events), total_kbytes, receive_time, emit_time,
                  total_kbytes / receive_time)
      self.Close()

  def Pong(self):
    """Called for an empty transfer (0 events)."""
    self.debug('Empty transfer: Pong!')
    try:
      self._conn.sendall(socket_common.PING_RESPONSE)
    except Exception:
      # Best-effort: a failed pong just means the peer went away.
      pass

  def Close(self):
    """Shuts down and closes the socket stream."""
    try:
      self.debug('Closing socket')
      self._conn.shutdown(socket.SHUT_RDWR)
      self._conn.close()
    except Exception:
      self.exception('Error closing socket')

  def RecvItem(self):
    """Returns the next item in socket stream."""
    # Items are byte strings terminated by socket_common.SEPARATOR.
    buf = b''
    while True:
      data = self._conn.recv(1)
      if not data:
        # Peer closed the connection mid-item.
        raise socket.timeout
      if data == socket_common.SEPARATOR:
        break
      buf += data
    return buf

  def RecvInt(self):
    """Returns the next integer in socket stream."""
    return int(self.RecvItem())

  def RecvFieldParts(self):
    """Returns a generator to retrieve the next field in socket stream."""
    # Wire format: <length> <payload bytes> <sha1-hex checksum>.
    total = self.RecvInt()
    self.debug('RecvFieldParts total = %d bytes' % total)
    progress = 0
    local_hash = hashlib.sha1()
    while progress < total:
      recv_size = total - progress
      # Recv may return any number of bytes <= recv_size, so it's important
      # to check the size of its output.
      out = self._conn.recv(recv_size)
      if not out:
        raise socket.timeout
      local_hash.update(out)
      progress += len(out)
      yield progress, out
    # Verify SHA1 checksum.
    remote_checksum = self.RecvItem()
    local_checksum = local_hash.hexdigest()
    if remote_checksum.decode('utf-8') != local_checksum:
      raise ChecksumError

  def RecvField(self):
    """Returns the next field in socket stream."""
    buf = b''
    for unused_progress, field in self.RecvFieldParts():
      buf += field
    return buf

  def RecvEvent(self):
    """Returns the next event in socket stream.

    Returns:
      A tuple with (total bytes, Event object)
    """
    total_bytes = 0
    # Retrieve the event itself.
    event_field = self.RecvField()
    total_bytes += len(event_field)
    event = datatypes.Event.Deserialize(event_field.decode('utf-8'))
    # An event is followed by its number of attachments.
    num_atts = self.RecvInt()
    self.debug('num_atts = %d', num_atts)
    for att_index in range(num_atts):
      # Attachment format: <attachment_id> <attachment_data>
      att_id = self.RecvField()
      total_bytes += len(att_id)
      att_size, att_path = self.RecvAttachmentData()
      total_bytes += att_size
      self.debug('Attachment[%d] %s: %d bytes', att_index, att_id, att_size)
      event.attachments[att_id] = att_path
    self.debug('Retrieved event (%d bytes): %s', total_bytes, event)
    return total_bytes, event

  def RecvAttachmentData(self):
    """Receives attachment data and writes to a temporary file on disk.

    Returns:
      A tuple with (total bytes received, temporary path).
    """
    progress = 0
    # delete=False: the file must outlive this call so the emitted event can
    # reference it by path.
    with tempfile.NamedTemporaryFile('wb', dir=self._tmp_dir,
                                     delete=False) as f:
      for progress, bin_part in self.RecvFieldParts():
        f.write(bin_part)
    return progress, f.name
# Run the plugin standalone via the Instalog plugin sandbox entry point.
if __name__ == '__main__':
  plugin_base.main()
| 32.478964 | 80 | 0.65564 | #!/usr/bin/env python3
#
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Input socket plugin.
Waits for events from an output socket plugin running on another Instalog node.
See socket_common.py for protocol definition.
See input_socket_unittest.py for reference examples.
"""
import hashlib
import logging
import socket
import tempfile
import threading
import time
from cros.factory.instalog import datatypes
from cros.factory.instalog import log_utils
from cros.factory.instalog import plugin_base
from cros.factory.instalog.plugins import socket_common
from cros.factory.instalog.utils.arg_utils import Arg
from cros.factory.instalog.utils import file_utils
_DEFAULT_HOSTNAME = '0.0.0.0'
class ChecksumError(Exception):
  """Represents a checksum mismatch between sender and receiver data."""
class InputSocket(plugin_base.InputPlugin):
  """Instalog input plugin listening on a TCP socket.

  Binds to hostname:port, accepts connections from output socket plugins,
  and hands each connection to an InputSocketReceiver running on its own
  thread.
  """

  # Plugin configuration arguments, parsed by the plugin framework into
  # self.args.
  ARGS = [
      Arg('hostname', str, 'Hostname that server should bind to.',
          default=_DEFAULT_HOSTNAME),
      Arg('port', int, 'Port that server should bind to.',
          default=socket_common.DEFAULT_PORT)
  ]

  def __init__(self, *args, **kwargs):
    self._sock = None           # listening socket, created in SetUp
    self._accept_thread = None  # thread running AcceptLoop
    self._threads = {}          # live receiver threads (used as a set)
    super(InputSocket, self).__init__(*args, **kwargs)

  def SetUp(self):
    """Sets up the plugin."""
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_REUSEADDR lets the plugin rebind quickly after a restart.
    self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.debug('Socket created')

    # Bind socket.
    try:
      self._sock.bind((self.args.hostname, self.args.port))
    except socket.error as e:
      self.exception('Bind failed. Error : %s' % e)
      raise

    self.debug('Socket bind complete')

    # Queue up to 5 requests.
    self._sock.listen(5)
    self.info('Socket now listening on %s:%d...',
              self.args.hostname, self.args.port)

    # Start the AcceptLoop thread to wait for incoming connections.
    self._accept_thread = threading.Thread(target=self.AcceptLoop)
    self._accept_thread.daemon = False
    self._accept_thread.start()

  def AcceptLoop(self):
    """Main accept loop which waits for incoming connections."""
    while not self.IsStopping():
      # Purge any finished threads.
      for thread in list(self._threads.keys()):
        if not thread.is_alive():
          del self._threads[thread]

      conn, addr = self._sock.accept()
      self.info('Connected with %s:%d' % (addr[0], addr[1]))
      conn.settimeout(socket_common.SOCKET_TIMEOUT)
      conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF,
                      socket_common.SOCKET_BUFFER_SIZE)

      # Since sock.accept is a blocking call, check for the STOPPING state
      # afterwards. TearDown may have purposely initiated a connection in order
      # to break the sock.accept call.
      if self.IsStopping():
        conn.close()
        return

      # Each accepted connection gets its own receiver thread.
      receiver = InputSocketReceiver(self.logger.name, conn, self)
      t = threading.Thread(target=receiver.ProcessRequest)
      t.daemon = False
      self._threads[t] = True
      t.start()

  def TearDown(self):
    """Tears down the plugin."""
    if self._sock:
      self.info('Closing socket and shutting down accept thread...')
      # Initiate a fake connection in order to break a blocking sock.accept
      # call.
      socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(
          (self.args.hostname, self.args.port))
      self._sock.shutdown(socket.SHUT_RDWR)
      self._sock.close()
      if self._accept_thread:
        self._accept_thread.join()
      else:
        self.warning('TearDown: AcceptLoop thread was never started')
    else:
      self.warning('TearDown: Socket was never opened')
    # Wait for all in-flight receivers before declaring shutdown complete.
    self.info('Join on %d InputSocketReceiver threads...', len(self._threads))
    for thread in self._threads:
      thread.join()
    self.info('Shutdown complete')
class InputSocketReceiver(log_utils.LoggerMixin):
"""Receives a request from an output socket plugin."""
def __init__(self, logger_name, conn, plugin_api):
# log_utils.LoggerMixin creates shortcut functions for convenience.
self.logger = logging.getLogger(logger_name)
self._conn = conn
self._plugin_api = plugin_api
self._tmp_dir = None
super(InputSocketReceiver, self).__init__()
def ProcessRequest(self):
"""Receives a request from an output socket plugin."""
# Create the temporary directory for attachments.
with file_utils.TempDirectory(prefix='input_socket_') as self._tmp_dir:
self.debug('Temporary directory for attachments: %s', self._tmp_dir)
try:
events = []
num_events = self.RecvInt()
while num_events == 0:
self.Pong()
num_events = self.RecvInt()
total_bytes = 0
start_time = time.time()
for event_id in range(num_events):
event_bytes, event = self.RecvEvent()
self.debug('Received event[%d] size: %.2f kB', event_id,
event_bytes / 1024)
total_bytes += event_bytes
events.append(event)
receive_time = time.time() - start_time
except socket.timeout:
self.error('Socket timeout error, remote connection closed?')
self.Close()
return
except ChecksumError:
self.error('Checksum mismatch, abort')
self.Close()
return
except Exception:
self.exception('Unknown exception encountered')
self.Close()
return
self.debug('Notifying transmitting side of data-received (syn)')
self._conn.sendall(socket_common.DATA_RECEIVED_CHAR)
self.debug('Waiting for request-emit (ack)...')
if self._conn.recv(1) != socket_common.REQUEST_EMIT_CHAR:
self.error('Did not receive request-emit (ack), aborting')
self.Close()
return
self.debug('Calling Emit()...')
start_time = time.time()
if not self._plugin_api.Emit(events):
self.error('Unable to emit, aborting')
self.Close()
return
emit_time = time.time() - start_time
try:
self.debug('Success; sending emit-success to transmitting side '
'(syn-ack)')
self._conn.sendall(socket_common.EMIT_SUCCESS_CHAR)
except Exception:
self.exception('Received events were emitted successfully, but failed '
'to confirm success with remote side: duplicate data '
'may occur')
finally:
total_kbytes = total_bytes / 1024
self.info('Received %d events, total %.2f kB in %.1f+%.1f sec '
'(%.2f kB/sec)',
len(events), total_kbytes, receive_time, emit_time,
total_kbytes / receive_time)
self.Close()
def Pong(self):
"""Called for an empty transfer (0 events)."""
self.debug('Empty transfer: Pong!')
try:
self._conn.sendall(socket_common.PING_RESPONSE)
except Exception:
pass
def Close(self):
"""Shuts down and closes the socket stream."""
try:
self.debug('Closing socket')
self._conn.shutdown(socket.SHUT_RDWR)
self._conn.close()
except Exception:
self.exception('Error closing socket')
def RecvItem(self):
"""Returns the next item in socket stream."""
buf = b''
while True:
data = self._conn.recv(1)
if not data:
raise socket.timeout
if data == socket_common.SEPARATOR:
break
buf += data
return buf
def RecvInt(self):
"""Returns the next integer in socket stream."""
return int(self.RecvItem())
def RecvFieldParts(self):
"""Returns a generator to retrieve the next field in socket stream."""
total = self.RecvInt()
self.debug('RecvFieldParts total = %d bytes' % total)
progress = 0
local_hash = hashlib.sha1()
while progress < total:
recv_size = total - progress
# Recv may return any number of bytes <= recv_size, so it's important
# to check the size of its output.
out = self._conn.recv(recv_size)
if not out:
raise socket.timeout
local_hash.update(out)
progress += len(out)
yield progress, out
# Verify SHA1 checksum.
remote_checksum = self.RecvItem()
local_checksum = local_hash.hexdigest()
if remote_checksum.decode('utf-8') != local_checksum:
raise ChecksumError
def RecvField(self):
"""Returns the next field in socket stream."""
buf = b''
for unused_progress, field in self.RecvFieldParts():
buf += field
return buf
def RecvEvent(self):
"""Returns the next event in socket stream.
Returns:
A tuple with (total bytes, Event object)
"""
total_bytes = 0
# Retrieve the event itself.
event_field = self.RecvField()
total_bytes += len(event_field)
event = datatypes.Event.Deserialize(event_field.decode('utf-8'))
# An event is followed by its number of attachments.
num_atts = self.RecvInt()
self.debug('num_atts = %d', num_atts)
for att_index in range(num_atts):
# Attachment format: <attachment_id> <attachment_data>
att_id = self.RecvField()
total_bytes += len(att_id)
att_size, att_path = self.RecvAttachmentData()
total_bytes += att_size
self.debug('Attachment[%d] %s: %d bytes', att_index, att_id, att_size)
event.attachments[att_id] = att_path
self.debug('Retrieved event (%d bytes): %s', total_bytes, event)
return total_bytes, event
def RecvAttachmentData(self):
    """Stream the next field into a temporary file under self._tmp_dir.

    The file is kept on disk (delete=False) so the caller can use it.

    Returns:
      A tuple (bytes received, path of the temporary file).
    """
    received = 0
    with tempfile.NamedTemporaryFile('wb', dir=self._tmp_dir,
                                     delete=False) as tmp:
        path = tmp.name
        for received, chunk in self.RecvFieldParts():
            tmp.write(chunk)
    return received, path
# Script entry point: hand control to the plugin framework's main loop.
if __name__ == '__main__':
    plugin_base.main()
| 425 | 2,876 | 48 |
72af4e1f9e46cb1826d0b6d934a94d322b2baf6f | 669 | py | Python | attic/gym_copter/__init__.py | sendera99/gym-copter | 08ac2bc4e7e43a58d93af14f7a618a9bdda78e79 | [
"MIT"
] | 14 | 2019-11-03T05:17:46.000Z | 2022-02-26T05:37:32.000Z | attic/gym_copter/__init__.py | sendera99/gym-copter | 08ac2bc4e7e43a58d93af14f7a618a9bdda78e79 | [
"MIT"
] | 77 | 2020-05-17T01:56:29.000Z | 2021-06-19T02:46:52.000Z | attic/gym_copter/__init__.py | sendera99/gym-copter | 08ac2bc4e7e43a58d93af14f7a618a9bdda78e79 | [
"MIT"
] | 6 | 2020-01-01T07:22:15.000Z | 2021-05-11T17:45:33.000Z | '''
Copyright (C) 2019 Simon D. Levy
MIT License
'''
from gym.envs.registration import register
# Register the gym-copter environments with OpenAI Gym so each can be
# instantiated via gym.make('<id>').  All entry points live in
# gym_copter.envs; max_episode_steps caps episode length per task.
register(
    id='Lander-v0',
    entry_point='gym_copter.envs:Lander2D',
    max_episode_steps=2000
)
register(
    id='Lander3D-v0',
    entry_point='gym_copter.envs:Lander3D',
    max_episode_steps=2000
)
# v1 variant of the 3D lander with a landing target.
register(
    id='Lander3D-v1',
    entry_point='gym_copter.envs:TargetedLander3D',
    max_episode_steps=2000
)
register(
    id='Distance-v0',
    entry_point='gym_copter.envs:Distance',
    max_episode_steps=1000
)
register(
    id='Takeoff-v0',
    entry_point='gym_copter.envs:Takeoff',
    max_episode_steps=1000
)
| 17.605263 | 52 | 0.665172 | '''
Copyright (C) 2019 Simon D. Levy
MIT License
'''
from gym.envs.registration import register
register(
id='Lander-v0',
entry_point='gym_copter.envs:Lander2D',
max_episode_steps=2000
)
register(
id='Lander3D-v0',
entry_point='gym_copter.envs:Lander3D',
max_episode_steps=2000
)
register(
id='Lander3D-v1',
entry_point='gym_copter.envs:TargetedLander3D',
max_episode_steps=2000
)
register(
id='Distance-v0',
entry_point='gym_copter.envs:Distance',
max_episode_steps=1000
)
register(
id='Takeoff-v0',
entry_point='gym_copter.envs:Takeoff',
max_episode_steps=1000
)
| 0 | 0 | 0 |
7f73ad566fc4754a6ab01da18fd42eede682a194 | 12,890 | py | Python | tests/evolve_population.py | sebastiengilbert73/gp_stop_signs | 651170d2be05bc08869e1990b98a8292dc6a1799 | [
"MIT"
] | 1 | 2021-12-18T23:22:11.000Z | 2021-12-18T23:22:11.000Z | tests/evolve_population.py | sebastiengilbert73/gp_stop_signs | 651170d2be05bc08869e1990b98a8292dc6a1799 | [
"MIT"
] | null | null | null | tests/evolve_population.py | sebastiengilbert73/gp_stop_signs | 651170d2be05bc08869e1990b98a8292dc6a1799 | [
"MIT"
] | null | null | null | import ast
import logging
import argparse
import xml.etree.ElementTree as ET
import cv2
import vision_genprog.tasks.image_processing as image_processing
import vision_genprog.semanticSegmentersPop as semanticSegmentersPop
import os
import ast
import synthetic_heatmap.generators.stop_sign as stop_sign
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('validationPairsDirectory', help="The filepath to the validation image pairs")
parser.add_argument('--primitivesFilepath', help="The filepath to the primitives xml file. Default: 'vision_genprog/tasks/image_processing.xml'", default='vision_genprog/tasks/image_processing.xml')
parser.add_argument('--imageShapeHW', help="The image shape (height, width). Default='(256, 256)'", default='(256, 256)')
parser.add_argument('--outputDirectory', help="The output directory. Default: './outputs'", default='./outputs')
parser.add_argument('--numberOfIndividuals', help="The number of individuals. Default: 200", type=int, default=200)
parser.add_argument('--levelToFunctionProbabilityDict',
help="The probability to generate a function, at each level. Default: '{0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}'",
default='{0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}')
parser.add_argument('--proportionOfConstants',
help='The probability to generate a constant, when a variable could be used. Default: 0',
type=float, default=0)
parser.add_argument('--constantCreationParametersList',
help="The parameters to use when creating constants: [minFloat, maxFloat, minInt, maxInt, width, height]. Default: '[-1, 1, 0, 255, 256, 256]'",
default='[-1, 1, 0, 255, 256, 256]')
parser.add_argument('--numberOfGenerations', help="The number of generations to run. Default: 32", type=int,
default=32)
parser.add_argument('--weightForNumberOfNodes',
help="Penalty term proportional to the number of nodes. Default: 0.001", type=float,
default=0.001)
parser.add_argument('--numberOfTournamentParticipants',
help="The number of participants in selection tournaments. Default: 2", type=int, default=2)
parser.add_argument('--mutationProbability', help="The probability to mutate a child. Default: 0.1", type=float,
default=0.1)
parser.add_argument('--proportionOfNewIndividuals',
help="The proportion of randomly generates individuals per generation. Default: 0.1",
type=float, default=0.1)
parser.add_argument('--maximumNumberOfMissedCreationTrials',
help="The maximum number if missed creation trials. Default: 1000", type=int, default=1000)
parser.add_argument('--numberOfTrainingPairs', help="The number of generated training pairs, per epoch. Default: 160", type=int, default=160)
args = parser.parse_args()
imageShapeHW = ast.literal_eval(args.imageShapeHW)
levelToFunctionProbabilityDict = ast.literal_eval(args.levelToFunctionProbabilityDict)
constantCreationParametersList = ast.literal_eval(args.constantCreationParametersList)
main(
args.validationPairsDirectory,
args.primitivesFilepath,
imageShapeHW,
args.outputDirectory,
args.numberOfIndividuals,
levelToFunctionProbabilityDict,
args.proportionOfConstants,
constantCreationParametersList,
args.numberOfGenerations,
args.weightForNumberOfNodes,
args.numberOfTournamentParticipants,
args.mutationProbability,
args.proportionOfNewIndividuals,
args.maximumNumberOfMissedCreationTrials,
args.numberOfTrainingPairs
) | 57.544643 | 214 | 0.723662 | import ast
import logging
import argparse
import xml.etree.ElementTree as ET
import cv2
import vision_genprog.tasks.image_processing as image_processing
import vision_genprog.semanticSegmentersPop as semanticSegmentersPop
import os
import ast
import synthetic_heatmap.generators.stop_sign as stop_sign
logging.basicConfig(level=logging.DEBUG, format='%(asctime)-15s %(message)s')
def main(
        validationPairsDirectory,
        primitivesFilepath,
        imageShapeHW,
        outputDirectory,
        numberOfIndividuals,
        levelToFunctionProbabilityDict,
        proportionOfConstants,
        constantCreationParametersList,
        numberOfGenerations,
        weightForNumberOfNodes,
        numberOfTournamentParticipants,
        mutationProbability,
        proportionOfNewIndividuals,
        maximumNumberOfMissedCreationTrials,
        numberOfTrainingPairs
):
    """Evolve a population of genetic-programming semantic segmenters.

    Each generation trains on freshly generated synthetic stop-sign pairs,
    evaluates on the fixed validation set, appends statistics to
    <outputDirectory>/generations.csv, and saves any new validation champion
    as an XML file.

    Args:
        validationPairsDirectory: directory of *_input/*_heatmap image pairs.
        primitivesFilepath: XML file describing the primitive functions.
        imageShapeHW: expected (height, width) of all images.
        outputDirectory: where generations.csv and champions are written.
        numberOfIndividuals: population size.
        levelToFunctionProbabilityDict: per-level function creation odds.
        proportionOfConstants: probability of generating a constant node.
        constantCreationParametersList: ranges used when creating constants.
        numberOfGenerations: number of evolution iterations.
        weightForNumberOfNodes: cost penalty per tree node.
        numberOfTournamentParticipants: selection tournament size.
        mutationProbability: probability of mutating a child.
        proportionOfNewIndividuals: fraction of random newcomers per generation.
        maximumNumberOfMissedCreationTrials: cap on failed creation attempts.
        numberOfTrainingPairs: synthetic training pairs per generation.
    """
    logging.info("evolve_population.main()")

    # Create the output directory
    if not os.path.exists(outputDirectory):
        os.makedirs(outputDirectory)

    # Create the interpreter
    primitive_functions_tree = ET.parse(primitivesFilepath)
    interpreter = image_processing.Interpreter(primitive_functions_tree, imageShapeHW)
    variableName_to_type = {'image': 'grayscale_image'}
    return_type = 'binary_image'  # We want a semantic segmentation separating background from objects of interest

    # Load the validation pairs
    validation_input_output_tuples = InputOutputTuples(validationPairsDirectory, imageShapeHW, 'image')

    # Create a population
    semantic_segmenters_pop = semanticSegmentersPop.SemanticSegmentersPopulation()
    semantic_segmenters_pop.Generate(
        numberOfIndividuals=numberOfIndividuals,
        interpreter=interpreter,
        returnType=return_type,
        levelToFunctionProbabilityDict=levelToFunctionProbabilityDict,
        proportionOfConstants=proportionOfConstants,
        constantCreationParametersList=constantCreationParametersList,
        variableNameToTypeDict=variableName_to_type,
        functionNameToWeightDict=None
    )

    # Original population cost
    validation_individual_to_cost_dict = semantic_segmenters_pop.EvaluateIndividualCosts(
        inputOutputTuplesList=validation_input_output_tuples,
        variableNameToTypeDict=variableName_to_type,
        interpreter=interpreter,
        returnType=return_type,
        weightForNumberOfElements=weightForNumberOfNodes
    )
    (champion, validation_lowest_cost) = semantic_segmenters_pop.Champion(validation_individual_to_cost_dict)
    validation_median_cost = semantic_segmenters_pop.MedianCost(validation_individual_to_cost_dict)
    validation_average_cost = semantic_segmenters_pop.AverageCost(validation_individual_to_cost_dict)
    validation_cost_std_dev = semantic_segmenters_pop.StandardDeviationOfCost(validation_individual_to_cost_dict)

    train_individual_to_cost_dict = None
    # BUG FIX: was args.outputDirectory (a global from the CLI block), which
    # broke main() when called programmatically; use the parameter instead.
    with open(os.path.join(outputDirectory, "generations.csv"), 'w+') as generations_file:
        generations_file.write("generation,train_lowest_cost,validation_lowest_cost,train_median_cost,validation_median_cost,train_average_cost,validation_average_cost,train_cost_std_dev,validation_cost_std_dev\n")
        generations_file.write("0,-,{},-,{},-,{},-,{}\n".format(validation_lowest_cost, validation_median_cost, validation_average_cost, validation_cost_std_dev))

    # Create a generator of stop sign image and heatmaps
    stop_sign_generator = stop_sign.StopSign()

    lowest_validation_champion_cost = 1.0e9
    for generation_ndx in range(1, numberOfGenerations + 1):
        logging.info("***** Generation {} *****".format(generation_ndx))
        # Generate synthetic training input-output tuples
        train_input_output_tuples_list = GenerateSyntheticInputOutputTuples(
            stop_sign_generator, imageShapeHW, numberOfTrainingPairs
        )
        # Evolve one generation
        train_individual_to_cost_dict = semantic_segmenters_pop.NewGenerationWithTournament(
            inputOutputTuplesList=train_input_output_tuples_list,
            variableNameToTypeDict=variableName_to_type,
            interpreter=interpreter,
            returnType=return_type,
            numberOfTournamentParticipants=numberOfTournamentParticipants,
            mutationProbability=mutationProbability,
            currentIndividualToCostDict=train_individual_to_cost_dict,
            proportionOfConstants=proportionOfConstants,
            levelToFunctionProbabilityDict=levelToFunctionProbabilityDict,
            functionNameToWeightDict=None,
            constantCreationParametersList=constantCreationParametersList,
            proportionOfNewIndividuals=proportionOfNewIndividuals,
            weightForNumberOfElements=weightForNumberOfNodes,
            maximumNumberOfMissedCreationTrials=maximumNumberOfMissedCreationTrials
        )
        train_champion, train_lowest_cost = semantic_segmenters_pop.Champion(train_individual_to_cost_dict)
        train_median_cost = semantic_segmenters_pop.MedianCost(train_individual_to_cost_dict)
        train_average_cost = semantic_segmenters_pop.AverageCost(train_individual_to_cost_dict)
        train_cost_std_dev = semantic_segmenters_pop.StandardDeviationOfCost(train_individual_to_cost_dict)

        # Validation of the new population
        validation_individual_to_cost_dict = semantic_segmenters_pop.EvaluateIndividualCosts(
            inputOutputTuplesList=validation_input_output_tuples,
            variableNameToTypeDict=variableName_to_type,
            interpreter=interpreter,
            returnType=return_type,
            weightForNumberOfElements=weightForNumberOfNodes
        )
        (champion, validation_lowest_cost) = semantic_segmenters_pop.Champion(validation_individual_to_cost_dict)
        validation_median_cost = semantic_segmenters_pop.MedianCost(validation_individual_to_cost_dict)
        validation_average_cost = semantic_segmenters_pop.AverageCost(validation_individual_to_cost_dict)
        validation_cost_std_dev = semantic_segmenters_pop.StandardDeviationOfCost(validation_individual_to_cost_dict)
        logging.info("train_lowest_cost = {}; validation_lowest_cost = {}".format(train_lowest_cost, validation_lowest_cost))

        # BUG FIX: same args.outputDirectory -> outputDirectory substitution.
        with open(os.path.join(outputDirectory, "generations.csv"), 'a') as generations_file:
            generations_file.write("{},{},{},{},{},{},{},{},{}\n".format(generation_ndx,
                                                                         train_lowest_cost, validation_lowest_cost,
                                                                         train_median_cost, validation_median_cost,
                                                                         train_average_cost, validation_average_cost,
                                                                         train_cost_std_dev, validation_cost_std_dev))
        # Save a new champion only when it improves on the best seen so far.
        if validation_lowest_cost < lowest_validation_champion_cost:
            lowest_validation_champion_cost = validation_lowest_cost
            champion_filepath = os.path.join(outputDirectory, "champion_{}_{:.4f}.xml".format(generation_ndx, validation_lowest_cost))
            champion.Save(champion_filepath)
def InputOutputTuples(image_pairs_directory, expected_image_shapeHW, variable_name='image'):
    """Load (input, heatmap) grayscale image pairs from a directory.

    Returns:
        List[Tuple[Dict[str, Any], Any]] — each entry is
        ({variable_name: input_image}, heatmap_image).

    Raises:
        ValueError: if an input image does not match expected_image_shapeHW.
    """
    pairs = []
    for input_path, heatmap_path in InputHeatmapFilepaths(image_pairs_directory):
        input_image = cv2.imread(input_path, cv2.IMREAD_GRAYSCALE)
        if input_image.shape != expected_image_shapeHW:
            raise ValueError("InputOutputTuples(): The shape of image '{}' ({}) is not the expected shape {}".format(
                input_path, input_image.shape, expected_image_shapeHW))
        heatmap_image = cv2.imread(heatmap_path, cv2.IMREAD_GRAYSCALE)
        pairs.append(({variable_name: input_image}, heatmap_image))
    return pairs
def InputHeatmapFilepaths(images_directory):
    """Pair every '*_input*.png' file with its '*_heatmap*' counterpart.

    Returns:
        A list of (input_filepath, heatmap_filepath) tuples.

    Raises:
        FileNotFoundError: if a matching heatmap file does not exist.
    """
    pairs = []
    for filename in os.listdir(images_directory):
        candidate = os.path.join(images_directory, filename)
        # Only regular files whose name marks them as PNG inputs qualify.
        if not os.path.isfile(candidate):
            continue
        if '_input' not in filename or not filename.upper().endswith('.PNG'):
            continue
        heatmap_filepath = os.path.join(
            images_directory, os.path.basename(candidate).replace('_input', '_heatmap'))
        if not os.path.exists(heatmap_filepath):
            raise FileNotFoundError("InputHeatmapFilepaths(): Could not find the heatmap file '{}'".format(heatmap_filepath))
        pairs.append((candidate, heatmap_filepath))
    return pairs
def GenerateSyntheticInputOutputTuples(generator, image_sizeHW, number_of_pairs_to_generate,
                                       variable_name='image'):
    """Create (input dict, heatmap) training pairs from a synthetic generator.

    generator.Generate(image_sizeHW) is expected to return a
    (input_image, heatmap, result_msg) tuple; result_msg is discarded.
    """
    pairs = []
    for _ in range(number_of_pairs_to_generate):
        input_image, heatmap, _result_msg = generator.Generate(image_sizeHW)
        pairs.append(({variable_name: input_image}, heatmap))
    return pairs
# CLI entry point: parse evolution hyper-parameters and run main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('validationPairsDirectory', help="The filepath to the validation image pairs")
    parser.add_argument('--primitivesFilepath', help="The filepath to the primitives xml file. Default: 'vision_genprog/tasks/image_processing.xml'", default='vision_genprog/tasks/image_processing.xml')
    parser.add_argument('--imageShapeHW', help="The image shape (height, width). Default='(256, 256)'", default='(256, 256)')
    parser.add_argument('--outputDirectory', help="The output directory. Default: './outputs'", default='./outputs')
    parser.add_argument('--numberOfIndividuals', help="The number of individuals. Default: 200", type=int, default=200)
    parser.add_argument('--levelToFunctionProbabilityDict',
                        help="The probability to generate a function, at each level. Default: '{0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}'",
                        default='{0: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}')
    parser.add_argument('--proportionOfConstants',
                        help='The probability to generate a constant, when a variable could be used. Default: 0',
                        type=float, default=0)
    parser.add_argument('--constantCreationParametersList',
                        help="The parameters to use when creating constants: [minFloat, maxFloat, minInt, maxInt, width, height]. Default: '[-1, 1, 0, 255, 256, 256]'",
                        default='[-1, 1, 0, 255, 256, 256]')
    parser.add_argument('--numberOfGenerations', help="The number of generations to run. Default: 32", type=int,
                        default=32)
    parser.add_argument('--weightForNumberOfNodes',
                        help="Penalty term proportional to the number of nodes. Default: 0.001", type=float,
                        default=0.001)
    parser.add_argument('--numberOfTournamentParticipants',
                        help="The number of participants in selection tournaments. Default: 2", type=int, default=2)
    parser.add_argument('--mutationProbability', help="The probability to mutate a child. Default: 0.1", type=float,
                        default=0.1)
    parser.add_argument('--proportionOfNewIndividuals',
                        help="The proportion of randomly generates individuals per generation. Default: 0.1",
                        type=float, default=0.1)
    parser.add_argument('--maximumNumberOfMissedCreationTrials',
                        help="The maximum number if missed creation trials. Default: 1000", type=int, default=1000)
    parser.add_argument('--numberOfTrainingPairs', help="The number of generated training pairs, per epoch. Default: 160", type=int, default=160)
    args = parser.parse_args()

    # Dict/list/tuple arguments arrive as strings; parse them safely with
    # ast.literal_eval (no arbitrary code execution).
    imageShapeHW = ast.literal_eval(args.imageShapeHW)
    levelToFunctionProbabilityDict = ast.literal_eval(args.levelToFunctionProbabilityDict)
    constantCreationParametersList = ast.literal_eval(args.constantCreationParametersList)
    main(
        args.validationPairsDirectory,
        args.primitivesFilepath,
        imageShapeHW,
        args.outputDirectory,
        args.numberOfIndividuals,
        levelToFunctionProbabilityDict,
        args.proportionOfConstants,
        constantCreationParametersList,
        args.numberOfGenerations,
        args.weightForNumberOfNodes,
        args.numberOfTournamentParticipants,
        args.mutationProbability,
        args.proportionOfNewIndividuals,
        args.maximumNumberOfMissedCreationTrials,
        args.numberOfTrainingPairs
    )
a8ce1c766bdc35aa3ff590928db9ee364e92c9bf | 1,355 | py | Python | perplex/console.py | ofek/perplex | 339284ac7c54500ebf15eadbeb721d079b61f017 | [
"Apache-2.0",
"MIT"
] | null | null | null | perplex/console.py | ofek/perplex | 339284ac7c54500ebf15eadbeb721d079b61f017 | [
"Apache-2.0",
"MIT"
] | 1 | 2019-02-04T03:55:43.000Z | 2019-02-04T03:55:43.000Z | perplex/console.py | ofek/perplex | 339284ac7c54500ebf15eadbeb721d079b61f017 | [
"Apache-2.0",
"MIT"
] | null | null | null | import sys
from textwrap import indent as __indent_text
import click
CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help']}
UNKNOWN_OPTIONS = {'help_option_names': [], 'ignore_unknown_options': True}
DEFAULT_INDENT = 4
| 27.653061 | 84 | 0.672325 | import sys
from textwrap import indent as __indent_text
import click
CONTEXT_SETTINGS = {'help_option_names': ['-h', '--help']}
UNKNOWN_OPTIONS = {'help_option_names': [], 'ignore_unknown_options': True}
DEFAULT_INDENT = 4
def indent_text(text, indent):
    """Indent every line of *text* by *indent* spaces.

    Passing indent=True uses the module-wide DEFAULT_INDENT width.
    """
    width = DEFAULT_INDENT if indent is True else indent
    return __indent_text(text, ' ' * width)
def echo_info(text, nl=True, err=False, indent=None):
    """Print *text* in bold (optionally indented / to stderr)."""
    body = indent_text(text, indent) if indent else text
    click.secho(body, bold=True, nl=nl, err=err)
def echo_success(text, nl=True, err=False, indent=None):
    """Print *text* in bold cyan (optionally indented / to stderr)."""
    body = indent_text(text, indent) if indent else text
    click.secho(body, fg='cyan', bold=True, nl=nl, err=err)
def echo_failure(text, nl=True, err=False, indent=None):
    """Print *text* in bold red (optionally indented / to stderr)."""
    body = indent_text(text, indent) if indent else text
    click.secho(body, fg='red', bold=True, nl=nl, err=err)
def echo_warning(text, nl=True, err=False, indent=None):
    """Print *text* in bold yellow (optionally indented / to stderr)."""
    body = indent_text(text, indent) if indent else text
    click.secho(body, fg='yellow', bold=True, nl=nl, err=err)
def echo_waiting(text, nl=True, err=False, indent=None):
    """Print *text* in bold magenta (optionally indented / to stderr)."""
    body = indent_text(text, indent) if indent else text
    click.secho(body, fg='magenta', bold=True, nl=nl, err=err)
def abort(text=None, code=1, out=False):
    """Exit the process with *code*, optionally printing *text* in bold red.

    By default the message goes to stderr; pass out=True for stdout.
    """
    if text is None:
        sys.exit(code)
    click.secho(text, fg='red', bold=True, err=not out)
    sys.exit(code)
| 962 | 0 | 161 |
9579172d20e06597026d0e3b84c37a566bf42c90 | 5,503 | py | Python | warpseq/notation/note_parser.py | simian-terminal/warpseq | f61e68d1e6a9ad15a5e0c899237be784bcff7093 | [
"Apache-2.0"
] | 3 | 2021-01-22T01:20:20.000Z | 2022-03-10T20:58:42.000Z | warpseq/notation/note_parser.py | simianterminal/warpseq | f61e68d1e6a9ad15a5e0c899237be784bcff7093 | [
"Apache-2.0"
] | 1 | 2020-08-13T00:28:28.000Z | 2020-08-13T00:28:28.000Z | warpseq/notation/note_parser.py | simianterminal/warpseq | f61e68d1e6a9ad15a5e0c899237be784bcff7093 | [
"Apache-2.0"
] | null | null | null | # ------------------------------------------------------------------
# Warp Sequencer
# (C) 2020 Michael DeHaan <michael@michaeldehaan.net> & contributors
# Apache2 Licensed
# ------------------------------------------------------------------
# uses the code in smart.py or literal.py to evaluate a symbol that
# might be supported by either of those other classes. Also processes
# any mod expressions after those symbols. Used in clip evaluation.
from ..api.exceptions import *
from ..model.note import Note, NOTES, EQUIVALENCE
from ..model.chord import Chord, CHORD_TYPES
from .mod import ModExpression
import functools
import traceback
import re
import time
NOTE_SHORTCUT_REGEX = re.compile("([A-Za-z#]+)([0-9]*)")
CHORD_SYMBOLS = dict(
I = [ 1, 'major' ],
II = [ 2, 'major' ],
III = [ 3, 'major' ],
IV = [ 4, 'major' ],
V = [ 5, 'major' ],
VI = [ 6, 'major' ],
VII = [ 7, 'major' ],
i = [ 1, 'minor' ],
ii = [ 2, 'minor' ],
iii = [ 3, 'minor' ],
iv = [ 4, 'minor' ],
v = [ 5, 'minor' ],
vi = [ 6, 'minor' ],
vii = [ 7, 'minor' ],
)
CHORD_KEYS = CHORD_SYMBOLS.keys()
| 29.116402 | 126 | 0.531528 | # ------------------------------------------------------------------
# Warp Sequencer
# (C) 2020 Michael DeHaan <michael@michaeldehaan.net> & contributors
# Apache2 Licensed
# ------------------------------------------------------------------
# uses the code in smart.py or literal.py to evaluate a symbol that
# might be supported by either of those other classes. Also processes
# any mod expressions after those symbols. Used in clip evaluation.
from ..api.exceptions import *
from ..model.note import Note, NOTES, EQUIVALENCE
from ..model.chord import Chord, CHORD_TYPES
from .mod import ModExpression
import functools
import traceback
import re
import time
# Matches a literal note shortcut such as "C#4": group 1 is the note name
# (letters and '#'), group 2 the optional octave digits.
NOTE_SHORTCUT_REGEX = re.compile("([A-Za-z#]+)([0-9]*)")

# Roman-numeral chord symbols mapped to [scale degree, chord type];
# uppercase numerals are major chords, lowercase are minor.
CHORD_SYMBOLS = dict(
   I = [ 1, 'major' ],
   II = [ 2, 'major' ],
   III = [ 3, 'major' ],
   IV = [ 4, 'major' ],
   V  = [ 5, 'major' ],
   VI = [ 6, 'major' ],
   VII = [ 7, 'major' ],
   i = [ 1, 'minor' ],
   ii = [ 2, 'minor' ],
   iii = [ 3, 'minor' ],
   iv = [ 4, 'minor' ],
   v = [ 5, 'minor' ],
   vi = [ 6, 'minor' ],
   vii = [ 7, 'minor' ],
)
# Cached view of the recognized chord symbols, used by the parser.
CHORD_KEYS = CHORD_SYMBOLS.keys()
class ExpressionEvaluationError(Exception):
    """Raised when a note/mod expression cannot be evaluated."""
class NoteParser(object):
    """Evaluates clip slot symbols into Note/Chord objects.

    Symbols may be scale degrees ("1".."7"), Roman-numeral chords ("IV",
    "ii", "3:major"), literal note names ("C#4"), rests ("", "_", ".",
    "x"), ties ("-"), or negative scale offsets ("-2"); trailing tokens
    are treated as mod expressions and applied via ModExpression.
    setup() must be called before do().
    """

    # NOTE(review): _chord_scale and _literal are declared as slots but
    # never assigned anywhere in this class.
    __slots__ = [ 'scale', 'song', 'clip', 'track', 'pattern', '_chord_scale', '_literal', '_mod', '_slot_duration', '_notes']

    def __init__(self, scale=None, song=None, clip=None, track=None, pattern=None):
        """Store the scale/song/clip/track/pattern context for evaluation."""
        self.scale = scale
        self.song = song
        self.clip = clip
        self.track = track
        self.pattern = pattern

    def setup(self):
        """Build the mod evaluator and cache slot duration and scale notes."""
        self._mod = ModExpression(defer=False)
        # Duration (rounded) of one slot for this clip/pattern in this song.
        self._slot_duration = round(self.clip.slot_duration(self.song, self.pattern))
        self._notes = self.scale.get_notes()

    def do(self, sym, octave_shift):
        """
        Converts a symbol or list of symbols into an array of chords or notes.
        This uses a combination of the 'literal' and 'smart' evaluator and therefore
        does not exactly have the same API.

        Each returned note gets the clip's slot duration and is shifted by
        octave_shift; rests evaluate to None entries.
        """
        items = None
        if type(sym) == list:
            items = sym
        else:
            items = [ sym ]
        all_notes = []
        # Hoist instance lookups out of the per-symbol loop.
        sd = self._slot_duration
        scale = self.scale
        track = self.track
        mod = self._mod
        for sym in items:
            if sym is None:
                sym = ""
            sym = str(sym).strip()
            # First token is the symbol; the rest are mod expressions.
            tokens = sym.split(None)
            if sym:
                sym = tokens[0]
                mod_expressions = tokens[1:]
            else:
                sym = ''
                mod_expressions = ''
            (strategy, extra_mods) = self._get_strategy(sym)
            res = strategy(sym)
            if not mod_expressions:
                mod_expressions = []
            mod_expressions.extend(extra_mods)
            # A falsy result (rest) contributes a None placeholder.
            if not res:
                all_notes.extend([None])
                continue
            # Chords expand into their component notes.
            if type(res) == Chord:
                notes = res.notes
            else:
                notes = [ res ]
            for note in notes:
                if note:
                    note.length = sd
                    note.octave = note.octave + octave_shift
            if mod_expressions:
                new_notes = []
                for note in notes:
                    mod.scale = scale
                    mod.track = track
                    new_note = mod.do(note, mod_expressions)
                    new_notes.append(new_note)
                all_notes.extend(new_notes)
            else:
                all_notes.extend(notes)
        return all_notes

    def _get_strategy(self, sym):
        """Pick the evaluation strategy for *sym*.

        Returns a (strategy_callable, extra_mod_expressions) pair; the
        extra mods are appended to any mods parsed from the slot text.
        """
        # FIXME: a rest should be a real note and not NONE because we can affix other
        # mod expressions to it. We could consider it being a note with velocity 0?
        if sym.startswith("-"):
            if sym == "-":
                return (self._tie_strategy, [])
            elif sym[1:].isdigit():
                # "-N" becomes the scale root plus a scale-shift mod "S-N".
                return (self._root_strategy, ["S%s" % sym])
                #return self._scale_note_strategy
        elif sym.isdigit():
            return (self._scale_note_strategy, [])
        elif sym in [ "" , "_", ".", "x"]:
            return (self._rest_strategy, [])
        elif sym in CHORD_KEYS or ":" in sym:
            # I, IV, ivv, 3:major
            return (self._scale_chord_strategy, [])
        return (self._literal_note_strategy, [])

    def _root_strategy(self, sym):
        # Copy of the first (root) note of the current scale.
        return self._notes[0].copy()

    def _rest_strategy(self, sym):
        # Rests evaluate to None.
        return None

    def _tie_strategy(self, sym):
        # A tie note extends the previous slot's note.
        return Note(tie=True, name=None)

    def _scale_note_strategy(self, sym):
        # "N" selects the Nth note of the scale (1-based).
        return self._notes[int(sym)-1].copy()

    def _scale_chord_strategy(self, sym):
        """Build a Chord from a Roman numeral or "degree:type" symbol."""
        override_typ = None
        if ":" in sym:
            # "3:major" style: explicit chord type overrides the default.
            (sym, override_typ) = sym.split(":",1)
        chord_data = CHORD_SYMBOLS.get(sym, None)
        if chord_data is None:
            raise InvalidSymbol("do not know how to parse chord symbol: %s" % sym)
        (scale_num, typ) = chord_data
        if override_typ is not None:
            typ = override_typ
        return Chord(root=self._notes[int(scale_num) - 1].copy(), chord_type=typ)

    def _literal_note_strategy(self, sym):
        """Parse a literal note name like "C#4"; octave defaults to 4."""
        match = NOTE_SHORTCUT_REGEX.match(sym)
        name = match.group(1)
        octave = match.group(2)
        if octave:
            octave = int(octave)
        else:
            octave = 4
        return Note(name=name, octave=octave)
| 2,065 | 2,257 | 46 |
825bbdd6e2c230a5f9d91998cf96c86e20e2c234 | 10,887 | py | Python | cogs/fun/serverinfo.py | noaione/naoTimes | 39f3f1ae434baf4ff9f3ed4a19cbfd69f76f881d | [
"MIT"
] | 5 | 2019-06-14T01:29:46.000Z | 2021-02-08T08:21:24.000Z | cogs/fun/serverinfo.py | naoTimesdev/naoTimes | 39f3f1ae434baf4ff9f3ed4a19cbfd69f76f881d | [
"MIT"
] | 21 | 2021-03-26T08:31:45.000Z | 2022-03-26T10:15:25.000Z | cogs/fun/serverinfo.py | noaione/naoTimes | 39f3f1ae434baf4ff9f3ed4a19cbfd69f76f881d | [
"MIT"
] | 4 | 2019-06-26T14:18:09.000Z | 2021-02-08T08:21:39.000Z | import logging
from typing import Literal
import arrow
import discord
from discord.ext import commands
from naotimes.bot import naoTimesBot
from naotimes.context import naoTimesContext
from naotimes.converters import StealedEmote
IconLiteral = Literal[
"mfa_none",
"mfa_low",
"mfa_medium",
"mfa_high",
"mfa_extreme",
"boost",
"s_ol",
"s_off",
"s_idle",
"s_dnd",
]
| 42.034749 | 141 | 0.604758 | import logging
from typing import Literal
import arrow
import discord
from discord.ext import commands
from naotimes.bot import naoTimesBot
from naotimes.context import naoTimesContext
from naotimes.converters import StealedEmote
def humanize_size(num, mul=1024.0, suffix="B"):
for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]:
if abs(num) < mul:
return "%3.1f%s%s" % (num, unit, suffix)
num /= mul
return "%.1f%s %s" % (num, "Yi", suffix)
IconLiteral = Literal[
"mfa_none",
"mfa_low",
"mfa_medium",
"mfa_high",
"mfa_extreme",
"boost",
"s_ol",
"s_off",
"s_idle",
"s_dnd",
]
def fallback_custom_icons(icon_name: IconLiteral, customable: bool) -> str:
icon_name_maps = {
"mfa_none": "<:ntMFAL0:761931842923266050>",
"mfa_low": "<:ntMFAL1:761931852788924418>",
"mfa_medium": "<:ntMFAL2:761931862695870475>",
"mfa_high": "<:ntMFAL3:761931871708905483>",
"mfa_extreme": "<:ntMFAL4:761931880949219388>",
"boost": "<:ntIconBoost:761958456865062923>",
"s_ol": "<:ntStatL3:761945479511670794>",
"s_off": "<:ntStatL0:761945452987285545>",
"s_idle": "<:ntStatL2:761945472432209940>",
"s_dnd": "<:ntStatL1:761945462424338493>",
}
fallback_name_maps = {
"mfa_none": "0️⃣",
"mfa_low": "1️⃣",
"mfa_medium": "2️⃣",
"mfa_high": "3️⃣",
"mfa_extreme": "4️⃣",
"boost": "🚀",
"s_ol": "🟢",
"s_off": "⚫",
"s_idle": "🟡",
"s_dnd": "🔴",
}
if customable:
return icon_name_maps.get(icon_name, "")
return fallback_name_maps.get(icon_name, "")
class FunServerInfo(commands.Cog):
def __init__(self, bot: naoTimesBot):
self.bot = bot
self.logger = logging.getLogger("Fun.ServerInfo")
@commands.command(name="serverinfo", aliases=["si"])
@commands.guild_only()
async def _fun_server_info(self, ctx: naoTimesContext):
the_guild: discord.Guild = ctx.guild
all_channels = the_guild.channels
bot_member = the_guild.get_member(self.bot.user.id)
bot_permissions = bot_member.guild_permissions
real_bot_perms = []
for perm_name, perm_val in bot_permissions:
if perm_val:
real_bot_perms.append(perm_name)
can_use_custom = False
emote_guild = self.bot.get_guild(761916689113284638)
if "external_emojis" in real_bot_perms and emote_guild is not None:
can_use_custom = True
mfa_levels_map = {
"none": f"{fallback_custom_icons('mfa_none', can_use_custom)} Tidak ada",
"low": f"{fallback_custom_icons('mfa_low', can_use_custom)} Rendah (Surel harus terverifikasi)",
"medium": f"{fallback_custom_icons('mfa_medium', can_use_custom)} Menengah (Terdaftar di Discord selama 5 menit)", # noqa: E501
"high": f"{fallback_custom_icons('mfa_high', can_use_custom)} Tinggi (Berada di peladen ini selama 10 menit)", # noqa: E501
"extreme": f"{fallback_custom_icons('mfa_extreme', can_use_custom)} Tertinggi (Nomor telepon harus terverifikasi)", # noqa: E501
}
region_map = {
"amsterdam": "🇳🇱 Amsterdam",
"brazil": "🇧🇷 Brasil",
"dubai": "🇪🇬 Dubai",
"europe": "🇪🇺 Eropa",
"eu_central": "🇪🇺 Eropa Tengah",
"eu_west": "🇪🇺 Eropa Barat",
"frankfurt": "🇩🇪 Frankfurt",
"hongkong": "🇭🇰 Hong Kong",
"india": "🇮🇳 India",
"japan": "🇯🇵 Jepang",
"london": "🇬🇧 London",
"russia": "🇷🇺 Rusia",
"singapore": "🇸🇬 Singapura",
"southafrica": "🇿🇦 Afrika Selatan",
"south_korea": "🇰🇷 Korea Selatan",
"sydney": "🇦🇺 Sidney",
"us_central": "🇺🇸 Amerika Tengah",
"us_east": "🇺🇸 Amerika Timur",
"us_south": "🇺🇸 Amerika Selatan",
"us_west": "🇺🇸 Amerika Barat",
"vip_amsterdam": "🇳🇱 Amsterdam (💳 VIP)",
"vip_us_east": "🇺🇸 Amerika Timur (💳 VIP)",
"vip_us_west": "🇺🇸 Amerika Barat (💳 VIP)",
}
text_channels = voice_channels = news_channels = stage_channels = []
for channel in all_channels:
if channel.type == discord.ChannelType.text:
text_channels.append(channel)
elif channel.type == discord.ChannelType.voice:
voice_channels.append(channel)
elif channel.type == discord.ChannelType.news:
news_channels.append(channel)
elif channel.type == discord.ChannelType.stage_voice:
stage_channels.append(channel)
total_channels = len(text_channels) + len(voice_channels) + len(news_channels) + len(stage_channels)
channels_data = []
channels_data.append(f"⌨ **{len(text_channels)}** kanal teks")
channels_data.append(f"🔉 **{len(voice_channels)}** kanal suara")
if len(news_channels) > 0:
channels_data.append(f"📰 **{len(news_channels)}** kanal berita")
if len(stage_channels) > 0:
channels_data.append(f"📽 **{len(stage_channels)}** kanal panggung")
verification_level = mfa_levels_map.get(str(the_guild.verification_level))
mfa_status = "✔" if the_guild.mfa_level == 1 else "❌"
vc_region = region_map.get(the_guild.region.name, "Otomatis")
creation_date = arrow.get(the_guild.created_at).format("dddd[,] DD MMMM YYYY [@] HH[:]mm[:]ss")
server_members = the_guild.members
bot_accounts = []
online_users = idle_users = dnd_users = offline_users = invisible_users = []
for member in server_members:
if member.bot:
bot_accounts.append(member)
continue
if member.status == discord.Status.online:
online_users.append(member)
elif member.status == discord.Status.idle:
idle_users.append(member)
elif member.status == discord.Status.dnd:
dnd_users.append(member)
elif member.status == discord.Status.offline:
offline_users.append(member)
elif member.status == discord.Status.invisible:
invisible_users.append(member)
server_features = the_guild.features
server_type = "Peladen Pribadi"
if "PUBLIC" in server_features or "DISCOVERABLE" in server_features:
server_type = "Peladen Publik"
if "COMMUNITY" in server_features:
server_type = server_type.replace("Peladen", "Komunitas")
if "VERIFIED" in server_features:
server_type = f"✅ {server_type} **[Terverifikasi]**"
if "PARTNERED" in server_features:
server_type += " **[Berpartner]**"
server_type = f"🤝 {server_type}"
extra_infos_data = []
boost_count = the_guild.premium_subscription_count
if boost_count > 0:
boost_lvl = the_guild.premium_tier
text_data = fallback_custom_icons("boost", can_use_custom) + f" Level **{boost_lvl}**"
text_data += f" (**{boost_count}** boosts)"
extra_infos_data.append(text_data)
server_bits_and_guts = []
file_limit = humanize_size(the_guild.filesize_limit)
bitrate_limit = humanize_size(the_guild.bitrate_limit, 1000.0)
server_bits_and_guts.append(f"☺ **{the_guild.emoji_limit}** emojis limit")
server_bits_and_guts.append(f"🎞 **{file_limit}** file limit")
server_bits_and_guts.append(f"🎵 **{bitrate_limit}** bitrate limit")
extra_infos_data.append(" | ".join(server_bits_and_guts))
all_invites = []
try:
invite_url = await the_guild.invites()
for invite in invite_url:
if invite.max_uses is not None and invite.max_age is not None:
all_invites.append(f"👉 Invite: {invite.url}")
break
if "VANITY_URL" in server_features:
vanity_invite = await the_guild.vanity_invite()
if vanity_invite is not None:
all_invites.append(f"✨ Vanity Invite: {vanity_invite.url}")
except discord.Forbidden:
pass
embed = discord.Embed(colour=0xF7E43)
embed.set_author(name=the_guild.name, icon_url=the_guild.icon)
description = []
description.append(server_type)
description.append(f"👑 **Penguasa**: {self.bot.is_mentionable(ctx, the_guild.owner)}")
description.append(f"📅 **Dibuat**: {creation_date}")
description.append(vc_region)
user_data = []
user_data.append(
f"{fallback_custom_icons('s_ol', can_use_custom)} **{len(online_users)}** Daring | "
f"{fallback_custom_icons('s_off', can_use_custom)} **{len(offline_users)}** Luring"
)
user_data.append(
f"{fallback_custom_icons('s_idle', can_use_custom)} **{len(idle_users)}** Idle | "
f"{fallback_custom_icons('s_dnd', can_use_custom)} **{len(dnd_users)}** DnD"
)
user_data.append(f"🤖 **{len(bot_accounts)}** Bot")
embed.description = "\n".join(description)
embed.set_thumbnail(url=the_guild.icon)
if "INVITE_SPLASH" in server_features and the_guild.splash:
embed.set_image(url=the_guild.splash)
embed.add_field(name=f"Member [{len(server_members)}]", value="\n".join(user_data), inline=False)
embed.add_field(name=f"Kanal [{total_channels}]", value="\n".join(channels_data), inline=False)
embed.add_field(
name="Level Verifikasi",
value=f"{verification_level}\n**2FA** Enabled? {mfa_status}",
inline=False,
)
if all_invites:
embed.add_field(name="Invite Link", value="\n".join(all_invites), inline=False)
if extra_infos_data:
embed.add_field(name="Info Ekstra", value="\n".join(extra_infos_data), inline=False)
footer_part = f"💻 ID: {the_guild.id}"
if the_guild.shard_id is not None:
footer_part += f" | 🔮 Shard: {the_guild.shard_id}"
embed.set_footer(text=footer_part)
await ctx.send(embed=embed)
@commands.command(name="bigemote", aliases=["be", "bigemoji"])
async def _fun_server_bigemote(self, ctx: naoTimesContext, emoji: StealedEmote):
fmt_msg = f"`:{emoji.name}:`\n{emoji.url}"
await ctx.send(fmt_msg)
@_fun_server_bigemote.error
async def _fun_server_bigemote_error(self, ctx: naoTimesContext, error: Exception):
if isinstance(error, commands.ConversionError):
return await ctx.send("Gagal mendapatkan emote yang dimaksud.")
def setup(bot: naoTimesBot):
bot.add_cog(FunServerInfo(bot))
| 10,309 | 303 | 92 |
22fe8140a853e2665e9f05c4d14792ff02e2d81a | 4,702 | py | Python | osbot_aws/helpers/Lambda_Package.py | artem7902/OSBot-AWS | 4b676b8323f18d3d9809d41263f3a71745ec2828 | [
"Apache-2.0"
] | null | null | null | osbot_aws/helpers/Lambda_Package.py | artem7902/OSBot-AWS | 4b676b8323f18d3d9809d41263f3a71745ec2828 | [
"Apache-2.0"
] | null | null | null | osbot_aws/helpers/Lambda_Package.py | artem7902/OSBot-AWS | 4b676b8323f18d3d9809d41263f3a71745ec2828 | [
"Apache-2.0"
] | null | null | null | import os
import importlib
import pbx_gs_python_utils # needed for dependency import
from osbot_aws.Globals import Globals
from pbx_gs_python_utils.utils.Files import Files
from osbot_aws.tmp_utils.Temp_Files import Temp_Files
from osbot_aws.apis.Lambda import Lambda
from osbot_aws.apis.test_helpers.Temp_Aws_Roles import Temp_Aws_Roles
| 42.36036 | 151 | 0.618248 | import os
import importlib
import pbx_gs_python_utils # needed for dependency import
from osbot_aws.Globals import Globals
from pbx_gs_python_utils.utils.Files import Files
from osbot_aws.tmp_utils.Temp_Files import Temp_Files
from osbot_aws.apis.Lambda import Lambda
from osbot_aws.apis.test_helpers.Temp_Aws_Roles import Temp_Aws_Roles
class Lambda_Package:
def __init__(self,lambda_name):
self.lambda_name = lambda_name
self.aws_lambda = Lambda(self.lambda_name)
self.s3_bucket = Globals.lambda_s3_bucket
self.s3_key = f'{Globals.lambda_s3_key_prefix}/{self.lambda_name}.zip'
self.role_arn = Temp_Aws_Roles().for_lambda_invocation()
self.tmp_folder = Files.temp_folder('tmp_lambda_')
(self.aws_lambda.set_s3_bucket(self.s3_bucket )
.set_s3_key (self.s3_key )
.set_role (self.role_arn )
.set_folder_code (self.tmp_folder ))
# helper methods
@staticmethod
def get_root_folder():
return Files.path_combine(__file__, '../..')
# Lambda class wrappers
def create(self ): return self.aws_lambda.create()
def delete(self ): return self.aws_lambda.delete()
def invoke(self, params=None): return self.aws_lambda.invoke(params)
def update(self ): return self.aws_lambda.update()
def reset (self ): return self.aws_lambda.update_lambda_code()
# main methods
def add_file(self, source):
Files.copy(source, self.tmp_folder)
def add_folder(self, source):
destination = Files.path_combine(self.tmp_folder,Files.file_name(source))
Temp_Files.folder_copy(source, destination)
self.remove_files('__pycache__')
return self
def add_module(self,module_name):
module_path = importlib.import_module(module_name).__path__[0] # get folder of module
self.add_folder(module_path) # add module's folder
return self
def add_modules(self, modules_names):
for module_name in modules_names:
self.add_module(module_name)
return self
def add_root_folder(self):
self.add_folder(self.get_root_folder())
return self
def add_pbx_gs_python_utils(self):
self.add_module('osbot_utils') # todo: when all dependencies on pbx_gs_python_utils have been removed
lib_path = Files.folder_name(pbx_gs_python_utils.__file__) # refactor this to add_osbot_utils and remove pbx_gs_python_utils from codebase
self.add_folder(lib_path)
return self
def arn(self):
return self.aws_lambda.function_Arn()
def get_files(self):
all_files = []
for root, dirs, files in os.walk(self.tmp_folder):
for file in files:
file_path = Files.path_combine(root,file).replace(self.tmp_folder,'')
all_files.append(file_path)
return all_files
def remove_files(self,pattern):
for file in self.get_files():
if pattern in file:
file_to_delete = Files.path_combine(self.tmp_folder,file[1:])
Files.delete(file_to_delete)
def use_lambda_file(self,lambda_file):
file_path = Files.path_combine(self.get_root_folder(), lambda_file)
if Files.exists(file_path) is False:
return { 'status': 'error', 'data': 'could not find lambda file `{0}` in root folder `{1}`'.format(lambda_file, self.get_root_folder())}
target_file = Files.path_combine(self.tmp_folder, '{0}.py'.format(self.lambda_name))
Files.copy(file_path,target_file)
return { 'status': 'ok', 'file_path': file_path, 'target_file': target_file }
def update_with_root_folder(self,delete_before=False):
if delete_before:
self.delete()
self.add_root_folder()
self.add_pbx_gs_python_utils()
self.update()
return self
def update_code(self):
base_folder = self.lambda_name.split('.').pop(0) # will point to the top level module
self.add_module(base_folder) # add module's folder
self.add_root_folder() # add osbot-aws folder
self.add_pbx_gs_python_utils() # add pbx-gs-python-utils folder
self.update() # zip files up and update lambda
return self
| 3,689 | 595 | 23 |
51839f269df7ae03472ba65de055ebac95e5ea7b | 6,480 | py | Python | dji_asdk_to_python/mission_control/waypoint/waypoint_mission_operator.py | msanchezc/dji-asdk-to-python | cf3e56691524624314a28f5ebc6f3f59cbd4d8cb | [
"BSD-3-Clause"
] | null | null | null | dji_asdk_to_python/mission_control/waypoint/waypoint_mission_operator.py | msanchezc/dji-asdk-to-python | cf3e56691524624314a28f5ebc6f3f59cbd4d8cb | [
"BSD-3-Clause"
] | null | null | null | dji_asdk_to_python/mission_control/waypoint/waypoint_mission_operator.py | msanchezc/dji-asdk-to-python | cf3e56691524624314a28f5ebc6f3f59cbd4d8cb | [
"BSD-3-Clause"
] | 2 | 2021-01-05T13:25:25.000Z | 2022-01-29T06:02:35.000Z | from dji_asdk_to_python.utils.message_builder import MessageBuilder
from dji_asdk_to_python.errors import DJIError
from dji_asdk_to_python.utils.shared import checkParameters
from dji_asdk_to_python.utils.socket_utils import SocketUtils
| 30.280374 | 117 | 0.605401 | from dji_asdk_to_python.utils.message_builder import MessageBuilder
from dji_asdk_to_python.errors import DJIError
from dji_asdk_to_python.utils.shared import checkParameters
from dji_asdk_to_python.utils.socket_utils import SocketUtils
class WaypointMissionOperator:
def __init__(self, app_ip):
self.app_ip = app_ip
# ------------------------------ PREPARATION ------------------------
def loadMission(self, mission, callback=None, timeout=10):
"""Loads the WaypointMission into device memory.
This also verifies all the information of mission"""
checkParameters(callback=callback,
method_name="loadMission", timeout=timeout)
message = MessageBuilder.build_message(
message_method=MessageBuilder.LOAD_MISSION,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data={"data": mission.__dict__},
)
blocking = True
return_type = DJIError
return SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=callback,
timeout=timeout,
return_type=return_type,
blocking=blocking,
)
def getLoadedMission(self, callback=None, timeout=10):
"""Gets the currently loaded mission of the operator"""
checkParameters(
callback=callback, method_name="getLoadedMission", timeout=timeout
)
message = MessageBuilder.build_message(
message_method=MessageBuilder.GET_LOADED_MISSION,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data=None,
)
return_type = str
SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=callback,
timeout=timeout,
return_type=return_type,
)
def uploadMission(self, callback=None, timeout=10):
"""Starts to upload the getLoadedMission to the aircraft.
It can only be called when the getLoadedMission is complete and the getCurrentState is READY_TO_UPLOAD"""
checkParameters(callback=callback,
method_name="uploadMission", timeout=timeout)
message = MessageBuilder.build_message(
message_method=MessageBuilder.UPLOAD_MISSION,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data=None,
)
return_type = DJIError
SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=callback,
timeout=timeout,
return_type=return_type,
)
def retryUploadMission(self, callback=None, timeout=10):
"""Retry upload waypoint mission"""
checkParameters(
callback=callback, method_name="retryUploadMission", timeout=timeout
)
message = MessageBuilder.build_message(
message_method=MessageBuilder.RETRY_UPLOAD_MISSION,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data=None,
)
return_type = DJIError
SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=callback,
timeout=timeout,
return_type=return_type,
)
# ------------------------ MISSION EXECUTION --------------------
def startMission(self, callback=None, timeout=10):
"""Starts the uploaded mission"""
checkParameters(callback=callback,
method_name="startMission", timeout=timeout)
message = MessageBuilder.build_message(
message_method=MessageBuilder.START_MISSION,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data=None,
)
blocking = callback is None
return_type = DJIError
return SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=callback,
timeout=timeout,
return_type=return_type,
blocking=blocking,
)
def setAutoFlightSpeed(self, speed, callback=None, timeout=10):
"""Set the flight speed while the mission is executing automatically (without manual joystick speed input)"""
checkParameters(
callback=callback, method_name="setAutoFlightSpeed", timeout=timeout
)
message = MessageBuilder.build_message(
message_method=MessageBuilder.SET_AUTO_FLIGHT_SPEED,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data={"speed": speed},
)
return_type = DJIError
SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=callback,
timeout=timeout,
return_type=return_type,
)
# ------------------------------ LISTENER ------------------------------
def addListener(self, listener):
"""
Add listener to listen for events
Args:
listener (WaypointMissionOperatorListener): An WaypointMissionOperatorListener instance
"""
message = MessageBuilder.build_message(
message_method=MessageBuilder.ADD_LISTENER,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data={},
)
return_type = DJIError
result = SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=None,
timeout=10,
return_type=return_type,
listener=listener,
)
return result
def removeListener(self, listener):
"""
Remove waypoint mission operator listener
Args:
listener (WaypointMissionOperatorListener): An WaypointMissionOperatorListener instance
"""
message = MessageBuilder.build_message(
message_method=MessageBuilder.REMOVE_LISTENER,
message_class=MessageBuilder.WAYPOINT_MISSION_OPERATOR,
message_data={},
)
return_type = bool
result = SocketUtils.send(
message=message,
app_ip=self.app_ip,
callback=None,
timeout=10,
return_type=return_type,
blocking=True,
)
listener._close()
return result
| 35 | 6,184 | 23 |
69629f19814a96f977f1888e6f76aeae98a42e1a | 1,065 | py | Python | plunk/sb/pydantic_for_front/pydantic_streamlit_input.py | otosense/plunk | dccce4fcac548d937147de7d05639ead66fe5ef3 | [
"Apache-2.0"
] | null | null | null | plunk/sb/pydantic_for_front/pydantic_streamlit_input.py | otosense/plunk | dccce4fcac548d937147de7d05639ead66fe5ef3 | [
"Apache-2.0"
] | 1 | 2022-02-07T22:25:53.000Z | 2022-02-07T22:25:53.000Z | plunk/sb/pydantic_for_front/pydantic_streamlit_input.py | otosense/plunk | dccce4fcac548d937147de7d05639ead66fe5ef3 | [
"Apache-2.0"
] | null | null | null | # streamlit run pydantic_streamlit_input.py
import streamlit as st
from pydantic import BaseModel
from typing import List, Dict
import streamlit_pydantic as sp
from pydantic import BaseModel, Field, HttpUrl
from enum import Enum
st.title("Use of pydantic for complex inputs")
data = sp.pydantic_input(key="my_form", model=ExampleModel)
if data:
st.write(data)
| 22.659574 | 64 | 0.676995 | # streamlit run pydantic_streamlit_input.py
import streamlit as st
from pydantic import BaseModel
from typing import List, Dict
import streamlit_pydantic as sp
from pydantic import BaseModel, Field, HttpUrl
from enum import Enum
class SelectionValue(str, Enum):
FOO = "foo"
BAR = "bar"
class NestedData(BaseModel):
text: str
integerchoice: List[int]
class ExampleModel(BaseModel):
some_text: str
some_number: int
some_boolean: bool
some_list: List[int]
some_dict: Dict[str, int]
integer_in_range: int = Field(
20,
ge=10,
lt=30,
multiple_of=2,
description="Number property with a limited range.",
)
single_selection: SelectionValue = Field(
..., description="Only select a single item from a set."
)
nested_choice: NestedData = Field(
...,
description="Another object embedded into this model.",
)
st.title("Use of pydantic for complex inputs")
data = sp.pydantic_input(key="my_form", model=ExampleModel)
if data:
st.write(data)
| 0 | 626 | 69 |
265958c170e78d4c4ebdf0d711457cfd0edddbb5 | 2,638 | py | Python | src/openlets/settings/__init__.py | dnephin/openLETS | e5d14de1c4483dc2c47900ce521151c39b881543 | [
"BSD-3-Clause"
] | 6 | 2015-10-30T08:08:42.000Z | 2021-07-26T17:12:08.000Z | src/openlets/settings/__init__.py | dnephin/openLETS | e5d14de1c4483dc2c47900ce521151c39b881543 | [
"BSD-3-Clause"
] | null | null | null | src/openlets/settings/__init__.py | dnephin/openLETS | e5d14de1c4483dc2c47900ce521151c39b881543 | [
"BSD-3-Clause"
] | 4 | 2017-09-21T20:07:07.000Z | 2020-10-18T02:32:14.000Z | # Django settings for openlets project.
import os
# Environment path should match settings/<module name>
ENVIRONMENT = os.environ.get('OPENLETS_ENV', 'dev')
if ENVIRONMENT == 'dev':
from .dev import *
elif ENVIRONMENT == 'stage':
from .stage import *
elif ENVIRONMENT == 'prod':
from .prod import *
else:
raise ImportError("Unknown environment: %s" % ENVIRONMENT)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, 'www_root')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/m/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
# Additional locations of static files
STATICFILES_DIRS = (
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'openlets.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, "openletsweb/templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.markup',
'openlets.core',
'openlets.openletsweb'
)
# Default post-login url
LOGIN_REDIRECT_URL = '/home'
# User Profile class
AUTH_PROFILE_MODULE = 'core.Person'
| 28.989011 | 79 | 0.756255 | # Django settings for openlets project.
import os
# Environment path should match settings/<module name>
ENVIRONMENT = os.environ.get('OPENLETS_ENV', 'dev')
if ENVIRONMENT == 'dev':
from .dev import *
elif ENVIRONMENT == 'stage':
from .stage import *
elif ENVIRONMENT == 'prod':
from .prod import *
else:
raise ImportError("Unknown environment: %s" % ENVIRONMENT)
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, 'www_root')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/m/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
# Additional locations of static files
STATICFILES_DIRS = (
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'openlets.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_PATH, "openletsweb/templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.markup',
'openlets.core',
'openlets.openletsweb'
)
# Default post-login url
LOGIN_REDIRECT_URL = '/home'
# User Profile class
AUTH_PROFILE_MODULE = 'core.Person'
| 0 | 0 | 0 |
6e5e38a416d68035c6732c4ed32a7568bd3dcd82 | 1,137 | py | Python | lightly/models/__init__.py | lightly-ai/lightly | 0b98bda640d13d842fd13f9354271d0cef116ba5 | [
"MIT"
] | 1,515 | 2020-10-05T13:04:17.000Z | 2022-03-31T16:14:55.000Z | lightly/models/__init__.py | lightly-ai/lightly | 0b98bda640d13d842fd13f9354271d0cef116ba5 | [
"MIT"
] | 628 | 2020-10-14T11:38:51.000Z | 2022-03-31T14:40:54.000Z | lightly/models/__init__.py | lightly-ai/lightly | 0b98bda640d13d842fd13f9354271d0cef116ba5 | [
"MIT"
] | 108 | 2020-10-17T08:31:06.000Z | 2022-03-20T16:44:22.000Z | """The lightly.models package provides model implementations.
*Note that the high-level building blocks will be deprecated with
lightly version 1.2.0. Instead, use low-level building blocks to build the
models yourself.
Have a look at the benchmark code to see a reference implementation:*
`lightly benchmarks <https://github.com/lightly-ai/lightly/tree/master/docs/source/getting_started/benchmarks>`_
The package contains an implementation of the commonly used ResNet and
adaptations of the architecture which make self-supervised learning simpler.
The package also hosts the Lightly model zoo - a list of downloadable ResNet
checkpoints.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.models.resnet import ResNetGenerator
from lightly.models.barlowtwins import BarlowTwins
from lightly.models.simclr import SimCLR
from lightly.models.simsiam import SimSiam
from lightly.models.byol import BYOL
from lightly.models.moco import MoCo
from lightly.models.nnclr import NNCLR
from lightly.models.zoo import ZOO
from lightly.models.zoo import checkpoints
from lightly.models import utils | 37.9 | 112 | 0.819701 | """The lightly.models package provides model implementations.
*Note that the high-level building blocks will be deprecated with
lightly version 1.2.0. Instead, use low-level building blocks to build the
models yourself.
Have a look at the benchmark code to see a reference implementation:*
`lightly benchmarks <https://github.com/lightly-ai/lightly/tree/master/docs/source/getting_started/benchmarks>`_
The package contains an implementation of the commonly used ResNet and
adaptations of the architecture which make self-supervised learning simpler.
The package also hosts the Lightly model zoo - a list of downloadable ResNet
checkpoints.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.models.resnet import ResNetGenerator
from lightly.models.barlowtwins import BarlowTwins
from lightly.models.simclr import SimCLR
from lightly.models.simsiam import SimSiam
from lightly.models.byol import BYOL
from lightly.models.moco import MoCo
from lightly.models.nnclr import NNCLR
from lightly.models.zoo import ZOO
from lightly.models.zoo import checkpoints
from lightly.models import utils | 0 | 0 | 0 |
b5698f415ec536a1b26ca4dda1be8db3b1ca905e | 227 | py | Python | commands/lcd.py | bad-hombres/thug-shell | 02aabd0c3023ac84b24077fc963fe1a35f0298e7 | [
"MIT"
] | null | null | null | commands/lcd.py | bad-hombres/thug-shell | 02aabd0c3023ac84b24077fc963fe1a35f0298e7 | [
"MIT"
] | null | null | null | commands/lcd.py | bad-hombres/thug-shell | 02aabd0c3023ac84b24077fc963fe1a35f0298e7 | [
"MIT"
] | null | null | null | import os
def server_lcd(cmd, data):
"""Changes the local directory
Usage: lcd /dir
"""
pass
| 11.947368 | 36 | 0.581498 | import os
def client_lcd(cmd):
pass
def server_lcd(cmd, data):
"""Changes the local directory
Usage: lcd /dir
"""
pass
def server_lcd_custom(cmd):
d = " ".join(cmd.split(" ")[1:])
os.chdir(d)
| 67 | 0 | 46 |
0c55fcc0a8d78ae463daee6c916428383f8d0bd4 | 1,337 | py | Python | tdw_image_dataset/image_position.py | alters-mit/tdw_image_dataset | 1f6fccaa8057080c1fc3fb289bdea0d53712aea1 | [
"MIT"
] | 1 | 2022-02-03T15:06:49.000Z | 2022-02-03T15:06:49.000Z | tdw_image_dataset/image_position.py | alters-mit/tdw_image_dataset | 1f6fccaa8057080c1fc3fb289bdea0d53712aea1 | [
"MIT"
] | 1 | 2022-02-03T15:52:53.000Z | 2022-02-03T16:00:06.000Z | tdw_image_dataset/image_position.py | alters-mit/tdw_image_dataset | 1f6fccaa8057080c1fc3fb289bdea0d53712aea1 | [
"MIT"
] | null | null | null | from typing import Dict
class ImagePosition:
"""
The positions and rotations of the avatar and object for an image.
Positions are stored as (x, y, z) dictionaries, for example: `{"x": 0, "y": 0, "z": 0}`.
Rotations are stored as (x, y, z, w) dictionaries, for example: `{"x": 0, "y": 0, "z": 0, "w": 1}`.
"""
def __init__(self, avatar_position: Dict[str, float],
camera_rotation: Dict[str, float],
object_position: Dict[str, float],
object_rotation: Dict[str, float]):
"""
:param avatar_position: The position of the avatar.
:param camera_rotation: The rotation of the avatar.
:param object_position: The position of the object.
:param object_rotation: The rotation of the object.
"""
""":field
The position of the avatar.
"""
self.avatar_position: Dict[str, float] = avatar_position
""":field
The rotation of the avatar.
"""
self.camera_rotation: Dict[str, float] = camera_rotation
""":field
The position of the object.
"""
self.object_position: Dict[str, float] = object_position
""":field
The rotation of the object.
"""
self.object_rotation: Dict[str, float] = object_rotation
| 34.282051 | 103 | 0.583396 | from typing import Dict
class ImagePosition:
"""
The positions and rotations of the avatar and object for an image.
Positions are stored as (x, y, z) dictionaries, for example: `{"x": 0, "y": 0, "z": 0}`.
Rotations are stored as (x, y, z, w) dictionaries, for example: `{"x": 0, "y": 0, "z": 0, "w": 1}`.
"""
def __init__(self, avatar_position: Dict[str, float],
camera_rotation: Dict[str, float],
object_position: Dict[str, float],
object_rotation: Dict[str, float]):
"""
:param avatar_position: The position of the avatar.
:param camera_rotation: The rotation of the avatar.
:param object_position: The position of the object.
:param object_rotation: The rotation of the object.
"""
""":field
The position of the avatar.
"""
self.avatar_position: Dict[str, float] = avatar_position
""":field
The rotation of the avatar.
"""
self.camera_rotation: Dict[str, float] = camera_rotation
""":field
The position of the object.
"""
self.object_position: Dict[str, float] = object_position
""":field
The rotation of the object.
"""
self.object_rotation: Dict[str, float] = object_rotation
| 0 | 0 | 0 |
a6f78d66d6881f93206e9742562d77cb69de942d | 4,117 | py | Python | tests/run_visual_test_framed_text.py | underwatergrasshopper/PyUnderGUI | 9a3107bbcf04168eb131a6dae5d50ff35b00ea7f | [
"MIT"
] | null | null | null | tests/run_visual_test_framed_text.py | underwatergrasshopper/PyUnderGUI | 9a3107bbcf04168eb131a6dae5d50ff35b00ea7f | [
"MIT"
] | null | null | null | tests/run_visual_test_framed_text.py | underwatergrasshopper/PyUnderGUI | 9a3107bbcf04168eb131a6dae5d50ff35b00ea7f | [
"MIT"
] | null | null | null | import TestKit
from UnderGUI import *
from UnderGUI._Private import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
################################################################################
WIDTH = 800
HEIGHT = 600
g = Global()
################################################################################
################################################################################
if __name__ == "__main__":
glutInit()
glutInitDisplayMode(GLUT_RGBA)
glutInitWindowSize(WIDTH, HEIGHT)
x = int((glutGet(GLUT_SCREEN_WIDTH) - WIDTH) / 2)
y = int((glutGet(GLUT_SCREEN_HEIGHT) - HEIGHT) / 2)
glutInitWindowPosition(x, y)
window = glutCreateWindow(b"OpenGL Window")
glutDisplayFunc(display)
glutIdleFunc(do_on_idle)
glutMouseFunc(do_on_mouse)
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION)
create()
glutMainLoop()
destroy() | 30.954887 | 173 | 0.617197 | import TestKit
from UnderGUI import *
from UnderGUI._Private import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
################################################################################
WIDTH = 800
HEIGHT = 600
class Global:
is_redraw = False
shunter = None
font_fetcher = None
font = None
font2 = None
big_font = None
text_drawer = None
framed_text = None
scroll_test_framed_text = None
g = Global()
################################################################################
def create():
global g
g.shunter = Shunter()
g.font_fetcher = FontFetcher()
g.font_fetcher.add_font_source("Courier New", FontSource(normal_url = "cour.ttf", bold_url = "courbd.ttf", italic_url = "couri.ttf", bold_and_italic_url = "courbi.ttf"))
g.font_fetcher.add_font_source("Arial", FontSource(normal_url = "arial.ttf", bold_url = "arialbd.ttf", italic_url = "ariali.ttf", bold_and_italic_url = "arialbi.ttf"))
g.font_fetcher.set_font_texture_minimal_size(Size(512, 512))
g.font_fetcher.add_glyph_block_group(UnicodeBlockGroup.EUROPE)
g.font = Font(g.font_fetcher, FontInfo("Courier New", 16, size_unit = SizeUnit.PIXEL))
g.font2 = Font(g.font_fetcher, FontInfo("Arial", 16, size_unit = SizeUnit.PIXEL))
g.big_font = Font(g.font_fetcher, FontInfo("Courier New", 32, style = FontStyle.BOLD))
g.text_drawer = TextDrawer(g.font)
g.shunter.setup_window_client_area(Size(WIDTH, HEIGHT))
g.framed_text = FramedText(
area = Area(0, 0, 0, 0),
text = "This is some real text for wrapping and other stuff.\nAnd another line with very-much-long-word.\n\t*Option 1.\n\t*Option 2.",
font = g.font,
tint = ColorF(1, 1, 1),
background_color = ColorF(0, 0, 0, 0.2)
)
g.scroll_test_framed_text = FramedText(
area = Area(510, 200, 100, 150),
text = "This is some real text for wrapping and other stuff.\nAnd another line with very-much-long-word.\n\t*Option 1.\n\t*Option 2.",
font = g.font,
tint = ColorF(1, 1, 1),
background_color = ColorF(0, 0, 0, 0.2),
is_restrict_draw_to_area = False
)
g.scroll_test_framed_text.scroll_text_to(ScrollPlace.END)
def destroy():
global g
def display():
global g
g.shunter.setup_draw(ColorF(0, 0, 0.5))
g.framed_text.set_area(Area(10, 200, 150, 300))
g.framed_text.draw()
g.framed_text.set_pos(Pos(210, 200))
g.framed_text.set_size(Size(200, 300))
g.framed_text.draw()
g.scroll_test_framed_text.draw()
glutSwapBuffers()
def do_on_idle():
global g
if g.is_redraw:
display()
g.is_redraw = False
def do_on_mouse(button, state, x, y):
global g
def get_mouse_wheel_direction(button, state):
if button == 3 and state == GLUT_DOWN:
return -1
elif button == 4 and state == GLUT_DOWN:
return 1
return 0
# print(button, state, x, y) # debug
wheel_direction = get_mouse_wheel_direction(button, state)
# print(wheel_direction) # debug
# g.scroll_test_framed_text.scroll_text_by(5 * wheel_direction)
g.scroll_test_framed_text.scroll_text_by(0.25 * wheel_direction, ScrollUnit.OUTSIDE_TEXT)
# g.scroll_test_framed_text.scroll_text_by(wheel_direction, ScrollUnit.LINE)
g.is_redraw = True
################################################################################
if __name__ == "__main__":
glutInit()
glutInitDisplayMode(GLUT_RGBA)
glutInitWindowSize(WIDTH, HEIGHT)
x = int((glutGet(GLUT_SCREEN_WIDTH) - WIDTH) / 2)
y = int((glutGet(GLUT_SCREEN_HEIGHT) - HEIGHT) / 2)
glutInitWindowPosition(x, y)
window = glutCreateWindow(b"OpenGL Window")
glutDisplayFunc(display)
glutIdleFunc(do_on_idle)
glutMouseFunc(do_on_mouse)
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION)
create()
glutMainLoop()
destroy() | 2,755 | 230 | 142 |