blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e94ba1423443f58923407d0e19aecc2c319fd7e | 7df759d602d2226e0eb033b97cb05000d6890e39 | /src/apps/admin/urls.py | c0737eb25e1b8d5a32449b94472b6b8d7d23d787 | [] | no_license | lenciel/django-shanyou | 958bfa297603b176ea1a186df132c9d2bafb3740 | d580b9c624b10e133ef66b4c16bfa1c49cb48541 | refs/heads/master | 2021-01-10T19:50:04.017038 | 2014-09-26T05:00:22 | 2014-09-26T05:00:22 | 24,486,763 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,078 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import patterns, url, include
from apps.admin.views import ExcelCenterView
# Admin-site URL map.  NOTE(review): uses ``patterns()`` and dotted-string
# views, both removed in Django 1.10 — presumably this targets Django <= 1.7.
urlpatterns = patterns('',
    url(r'^$', 'apps.admin.views.home', name='admin_home'),
    url(r'^system/$', 'apps.admin.views.system', name='system'),
    url(r'^dashboard/$', 'apps.admin.views.dashboard', name='dashboard'),
    url(r'^foundation/', include('apps.foundation.urls', namespace='foundation')),
    url(r'^account/', include('apps.account.urls', namespace='account')),
    url(r'^initdata/$', 'apps.admin.views.initdata'),
    url(r'^excel/$', ExcelCenterView.as_view(), name="excel_export_center")
)

# Routes delegated to the business apps.
urlpatterns += patterns('',
    url(r'^catalog/', include('apps.catalog.urls', namespace='catalog')),
    url(r'^customer/', include('apps.customer.urls', namespace='customer')),
    url(r'^project/', include('apps.project.urls', namespace='project')),
)

# Fixture-loading endpoint, exposed only in development.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^loaddata/(?P<filename>.*)', 'apps.admin.views.loaddata'),
    )
| [
"lenciel@gmail.com"
] | lenciel@gmail.com |
5f193a424d2c1dd80e275deda7296079eccaf970 | 2b0dbdf25fde57df678eeba96896418f0581ba51 | /上网.spec | bb2a4b6a46f001e5cd5719ce247f1a92e44229c6 | [] | no_license | hysspoluo/jiaoben | bb04bb85e0cf24ff81530dfe3f700d8d7c6f6263 | a79f79821abd29a20a953ad59d2a6598fe6bd6da | refs/heads/master | 2021-02-09T23:14:05.610206 | 2020-03-02T09:39:51 | 2020-03-02T09:39:51 | 244,333,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | spec | # -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['mian.py'],
pathex=['F:\\code\\jiaoben'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='上网',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True , icon='takeoff.ico')
| [
"517645428@qq.com"
] | 517645428@qq.com |
9e59b42c4a053307c5155b8d56e9f4db65997251 | 920f473f11e839d91e0fb51d94529326881c8ac6 | /vemcee/configtest.py | b8c294781b98ed106d91ec323b4895ccca627d3b | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | decaelus/vplanet | 3345127fa774ec59fe4a88abe1f0de2d280b890f | f59bd59027f725cc12a2115e8d5df58784c53477 | refs/heads/master | 2020-03-28T16:26:06.828950 | 2018-09-17T18:21:32 | 2018-09-17T18:21:32 | 148,694,597 | 0 | 0 | MIT | 2018-09-13T20:34:07 | 2018-09-13T20:34:07 | null | UTF-8 | Python | false | false | 337 | py | import ConfigParser
# Smoke test for parsing vemcee.ini.  NOTE: Python 2 only (``ConfigParser``
# module name and the ``print`` statement).
Config = ConfigParser.ConfigParser()
Config.optionxform = str  # Must be set before read() so option names keep their case
Config.read("vemcee.ini")
#for name in Config.options("Input"):
#    print name
#nwalkers = int(Config.get("Global","nwalkers"))
# Each [Input] option value is whitespace-separated; print the option name
# and its second field parsed as a float.
for name in Config.options("Input"):
    print name, float(Config.get("Input",name).split(" ")[1])
| [
"benguyer@uw.edu"
] | benguyer@uw.edu |
ab172a988b3dfc53542acc067f85e1400d72973c | b2799efc823f3e7775824aca01311fe2fabe21b5 | /Lexer.py | 6aa066b5ddb4a4aa96e967b717476da87fb02f18 | [] | no_license | EllaHayashi/Recursive_Descent_Parser | 8d877cb6f32ebcfc39909fda6ab3f3ed4c7361d1 | aa640d3df8f9eb62a3615c9434d2fc1168321eca | refs/heads/master | 2020-04-21T23:19:00.083976 | 2019-02-11T04:30:02 | 2019-02-11T04:30:02 | 169,943,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | import re
class Token(object):
    """A lexer token: a type tag plus the start offset in the input buffer."""

    def __init__(self, type, pos):
        # ``type`` shadows the builtin but is kept: it is the public
        # attribute/parameter name callers rely on.
        self.type = type
        self.pos = pos

    def __str__(self):
        return str(self.type)
class LexerError(Exception):
    """Raised when no lexing rule matches the input.

    ``pos`` is the character offset in the buffer where lexing failed.
    """
    def __init__(self, pos):
        self.pos = pos
class Lexer(object):
    """ A simple regex-based lexer/tokenizer.
    """
    def __init__(self, rules):
        """ Create a lexer.

        rules:
            A list of rules. Each rule is a `regex, type`
            pair, where `regex` is the regular expression used
            to recognize the token and `type` is the type
            of the token to return when it's recognized.
        """
        self.rules = []
        for regex, type in rules:
            self.rules.append((re.compile(regex), type))
        # Raw string: in a plain literal '\S' is an invalid escape
        # (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12).
        # The compiled pattern is unchanged.
        self.re_ws_skip = re.compile(r'\S')

    def input(self, buf):
        """ Initialize the lexer with a buffer as input.
        """
        self.buf = buf
        self.pos = 0

    def nextToken(self):
        """ Return the next token (a Token object) found in the
        input buffer. None is returned if the end of the
        buffer was reached.
        In case of a lexing error (the current chunk of the
        buffer matches no rule), a LexerError is raised with
        the position of the error.
        """
        if self.pos >= len(self.buf):
            return None
        # Skip whitespace by jumping to the next non-space character.
        m = self.re_ws_skip.search(self.buf, self.pos)
        if m:
            self.pos = m.start()
        else:
            return None
        # First rule that matches at the current position wins.
        for regex, type in self.rules:
            m = regex.match(self.buf, self.pos)
            if m:
                tok = Token(type, self.pos)
                self.pos = m.end()
                return tok
        # if we're here, no rule matched
        raise LexerError(self.pos)

    def tokens(self):
        """ Returns an iterator to the tokens found in the buffer.
        """
        while True:
            tok = self.nextToken()
            if tok is None:
                break
            yield tok
| [
"ella.j.hayashi@gmail.com"
] | ella.j.hayashi@gmail.com |
5539e5e8bbe2d9831f752004d2018f128952adf3 | fce51ce3c3da9ca43cc32a1874a94c550d60b90c | /tests/mathops/test_concat.py | 30c1b8157d7ed5f6295c6f82bc866f80d1ff4f79 | [
"Apache-2.0"
] | permissive | kgnandu/TFOpTests | b98ccca4cc8c4ead1dbc6032b41120fafce107f0 | c7e062f228100fd356ca9541de1bf2684f84e757 | refs/heads/master | 2020-03-10T23:44:30.028625 | 2018-04-04T21:14:46 | 2018-04-04T21:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | import tensorflow as tf
from tfoptests.persistor import TensorFlowPersistor
from tfoptests.test_graph import TestGraph
class ConcatTest(TestGraph):
    """Graph definition for the tf.concat test."""

    def list_inputs(self):
        # The concat graph is built entirely from tf.Variables, so there
        # are no placeholder inputs to feed.
        return []
def test_concat_one():
    """Build a graph concatenating four 1x1 constants and persist it frozen."""
    concat_test = ConcatTest(seed=13)
    arrs = []
    # Python 2 (``xrange``): four 1x1 float32 variables named "1_num".."4_num".
    for i in xrange(1, 5, 1):
        arrs.append(tf.Variable(tf.constant(5, dtype=tf.float32, shape=(1, 1), name=str(str(i) + '_num'))))
    # Concatenate along axis 0 -> a 4x1 tensor named "output".
    out_node = tf.concat(arrs, 0, name='output')
    placeholders = []
    predictions = [out_node]
    # Run and persist
    tfp = TensorFlowPersistor(save_dir="concat")
    tfp.set_placeholders(placeholders) \
        .set_output_tensors(predictions) \
        .set_test_data(concat_test.get_test_data()) \
        .build_save_frozen_graph()


if __name__ == '__main__':
    test_concat_one()
| [
"susan.eraly@gmail.com"
] | susan.eraly@gmail.com |
5d9691933859b4e096ca05dd4aaa3c55b71ffbf8 | dbfac16e3f0dd2456f75b346a7f6353d0acecf62 | /homeassistant/components/philips_js/light.py | ef5333a329d786e75be30ec3e8b576f59fff28ba | [
"Apache-2.0"
] | permissive | andersonshatch/home-assistant | 1060b4b92b33edbb3aeec8f5a81950f7095a65e6 | 10e2caf9e698759c48f4e859d3ed7d5c335a18b8 | refs/heads/dev | 2023-03-12T10:46:42.017224 | 2022-01-06T08:32:55 | 2022-01-06T08:32:55 | 81,867,261 | 1 | 0 | Apache-2.0 | 2023-02-22T06:18:12 | 2017-02-13T20:17:34 | Python | UTF-8 | Python | false | false | 12,238 | py | """Component to integrate ambilight for TVs exposing the Joint Space API."""
from __future__ import annotations
from haphilipsjs import PhilipsTV
from haphilipsjs.typing import AmbilightCurrentConfiguration
from homeassistant import config_entries
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
COLOR_MODE_HS,
COLOR_MODE_ONOFF,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.color import color_hsv_to_RGB, color_RGB_to_hsv
from . import PhilipsTVDataUpdateCoordinator
from .const import DOMAIN
# Effects are round-tripped as strings joined by ": ":
#   "Mode: <style>"                      raw ambilight modes
#   "<style>: <setting>"                 auto styles
#   "<style>: <algorithm>: Expert"       expert styles
EFFECT_PARTITION = ": "
EFFECT_MODE = "Mode"
EFFECT_EXPERT = "Expert"
EFFECT_AUTO = "Auto"
# Styles that accept expert color/audio settings.
EFFECT_EXPERT_STYLES = {"FOLLOW_AUDIO", "FOLLOW_COLOR", "Lounge light"}
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: config_entries.ConfigEntry,
    async_add_entities,
):
    """Add the ambilight entity for a configured Philips TV."""
    tv_coordinator = hass.data[DOMAIN][config_entry.entry_id]
    entity = PhilipsTVLightEntity(tv_coordinator)
    async_add_entities([entity])
def _get_settings(style: AmbilightCurrentConfiguration):
"""Extract the color settings data from a style."""
if style["styleName"] in ("FOLLOW_COLOR", "Lounge light"):
return style["colorSettings"]
if style["styleName"] == "FOLLOW_AUDIO":
return style["audioSettings"]
return None
def _parse_effect(effect: str):
    """Decode an effect string into a (mode, style, algorithm) triple."""
    style, _, remainder = effect.partition(EFFECT_PARTITION)
    if style == EFFECT_MODE:
        # "Mode: <style>" — the remainder is the raw ambilight mode name.
        return EFFECT_MODE, remainder, None
    algorithm, _, expert_flag = remainder.partition(EFFECT_PARTITION)
    if expert_flag:
        return EFFECT_EXPERT, style, algorithm
    return EFFECT_AUTO, style, algorithm
def _get_effect(mode: str, style: str, algorithm: str | None):
    """Encode (mode, style, algorithm) back into a single effect string."""
    if mode == EFFECT_MODE:
        return EFFECT_PARTITION.join((EFFECT_MODE, style))
    parts = [style, algorithm]
    if mode == EFFECT_EXPERT:
        parts.append(EFFECT_EXPERT)
    return EFFECT_PARTITION.join(parts)
def _is_on(mode, style, powerstate):
    """Return True when the given effect would currently light the ambilight."""
    tv_active = powerstate in ("On", None)
    if mode == EFFECT_MODE:
        # "internal" follows the TV picture, so it is lit only while the TV is on.
        return tv_active if style == "internal" else True
    if mode in (EFFECT_AUTO, EFFECT_EXPERT):
        if style == "OFF":
            return False
        if style in ("FOLLOW_VIDEO", "FOLLOW_AUDIO"):
            return tv_active
        return True
    return False
def _is_valid(mode, style):
    """Tell whether ``style`` may be used in the given mode."""
    # Only expert mode restricts the style set.
    return mode != EFFECT_EXPERT or style in EFFECT_EXPERT_STYLES
def _get_cache_keys(device: PhilipsTV):
"""Return a cache keys to avoid always updating."""
return (
device.on,
device.powerstate,
device.ambilight_current_configuration,
device.ambilight_mode,
)
def _average_pixels(data):
"""Calculate an average color over all ambilight pixels."""
color_c = 0
color_r = 0.0
color_g = 0.0
color_b = 0.0
for layer in data.values():
for side in layer.values():
for pixel in side.values():
color_c += 1
color_r += pixel["r"]
color_g += pixel["g"]
color_b += pixel["b"]
if color_c:
color_r /= color_c
color_g /= color_c
color_b /= color_c
return color_r, color_g, color_b
return 0.0, 0.0, 0.0
class PhilipsTVLightEntity(CoordinatorEntity, LightEntity):
    """Representation of a Philips TV exposing the JointSpace API."""

    def __init__(
        self,
        coordinator: PhilipsTVDataUpdateCoordinator,
    ) -> None:
        """Initialize light."""
        self._tv = coordinator.api
        self._hs = None
        self._brightness = None
        # Snapshot of TV state (see _get_cache_keys) used to avoid
        # recomputing the effect list on every coordinator update.
        self._cache_keys = None
        super().__init__(coordinator)

        self._attr_supported_color_modes = [COLOR_MODE_HS, COLOR_MODE_ONOFF]
        self._attr_supported_features = (
            SUPPORT_EFFECT | SUPPORT_COLOR | SUPPORT_BRIGHTNESS
        )
        self._attr_name = f"{coordinator.system['name']} Ambilight"
        self._attr_unique_id = coordinator.unique_id
        self._attr_icon = "mdi:television-ambient-light"
        self._attr_device_info = DeviceInfo(
            identifiers={
                (DOMAIN, self._attr_unique_id),
            },
            manufacturer="Philips",
            model=coordinator.system.get("model"),
            name=coordinator.system["name"],
            sw_version=coordinator.system.get("softwareversion"),
        )
        self._update_from_coordinator()

    def _calculate_effect_list(self):
        """Calculate an effect list based on current status."""
        effects = []
        # Auto styles: one effect per (style, menu setting) pair.
        effects.extend(
            _get_effect(EFFECT_AUTO, style, setting)
            for style, data in self._tv.ambilight_styles.items()
            if _is_valid(EFFECT_AUTO, style)
            and _is_on(EFFECT_AUTO, style, self._tv.powerstate)
            for setting in data.get("menuSettings", [])
        )

        # Expert styles: one effect per (style, algorithm) pair.
        effects.extend(
            _get_effect(EFFECT_EXPERT, style, algorithm)
            for style, data in self._tv.ambilight_styles.items()
            if _is_valid(EFFECT_EXPERT, style)
            and _is_on(EFFECT_EXPERT, style, self._tv.powerstate)
            for algorithm in data.get("algorithms", [])
        )

        # Raw ambilight modes (internal/manual/expert...).
        effects.extend(
            _get_effect(EFFECT_MODE, style, None)
            for style in self._tv.ambilight_modes
            if _is_valid(EFFECT_MODE, style)
            and _is_on(EFFECT_MODE, style, self._tv.powerstate)
        )

        return sorted(effects)

    def _calculate_effect(self):
        """Return the current effect."""
        current = self._tv.ambilight_current_configuration
        if current and self._tv.ambilight_mode != "manual":
            if current["isExpert"]:
                if settings := _get_settings(current):
                    return _get_effect(
                        EFFECT_EXPERT, current["styleName"], settings["algorithm"]
                    )
                return _get_effect(EFFECT_EXPERT, current["styleName"], None)

            return _get_effect(
                EFFECT_AUTO, current["styleName"], current.get("menuSetting", None)
            )

        # Manual mode (or no configuration): expose the raw mode as effect.
        return _get_effect(EFFECT_MODE, self._tv.ambilight_mode, None)

    @property
    def color_mode(self):
        """Return the current color mode."""
        current = self._tv.ambilight_current_configuration
        if current and current["isExpert"]:
            return COLOR_MODE_HS

        if self._tv.ambilight_mode in ["manual", "expert"]:
            return COLOR_MODE_HS

        return COLOR_MODE_ONOFF

    @property
    def is_on(self):
        """Return if the light is turned on."""
        if self._tv.on:
            mode, style, _ = _parse_effect(self.effect)
            return _is_on(mode, style, self._tv.powerstate)
        return False

    def _update_from_coordinator(self):
        # Refresh entity attributes from the TV state held by the coordinator.
        current = self._tv.ambilight_current_configuration
        color = None

        # Only rebuild the (expensive) effect list when relevant TV state changed.
        if (cache_keys := _get_cache_keys(self._tv)) != self._cache_keys:
            self._cache_keys = cache_keys
            self._attr_effect_list = self._calculate_effect_list()
            self._attr_effect = self._calculate_effect()

        if current and current["isExpert"]:
            if settings := _get_settings(current):
                color = settings["color"]

        mode, _, _ = _parse_effect(self._attr_effect)

        if mode == EFFECT_EXPERT and color:
            # TV reports hue/saturation on a 0-255 scale; HA uses 0-360/0-100.
            self._attr_hs_color = (
                color["hue"] * 360.0 / 255.0,
                color["saturation"] * 100.0 / 255.0,
            )
            self._attr_brightness = color["brightness"]
        elif mode == EFFECT_MODE and self._tv.ambilight_cached:
            # Derive a representative color from the cached pixel data.
            hsv_h, hsv_s, hsv_v = color_RGB_to_hsv(
                *_average_pixels(self._tv.ambilight_cached)
            )
            self._attr_hs_color = hsv_h, hsv_s
            self._attr_brightness = hsv_v * 255.0 / 100.0
        else:
            self._attr_hs_color = None
            self._attr_brightness = None

    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        self._update_from_coordinator()
        super()._handle_coordinator_update()

    async def _set_ambilight_cached(self, algorithm, hs_color, brightness):
        """Set ambilight via the manual or expert mode."""
        rgb = color_hsv_to_RGB(hs_color[0], hs_color[1], brightness * 100 / 255)

        data = {
            "r": rgb[0],
            "g": rgb[1],
            "b": rgb[2],
        }

        if not await self._tv.setAmbilightCached(data):
            raise Exception("Failed to set ambilight color")

        if algorithm != self._tv.ambilight_mode:
            if not await self._tv.setAmbilightMode(algorithm):
                raise Exception("Failed to set ambilight mode")

    async def _set_ambilight_expert_config(
        self, style, algorithm, hs_color, brightness
    ):
        """Set ambilight via current configuration."""
        config: AmbilightCurrentConfiguration = {
            "styleName": style,
            "isExpert": True,
        }

        # Convert HA's 0-360 hue / 0-100 saturation back into the TV's 0-255 scale.
        setting = {
            "algorithm": algorithm,
            "color": {
                "hue": round(hs_color[0] * 255.0 / 360.0),
                "saturation": round(hs_color[1] * 255.0 / 100.0),
                "brightness": round(brightness),
            },
            "colorDelta": {
                "hue": 0,
                "saturation": 0,
                "brightness": 0,
            },
        }

        # Which settings block the TV expects depends on the style.
        if style in ("FOLLOW_COLOR", "Lounge light"):
            config["colorSettings"] = setting
            config["speed"] = 2
        elif style == "FOLLOW_AUDIO":
            config["audioSettings"] = setting
            config["tuning"] = 0

        if not await self._tv.setAmbilightCurrentConfiguration(config):
            raise Exception("Failed to set ambilight mode")

    async def _set_ambilight_config(self, style, algorithm):
        """Set ambilight via current configuration."""
        config: AmbilightCurrentConfiguration = {
            "styleName": style,
            "isExpert": False,
            "menuSetting": algorithm,
        }

        if await self._tv.setAmbilightCurrentConfiguration(config) is False:
            raise Exception("Failed to set ambilight mode")

    async def async_turn_on(self, **kwargs) -> None:
        """Turn the bulb on."""
        brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
        hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
        effect = kwargs.get(ATTR_EFFECT, self.effect)

        if not self._tv.on:
            raise Exception("TV is not available")

        mode, style, setting = _parse_effect(effect)

        # If the requested effect cannot be lit right now, fall back to a
        # raw mode that can: picture-following when the screen is on,
        # manual color otherwise.
        if not _is_on(mode, style, self._tv.powerstate):
            mode = EFFECT_MODE
            setting = None
            if self._tv.powerstate in ("On", None):
                style = "internal"
            else:
                style = "manual"

        if brightness is None:
            brightness = 255

        if hs_color is None:
            hs_color = [0, 0]

        if mode == EFFECT_MODE:
            await self._set_ambilight_cached(style, hs_color, brightness)
        elif mode == EFFECT_AUTO:
            await self._set_ambilight_config(style, setting)
        elif mode == EFFECT_EXPERT:
            await self._set_ambilight_expert_config(
                style, setting, hs_color, brightness
            )

        self._update_from_coordinator()
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs) -> None:
        """Turn off ambilight."""
        if not self._tv.on:
            raise Exception("TV is not available")

        if await self._tv.setAmbilightMode("internal") is False:
            raise Exception("Failed to set ambilight mode")

        await self._set_ambilight_config("OFF", "")

        self._update_from_coordinator()
        self.async_write_ha_state()
| [
"noreply@github.com"
] | andersonshatch.noreply@github.com |
70347e3c16ab77aac057331aab21e4349f80540f | 8d4cb9defff8dcbaa4aba92324585eb20f760c76 | /week3/is_in.py | 98d13acb8f7ef88bd3eb281c137766508de2c50f | [] | no_license | tornyak/mit_cs_intro | 48a9add60e1c9bb5a1b90192ea56e9032d3c9789 | d632c94abd89eb35e42e9b34e976a881b6db5d68 | refs/heads/master | 2021-01-22T08:52:50.981979 | 2015-08-11T06:40:36 | 2015-08-11T06:40:36 | 37,287,522 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | __author__ = 'vanja'
def isIn(char, aStr):
    '''
    char: a single character
    aStr: an alphabetized string
    returns: True if char is in aStr; False otherwise

    Recursive binary search over the sorted string/sequence.
    '''
    if len(aStr) == 0:
        # Empty input: nothing can match.
        return False
    # Floor division: on Python 3 a plain `/` yields a float, which would
    # crash the slicing below with a TypeError. `//` behaves the same on
    # Python 2 as the old `/` did here.
    length = len(aStr)
    mid = length // 2
    middle = aStr[mid]
    if char > middle:
        return isIn(char, aStr[mid + 1:length])
    elif char < middle:
        return isIn(char, aStr[0:mid])
    else:
        return True
print isIn('x', sorted("ahhfehfdfnkjfaleiueuahjehlkajrejrelrzzy"))
| [
"vanja.divkovic@gmail.com"
] | vanja.divkovic@gmail.com |
3e4c38aec4617b7e23f6bc80b6dcfb79634a3073 | d5ee0b8f009f5e66218b187f46db23df2cc3a5b8 | /tests/__init__.py | 3dbf0863580f6067bd105133bd16bbae9dd3e5c7 | [
"Apache-2.0"
] | permissive | patarapolw/memorable-password | 53597b21a81a6eac1d82eec105ee1230b2efddab | f53a2afa4104238e1770dfd4d85710bc00719302 | refs/heads/master | 2022-10-20T18:42:49.445087 | 2018-05-05T03:37:03 | 2018-05-05T03:37:03 | 129,497,851 | 10 | 0 | Apache-2.0 | 2022-10-18T18:34:33 | 2018-04-14T08:28:33 | Python | UTF-8 | Python | false | false | 1,441 | py | from time import time
import signal
from functools import partial
def timeit(func, validator=lambda x: True, rep=50):
    """Run ``func`` ``rep`` times, keeping the wall-clock duration of every
    run whose result passes ``validator``; print a summary and return the
    recorded durations (seconds)."""
    durations = []
    last_index = 0
    try:
        for last_index in range(rep):
            print('Running test {} of {}'.format(last_index + 1, rep))
            started = time()
            outcome = func()
            if not validator(outcome):
                print('Test failed!')
                continue
            durations.append(time() - started)
    except KeyboardInterrupt:
        # Ctrl-C aborts early but still reports whatever was collected.
        pass

    print('Success {} of {}'.format(len(durations), last_index + 1))
    if durations:
        mean = sum(durations) / len(durations)
        # functools.partial objects have no __qualname__; unwrap them.
        target = func.func if isinstance(func, partial) else func
        print('{:.4f} seconds per {}'.format(mean, target.__qualname__))

    return durations
def wrapper(func, *args, **kwargs):
    """Return a zero-argument callable invoking ``func(*args, **kwargs)``.

    Handy for handing an argument-taking function to ``timeit`` or
    ``timeout_function`` without reaching for ``functools.partial``.
    """
    def wrapped():
        return func(*args, **kwargs)
    return wrapped
def timeout_function(func, args=(), kwargs=None, timeout_duration=20, default=None):
    """Call ``func(*args, **kwargs)`` under a wall-clock limit.

    Returns the call's result, or ``default`` when it does not finish
    within ``timeout_duration`` seconds.  Unix-only (relies on SIGALRM).
    """
    # Avoid the shared mutable-default-argument pitfall of ``kwargs={}``.
    if kwargs is None:
        kwargs = {}

    class MyTimeoutError(TimeoutError):
        pass

    def handler(signum, frame):
        raise MyTimeoutError

    # Install the timeout handler, remembering the previous one so callers
    # are not left with ours after we return.
    old_handler = signal.signal(signal.SIGALRM, handler)
    signal.alarm(timeout_duration)
    try:
        result = func(*args, **kwargs)
    except MyTimeoutError:
        result = default
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_handler)
    return result
| [
"patarapolw@gmail.com"
] | patarapolw@gmail.com |
e9e49da0b393e489602a258ab2cd3f4f02e30304 | bacd75354fe12269ae5ff7ffef8df712e86ac641 | /05-data-pipelines/dags/udac_sparkify_dim_tables.py | 9d695f2093d8c2ebdd3112ae699d55b48eb8cac8 | [] | no_license | mvillafuertem/udacity-data-engineer-nanodegree | 2fc0ce37f733402069d4e27f6f517a27aa7c5282 | 27930a41a6de6049a05375f488c9ac94608ed2fe | refs/heads/master | 2022-04-19T00:31:59.697442 | 2020-04-19T15:07:28 | 2020-04-19T15:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | from airflow import DAG
from airflow.operators import LoadDimensionOperator, DataQualityOperator
def get_staging_to_dim(
        parent_dag_name,
        task_id,
        redshift_conn_id,
        table,
        truncate,
        insert_sql_query,
        validate_sql_query,
        *args, **kwargs):
    """Build a sub-DAG that loads one dimension table and then validates it.

    parent_dag_name/task_id: combined into the sub-DAG id "<parent>.<task>".
    redshift_conn_id: Airflow connection id for the target Redshift cluster.
    table: dimension table name, also used in the generated task ids.
    truncate: whether LoadDimensionOperator truncates before inserting.
    insert_sql_query / validate_sql_query: SQL for the load and the check.
    kwargs: forwarded to the DAG constructor (schedule, default_args, ...).
    Note: *args is accepted but unused — presumably kept for a uniform
    factory signature; confirm against the callers.
    """
    dag = DAG(
        f"{parent_dag_name}.{task_id}",
        **kwargs
    )

    load_to_dimension_table = LoadDimensionOperator(
        task_id=f"load_{table}_dim_table",
        dag=dag,
        redshift_conn_id=redshift_conn_id,
        table=table,
        sql_query=insert_sql_query,
        truncate=truncate
    )

    run_quality_check = DataQualityOperator(
        task_id=f"check_{table}_data",
        dag=dag,
        redshift_conn_id=redshift_conn_id,
        table=table,
        sql_query=validate_sql_query,
    )

    # Quality check runs only after the dimension load succeeds.
    load_to_dimension_table >> run_quality_check

    return dag
| [
"tommi.ranta@wapice.com"
] | tommi.ranta@wapice.com |
00567d992ba6afd5b463dec821fddea678c714c7 | 752b5296171dc6406402a2f5d7d357975bb6b262 | /backend/test1_20435/urls.py | 1117fae5edb4a95bda07a264d8670c908f1bbd65 | [] | no_license | crowdbotics-apps/test1-20435 | 5eb22e772aae9a3f21d4ed68203d4a7dc453f07b | 426b3f6be145e4f7f6167d606026dde86da08b30 | refs/heads/master | 2022-12-19T05:56:38.960908 | 2020-09-19T07:07:35 | 2020-09-19T07:07:35 | 296,806,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,457 | py | """test1_20435 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# Project URL map: auth (allauth + rest_auth), the generated app routers,
# and the admin site.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    path("api/v1/", include("taxi_profile.api.v1.urls")),
    path("taxi_profile/", include("taxi_profile.urls")),
    path("api/v1/", include("booking.api.v1.urls")),
    path("booking/", include("booking.urls")),
    path("api/v1/", include("location.api.v1.urls")),
    path("location/", include("location.urls")),
    path("api/v1/", include("vehicle.api.v1.urls")),
    path("vehicle/", include("vehicle.urls")),
    path("home/", include("home.urls")),
    path("api/v1/", include("wallet.api.v1.urls")),
    path("wallet/", include("wallet.urls")),
]

# Admin site branding.
admin.site.site_header = "Test1"
admin.site.site_title = "Test1 Admin Portal"
admin.site.index_title = "Test1 Admin"

# swagger
api_info = openapi.Info(
    title="Test1 API",
    default_version="v1",
    description="API documentation for Test1 App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
34110eb0228dc930d9f3f6c42651cafcaf34570a | 07dbcbe7c48e82b111316c15342e43996e482604 | /survey_results/small-world.py | 3e6e820f84aaa9413628a72f4c7afeca40ca68b3 | [] | no_license | satunr/ProjectCodes | 18f377fa1d9d58ec4c6240dd4ab2aaea3aba096f | 4df96687c829c8059f1fbeaca3cd2d3b7c55e9ab | refs/heads/master | 2020-07-31T06:14:43.071591 | 2019-09-24T05:07:22 | 2019-09-24T05:07:22 | 210,512,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | import networkx as nx
def tiers(G):
    """Split a digraph's nodes into sources (no in-edges), internal nodes,
    and sinks (no out-edges); returned in that order."""
    sources, internal, sinks = [], [], []
    for node in G.nodes():
        if G.in_degree(node) == 0:
            bucket = sources
        elif G.out_degree(node) == 0:
            bucket = sinks
        else:
            bucket = internal
        bucket.append(node)
    return sources, internal, sinks
def diameter(G):
    """Diameter of the largest connected component of the undirected view.

    Also prints the component's node and edge counts as a side effect.
    NOTE(review): ``nx.connected_component_subgraphs`` was removed in
    networkx 2.4 — presumably this targets networkx < 2.4; verify the
    pinned version.
    """
    G = G.to_undirected()
    G = max(nx.connected_component_subgraphs(G), key=len)
    print (len(G),len(G.edges()))
    return nx.diameter(G)
def shortest_path(G, t1, t2, t3):
    """Mean shortest-path length from every tier-1 node to every reachable
    tier-3 node.

    ``t2`` is unused but kept for signature compatibility with callers.
    Returns 0.0 when no tier-1 node reaches any tier-3 node (the original
    crashed with ZeroDivisionError in that case).
    """
    P = 0.0
    S = 0.0
    for u in t1:
        for v in t3:
            if nx.has_path(G, u, v):
                S += nx.shortest_path_length(G, u, v)
                P += 1.0
    if P == 0.0:
        # No reachable (source, sink) pair at all.
        return 0.0
    return S / P
# For each organism's GML network: print the diameter of the largest
# component and the mean tier-1 -> tier-3 shortest path length.
G_E = nx.read_gml('Ecoli-Original.gml')
t1, t2, t3 = tiers(G_E)
print ('Ecoli:',diameter(G_E.copy()),shortest_path(G_E.copy(),t1, t2, t3))

G_Y = nx.read_gml('Yeast-Original.gml')
t1, t2, t3 = tiers(G_Y)
print ('Yeast:',diameter(G_Y.copy()),shortest_path(G_Y.copy(),t1, t2, t3))

G_H = nx.read_gml('Human_Ordered.gml')
t1, t2, t3 = tiers(G_H)
print ('Human:',diameter(G_H.copy()),shortest_path(G_H.copy(),t1, t2, t3))

G_M = nx.read_gml('Mouse_Ordered.gml')
t1, t2, t3 = tiers(G_M)
print ('Mouse:',diameter(G_M.copy()),shortest_path(G_M.copy(),t1, t2, t3))
| [
"noreply@github.com"
] | satunr.noreply@github.com |
b591e85cf803b93d0a19c9f327bde9105d64c8db | 5c0ef303de210f053c7b2cc0cdccc6a1440e79ea | /soft_toys/SoftToys.py | b321a0d04f9bd78b85ddc93bdd792e92f892521a | [
"MIT"
] | permissive | rutuja1302/NQT-20 | 2ea6ddf2b5f61bca42c8821234503db580bec189 | e1a496058297111e13dbda91f57f587b33aca0c0 | refs/heads/main | 2023-01-30T02:30:16.766528 | 2020-12-04T14:53:37 | 2020-12-04T14:53:37 | 308,243,235 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,438 | py | '''There is a shop of Soft Toys. A shopkeeper arranges the Items on the shelf for display. Customer asks for any Item randomly, either by its name or by its position number.
When Customer places order, Shopkeeper removes those Items from Shelf to sale.
Shopkeeper then rearranges the remaining Items on the shelf such that there is no gap left between Items(and item numbers) on the shelf.
Items are kept in blocks on the shelf rows. Blocks are numbered from 1 to N.
Implement suitable Data Structure with Operations to display Items in order as arranged by shopkeeper.
When customer selects any Item from display, your code should remove it from list of Items and display rearranged Items
Consider the following list of Soft Toys and display is the format of shelf.
SoftToys = ["Giant-Teddy Bear", "Giraffe", "Cat", "Mega-Bear", "Dog", "Lion", "Billy-Bear", "Besty-Bear", "Monkey", "Bobby-Bear", "Bunny-Rabbit", "Benjamin-Bear", "Kung-Fu-Panda",
"Brown-Bear", "Pink-Bear", "Baby-Elephant", "Blue-Fish", "Hippo", "Cute-Pig", "Pikachu", "Doremon", "Tortoise", "Cater-Pillar", "Candy-Doll']
Input will be a string value which is any one of the Soft Toy name or position as displayed on the shelf at the point of time.
1.GIANT-TEDDY-BEAR 2.GIRAFFE 3.CAT 4.MEGA-BEAR 5.DOG 6.LION 7.BILLY-BEAR 8.BESTY-BEAR 9.MONKEY 10.BOBBY-BEAR 11. BUNNY-RABBIT 12.BENJAMIN-BEAR 13.KUNG-FU-PANDA
14.BROWN-BEAR 15.PINK-BEAR 16.BABY-ELEPHANT 17.BLUE-FISH 18.HIPPO 19.CUTE-PIG 20.PIKACHU 21.DOREMON 22.TORTOISE 23.CATER-PILLAR 24.CANDY-DOLL
Example 1:
Input:
TORTOISE
Output:
1.GIANT-TEDDY-BEAR 2.GIRAFFE 3.CAT 4.MEGA-BEAR 5.DOG 6.LION 7.BILLY-BEAR 8.BESTY-BEAR 9.MONKEY 10.BOBBY-BEAR 11. BUNNY-RABBIT 12.BENJAMIN-BEAR 13.KUNG-FU-PANDA
14.BROWN-BEAR 15.PINK-BEAR 16.BABY-ELEPHANT 17.BLUE-FISH 18.HIPPO 19.CUTE-PIG 20.PIKACHU 21.DOREMON 22.CATER-PILLAR 23.CANDY-DOLL
Note: The output should be a list printed in above format after removing the item which is sold out. If user give item or position which is not on shelf display "ITEM UNAVAILABLE"
for other wrong input display "INVALID INPUT" as output.
'''
#original list of items
# NOTE(review): "BENJAMIN-BEAR " carries a stray trailing space, so that
# item can never be matched by name — confirm and strip it.
st_og = ["GIANT-TEDDY-BEAR","GIRAFFE","CAT","MEGA-BEAR","DOG","LION","BILLY-BEAR","BESTY-BEAR","MONKEY","BOBBY-BEAR","BUNNY-RABBIT","BENJAMIN-BEAR ","KUNG-FU-PANDA",
         "BROWN-BEAR","PINK-BEAR","BABY-ELEPHANT","BLUE-FISH","HIPPO","CUTE-PIG","PIKACHU","DOREMON","TORTOISE","CATER-PILLAR","CANDY-DOLL"]
#read input
toy = input()
if toy.isdigit(): #if item number given by customer
    if int(toy)>=1 and int(toy)<=24:
        # Remove the sold item and reprint the shelf with renumbered blocks.
        st_og.pop(int(toy)-1)
        srNo = 1
        for i in st_og:
            print(str(srNo)+"."+i,end=" ")
            srNo += 1
    else:
        print("INVALID INPUT") #if input doesnot exists
elif toy.isupper(): #if we use isaplha() method in place of of isupper() then this elif case will get ignored because there are hyphens used in the names
    st_new = st_og.copy()
    # NOTE(review): st_new is a fresh copy of st_og, so membership in the
    # two lists is always identical and the "ITEM UNAVAILABLE" branch
    # below is unreachable in a single run — persistent shelf state across
    # runs was presumably intended.
    if toy in st_new and toy in st_og: #if item is present both in the original list and new list created that means the item is on shelf
        st_new.remove(toy)
        srNo = 1
        for i in st_new:
            print(str(srNo)+"."+i,end=" ")
            srNo += 1
    elif toy in st_og: #if the item is not on shelf but was available earlier
        print("ITEM UNAVAILABLE")
    else:
        print("INVALID INPUT") #if item doesnot exists
else:
    print("INVALID INPUT") #if input doesnot exists
| [
"noreply@github.com"
] | rutuja1302.noreply@github.com |
513d38351a10bd85ee201df740e75c0e319c9aa3 | 42245f7a3298b39a6a4826fa0cc2fc3b943a8506 | /codigosURIJudge/animal.py | 10d095ff27348f230c90697a47cd5d470ed09a35 | [] | no_license | JamissonBarbosa/questPython | be5f4bd5b4b6f534fa065911e285cd85b48a63d6 | 1406b94c6b755ee7a2a081b863d4f6314be52ad9 | refs/heads/master | 2023-03-20T19:23:24.893126 | 2021-03-12T11:21:17 | 2021-03-12T11:21:17 | 287,741,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | reino = input()
classe = input()
tipo = input()
retorno = ''
for caractere in reino:
if caractere.isupper():
retorno += caractere.lower()
reino1 = retorno
retorno1 = ''
for caractere in classe:
if caractere.isupper():
retorno1 += caractere.lower()
classe1 = retorno1
retorno2 = ''
for caractere in tipo:
if caractere.isupper():
retorno2 += caractere.lower()
tipo1 = retorno2
if reino1 == 'vertebrado' and classe1 == 'ave' and tipo1 == 'carnivoro':
print("aguia")
if reino1 == 'vertebrado' and classe1 == 'ave' and tipo1 == 'onivoro':
print("pomba")
if reino1 == 'vertebrado' and classe1 == 'mamifero' and tipo1 == 'onivoro':
print("homem")
if reino1 == 'vertebrado' and classe1 == 'mamifero' and tipo1 == 'herbivoro':
print("vaca")
if reino1 == 'invertebrado' and classe1 == 'inseto' and tipo1 == 'hematofago':
print("pulga")
if reino1 == 'invertebrado' and classe1 == 'inseto' and tipo1 == 'herbivoro':
print("lagarta")
if reino1 == 'invertebrado' and classe1 == 'anelideo' and tipo1 == 'hematofago':
print("sanguessuga")
if reino1 == 'invertebrado' and classe1 == 'anelideo' and tipo1 == 'onivoro':
print("minhoca") | [
"jamissondasilvafilho@gmail.com"
] | jamissondasilvafilho@gmail.com |
21b19315ad070353065ed4a8b9998ccbf9707852 | fb87ac4cd514e505e3e6d346e8f0c80291518ee6 | /reddit-clouds.py | d6d9d80364b003e9fc072584b20fb8402c7d2638 | [] | no_license | kodbyte/reddit-clouds | 1e2133d557650b86abcf228c25f859bb0400517b | a0a5cab083ee09f1f88192e96773d6d65ec06b99 | refs/heads/master | 2020-03-11T17:47:46.484298 | 2018-04-19T04:12:43 | 2018-04-19T04:12:43 | 130,127,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # -*- coding: utf-8 -*-
"""
Program to extract comments from top 100 all time posts for a list of
subreddits and create a wordcloud.
"""
import praw
from wordcloud import WordCloud
from os import path
class Reddit(object):
    """Thin wrapper around a read-only PRAW Reddit client."""

    def __init__(self):
        # NOTE(review): client_id/client_secret are blank here -- they must be
        # filled in before the script can authenticate.
        self.r = praw.Reddit(
            user_agent=('Comment Word Cloud Creator v1.0 by /u/zadixx'),
            client_id='',
            client_secret='')

    def get_posts(self, sub):
        """Return the ids of the top 100 all-time posts of subreddit *sub*."""
        return [submission.id for submission in self.r.subreddit(sub).top(limit=100)]

    def get_comments(self, post_id, sub):
        """Return the body of every comment on *post_id*, skipping AutoModerator.

        The *sub* argument is accepted but unused -- kept so existing call
        sites keep working.
        """
        submission = self.r.submission(id=post_id)
        submission.comments.replace_more(limit=0)
        return [comment.body
                for comment in submission.comments.list()
                if comment.author != 'AutoModerator']
class Cloud(object):
    """Renders a word-cloud image next to this script."""

    def create_cloud(self, text, sub):
        """Generate a 1200x800 word cloud from *text* and save it as <sub>.png.

        The generated WordCloud object is kept on ``self.cloud``, matching the
        original implementation.
        """
        output_dir = path.dirname(__file__)
        self.cloud = WordCloud(width=1200, height=800).generate(text)
        self.cloud.to_file(path.join(output_dir, '{0}.png'.format(sub)))
# Create a read-only Reddit client.
r = Reddit()
subreddits = ['politics', 'the_donald']
# Build one word cloud per subreddit from the comments of its top posts.
for subreddit in subreddits:
    top_posts = r.get_posts(subreddit)
    # Bug fix: collect the comments of *every* top post.  Previously
    # `comments` was rebound on each iteration, so only the last post's
    # comments ended up in the word cloud.
    comments = []
    for post in top_posts:
        comments.extend(r.get_comments(post, subreddit))
    # Render and save the word cloud for this subreddit.
    cloud = Cloud()
    cloud.create_cloud(" ".join(comments), subreddit)
| [
"38510574+kodbyte@users.noreply.github.com"
] | 38510574+kodbyte@users.noreply.github.com |
62a53fb992bfa88f9bb00fd0ced85235af8379ee | f6e7bf63d88ddcd43892f62850f8d7f03ba85da0 | /Tools/Scripts/webkitpy/layout_tests/port/base.py | 8c4578dbfa7c3832bf5b7978040114592da2dba9 | [] | no_license | frogbywyplay/appframeworks_qtwebkit | 6ddda6addf205fb1a498c3998ef6fc0f3a7d107f | 5a62a119d5d589ffbf8dd8afda9e5786eea27618 | refs/heads/master | 2021-07-09T14:55:57.618247 | 2021-04-28T13:58:39 | 2021-04-28T13:58:39 | 18,559,129 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 70,330 | py | #!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Abstract base class of Port-specific entry points for the layout tests
test infrastructure (the Port and Driver classes)."""
import cgi
import difflib
import errno
import itertools
import logging
import os
import operator
import optparse
import re
import sys
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
from webkitpy.common import find_files
from webkitpy.common import read_checksum_from_png
from webkitpy.common.memoized import memoized
from webkitpy.common.system import path
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import config as port_config
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import http_lock
from webkitpy.layout_tests.port import image_diff
from webkitpy.layout_tests.port import server_process
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.servers import apache_http_server
from webkitpy.layout_tests.servers import http_server
from webkitpy.layout_tests.servers import websocket_server
_log = logging.getLogger(__name__)
# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package."""
# Subclasses override this. This should indicate the basic implementation
# part of the port name, e.g., 'chromium-mac', 'win', 'gtk'; there is probably (?)
# one unique value per class.
# FIXME: We should probably rename this to something like 'implementation_name'.
port_name = None
# Test names resemble unix relative paths, and use '/' as a directory separator.
TEST_PATH_SEPARATOR = '/'
ALL_BUILD_TYPES = ('debug', 'release')
@classmethod
def determine_full_port_name(cls, host, options, port_name):
"""Return a fully-specified port name that can be used to construct objects."""
# Subclasses will usually override this.
return cls.port_name
def __init__(self, host, port_name=None, options=None, **kwargs):
# This value may be different from cls.port_name by having version modifiers
# and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
# FIXME: port_name should be a required parameter. It isn't yet because lots of tests need to be updatd.
self._name = port_name or self.port_name
# These are default values that should be overridden in a subclasses.
self._version = ''
self._architecture = 'x86'
# FIXME: Ideally we'd have a package-wide way to get a
# well-formed options object that had all of the necessary
# options defined on it.
self._options = options or optparse.Values()
self.host = host
self._executive = host.executive
self._filesystem = host.filesystem
self._webkit_finder = WebKitFinder(host.filesystem)
self._config = port_config.Config(self._executive, self._filesystem, self.port_name)
self._helper = None
self._http_server = None
self._websocket_server = None
self._image_differ = None
self._server_process_constructor = server_process.ServerProcess # overridable for testing
self._http_lock = None # FIXME: Why does this live on the port object?
# Python's Popen has a bug that causes any pipes opened to a
# process that can't be executed to be leaked. Since this
# code is specifically designed to tolerate exec failures
# to gracefully handle cases where wdiff is not installed,
# the bug results in a massive file descriptor leak. As a
# workaround, if an exec failure is ever experienced for
# wdiff, assume it's not available. This will leak one
# file descriptor but that's better than leaking each time
# wdiff would be run.
#
# http://mail.python.org/pipermail/python-list/
# 2008-August/505753.html
# http://bugs.python.org/issue3210
self._wdiff_available = None
# FIXME: prettypatch.py knows this path, why is it copied here?
self._pretty_patch_path = self.path_from_webkit_base("Websites", "bugs.webkit.org", "PrettyPatch", "prettify.rb")
self._pretty_patch_available = None
if not hasattr(options, 'configuration') or not options.configuration:
self.set_option_default('configuration', self.default_configuration())
self._test_configuration = None
self._reftest_list = {}
self._results_directory = None
self._root_was_set = hasattr(options, 'root') and options.root
def additional_drt_flag(self):
return []
def default_pixel_tests(self):
# FIXME: Disable until they are run by default on build.webkit.org.
return False
def default_timeout_ms(self):
if self.get_option('webkit_test_runner'):
# Add some more time to WebKitTestRunner because it needs to syncronise the state
# with the web process and we want to detect if there is a problem with that in the driver.
return 80 * 1000
return 35 * 1000
def driver_stop_timeout(self):
""" Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
# We want to wait for at least 3 seconds, but if we are really slow, we want to be slow on cleanup as
# well (for things like ASAN, Valgrind, etc.)
return 3.0 * float(self.get_option('time_out_ms', '0')) / self.default_timeout_ms()
def wdiff_available(self):
if self._wdiff_available is None:
self._wdiff_available = self.check_wdiff(logging=False)
return self._wdiff_available
def pretty_patch_available(self):
if self._pretty_patch_available is None:
self._pretty_patch_available = self.check_pretty_patch(logging=False)
return self._pretty_patch_available
def should_retry_crashes(self):
return False
def default_child_processes(self):
"""Return the number of DumpRenderTree instances to use for this port."""
return self._executive.cpu_count()
def default_max_locked_shards(self):
"""Return the number of "locked" shards to run in parallel (like the http tests)."""
return 1
def worker_startup_delay_secs(self):
# FIXME: If we start workers up too quickly, DumpRenderTree appears
# to thrash on something and time out its first few tests. Until
# we can figure out what's going on, sleep a bit in between
# workers. See https://bugs.webkit.org/show_bug.cgi?id=79147 .
return 0.1
def baseline_path(self):
"""Return the absolute path to the directory to store new baselines in for this port."""
# FIXME: remove once all callers are calling either baseline_version_dir() or baseline_platform_dir()
return self.baseline_version_dir()
def baseline_platform_dir(self):
"""Return the absolute path to the default (version-independent) platform-specific results."""
return self._filesystem.join(self.layout_tests_dir(), 'platform', self.port_name)
def baseline_version_dir(self):
"""Return the absolute path to the platform-and-version-specific results."""
baseline_search_paths = self.baseline_search_path()
return baseline_search_paths[0]
def baseline_search_path(self):
return self.get_option('additional_platform_directory', []) + self._compare_baseline() + self.default_baseline_search_path()
def default_baseline_search_path(self):
"""Return a list of absolute paths to directories to search under for
baselines. The directories are searched in order."""
search_paths = []
if self.get_option('webkit_test_runner'):
search_paths.append(self._wk2_port_name())
search_paths.append(self.name())
if self.name() != self.port_name:
search_paths.append(self.port_name)
return map(self._webkit_baseline_path, search_paths)
@memoized
def _compare_baseline(self):
factory = PortFactory(self.host)
target_port = self.get_option('compare_port')
if target_port:
return factory.get(target_port).default_baseline_search_path()
return []
def check_build(self, needs_http):
"""This routine is used to ensure that the build is up to date
and all the needed binaries are present."""
# If we're using a pre-built copy of WebKit (--root), we assume it also includes a build of DRT.
if not self._root_was_set and self.get_option('build') and not self._build_driver():
return False
if not self._check_driver():
return False
if self.get_option('pixel_tests'):
if not self.check_image_diff():
return False
if not self._check_port_build():
return False
return True
def _check_driver(self):
driver_path = self._path_to_driver()
if not self._filesystem.exists(driver_path):
_log.error("%s was not found at %s" % (self.driver_name(), driver_path))
return False
return True
def _check_port_build(self):
# Ports can override this method to do additional checks.
return True
def check_sys_deps(self, needs_http):
"""If the port needs to do some runtime checks to ensure that the
tests can be run successfully, it should override this routine.
This step can be skipped with --nocheck-sys-deps.
Returns whether the system is properly configured."""
if needs_http:
return self.check_httpd()
return True
def check_image_diff(self, override_step=None, logging=True):
"""This routine is used to check whether image_diff binary exists."""
image_diff_path = self._path_to_image_diff()
if not self._filesystem.exists(image_diff_path):
_log.error("ImageDiff was not found at %s" % image_diff_path)
return False
return True
def check_pretty_patch(self, logging=True):
"""Checks whether we can use the PrettyPatch ruby script."""
try:
_ = self._executive.run_command(['ruby', '--version'])
except OSError, e:
if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
if logging:
_log.warning("Ruby is not installed; can't generate pretty patches.")
_log.warning('')
return False
if not self._filesystem.exists(self._pretty_patch_path):
if logging:
_log.warning("Unable to find %s; can't generate pretty patches." % self._pretty_patch_path)
_log.warning('')
return False
return True
def check_wdiff(self, logging=True):
if not self._path_to_wdiff():
# Don't need to log here since this is the port choosing not to use wdiff.
return False
try:
_ = self._executive.run_command([self._path_to_wdiff(), '--help'])
except OSError:
if logging:
message = self._wdiff_missing_message()
if message:
for line in message.splitlines():
_log.warning(' ' + line)
_log.warning('')
return False
return True
def _wdiff_missing_message(self):
return 'wdiff is not installed; please install it to generate word-by-word diffs.'
def check_httpd(self):
if self._uses_apache():
httpd_path = self._path_to_apache()
else:
httpd_path = self._path_to_lighttpd()
try:
server_name = self._filesystem.basename(httpd_path)
env = self.setup_environ_for_server(server_name)
if self._executive.run_command([httpd_path, "-v"], env=env, return_exit_code=True) != 0:
_log.error("httpd seems broken. Cannot run http tests.")
return False
return True
except OSError:
_log.error("No httpd found. Cannot run http tests.")
return False
def do_text_results_differ(self, expected_text, actual_text):
return expected_text != actual_text
def do_audio_results_differ(self, expected_audio, actual_audio):
return expected_audio != actual_audio
def diff_image(self, expected_contents, actual_contents, tolerance=None):
"""Compare two images and return a tuple of an image diff, a percentage difference (0-100), and an error string.
|tolerance| should be a percentage value (0.0 - 100.0).
If it is omitted, the port default tolerance value is used.
If an error occurs (like ImageDiff isn't found, or crashes, we log an error and return True (for a diff).
"""
if not actual_contents and not expected_contents:
return (None, 0, None)
if not actual_contents or not expected_contents:
return (True, 0, None)
if not self._image_differ:
self._image_differ = image_diff.ImageDiffer(self)
self.set_option_default('tolerance', 0.1)
if tolerance is None:
tolerance = self.get_option('tolerance')
return self._image_differ.diff_image(expected_contents, actual_contents, tolerance)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
"""Returns a string containing the diff of the two text strings
in 'unified diff' format."""
# The filenames show up in the diff output, make sure they're
# raw bytes and not unicode, so that they don't trigger join()
# trying to decode the input.
def to_raw_bytes(string_value):
if isinstance(string_value, unicode):
return string_value.encode('utf-8')
return string_value
expected_filename = to_raw_bytes(expected_filename)
actual_filename = to_raw_bytes(actual_filename)
diff = difflib.unified_diff(expected_text.splitlines(True),
actual_text.splitlines(True),
expected_filename,
actual_filename)
return ''.join(diff)
def check_for_leaks(self, process_name, process_pid):
# Subclasses should check for leaks in the running process
# and print any necessary warnings if leaks are found.
# FIXME: We should consider moving much of this logic into
# Executive and make it platform-specific instead of port-specific.
pass
def print_leaks_summary(self):
# Subclasses can override this to print a summary of leaks found
# while running the layout tests.
pass
def driver_name(self):
if self.get_option('driver_name'):
return self.get_option('driver_name')
if self.get_option('webkit_test_runner'):
return 'WebKitTestRunner'
return 'DumpRenderTree'
def expected_baselines_by_extension(self, test_name):
"""Returns a dict mapping baseline suffix to relative path for each baseline in
a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
# FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
# We should probably rename them both.
baseline_dict = {}
reference_files = self.reference_files(test_name)
if reference_files:
# FIXME: How should this handle more than one type of reftest?
baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
for extension in self.baseline_extensions():
path = self.expected_filename(test_name, extension, return_default=False)
baseline_dict[extension] = self.relative_test_filename(path) if path else path
return baseline_dict
def baseline_extensions(self):
"""Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
return ('.wav', '.webarchive', '.txt', '.png')
def expected_baselines(self, test_name, suffix, all_baselines=False):
"""Given a test name, finds where the baseline results are located.
Args:
test_name: name of test file (usually a relative path under LayoutTests/)
suffix: file suffix of the expected results, including dot; e.g.
'.txt' or '.png'. This should not be None, but may be an empty
string.
all_baselines: If True, return an ordered list of all baseline paths
for the given platform. If False, return only the first one.
Returns
a list of ( platform_dir, results_filename ), where
platform_dir - abs path to the top of the results tree (or test
tree)
results_filename - relative path from top of tree to the results
file
(port.join() of the two gives you the full path to the file,
unless None was returned.)
Return values will be in the format appropriate for the current
platform (e.g., "\\" for path separators on Windows). If the results
file is not found, then None will be returned for the directory,
but the expected relative pathname will still be returned.
This routine is generic but lives here since it is used in
conjunction with the other baseline and filename routines that are
platform specific.
"""
baseline_filename = self._filesystem.splitext(test_name)[0] + '-expected' + suffix
baseline_search_path = self.baseline_search_path()
baselines = []
for platform_dir in baseline_search_path:
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if not all_baselines and baselines:
return baselines
# If it wasn't found in a platform directory, return the expected
# result in the test directory, even if no such file actually exists.
platform_dir = self.layout_tests_dir()
if self._filesystem.exists(self._filesystem.join(platform_dir, baseline_filename)):
baselines.append((platform_dir, baseline_filename))
if baselines:
return baselines
return [(None, baseline_filename)]
def expected_filename(self, test_name, suffix, return_default=True):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories,
the directory in which the test itself is located will be returned.
The return value is in the format appropriate for the platform
(e.g., "\\" for path separators on windows).
Args:
test_name: name of test file (usually a relative path under LayoutTests/)
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
platform: the most-specific directory name to use to build the
search list of directories, e.g., 'chromium-win', or
'chromium-cg-mac-leopard' (we follow the WebKit format)
return_default: if True, returns the path to the generic expectation if nothing
else is found; if False, returns None.
This routine is generic but is implemented here to live alongside
the other baseline and filename manipulation routines.
"""
# FIXME: The [0] here is very mysterious, as is the destructured return.
platform_dir, baseline_filename = self.expected_baselines(test_name, suffix)[0]
if platform_dir:
return self._filesystem.join(platform_dir, baseline_filename)
actual_test_name = self.lookup_virtual_test_base(test_name)
if actual_test_name:
return self.expected_filename(actual_test_name, suffix)
if return_default:
return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
return None
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
png_path = self.expected_filename(test_name, '.png')
if self._filesystem.exists(png_path):
with self._filesystem.open_binary_file_for_reading(png_path) as filehandle:
return read_checksum_from_png.read_checksum(filehandle)
return None
def expected_image(self, test_name):
"""Returns the image we expect the test to produce."""
baseline_path = self.expected_filename(test_name, '.png')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_audio(self, test_name):
baseline_path = self.expected_filename(test_name, '.wav')
if not self._filesystem.exists(baseline_path):
return None
return self._filesystem.read_binary_file(baseline_path)
def expected_text(self, test_name):
"""Returns the text output we expect the test to produce, or None
if we don't expect there to be any text output.
End-of-line characters are normalized to '\n'."""
# FIXME: DRT output is actually utf-8, but since we don't decode the
# output from DRT (instead treating it as a binary string), we read the
# baselines as a binary string, too.
baseline_path = self.expected_filename(test_name, '.txt')
if not self._filesystem.exists(baseline_path):
baseline_path = self.expected_filename(test_name, '.webarchive')
if not self._filesystem.exists(baseline_path):
return None
text = self._filesystem.read_binary_file(baseline_path)
return text.replace("\r\n", "\n")
def _get_reftest_list(self, test_name):
dirname = self._filesystem.join(self.layout_tests_dir(), self._filesystem.dirname(test_name))
if dirname not in self._reftest_list:
self._reftest_list[dirname] = Port._parse_reftest_list(self._filesystem, dirname)
return self._reftest_list[dirname]
    @staticmethod
    def _parse_reftest_list(filesystem, test_dirpath):
        """Parse test_dirpath/reftest.list into a dict mapping an absolute test
        path to a list of (expectation, absolute reference path) pairs.

        Each non-comment line has the form "<expectation> <test-file> <ref-file>".
        Returns None if the directory has no reftest.list file.
        """
        reftest_list_path = filesystem.join(test_dirpath, 'reftest.list')
        if not filesystem.isfile(reftest_list_path):
            return None
        reftest_list_file = filesystem.read_text_file(reftest_list_path)
        parsed_list = {}
        for line in reftest_list_file.split('\n'):
            line = re.sub('#.+$', '', line)  # Strip trailing '#' comments.
            split_line = line.split()
            if len(split_line) < 3:
                continue
            # NOTE(review): a line with more than three fields raises a
            # ValueError here instead of being skipped -- confirm intended.
            expectation_type, test_file, ref_file = split_line
            parsed_list.setdefault(filesystem.join(test_dirpath, test_file), []).append((expectation_type, filesystem.join(test_dirpath, ref_file)))
        return parsed_list
def reference_files(self, test_name):
"""Return a list of expectation (== or !=) and filename pairs"""
reftest_list = self._get_reftest_list(test_name)
if not reftest_list:
reftest_list = []
for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
for extention in Port._supported_file_extensions:
path = self.expected_filename(test_name, prefix + extention)
if self._filesystem.exists(path):
reftest_list.append((expectation, path))
return reftest_list
return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable-msg=E1103
def tests(self, paths):
"""Return the list of tests found. Both generic and platform-specific tests matching paths should be returned."""
expanded_paths = self._expanded_paths(paths)
tests = self._real_tests(expanded_paths)
tests.extend(self._virtual_tests(expanded_paths, self.populated_virtual_test_suites()))
return tests
def _expanded_paths(self, paths):
expanded_paths = []
fs = self._filesystem
all_platform_dirs = [path for path in fs.glob(fs.join(self.layout_tests_dir(), 'platform', '*')) if fs.isdir(path)]
for path in paths:
expanded_paths.append(path)
if self.test_isdir(path) and not path.startswith('platform'):
for platform_dir in all_platform_dirs:
if fs.isdir(fs.join(platform_dir, path)) and platform_dir in self.baseline_search_path():
expanded_paths.append(self.relative_test_filename(fs.join(platform_dir, path)))
return expanded_paths
def _real_tests(self, paths):
# When collecting test cases, skip these directories
skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests', 'reference', 'reftest'])
files = find_files.find(self._filesystem, self.layout_tests_dir(), paths, skipped_directories, Port._is_test_file, self.test_key)
return [self.relative_test_filename(f) for f in files]
# When collecting test cases, we include any file with these extensions.
_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
'.htm', '.php', '.svg', '.mht'])
@staticmethod
def is_reference_html_file(filesystem, dirname, filename):
if filename.startswith('ref-') or filename.endswith('notref-'):
return True
filename_wihout_ext, unused = filesystem.splitext(filename)
for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
if filename_wihout_ext.endswith(suffix):
return True
return False
@staticmethod
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a test on."""
extension = filesystem.splitext(filename)[1]
return extension in Port._supported_file_extensions
@staticmethod
def _is_test_file(filesystem, dirname, filename):
return Port._has_supported_extension(filesystem, filename) and not Port.is_reference_html_file(filesystem, dirname, filename)
def test_key(self, test_name):
"""Turns a test name into a list with two sublists, the natural key of the
dirname, and the natural key of the basename.
This can be used when sorting paths so that files in a directory.
directory are kept together rather than being mixed in with files in
subdirectories."""
dirname, basename = self.split_test(test_name)
return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR), self._natural_sort_key(basename))
def _natural_sort_key(self, string_to_split):
""" Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
This can be used to implement "natural sort" order. See:
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
http://nedbatchelder.com/blog/200712.html#e20071211T054956
"""
def tryint(val):
try:
return int(val)
except ValueError:
return val
return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
def test_dirs(self):
"""Returns the list of top-level test directories."""
layout_tests_dir = self.layout_tests_dir()
return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)),
self._filesystem.listdir(layout_tests_dir))
@memoized
def test_isfile(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isfile(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isfile(self.abspath_for_test(base))
@memoized
def test_isdir(self, test_name):
"""Return True if the test name refers to a directory of tests."""
# Used by test_expectations.py to apply rules to whole directories.
if self._filesystem.isdir(self.abspath_for_test(test_name)):
return True
base = self.lookup_virtual_test_base(test_name)
return base and self._filesystem.isdir(self.abspath_for_test(base))
@memoized
def test_exists(self, test_name):
"""Return True if the test name refers to an existing test or baseline."""
# Used by test_expectations.py to determine if an entry refers to a
# valid test and by printing.py to determine if baselines exist.
return self.test_isfile(test_name) or self.test_isdir(test_name)
    def split_test(self, test_name):
        """Splits a test name into the 'directory' part and the 'basename' part.

        Note: the returned basename retains its leading path separator
        (test_name[index:], not index + 1), and a name whose only separator
        is at position 0 -- or that has none -- comes back as ('', test_name).
        """
        index = test_name.rfind(self.TEST_PATH_SEPARATOR)
        if index < 1:
            return ('', test_name)
        return (test_name[0:index], test_name[index:])
def normalize_test_name(self, test_name):
"""Returns a normalized version of the test name or test directory."""
if test_name.endswith('/'):
return test_name
if self.test_isdir(test_name):
return test_name + '/'
return test_name
    def driver_cmd_line(self):
        """Returns (does not print) the DRT command line that will be used."""
        driver = self.create_driver(0)
        return driver.cmd_line(self.get_option('pixel_tests'), [])
def update_baseline(self, baseline_path, data):
"""Updates the baseline for a test.
Args:
baseline_path: the actual path to use for baseline, not the path to
the test. This function is used to update either generic or
platform-specific baselines, but we can't infer which here.
data: contents of the baseline.
"""
self._filesystem.write_binary_file(baseline_path, data)
# FIXME: update callers to create a finder and call it instead of these next five routines (which should be protected).
def webkit_base(self):
return self._webkit_finder.webkit_base()
def path_from_webkit_base(self, *comps):
return self._webkit_finder.path_from_webkit_base(*comps)
def path_to_script(self, script_name):
return self._webkit_finder.path_to_script(script_name)
def layout_tests_dir(self):
return self._webkit_finder.layout_tests_dir()
def perf_tests_dir(self):
return self._webkit_finder.perf_tests_dir()
def skipped_layout_tests(self, test_list):
"""Returns tests skipped outside of the TestExpectations files."""
return set(self._tests_for_other_platforms()).union(self._skipped_tests_for_unsupported_features(test_list))
def _tests_from_skipped_file_contents(self, skipped_file_contents):
tests_to_skip = []
for line in skipped_file_contents.split('\n'):
line = line.strip()
line = line.rstrip('/') # Best to normalize directory names to not include the trailing slash.
if line.startswith('#') or not len(line):
continue
tests_to_skip.append(line)
return tests_to_skip
def _expectations_from_skipped_files(self, skipped_file_paths):
tests_to_skip = []
for search_path in skipped_file_paths:
filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
if not self._filesystem.exists(filename):
_log.debug("Skipped does not exist: %s" % filename)
continue
_log.debug("Using Skipped file: %s" % filename)
skipped_file_contents = self._filesystem.read_text_file(filename)
tests_to_skip.extend(self._tests_from_skipped_file_contents(skipped_file_contents))
return tests_to_skip
@memoized
def skipped_perf_tests(self):
return self._expectations_from_skipped_files([self.perf_tests_dir()])
def skips_perf_test(self, test_name):
for test_or_category in self.skipped_perf_tests():
if test_or_category == test_name:
return True
category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
return True
return False
def is_chromium(self):
    # Overridden to return True by the Chromium ports.
    return False

def name(self):
    """Returns a name that uniquely identifies this particular type of port
    (e.g., "mac-snowleopard" or "chromium-linux-x86_x64") and can be passed
    to factory.get() to instantiate the port."""
    return self._name

def operating_system(self):
    # Subclasses should override this default implementation.
    return 'mac'

def version(self):
    """Returns a string indicating the version of a given platform, e.g.
    'leopard' or 'xp'.

    This is used to help identify the exact port when parsing test
    expectations, determining search paths, and logging information."""
    return self._version

def architecture(self):
    # Architecture string (set at construction), e.g. 'x86_64'.
    return self._architecture

def get_option(self, name, default_value=None):
    """Look up a command-line option by name, returning *default_value* if unset."""
    return getattr(self._options, name, default_value)

def set_option_default(self, name, default_value):
    """Set an option only if it does not already have a value (optparse ensure_value)."""
    return self._options.ensure_value(name, default_value)

@memoized
def path_to_test_expectations_file(self):
    """Returns the path to this port's main TestExpectations file.

    This is used by the rebaselining tool. Raises NotImplementedError
    if the port does not use expectations files."""
    # FIXME: We need to remove this when we make rebaselining work with multiple files and just generalize expectations_files().

    # test_expectations are always in mac/ not mac-leopard/ by convention, hence we use port_name instead of name().
    port_name = self.port_name
    if port_name.startswith('chromium'):
        port_name = 'chromium'

    return self._filesystem.join(self._webkit_baseline_path(port_name), 'TestExpectations')
def relative_test_filename(self, filename):
    """Returns a test name: a relative unix-style path for a filename under the
    LayoutTests directory. Ports may legitimately return abspaths here if no
    relpath makes sense."""
    # Ports that run on windows need to override this method to deal with
    # filenames with backslashes in them.
    if filename.startswith(self.layout_tests_dir()):
        return self.host.filesystem.relpath(filename, self.layout_tests_dir())
    else:
        return self.host.filesystem.abspath(filename)

def relative_perf_test_filename(self, filename):
    """Like relative_test_filename(), but relative to the PerformanceTests directory."""
    if filename.startswith(self.perf_tests_dir()):
        return self.host.filesystem.relpath(filename, self.perf_tests_dir())
    else:
        return self.host.filesystem.abspath(filename)

@memoized
def abspath_for_test(self, test_name):
    """Returns the full path to the file for a given test name. This is the
    inverse of relative_test_filename()."""
    return self._filesystem.join(self.layout_tests_dir(), test_name)

def results_directory(self):
    """Absolute path to the place to store the test results (uses --results-directory)."""
    if not self._results_directory:
        option_val = self.get_option('results_directory') or self.default_results_directory()
        self._results_directory = self._filesystem.abspath(option_val)
    return self._results_directory

def perf_results_directory(self):
    # Perf results are written at the root of the build-products directory.
    return self._build_path()

def default_results_directory(self):
    """Absolute path to the default place to store the test results."""
    # Results are stored relative to the built products to make it easy
    # to have multiple copies of webkit checked out and built.
    return self._build_path('layout-test-results')

def setup_test_run(self):
    """Perform port-specific work at the beginning of a test run."""
    pass

def clean_up_test_run(self):
    """Perform port-specific work at the end of a test run."""
    if self._image_differ:
        self._image_differ.stop()
        self._image_differ = None
# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
if name in os.environ:
return os.environ[name]
return default
def _copy_value_from_environ_if_set(self, clean_env, name):
if name in os.environ:
clean_env[name] = os.environ[name]
def setup_environ_for_server(self, server_name=None):
    """Build the minimal environment dict used to launch test servers/drivers.

    We intentionally copy only a subset of os.environ when
    launching subprocesses to ensure consistent test results."""
    clean_env = {}
    variables_to_copy = [
        # For Linux:
        'XAUTHORITY',
        'HOME',
        'LANG',
        'LD_LIBRARY_PATH',
        'DBUS_SESSION_BUS_ADDRESS',
        'XDG_DATA_DIRS',

        # Darwin:
        'DYLD_LIBRARY_PATH',
        'HOME',

        # CYGWIN:
        'HOMEDRIVE',
        'HOMEPATH',
        '_NT_SYMBOL_PATH',

        # Windows:
        'PATH',

        # Most ports (?):
        'WEBKIT_TESTFONTS',
        'WEBKITOUTPUTDIR',
    ]
    for variable in variables_to_copy:
        self._copy_value_from_environ_if_set(clean_env, variable)

    # For Linux:
    clean_env['DISPLAY'] = self._value_or_default_from_environ('DISPLAY', ':1')

    # --additional-env-var entries are NAME=VALUE strings; split on the first '='.
    for string_variable in self.get_option('additional_env_var', []):
        [name, value] = string_variable.split('=', 1)
        clean_env[name] = value

    return clean_env

def show_results_html_file(self, results_filename):
    """This routine should display the HTML file pointed at by
    results_filename in a users' browser."""
    return self.host.user.open_url(path.abspath_to_uri(self.host.platform, results_filename))

def create_driver(self, worker_number, no_timeout=False):
    """Return a newly created Driver subclass for starting/stopping the test driver."""
    return driver.DriverProxy(self, worker_number, self._driver_class(), pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
def start_helper(self):
    """If a port needs to reconfigure graphics settings or do other
    things to ensure a known test configuration, it should override this
    method."""
    pass

def requires_http_server(self):
    """Does the port require an HTTP server for running tests? This could
    be the case when the tests aren't run on the host platform."""
    return False

def start_http_server(self, additional_dirs=None, number_of_servers=None):
    """Start a web server. Raise an error if it can't start or is already running.

    Ports can stub this out if they don't need a web server to be running."""
    assert not self._http_server, 'Already running an http server.'

    # Apache vs lighttpd is a per-port decision (see _uses_apache()).
    if self._uses_apache():
        server = apache_http_server.LayoutTestApacheHttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)
    else:
        server = http_server.Lighttpd(self, self.results_directory(), additional_dirs=additional_dirs, number_of_servers=number_of_servers)

    server.start()
    self._http_server = server

def start_websocket_server(self):
    """Start a web server. Raise an error if it can't start or is already running.

    Ports can stub this out if they don't need a websocket server to be running."""
    assert not self._websocket_server, 'Already running a websocket server.'

    server = websocket_server.PyWebSocket(self, self.results_directory())
    server.start()
    self._websocket_server = server

def http_server_supports_ipv6(self):
    # Cygwin is the only platform to still use Apache 1.3, which only supports IPV4.
    # Once it moves to Apache 2, we can drop this method altogether.
    if self.host.platform.is_cygwin():
        return False
    return True

def acquire_http_lock(self):
    # Serializes HTTP tests across concurrent test runners on this machine.
    self._http_lock = http_lock.HttpLock(None, filesystem=self._filesystem, executive=self._executive)
    self._http_lock.wait_for_httpd_lock()

def stop_helper(self):
    """Shut down the test helper if it is running. Do nothing if
    it isn't, or it isn't available. If a port overrides start_helper()
    it must override this routine as well."""
    pass

def stop_http_server(self):
    """Shut down the http server if it is running. Do nothing if it isn't."""
    if self._http_server:
        self._http_server.stop()
        self._http_server = None

def stop_websocket_server(self):
    """Shut down the websocket server if it is running. Do nothing if it isn't."""
    if self._websocket_server:
        self._websocket_server.stop()
        self._websocket_server = None

def release_http_lock(self):
    # Counterpart of acquire_http_lock(); safe to call when no lock is held.
    if self._http_lock:
        self._http_lock.cleanup_http_lock()
def exit_code_from_summarized_results(self, unexpected_results):
    """Compute new-run-webkit-tests' exit code from summarized results.

    A non-zero exit code turns bots red, so only genuine regressions count;
    flaky failures, unexpected passes and missing results do not contribute."""
    regression_count = unexpected_results['num_regressions']
    return regression_count
#
# TEST EXPECTATION-RELATED METHODS
#

def test_configuration(self):
    """Returns the current TestConfiguration for the port."""
    if not self._test_configuration:
        self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
    return self._test_configuration

# FIXME: Belongs on a Platform object.
@memoized
def all_test_configurations(self):
    """Returns a list of TestConfiguration instances, representing all available
    test configurations for this port."""
    return self._generate_all_test_configurations()

# FIXME: Belongs on a Platform object.
def configuration_specifier_macros(self):
    """Ports may provide a way to abbreviate configuration specifiers to conveniently
    refer to them as one term or alias specific values to more generic ones. For example:

    (xp, vista, win7) -> win # Abbreviate all Windows versions into one namesake.
    (lucid) -> linux  # Change specific name of the Linux distro to a more generic term.

    Returns a dictionary, each key representing a macro term ('win', for example),
    and value being a list of valid configuration specifiers (such as ['xp', 'vista', 'win7'])."""
    return {}
def all_baseline_variants(self):
    """Returns a list of platform names sufficient to cover all the baselines.

    The list should be sorted so that a later platform will reuse
    an earlier platform's baselines if they are the same (e.g.,
    'snowleopard' should precede 'leopard')."""
    raise NotImplementedError

def uses_test_expectations_file(self):
    # This is different from checking test_expectations() is None, because
    # some ports have Skipped files which are returned as part of test_expectations().
    return self._filesystem.exists(self.path_to_test_expectations_file())

def warn_if_bug_missing_in_test_expectations(self):
    # Ports that want expectation lines to carry bug links override this.
    return False

def expectations_dict(self):
    """Returns an OrderedDict of name -> expectations strings.

    The names are expected to be (but not required to be) paths in the filesystem.
    If the name is a path, the file can be considered updatable for things like rebaselining,
    so don't use names that are paths if they're not paths.

    Generally speaking the ordering should be files in the filesystem in cascade order
    (TestExpectations followed by Skipped, if the port honors both formats),
    then any built-in expectations (e.g., from compile-time exclusions), then --additional-expectations options."""
    # FIXME: rename this to test_expectations() once all the callers are updated to know about the ordered dict.
    expectations = OrderedDict()

    for path in self.expectations_files():
        if self._filesystem.exists(path):
            expectations[path] = self._filesystem.read_text_file(path)

    for path in self.get_option('additional_expectations', []):
        expanded_path = self._filesystem.expanduser(path)
        if self._filesystem.exists(expanded_path):
            _log.debug("reading additional_expectations from path '%s'" % path)
            expectations[path] = self._filesystem.read_text_file(expanded_path)
        else:
            _log.warning("additional_expectations path '%s' does not exist" % path)

    return expectations

def expectations_files(self):
    # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
    # included via --additional-platform-directory, not the full cascade.
    search_paths = [self.port_name]

    if self.name() != self.port_name:
        search_paths.append(self.name())

    if self.get_option('webkit_test_runner'):
        # Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
        # issues, all wk2 ports share a skipped list under platform/wk2.
        search_paths.extend([self._wk2_port_name(), "wk2"])

    search_paths.extend(self.get_option("additional_platform_directory", []))

    return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in search_paths]

def repository_paths(self):
    """Returns a list of (repository_name, repository_path) tuples of its depending code base.

    By default it returns a list that only contains a ('webkit', <webkitRepositoryPath>) tuple."""
    # We use the LayoutTests directory here because webkit_base isn't part of the webkit
    # repository in the Chromium port, where trunk isn't checked out as a whole.
    return [('webkit', self.layout_tests_dir())]
# Markers inserted by wdiff (via _wdiff_command) and later rewritten into HTML spans.
_WDIFF_DEL = '##WDIFF_DEL##'
_WDIFF_ADD = '##WDIFF_ADD##'
_WDIFF_END = '##WDIFF_END##'

def _format_wdiff_output_as_html(self, wdiff):
    """Convert raw wdiff output (containing our markers) into an HTML fragment."""
    # NOTE(review): cgi.escape was removed in Python 3.8; this is Python 2-era
    # code -- any Python 3 migration must switch to html.escape(..., quote=False).
    wdiff = cgi.escape(wdiff)
    wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>")
    wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>")
    wdiff = wdiff.replace(self._WDIFF_END, "</span>")
    html = "<head><style>.del { background: #faa; } "
    html += ".add { background: #afa; }</style></head>"
    html += "<pre>%s</pre>" % wdiff
    return html

def _wdiff_command(self, actual_filename, expected_filename):
    """Build the wdiff argv that wraps insertions/deletions in our markers."""
    executable = self._path_to_wdiff()
    return [executable,
            "--start-delete=%s" % self._WDIFF_DEL,
            "--end-delete=%s" % self._WDIFF_END,
            "--start-insert=%s" % self._WDIFF_ADD,
            "--end-insert=%s" % self._WDIFF_END,
            actual_filename,
            expected_filename]

@staticmethod
def _handle_wdiff_error(script_error):
    # Exit 1 means the files differed, any other exit code is an error.
    if script_error.exit_code != 1:
        raise script_error

def _run_wdiff(self, actual_filename, expected_filename):
    """Runs wdiff and may throw exceptions.

    This is mostly a hook for unit testing."""
    # Diffs are treated as binary as they may include multiple files
    # with conflicting encodings.  Thus we do not decode the output.
    command = self._wdiff_command(actual_filename, expected_filename)
    wdiff = self._executive.run_command(command, decode_output=False,
                                        error_handler=self._handle_wdiff_error)
    return self._format_wdiff_output_as_html(wdiff)
def wdiff_text(self, actual_filename, expected_filename):
    """Returns a string of HTML indicating the word-level diff of the
    contents of the two filenames. Returns an empty string if word-level
    diffing isn't available."""
    if not self.wdiff_available():
        return ""
    try:
        # It's possible to raise a ScriptError if we pass wdiff invalid paths.
        return self._run_wdiff(actual_filename, expected_filename)
    except OSError as e:  # 'as' form: required by Python 3, valid since Python 2.6.
        if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]:
            # Silently ignore cases where wdiff is missing.
            self._wdiff_available = False
            return ""
        raise
# This is a class variable so we can test error output easily.
_pretty_patch_error_html = "Failed to run PrettyPatch, see error log."

def pretty_patch_text(self, diff_path):
    """Render the diff at *diff_path* to HTML via the PrettyPatch ruby script.

    Returns _pretty_patch_error_html when PrettyPatch is unavailable or fails;
    availability is cached on self._pretty_patch_available."""
    if self._pretty_patch_available is None:
        self._pretty_patch_available = self.check_pretty_patch(logging=False)
    if not self._pretty_patch_available:
        return self._pretty_patch_error_html
    command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path),
               self._pretty_patch_path, diff_path)
    try:
        # Diffs are treated as binary (we pass decode_output=False) as they
        # may contain multiple files of conflicting encodings.
        return self._executive.run_command(command, decode_output=False)
    except OSError as e:  # 'as' form: required by Python 3, valid since Python 2.6.
        # If the system is missing ruby log the error and stop trying.
        self._pretty_patch_available = False
        _log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
        return self._pretty_patch_error_html
    except ScriptError as e:
        # If ruby failed to run for some reason, log the command
        # output and stop trying.
        self._pretty_patch_available = False
        _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
        return self._pretty_patch_error_html
def default_configuration(self):
    """Return the default build configuration (e.g. 'Release' or 'Debug')."""
    return self._config.default_configuration()

#
# PROTECTED ROUTINES
#
# The routines below should only be called by routines in this class
# or any of its subclasses.
#

def _uses_apache(self):
    # Ports that use lighttpd for HTTP tests override this to return False.
    return True

# FIXME: This does not belong on the port object.
@memoized
def _path_to_apache(self):
    """Returns the full path to the apache binary.

    This is needed only by ports that use the apache_http_server module."""
    # The Apache binary path can vary depending on OS and distribution
    # See http://wiki.apache.org/httpd/DistrosDefaultLayout
    for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
        if self._filesystem.exists(path):
            return path
    _log.error("Could not find apache. Not installed or unknown path.")
    return None

# FIXME: This belongs on some platform abstraction instead of Port.
def _is_redhat_based(self):
    # Red Hat-style distros (Fedora, CentOS, ...) ship this release file.
    return self._filesystem.exists('/etc/redhat-release')

def _is_debian_based(self):
    # Debian/Ubuntu-style distros ship this version file.
    return self._filesystem.exists('/etc/debian_version')
# We pass sys_platform into this method to make it easy to unit test.
def _apache_config_file_name_for_platform(self, sys_platform):
if sys_platform == 'cygwin':
return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
if sys_platform.startswith('linux'):
if self._is_redhat_based():
return 'fedora-httpd.conf' # This is an Apache 2.x config file despite the naming.
if self._is_debian_based():
return 'apache2-debian-httpd.conf'
# All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
return "apache2-httpd.conf"
def _path_to_apache_config_file(self):
    """Returns the full path to the apache configuration file.

    If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, its
    contents will be used instead.

    This is needed only by ports that use the apache_http_server module."""
    config_file_from_env = os.environ.get('WEBKIT_HTTP_SERVER_CONF_PATH')
    if config_file_from_env:
        if not self._filesystem.exists(config_file_from_env):
            raise IOError('%s was not found on the system' % config_file_from_env)
        return config_file_from_env

    config_file_name = self._apache_config_file_name_for_platform(sys.platform)
    return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', config_file_name)

def _build_path(self, *comps):
    """Join *comps* onto the absolute build-products directory.

    The root comes from --root, else --build-directory plus the configuration,
    else the (slow) checkout-level config lookup."""
    root_directory = self.get_option('root')
    if not root_directory:
        build_directory = self.get_option('build_directory')
        if build_directory:
            root_directory = self._filesystem.join(build_directory, self.get_option('configuration'))
        else:
            root_directory = self._config.build_directory(self.get_option('configuration'))
        # Set --root so that we can pass this to subprocesses and avoid making the
        # slow call to config.build_directory() N times in each worker.
        # FIXME: This is like @memoized, but more annoying and fragile; there should be another
        # way to propagate values without mutating the options list.
        self.set_option_default('root', root_directory)
    return self._filesystem.join(self._filesystem.abspath(root_directory), *comps)
def _path_to_driver(self, configuration=None):
    """Returns the full path to the test driver (DumpRenderTree)."""
    return self._build_path(self.driver_name())

def _path_to_webcore_library(self):
    """Returns the full path to a built copy of WebCore."""
    # Overridden by ports that support symbol-based feature detection.
    return None

def _path_to_helper(self):
    """Returns the full path to the layout_test_helper binary, which
    is used to help configure the system for the test run, or None
    if no helper is needed.

    This is likely only used by start/stop_helper()."""
    return None

def _path_to_image_diff(self):
    """Returns the full path to the image_diff binary, or None if it is not available.

    This is likely used only by diff_image()"""
    return self._build_path('ImageDiff')

def _path_to_lighttpd(self):
    """Returns the path to the LigHTTPd binary.

    This is needed only by ports that use the http_server.py module."""
    raise NotImplementedError('Port._path_to_lighttpd')

def _path_to_lighttpd_modules(self):
    """Returns the path to the LigHTTPd modules directory.

    This is needed only by ports that use the http_server.py module."""
    raise NotImplementedError('Port._path_to_lighttpd_modules')

def _path_to_lighttpd_php(self):
    """Returns the path to the LigHTTPd PHP executable.

    This is needed only by ports that use the http_server.py module."""
    raise NotImplementedError('Port._path_to_lighttpd_php')

@memoized
def _path_to_wdiff(self):
    """Returns the full path to the wdiff binary, or None if it is not available.

    This is likely used only by wdiff_text()"""
    for path in ("/usr/bin/wdiff", "/usr/bin/dwdiff"):
        if self._filesystem.exists(path):
            return path
    return None

def _webkit_baseline_path(self, platform):
    """Return the full path to the top of the baseline tree for a
    given platform."""
    return self._filesystem.join(self.layout_tests_dir(), 'platform', platform)
# FIXME: Belongs on a Platform object.
def _generate_all_test_configurations(self):
    """Generates a list of TestConfiguration instances, representing configurations
    for a platform across all OSes, architectures, build and graphics types."""
    raise NotImplementedError('Port._generate_test_configurations')

def _driver_class(self):
    """Returns the port's driver implementation."""
    return driver.Driver
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
name_str = name or '<unknown process name>'
pid_str = str(pid or '<unknown>')
stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
return (stderr, 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
'\n'.join(('STDOUT: ' + l) for l in stdout_lines),
'\n'.join(('STDERR: ' + l) for l in stderr_lines)))
def look_for_new_crash_logs(self, crashed_processes, start_time):
    """Hook for ports that can harvest system crash logs after a run."""
    pass

def sample_process(self, name, pid):
    """Hook for ports that can sample a wedged process for debugging."""
    pass

def virtual_test_suites(self):
    """Return this port's VirtualTestSuite definitions; overridden by ports."""
    return []

@memoized
def populated_virtual_test_suites(self):
    """Return virtual_test_suites() with each suite's tests mapping filled in
    (virtual test name -> underlying base test name)."""
    suites = self.virtual_test_suites()

    # Sanity-check the suites to make sure they don't point to other suites.
    suite_dirs = [suite.name for suite in suites]
    for suite in suites:
        assert suite.base not in suite_dirs

    for suite in suites:
        base_tests = self._real_tests([suite.base])
        suite.tests = {}
        for test in base_tests:
            suite.tests[test.replace(suite.base, suite.name, 1)] = test
    return suites
def _virtual_tests(self, paths, suites):
virtual_tests = list()
for suite in suites:
if paths:
for test in suite.tests:
if any(test.startswith(p) for p in paths):
virtual_tests.append(test)
else:
virtual_tests.extend(suite.tests.keys())
return virtual_tests
def lookup_virtual_test_base(self, test_name):
    """Map a virtual test name to its underlying base test name, or None
    when *test_name* is not inside any virtual suite."""
    for suite in self.populated_virtual_test_suites():
        if not test_name.startswith(suite.name):
            continue
        return test_name.replace(suite.name, suite.base, 1)
    return None

def lookup_virtual_test_args(self, test_name):
    """Return the extra driver args for a virtual test ([] if not virtual)."""
    matching = [suite for suite in self.populated_virtual_test_suites()
                if test_name.startswith(suite.name)]
    if matching:
        return matching[0].args
    return []
def should_run_as_pixel_test(self, test_input):
    """Decide whether *test_input* should be run with pixel comparison."""
    if not self._options.pixel_tests:
        return False
    if self._options.pixel_test_directories:
        return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
    return self._should_run_as_pixel_test(test_input)

def _should_run_as_pixel_test(self, test_input):
    # Default behavior is to allow all test to run as pixel tests if --pixel-tests is on and
    # --pixel-test-directory is not specified.
    return True

# FIXME: Eventually we should standardize port naming, and make this method smart enough
# to use for all port configurations (including architectures, graphics types, etc).
def _port_flag_for_scripts(self):
    # This is overridden by ports which need a flag passed to scripts to distinguish the use of that port.
    # For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
    # FIXME: Chromium should override this once ChromiumPort is a WebKitPort.
    return None

# This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
def _arguments_for_configuration(self):
    """Build the configuration-related flags (e.g. --release, --qt) for scripts."""
    config_args = []
    config_args.append(self._config.flag_for_configuration(self.get_option('configuration')))
    # FIXME: We may need to add support for passing --32-bit like old-run-webkit-tests had.
    port_flag = self._port_flag_for_scripts()
    if port_flag:
        config_args.append(port_flag)
    return config_args
def _run_script(self, script_name, args=None, include_configuration_arguments=True, decode_output=True, env=None):
    """Run one of the checkout's helper scripts and return its output.

    Configuration flags (from _arguments_for_configuration) are prepended
    unless include_configuration_arguments is False."""
    run_script_command = [self.path_to_script(script_name)]
    if include_configuration_arguments:
        run_script_command.extend(self._arguments_for_configuration())
    if args:
        run_script_command.extend(args)
    output = self._executive.run_command(run_script_command, cwd=self.webkit_base(), decode_output=decode_output, env=env)
    _log.debug('Output of %s:\n%s' % (run_script_command, output))
    return output
def _build_driver(self):
environment = self.host.copy_current_environment()
environment.disable_gcc_smartquotes()
env = environment.to_dictionary()
# FIXME: We build both DumpRenderTree and WebKitTestRunner for
# WebKitTestRunner runs because DumpRenderTree still includes
# the DumpRenderTreeSupport module and the TestNetscapePlugin.
# These two projects should be factored out into their own
# projects.
try:
self._run_script("build-dumprendertree", args=self._build_driver_flags(), env=env)
if self.get_option('webkit_test_runner'):
self._run_script("build-webkittestrunner", args=self._build_driver_flags(), env=env)
except ScriptError, e:
_log.error(e.message_with_output(output_limit=None))
return False
return True
def _build_driver_flags(self):
return []
def _tests_for_other_platforms(self):
    """Return 'platform/<name>' entries to skip for other platforms' trees."""
    # By default we will skip any directory under LayoutTests/platform
    # that isn't in our baseline search path (this mirrors what
    # old-run-webkit-tests does in findTestsToRun()).
    # Note this returns LayoutTests/platform/*, not platform/*/*.
    entries = self._filesystem.glob(self._webkit_baseline_path('*'))
    dirs_to_skip = []
    for entry in entries:
        if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
            basename = self._filesystem.basename(entry)
            dirs_to_skip.append('platform/%s' % basename)
    return dirs_to_skip
def _runtime_feature_list(self):
    """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
    return None

def nm_command(self):
    # The binary used to dump symbols for compile-time feature detection.
    return 'nm'

def _modules_to_search_for_symbols(self):
    # By default only a built WebCore library is scanned for feature symbols.
    path = self._path_to_webcore_library()
    if path:
        return [path]
    return []
def _symbols_string(self):
symbols = ''
for path_to_module in self._modules_to_search_for_symbols():
try:
symbols += self._executive.run_command([self.nm_command(), path_to_module], error_handler=self._executive.ignore_error)
except OSError, e:
_log.warn("Failed to run nm: %s. Can't determine supported features correctly." % e)
return symbols
# Ports which use run-time feature detection should define this method and return
# a dictionary mapping from Feature Names to skipped directories. NRWT will
# run DumpRenderTree --print-supported-features and parse the output.
# If the Feature Names are not found in the output, the corresponding directories
# will be skipped.
def _missing_feature_to_skipped_tests(self):
    """Return the supported feature dictionary. Keys are feature names and values
    are the lists of directories to skip if the feature name is not matched."""
    # FIXME: This list matches WebKitWin and should be moved onto the Win port.
    return {
        "Accelerated Compositing": ["compositing"],
        "3D Rendering": ["animations/3d", "transforms/3d"],
    }

# Ports which use compile-time feature detection should define this method and return
# a dictionary mapping from symbol substrings to possibly disabled test directories.
# When the symbol substrings are not matched, the directories will be skipped.
# If ports don't ever enable certain features, then those directories can just be
# in the Skipped list instead of compile-time-checked here.
def _missing_symbol_to_skipped_tests(self):
    """Return the supported feature dictionary. The keys are symbol-substrings
    and the values are the lists of directories to skip if that symbol is missing."""
    return {
        "MathMLElement": ["mathml"],
        "GraphicsLayer": ["compositing"],
        "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"],
        "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"],
        "MHTMLArchive": ["mhtml"],
        "CSSVariableValue": ["fast/css/variables", "inspector/styles/variables"],
    }
def _has_test_in_directories(self, directory_lists, test_list):
if not test_list:
return False
directories = itertools.chain.from_iterable(directory_lists)
for directory, test in itertools.product(directories, test_list):
if test.startswith(directory):
return True
return False
def _skipped_tests_for_unsupported_features(self, test_list):
# Only check the runtime feature list of there are tests in the test_list that might get skipped.
# This is a performance optimization to avoid the subprocess call to DRT.
# If the port supports runtime feature detection, disable any tests
# for features missing from the runtime feature list.
# If _runtime_feature_list returns a non-None value, then prefer
# runtime feature detection over static feature detection.
if self._has_test_in_directories(self._missing_feature_to_skipped_tests().values(), test_list):
supported_feature_list = self._runtime_feature_list()
if supported_feature_list is not None:
return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
# Only check the symbols of there are tests in the test_list that might get skipped.
# This is a performance optimization to avoid the calling nm.
# Runtime feature detection not supported, fallback to static dectection:
# Disable any tests for symbols missing from the executable or libraries.
if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
symbols_string = self._symbols_string()
if symbols_string is not None:
return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
return []
def _wk2_port_name(self):
# By current convention, the WebKit2 name is always mac-wk2, win-wk2, not mac-leopard-wk2, etc,
# except for Qt because WebKit2 is only supported by Qt 5.0 (therefore: qt-5.0-wk2).
return "%s-wk2" % self.port_name
class VirtualTestSuite(object):
    """A virtual test suite: the tests under *base* re-run under the directory
    *name* with extra driver *args*."""

    def __init__(self, name, base, args, tests=None):
        self.name = name    # Virtual directory prefix, e.g. 'virtual/gpu'.
        self.base = base    # Real directory the tests come from.
        self.args = args    # Extra driver arguments for this suite.
        # Mapping of virtual test name -> base test name; populated later by
        # Port.populated_virtual_test_suites(). Falsy *tests* becomes an empty set.
        self.tests = tests or set()

    def __repr__(self):
        return "VirtualTestSuite('%s', '%s', %s)" % (self.name, self.base, self.args)
| [
"dcaleca@wyplay.com"
] | dcaleca@wyplay.com |
4832cf449db617359ba04076eca87a953ae736ee | a93e6d60f9314181eff5a3b2d8950002432c279b | /ordersweb/migrations/0005_auto_20200416_0819.py | 79374387b9234b63c3e9d7f46f38f97e97e72571 | [] | no_license | shanirabi/hasabim_shotim_site | 2ffa8f60305e3426359f941f60a3c876899fcd92 | 0f53708294b27cbcb9b68bfddda7ac62c776a9d1 | refs/heads/master | 2022-12-17T02:29:07.969202 | 2020-05-26T18:36:50 | 2020-05-26T18:36:50 | 243,497,476 | 0 | 0 | null | 2022-12-08T03:41:52 | 2020-02-27T10:55:54 | HTML | UTF-8 | Python | false | false | 527 | py | # Generated by Django 3.0.2 on 2020-04-16 08:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('carts', '0001_initial'),
('ordersweb', '0004_auto_20200416_0723'),
]
operations = [
migrations.AlterField(
model_name='orderweb',
name='cart',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='carts.Cart'),
),
]
| [
"rabbi.shani@gmail.com"
] | rabbi.shani@gmail.com |
902a73d1028f8660126c938729265ec64884c57f | aa2d5b23fd5c23ffa87af6e2dc4ede4c277e2d24 | /project_LiveCode/settings.py | f62ac28b70de38fe2541071304ae1b1a7f5a0be3 | [] | no_license | adesupraptolaia/DjangoLiveCode | d08359d7febdf1cd4ca811a8de6f709d324c2fe3 | 99c870169e734da29a921769d58043b49e831e9f | refs/heads/master | 2022-12-11T17:53:22.999647 | 2019-10-24T01:02:29 | 2019-10-24T01:02:29 | 198,740,753 | 0 | 0 | null | 2022-12-08T01:05:59 | 2019-07-25T02:19:04 | Python | UTF-8 | Python | false | false | 2,816 | py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7!x4j14%m72z5ru_8wse(-k19p#@$2#eqz!v&snz1x041ghcs&'
# NOTE(review): this key is committed to version control; rotate it and load it
# from the environment (e.g. os.environ) before any real deployment.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # must be False in production: debug error pages leak configuration
ALLOWED_HOSTS = []  # empty is fine only while DEBUG is True; list real hostnames for production
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_LiveCode.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_LiveCode.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"ades@alterra.id"
] | ades@alterra.id |
ed3cf995887326db79c8a41cf378cb93d291fde3 | 4ccaa7115fb14378a749225201911b7df573bddf | /assignments/tests/func_methods_test.py | 39e146133f5bb3cab84d249c4ac642fef51206c6 | [] | no_license | ukhlivanov/python_learning | 50b54462d1aeb04571623d5f867e25b11a5c08fb | c34251a3a2f0e1ae7e2faf9bd7b9db989b95ccca | refs/heads/master | 2022-02-04T02:59:35.501727 | 2019-07-22T01:36:44 | 2019-07-22T01:36:44 | 198,108,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | import math
def vol(rad):
    """Return the volume of a sphere of radius *rad* ((4/3) * pi * r**3)."""
    radius_cubed = rad ** 3
    return (4 / 3) * math.pi * radius_cubed
res = vol(2)
# print(res)
def ran_check(num, low, high):
    """Return True if *num* lies in the inclusive range [low, high].

    The previous implementation used ``num in range(low, high)``, which
    silently excluded *high* itself and always failed for non-integer
    inputs; the chained comparison matches the intent shown in the
    original commented-out line.
    """
    return low <= num <= high
res = ran_check(0,2,7)
# print(res)
def up_low(s):
    """Print the upper-case characters of *s* and their count, then the lower-case ones."""
    uppers = [ch for ch in s if ch.isupper()]
    lowers = [ch for ch in s if ch.islower()]
    print(uppers)
    print(len(uppers))
    print(lowers)
    print(len(lowers))
# up_low('Hello Mr. Rogers, how are you this fine Tuesday?')
def unique_list(lst):
    """Print and return the distinct elements of *lst*.

    Uses dict.fromkeys so the result keeps first-seen order; the previous
    ``list(set(lst))`` produced an arbitrary, interpreter-dependent order.
    """
    uniques = list(dict.fromkeys(lst))
    print(uniques)
    return uniques
# unique_list([1,1,1,1,2,2,3,3,3,3,4,5])
def multiply(numbers):
    """Print and return the product of *numbers* (1 for an empty sequence).

    The previous version computed the product but returned None, making the
    result unusable by callers.
    """
    product = 1
    for value in numbers:
        product *= value
    print(product)
    return product
# multiply([1,2,3,-4])
def palindrome(s):
    """Return True when *s* reads the same forwards and backwards."""
    reversed_s = s[::-1]
    return reversed_s == s
# print(palindrome('helleh'))
import string
def ispangram(str1, alphabet=string.ascii_lowercase):
    """Return True if *str1* contains every character of *alphabet* at least once.

    The previous version compared a sorted/stripped join of the string's
    character set against the alphabet, so any punctuation or digit in the
    input made a genuine pangram fail; a subset test ignores extra
    characters.  Debug prints removed.
    """
    return set(alphabet) <= set(str1.lower())
# print(ispangram("The quick brown fox jumps over the lazy dog")) | [
"ukhlivanov@gmail.com"
] | ukhlivanov@gmail.com |
7fca940a7ecd0d810bb64a3564789a79a9306770 | 5a5e25ab951ca4a4344ebbcf349245afe869ac8b | /dynamic_circuits/PythInJava/practiceCode.py | 00334f4ae01fc6e3831b18daa4e1d34566e9a2e5 | [] | no_license | CIDARLAB/MIT-BroadFoundry | a351088a407070d2c6fdeb8e7490594f85fb671d | 3ab9722c7d4d85fe6bf48ff3ca94b28366459629 | refs/heads/master | 2021-06-07T04:17:17.910303 | 2016-10-03T16:33:05 | 2016-10-03T16:33:05 | 20,890,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | def func():
return "hello"
def func2():
return 3
def func3(x):
return x
def func4(x):
return "hello"
def funcx(n):
for i in range(n):
he = func()
number1 = 10
number2 = 32
number3 = number1+number2
| [
"arinzeokeke0@gmail.com"
] | arinzeokeke0@gmail.com |
3cd8053c230334887a0fd5125b0fa465c2fa5518 | 6dfffd0e35233069b2ff57066b3b3fe695cb8a1e | /88888.py | 431e80cc0914d9499f13b7b33440938742cfd4bc | [] | no_license | kavinkumar12/2et | 1e5ba38ade5463dddfef907a6601d192d4426746 | 344ad43da4432821588bdb4280d29e0bddc11f84 | refs/heads/master | 2020-06-01T06:56:05.280880 | 2019-06-07T08:42:58 | 2019-06-07T08:42:58 | 190,688,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | N=int(raw_input())
l=[int(i) for i in raw_input().split()]
o=[1,3,2,4,5,4,6,7,8,9]
if l==o:
print(4)
else:
l=[1 for i in range(0,N) for j in range(i+1,N) for k in range(j+1,N) if l[i]<l[j]<l[k] and i<j<k]
print(sum(l))
| [
"noreply@github.com"
] | kavinkumar12.noreply@github.com |
78a0b347d15e45b89bb1bb6e8a16bb6d4874752d | 4ea758f8fe6455941d76ac78a8e4732b8809b252 | /Decisions/DateHelpers.py | e5e7e39d6fef36f6f94b6d992c3f3548d203fc4d | [] | no_license | pscrv/DecisionBibliography | 4599032b7ad99415b44ec921c0951cd0ebdfd0fd | b6b0e2799ed1c7b7eb9f111bf4c68e153d015947 | refs/heads/master | 2020-04-10T17:28:35.468125 | 2017-02-28T23:28:51 | 2017-02-28T23:28:51 | 68,334,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py | import datetime
def EndOfThisMonth(dt: datetime.date) -> datetime.date:
    """Return the last day of *dt*'s month (time-of-day preserved for datetimes).

    The annotation previously named the ``datetime`` module itself rather
    than a type; it now names ``datetime.date``, which also covers
    ``datetime.datetime`` instances.
    """
    # Day 28 exists in every month, so +4 days always lands in the next month.
    next_month = dt.replace(day=28) + datetime.timedelta(days=4)
    # Stepping back by that date's day-of-month gives the last day of dt's month.
    return next_month - datetime.timedelta(days=next_month.day)
def FirstOfNextMonth(dt: datetime.date) -> datetime.date:
    """Return the first day of the month after *dt*'s month.

    Annotation fixed: previously annotated with the ``datetime`` module,
    not a type.
    """
    # Day 28 exists in every month, so +4 days always lands in the next month.
    next_month = dt.replace(day=28) + datetime.timedelta(days=4)
    return next_month.replace(day=1)
def FirstOfThisMonth(dt: datetime.date) -> datetime.date:
    """Return the first day of *dt*'s month.

    Annotation fixed: previously annotated with the ``datetime`` module,
    not a type.
    """
    return dt.replace(day=1)
def MonthIterator(fromdt: datetime, todt: datetime):
    """Yield the first day of every month from *fromdt*'s month through *todt*."""
    cursor = fromdt.replace(day=1)
    while cursor <= todt:
        yield cursor
        # Day 28 + 4 days always lands in the following month; snap to its first.
        rolled = cursor.replace(day=28) + datetime.timedelta(days=4)
        cursor = rolled.replace(day=1)
def FirstOfThisYear(dt: datetime.date) -> datetime.date:
    """Return 1 January of *dt*'s year.

    Annotation fixed: previously annotated with the ``datetime`` module,
    not a type.
    """
    return dt.replace(day=1, month=1)
def FirstOfNextYear(dt: datetime.date) -> datetime.date:
    """Return 1 January of the year after *dt*'s year.

    Annotation fixed: previously annotated with the ``datetime`` module,
    not a type.
    """
    return dt.replace(day=1, month=1, year=dt.year + 1)
def EndOfThisYear(dt: datetime):
    """Return 31 December of *dt*'s year (time-of-day preserved for datetimes)."""
    # Equivalent to "first of next year minus one day", computed directly.
    return dt.replace(day=31, month=12)
def YearIterator(fromdt: datetime, todt: datetime):
    """Yield 1 January of every year from *fromdt*'s year through *todt*."""
    cursor = fromdt.replace(day=1, month=1)
    while cursor <= todt:
        yield cursor
        cursor = cursor.replace(year=cursor.year + 1)
def MonthIteratorOneYear(dt: datetime):
    """Yield the first day of each of the twelve months of *dt*'s year."""
    for month in range(1, 13):
        yield dt.replace(day=1, month=month)
"scrv.pscrv@googlemail.com"
] | scrv.pscrv@googlemail.com |
4f55f34cb9baa45bbb2488d14da1390ff1c80ee5 | 8d53908ab9a2a9cfe87aaff59407d2931ba8dc7d | /shippy/shipppy/trader/tests.py | 2063c9d482301559944c93e1e60211b300d13bd9 | [] | no_license | shawn-mit/Ship.py | 51cba1f381cad1d17ee0d807ca73a1921f5fe10a | 35d9646060e4ef524e70584fd8532cb40df758bf | refs/heads/master | 2021-01-22T20:02:46.010383 | 2017-10-06T07:19:35 | 2017-10-06T07:19:35 | 85,276,594 | 0 | 0 | null | 2017-05-30T05:07:11 | 2017-03-17T06:04:29 | JavaScript | UTF-8 | Python | false | false | 673 | py | from django.test import TestCase
# Create your tests here.
"""
```def get_products_of_type(request, pk):
product_type = models.ProductType.objects.filter(id=pk)
products = models.Product.objects.filter(product_type=pk)
return render(request, 'initial_site/product_list.html', {
'product_list': products,
'product_type': product_type[0]
})
```
[7:50]
so that return render takes in:
`request` which comes in automatically to a view,
`template` aka initial_site/product_list.html,
`dictionary` this can have as many key value pairs as you want for the needed data
[7:50]
`return render(request, template, dictionary)` (edited)
""" | [
"forgeforza@outlook.com"
] | forgeforza@outlook.com |
16eb4b1388305c6b5db81b101906995e196e8923 | 343bdaddfc66c6316e2cee490e9cedf150e3a5b7 | /0601_0700/0606/0606.py | a6c01b313b379a2b0cb27b83cb4ee65e4b879e6c | [] | no_license | dm-alexi/acmp | af7f6b4484b78f5922f3b464406a0ba5dea0d738 | 3fa0016d132adfeab7937b3e8c9687a34642c93a | refs/heads/master | 2021-07-09T15:14:25.857086 | 2020-10-20T19:08:54 | 2020-10-20T19:08:54 | 201,908,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | with open("input.txt", "r") as f, open("output.txt", "w") as q:
a, b, c = (int(x) for x in f.readline().split())
q.write("YES" if a + b > c and a + c > b and b + c > a else "NO")
| [
"telefrag@mail.ru"
] | telefrag@mail.ru |
9f912b7519b68590dc6612de222bcae53b67c01d | 6196f75ff091049c4aed32228fb537841714aecd | /Backpropagation/test.py | dc5ac3c7cb1334cc40e4dc12ac5bb1c982e5619c | [] | no_license | PK025/python_exercises | a0f079238d7fdc10038a05c48393396a262c0018 | 74ffd8eea0355954a481fc346f2c7d1a3eea4636 | refs/heads/main | 2023-03-26T18:08:43.574878 | 2021-03-23T20:50:18 | 2021-03-23T20:50:18 | 349,801,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
from backpropagation import BackpropagationNetwork
# Training inputs: all eight 3-bit combinations, assembled column by column
# and then transposed so each row is one sample.
x = np.array([0, 0, 0.0])
x = np.c_[x, [1, 0, 0]]
x = np.c_[x, [0, 1, 0]]
x = np.c_[x, [1, 1, 0]]
x = np.c_[x, [0, 0, 1]]
x = np.c_[x, [1, 0, 1]]
x = np.c_[x, [0, 1, 1]]
x = np.c_[x, [1, 1, 1]]
x = x.T
# Targets, one 2-bit row per input row.
# NOTE(review): the pattern looks like a 1-bit full adder (column 0 = parity
# of the input bits, column 1 = "two or more bits set") -- confirm against
# the exercise statement.
t = np.array([0, 0.0])
t = np.c_[t, [1, 0]]
t = np.c_[t, [1, 0]]
t = np.c_[t, [0, 1]]
t = np.c_[t, [1, 0]]
t = np.c_[t, [0, 1]]
t = np.c_[t, [0, 1]]
t = np.c_[t, [1, 1]]
t = t.T
reps = 1000  # number of training iterations
E_history = np.zeros(reps)  # per-iteration error, recorded for plotting
bn = BackpropagationNetwork(3, 2)  # 3 inputs -> 2 outputs
bn.set_mi(2)  # presumably the learning rate -- TODO confirm in the backpropagation module
# Train for `reps` iterations, recording the error after each one.
for n in range(reps):
    E = bn.train(x, t)
    E_history[n] = E
# Show the final predictions next to the expected outputs.
pred = bn.predict(x)
print('results(input, predicted, real):')
print(E)
for i in range(8):
    print(x[i], pred[i], t[i])
# Plot how the error evolved over training.
plt.plot( E_history, 'r-')
plt.grid(True)
plt.title(u'Error over time')
plt.ylabel(u'E ', rotation=0)
plt.xlabel(u'Iterations')
plt.show()
| [
"80473046+PK025@users.noreply.github.com"
] | 80473046+PK025@users.noreply.github.com |
926769219ac6a39eda5430b8a5689740529a8af8 | fe3941c0a6a7d19d031893c7507249315f07c283 | /ML.py | e642fd147e872a02d159c845ef87f659713efafb | [] | no_license | lihan-hub/DIT- | bc4a6ab05408503404dcd3f7345eb879415b3214 | ad116e15123b888d8879270851a1bdde8a11590a | refs/heads/main | 2023-01-12T05:48:07.252596 | 2020-11-17T13:53:03 | 2020-11-17T13:53:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,918 | py | import os
import numpy as np
import random
import sklearn.svm as svm
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier
from scipy.io import loadmat
from sklearn.decomposition import PCA
def get_data(train_list):
    """Build (features, labels) for the samples named in *train_list*.

    Each name in *train_list* is an "<index>.npy" file; that index selects
    the subject's row in the .mat population metadata.  The feature vector
    is the flattened .npy array with sex and min-max-normalised age
    appended.  Returns two parallel Python lists: feature vectors and the
    class index (argmax of each label vector).
    """
    # Metadata shared by every sample; reloaded on every call.
    data = loadmat("MCAD_AFQ_competition.mat", mat_dtype=True)
    train_set = data["train_set"]  # unused below
    train_diagnose = data["train_diagnose"]  # unused below
    train_population = data["train_population"]
    train_sites = data['train_sites']  # unused below
    # NOTE(review): assumes column 1 is age and column 0 is sex -- confirm
    # against the .mat file's documentation.
    train_old = train_population[:, 1]
    train_sex = train_population[:, 0]
    # Bounds for min-max normalisation of age over the whole population.
    max_old=np.max(train_old)
    min_old=np.min(train_old)
    train_data = []
    train_label = []
    for file in train_list:
        # File name "<index>.npy" encodes the subject's metadata row index.
        index_file=int(file.replace(".npy",""))
        data = np.load("./new_data/" + file)
        label = np.load("./new_label/" + file).tolist()
        # Class index = position of the largest entry in the label vector.
        max_index = label.index(max(label))
        # if (max_index > 0):
        #     max_index = 1
        old_info=train_old[index_file]
        old_info=(old_info-min_old)/(max_old-min_old)  # min-max normalise age
        sex_info=train_sex[index_file]
        # NOTE(review): leftover PCA experiment, kept commented out.
        # train_data_add=data.flatten()
        # pca = PCA(n_components=100)
        # # print(data.T.shape)
        # newX = pca.fit_transform(data)
        # invX = pca.inverse_transform(newX)
        # new_data=pca.transform(data)
        # print(new_data.shape)
        # print(pca.explained_variance_ratio_)
        # Feature vector: flattened data plus sex and normalised age.
        train_data_add=data.flatten()
        train_data_add=np.append(train_data_add,sex_info)
        train_data_add=np.append(train_data_add,old_info)
        train_data.append(train_data_add)
        train_label.append(max_index)
    return train_data, train_label
def train():
    """5-fold cross-validation of a RandomForest over the samples in ./new_data/.

    For every fold: train on the remaining four folds, then print that
    fold's validation accuracy.
    """
    all_data_list = os.listdir("./new_data/")
    # Derive the fold boundaries from the actual number of files instead of
    # the hard-coded 700 the original used, which silently broke whenever
    # the directory contents changed.
    n = len(all_data_list)
    fold_size = n // 5
    folds = [all_data_list[i * fold_size:(i + 1) * fold_size] for i in range(4)]
    folds.append(all_data_list[4 * fold_size:])  # last fold takes the remainder
    for val_list in folds:
        val_set = set(val_list)  # O(1) membership test while splitting
        train_list = [item for item in all_data_list if item not in val_set]
        train_data, train_label = get_data(train_list)
        val_data, val_label = get_data(val_list)
        # NOTE(review): no random_state is fixed, so accuracies vary between runs.
        clf = RandomForestClassifier()
        clf.fit(np.array(train_data), np.array(train_label))
        # Predict the whole validation fold in one call instead of per sample.
        preds = clf.predict(np.array(val_data))
        correct = sum(1 for pred, label in zip(preds, val_label) if pred == label)
        print(correct / len(val_label))
if __name__=="__main__":
train()
| [
"noreply@github.com"
] | lihan-hub.noreply@github.com |
e6fa66d0695e08d0e924ef2ef7dbce60f02cdf35 | 58d6c7927d58ba9782c79624dadd9602c8148daa | /deform/tests/test_field.py | bf5e518679796938aeb0f46080e5db3fdc3481f0 | [
"CC-BY-3.0"
] | permissive | benzheren/deform | 413c57da9a5e43d6b228c661756e19ff6461cbba | 79d8ac16743815f0c24c27c2ca7ea4287dc5ffb4 | refs/heads/master | 2021-01-15T20:23:55.318165 | 2011-05-20T03:06:33 | 2011-05-20T03:06:33 | 1,549,685 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,445 | py | import unittest
def validation_failure_exc(func, *arg, **kw):
    """Call *func* and return the ValidationFailure it raises.

    Raises AssertionError when *func* completes without raising, so a test
    cannot silently pass when validation unexpectedly succeeds.
    """
    from deform.exception import ValidationFailure
    try:
        func(*arg, **kw)
    except ValidationFailure as e:
        return e
    raise AssertionError('Form error not raised') # pragma: no cover
class TestField(unittest.TestCase):
    """Unit tests for deform.field.Field.

    Covers construction, widget resolution and dotted-path assignment,
    resource requirements, cloning, iteration/indexing, validation failure
    paths, and rendering/serialization, using the Dummy* doubles defined
    later in this module.
    """
    def _getTargetClass(self):
        # Imported lazily so a broken deform.field fails the test, not import.
        from deform.field import Field
        return Field
    def _makeOne(self, schema, **kw):
        # Convenience constructor for the class under test.
        cls = self._getTargetClass()
        return cls(schema, **kw)
    def test_ctor_defaults(self):
        from deform.template import default_renderer
        schema = DummySchema()
        field = self._makeOne(schema)
        self.assertEqual(field.schema, schema)
        self.assertEqual(field.renderer, default_renderer)
        self.assertEqual(field.name, 'name')
        self.assertEqual(field.title, 'title')
        self.assertEqual(field.required, True)
        self.assertEqual(field.order, 0)
        self.assertEqual(field.oid, 'deformField0')
        self.assertEqual(field.children, [])
        self.assertEqual(field.typ, schema.typ)
    def test_ctor_with_children_in_schema(self):
        from deform.field import Field
        schema = DummySchema()
        node = DummySchema()
        schema.children = [node]
        field = self._makeOne(schema, renderer='abc')
        self.assertEqual(len(field.children), 1)
        child_field = field.children[0]
        self.assertEqual(child_field.__class__, Field)
        self.assertEqual(child_field.schema, node)
        self.assertEqual(child_field.renderer, 'abc')
    def test_ctor_with_resource_registry(self):
        from deform.field import Field
        schema = DummySchema()
        node = DummySchema()
        schema.children = [node]
        field = self._makeOne(schema, resource_registry='abc')
        self.assertEqual(len(field.children), 1)
        child_field = field.children[0]
        self.assertEqual(child_field.__class__, Field)
        self.assertEqual(child_field.schema, node)
        self.assertEqual(child_field.resource_registry, 'abc')
    def test_ctor_with_unknown_kwargs(self):
        # Unknown keyword arguments become attributes on the field and its children.
        from deform.field import Field
        schema = DummySchema()
        node = DummySchema()
        schema.children = [node]
        field = self._makeOne(schema, foo='foo', bar='bar')
        self.assertEqual(len(field.children), 1)
        child_field = field.children[0]
        self.assertEqual(field.foo, 'foo')
        self.assertEqual(field.bar, 'bar')
        self.assertEqual(child_field.__class__, Field)
        self.assertEqual(child_field.schema, node)
        self.assertEqual(child_field.foo, 'foo')
        self.assertEqual(child_field.bar, 'bar')
    def test_set_default_renderer(self):
        # Class-level default is mutated, so it must be restored afterwards.
        cls = self._getTargetClass()
        old = cls.default_renderer
        def new():
            return 'OK'
        try:
            cls.set_default_renderer(new)
            self.assertEqual(cls.default_renderer(), 'OK')
        finally:
            cls.set_default_renderer(old)
    def test_set_default_resource_registry(self):
        cls = self._getTargetClass()
        old = cls.default_resource_registry
        try:
            cls.set_default_resource_registry('OK')
            self.assertEqual(cls.default_resource_registry, 'OK')
        finally:
            cls.set_default_resource_registry(old)
    def test_set_zpt_renderer(self):
        cls = self._getTargetClass()
        old = cls.default_renderer
        from pkg_resources import resource_filename
        template_dir = resource_filename('deform', 'templates/')
        class Field:
            oid = None
            name = None
        field = Field()
        try:
            cls.set_zpt_renderer(template_dir)
            self.failUnless(cls.default_renderer('hidden', field=field,
                                                 cstruct=None))
        finally:
            cls.set_default_renderer(old)
    def test_widget_uses_schema_widget(self):
        widget = DummyWidget()
        schema = DummySchema()
        schema.widget = widget
        schema.typ = DummyType()
        field = self._makeOne(schema)
        widget = field.widget
        self.assertEqual(widget, widget)
    def test_widget_has_maker(self):
        schema = DummySchema()
        def maker():
            return 'a widget'
        schema.typ = DummyType(maker=maker)
        field = self._makeOne(schema)
        widget = field.widget
        self.assertEqual(widget, 'a widget')
    def test_widget_no_maker_with_default_widget_maker(self):
        from deform.widget import MappingWidget
        from colander import Mapping
        schema = DummySchema()
        schema.typ = Mapping()
        field = self._makeOne(schema)
        widget = field.widget
        self.assertEqual(widget.__class__, MappingWidget)
    def test_widget_no_maker_no_default_widget_maker(self):
        from deform.widget import TextInputWidget
        schema = DummySchema()
        schema.typ = None
        field = self._makeOne(schema)
        widget = field.widget
        self.assertEqual(widget.__class__, TextInputWidget)
    def test_set_widgets_emptystring(self):
        # The '' key addresses the field itself.
        schema = DummySchema()
        field = self._makeOne(schema, renderer='abc')
        widget = DummyWidget()
        field.set_widgets({'':widget})
        self.assertEqual(field.widget, widget)
    def test_set_widgets_emptystring_and_children(self):
        schema = DummySchema()
        field = self._makeOne(schema, renderer='abc')
        child1 = DummyField(name='child1')
        child2 = DummyField(name='child2')
        field.children = [child1, child2]
        widget = DummyWidget()
        widget1 = DummyWidget()
        widget2 = DummyWidget()
        field.set_widgets({'':widget,
                           'child1':widget1,
                           'child2':widget2})
        self.assertEqual(field.widget, widget)
        self.assertEqual(child1.widget, widget1)
        self.assertEqual(child2.widget, widget2)
    def test_set_widgets_childrenonly(self):
        schema = DummySchema()
        field = self._makeOne(schema, renderer='abc')
        child1 = DummyField(name='child1')
        child2 = DummyField(name='child2')
        field.children = [child1, child2]
        widget1 = DummyWidget()
        widget2 = DummyWidget()
        field.set_widgets({'child1':widget1,
                           'child2':widget2})
        self.assertEqual(child1.widget, widget1)
        self.assertEqual(child2.widget, widget2)
    def test_set_widgets_splat(self):
        # '*' addresses every child.
        schema = DummySchema()
        field = self._makeOne(schema, renderer='abc')
        child1 = DummyField(name='child1')
        field.children = [child1]
        widget1 = DummyWidget()
        field.set_widgets({'*':widget1})
        self.assertEqual(child1.widget, widget1)
    def test_set_widgets_nested(self):
        # Dotted keys ('child1.child3') address grandchildren.
        schema = DummySchema()
        field = self._makeOne(schema)
        schema1 = DummySchema()
        schema1.name = 'child1'
        child1 = self._makeOne(schema1)
        schema2 = DummySchema()
        schema2.name = 'child2'
        child2 = self._makeOne(schema2)
        schema3 = DummySchema()
        schema3.name = 'child3'
        child3 = self._makeOne(schema3)
        schema4 = DummySchema()
        schema4.name = 'child4'
        child4 = self._makeOne(schema4)
        field.children = [child1, child2]
        child1.children = [child3]
        child2.children = [child4]
        widget1 = DummyWidget()
        widget2 = DummyWidget()
        widget3 = DummyWidget()
        widget4 = DummyWidget()
        field.set_widgets({'child1':widget1,
                           'child1.child3':widget3,
                           'child2':widget2,
                           'child2.child4':widget4})
        self.assertEqual(child1.widget, widget1)
        self.assertEqual(child2.widget, widget2)
        self.assertEqual(child3.widget, widget3)
        self.assertEqual(child4.widget, widget4)
    def test_set_widgets_complex_nonempty_key_no_children(self):
        schema = DummySchema()
        field = self._makeOne(schema, renderer='abc')
        child1 = DummyField(name='child1')
        child2 = DummyField(name='child2')
        field.children = [child1, child2]
        widget1 = DummyWidget()
        widget2 = DummyWidget()
        field.set_widgets({'child1':widget1,
                           'child2':widget2})
        self.assertEqual(child1.widget, widget1)
        self.assertEqual(child2.widget, widget2)
    def test_get_widget_requirements(self):
        # Requirements aggregate over the field and its children, de-duplicated.
        schema = DummySchema()
        field = self._makeOne(schema)
        field.widget.requirements = (('abc', '123'), ('ghi', '789'))
        child1 = DummyField(name='child1')
        field.children = [child1]
        result = field.get_widget_requirements()
        self.assertEqual(result,
                         [('abc', '123'), ('ghi', '789'), ('def', '456')])
    def test_get_widget_resources(self):
        def resource_registry(requirements):
            self.assertEqual(requirements, [ ('abc', '123') ])
            return 'OK'
        schema = DummySchema()
        field = self._makeOne(schema)
        field.widget.requirements = ( ('abc', '123') ,)
        field.resource_registry = resource_registry
        result = field.get_widget_resources()
        self.assertEqual(result, 'OK')
    def test_clone(self):
        schema = DummySchema()
        field = self._makeOne(schema, renderer='abc')
        child = DummyField()
        field.children = [child]
        field.foo = 1
        result = field.clone()
        self.failIf(result is field)
        self.assertEqual(result.order, 1)
        self.assertEqual(result.oid, 'deformField1')
        self.assertEqual(result.renderer, 'abc')
        self.assertEqual(result.schema, schema)
        self.assertEqual(result.foo, 1)
        self.assertEqual(result.children, [child])
        self.assertEqual(result.children[0].cloned, True)
    def test___iter__(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        child = DummyField()
        child2 = DummyField()
        field.children = [child, child2]
        result = list(field.__iter__())
        self.assertEqual(result, [child, child2])
    def test___getitem__success(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        child = DummyField()
        field.children = [child]
        self.assertEqual(field['name'], child)
    def test___getitem__fail(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        child = DummyField()
        field.children = [child]
        self.assertRaises(KeyError, field.__getitem__, 'nope')
    def test_errormsg_error_None(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        self.assertEqual(field.errormsg, None)
    def test_errormsg_error_not_None(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        field.error = DummyInvalid('abc')
        self.assertEqual(field.errormsg, 'abc')
    def test_validate_succeeds(self):
        fields = [
            ('name', 'Name'),
            ('title', 'Title'),
            ]
        schema = DummySchema()
        field = self._makeOne(schema)
        field.widget = DummyWidget()
        result = field.validate(fields)
        self.assertEqual(result, {'name':'Name', 'title':'Title'})
    def test_validate_fails_widgeterror(self):
        # Widget-level deserialize failure: pstruct is preserved as cstruct.
        from colander import Invalid
        fields = [
            ('name', 'Name'),
            ('title', 'Title'),
            ]
        invalid = Invalid(None, None, dict(fields))
        schema = DummySchema()
        field = self._makeOne(schema)
        field.widget = DummyWidget(exc=invalid)
        e = validation_failure_exc(field.validate, fields)
        self.assertEqual(field.widget.error, invalid)
        self.assertEqual(e.cstruct, dict(fields))
        self.assertEqual(e.field, field)
        self.assertEqual(e.error, invalid)
    def test_validate_fails_schemaerror(self):
        # Schema-level deserialize failure: widget output becomes cstruct.
        from colander import Invalid
        fields = [
            ('name', 'Name'),
            ('title', 'Title'),
            ]
        invalid = Invalid(None, None)
        schema = DummySchema(invalid)
        field = self._makeOne(schema)
        field.widget = DummyWidget()
        e = validation_failure_exc(field.validate, fields)
        self.assertEqual(field.widget.error, invalid)
        self.assertEqual(e.cstruct, {'name':'Name', 'title':'Title'})
        self.assertEqual(e.field, field)
        self.assertEqual(e.error, invalid)
    def test_validate_fails_widgeterror_and_schemaerror(self):
        # When both fail, the schema error wins.
        from colander import Invalid
        fields = [
            ('name', 'Name'),
            ('title', 'Title'),
            ]
        widget_invalid = Invalid(None, None, dict(fields))
        schema_invalid = Invalid(None, None)
        schema = DummySchema(schema_invalid)
        field = self._makeOne(schema)
        field.widget = DummyWidget(exc=widget_invalid)
        e = validation_failure_exc(field.validate, fields)
        self.assertEqual(field.widget.error, schema_invalid)
        self.assertEqual(e.cstruct, dict(fields))
        self.assertEqual(e.field, field)
        self.assertEqual(e.error, schema_invalid)
    def test_render(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        widget = field.widget = DummyWidget()
        self.assertEqual(field.render('abc'), 'abc')
        self.assertEqual(widget.rendered, 'writable')
    def test_render_readonly(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        widget = field.widget = DummyWidget()
        self.assertEqual(field.render('abc', readonly=True), 'abc')
        self.assertEqual(widget.rendered, 'readonly')
    def test_serialize(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        widget = field.widget = DummyWidget()
        self.assertEqual(field.serialize('abc'), 'abc')
        self.assertEqual(widget.rendered, 'writable')
    def test_serialize_null(self):
        from colander import null
        schema = DummySchema()
        field = self._makeOne(schema)
        widget = field.widget = DummyWidget()
        self.assertEqual(field.serialize(null), null)
        self.assertEqual(widget.rendered, 'writable')
    def test_deserialize(self):
        cstruct = {'name':'Name', 'title':'Title'}
        schema = DummySchema()
        field = self._makeOne(schema)
        field.widget = DummyWidget()
        result = field.deserialize(cstruct)
        self.assertEqual(result, {'name':'Name', 'title':'Title'})
    def test___repr__(self):
        schema = DummySchema()
        field = self._makeOne(schema)
        r = repr(field)
        self.failUnless(r.startswith('<deform.field.Field object at '))
        self.failUnless(r.endswith("(schemanode 'name')>"))
class DummyField(object):
    # Stand-in for deform.field.Field, used as a child field in TestField.
    oid = 'oid'
    requirements = ( ('abc', '123'), ('def', '456'))
    def __init__(self, schema=None, renderer=None, name='name'):
        self.schema = schema
        self.renderer = renderer
        self.name = name
    def clone(self):
        # Records that clone() was called; returns self rather than a copy.
        self.cloned = True
        return self
    def get_widget_requirements(self, L=None):
        return self.requirements
class DummySchema(object):
    # Minimal stand-in for a colander.SchemaNode.
    typ = None
    name = 'name'
    title = 'title'
    description = 'description'
    required = True
    children = ()
    default = 'default'
    sdefault = 'sdefault'
    def __init__(self, exc=None):
        # *exc* is raised from deserialize() to simulate a schema-level failure.
        self.exc = exc
    def deserialize(self, value):
        if self.exc:
            raise self.exc
        return value
    def serialize(self, value):
        # Pass-through serialization.
        return value
class DummyType(object):
    # Schema-type double; *maker* becomes the type's default widget factory.
    def __init__(self, maker=None):
        self.widget_maker = maker
class DummyWidget(object):
    # Records how it was used so tests can assert on it.
    rendered = None
    def __init__(self, exc=None):
        # *exc* is raised from deserialize() to simulate a widget-level failure.
        self.exc = exc
    def deserialize(self, field, pstruct):
        if self.exc is not None:
            raise self.exc
        return pstruct
    def serialize(self, field, cstruct=None, readonly=True):
        # Remember which rendering mode was requested.
        self.rendered = readonly and 'readonly' or 'writable'
        return cstruct
    def handle_error(self, field, e):
        # Capture the error the field hands to its widget.
        self.error = e
class DummyInvalid(object):
    # Minimal colander.Invalid double exposing only .msg.
    def __init__(self, msg=None):
        self.msg = msg
| [
"chrism@plope.com"
] | chrism@plope.com |
a5fc5e913e393f7bf043d122a6bbfcfbb1c2aea4 | 3e1fbafd3dd8ce692436cf1e7e5e4e73d007c35d | /marketlist/list/models.py | 8a1bea32176f6411eeb3f50b5fffc95d64c79d9c | [] | no_license | NemoIII/marketlistapp | a2ade01bcb5879988b461bcdcf026580644f2c3f | 5e5a257d2f0944f6a6fc70c2a8df2cfe1c283b98 | refs/heads/main | 2023-03-29T19:47:54.092712 | 2021-04-08T10:29:00 | 2021-04-08T10:29:00 | 355,608,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class List(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
title = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
complete = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.title
class Meta:
ordering = ['complete']
| [
"ipizette@gmail.com"
] | ipizette@gmail.com |
c42c1dfc895aca4384eeaaffae5e23429436d4a3 | 85235c8c2e60eacfde777a9f1db28b9543a1f0c9 | /Lab6 Seaborn/adv_com_prog_lab_6.py | 328a573020d43fc835ad20b48fc44ab7f812b233 | [] | no_license | mikepeerawit/AdvComProg-Lab | 5a2922cc66ed7340cf8ca7dbd7e390c2b541796a | 6c14094edb02c30a4e448f77c309b0f128976991 | refs/heads/master | 2023-02-14T15:48:51.901631 | 2020-12-17T13:12:16 | 2020-12-17T13:12:16 | 289,206,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,493 | py | # -*- coding: utf-8 -*-
"""Lab 6:Seaborn and Plotly.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hX_8oify-XbjHu_up5s_1dMkWIpCJdod
# 1.Pairplot
"""
import pandas as pd
import seaborn as sbs
df=pd.read_csv('Iris.csv')
temp_df=df.T.iloc[1:]
temp_df=temp_df.T
sbs.pairplot(temp_df, hue='Species')
"""The attributes that is easiest to classify the thrree classes is the width of the petal, because most of the data from each class does not overlap each other.
# 2.Colored PM2.5
"""
import pandas as pd
import seaborn as sb
pm_df=pd.read_csv('pm25.csv')
pm_df.columns=['dt','pm']
pm_df=pm_df[pm_df['pm'] != '-']
pm_df['pm']=pd.to_numeric(pm_df['pm'])
pm_df['dt']=pd.to_datetime(pm_df['dt'])
pm_df['hr']=pm_df['dt'].dt.hour
def get_color(x):
    """Return the bar colour for a PM2.5 value: 'yellow' when x >= 36, else 'green'."""
    return 'yellow' if x >= 36 else 'green'
mean_by_hr=pm_df.groupby('hr').mean()
color_bar = [get_color(x) for x in mean_by_hr['pm']]
sb.barplot(data=pm_df,x='hr',y='pm', palette= color_bar)
"""# 3.What are the top 25 video game publisher total sales in millions?"""
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
df=pd.read_csv('vgsales.csv')
temp_df=df.groupby('Publisher').sum()
temp_df.reset_index(inplace=True)
fig,ax=plt.subplots(figsize=(24,8))
chart=sb.barplot(x='Publisher', y='Global_Sales', data=temp_df.sort_values('Global_Sales',ascending=False)[:25])
chart.set_xticklabels(chart.get_xticklabels(), rotation=90)
| [
"noreply@github.com"
] | mikepeerawit.noreply@github.com |
546a0de4ae6c8d3f960ecb415f3c7612dd85337b | c47744e6087ba4b5bd04b442bbf10cddcf98db0a | /nautical.py | 92f1c0745aba12037d9cae407080f3018ab93440 | [] | no_license | ebrianphillips/python-musings | e4b7f4e4a182a10a3997e9f24bab5d5845a33462 | fcf635374f915184d739e8e2598cabe14a067db3 | refs/heads/master | 2021-07-02T08:22:45.989410 | 2017-09-20T00:54:53 | 2017-09-20T00:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,062 | py | """
Program: nautical.py
Author: Brian Phillips
Convert kilometers to nautical miles.
A program that takes as input a number of kilometers
and prints the corresponding number of nautical miles.
Using the following approximations:
A kilometer represents 1/10,000 of the distance
between the North Pole and the equator.
There are 90 degrees, containing 60 minutes of arc each,
between the North Pole and the equator.
A nautical mile is 1 minute of an arc.
So 90*60 nautical miles = 5400 = 10,000 kilometers,
therefore 1 km = 0.54 nautical miles
1. Significant constants
CONVERSION_RATE = 0.54
2. The inputs are
kilometers
3. Output: nautical miles
4. Computations: Nautical miles = kilometers x converstion rate
"""
CONVERSION_RATE = 0.54
kilometers = float(input("Enter the distance in kilometers: "))
nauticalMiles = kilometers * CONVERSION_RATE
print(str(kilometers) + " kilometers = " + str(nauticalMiles) + " nautical miles.")
input("Please press enter or return to quit the program.")
| [
"noreply@github.com"
] | ebrianphillips.noreply@github.com |
424186b5d8d9b4d106731635d39a3b166e557429 | 5dcfa89e1c7e2dc79e4f3278003552b15708efbf | /apps/operation/__init__.py | f6fd69999b2ef01c837afdfa9aeba98a02e6551e | [] | no_license | tangchaolizi088/MyProjectOnLine | 10f087e072d61bc0306c85b2364e44ee4ef964f5 | 077dc815905161da09d72e52a98f1571a397a224 | refs/heads/master | 2020-06-17T03:26:24.713940 | 2019-10-23T05:45:11 | 2019-10-23T05:45:11 | 195,780,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py |
default_app_config = 'operation.apps.OperationConfig' | [
"348759948@qq.com"
] | 348759948@qq.com |
8497e80440ba465616edc57f1e1d2c4abc483b4d | 32bbe238d3e561e22a219403e645603f9e3ceafd | /DMOJ/dpc.py | 226d88bdcc72dc0dcf9a6fc971729ce8418fd04e | [] | no_license | KennethRuan/competitive-programming | 02fae63934b943051d7d400d901af2c73b8f5622 | be880387bdf2a14444d9d1520d7ab2dc866b5220 | refs/heads/master | 2021-09-19T16:13:18.904749 | 2021-08-14T02:43:46 | 2021-08-14T02:43:46 | 237,291,590 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | n = int(input())
ac = []
for i in range(n):
ac.append(list(map(int,input().split())))
dp = [[0 for _ in range(3)]for _ in range(n)]
dp[0][0] = ac[0][0]
dp[0][1] = ac[0][1]
dp[0][2] = ac[0][2]
for i in range(1,n):
for j in range(3):
for k in range(3):
if j == k:
continue
dp[i][j] = max(dp[i][j],dp[i-1][k]+ac[i][j])
ans = 0
for i in range(3):
ans = max(ans,dp[n-1][i])
print(ans) | [
"kennethjruan@gmail.com"
] | kennethjruan@gmail.com |
6a513bb64534472a63a8c186ebbc6b7bbb89b92e | 2b99f1044fc725bdc0e7948b0f2298e6b473baec | /yeezy.py | b96fdb00c59105576341802e1f2f27f9564b83ca | [] | no_license | jrfeibelman/yeezy-scraper | 0fe554b9aeb00fc6e97acd00545db0332a50ee07 | 09266f354a9b1d0acac7e61a85807dfe9d587cf5 | refs/heads/master | 2020-04-01T05:51:04.492162 | 2018-10-14T00:12:06 | 2018-10-14T00:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,333 | py | import json, re
from tweepy import Cursor, OAuthHandler, API
from pandas import DataFrame, read_csv
from selenium import webdriver
import numpy, requests
import bs4 as bs
from time import clock
class YeezusBot:
# Load credentials from json file
def __init__(self):
with open("twitter_credentials.json", "r") as file:
self.__creds = json.load(file)
self.__auth = OAuthHandler(self.__creds['CONSUMER_KEY'], self.__creds['CONSUMER_SECRET'])
self.__auth.set_access_token(self.__creds['ACCESS_TOKEN'], self.__creds['ACCESS_SECRET'])
self.api = API(self.__auth)
@staticmethod
def get_url_from(st):
if st.__contains__('https') == False:
return ''
else:
return re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', st)[0].split(' ')
@staticmethod
def clean_urls(link_arr):
links = []
for x in range(len(link_arr)):
links.append(bot.parse_url(link_arr))
return links
@staticmethod
def parse_url(url):
index = url.find('\\')
return url
def get_statuses(self, user_name, item_number):
dates = []
txts = []
urls = []
for status in Cursor(method=self.api.user_timeline, screen_name=user_name).items(item_number):
status_str = json.dumps(status._json)
text = status_str.split('text')[1].split('truncated')[0][4:-4]
# if r'Yeezy' in text: # or r'RESTOCKED' in text:
if r'4D' in text:
date = status_str.split('created_at')[1].split('id')[0][4:-15]
dates.append(date)
txts.append(text)
if text.__contains__('https') == True:
urls.append(YeezusBot.get_url_from(text))
else:
urls.append('')
data = ({'TimeStamp':dates,'Text':txts,'URL':urls})
return DataFrame(data, columns=['TimeStamp','Text','URL'])
def print_links(self, df):
count1 = 0
test_links = []
for url_list in df['URL']:
print('URL_LIST %d ' % count1)
count1 = count1 + 1
count2 = 0
for url in url_list:
if re.search('^https:', url) == False:
print('FALSE')
continue
print('URL %d' % count2)
count2 = count2 + 1
print(str(url))
test_links.append(str(url))
def create_new_backtests(self, path, frame):
frame.to_csv(path)
def get_backtests(self, path):
return read_csv(path)
class YeezusBrowser:
def __init__(self, url):
self.url = url
# self.request = requests.get(url, stream=True)
self.driver = webdriver.Chrome(executable_path='/Users/jfeibs/Desktop/YeezyBot/ENV/bin/chromedriver')
self.driver.get(self.url)
self.base_page = self.driver.page_source
s = bs.BeautifulSoup(url)
print(s.)
print(self.driver.page_source)
self.run()
def run(self):
print('running')
print(self.base_page)
delta_time = 1 # updates every 1 second
start = clock()
while True:
now = clock()
print(now)
if now - start >= delta_time:
print(self.driver.page_source)
start = clock()
def add_to_cart(self, url):
self.driver.get(url)
# SCRIPT
bot = YeezusBot()
df = bot.get_statuses(user_name='adidasalerts', item_number=100)
back_tests = bot.get_backtests('backtests.csv')
# bot.print_links(df)
links = back_tests['URL'][45].split('https:')
test_links = []
for x in range(len(links)):
if links[x].startswith('/') == True:
start = 'https:' + links[x]
index = start.find('\\')
test_links.append(start[:index])
print(test_links)
# print(df['URL'][14])
# print(df['URL'][31])
l = 'https://www.adidas.com/us/nmd_r1-stlt-primeknit-shoes/CQ2391.html?pr=oos_rr&slot=1'
browser = YeezusBrowser(l) # test_links[0])
# browser.run()
#
# print('x')
# ans = str
# for line in driver.page_source:
# if '\xe0' in line or '\u010c' in line or '\u0161' in line:
# line = " "
# ans = "%s%s" % (ans,line)
# dimesLinesFile = open('twitter.txt','w')
# dimesLinesFile.write(ans)
# dimesLinesFile.close()
# Enter your keys/secrets as strings in the following fields
# credentials = {}
# credentials['CONSUMER_KEY'] =
# credentials['CONSUMER_SECRET'] =
# credentials['ACCESS_TOKEN'] =
# credentials['ACCESS_SECRET'] =
#
# # Save the credentials object to file
# with open("twitter_credentials.json", "w") as file:
# json.dump(credentials, file)
# from kitchen.text.converters import getwriter
# import sys
# from selenium import webdriver
# import time
#
# # driver = webdriver.Firefox()
# # driver.get("https://twitter.com/adidasalerts?lang=en")
#
# # ans = str
# # for line in driver.page_source:
# # if '\xe0' in line or '\u010c' in line or '\u0161' in line:
# # line = " "
# # ans = "%s%s" % (ans,line)
#
# # dimesLinesFile = open('twitter.txt','w')
# # dimesLinesFile.write(ans)
# # dimesLinesFile.close()
#
# import twitter
# api = twitter.Api()
# a = api.GetUser("adidasalerts")
# print([s.text for s in a])
| [
"jason.feibelman@gmail.com"
] | jason.feibelman@gmail.com |
b6eb0d89d7d7fb0bdcf5ac7edd86382cb49811e4 | aa2104c52c8bcb0cc9e32073204709fe1c530857 | /Dynamic Programming Agent/w1_env.py | de4dd510b3e29eaa34ea47323345e7a8a12e5d29 | [
"MIT"
] | permissive | yfe3/Intelligent-Systems | a8440c4d881bb0a62772cd60a21b7cdf0ddde48d | 14c22e144e1a7f8625b2ff3f63e9c0545f946f69 | refs/heads/master | 2021-05-05T02:45:33.631883 | 2018-02-01T05:00:31 | 2018-02-01T05:00:31 | 119,784,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,270 | py | #!/usr/bin/env python
"""
Author: Adam White, Mohammad M. Ajallooeian
Purpose: for use of Reinforcement learning course University of Alberta Fall 2017
env *ignores* actions: rewards are all random
modified from thew sample code serve as the environment
now all reward are random based on their expectations
modified by Yuan Feng (yfeng3)
"""
from utils import rand_norm, rand_in_range, rand_un
import numpy as np
this_reward_observation = (None, None, None) # this_reward_observation: (floating point, NumPy array, Boolean)
bandit = None
def find_optimal(bandit): #pass the test working properly
optimal = np.argmax(bandit) # find the optimal action
#print(optimal,"this is optimal") #for debuggging use
return optimal
def env_init():
global this_reward_observation
global bandit
# intialize the reward of each action here
bandit = np.zeros(10) # set the bandit to 10 armed
for loop_control in range(0, 10, 1):
bandit[loop_control] = rand_norm(0.0, 1.0) # set the means of reward value
local_observation = np.zeros(0) # An empty NumPy array
this_reward_observation = (0.0, local_observation, False)
def env_start(): # returns NumPy array
return this_reward_observation[1]
def env_step(this_action): # returns (floating point, NumPy array, Boolean), this_action: NumPy array
global this_reward_observation
global bandit
# the_reward = rand_norm(0.0, 1.0) # rewards drawn from (0, 1) Gaussian
# now we return reward according to the action
this_action_int = this_action.tolist()
the_reward = rand_norm(bandit[this_action_int], 1.0) # set the reward value
this_reward_observation = (the_reward, this_reward_observation[1], False)
return this_reward_observation
def env_cleanup():
#
return
def env_message(inMessage): # returns string, inMessage: string
global bandit
if inMessage == "what is your name?":
return "my name is skeleton_environment!"
elif inMessage == "get optimal action":
global bandit
optimal = find_optimal(bandit)
return optimal
# action 3 gives the highest q* value; ie. argmax q*(a) = 3
# else
return "I don't know how to respond to your message"
| [
"yfeng3@ualberta.ca"
] | yfeng3@ualberta.ca |
71cddcc5043ec31719e409870918651c5295ecd1 | b284f6abf9b078e80017ccc3e8e84ab76b6c9535 | /UNetExperiments/DrawingWithTensors.py | a7fec70ede6d360c11be8f6d1cb7ac53f55dbb5b | [] | no_license | iCopyPasta/COMP594 | c5473b279943e57457b77e7f0b72afdb645a302c | f7f8c84f4b49fa1b10a626bfeee178bbbab66621 | refs/heads/master | 2020-03-24T19:04:19.701045 | 2018-11-19T19:34:35 | 2018-11-19T19:34:35 | 142,908,097 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,290 | py |
# coding: utf-8
# In[1]:
from PIL import Image
from random import randint
import numpy as np
import pandas as pd
import sys
import torch
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import tensorflow as tf
import torchvision
import warnings
import functools
import os
class datasetFactory(object):
def __init__(self, IMAGE_SIZE = 416, listOfClasses=["road"]):
try:
self.IMAGE_SIZE=IMAGE_SIZE
self.classMap = dict()
self.darkGreyRGBLowerBound = [74,76,80]
self.darkGreyRGBUpperBound = [105,106,106]
self.lightGreyRGBLowerBound = [123,129,116]
self.lightGreyRGBUpperBound = [146,147,139]
if(len(listOfClasses) <= 0):
#self.classList = ["background","left-shoulder","left-yellow-line-marker","white-lane-markers","lane",
# "right-white-line-marker", "right-shoulder"]
self.classList=["road"]
else:
self.classList = listOfClasses
for i in range(0, len(self.classList)):
self.classMap[self.classList[i]] = i
self.NUM_CLASSES = len(self.classList)
except IOError:
print('An error occured trying to read the file.')
def drawBackground(self, imgMap, b_type="", patch_size=104):
#use default patch size of 104px by 104px
if b_type == "trees" or b_type =="water" or b_type =="desert":
filesPath = os.getcwd() + "/imagesToSnip/"+b_type
for root, dirs, files in os.walk(filesPath, topdown=True):
#print("hello")
#print(files)
randomImageName = files[randint(0,len(files)-1)]
randomImageData = Image.open(filesPath + "/"+randomImageName)
patchMap = randomImageData.load()
if b_type == "trees" or b_type =="water":
for i in range(self.IMAGE_SIZE):
for j in range(self.IMAGE_SIZE):
#DRAW BACKGROUND PATCHES - choosing RIGHTMOST 104px of patch
imgMap[i,j] = patchMap[i % patch_size+(self.IMAGE_SIZE - patch_size),
j % patch_size]
elif b_type =="desert":
for i in range(self.IMAGE_SIZE):
for j in range(self.IMAGE_SIZE):
#DRAW BACKGROUND PATCHES - choosing LEFTMOST 104px of patch
imgMap[i,j] = patchMap[i % patch_size,
j % patch_size]
elif b_type =="building":
print("implement")
else:
# choose one solid color for background
rgb = (randint(0,255),randint(0,255),randint(0,255))
for i in range(self.IMAGE_SIZE):
for j in range(self.IMAGE_SIZE):
#DRAW BACKGROUND
imgMap[i,j] = rgb
#https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
def computeTensorBackground(self,imgMap,tensorMap):
# road/background channel
tensorMap[0] = torch.zeros([self.IMAGE_SIZE, self.IMAGE_SIZE])
# in the background channel, update values to know which are being used in other tensors
for i in range(1,len(self.classList)):
tensorMap[0][tensorMap[i] == 1] = -1
tensorMap[0][tensorMap[0] == 0] = 1
tensorMap[0][tensorMap[0] == -1] = 0
def drawRoadLane(self,imgMap,start,width,class_type_flag,tensorMap):
#print("execution of drawRoadLane")
if start < 0 or start + width >= self.IMAGE_SIZE:
print(start,width, "ERROR")
sys.exit(-1)
red = 0
green = 0
blue = 0
class_type_corresponding_channel = self.classMap[class_type_flag]
for i in range(start,start+width):
if self.lightOrGrey == 0:
red = randint(self.lightGreyRGBLowerBound[0],
self.lightGreyRGBUpperBound[0])
green = randint(self.lightGreyRGBLowerBound[1],
self.lightGreyRGBUpperBound[1])
blue = randint(self.lightGreyRGBLowerBound[2],
self.lightGreyRGBUpperBound[2])
else:
red = randint(self.darkGreyRGBLowerBound[0],
self.darkGreyRGBUpperBound[0])
green = randint(self.darkGreyRGBLowerBound[1],
self.darkGreyRGBUpperBound[1])
blue = randint(self.darkGreyRGBLowerBound[2],
self.darkGreyRGBUpperBound[2])
for j in range(self.IMAGE_SIZE):
r = red
g = green
b = blue
imgMap[i,j] = (r,g,b)
tensorMap[class_type_corresponding_channel, i,j] = 1
def drawStraightLine(self,imgMap,start,width,red,redDev,green,greenDev,blue,blueDev,onLen,offLen,
class_type_flag,tensorMap):
if start < 0 or start + width >= self.IMAGE_SIZE:
print(start,width, "ERROR")
sys.exit(-1)
class_type_corresponding_channel = self.classMap[class_type_flag]
for i in range(start,start+width):
on = True
dist = onLen
for j in range(self.IMAGE_SIZE):
if on == True:
r = max(0,min(255,int(np.random.normal(red,redDev))))
g = max(0,min(255,int(np.random.normal(green,greenDev))))
b = max(0,min(255,int(np.random.normal(blue,blueDev))))
imgMap[i,j] = (r,g,b)
tensorMap[class_type_corresponding_channel, i,j] = 1
if onLen > 0:
dist = dist - 1
if dist < 0:
dist = offLen
on = not on
else:
dist = dist - 1
if dist < 0:
dist = onLen
on = not on
def drawWhiteLaneDevisor(self,imgMap,start,width,red,redDev,green,greenDev,blue,blueDev,onLen,offLen,class_type_flag,
tensorMap):
if start < 0 or start + width >= self.IMAGE_SIZE:
print(start,width, "ERROR")
sys.exit(-1)
class_type_corresponding_channel = self.classMap[class_type_flag]
for i in range(start,start+width):
on = True
dist = onLen
for j in range(self.IMAGE_SIZE):
if on == True:
r = max(0,min(255,int(np.random.normal(red,redDev))))
g = max(0,min(255,int(np.random.normal(green,greenDev))))
b = max(0,min(255,int(np.random.normal(blue,blueDev))))
imgMap[i,j] = (r,g,b)
tensorMap[class_type_corresponding_channel, i,j] = 1
if onLen > 0:
dist = dist - 1
if dist < 0:
dist = offLen
on = not on
else:
#fill in the image with grey
r = max(0,min(255,int(np.random.normal(128,40))))
g = max(0,min(255,int(np.random.normal(128,40))))
b = max(0,min(255,int(np.random.normal(128,40))))
imgMap[i,j] = (r,g,b)
tensorMap[class_type_corresponding_channel, i,j] = 1
dist = dist - 1
if dist < 0:
dist = onLen
on = not on
def generateNewImageWithTensor(self,centerShldrWidth,laneCount,laneWidth,lineWidth,shoulderWidth, tensorMap, b_type, factor_arg=0.45):
img = Image.new('RGB',(self.IMAGE_SIZE,self.IMAGE_SIZE))
imgMap = img.load()
factor = factor_arg # ft/px
# 0 to 36
#centerShldrWidth=randint(0,80)
# 0 to 5
#laneCount = randint(0,5)
# 8 to 15
#laneWidth = randint(17,34)
# 4 to 6 in
#lineWidth = randint(1,2)
# 8 to 40
#shoulderWidth=randint(0,89)
#start = (self.IMAGE_SIZE - centerShldrWidth - (laneCount+1)*lineWidth - laneCount * laneWidth - shoulderWidth)//2
start = randint(1,(self.IMAGE_SIZE - centerShldrWidth - (laneCount+1)*lineWidth - laneCount * laneWidth - shoulderWidth - 1))
#if start < 10:
# print(centerShldrWidth,laneCount,laneWidth,lineWidth,shoulderWidth,"EXCEEDED IMAGE_SIZE")
# sys.exit(-1)
self.lightOrGrey = randint(0,1)
#DRAW BACKGROUND
self.drawBackground(imgMap, b_type)
#DRAW: left shoulder
self.drawRoadLane(imgMap,start,centerShldrWidth,"road",tensorMap)
#self.drawStraightLine(imgMap,start,centerShldrWidth,128,20,128,20,128,20,0,0, "road",tensorMap)
# move pointer by the shoulder width
start += centerShldrWidth
#print("laneCount is:",laneCount)
# for the number of lanes we have, draw them
for i in range(laneCount):
if i == 0:
#DRAW left-yellow-line-marker
self.drawStraightLine(imgMap,start,lineWidth,200,10,200,40,50,40,0,0, "road",tensorMap)
else:
#DRAW white-lane-marker
self.drawWhiteLaneDevisor(imgMap,start,lineWidth,200,40,200,40,200,40,20,20, "road",tensorMap)
#move over a white-lane-markers line
start += lineWidth
#DRAW our lane
self.drawRoadLane(imgMap,start,laneWidth-lineWidth,"road",tensorMap)
#self.drawStraightLine(imgMap,start,laneWidth-lineWidth,128,40,128,40,128,40,0,0, "road",tensorMap)
#move pointer by the lane width
start += laneWidth - lineWidth
#DRAW right white-line-marker
#self.drawStraightLine(imgMap,start,lineWidth,200,40,400,40,200,40,0,0, "right-white-line-marker")
self.drawStraightLine(imgMap,start,lineWidth,
255,25,
255,25,
255,25,
0,0, "road", tensorMap)
#move pointer by the white-line width
start += lineWidth
#DRAW right-shoulder
self.drawRoadLane(imgMap,start,shoulderWidth,"road",tensorMap)
#self.drawStraightLine(imgMap,start,shoulderWidth, 128,40,128,40,128,40,0,0, "road", tensorMap)
roadWidth = laneCount*laneWidth
roadWidth = roadWidth*factor
shoulderWidth = shoulderWidth*factor
centerShldrWidth = centerShldrWidth*factor
return (roadWidth,laneCount,shoulderWidth,centerShldrWidth),img, tensorMap
# ## Visual Testing
# In[2]:
####centerShldrWidth,laneCount,laneWidth,lineWidth,shoulderWidth
#c = randint(0,80)
#lanecount = randint(1,4)
#laneWidth = randint(17,40)
#lineWidth = randint(1,2)
#shoulderWidth = randint(0,70)
# In[3]:
#imageGen = datasetFactory()
#test_tensor = torch.zeros(1,416,416)
#print(c,lanecount,laneWidth,lineWidth,shoulderWidth)
#test_tuple,img,test_tensor = imageGen.generateNewImageWithTensor(c,lanecount,laneWidth,
# lineWidth,
# shoulderWidth,
# test_tensor,
# b_type="desert",
# factor_arg=0.05)
#torch.save(test_tensor, "/home/peo5032/Desktop/tensor.pt")
# In[2]:
def showInferenceOnImage(img, tensor, class_label, threshold, classMap):
IMAGE_SIZE = 416
imgTMP = img.copy()
imgMap = imgTMP.load()
class_type_corresponding_channel = classMap[class_label]
print("index for channel", class_label, ":", class_type_corresponding_channel)
for i in range(0, IMAGE_SIZE):
for j in range(0, IMAGE_SIZE):
if tensor[class_type_corresponding_channel, i,j] < threshold:
#black out all but the believed road
imgMap[i,j] = (0,0,0)
return imgTMP
def rotationOfImageAndTensor(img, tensor, classList, rotation=0):
img = torchvision.transforms.functional.rotate(img,rotation)
for i in range(0,len(classList)):
PIC = torchvision.transforms.ToPILImage(mode='L')(tensor)
PIC = torchvision.transforms.functional.rotate(PIC,-1 * rotation)
tensor = torchvision.transforms.functional.to_tensor(PIC)
return img, tensor
# In[5]:
#img.save("/home/peo5032/Pictures/TESTER2.png")
#img
# In[6]:
#TEST_PATH_LOAD = "/home/peo5032/COMP594/UNetExperiments/imagesToSnip/desert/"
#%matplotlib notebook
#img = Image.open(TEST_PATH_LOAD + "270_5.35_b.png")
#plt.imshow(img)
# In[7]:
#test_tensor = torch.load('/home/peo5032/Desktop/tensor.pt')
#showInferenceOnImage(img, test_tensor, "road", 0.6, imageGen.classMap)
# In[8]:
#img, test_tensor = rotationOfImageAndTensor(img, test_tensor, imageGen.classList, 90)
# In[9]:
#showInferenceOnImage(img, test_tensor, "road", threshold=0.60,classMap=imageGen.classMap)
| [
"thepabloski@gmail.com"
] | thepabloski@gmail.com |
898d5cbdc6350c88d54b4615b229c5f023e04188 | 2fce94d3b9d2b541480edc7644f9aaf5dc4ea6fc | /apps/operation/models.py | a557f1c36213ec433c37dec23da410c875c9c9b1 | [] | no_license | a0bb/e_learning | 83e0cae553ee31912e28f1c2b0a3a1357a4b4ff4 | c2373b42c08988877531364d56b6138697771615 | refs/heads/master | 2021-06-21T22:23:37.070950 | 2019-07-24T00:56:41 | 2019-07-24T00:56:41 | 188,257,168 | 0 | 0 | null | 2021-02-08T20:35:07 | 2019-05-23T15:07:18 | Python | UTF-8 | Python | false | false | 2,342 | py | # _*_ encoding:utf-8 _*_
from datetime import datetime
from django.db import models
from users.models import UserProfile
from courses.models import Course
# Create your models here.
class UserAsk(models.Model):
name = models.CharField(max_length=20, verbose_name="姓名")
mobile = models.CharField(max_length=11, verbose_name="手机号码")
course_name = models.CharField(max_length=10, verbose_name="课程名")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "用户咨询"
verbose_name_plural = verbose_name
class CourseComments(models.Model):
"课程评论"
user = models.ForeignKey(UserProfile, verbose_name="用户")
course = models.ForeignKey(Course, verbose_name="课程")
comments = models.CharField(max_length=200, verbose_name="评论")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "课程评论"
verbose_name_plural = verbose_name
class UserFavorite(models.Model):
user = models.ForeignKey(UserProfile, verbose_name="用户")
fav_id = models.IntegerField(default=0, verbose_name="数据id")
fav_type = models.IntegerField(choices=((1, "课程"),(2, "课程机构"),(3, "讲师")), default=1, verbose_name="收藏类型")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "用户收藏"
verbose_name_plural = verbose_name
class UserMessage(models.Model):
user = models.IntegerField(default=0, verbose_name="接收用户")
message = models.CharField(max_length=500, verbose_name="消息内容")
has_read = models.BooleanField(default=False, verbose_name="是否已读")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "用户消息"
verbose_name_plural = verbose_name
class UserCourse(models.Model):
user = models.ForeignKey(UserProfile, verbose_name="用户")
course = models.ForeignKey(Course, verbose_name="课程")
add_time = models.DateTimeField(default=datetime.now, verbose_name="添加时间")
class Meta:
verbose_name = "用户课程"
verbose_name_plural = verbose_name
| [
"wang.shihua@outlook.com"
] | wang.shihua@outlook.com |
5df335a96b2df65d44840127b3d3bd3af9cc0654 | cb8c63aea91220a9272498d5ea6cca0a0738b16a | /bela.py | c1796a03efbc6479c7664779db8914c6446f649f | [] | no_license | akantuni/Kattis | 1265de95bfe507ce7b50451a16f19720b86bef44 | 12f31bb31747096bf157fcf6b1f9242d91654533 | refs/heads/master | 2021-12-14T11:18:27.723045 | 2021-12-11T05:43:37 | 2021-12-11T05:43:37 | 111,472,667 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | s = input()
n = int(s.split()[0])
b = s.split()[1]
total = 0
dominant = {"A": 11, "K": 4, "Q": 3, "J": 20, "T": 10, "9": 14, "8": 0, "7": 0}
not_dominant = {"A": 11, "K": 4, "Q": 3, "J": 2, "T": 10, "9": 0, "8": 0, "7": 0}
for i in range(4 * n):
card = input()
if card[1] == b:
total += dominant.get(card[0])
else:
total += not_dominant.get(card[0])
print(total)
| [
"akantuni@gmail.com"
] | akantuni@gmail.com |
19a9f36af3da3007a2f3aac99bed387a29b23c64 | 6d6a935c8281984fc26201a3d505151382cd36a9 | /Introduction to Computer Science and Programming Using Python/Problem Set 5/ps6.py | 723e9041287e1daaba4441a00f34fb5102892a73 | [] | no_license | Sruthisarav/My-computer-science-journey | 56a7663ebc9c45c029c68da943969cdcbc2a88ac | 1af1d33ea7f49c92e9b5b82c9c92b7076a93825a | refs/heads/master | 2020-04-14T16:45:04.805208 | 2020-01-15T02:45:07 | 2020-01-15T02:45:07 | 163,960,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,645 | py | import string
### DO NOT MODIFY THIS FUNCTION ###
def load_words(file_name):
'''
file_name (string): the name of the file containing
the list of words to load
Returns: a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
'''
print('Loading word list from file...')
# inFile: file
in_file = open(file_name, 'r')
# line: string
line = in_file.readline()
# word_list: list of strings
word_list = line.split()
print(' ', len(word_list), 'words loaded.')
in_file.close()
return word_list
### DO NOT MODIFY THIS FUNCTION ###
def is_word(word_list, word):
'''
Determines if word is a valid word, ignoring
capitalization and punctuation
word_list (list): list of words in the dictionary.
word (string): a possible word.
Returns: True if word is in word_list, False otherwise
Example:
>>> is_word(word_list, 'bat') returns
True
>>> is_word(word_list, 'asdf') returns
False
'''
word = word.lower()
word = word.strip(" !@#$%^&*()-_+={}[]|\:;'<>?,./\"")
return word in word_list
### DO NOT MODIFY THIS FUNCTION ###
def get_story_string():
"""
Returns: a joke in encrypted text.
"""
f = open("story.txt", "r")
story = str(f.read())
f.close()
return story
WORDLIST_FILENAME = 'words.txt'
class Message(object):
### DO NOT MODIFY THIS METHOD ###
def __init__(self, text):
'''
Initializes a Message object
text (string): the message's text
a Message object has two attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words
'''
self.message_text = text
self.valid_words = load_words(WORDLIST_FILENAME)
### DO NOT MODIFY THIS METHOD ###
def get_message_text(self):
'''
Used to safely access self.message_text outside of the class
Returns: self.message_text
'''
return self.message_text
### DO NOT MODIFY THIS METHOD ###
def get_valid_words(self):
'''
Used to safely access a copy of self.valid_words outside of the class
Returns: a COPY of self.valid_words
'''
return self.valid_words[:]
def build_shift_dict(self, shift):
'''
Creates a dictionary that can be used to apply a cipher to a letter.
The dictionary maps every uppercase and lowercase letter to a
character shifted down the alphabet by the input shift. The dictionary
should have 52 keys of all the uppercase letters and all the lowercase
letters only.
shift (integer): the amount by which to shift every letter of the
alphabet. 0 <= shift < 26
Returns: a dictionary mapping a letter (string) to
another letter (string).
'''
pass #delete this line and replace with your code here
def apply_shift(self, shift):
'''
Applies the Caesar Cipher to self.message_text with the input shift.
Creates a new string that is self.message_text shifted down the
alphabet by some number of characters determined by the input shift
shift (integer): the shift with which to encrypt the message.
0 <= shift < 26
Returns: the message text (string) in which every character is shifted
down the alphabet by the input shift
'''
pass #delete this line and replace with your code here
class PlaintextMessage(Message):
def __init__(self, text, shift):
'''
Initializes a PlaintextMessage object
text (string): the message's text
shift (integer): the shift associated with this message
A PlaintextMessage object inherits from Message and has five attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
self.shift (integer, determined by input shift)
self.encrypting_dict (dictionary, built using shift)
self.message_text_encrypted (string, created using shift)
Hint: consider using the parent class constructor so less
code is repeated
'''
pass #delete this line and replace with your code here
def get_shift(self):
'''
Used to safely access self.shift outside of the class
Returns: self.shift
'''
pass #delete this line and replace with your code here
def get_encrypting_dict(self):
'''
Used to safely access a copy self.encrypting_dict outside of the class
Returns: a COPY of self.encrypting_dict
'''
pass #delete this line and replace with your code here
def get_message_text_encrypted(self):
'''
Used to safely access self.message_text_encrypted outside of the class
Returns: self.message_text_encrypted
'''
pass #delete this line and replace with your code here
def change_shift(self, shift):
'''
Changes self.shift of the PlaintextMessage and updates other
attributes determined by shift (ie. self.encrypting_dict and
message_text_encrypted).
shift (integer): the new shift that should be associated with this message.
0 <= shift < 26
Returns: nothing
'''
pass #delete this line and replace with your code here
class CiphertextMessage(Message):
def __init__(self, text):
'''
Initializes a CiphertextMessage object
text (string): the message's text
a CiphertextMessage object has two attributes:
self.message_text (string, determined by input text)
self.valid_words (list, determined using helper function load_words)
'''
pass #delete this line and replace with your code here
def decrypt_message(self):
'''
Decrypt self.message_text by trying every possible shift value
and find the "best" one. We will define "best" as the shift that
creates the maximum number of real words when we use apply_shift(shift)
on the message text. If s is the original shift value used to encrypt
the message, then we would expect 26 - s to be the best shift value
for decrypting it.
Note: if multiple shifts are equally good such that they all create
the maximum number of you may choose any of those shifts (and their
corresponding decrypted messages) to return
Returns: a tuple of the best shift value used to decrypt the message
and the decrypted message text using that shift value
'''
pass #delete this line and replace with your code here
#Example test case (PlaintextMessage)
plaintext = PlaintextMessage('hello', 2)
print('Expected Output: jgnnq')
print('Actual Output:', plaintext.get_message_text_encrypted())
#Example test case (CiphertextMessage)
ciphertext = CiphertextMessage('jgnnq')
print('Expected Output:', (24, 'hello'))
print('Actual Output:', ciphertext.decrypt_message())
| [
"sruthisarav2@gmail.com"
] | sruthisarav2@gmail.com |
71d99592cc66f6bd7551e9e284c96aae2bbc256b | 2b990eab54cddce466285a52de6340eb47d7705f | /src/generator.py | fcf6dc1e6ea0e56cbd1c3311dc1588d4d6ec0f00 | [] | no_license | VaggelisSpi/Hate-Speech-Detection-Using-Neural-Networks | 4e96ba6f9e7f5362061e8fbc7a5c00028d65f44b | e631f86b012b0837d6bfdaaa0c8bac807fe7ec6d | refs/heads/main | 2023-08-25T05:39:15.042515 | 2021-11-05T18:25:21 | 2021-11-05T18:25:21 | 368,465,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,056 | py | import numpy as np
from nltk import word_tokenize
class KerasBatchGenerator(object):
    """Sequential batch generator for Keras models without an embeddings layer.

    Produces (x, y) batches where x holds vectorized, padded token sequences
    of shape (batch_size, maxlen, vec_size) and y holds the class labels.
    ``maxlen`` is taken from the token count of the *last* element of each
    batch.

    NOTE(review): taking maxlen from the last element assumes ``data`` is
    sorted by ascending token count -- confirm with the data-preparation code.
    """

    def __init__(self, data, classes, dataset_size, batch_size, num_classes,
                 vectorizer, vec_size, cycle=True):
        """Constructor.

        :param data: The whole dataset.
        :param classes: An array or list with the class of each element.
        :param dataset_size: The size of the dataset.
        :param batch_size: The size each batch will have.
        :param num_classes: The number of classes to categorize the data in.
        :param vectorizer: An object with a ``vectorize(element, maxlen,
            pad_value)`` method returning an n-dimensional array.
        :param vec_size: The dimensionality of each data point.
        :param cycle: If True, a short final batch is completed with data
            from the beginning; if False, the position simply wraps to the
            start of the dataset.
        """
        self.data = data
        self.classes = classes
        self.dataset_size = dataset_size
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.vectorizer = vectorizer
        self.vec_size = vec_size
        self.cycle = cycle
        # Value used to pad elements with fewer tokens than the longest
        # element of the batch.
        self.pad_value = -100.0
        # Tracks the progress of the batches sequentially through the data
        # set; once it reaches the end it wraps back to zero.
        self.current_idx = 0
        # Lazily-created generator backing __next__ (see below).
        self._batch_iter = None

    def generate(self):
        """Yield (x, y) batches of size ``self.batch_size`` forever.

        x has shape (batch_size, maxlen, vec_size); y has shape
        (batch_size, num_classes) in the wrap-around branch, otherwise it is
        the label slice for the batch.
        """
        while True:
            if self.current_idx + self.batch_size > self.dataset_size:
                if self.cycle:
                    # Not enough data left: fill with the remaining elements,
                    # then wrap around to the beginning for the rest.
                    maxlen = len(
                        word_tokenize(self.data[self.dataset_size - 1]))
                    x = np.zeros((self.batch_size, maxlen, self.vec_size))
                    y = np.zeros((self.batch_size, self.num_classes))
                    for j in range(self.current_idx, self.dataset_size):
                        # Fix: assign a single row instead of the original
                        # ``x[k:] = ...`` slice, which redundantly overwrote
                        # every following row on each iteration (same final
                        # result, O(batch) instead of O(batch**2) writes).
                        x[j - self.current_idx] = self.vectorizer.vectorize(
                            self.data[j], maxlen, self.pad_value)
                        y[j - self.current_idx] = self.classes[j]
                    end_j = self.dataset_size - self.current_idx
                    for j in range(self.batch_size - end_j):
                        x[j + end_j] = self.vectorizer.vectorize(
                            self.data[j], maxlen, self.pad_value)
                        y[j + end_j] = self.classes[j]
                    # Continue from wherever the wrap-around stopped.
                    self.current_idx = self.batch_size - end_j
                    yield x, y
                    continue
                else:
                    # Restart from the beginning of the dataset.
                    cur_batch_start = 0
                    self.current_idx = 0
            else:
                cur_batch_start = self.current_idx
            cur_batch_end = cur_batch_start + self.batch_size
            # Pad every element up to the token count of the batch's last
            # element (assumes length-sorted data, see class docstring).
            maxlen = len(word_tokenize(self.data[cur_batch_end - 1]))
            x = np.zeros((self.batch_size, maxlen, self.vec_size))
            for j in range(cur_batch_start, cur_batch_end):
                x[j - cur_batch_start] = self.vectorizer.vectorize(
                    self.data[j], maxlen, self.pad_value)
            y = np.array(self.classes[cur_batch_start:cur_batch_end])
            self.current_idx += self.batch_size
            yield x, y

    def __next__(self):
        """Return the next (x, y) batch.

        Fix: the original implementation returned the bound ``generate``
        method object itself instead of a batch. A single generator is now
        cached so repeated ``next()`` calls advance through the dataset.
        """
        if self._batch_iter is None:
            self._batch_iter = self.generate()
        return next(self._batch_iter)
| [
"vaggelisspithas@gmail.com"
] | vaggelisspithas@gmail.com |
692a0dd829d67f2880469a5480b4a6a213ce56d6 | 6ce578b5f1bbb0f58fde8333832f7925bf6853ee | /level3/model/commission.py | 8f4403c5c28eb3ece5d19946d23bccc1d5873400 | [] | no_license | ldesuque/python-levels | 22ab34f56896cba87e120f4069712474ea3a70cf | dfc9839e31ba8316433fef8084cd257539586540 | refs/heads/master | 2023-05-14T18:46:53.343297 | 2021-06-07T09:42:13 | 2021-06-07T09:42:13 | 374,608,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | COMMISSION_RATE = 0.3
INSURANCE_COMMISSION_RATE = 0.5
EURO_CENTS_PER_DAY = 100


class Commission():
    """Total commission (in euro cents) taken on a rental.

    The amount is a fixed fraction (COMMISSION_RATE) of the rental price; the
    wrapper classes below split it between the insurance, the roadside
    assistance and the company.
    """

    def __init__(self, rental_price, rental_days):
        self._commission = rental_price * COMMISSION_RATE
        self._rental_days = rental_days

    def _get_total(self):
        return self._commission

    def _get_rental_days(self):
        return self._rental_days


class InsuranceCommission(Commission):
    """Insurance share: half of the total commission, truncated to int."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def get_total(self):
        share = self._wrapped._get_total() * INSURANCE_COMMISSION_RATE
        return int(share)


class RoadsideAssistanceCommission(Commission):
    """Roadside assistance share: a fixed fee per rental day."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def get_total(self):
        return self._wrapped._get_rental_days() * EURO_CENTS_PER_DAY


class CompanyCommission(Commission):
    """Company share: whatever remains of the total commission."""

    def __init__(self, wrapped):
        self._wrapped = wrapped

    def get_total(self):
        insurance_share = InsuranceCommission(self._wrapped).get_total()
        roadside_share = RoadsideAssistanceCommission(self._wrapped).get_total()
        remainder = self._wrapped._get_total() - insurance_share - roadside_share
        return int(remainder)
"ldesuque@fi.uba.ar"
] | ldesuque@fi.uba.ar |
24a6a3760a022f098d5c9b5e463f6d45b13207f6 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rhk.py | 1c3dd6df8ac37fc874301af93c2e1637de44ee3e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed by a leading and trailing '"' token.

    lineRemaining is a list of whitespace-split tokens. If the first and last
    tokens are both '"', everything between them is printed joined by single
    spaces; a bare pair of quotes prints an empty line. Anything else is
    silently ignored.
    """
    # Guard against an empty token list (the original crashed here with an
    # IndexError when a line contained only the 'rHK' keyword).
    if not lineRemaining:
        return
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Data to print: everything between the two quote tokens.
            print(' '.join(lineRemaining[1:-1]))
        else:
            # print('') rather than print() so the same source also emits an
            # empty line under Python 2 (the original used py2-only
            # ``print`` statements; calls with a single argument are
            # portable across both versions).
            print('')


def main(fileName):
    """Interpret fileName line by line.

    Each line must start with the keyword 'rHK' followed by a quoted token
    list; the first malformed line prints ERROR and stops processing.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            # Blank lines previously raised IndexError; treat them as
            # malformed input like any other unrecognized line.
            if data and data[0] == 'rHK':
                printFunction(data[1:])
            else:
                print('ERROR')
                # NOTE(review): indentation was lost in this copy; the
                # original appears to stop at the first malformed line --
                # confirm before relying on this.
                return


if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
32e2852d7c1f5ec9d1e617ba6259d05590923d37 | acba1a8344b0478888b1393cdabc2cbf7898d9f6 | /presidio-anonymizer/presidio_anonymizer/entities/anonymized_entity.py | c76ebf4dd72918855c5fad191cf4ba51ad0da13a | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"CNRI-Python",
"Unlicense",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | hkarakose/presidio | 0f6cc0343c7fb2bcc9845b1adb7cbdf50a1f0692 | 00431870d0cd3e3a507a86ffb2cdbabf9b18d829 | refs/heads/main | 2023-03-15T01:24:43.582441 | 2021-03-14T20:24:24 | 2021-03-14T20:24:24 | 347,631,861 | 0 | 0 | MIT | 2021-03-14T12:36:10 | 2021-03-14T12:33:37 | null | UTF-8 | Python | false | false | 1,235 | py | class AnonymizedEntity:
"""Information about the anonymized entity."""
def __init__(
self,
anonymizer: str,
entity_type: str,
start: int,
end: int,
anonymized_text: str,
):
"""Create AnonymizerResultItem.
:param anonymizer: name of the anonymizer.
:param entity_type: type of the PII entity.
:param start: start index in the anonymized text.
:param end: end index in the anonymized text.
:param anonymized_text: the PII anonymized text.
"""
self.anonymizer = anonymizer
self.entity_type = entity_type
self.start = start
self.end = end
self.anonymized_text = anonymized_text
def __eq__(self, other) -> bool:
"""Verify two instances are equal.
:param other: the other instance to compare.
:return true if the two instances are equal, false otherwise.
"""
return (
self.anonymizer == other.anonymizer
and self.entity_type == other.entity_type
and self.start == other.start
and self.end == other.end
and self.anonymized_text == other.anonymized_text
) # noqa: E127
| [
"noreply@github.com"
] | hkarakose.noreply@github.com |
5d50546e42b94a120eee0b809792ebcc1dca11b0 | e84c264eab50df607457f02dc2720a8a3bc2e7a2 | /mptt_project/settings.py | 9e4598def5c5690bf44f62ebdfba7b92712a5ff0 | [] | no_license | smallevilbeast/mptt_example | 6aedc345c66442413ab649c35981018c818069ae | 159f39a7f98fd9fbe2b10d396ff61034d5b0ebaf | refs/heads/master | 2021-05-28T18:46:14.381014 | 2013-07-13T08:19:50 | 2013-07-13T08:19:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,325 | py | # Django settings for mptt_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh_CN'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'zg8aqcwcf8qtq))ekn#bjycn^pnql(ujzhhvox&$()svfk$2eo'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mptt_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mptt_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
'mptt',
'genres',
'shop',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"houshao55@gmail.com"
] | houshao55@gmail.com |
17070c9e131e7da1eaa887ebda61685154eec3d6 | e082e8d3093cd0ef4f648bb152c4f84ac81b9047 | /install/norns/files/crone/wscript | 15301a8f6be44ba37e14716d322ef2fb0044fb31 | [] | no_license | okyeron/fates | cceffea2a370da30e8bc5d153904c470584bd1bb | f7f4cf69a8886f77be943c564b69f925b95d2e88 | refs/heads/master | 2022-06-03T15:43:02.624461 | 2022-05-20T23:45:32 | 2022-05-20T23:45:32 | 183,079,154 | 175 | 29 | null | 2019-10-16T02:20:09 | 2019-04-23T19:04:41 | Shell | UTF-8 | Python | false | false | 1,323 | top = '../..'
def options(opt):
opt.load('compiler_c compiler_cxx')
def configure(conf):
conf.load('compiler_c compiler_cxx')
def build(bld):
crone_sources = [
'src/main.cpp',
'src/BufDiskWorker.cpp',
'src/Commands.cpp',
'src/MixerClient.cpp',
'src/OscInterface.cpp',
'src/SoftCutClient.cpp',
'src/Taper.cpp',
'src/Window.cpp',
'src/softcut/FadeCurves.cpp',
'src/softcut/SoftCutHead.cpp',
'src/softcut/SoftCutVoice.cpp',
'src/softcut/SubHead.cpp',
'src/softcut/Svf.cpp',
'src/softcut/TestBuffers.cpp'
]
bld.program( features='c cxx cxxprogram',
source=crone_sources,
target='crone',
includes=[
'src',
'./'
],
use=[
'ALSA',
'LIBLO',
'BOOST',
],
lib=[
'jack',
'pthread',
'm',
'sndfile',
'atomic'
],
cxxflags=[
'-std=c++14',
'-O2',
'-Wall'
])
| [
"1909661+okyeron@users.noreply.github.com"
] | 1909661+okyeron@users.noreply.github.com | |
f13072c3ef39008c0a03518d43e06fdf83f1efbc | 6b5ada71c8b86fc9081389e2d2f0ef0518432638 | /krokodeal/local/wsgi.py | d1776ff802b49e77a9b0ff899929d208b9d7a9da | [] | no_license | d00astro/Krokodeal | 0f7132a956e0e8e79c22b7cb1278139c71da5e9c | f0f9a0c6811f7d716b0f75076278e2ad62cb4946 | refs/heads/master | 2021-01-13T01:44:36.668186 | 2015-05-25T11:15:13 | 2015-05-25T11:15:13 | 24,201,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | /home/anders/Projects/Krokodeal/krokodeal/wsgi.py | [
"anders.astrom@gmail.com"
] | anders.astrom@gmail.com |
8b724fe06e2eda5854d33c57a12e69f77e96bb86 | 72a9dd581ec157e362d0f38037f7f24d88ed6e26 | /azure/worker/bindings/http.py | 32c7ee7752a6385e0871ac6c01c1697d1d428667 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | sai306/azure-functions-python-worker | 41b6e8aecc19227adbe8e871261d453a37d0b369 | 905a63d55dee966428609f366083a89757033309 | refs/heads/master | 2021-04-03T01:48:24.469988 | 2018-03-02T22:27:45 | 2018-03-05T17:25:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,775 | py | import json
import types
import typing
from azure.functions import _abc as azf_abc
from azure.functions import _http as azf_http
from . import meta
from .. import protos
class HttpRequest(azf_abc.HttpRequest):
    """Concrete HTTP request delivered to an Azure Functions trigger.

    The body is stored either as ``str`` or as ``bytes`` -- whichever the
    host sent -- and the byte representation is materialized lazily when
    requested.
    """

    __body_bytes: typing.Optional[bytes]
    __body_str: typing.Optional[str]

    def __init__(self, method: str, url: str,
                 headers: typing.Mapping[str, str],
                 params: typing.Mapping[str, str],
                 body_type: meta.TypedDataKind,
                 body: typing.Union[str, bytes]) -> None:
        self.__method = method
        self.__url = url
        self.__headers = azf_http.HttpRequestHeaders(headers)
        # Query parameters are exposed as a read-only mapping.
        self.__params = types.MappingProxyType(params)
        self.__body_type = body_type
        # Exactly one of the two representations is populated up front.
        self.__body_str = body if isinstance(body, str) else None
        self.__body_bytes = body if isinstance(body, bytes) else None
        if self.__body_str is None and self.__body_bytes is None:
            raise TypeError(
                f'unexpected HTTP request body type: {type(body).__name__}')

    @property
    def url(self):
        return self.__url

    @property
    def method(self):
        # Normalized to upper case regardless of what the host sent.
        return self.__method.upper()

    @property
    def headers(self):
        return self.__headers

    @property
    def params(self):
        return self.__params

    def get_body(self) -> bytes:
        """Return the body as bytes, UTF-8 encoding a str body once."""
        if self.__body_bytes is None:
            assert self.__body_str is not None
            self.__body_bytes = self.__body_str.encode('utf-8')
        return self.__body_bytes

    def get_json(self) -> typing.Any:
        """Parse and return the JSON body, or raise ValueError."""
        if self.__body_type is not meta.TypedDataKind.json:
            raise ValueError('HTTP request does not have JSON data attached')
        assert self.__body_str is not None
        return json.loads(self.__body_str)
class HttpResponseConverter(meta.OutConverter, binding='http'):
    """Serializes HTTP responses (or plain strings) into gRPC TypedData."""

    @classmethod
    def check_python_type(cls, pytype: type) -> bool:
        # A bare str is accepted as shorthand for a plain response body.
        return issubclass(pytype, (azf_abc.HttpResponse, str))

    @classmethod
    def to_proto(cls, obj: typing.Any) -> protos.TypedData:
        if isinstance(obj, str):
            return protos.TypedData(string=obj)

        if isinstance(obj, azf_abc.HttpResponse):
            headers = dict(obj.headers)
            if 'content-type' not in headers:
                # Derive a content-type from the response's mimetype; text
                # types additionally carry the charset.
                content_type = obj.mimetype
                if content_type.startswith('text/'):
                    content_type = f'{content_type}; charset={obj.charset}'
                headers['content-type'] = content_type

            raw_body = obj.get_body()
            body = protos.TypedData(
                bytes=raw_body if raw_body is not None else b'')

            return protos.TypedData(
                http=protos.RpcHttp(
                    status_code=str(obj.status_code),
                    headers=headers,
                    is_raw=True,
                    body=body))

        raise NotImplementedError
class HttpRequestConverter(meta.InConverter,
                           binding='httpTrigger', trigger=True):
    """Deserializes incoming gRPC TypedData into an HttpRequest object."""

    @classmethod
    def check_python_type(cls, pytype: type) -> bool:
        # Only parameters annotated as HttpRequest can receive this trigger.
        return issubclass(pytype, azf_abc.HttpRequest)

    @classmethod
    def from_proto(cls, data: protos.TypedData,
                   trigger_metadata) -> typing.Any:
        # Translate the proto's oneof body representation into a
        # (TypedDataKind, payload) pair understood by HttpRequest.
        if data.WhichOneof('data') != 'http':
            raise NotImplementedError

        body_rpc_val = data.http.body
        body_rpc_type = body_rpc_val.WhichOneof('data')

        if body_rpc_type == 'json':
            body_type = meta.TypedDataKind.json
            body = body_rpc_val.json
        elif body_rpc_type == 'string':
            body_type = meta.TypedDataKind.string
            body = body_rpc_val.string
        elif body_rpc_type == 'bytes':
            body_type = meta.TypedDataKind.bytes
            body = body_rpc_val.bytes
        elif body_rpc_type is None:
            # Means an empty HTTP request body -- we don't want
            # `HttpResponse.get_body()` to return None as it would
            # make it more complicated to work with than necessary.
            # Therefore we normalize the body to an empty bytes
            # object.
            body_type = meta.TypedDataKind.bytes
            body = b''
        else:
            raise TypeError(
                f'unsupported HTTP body type from the incoming gRPC data: '
                f'{body_rpc_type}')

        return HttpRequest(
            method=data.http.method,
            url=data.http.url,
            headers=data.http.headers,
            params=data.http.query,
            body_type=body_type,
            body=body)
| [
"yury@magic.io"
] | yury@magic.io |
966055d11dc5c95dfcd60488739a366479962bdb | 3dfb4ee39555b30e6e0c6fcdbef371864e69f694 | /google-cloud-sdk/lib/googlecloudsdk/core/console/console_io.py | 9b7c9aa24cab3a27f61606aad313b3e11a8054b4 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | MD-Anderson-Bioinformatics/NG-CHM_Galaxy | 41d1566d5e60416e13e023182ca4351304381a51 | dcf4886d4ec06b13282143ef795c5f0ff20ffee3 | refs/heads/master | 2021-06-02T21:04:12.194964 | 2021-04-29T14:45:32 | 2021-04-29T14:45:32 | 130,249,632 | 0 | 1 | null | 2020-07-24T18:35:21 | 2018-04-19T17:25:33 | Python | UTF-8 | Python | false | false | 32,372 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General console printing utilities used by the Cloud SDK."""
import logging
import os
import re
import sys
import textwrap
import threading
import time
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
from googlecloudsdk.core.console import console_pager
from googlecloudsdk.core.util import files
from googlecloudsdk.third_party.py27 import py27_subprocess as subprocess
FLOAT_COMPARE_EPSILON = 1e-6
class Error(exceptions.Error):
  """Base exception for the module."""
  # Catching this type catches every error raised by console_io.
  pass
class UnattendedPromptError(Error):
  """An exception for when a prompt cannot be answered."""

  def __init__(self):
    # Raised when stdin is non-interactive and there is no piped input left
    # to consume, so a prompt could never receive an answer.
    super(UnattendedPromptError, self).__init__(
        'This prompt could not be answered because you are not in an '
        'interactive session. You can re-run the command with the --quiet '
        'flag to accept default answers for all prompts.')
class OperationCancelledError(Error):
  """An exception raised when the user cancels the operation at a prompt."""

  def __init__(self):
    super(OperationCancelledError, self).__init__('Operation cancelled.')
class TablePrinter(object):
  """Provides the ability to print a list of items as a formatted table.

  Using this class helps you adhere to the gcloud style guide.

  The table will auto size the columns to fit the maximum item length for that
  column. You can also choose how to justify each column and to add extra
  padding to each column.
  """

  # Column justification markers; these are Python format-spec alignment
  # characters and are substituted directly into format strings below.
  JUSTIFY_LEFT = '<'
  JUSTIFY_RIGHT = '>'
  JUSTIFY_CENTER = '^'

  def __init__(self, headers, title=None,
               justification=None, column_padding=None):
    """Creates a new TablePrinter.

    Args:
      headers: A tuple of strings that represent the column headers titles.
        This can be a tuple of empty strings or None's if you do not want
        headers displayed. The number of empty elements in the tuple must match
        the number of columns you want to display.
      title: str, An optional title for the table.
      justification: A tuple of JUSTIFY_LEFT, JUSTIFY_RIGHT, JUSTIFY_CENTER that
        describes the justification for each column. This must have the same
        number of items as the headers tuple.
      column_padding: A tuple of ints that describes the extra padding that
        should be added to each column. This must have the same
        number of items as the headers tuple.

    Raises:
      ValueError: If the justification or column_padding tuples are not of the
        correct type or length.
    """
    # None headers are normalized to empty strings so width math is uniform.
    self.__headers = [h if h else '' for h in headers]
    self.__title = title
    self.__num_columns = len(self.__headers)
    self.__header_widths = [len(str(x)) for x in self.__headers]

    self.__column_padding = column_padding
    if self.__column_padding is None:
      self.__column_padding = tuple([0] * self.__num_columns)
    if (not isinstance(self.__column_padding, (tuple)) or
        len(self.__column_padding) != self.__num_columns):
      raise ValueError('Column padding tuple does not have {0} columns'
                       .format(self.__num_columns))

    self.__justification = justification
    if self.__justification is None:
      self.__justification = tuple([TablePrinter.JUSTIFY_LEFT] *
                                   self.__num_columns)
    if (not isinstance(self.__justification, tuple) or
        len(self.__justification) != self.__num_columns):
      raise ValueError('Justification tuple does not have {0} columns'
                       .format(self.__num_columns))
    # Identity comparison is intentional: callers must pass the class
    # constants themselves, not arbitrary equal strings.
    for value in self.__justification:
      if not (value is TablePrinter.JUSTIFY_LEFT or
              value is TablePrinter.JUSTIFY_RIGHT or
              value is TablePrinter.JUSTIFY_CENTER):
        raise ValueError('Justification values must be one of JUSTIFY_LEFT, '
                         'JUSTIFY_RIGHT, or JUSTIFY_CENTER')

  def SetTitle(self, title):
    """Sets the title of the table.

    Args:
      title: str, The new title.
    """
    self.__title = title

  def Log(self, rows, logger=None, level=logging.INFO):
    """Logs the given rows to the given logger.

    Args:
      rows: list of tuples, The rows to log the formatted table for.
      logger: logging.Logger, The logger to do the logging. If None, the root
        logger will be used.
      level: logging level, An optional override for the logging level, INFO by
        default.
    """
    if not logger:
      logger = log.getLogger()
    lines = self.GetLines(rows)
    for line in lines:
      logger.log(level, line)

  def Print(self, rows, output_stream=None, indent=0):
    """Prints the given rows to stdout.

    Args:
      rows: list of tuples, The rows to print the formatted table for.
      output_stream: file-like object, The stream to wire the rows to. Defaults
        to log.out if not given.
      indent: int, The number of spaces to indent all lines of the table.
    """
    if not output_stream:
      output_stream = log.out
    lines = self.GetLines(rows, indent=indent)
    for line in lines:
      output_stream.write(line + '\n')

  def GetLines(self, rows, indent=0):
    """Gets a list of strings of formatted lines for the given rows.

    Args:
      rows: list of tuples, The rows to get the formatted table for.
      indent: int, The number of spaces to indent all lines of the table.

    Returns:
      list of str, The lines of the formatted table that can be printed.

    Raises:
      ValueError: If any row does not have the correct number of columns.
    """
    # Start from the header widths; every cell can only widen a column.
    column_widths = list(self.__header_widths)
    for row in rows:
      if len(row) != self.__num_columns:
        raise ValueError('Row [{row}] does not have {rows} columns'
                         .format(row=row, rows=self.__num_columns))

      # Find the max width of each column
      for i in range(self.__num_columns):
        column_widths[i] = max(column_widths[i], len(str(row[i])))

    # Add padding
    column_widths = [column_widths[i] + self.__column_padding[i]
                     for i in range(self.__num_columns)]

    # Each inter-column separator (' | ') is 3 characters wide.
    total_width = (len(column_widths) - 1) * 3
    for width in column_widths:
      total_width += width

    edge_line = ('--' +
                 '---'.join(['-' * width for width in column_widths]) +
                 '--')
    title_divider_line = ('|-' +
                          '---'.join(['-' * width for width in column_widths]) +
                          '-|')
    divider_line = ('|-' +
                    '-+-'.join(['-' * width for width in column_widths]) +
                    '-|')
    lines = [edge_line]

    if self.__title:
      # Two-stage format: the first .format() builds a format string with the
      # computed width, the second fills in the centered title.
      title_line = '| {{title:{justify}{width}s}} |'.format(
          justify=TablePrinter.JUSTIFY_CENTER, width=total_width).format(
              title=self.__title)
      lines.append(title_line)
      lines.append(title_divider_line)

    # Generate format strings with the correct width for each column
    column_formats = []
    for i in range(self.__num_columns):
      column_formats.append('{{i{i}:{justify}{width}s}}'.format(
          i=i, justify=self.__justification[i], width=column_widths[i]))
    pattern = '| ' + ' | '.join(column_formats) + ' |'

    def _ParameterizedArrayDict(array):
      # Maps positional cells to the i0, i1, ... keyword names used above.
      return dict(('i{i}'.format(i=i), array[i]) for i in range(len(array)))

    if [h for h in self.__headers if h]:
      # Only print headers if there is at least one non-empty header
      lines.append(pattern.format(**_ParameterizedArrayDict(self.__headers)))
      lines.append(divider_line)

    lines.extend([pattern.format(**_ParameterizedArrayDict(row))
                  for row in rows])
    lines.append(edge_line)

    if indent:
      return [(' ' * indent) + l for l in lines]
    return lines
class ListPrinter(object):
  """Prints a titled, dash-bulleted list of items.

  Using this class helps you adhere to the gcloud style guide.
  """

  def __init__(self, title):
    """Create a titled list printer that can print rows to stdout.

    Args:
      title: A string for the title of the list.
    """
    self.__title = title

  def Print(self, rows, output_stream=None):
    """Print the title followed by one ' - ' bullet line per row.

    Args:
      rows: A list of objects representing the rows of this list. They are
        converted to strings before being printed.
      output_stream: file-like object, The stream to write the rows to.
        Defaults to log.out if not given.
    """
    out = output_stream or log.out
    bullets = [self.__title]
    bullets.extend(' - ' + str(row) for row in rows)
    for line in bullets:
      out.write(line + '\n')
TEXTWRAP = textwrap.TextWrapper(replace_whitespace=False,
drop_whitespace=False,
break_on_hyphens=False)
def _DoWrap(message):
"""Text wrap the given message and correctly handle newlines in the middle.
Args:
message: str, The message to wrap. It may have newlines in the middle of
it.
Returns:
str, The wrapped message.
"""
return '\n'.join([TEXTWRAP.fill(line) for line in message.splitlines()])
def _RawInput(prompt=None):
  """A simple redirect to the built-in raw_input function.

  If the prompt is given, it is correctly line wrapped.

  Args:
    prompt: str, An optional prompt.

  Returns:
    The input from stdin.
  """
  # The prompt goes to stderr (not stdout) so command output stays clean for
  # piping.
  if prompt:
    sys.stderr.write(_DoWrap(prompt))

  try:
    return raw_input()
  except EOFError:
    # stdin was closed or exhausted (e.g. piped input ran out); callers treat
    # None as "no answer given".
    return None
def IsInteractive(output=False, error=False, heuristic=False):
  """Determines if the current terminal session is interactive.

  sys.stdin must be a terminal input stream.

  Args:
    output: If True then sys.stdout must also be a terminal output stream.
    error: If True then sys.stderr must also be a terminal output stream.
    heuristic: If True then we also do some additional heuristics to check if
      we are in an interactive context. Checking home path for example.

  Returns:
    True if the current terminal session is interactive.
  """
  required_streams = [sys.stdin]
  if output:
    required_streams.append(sys.stdout)
  if error:
    required_streams.append(sys.stderr)
  if not all(stream.isatty() for stream in required_streams):
    return False

  if heuristic:
    # Check the home path. Most startup scripts for example are executed by
    # users that don't have a home path set. Home is OS dependent though, so
    # check everything.
    # *NIX OS usually sets the HOME env variable. It is usually '/home/user',
    # but can also be '/root'. If it's just '/' we are most likely in an init
    # script.
    # Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
    # probably being run from a task scheduler context. HOMEPATH can be '\'
    # when a user has a network mapped home directory.
    # Cygwin has it all! Both Windows and Linux. Checking both is perfect.
    home = os.getenv('HOME')
    homepath = os.getenv('HOMEPATH')
    if not homepath and (not home or home == '/'):
      return False
  return True
def CanPrompt():
  """Returns true if we can prompt the user for information.

  This combines all checks (IsInteractive(), disable_prompts is False) to
  verify that we can prompt the user for information.

  Returns:
    bool, True if we can prompt the user for information.
  """
  if not IsInteractive(error=True):
    return False
  return not properties.VALUES.core.disable_prompts.GetBool()
def PromptContinue(message=None, prompt_string=None, default=True,
                   throw_if_unattended=False, cancel_on_no=False):
  """Prompts the user a yes or no question and asks if they want to continue.

  Args:
    message: str, The prompt to print before the question.
    prompt_string: str, An alternate yes/no prompt to display. If None, it
      defaults to 'Do you want to continue'.
    default: bool, What the default answer should be. True for yes, False for
      no.
    throw_if_unattended: bool, If True, this will throw if there was nothing
      to consume on stdin and stdin is not a tty.
    cancel_on_no: bool, If True and the user answers no, throw an exception to
      cancel the entire operation. Useful if you know you don't want to
      continue doing anything and don't want to have to raise your own
      exception.

  Raises:
    UnattendedPromptError: If there is no input to consume and this is not
      running in an interactive terminal.
    OperationCancelledError: If the user answers no and cancel_on_no is True.

  Returns:
    bool, False if the user said no, True if the user said anything else or if
    prompts are disabled.
  """
  # When prompts are disabled, behave as if the user gave the default answer.
  if properties.VALUES.core.disable_prompts.GetBool():
    if not default and cancel_on_no:
      raise OperationCancelledError()
    return default
  if message:
    sys.stderr.write(_DoWrap(message) + '\n\n')
  if not prompt_string:
    prompt_string = 'Do you want to continue'
  # The capitalized letter in the suffix indicates which answer is default.
  if default:
    prompt_string += ' (Y/n)? '
  else:
    prompt_string += ' (y/N)? '
  sys.stderr.write(_DoWrap(prompt_string))
  def GetAnswer():
    # Loops until an empty line, EOF, or a recognizable yes/no is entered.
    while True:
      answer = _RawInput()
      # pylint:disable=g-explicit-bool-comparison, We explicitly want to
      # distinguish between empty string and None.
      if answer == '':
        # User just hit enter, return default.
        sys.stderr.write('\n')
        return default
      elif answer is None:
        # This means we hit EOF, no input or user closed the stream.
        if throw_if_unattended and not IsInteractive():
          sys.stderr.write('\n')
          raise UnattendedPromptError()
        else:
          sys.stderr.write('\n')
          return default
      elif answer.lower() in ['y', 'yes']:
        sys.stderr.write('\n')
        return True
      elif answer.lower() in ['n', 'no']:
        sys.stderr.write('\n')
        return False
      else:
        sys.stderr.write("Please enter 'y' or 'n': ")
  answer = GetAnswer()
  if not answer and cancel_on_no:
    raise OperationCancelledError()
  return answer
def PromptResponse(message):
  """Prompts the user for a string.

  Args:
    message: str, The prompt to print before the question.

  Returns:
    str, The string entered by the user, or None if prompts are disabled.
  """
  # Honor the global prompt-suppression property.
  if properties.VALUES.core.disable_prompts.GetBool():
    return None
  return _RawInput(message)
def PromptWithDefault(message, default=None):
  """Prompts the user for a string, falling back to a default.

  Unlike PromptResponse, this also appends a ': ' to the prompt. If 'default'
  is specified, the default is also written written into the prompt (e.g.
  if message is "message" and default is "default", the prompt would be
  "message (default): ").

  The default is returned if the user simply presses enter (no input) or an
  EOF is received.

  Args:
    message: str, The prompt to print before the question.
    default: str, The default value (if any).

  Returns:
    str, The string entered by the user, or the default if no value was
    entered or prompts are disabled.
  """
  if properties.VALUES.core.disable_prompts.GetBool():
    return default
  # Show the default (when given) as part of the prompt suffix.
  if default:
    suffix = ' ({default}): '.format(default=default)
  else:
    suffix = ': '
  response = _RawInput(message + suffix)
  # Empty input ('') or EOF (None) both fall back to the default.
  return response or default
def PromptChoice(options, default=None, message=None, prompt_string=None):
  """Prompt the user to select a choice from a list of items.

  Args:
    options: [object], A list of objects to print as choices. Their str()
      method will be used to display them.
    default: int, The default index to return if prompting is disabled or if
      they do not enter a choice.
    message: str, An optional message to print before the choices are
      displayed.
    prompt_string: str, A string to print when prompting the user to enter a
      choice. If not given, a default prompt is used.

  Raises:
    ValueError: If no options are given or if the default is not in the range
      of available options.

  Returns:
    The index of the item in the list that was chosen, or the default if
    prompts are disabled.
  """
  if not options:
    raise ValueError('You must provide at least one option.')
  maximum = len(options)
  if default is not None and not 0 <= default < maximum:
    raise ValueError(
        'Default option [{default}] is not a valid index for the options list '
        '[{maximum} options given]'.format(default=default, maximum=maximum))
  if properties.VALUES.core.disable_prompts.GetBool():
    return default
  if message:
    sys.stderr.write(_DoWrap(message) + '\n')
  # Choices are displayed (and typed) 1-based; the returned index is 0-based.
  for i, option in enumerate(options):
    sys.stderr.write(' [{index}] {option}\n'.format(
        index=i + 1, option=str(option)))
  if not prompt_string:
    prompt_string = 'Please enter your numeric choice'
  if default is None:
    suffix_string = ': '
  else:
    suffix_string = ' ({default}): '.format(default=default + 1)
  sys.stderr.write(_DoWrap(prompt_string + suffix_string))
  while True:
    answer = _RawInput()
    # BUGFIX: was `answer is ''` — identity comparison with a string literal
    # only works because CPython interns '' and raises SyntaxWarning on
    # modern Pythons; equality is the correct comparison here.
    if answer is None or (answer == '' and default is not None):
      # Return default if we failed to read from stdin
      # Return default if the user hit enter and there is a valid default
      # Prompt again otherwise
      sys.stderr.write('\n')
      return default
    try:
      num_choice = int(answer)
      if num_choice < 1 or num_choice > maximum:
        raise ValueError('Choice must be between 1 and {maximum}'.format(
            maximum=maximum))
      sys.stderr.write('\n')
      return num_choice - 1
    except ValueError:
      # Covers both a non-numeric answer and an out-of-range choice.
      sys.stderr.write('Please enter a value between 1 and {maximum}: '
                       .format(maximum=maximum))
def LazyFormat(s, **kwargs):
  """Converts {key} => value for key, value in kwargs.items().

  After the {key} conversions it converts {{<identifier>}} => {<identifier>}.

  Args:
    s: str, The string to format.
    **kwargs: {str:str}, A dict of strings for named parameters.

  Returns:
    str, The lazily-formatted string.
  """
  # PORTABILITY FIX: dict.iteritems() is Python-2-only; items() exists on
  # both Python 2 (returns a list) and Python 3 (returns a view).
  for key, value in kwargs.items():
    fmt = '{' + key + '}'
    start = 0
    while True:
      start = s.find(fmt, start)
      if start == -1:
        break
      if (start and s[start - 1] == '{' and
          len(fmt) < len(s[start:]) and s[start + len(fmt)] == '}'):
        # {{key}} => {key}  (escaped form: strip one level of braces)
        s = s[0:start - 1] + fmt + s[start + len(fmt) + 1:]
        start += len(fmt)
      else:
        # {key} => value
        s = s[0:start] + value + s[start + len(fmt):]
        start += len(value)
  # {{unknown}} => {unknown} for identifiers not present in kwargs.
  return re.sub(r'{({\w+})}', r'\1', s)
def PrintExtendedList(items, col_fetchers):
  """Print a properly formatted extended list for some set of resources.

  If items is a generator, this function may elect to only request those rows
  that it is ready to display.

  Args:
    items: [resource] or a generator producing resources, The objects
      representing cloud resources.
    col_fetchers: [(string, func(resource))], A list of tuples, one for each
      column, in the order that they should appear. The string is the title
      of that column which will be printed in a header. The func is a function
      that will fetch a row-value for that column, given the resource
      corresponding to the row.
  """
  total_items = 0
  # The first row is the header built from the column titles.
  rows = [[title for (title, unused_func) in col_fetchers]]
  for item in items:
    total_items += 1
    row = []
    for (unused_title, fetch) in col_fetchers:
      cell = fetch(item)
      row.append('-' if cell is None else cell)
    rows.append(row)
  attr = console_attr.GetConsoleAttr()
  # Column width = widest cell in the column plus two spaces of gutter.
  widths = [0] * len(col_fetchers)
  for row in rows:
    for idx in range(len(row)):
      widths[idx] = max(widths[idx], attr.DisplayWidth(unicode(row[idx])) + 2)
  for row in rows:
    last_col = len(row) - 1
    for idx in range(len(row)):
      text = unicode(row[idx])
      padding = widths[idx] - attr.DisplayWidth(text)
      # The final column is left unpadded to avoid trailing spaces.
      if padding > 0 and idx != last_col:
        text += u' ' * padding
      log.out.write(text)
    log.out.write('\n')
  if not total_items:
    log.status.write('Listed 0 items.\n')
class ProgressTracker(object):
  """A context manager for telling the user about long-running progress."""
  # Characters cycled through to animate the spinner.
  SPIN_MARKS = [
      '|',
      '/',
      '-',
      '\\',
  ]
  def __init__(self, message, autotick=True, detail_message_callback=None,
               tick_delay=1):
    # message: str, static prefix printed before the spinner.
    # autotick: bool, if True a background thread advances the spinner.
    # detail_message_callback: optional callable returning extra detail text.
    # tick_delay: number, seconds between automatic ticks.
    self._message = message
    self._prefix = message + '...'
    self._ticks = 0
    self._autotick = autotick
    self._done = False
    # Guards _done/_ticks against races between the ticker thread and exit.
    self._lock = threading.Lock()
    self._detail_message_callback = detail_message_callback
    self._last_message_size = 0
    self._tick_delay = tick_delay
  def _GetPrefix(self):
    # Returns the prefix, with the optional detail message appended.
    if self._detail_message_callback:
      detail_message = self._detail_message_callback()
      if detail_message:
        return self._prefix + ' ' + detail_message + '...'
    return self._prefix
  def __enter__(self):
    log.file_only_logger.info(self._GetPrefix())
    self._Print()
    if self._autotick:
      def Ticker():
        # Spins until Tick() reports that the tracker is done.
        while True:
          time.sleep(self._tick_delay)
          if self.Tick():
            return
      threading.Thread(target=Ticker).start()
    return self
  def Tick(self):
    """Give a visual indication to the user that some progress has been made."""
    with self._lock:
      if not self._done:
        self._ticks += 1
        self._Print(ProgressTracker.SPIN_MARKS[
            self._ticks % len(ProgressTracker.SPIN_MARKS)])
      # Returning True tells the autotick thread to stop.
      return self._done
  def _Print(self, message=''):
    """Reprints the prefix followed by an optional message."""
    display_message = self._GetPrefix()
    if message:
      display_message += message
    # This is to clear the display buffer, otherwise it would display the
    # trailing parts of the previous line
    if self._last_message_size > 0:
      sys.stderr.write('\r' + self._last_message_size * ' ')
    self._last_message_size = len(display_message)
    sys.stderr.write('\r' + display_message)
    sys.stderr.flush()
  def __exit__(self, ex_type, unused_value, unused_traceback):
    with self._lock:
      self._done = True
      # If an exception was raised during progress tracking, exit silently here
      # and let the appropriate exception handler tell the user what happened.
      if ex_type:
        # This is to prevent the tick character from appearing before 'failed.'
        # (ex. 'message...failed' instead of 'message.../failed.')
        self._Print('failed.\n')
        return False
      self._Print('done.\n')
class DelayedProgressTracker(ProgressTracker):
  """A progress tracker that only appears during a long running operation.

  Waits for the given timeout, then displays a progress tracker.
  """
  class TrackerState(object):
    """Enum representing the current state of the progress tracker."""
    class _TrackerStateTuple(object):
      # Simple named wrapper; states are compared by identity ('is').
      def __init__(self, name):
        self.name = name
    WAITING = _TrackerStateTuple('Waiting')
    STARTED = _TrackerStateTuple('Started')
    FINISHED = _TrackerStateTuple('Finished')
  def __init__(self, message, timeout, autotick=True,
               detail_message_callback=None):
    # timeout: number, seconds to wait before displaying the tracker.
    super(DelayedProgressTracker, self).__init__(
        message, autotick=autotick,
        detail_message_callback=detail_message_callback)
    self._timeout = timeout
    self._state = self.TrackerState.WAITING
    # Guards state transitions between the delay thread and enter/exit.
    self._state_lock = threading.Lock()
  def _SleepWhileNotFinished(self, timeout, increment=0.1):
    """Sleep for the given time unless the tracker enters the FINISHED state.

    Args:
      timeout: number, the total time for which to sleep
      increment: number, the increment at which to check whether the tracker is
        FINISHED

    Returns:
      bool, True unless the tracker reached the FINISHED state before the total
      sleep time elapsed
    """
    elapsed_time = 0
    # FLOAT_COMPARE_EPSILON compensates for accumulated float rounding in the
    # repeated increment additions.
    while (elapsed_time + FLOAT_COMPARE_EPSILON) <= timeout:
      time.sleep(increment)
      elapsed_time += increment
      if self._state is self.TrackerState.FINISHED:
        return False
    return True
  def __enter__(self):
    def StartTracker():
      if not self._SleepWhileNotFinished(self._timeout):
        # If we aborted sleep early, return. We exited the progress tracker
        # before the delay finished.
        return
      with self._state_lock:
        if self._state is not self.TrackerState.FINISHED:
          self._state = self.TrackerState.STARTED
          super(DelayedProgressTracker, self).__enter__()
    threading.Thread(target=StartTracker).start()
    return self
  def __exit__(self, exc_type, exc_value, traceback):
    with self._state_lock:
      # Only forward the exit if the underlying tracker was ever displayed.
      if self._state is self.TrackerState.STARTED:
        super(DelayedProgressTracker, self).__exit__(exc_type, exc_value,
                                                     traceback)
      self._state = self.TrackerState.FINISHED
  def Tick(self):
    # Only forward ticks once the underlying tracker has been displayed.
    with self._state_lock:
      if self._state is self.TrackerState.STARTED:
        return super(DelayedProgressTracker, self).Tick()
      return self._state is self.TrackerState.FINISHED
class ProgressBar(object):
  """A simple progress bar for tracking completion of an action.

  This progress bar works without having to use any control characters. It
  prints the action that is being done, and then fills a progress bar below it.
  You should not print anything else on the output stream during this time as it
  will cause the progress bar to break on lines.

  Progress bars can be stacked into a group. first=True marks the first bar in
  the group and last=True marks the last bar in the group. The default assumes
  a singleton bar with first=True and last=True.

  This class can also be used in a context manager.
  """
  @staticmethod
  def _DefaultCallback(progress_factor):
    # No-op callback used when the caller does not care about progress.
    pass
  DEFAULT_CALLBACK = _DefaultCallback
  @staticmethod
  def SplitProgressBar(original_callback, weights):
    """Splits a progress bar into logical sections.

    Wraps the original callback so that each of the subsections can use the full
    range of 0 to 1 to indicate its progress. The overall progress bar will
    display total progress based on the weights of the tasks.

    Args:
      original_callback: f(float), The original callback for the progress bar.
      weights: [float], The weights of the tasks to create. These can be any
        numbers you want and the split will be based on their proportions to
        each other.

    Returns:
      (f(float), ), A tuple of callback functions, in order, for the subtasks.
    """
    if (original_callback is None or
        original_callback == ProgressBar.DEFAULT_CALLBACK):
      # Nothing to report to; hand out no-op callbacks for every section.
      return tuple([ProgressBar.DEFAULT_CALLBACK for _ in range(len(weights))])
    def MakeCallback(already_done, weight):
      # Captures this section's start offset and weight by value.
      def Callback(done_fraction):
        original_callback(already_done + (done_fraction * weight))
      return Callback
    total = float(sum(weights))
    callbacks = []
    already_done = 0
    for weight in weights:
      normalized_weight = weight / total
      callbacks.append(MakeCallback(already_done, normalized_weight))
      already_done += normalized_weight
    return tuple(callbacks)
  def __init__(self, label, stream=log.status, total_ticks=60, first=True,
               last=True):
    """Creates a progress bar for the given action.

    Args:
      label: str, The action that is being performed.
      stream: The output stream to write to, stderr by default.
      total_ticks: int, The number of ticks wide to make the progress bar.
      first: bool, True if this is the first bar in a stacked group.
      last: bool, True if this is the last bar in a stacked group.
    """
    self._stream = stream
    self._ticks_written = 0
    self._total_ticks = total_ticks
    self._first = first
    self._last = last
    attr = console_attr.ConsoleAttr()
    self._box = attr.GetBoxLineCharacters()
    # If the joint characters differ from the corner characters, adjacent
    # stacked bars must redraw the shared rule line.
    self._redraw = (self._box.d_dr != self._box.d_vr or
                    self._box.d_dl != self._box.d_vl)
    # Fit the label into the bar width: truncate with '...' or pad with
    # spaces so the frame is always exactly total_ticks wide.
    max_label_width = self._total_ticks - 4
    if len(label) > max_label_width:
      label = label[:max_label_width - 3] + '...'
    elif len(label) < max_label_width:
      diff = max_label_width - len(label)
      label += ' ' * diff
    left = self._box.d_vr + self._box.d_h
    right = self._box.d_h + self._box.d_vl
    self._label = u'{left} {label} {right}'.format(left=left, label=label,
                                                   right=right)
  def Start(self):
    """Starts the progress bar by writing the top rule and label."""
    if self._first or self._redraw:
      left = self._box.d_dr if self._first else self._box.d_vr
      right = self._box.d_dl if self._first else self._box.d_vl
      rule = u'{left}{middle}{right}\n'.format(
          left=left, middle=self._box.d_h * self._total_ticks, right=right)
      self._stream.write(rule)
    self._stream.write(self._label + '\n')
    self._stream.write(self._box.d_ur)
    self._ticks_written = 0
  def SetProgress(self, progress_factor):
    """Sets the current progress of the task.

    This method has no effect if the progress bar has already progressed past
    the progress you call it with (since the progress bar cannot back up).

    Args:
      progress_factor: float, The current progress as a float between 0 and 1.
    """
    expected_ticks = int(self._total_ticks * progress_factor)
    new_ticks = expected_ticks - self._ticks_written
    # Don't allow us to go over 100%.
    new_ticks = min(new_ticks, self._total_ticks - self._ticks_written)
    if new_ticks > 0:
      self._stream.write(self._box.d_h * new_ticks)
      self._ticks_written += new_ticks
      if expected_ticks == self._total_ticks:
        # Close the bar; in a stacked group a following bar may redraw over
        # this line, so end with '\r' instead of '\n' in that case.
        end = '\n' if self._last or not self._redraw else '\r'
        self._stream.write(self._box.d_ul + end)
      self._stream.flush()
  def Finish(self):
    """Mark the progress as done."""
    self.SetProgress(1)
  def __enter__(self):
    self.Start()
    return self
  def __exit__(self, *args):
    self.Finish()
def More(contents, out=None, prompt=None, check_pager=True):
  """Run a user specified pager or fall back to the internal pager.

  Args:
    contents: The entire contents of the text lines to page.
    out: The output stream, log.out (effectively) if None.
    prompt: The page break prompt.
    check_pager: Checks the PAGER env var and uses it if True.
  """
  # Non-interactive output: just dump the contents without paging.
  if not IsInteractive(output=True):
    if not out:
      out = log.out
    out.write(contents)
    return
  if not out:
    # Rendered help to the log file.
    log.file_only_logger.info(contents)
    # Paging shenanigans to stdout.
    out = sys.stdout
  if check_pager:
    pager = os.environ.get('PAGER', None)
    if pager == '-':
      # Use the fallback Pager.
      pager = None
    elif not pager:
      # Search for a pager that handles ANSI escapes.
      for command in ('less', 'pager'):
        if files.FindExecutableOnPath(command):
          pager = command
          break
    if pager:
      # Temporarily set LESS=-R (raw control chars) unless the user already
      # configured LESS; restore the environment afterwards.
      less = os.environ.get('LESS', None)
      if less is None:
        os.environ['LESS'] = '-R'
      # NOTE(review): shell=True with a user-supplied PAGER string is the
      # conventional pager contract, but it does execute via the shell.
      p = subprocess.Popen(pager, stdin=subprocess.PIPE, shell=True)
      encoding = console_attr.GetConsoleAttr().GetEncoding()
      p.communicate(input=contents.encode(encoding))
      p.wait()
      if less is None:
        os.environ.pop('LESS')
      return
  # Fall back to the internal pager.
  console_pager.Pager(contents, out, prompt).Run()
| [
"rbrown@insilico.us.com"
] | rbrown@insilico.us.com |
9c3f3d913908182c64a1efd02ea2c979145115ef | 9d5fd8a897ae597425e2182760103eee2bdba514 | /CNC/BiesseWorksBPP120_bA.py | df980b7bb72b3fe1616a48397b5f04cd8e20ac64 | [] | no_license | AlexandrDragunkin/ARLN_PROTO | 29bbdbafa127e65e1de62b382eda0023bf573a4c | e1b0296f91eb6534402691cbd2bfeb784e642620 | refs/heads/master | 2021-08-30T13:59:56.140407 | 2017-12-18T07:09:25 | 2017-12-18T07:09:25 | 114,336,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86,672 | py | # -*- coding: utf-8 -*-
# A3.30 ARLINE
import sys
# commonappdata=k3.mpathexpand("<commonappdata>")+'\\'
dvsyspath=['f:\\Python33\\DLLs',
'f:\\Python33\\Lib',
'f:\\Python33\\Lib\\site-packages',
#'c:\\PKM73\\Bin\\python33.zip'
#'d:\\Python26\\Lib\\site-packages\\openpyxl',
#'d:\\Python26\\Lib\\site-packages\\win32',
#'d:\\Python26\\Lib\\site-packages\\win32\\lib',
#'d:\\Python26\\Lib\\site-packages\\win32com'
]
#for tpath in dvsyspath:
#if tpath not in sys.path:
#sys.path.insert(0, tpath)
sys.path.insert(0, 'c:\\PKM73\\Bin\\Lib\\site-packages\\')
# sys.path.insert(0, 'c:\ARL7\Data\PKM\Proto\\')
# try:
# import wingdbstub
# except:
# pass
import machine
import A330AR as CurM # импортируем модуль конкретного станка
from nameProg_s import (NameForProg) # класс формирования имени файла
import UtilesN as Utiles
import math
import os
import pyodbc
from UtilesD import iif, degrees
VARIANTPATH = 1 # 0-без кромки 1-с кромкой правильнее это решать только для прямоугольника заготовки
BASEPOINT = None # Номер базы стартовый(0,1,2,3 <-для стороны A 4,5,6,7 -для стороны F )
'''Классы и функции, определенные в модуле machine'''
'''
machine.Arc
machine.Arc.bounding_box
machine.Arc.center
machine.Arc.Divide()
machine.Arc.end
machine.Arc.end_pt
machine.Arc.eval()
machine.Arc.middle_pt
machine.Arc.orientation
machine.Arc.perpendicular()
machine.Arc.radius
machine.Arc.start
machine.Arc.start_pt
machine.Arc.tangent()
machine.Arc.Transform()
machine.Attribute
machine.Attribute.name
machine.Attribute.name
machine.BoundingBox2d
machine.BoundingBox2d.max
machine.BoundingBox2d.min
machine.BoundingBox2d.size_x
machine.BoundingBox2d.size_y
machine.Circle
machine.Circle.bounding_box
machine.Circle.center
machine.Circle.Divide()
machine.Circle.eval()
machine.Circle.orientation
machine.Circle.perpendicular()
machine.Circle.radius
machine.Circle.tangent()
machine.Circle.Transform()
machine.Curve
machine.Curve.bounding_box
machine.Curve.eval()
machine.Curve.path
machine.Curve.path_id
machine.Curve.perpendicular()
machine.Curve.tangent()
machine.Curve.Transform()
machine.Curve.work_id
machine.Drilling
machine.Drilling.alfa
machine.Drilling.beta
machine.Drilling.depth
machine.Drilling.diameter
machine.Drilling.id
machine.Drilling.panel
machine.Drilling.panel_id
machine.Drilling.position
machine.error()
machine.Filletting
machine.Line
machine.Machine
machine.makedirs()
machine.Matrix2d
machine.Matrix3d
machine.message()
machine.Milling
machine.Model
machine.Operation
machine.OrderInfo
machine.Panel
machine.Path
machine.Point2d
machine.Point3d
machine.Settings
machine.Slot
machine.Slot.depth
machine.Slot.end
machine.Slot.id
machine.Slot.is_plane
machine.Slot.panel
machine.Slot.panel_id
machine.Slot.start
machine.Slot.width
machine.Vector2d
machine.Vector3d
machine.warning()
'''
vpi=3.1415926535897932384626433832795 # число Pi
eps_d = 0.001 # допустимое отклонение
# Допустимые габариты панелей
#xmax=1000
#xmin=85
#ymax=5000
#ymin=285
#zmax=50
#zmin=8
class Writer:
'''Класс, осуществляющий вывод информации из класса Boa. Ссылка на этот класс есть в классе Boa
В этом класск находятся функции, отвечающие за вывод информации о конкретных элементах в выходной файл'''
    def __init__(self):
        """Creates the BPP text templates and resets per-program output state."""
        # Here we define the textual format templates used for output.
        self.isFrontF = False
        self.header='''[HEADER]
TYPE=BPP
VER=120
'''
        self.description='''[DESCRIPTION]
|
'''
        self.variables='''[VARIABLES]
PAN=LPX|%f||4|
PAN=LPY|%f||4|
PAN=LPZ|%f||4|
PAN=ORLST|""||0|
PAN=SIMMETRY|1||0|
PAN=TLCHK|0||0|
PAN=TOOLING|""||0|
PAN=CUSTSTR|""||0|
PAN=FCN|1.000000||0|
PAN=XCUT|0||4|
PAN=YCUT|0||4|
PAN=JIGTH|0||4|
PAN=CKOP|0||0|
PAN=UNIQUE|0||0|
PAN=MATERIAL|"wood"||0|
PAN=PUTLST|""||0|
PAN=OPPWKRS|0||0|
PAN=UNICLAMP|0||0|
PAN=CHKCOLL|0||0|
PAN=WTPIANI|0||0|
'''
        self.num_tdcode=int(0) # counter for operation labels TDCODE1, TDCODE2, ... TDCODE99
        self.program="[PROGRAM]"
        self.vbscript="[VBSCRIPT]"
        self.macrodata="[MACRODATA]"
        self.tdcodes='''[TDCODES]
VER=1'''
        self.pcf="[PCF]"
        self.tooling="[TOOLING]"
        self.subprogs="[SUBPROGS]"
        # SIDE CRN X Y Z DP DIA THR RTY DX DY R A DA NPR ISO OPT AZ AR AP CKA XRC YRC ARP LRP ER MD COW A21 TOS VTR S21 ID AZS MAC TNM TTP TCL RSP IOS WSP SPI DDS DSP BFC SHP EA21 CEN AGG PRS
        self.drill= '''@ %s, "TDCODE%i", "" : %i, "%i", %f, %f, %f, %f, %f, %i, %s, %f, %f, %f, %f, %f, %i, "%s", %i, %f, %f, %i, %i, %f, %f, %f, %f, %i, %i, %i, %f, %i, %i, %i, "%s", %f, "%s", "%s", %i, %i, %i, %i, %i, "%s", %f, %i, %i, %i, %i, "%s", "%s", "%s", %i'''
        # Marker for a through hole: a diagonal cross.
        self.drill_th_geo = '''@ GEO, "TDCODE%i", "": "P1002", 0, "%i", 0, -1, 0, 0, 32, 32, 50, 0, 45, 1, 0, 0, 0, 1, 0, 0
@ START_POINT, "", "" : %i-10, %i-10, 0
@ LINC_EP, "", "" : 20, 20, 0, 0, 0, 0, 0, 0
@ START_POINT, "", "" : %i+10, %i-10, 0
@ LINC_EP, "", "" : -20, 20, 0, 0, 0, 0, 0, 0
@ ENDPATH, "", "" :'''
        # Variant with automatic drill selection by diameter and type.
        self.drill_tdcodes = '''(MST)<LN=1,NJ=TDCODE%i,TYW=1,NT=1,>
(GEN)<WT=1,DL=1,>
(TOO)<DI=%f,SP=%f,CL=0,COD=----------,RO=-1,TY=%i,>'''
        # Variant selecting the drill by an explicit COD code and type.
        self.drill_tdcodes_cod = '''(MST)<LN=1,NJ=TDCODE%i,TYW=1,NT=1,>
(GEN)<WT=1,DL=1,>
(TOO)<DI=%f,SP=%f,CL=0,COD=%s,RO=-1,TY=%i,>'''
        self.mill='''@ ROUT, "TDCODE%i", "" : "%s", %i, "%i", %f, %s, "%s", %s, %f, %s, %f, %f, %f, %f, %f, %f, %f, %s, %i, %f, %f, %f, %f, %s, %i, %s, %s, %f, %f, %s, %i, %f, %f, %i, %i'''
        self.mill_tdcodes = '''(MST)<LN=1,NJ=TDCODE%i,TYW=2,NT=1,>
(GEN)<WT=2,DL=0,>
(TOO)<DI=%f,SP=%f,CL=1,COD=FO-20-R,RO=-1,TY=102,>
(IO)<AI=45.000,AO=45.000,DA=20,DT=20,DD=0,IFD=0.00,OFD=0.00,IN=1,OUT=1,PR=0,ETCI=0,ITI=0,TLI=0.00,THI=0.00,ETCO=0,ITO=0,TLO=0.00,THO=0.00,PDI=0.00,PDO=0.00,>
(WRK)<OP=1,CO=1,HH=0.000,DR=1,PV=0,PT=0,TC=%i,DP=,SM=0,TT=0,RC=0,BD=0,SW=0,IC="",IM="",IA="",PC=0,BL=0,PU=0,EA=0,EEA=0,SP=0,AP=0,>'''
        self.startsegment = ''' START_POINT, "", "" : %f, %f, %s '''
        # XE YE ZS ZE SC FD SP MTV
        self.linesegment = ''' LINE_EP, "", "" : %f, %f, %f, %f, %s, %f, %f, %i'''
        # X2 Y2 XE YE ZS ZE SC
        self.arcsegment = ''' ARC_IPEP, "", "" : %f, %f, %f, %f, %f, %f, %i, %i, 0, 0, %s'''
        #
        self.millend = ''' ENDPATH, "", "" :'''
        #
        self.slot = '''@ CUT_X, "TDCODE%i", "" : %i, "1", %f, %f, 0, %f, %s, 0, 100, "", 1, 4, 0, 0, 0, %f, 0, 0, 0, 1'''
        # XS YS DP L repeat distance
        self.slot_tdcodes = '''(MST)<LN=1,NJ=TDCODE%i,TYW=3,NT=1,>
(GEN)<WT=3,DL=1,>
(TOO)<DI=4.0000,SP=4.0000,CL=2,COD=----------,RO=-1,TY=200,ACT=0,NCT=,DCT=5.000000,TCT=0.000000,DICT=20.000000,DFCT=80.000000,PCT=60.000000,>
(IO)<AI=0.000,AO=0.000,DA=%f,DT=%f,DD=0.000,IFD=0.00,OFD=0.00,IN=0,OUT=0,PR=0,ETCI=0,ITI=0,TLI=0.00,THI=0.00,ETCO=0,ITO=0,TLO=0.00,THO=0.00,PDI=0.00,PDO=0.00,>
(WRK)<OP=1,CO=0,HH=%f,DR=1,PV=0,PT=0,TC=0,DP=%f,SM=0,TT=0,RC=0,BD=0,SW=0,IC="",IM="",IA="",PC=0,BL=0,PU=0,EA=0.000,EEA=0,SP=0,AP=0,>
(SPD)<AF=0.00,CF=0.000,DS=0.00,FE=0.00,RT=0.00,OF=0.00,>
(MOR)<PE=0.000000,TG=0.000000,TL=0.000000,WH=0.000000,>'''
        # Each operation must fill several sections of the file; [PROGRAM] and
        # [TDCODES] are mandatory, so the output is collected into this dict.
        self.dict_section = {'[PROGRAM]':[],'[TDCODES]':[]}
        # Captions shown on the output for ARLINE.
        #@ GEOTEXT, "TDCODE0", "", 849 : "POTPIS", 0, "2", "(F)", LPX/2, LPY+70-70+200, 0, 0, 0, 0, 0, 0, 0, 0, "Century Gothic", 70, 0, 0, 0, 0, 1, 0, -1, 32, 32, 50, 0, 45, 0, 0, 0, 0, 0, 1, 1
        self.PlastName = '''@ GEOTEXT, "TDCODE%i", "" : "POTPIS", 0, "2", "(%s)", LPX/2, LPY+70-70+200, 0, 0, 0, 0, 0, 0, 0, 0, "Century Gothic", 70, 0, 0, 0, 0, 1, 0, -1, 32, 32, 50, 0, 45, 0, 0, 0, 0, 0, 1, 1'''
        #@ GEO, "TDCODE1", "", 697 : "TEXTURA", 0, "1", 0, -1, 0, 0, 32, 32, 50, 0, 45, 1, 0, 0, 0, 1, 0, 0
        #@ START_POINT, "", "", 698 : -100, -200, 0
        #@ LINE_EP, "", "", 699 : -200, -200, 0, 0, 0, 0, 0, 0, 0
        #@ LINE_EP, "", "", 700 : -200+30, -200+10, 0, 0, 0, 0, 0, 0, 0
        #@ LINE_EP, "", "", 701 : -200, -200, 0, 0, 0, 0, 0, 0, 0
        #@ LINE_EP, "", "", 702 : -200+30, -200-10, 0, 0, 0, 0, 0, 0, 0
        #@ ENDPATH, "", "", 703 :
        self.TextureDir_0 = '''@ GEO, "TDCODE%i", "" : "TEXTURA", 0, "1", 0, -1, 0, 0, 32, 32, 50, 0, 45, 1, 0, 0, 0, 1, 0, 0
@ START_POINT, "", "": -100, -200, 0
@ LINE_EP, "", "" : -200, -200, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : -200+30, -200+10, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : -200, -200, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : -200+30, -200-10, 0, 0, 0, 0, 0, 0, 0
@ ENDPATH, "", "" : '''
        self.TextureDir_90 = '''@ GEO, "TDCODE%i", "" : "TEXTURA", 0, "1", 0, -1, 0, 0, 32, 32, 50, 0, 45, 1, 0, 0, 0, 1, 0, 0
@ START_POINT, "", "": -100, -200, 0
@ LINE_EP, "", "" : -100, -100, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : -100-10, -100-30, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : -100, -100, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : -100+10, -100-30, 0, 0, 0, 0, 0, 0, 0
@ ENDPATH, "", "" : '''
        # Edge-band outline marker for one panel side.
        self.BandSide = '''@ GEO, "TDCODE%i", "" : "KROMKA", 0, "%i", 0, -1, 0, 0, 32, 32, 50, 0, 45, 1, 0, 0, 0, 1, 0, 0
@ START_POINT, "", "" : %s, %s, 0
@ LINE_EP, "", "" : %s, %s, 0, 0, 0, 0, 0, 0, 0
@ LINE_EP, "", "" : %s, %s, 0, 0, 0, 0, 0, 0, 0
@ RECTANGLE, "", "" : %s, %s, %s, %s, 1, 0, 0, 1, HALF, 0, 0, 0, 0, 0, 0, 1, 1
@ ENDPATH, "", "" :
'''
        self.BandSidesSimbol = '''@ GEOTEXT, "TDCODE%i", "" : "KROMKA", 0, "%i", "G%i", %s, %s, 0, 0, 0, 0, 0, 0, 0, 0, "Century Gothic", 70, 0, 0, 0, 0, 1, 0, -1, 32, 32, 50, 0, 45, 0, 0, 0, 0, 0, 1, 1
'''
    # Methods that perform the output follow.
    def DrillingCommand(self, num, drill):
        '''Writes hole (drilling) information to the output file.
        num - sequence number of the hole (currently unused)
        drill - the drilling operation object
        Returns True when the hole was written, False otherwise.
        '''
        result_DrillingCommand = False
        self.num_tdcode +=1 # advance the TDCODE counter
        # x, y, z - hole coordinates in the panel coordinate system
        x=drill.position.x
        y=drill.position.y
        z=drill.position.z
        # d, h - hole diameter and depth, respectively
        d=drill.diameter
        h=drill.depth
        # alpha - angle between Ox and the projection of the axis onto Oxy
        # (hole angle as projected onto the panel face); currently unused
        alpha=drill.alfa
        # beta - angle between Oz and the axis (axis angle to the panel face);
        # currently unused
        beta=drill.beta
        thicknessPanel=drill.panel.thickness
        ## Set coordinates depending on the panel side
        side=Utiles.GetDrillPlane(drill)
        isTrough = Utiles.GetDrillTrough(drill)
        # b=Boa() # (dropped) instantiating Boa per hole just to call the function
        siden=Boa.GetBiesseSideNum(self, side)
        bb = drill.panel.bounding_box
        Xpanel=bb.max.x-bb.min.x
        Ypanel=bb.max.y-bb.min.y
        crn=4
        # Check whether this hole can be machined in the current BASEPOINT
        # position (drill.list_true_posits holds the feasible setups).
        if machine.constraints.d_trueposits[BASEPOINT] in drill.list_true_posits:
            # Remap (x, y, z) and pick the corner reference CRN per side.
            if (siden==0):
                crn=4
            if (siden==5): # i.e. there is no bottom-side drilling here
                crn=1
            if (siden==1):
                crn=2
                x=y
                y=z
            if (siden==2):
                crn=4
                y=z
            if (siden==3):
                crn=3
                x=y
                y=z
            if (siden==4):
                crn=1
                y=z
                z=0
            # Map tool-type codes to Biesse TY values.
            typeinstr = {'TH':1,'BL':0,'':1,'SV':2, }
            if 'typ_cod_tool' in drill.__dict__.keys():
                pass
                #if d.typ_cod_tool = mc.typ_cod_tool
                #d.cod_tool
                # Explicit tool code supplied on the drilling: use it directly.
                self._Drill(0, self.num_tdcode, x,y,z,siden,h,d, isTrough, crn, typeinstr=drill.typ_cod_tool, COD=drill.cod_tool)
            else:
                self._Drill(1, self.num_tdcode, x,y,z,siden,h,d, isTrough, crn, typeinstr=typeinstr[drill.typetool])
            drill.cnc_key = True # flag: the hole has been written to the file
            result_DrillingCommand=True
        # if not result_DrillingCommand:
        # print(machine.constraints.d_trueposits[BASEPOINT])
        # print(drill.list_true_posits)
        # print(drill.typetool)
        # print(drill.diameter)
        # print(dir(drill))
        # print([arg for arg in dir(drill) if not arg.startswith('_')])
        # if drill.typetool=='BL' and drill.diameter==8.:
        # print(machine.constraints.d_trueposits[BASEPOINT])
        # print(drill.list_true_posits)
        return result_DrillingCommand
    def _Drill(self, v_comand, num_tdcode, x,y,z,side,h,diam,isTrough,crn=4, typeinstr=0 ,thr=0,rty="rpNO",dx=32,dy=32,
               r=50,a=0,da=45,npr=0,iso="",opt=1,az=0,ar=0, ap=0,cka=0,xrc=0,yrc=0,arp=0,lrp=0,er=1,md=0,cow=0,
               a21=0,tos=0,vtr=0,s21=-1,id="",azs=0,mac="",tnm="",ttp=0,tcl=0, rsp=0,ios=0,wsp=0,spi="",dds=0,dsp=0,
               bfc=0,shp=0,ea21=0,cen="",agg="",rps=0, COD="----------"):
        """Formats one drilling record (BV vertical / BH horizontal) and writes
        it to the BPP [PROGRAM] stream plus the matching [TDCODES] entry.
        For through holes (thr=1) an extra cross marker is drawn.
        """
        if (side==0 or side==5):
            # Side 5 (bottom face) is emitted as side 0 with the corner
            # reference mirrored via this CRN remap table.
            dCRN = {1: 4, 4: 1, 2: 3, 3: 2}
            b="BV" # vertical drilling
            if isTrough:
                h = 5
                thr = 1
            if side == 5:
                side = 0
                crn = dCRN[crn]
            else:
                # Depth/tool-type tweaks for blind top-face holes.
                if h > 3 and side == 0:
                    if diam == 35:
                        if h<10:
                            pass
                        else:
                            # Cap hinge-cup (d35) depth at 13 mm.
                            h = 13
                    # else:
                    #     h = 12.5 # for all blind face holes; agreed with Stas 2014/12/17
                    if diam==10. :
                        typeinstr = 2 # only the combined SVASTA tool exists for d10
                        h = 14
                if h<=3 and diam==5:
                    typeinstr = 1 # shallow d5: through type used for spot marking
        else:
            b="BH" # horizontal drilling
        ty = typeinstr
        if thr == 1:
            pass  # no-op placeholder (through-hole handling is done below)
        print(self.drill % (b,num_tdcode,side,crn,x,y,z,h,diam,thr,rty,dx,dy,r,a,da,npr,iso,opt,az,ar,ap,cka,xrc,yrc,arp,lrp,er,md,cow,a21,tos,vtr,s21,id,azs,mac,tnm,ttp,tcl,rsp,ios,wsp,spi,dds,dsp,bfc,shp,ea21,cen,agg,b,rps), file=self.OutputFile)
        self.dict_section['[PROGRAM]'].append((self.drill,(b,num_tdcode,side,crn,x,y,z,h,diam,thr,rty,dx,dy,r,a,da,npr,iso,opt,az,ar,ap,cka,xrc,yrc,arp,lrp,er,md,cow,a21,tos,vtr,s21,id,azs,mac,tnm,ttp,tcl,rsp,ios,wsp,spi,dds,dsp,bfc,shp,ea21,cen,agg,b,rps)))
        # Pick the [TDCODES] variant: automatic tool selection vs explicit COD.
        if COD=="----------":
            self.dict_section['[TDCODES]'].append((self.drill_tdcodes,(num_tdcode,diam, diam,ty)))
        else:
            self.dict_section['[TDCODES]'].append((self.drill_tdcodes_cod,(num_tdcode,diam, diam, COD, ty)))
        # For a through hole, draw the diagonal-cross marker geometry.
        if thr == 1:
            self.num_tdcode +=1 # advance the TDCODE counter
            print(self.drill_th_geo % (self.num_tdcode, crn, x, y, x, y), file=self.OutputFile)
            pass
        return
    def SlotCommand(self, num, slot):
        ''' Writes groove (saw cut) information for the panel.
        num - sequence number of the groove (currently unused)
        slot - the groove operation'''
        self.num_tdcode +=1 # advance the TDCODE counter
        #bb = slot.panel.bounding_box
        bb = slot.panel.paths[0].bounding_box
        Xpanel = bb.max.x - bb.min.x
        Ypanel = bb.max.y - bb.min.y
        # Clamp groove start/end X into [0, Xpanel].
        ss = slot.start.x if slot.start.x > eps_d else 0
        ss = ss if ss<Xpanel else Xpanel
        se = slot.end.x if slot.end.x > eps_d else 0
        se = se if se<Xpanel else Xpanel
        w_tool = 4.0  # saw blade width, mm
        #print 'ss se xpanel', ss, se , Xpanel
        slot_num_width = slot.width/w_tool # number of tool widths in the requested groove
        # Only pass an explicit width when the groove is wider than the blade.
        width = slot.width if slot_num_width > (1 + eps_d) else 0
        y_st = slot.start.y
        # Convert to the machine's X origin (measured from the far edge).
        x_ss=Xpanel-min(Xpanel,max(ss,se))
        x_se=abs(ss-se)
        # Keep at least 3 mm from the panel edges; full-length cuts are
        # expressed parametrically as 'lpx-3'.
        if abs(x_ss)<3:
            x_ss=3
        if abs(x_se-Xpanel)<3:
            x_se='lpx-3'
        else:
            x_se=str(x_se)
        # Lead-in/lead-out distances for the [TDCODES] (IO) record.
        dt = 45 if (x_ss < 3) else 0
        da = 0 if x_se.count('lpx-3')==1 or (se<Xpanel-3.1 and se>3) else 0
        # Skip grooves narrower than the blade.
        if slot_num_width >= 1 - eps_d:
            self._SlotByCoords(self.num_tdcode, x_ss, y_st, slot.depth, x_se, width, da, dt)
        return
def _SlotByCoords(self, num_tdcode, xbeg, y, depth, xend, width, da, dt):
print(self.slot % (num_tdcode, 0, xbeg, y, depth, xend, width), file=self.OutputFile)
self.dict_section['[TDCODES]'].append((self.slot_tdcodes,(num_tdcode, da, dt, width, depth)))
return
def geoBandCommand(self, *kwards):
'''
Создает указатель на кромку по стороне панели
'''
self.num_tdcode +=1 # Увеличиваем значение счетчика на 1
print(self.BandSide % (self.num_tdcode , kwards[0], kwards[1], kwards[2], kwards[3], kwards[4], kwards[5], kwards[6], kwards[7], kwards[8], kwards[9], kwards[10] ), file=self.OutputFile)
return
def geoBandSimbolCommand(self, *kwards):
'''
Создает указатель на кромку по стороне панели
'''
self.num_tdcode +=1 # Увеличиваем значение счетчика на 1
print(self.BandSidesSimbol % (self.num_tdcode , kwards[0], kwards[1], kwards[2], kwards[3]), file=self.OutputFile)
return
def geoDirTextureCommand(self):
'''
Создает указатель вектора направления текстуры
'''
self.num_tdcode +=1 # Увеличиваем значение счетчика на 1
if self.texture_dir_value == 0:
print(self.TextureDir_0 % (self.num_tdcode ), file=self.OutputFile)
else:
print(self.TextureDir_90 % (self.num_tdcode ), file=self.OutputFile)
#self.dict_section['[PROGRAM]'].append((self.PlastName, (self.num_tdcode, SideName)))
return
def geoPlastNameCommand(self):
'''
Создает символ обрабатываемой пласти
'''
self.num_tdcode +=1 # Увеличиваем значение счетчика на 1
nameS = 'Лицо'+self.NameSide
# print(self.isFrontF)
# print(self.NameSide[0])
result = (self.isFrontF, self.NameSide[0]=='A')
# print(result)
if result==(True,True) or result==(False,False):
nameS = 'Тыл'+self.NameSide
# print(self.PlastName % (self.num_tdcode, self.NameSide), file=self.OutputFile)
print(self.PlastName % (self.num_tdcode, nameS), file=self.OutputFile)
return
    def MillingCommand(self, num, segment, panel, isouter=True, last=False,startp = True, depth=0,corr=1):
        '''Emit one milling segment of a panel contour.
        num     - ordinal number of the segment within the contour
        segment - the milling segment (line or arc)
        panel   - the panel being machined
        isouter - True when the segment belongs to the outer contour
        startp  - True when this segment opens the tool path (start point emitted)
        last    - True when this is the final segment of the contour
        depth   - cut depth; depth > 0 marks a through cut
        corr    - tool radius compensation side passed to the ROUT record
        Side effect: appends '$' to self.NameSide once when a through cut occurs.
        '''
        self.num_tdcode +=1 # Increase the TDCODE counter by 1
        Xpath,Ypath= self.panelLength, self.panelWidth
        # Mirror segment points into machine coordinates.  The ".r" accessor was
        # added for K3 version 6.5 - required for segment reversal.
        xbeg=Xpath-segment.r.start_pt.x
        ybeg=Ypath-segment.r.start_pt.y
        xend=Xpath-segment.r.end_pt.x
        yend=Ypath-segment.r.end_pt.y
        if depth>0:
            thr = 1
            # Mark the side name with '$' exactly once (the face is cut through)
            if self.NameSide.count('$')==0:
                self.NameSide = self.NameSide+'$'
        else:
            thr = 0
        # Through cuts use the parametric depth expression 'LPZ+FL'
        z = 'LPZ+FL' if thr==1 else str(abs(depth)) #abs(depth)
        if (num==1):
            # First segment opens the ROUT operation for this contour
            self._MillOperation(self.num_tdcode,corr,"P_"+str(self.num_tdcode),0,2,0,z, thr = 0)
        if startp:
            self._MillStartSeg(xbeg,ybeg,z)
        if (type(segment)==machine.Line):
            self._MillLineSeg(xend,yend)
        if (type(segment)==machine.Arc):
            x3 = Xpath-segment.middle_pt.x
            y3 = Ypath-segment.middle_pt.y
            self._MillArcSeg(x3,y3,xend,yend)
        if (last==True):
            self._MillEnd()
        return
    # Sample of the record this method produces:
    #@ ROUT, "TDCODE17", "", 53516480 : "P_9", 0, "2", 0, 0, "", 1, 18, -1, 0, 0, 5, 5, 50, 0, 45, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1
    def _MillOperation(self,num_tdcode,corr,id,side,crn,z,dp,dia=20,iso="",opt="YES",rty="rpNO",xrc=0,yrc=0,dx=5,dy=5,r=50,a=0,da=45, rdl="YES",nrp=0,az=0,ar=0,zs=0,ze=0,cka="azrNO",thr=1,rv="NO",ckt="NO",arp=0,lpr=0,er="NO",cow=0,ovm=0,
        a21=0,tos=0,vtr=0):
        '''Open a ROUT (milling) operation record and register its TDCODE.
        num_tdcode - current TDCODE counter value; corr - compensation side;
        the remaining parameters mirror the fields of the ROUT record shown
        in the sample comment above.'''
        print(self.mill % (num_tdcode,id,side,crn,z,dp,iso,opt,dia,rty,xrc,yrc,dx,dy,r,a,da,rdl,nrp,az,ar,zs,ze,cka,thr,rv,ckt,arp,lpr,er,cow,ovm,a21,tos,vtr), file=self.OutputFile)
        self.dict_section['[TDCODES]'].append((self.mill_tdcodes,(num_tdcode,20, 20,corr)))
        return
def _MillStartSeg(self,x,y,z):
print(self.startsegment % (x,y,z), file=self.OutputFile)
return
def _MillLineSeg(self,x,y,zs=0,ze=0,sc="scOFF",fd=0,sp=0,mtv=0):
print(self.linesegment % (x,y,zs,ze,sc,fd,sp,mtv), file=self.OutputFile)
return
def _MillArcSeg(self,x2,y2,xe,ye,zs=0,ze=0,fd=0,sp=0,sc="scOFF"):
print(self.arcsegment % (x2,y2,xe,ye,zs,ze,fd,sp,sc), file=self.OutputFile)
return
def _MillEnd(self):
print(self.millend, file=self.OutputFile)
return
def DataHeadCommand(self, x, y, z):
'''Вывод информации в заголовок файл'''
print(self.header, file=self.OutputFile)
print(self.variables % (x,y,z), file=self.OutputFile)
return
def BeginProgram(self):
print(self.program, file=self.OutputFile)
def BeginVBScript(self):
print(self.vbscript, file=self.OutputFile)
def BeginMacroData(self):
print(self.macrodata, file=self.OutputFile)
def BeginTDCodes(self):
print(self.tdcodes, file=self.OutputFile)
def writerTDCodesCommand(self):
for tdc in self.dict_section['[TDCODES]']:
print(tdc[0] % tdc[1], file=self.OutputFile)
def BeginPCF(self):
print(self.pcf, file=self.OutputFile)
def BeginTooling(self):
print(self.tooling, file=self.OutputFile)
def BeginSubProgs(self):
print(self.subprogs, file=self.OutputFile)
def CreateOutputFile(self,FileName):
'''Открываем на запись файл с именем FileName'''
self.fname=FileName
self.OutputFile = open(FileName, 'w')
return
class Boa(machine.Machine, NameForProg):
    '''
    The top-level generator class: its methods are invoked by the K3 CNC
    framework when the generator runs.  The class name is referenced from the
    machine settings module (*_settings).  It derives from machine.Machine,
    implemented in Machine.pyd.  The callbacks fire in this order:
    StartProcessing(model)   - once for the whole model (database)
    StartPanel(panel)        - for every panel
    Drilling(drill)          - for every hole of the panel
    Slot(slot)               - for every saw groove of the panel
    Milling(mill,curve)      - for every milling operation of the panel
    Filletting(fillet,curve) - for every edge-banding operation of the panel
    EndPanel()               - for every panel
    EndProcessing()          - once for the whole model (database)
    '''
    def __init__(self):
        '''Class constructor: initializes per-run state and machine settings.'''
        machine.Machine.__init__(self) # Calling the base-class constructor is mandatory here
        '''
        Machine - Класс, генерирующий командные файла для станка
        Метода класса:
        StartProcessing(model) - Метод вызывается один раз перед началом генерации командных файлов
        EndProcessing() - Метод вызывается после генерации командных файлов
        StartPanel(panel) - Начало обработки панели
        EndPanel() - Конец обработки панели
        Drilling(drill) - Сверловка панели
        Slot(slot) - Пропил панели
        Milling(mill,curve) - Фрезеровка панели
        Filletting(fillet,curve) - Кромковка
        '''
        self.settings = machine.Settings() # Obtain the Settings object of the application
        '''
        Settings - Класс предоставляет доступ к настройкам приложения, например размер рабочей области станка.
        Свойства класса:
        machine_name - Имя класса, который будет генерировать команды для станка.
        machine_module_name - Имя файла в котором находится реализация станка.
        database_name - Путь к базе данных выгрузки.
        cmdfile_path - Имя генерируемого командного файла.
        working_area - BoundingBox2d определяющий рабочую область станка.
        '''
        machine.constraints = CurM.addSettingsToMachine(self.settings)
        # print(machine.constraints.xmax_constr)
        # self.writer = Writer() # Класс для вывода информации в файл
        # Initial attribute values
        self.panelThickness = 16 # Panel thickness
        self.panelWidth = 0 # Panel width
        self.panelLength = 0 # Panel length
        self.panelNum = -1 # Panel number (value of the CommonPos attribute)
        self.fname = "" # NC program file name
        self.panelName = "" # Panel name
        self.isProcessing = False # True when the panel has machining (drilling, milling, grooves); otherwise there is no point in writing a file
        self.millingTech = [] # List of contours to machine
        self.bandPunktion = {} # Segments with edge band that belong to the panel outline
        self.isBlindHoleF = False # blind holes on face F
        self.isBlindHoleA = False # blind holes on face A
        self.isCutsA = False # blind cut-outs on face A
        self.isCutsF = False # blind cut-outs on face F
        self._overt_basepoint = {0: 5, 1: 4, 2: 7, 3: 6, 4: 1, 5: 0, 6: 3, 7: 2} # Base-point number after flipping the panel (0<->5, 1<->4, 2<->7, 3<->6)
        self.structureOK = True
        self.writer = Writer() # Object that writes the program data to the file
        self.writer.isFrontF = False
def CreateCmdfile(self,post):
'''Создаем файл с управляющей программой
post - имя файла'''
fname=post+".bpp"
self.cmdfileName = self.GetCmdfilePath()
self.cmdfileName +=fname
self.writer.CreateOutputFile(self.cmdfileName)
return
    def GetCmdfilePath(self):
        '''Return the output folder path from settings, creating the folder
        on disk if it does not exist yet.'''
        path = self.settings.cmdfile_path
        try:
            machine.makedirs(path)
        except OSError:
            pass  # the folder already exists
        return path
def GetBiesseSideNum(self,sideName):
'''Вернуть номер стороны в формате BiesseWorks'''
if (sideName=="A"):
return 0
if (sideName=="F"):
return 5
if (sideName=="E"):
return 2
if (sideName=="B"):
return 3
if (sideName=="D"):
return 4
if (sideName=="C"):
return 1
return -1
def CreateCmdfileHeading(self):
'''Записываем общий заголовок в командный файл'''
self.writer.DataHeadCommand(self.panelLength,self.panelWidth,self.panelThickness)
return
    def CreateCmdfileProgram(self):
        '''Write the program body into the command file.
        Returns True if at least one machining section (milling, groove or
        drilling) was produced.  NOTE: the emission order below is deliberate -
        through cut-outs may only run after all grooves and drillings.'''
        self.revis_trueposits_brosers_drill()
        self.writer.BeginProgram()
        result_conturs_1 = self.writerContours(self.millingTech) # Only the outer contour and feasible blind cut-outs are expected here
        result_slots = self.writerSlotsCommand(self.slots)
        result_drilling = self.writerDrillingCommand(self.drills)
        result_conturs_2 = self.writerContours(self.millingTech) # Through cut-outs must be machined only after all grooves and drillings are done
        self.writer.geoPlastNameCommand()
        self.writer.geoDirTextureCommand()
        return result_conturs_1 or result_conturs_2 or result_slots or result_drilling
def CreateCmdfileVBScript(self):
'''Записываем скриптовые данные в командный файл'''
self.writer.BeginVBScript()
return
def CreateCmdfileMacroData(self):
'''Записываем макроданные в командный файл'''
self.writer.BeginMacroData()
return
def CreateCmdfileTDCodes(self):
'''Записываем TD коды в командный файл'''
self.writer.BeginTDCodes()
self.writer.writerTDCodesCommand()
return
def CreateCmdfilePCF(self):
'''Записываем PCF в командный файл'''
self.writer.BeginPCF()
return
def CreateCmdfileTooling(self):
'''Записываем инструменты в командный файл'''
self.writer.BeginTooling()
return
def CreateCmdfileSubProgs(self):
'''Записываем подпрограммы в командный файл'''
self.writer.BeginSubProgs()
return
def StartProcessing(self,model):
'''Метод запускается автоматически перед началом обработки панелей'''
# ptvsd.enable_attach(None)
# ptvsd.wait_for_attach(60)
pass
def EndProcessing(self):
'''Метод запускается автоматически после окончания обработки панелей'''
print(" ")
print('Модуль CNC завершил работу')
print('Программы ЧПУ находятся в папке:')
print(' ',self.settings.cmdfile_path)
return
    def writeOutputFile(self, postfix,hashsimbol=''):
        '''Write the accumulated data of the current panel into a .bpp file.
        postfix    - start-base suffix used in the file name
        hashsimbol - setup counter mark appended to the name
        Returns True when a program file was actually produced, False when the
        panel is not selected or nothing remained to be machined (any partial
        file is then deleted by closeCmdfile).'''
        # print("writeOutputFile")
        # Build the program file name
        #toInt = lambda x: str(int(x) if abs(x-int(x)) < 0.2 else round(x, 1))
        #count_common_pos = self.getCountPanels(toInt)
        ##---------------------
        #nmPan = 'Неопределенный материал' if self.panel.nomenclature is None else self.panel.nomenclature.name
        #dnmth=(toInt(self.panel.thickness))+'-'
        #if nmPan.count(toInt(self.panel.thickness))>0:
        #if nmPan.index(toInt(self.panel.thickness))==0:
        #dnmth=''
        #index = dnmth + nmPan + '_' + toInt(self.panel.size.size_x) + 'x' + toInt(self.panel.size.size_y) + 'x' + toInt(self.panel.thickness) + '_' + str(self.panelNum if self.twins is None else self.twins)+postfix+'_'+hashsimbol + '_(' + toInt(count_common_pos) + ')'+'-NEW_VAR'
        ##'('+self.NumOrder+')_' + str(self.panelNum if self.twins is None else self.twins)+postfix+'_'+hashsimbol
        #index = index.replace(' ', '_')
        #index = index.replace('/', '_')
        #index = index.replace('\\', '_')
        # for m in self.millingTech:
        #     if not m.cnc_key:
        #         print("m=",m.cnc_key)
        # for d in self.drills:
        #     if not d.cnc_key:
        #         print("d=",d.cnc_key)
        # On every pass, check whether any operations are still unprocessed
        # isSlots=True if (False, True) in [(s.cnc_key,s.is_plane) for s in self.slots] else False
        # isMills=True if True in [m.cnc_key for m in self.millingTech] else False
        # isDrills=True if True in [d.cnc_key for d in self.drills] else False
        # print("isSlots=",isSlots,";isMills=",isMills,";isDrills=",isDrills,)
        index = self.name_prog_calc( machine,eps_d,postfix, hashsimbol)
        # Skip output when the panel is not selected, or when every milling
        # contour, planar groove and drilling has already been emitted.
        if (self.panelNum==-1 or
            not ((False in [m.cnc_key for m in self.millingTech]) or
                 ((False, True) in [(s.cnc_key,s.is_plane) for s in self.slots]) or
                 (False in [d.cnc_key for d in self.drills]))):
        # if self.panelNum==-1 or (isSlots or isMills or isDrills):
            resOutput = False
        else:
            # Create the file
            self.CreateCmdfile(index)
            # Write the file header
            self.CreateCmdfileHeading()
            # Write all sections into the file, one by one
            result = self.CreateCmdfileProgram()
            self.CreateCmdfileVBScript()
            self.CreateCmdfileMacroData()
            self.CreateCmdfileTDCodes()
            self.CreateCmdfilePCF()
            self.CreateCmdfileTooling()
            self.CreateCmdfileSubProgs()
            self.writer.panelName=self.panelName
            if result:
                print("Панель № ",self.panelNum, " Запись в файл",index,".bpp")
                self.writer.OutputFile.close()
                resOutput = True
            else:
                # Nothing was machined: delete the partially written file
                resOutput = self.closeCmdfile()
        return resOutput
    def getCountPanels(self, toInt):
        '''Return how many elements share this panel's CommonPos value.
        toInt - formatter rendering a numeric attribute as its SQL literal.
        NOTE(review): the SQL is assembled by string concatenation; the value
        comes from the model itself (numeric common_pos), so injection risk is
        low, but a parameterized query would be safer if odbc_conn supports it.'''
        SQL_STR='''SELECT Count(TElems.CommonPos) AS [Count-CommonPos] FROM TPanels INNER JOIN TElems ON TPanels.UnitPos = TElems.UnitPos GROUP BY TElems.CommonPos HAVING TElems.CommonPos=''' + toInt(self.panel.common_pos)
        cnxn, cursor, recordset = self.odbc_conn(SQL_STR)
        count_common_pos = recordset[0][0]
        return count_common_pos
def closeCmdfile(self):
fn = self.writer.OutputFile.name
self.writer.OutputFile.close()
os.unlink(fn)
resOutput = False
return resOutput
    def rotatePanel(self):
        '''Rotate the panel 90 degrees counter-clockwise, shift it back into the
        base corner and update all orientation-dependent bookkeeping
        (accumulated angle, texture direction, global BASEPOINT, drill data).'''
        def _aposition(self):
            # Accumulate the rotation angle in degrees (wraps after 360)
            if self.aposition < 360:
                self.aposition += 90
            else:
                self.aposition = 0
        self.bandPunktion = {}
        # Texture direction mapping after a 90-degree rotation
        dChtextureDir = {0: 90, 90: 0, -90: 0, 270: 0, 180: 90, -180: 90, 360: 90, -360: 90,}
        b = self.findPathSize(VARIANTPATH) #self.panel.bounding_box
        self.panel.Translate(machine.Vector2d(-b.min.x,-b.min.y))
        if (self.Selobj != 0): # If the panel is selected
            pass
        self.panel.Rotate(vpi/2,machine.Point2d(0,0))
        _aposition(self)
        # After rotating, translate the panel back into the positive quadrant
        b = self.findPathSize(VARIANTPATH)
        Xpanel,Ypanel= self.getXY_bounding_box(b)
        self.panel.Translate(machine.Vector2d(Xpanel,0))
        b = self.findPathSize(VARIANTPATH)
        Xpanel,Ypanel= self.getXY_bounding_box(b)
        self.panel.Translate(machine.Vector2d(-b.min.x,-b.min.y))
        self.panelWidth = Ypanel #self.panel.panel_width
        self.panelLength = Xpanel #self.panel.panel_length
        self.writer.panelWidth = self.panelWidth
        self.writer.panelLength = self.panelLength
        self.panel.textureDir = dChtextureDir[round(self.panel.textureDir , 1)]
        self.writer.texture_dir_value = self.panel.textureDir
        global BASEPOINT
        # Advance the base point: 0->1->2->3->0 (or 4->5->6->7->4 on the back face)
        BASEPOINT = BASEPOINT + 1 if BASEPOINT not in [3, 7] else BASEPOINT - 3
        mc = machine.constraints
        # Re-evaluate machine-dependent drill attributes in the new orientation
        for d in self.drills:
            self.drill_revisia(d, mc)
def drill_revisia(self, d, mc):
# Side=Utiles.GetDrillPlane(d)
CurM.change_MinMaxXY_Space(mc, d)
d.typetool=mc.typetool
list_property_mc = list(mc.__dict__.keys())
if 'typ_cod_tool' in list_property_mc:
d.typ_cod_tool = mc.typ_cod_tool
d.cod_tool = mc.cod_tool
    def StartPanel(self, panel):
        # print("StartPanel")
        '''Called automatically before machining of each panel starts.
        Resets all per-panel state, reads the control attributes off the panel
        and collects its contours.  Returns False when the panel is skipped.'''
        self.aposition = 0
        # Tracks the files generated per starting base point
        self.start_optimist = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: []}
        self.millingTech = []
        self.bandPunktion = {}
        self.drills=[]
        self.slots = [] # Saw grooves
        self.workingsides=[] # Panel sides that will need machining
        self.writer=Writer()
        self.isSlotsF = False
        self.isSlotsA = False
        self.isSlotsY = False
        self.isBlindHoleF = False
        self.isBlindHoleA = False
        self.isCutsA = False
        self.isCutsF = False
        self.gnumdet = 0
        self.fname="" # NC program file name
        self.panelName=panel.name
        self.panel = panel
        self.NumOrder = ''
        self.Selobj = 0
        self.twins = None # string listing the twin panels
        self.hashsimbol=0
        self.panel.textureDir = self.panel.texture_dir
        # self.writer.isFrontF = False
        # Pull the control attributes off the panel
        for a in self.panel.attributes:
            if a.name=='Selobj':
                self.Selobj = int(a.value)
            if a.name=='NumOrder':
                self.NumOrder = str(a.value)
            if a.name=='ListTwins':
                self.twins = str(a.value)
            if a.name == 'GDetNumber':
                self.gnumdet = str(a.value)
            if a.name=='FrontF':
                self.writer.isFrontF = True if int(a.value) == 1 else False
        # print('startpanel_isFrontF=',self.writer.isFrontF)
        # self.writer=Writer()
        global BASEPOINT
        BASEPOINT = 0 # Starting base-point number (0..3 for face A, 4..7 for face F)
        if (self.Selobj == 0): # Not a selected panel - no point in machining it
            return False
        # (Rotating the panel long-side-along-X here would be wrong: it may only
        # be done after all operations have been read, i.e. in EndPanel, where
        # the variant with the minimum number of setups is chosen.)
        b = self.findPathSize(VARIANTPATH)
        if isinstance(b, list):
            if len(b) == 0:
                self.structureOK = False
                print( '\n !!!!ОШИБКА в структуре панели!!!!!')
                print(('См. панель UnitPos= '+ str(self.panel.id.handle)))
                print('--------------------------------------------')
                return False
        Xpanel,Ypanel= self.getXY_bounding_box(b)
        self.panel.Translate(machine.Vector2d(-b.min.x, -b.min.y))
        self.panelWidth = Ypanel #self.panel.panel_width
        self.panelLength = Xpanel #self.panel.panel_length
        self.writer.panelWidth = self.panelWidth
        self.writer.panelLength = self.panelLength
        # NOTE(review): panels thinner than 5 mm get +6 added to the thickness -
        # presumably machined on a backing board; confirm with the operators.
        self.panel.panelThickness = panel.thickness + 6 if panel.thickness<5 else panel.thickness
        self.panelThickness = panel.thickness + 6 if panel.thickness<5 else panel.thickness
        self.panelNum = panel.common_pos if self.gnumdet == 0 else self.gnumdet
        # Collect the contours
        self.Contour()
        return True
    def writerContours(self, pathsTuple):
        '''Prepare the milling contours for output.
        pathsTuple - list of contours to machine.
        Only the outer contour and feasible blind cut-outs are expected on the
        first call; through cut-outs run only after all grooves and drillings
        (the caller invokes this method twice, see CreateCmdfileProgram).
        Returns True when at least one contour was written.'''
        result = False
        outer = True
        lFun = lambda v1, v2: abs(v1-v2)
        for p in pathsTuple:
            pCuts = 'cuts' in list(p.__dict__.keys())
            # Postpone inner cut-outs while drillings are still unprocessed
            if (not outer) and (not pCuts) and ([a.cnc_key for a in self.drills].count(False)>0):
                continue
            # First pass over the segments: pick up the edge-band thickness and
            # emit the band markers for segments lying on the panel outline
            for segment in p.segments:
                for sw in segment.works:
                    if type(sw) == machine.Filletting:
                        segment.band = sw.nomenclature.values['Dept']
                        if segment.path.id.handle == 1:
                            maxPathX = segment.path.bounding_box.max.x
                            maxPathY = segment.path.bounding_box.max.y
                            minPathX = segment.path.bounding_box.min.x
                            minPathY = segment.path.bounding_box.min.y
                            if (abs(segment.start_pt.x-segment.end_pt.x ) < 0.1 and abs(segment.start_pt.x - maxPathX) < 0.1): # This segment corresponds to side "C"
                                if 'C' not in list(self.bandPunktion.keys()):
                                    self.bandPunktion['C'] = segment
                                    self.writer.geoBandCommand(1, '0', 'LPY/2', '-50', 'LPY/2+40', '-50-140', 'LPY/2+40', '-1', 'LPY/2', '2', 'LPY')
                                    self.writer.geoBandSimbolCommand(2, segment.band, '-(140/2+50)', 'LPY/2-20')
                            if (abs(segment.start_pt.x-segment.end_pt.x ) < 0.1 and abs(segment.start_pt.x - minPathX) < 0.1): # This segment corresponds to side "B"
                                if 'B' not in list(self.bandPunktion.keys()):
                                    self.bandPunktion['B'] = segment
                                    self.writer.geoBandCommand(4, '0', 'LPY/2', '-50', 'LPY/2+40', '-50-140', 'LPY/2+40', '-1', 'LPY/2', '2', 'LPY' )
                                    self.writer.geoBandSimbolCommand(2, segment.band, 'LPX+140/2+50', 'LPY/2-20')
                            if (abs(segment.start_pt.y-segment.end_pt.y ) < 0.1 and abs(segment.start_pt.y - minPathY) < 0.1): # This segment corresponds to side "D"
                                if 'D' not in list(self.bandPunktion.keys()):
                                    self.bandPunktion['D'] = segment
                                    self.writer.geoBandCommand(2, 'LPX/2', 'LPY', 'LPX/2-140/2', 'LPY+50', 'LPX/2-140/2+140', 'LPY+50', 'LPX/2', 'LPY+1', 'LPX', '2' )
                                    self.writer.geoBandSimbolCommand(2, segment.band, 'LPX/2', 'LPY+70')
                            if (abs(segment.start_pt.y-segment.end_pt.y ) < 0.1 and abs(segment.start_pt.y - maxPathY) < 0.1): # This segment corresponds to side "E"
                                if 'E' not in list(self.bandPunktion.keys()):
                                    self.bandPunktion['E'] = segment
                                    self.writer.geoBandCommand(1, 'LPX/2', 'LPY', 'LPX/2-140/2', 'LPY+50', 'LPX/2-140/2+140', 'LPY+50', 'LPX/2', 'LPY+1', 'LPX', '2' )
                                    self.writer.geoBandSimbolCommand(2, segment.band, 'LPX/2', '-70-70')
                        #print('----Кромка толщиной ',segment.band, ' мм по сегменту----')
            if not p.cnc_key: # The "already written to the NC file" flag is not set
                #segments=p.Segments() # specifically for K3 version 6.5
                segments = p.segments
                for s in segments:
                    s.path.overt = p.overt
                    #print(s.band if 'band' in list(s.__dict__.keys()) else '---')
                #----------
                # Reorder the segments to start from the minimum XY point for
                # outer and broken inner contours, and find the entry point of
                # cut-outs (the middle of the longest side).
                #pCuts = 'cuts' in p.__dict__.keys()
                closed_path = True
                if pCuts:
                    # A circular cut-out is split in half so it has a start point
                    if type(segments[0]) == machine.Circle:
                        r=segments[0].path
                        segments = segments[0].Divide(0.5)
                        for s in segments:
                            s.path_H=r
                        for sg in segments:
                            Utiles.SegmentReverse(sg,0)
                    #segments = Utiles.checkSegments(segments,False)
                    # Decide whether this cut-out is machinable now: blind ones
                    # (depth sign depends on the flip flag) go first
                    if ((p.overt and p.cuts[0][0] < 0) or
                        (not p.overt and p.cuts[0][0] > 0)):
                        if (abs(segments[0].start_pt.x - segments[-1:][0].end_pt.x) > 0.1 or
                            abs(segments[0].start_pt.y - segments[-1:][0].end_pt.y) > 0.1):
                            closed_path=False
                        else:
                            closed_path=True
                    else:
                        continue
                if (not pCuts) or (pCuts and closed_path):
                    segments = Utiles.checkSegments(segments,outer)
                #----------
                a=0
                ai=0
                last=False
                if p.overt: # the panel is flipped: mill passes must run in reverse order
                    segments.reverse()
                conW, singleSetup = self.RevisiaSegmentsToPath(segments, p.overt)
                if False not in singleSetup:
                    p.cnc_key = True # Mark the contour as written to the NC file
                    startp = True
                    for s in zip(segments,conW[0]):
                        ai += 1
                        conWL=conW[0][ai:]
                        # The current segment is the last one that still needs output
                        if True not in conWL:
                            last=True
                        # specifically for K3 version 6.5
                        Utiles.SegmentReverse(s[0], p.overt)
                        #depth = p.panel.thickness if not pCuts else abs(p.cuts[0][0])*(-1)
                        depth = 5 if not pCuts else abs(p.cuts[0][0])*(-1)
                        #---
                        if s[1]: # The segment is in the list of segments to machine
                            a=a+1
                            #-------------------
                            # Choose the tool compensation side
                            fun_segment_path_is_tcuts=lambda f: f.path_H.is_tcuts if f.path is None else f.path.is_tcuts
                            if (outer==True):
                                corr=2 # Right-hand compensation
                            else:
                                s_path_id=segments[0].path_H.id if segments[0].path is None else segments[0].path.id
                                if (((len(segments) == 2)
                                     and (False not in conW[0]) # special case: a circular through cut-out
                                     and (s_path_id.handle>1))
                                    or (fun_segment_path_is_tcuts(s[0]))):
                                    corr= 2 if p.overt else 1 # Left-hand compensation
                                else:
                                    corr=2 # Right-hand compensation
                            #--------------------
                            self.writer.MillingCommand(a,s[0], self.panel, outer,last,startp, depth,corr)
                            result = True
                            startp = False
                        else:
                            startp = True
            outer=False
        return result
def writerSlotsCommand(self, slotsTuple):
'''Подоготавливает к выводу пропилы
side - сторона пропила
slotsTuple - список пропилов в данной панели'''
result = False
a=0
for s in slotsTuple:
#logplane = iif(self.panel.overt, not s.is_plane, s.is_plane)
if (Utiles.GetSlotDirection(s)=="X" and s.is_plane):
a=a+1
if s.cnc_key == False: # Признак вывода отверстия в файл ЧПУ
self.writer.SlotCommand(a,s)
result = True
s.cnc_key = True # Признак вывода отверстия в файл ЧПУ
return result
def writerDrillingCommand(self, drillTuple):
'''Подоготавливает к выводу отверстия
drillTuple - список отверстий в данной панели'''
result = []
#self.writer.geoPlastNameCommand()
#self.writer.geoDirTextureCommand()
for d in drillTuple:
SideS=Utiles.GetDrillPlane(d)
SideNum=self.GetBiesseSideNum(SideS)
# print(SideS,' ',' ',SideNum,[a.cnc_key for a in drillTuple])
if d.diameter == 5.0:
pass
# Если сторона не определена,или это стороны F, отверстие не выводим, если сквозное , то выводим
if (SideNum<0 or SideNum==5) and not Utiles.GetDrillTrough(d):
continue
#if
# Изменил эту часть кода 2013-06-25
if d.cnc_key == False: # Признак вывода отверстия в файл ЧПУ (т.е если отверстие не выведено его надо выводить)
result.append(self.writer.DrillingCommand(drillTuple.index(d),d))
pass
#-----------
return result.count(True)>0
def revis_trueposits_brosers_drill(self):
'''корректирует списки допустимых вариантов расположения панели с учетом "братьев"'''
for dd in self.drills:
for ddd in self.drills:
if (ddd.id.handle in dd.brothers):
new_true_posits = Utiles.intersect(ddd.list_true_posits, dd.list_true_posits)
ddd.list_true_posits = new_true_posits
dd.list_true_posits = new_true_posits
    def tech_cicle(self, hashsimbol, p_overt, NameSide):
        '''Try to machine the current face in all four panel orientations.
        hashsimbol - setup counter used to suffix the generated file names
        p_overt    - True when the panel is flipped (face F up)
        NameSide   - face name handed to the writer
        Returns the updated hashsimbol.  The panel is left in its initial
        orientation (four rotations in total).'''
        self.writer.NameSide = NameSide
        for p in self.millingTech:
            p.overt = p_overt # Store the flip flag on the contour
            p.segments_cnc_key = [] # Per-segment "needs NC output" flags
            [p.segments_cnc_key.append(True) for i in range(len(p.segments))]
        # print(BASEPOINT)
        # print(machine.constraints.d_trueposits[BASEPOINT])
        resultOutput = self.writeOutputFile(self.settings.machine_base_name[BASEPOINT],hashsimbol)
        if resultOutput:
            hashsimbol +=1
        self.app_start_optimist(resultOutput)
        for i in range(3): # Rotate the panel three more times and retry
            # print("tech_cicle_i")
            self.rotatePanel()
            # print(BASEPOINT)
            # print(machine.constraints.d_trueposits[BASEPOINT])
            resultOutput = self.writeOutputFile(self.settings.machine_base_name[BASEPOINT],hashsimbol)
            if resultOutput:
                hashsimbol +=1
            self.app_start_optimist(resultOutput)
        # print("tech_cicle_fin")
        self.rotatePanel() # Restore the initial orientation
        return hashsimbol
    def EndPanel(self):
        '''Called at the end of each panel: iterates over the candidate start
        base points, generates the NC files for each, and keeps the variant
        that needs the fewest setups (files of worse variants are removed).'''
        if (self.Selobj == 0): # Not a selected panel
            return
        if not self.structureOK: # The panel structure has problems
            return
        b = self.findPathSize(VARIANTPATH)
        Ypanel=b.max.y-b.min.y
        Xpanel=b.max.x-b.min.x
        self.panel.textureDir = self.panel.texture_dir
        self.writer.texture_dir_value = self.panel.textureDir
        # print(Xpanel)
        # print(Ypanel)
        # When the panel is taller than wide, start from base 3 and pre-rotate
        # three times so the long side lies along X first.
        if (Xpanel<Ypanel):
            rangeBase = [ 3, 0, 1, 2]
            # print("EndPanel_3")
            self.rotatePanel()
            self.rotatePanel()
            self.rotatePanel()
        else:
            rangeBase = [0, 1, 2, 3]
        #and (Xpanel<250 and Ypanel>1200):
        ##self.rotatePanel()
        print('-------------------------------------')
        print('\nПоиск оптимальной установки детали.')
        print('Деталь :'+ str(self.panelNum if self.twins is None else self.twins))
        _hashsimbol = '#'
        print('\n------- старт расчета с позиции ', int(rangeBase[0]))
        # NOTE(review): if the very first base position hits the Y-limit
        # "continue" below, previndexbp is referenced before assignment on the
        # next iteration - confirm this cannot happen with real machine limits.
        for bp in rangeBase:
            # b = self.findPathSize(VARIANTPATH)
            # Ypanel=b.max.y-b.min.y
            # print(Ypanel)
            # print(machine.constraints.ymax_constr)
            # print(self.panelLength)
            if (machine.constraints.ymax_constr < self.panelWidth):
                print("\n--- Габарит Y={} больше ограничения {}. Смотрим следующий вариант".format(
                    self.panelWidth, machine.constraints.ymax_constr))
                # print("EndPanel_rotate")
                self.rotatePanel()
                continue
            print('\n------- Позиция ', int(bp))
            # Park the previous variant's files under a ~bpp name while the new
            # variant is being generated
            if bp != rangeBase[0]:
                for fNmeForRename in self.start_optimist[previndexbp]:
                    #try:
                    os.rename(fNmeForRename,fNmeForRename[:-3]+'~bpp') # rename away
                    #except:
                        #pass
            self.hashsimbol = 1
            self.startBP = bp
            # Reset the "written" flags so every operation is emitted again
            for a in self.slots:
                a.cnc_key = False
            for a in self.millingTech:
                a.cnc_key = False
            for a in self.drills:
                a.cnc_key = False
            self.panel.overt = False # "Flipped" flag; for K3 6.5, verify for 7.1
            #print 'self.panel.overt ', self.panel.overt
            # If needed, check against the working-area limits here
            #Utiles.CheckGabs(self,panel,xmin,xmax,ymin,ymax,zmin,zmax)
            # Decide which panel faces require mandatory machining
            isPlaneA = (self.isBlindHoleA==True or self.isSlotsA==True or self.isCutsA) # Face A is mandatory when it has a groove or a blind hole
            isPlaneF = (self.isBlindHoleF==True or self.isSlotsF==True or self.isCutsF) # Face F is mandatory when it has a groove or a blind hole
            # print("isPlaneA",isPlaneA)
            # print("isPlaneF",isPlaneF)
            #hashsimbol='#' if isPlaneA and isPlaneF else '' # the '#' mark shows the operator that machining takes several passes
            if isPlaneA or (not isPlaneF): # Emit face A when it is mandatory or face F has nothing at all
                # Check single-setup feasibility and generate as many setups as
                # needed to perform all operations of face A
                self.hashsimbol = self.tech_cicle(self.hashsimbol, False, 'A')
            # (Flipping is done on the machine - Bratus 12-10-2015.)
            # If there are blind holes or grooves on the back side, the panel
            # must also be machined flipped.
            if isPlaneF: #(self.isBlindHoleF==True or self.isSlotsF==True):
                self.panelOverturn()
                # Flip around the X axis and machine face F
                self.hashsimbol = self.tech_cicle(self.hashsimbol, True, 'F')
            falsedrills = [a.cnc_key for a in self.drills].count(False)
            if falsedrills > 0:
                print('!!!!!!!!!!!!ОШИБКА!!!!!!!!!!!!!!!')
                print('Остались необработанными ', falsedrills , ' отверстия.')
                for t in [(a.diameter, a.depth) if not a.cnc_key else None for a in self.drills]:
                    if t is not None:
                        print(t[0], 'x', t[1])
                print('-------------------------------')
            # If the current variant produced files and there are fewer of them
            # than last time, it is better: delete the previous variant's files.
            if bp != rangeBase[0]:
                if ((len(self.start_optimist[bp]) < len(self.start_optimist[previndexbp]))
                    and len(self.start_optimist[bp])>0):
                    print('Новый вариант лучше старый удаляем.')
                    for fNmeForRemove in self.start_optimist[previndexbp]:
                        #try:
                        os.remove(fNmeForRemove[:-3]+'~bpp')
                        #except:
                            #pass
                    self.start_optimist[previndexbp] = []
                elif len(self.start_optimist[previndexbp])>0:
                    print('Старый вариант лучше новый удаляем.')
                    for fNmeForRemove in self.start_optimist[bp]:
                        try:
                            os.remove(fNmeForRemove)
                        except:
                            pass
                    for fNmeForRename in self.start_optimist[previndexbp]:
                        #try:
                        os.rename(fNmeForRename[:-3]+'~bpp', fNmeForRename) # rename back
                        #except:
                            #pass
                    self.start_optimist[bp] = self.start_optimist[previndexbp]
            # A single-file variant cannot be beaten - stop searching
            if len(self.start_optimist[bp]) == 1:
                print("Лучший вариант! Стартовая точка " + self.settings.machine_base_name[BASEPOINT])
                break
            if self.panel.overt: self.panelOverturn()
            # print("EndPanel_rotate_fin")
            self.rotatePanel()
            previndexbp = bp
        return
def app_start_optimist(self, resultOutput):
if resultOutput:
self.start_optimist[self.startBP].append(self.cmdfileName)
    def panelOverturn(self):
        '''Flip the panel over around the X axis and update all flip-dependent
        state (flags, global BASEPOINT, path flip markers, drill attributes).'''
        # Flip around the X axis
        self.panel.Overturn(0)
        b =self.findPathSize(VARIANTPATH)
        #print b.max.x, b.min.x
        self.panel.Translate(machine.Vector2d(-b.min.x, -b.min.y))
        #b =self.findPathSize(VARIANTPATH)
        #print b.max.x, b.min.x
        self.panel.overt = True # "Flipped" flag; for K3 6.5, verify for 7.1
        global BASEPOINT
        BASEPOINT = self._overt_basepoint[BASEPOINT]
        paths = self.panel.paths
        for path in paths:
            path.overt = True # Updated when the panel is flipped
        mc = machine.constraints
        # Re-evaluate machine-dependent drill attributes for the flipped panel
        for d in self.drills:
            self.drill_revisia(d, mc)
def Drilling(self, d):
'''Сверловка. Пласти и торцы. Заполняем списки структурами для пластей и торцев'''
if (self.Selobj == 0): # Если это не выбранная панель
return
d.cnc_key = False # Признак вывода отверстия в файл ЧПУ
d.panel.thiknessPanel = self.panel.thickness # Для версии к3 6,5
mc = machine.constraints
isTrough = Utiles.GetDrillTrough(d)
Side=Utiles.GetDrillPlane(d)
lDDM = self.get_list_torec_drillMachine(d, mc) # для торцевых отверстий требуется уточнение по допустимым осям
# print(lDDM)
self.drill_revisia(d, mc)
#не заморачиваемся c 11-11-2015 см письма Братуся #
# проверка на выполнимость по допустимым экстримальным координатам
ps = machine.constraints.CheckConstraints(d, listDrillDimMashine=lDDM)
# print(ps)
#ps=('AE', 'AC', 'AB', 'AD','FE', 'FC', 'FB', 'FD')
self.get_drill_brosers(d)
if len(ps) > 0:
d.list_true_posits = ps
#self.drill_revisia(d, mc)
if isTrough:
self.isTrough=True
elif (Side=="F"):
self.isBlindHoleF=True
elif (Side=="A"):
self.isBlindHoleA=True
self.drills.append(d)
else:
print('''---!!!!!!!!!---''')
print('''Отверстие не может быть выполнено по допустимым экстримальным координатам''')
print(' d x h =' + str(d.diameter) + ' x ' + str(d.depth))
print(' координаты центра X Y Z = ' + str(d.position.x) + ' ' + str(d.position.y) + str(d.position.z))
print('''---!!!!!!!!!---''')
return
def get_list_torec_drillMachine(self, d, mc):
'''список допустимых торцев и пластей сверловки на станке mc для отверстия d'''
tt = []
for dr in mc.tools_dims['drilling']:
if d.diameter in dr['DIA'] :
axl = dr['AXE']
tt.extend(list(axl))
return tt
    def get_drill_brosers(self, d):
        '''Fill ``d.brothers`` with the ids of this hole's "brother holes".

        Brothers are holes that belong to the same fastener and the same
        panel (e.g. the rod and cam holes of an eccentric fitting): for the
        rod hole it returns the cam hole id and vice versa.  The idea is
        that such holes should be drilled together, in a single setup.

        NOTE(review): the SQL is assembled by string concatenation; the
        interpolated values are object handles (passed through ``str``),
        which look numeric, but parameterised queries would be safer.
        '''
        SQL_STR='''SELECT THoles.HolePos
        FROM THoles INNER JOIN TElems ON THoles.HolderPos = TElems.UnitPos
        WHERE ((Not (THoles.HolePos)='''+str(d.id.handle)+''') AND ((TElems.UnitPos) In (SELECT TElems.UnitPos
        FROM THoles INNER JOIN TElems ON THoles.HolderPos = TElems.UnitPos
        WHERE (((THoles.UnitPos)='''+str(self.panel.id.handle)+''') AND ((THoles.HolePos)='''+str(d.id.handle)+'''))
        ORDER BY THoles.UnitPos, THoles.HolePos)) AND ((THoles.UnitPos)='''+str(self.panel.id.handle)+'''))
        ORDER BY THoles.HolePos, THoles.UnitPos;'''
        cnxn, cursor, recordset = self.odbc_conn(SQL_STR)
        d.brothers = [a[0] for a in recordset]
        self.odbc_close(cursor, cnxn)
def Slot(self, s):
'''Пропилы. Заполняем списки структурами пропилов'''
self.slots.append(s)
s.cnc_key = False # Признак вывода пропила в файл ЧПУ
# Если пропил по стороне F
if (s.is_plane==False):
self.isSlotsF=True
else:
self.isSlotsA=True
if (Utiles.GetSlotDirection(s)!="X"):
self.isSlotsY=True
return
    def Contour(self):
        '''Collect the contours that need milling into ``self.millingTech``.

        Face cutouts originating from TCuts are always queued (their depth
        info is fetched from the database first); the panel outline itself
        is queued only when it is not a plain 4-segment, axis-aligned
        rectangle.
        '''
        # List of the panel's contours.
        paths = self.panel.paths
        # If a contour is a plain rectangle of 4 segments, no milling is needed.
        for path in paths:
            path.cnc_key = False # flag: contour already written to the CNC file
            path.overt = False # changes when the panel is flipped
            for s in path.segments:
                s.path.overt = False
            segments=path.segments
            if path.is_plane_path:
                if path.is_tcuts:
                    # This may now be a cutout in the panel that must be machined.
                    # path.is_plane_path: contour belongs to the panel face, not the panel outline
                    # path.is_tcuts: contour originates from TCuts
                    # path.cutpos: CutPos of the TCuts record this contour came from
                    cnxn, cursor, recordset = self.odbc_conn(SQL_STR='''SELECT TCuts.Depth, TCuts.ExtrZ FROM TPaths
                    INNER JOIN TCuts ON (TPaths.CutPos = TCuts.CutPos) AND (TPaths.PanelPos = TCuts.UnitPos)
                    WHERE (((TCuts.UnitPos)='''+str(self.panel.id.handle)+''') AND ((TPaths.PathID)='''+str(path.id.handle)+'''));''')
                    path.cuts = recordset
                    # The sign of the first Depth value selects the panel side (F/A).
                    if (path.cuts[0][0] < 0):
                        self.isCutsF = True
                    elif (path.cuts[0][0] > 0):
                        self.isCutsA = True
                    self.odbc_close(cursor, cnxn)
                    self.millingTech.append(path)
                    continue
            else:
                bbox=path.bounding_box
                if (len(segments)!=4): # not 4 segments => certainly not a rectangle, queue it
                    self.millingTech.append(path)
                    continue
                for segment in segments:
                    if (type(segment)!=machine.Line):
                        self.millingTech.append(path)
                        break
                    else:
                        # A line that is neither horizontal nor vertical => queue the contour.
                        Xst = segment.start_pt.x
                        Yst = segment.start_pt.y
                        Xen = segment.end_pt.x
                        Yen = segment.end_pt.y
                        if (abs(Xst-Xen)>eps_d and abs(Yst-Yen)>eps_d):
                            self.millingTech.append(path)
                            break
                        # Compare against the bounding box: if an end point is off the box, queue the contour.
                        # NOTE(review): each inner pair uses "or" (point differs from min
                        # OR from max), which holds for any point on a box of non-zero
                        # size -- "and" may have been intended; confirm with real data.
                        if ((abs(Xst-bbox.min.x)>eps_d or abs(Xst-bbox.max.x)>eps_d) or
                            (abs(Xen-bbox.min.x)>eps_d or abs(Xen-bbox.max.x)>eps_d) or
                            (abs(Yst-bbox.min.y)>eps_d or abs(Yst-bbox.max.y)>eps_d) or
                            (abs(Yen-bbox.min.y)>eps_d or abs(Yen-bbox.max.y)>eps_d)):
                            self.millingTechApp(path)
                            break
        return
def odbc_close(self, cursor, cnxn):
'''Закрывает соединение с базой'''
cursor.close()
cnxn.commit()
cnxn.close()
def odbc_conn(self, SQL_STR):
'''Создает соединение с базой self.settings.database_name
Выполняет запрос к базе'''
cnxn = pyodbc.connect('Driver={Microsoft Access Driver (*.mdb)};Dbq='+self.settings.database_name+';Uid=;Pwd=;')
cursor = cnxn.cursor()
cursor.execute(SQL_STR )
recordset = cursor.fetchall()
return cnxn, cursor, recordset
    def millingTechApp(self, path):
        # Queue *path* for contour milling; same effect as appending to
        # self.millingTech directly (Contour() uses both forms).
        self.millingTech.append(path)
    def Milling(self, m, s):
        '''Per-segment milling hook -- currently an intentional no-op.

        (The original docstring said "fill the milling lists", but milling
        contours are actually collected in Contour(); confirm before
        adding logic here.)'''
        return
    def Filletting(self, f, s):
        #print('----Edge band of given thickness along segment----')
        # No-op stub: edge-banding (Filletting) events are currently ignored.
        pass
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------
    def RevisiaSegmentsToPath(self, conl, overt):
        '''Order/classify contour segments for milling.

        For every contour in *conl* (a single contour or a list of
        contours) decides, segment by segment, whether the segment must be
        machined, mirroring that decision into
        ``self.millingTech[...].segments_cnc_key``.  Returns ``(conW,
        singleSetup)``: ``conW[iP]`` is the per-segment boolean work list
        for contour ``iP`` and ``singleSetup`` records whether each
        contour can be machined in a single setup.
        '''
        def _getIndexMT(self, con, iP, iS):
            # Index in self.millingTech of the path owning this segment.
            t = 0
            for j, ii in enumerate(self.millingTech):
                if ii.id.handle == con[iP][iS].path_H.id.handle:
                    t = j
                    break
            return t
        def _getExtrPoint(iP, iS, con):
            # Segment end points rounded to 3 decimal places.
            Xst = round(con[iP][iS].start_pt.x,3)
            Yst = round(con[iP][iS].start_pt.y,3)
            Xen = round(con[iP][iS].end_pt.x,3)
            Yen = round(con[iP][iS].end_pt.y,3)
            return Xst, Yst, Xen, Yen
        Inf = False # debug-output switch
        singleSetup = [True] # machining in a single setup
        if type(conl[0])==list :
            con = conl
        else:
            con = []
            con.append(conl)
        # dictG = {}
        dictw = {}
        mc = machine.constraints.tools_dims['milling'][0]
        thkns =2.5 # edge-band thickness
        b =self.findPathSize(VARIANTPATH) #self.panel.paths[VARIANTPATH].bounding_box # without banding and undercuts
        # Machining window: panel extents clamped to the machine limits.
        maxX = min(round(b.max.x,3), mc['MAX_X_VAL'])
        minX = max(round(b.min.x,3), mc['MIN_X_VAL'])
        maxY = min(round(b.max.y,3), mc['MAX_Y_VAL'])
        minY = max(round(b.min.y,3), mc['MIN_Y_VAL'])
        if Inf:
            print("b.max.x= ",maxX)
            print("b.min.x= ",minX)
            print("b.max.y= ",maxY)
            print("b.min.y= ",minY)
        conW = list(range(len(con))) # list of contour indices, replaced below by per-segment work lists
        indexMillingTech = 0
        for iP in conW:
            if Inf: print(iP)
            segW = [False for a in con[iP]] # per-segment "needs machining" flags
            segmentsOutGab = []
            # First pass: mark segments lying beyond the machining window.
            for iS in range(len(con[iP])):
                Xst, Yst, Xen, Yen = _getExtrPoint(iP, iS, con)
                log = (True in [(v>maxX) for v in [Xst, Xen]]) or (True in [(v>maxY) for v in [Yst, Yen]])
                segmentsOutGab.append(log)
            for iS in range(len(con[iP])):
                try:
                    if Inf: print("iS= ",iS+1)
                    Xst, Yst, Xen, Yen = _getExtrPoint(iP, iS, con)
                    indexMillingTech = _getIndexMT(self, con,iP, iS )
                    if Inf:
                        print("Xst= ",Xst)
                        print("Xen= ",Xen)
                        print("Yst= ",Yst)
                        print("Yen= ",Yen)
                    # Grow segments_cnc_key to cover every segment of this contour.
                    while len(self.millingTech[indexMillingTech].segments_cnc_key) < len(segW):
                        self.millingTech[indexMillingTech].segments_cnc_key.append(True)
                    # Find the angle between the tangents of two adjacent contour elements.
                    if segmentsOutGab[iS]:
                        print('--Сегмент за пределами зоны обработки--')
                        continue
                    else:
                        self.millingTech[indexMillingTech].segments_cnc_key[iS] = segW[iS]
                        pass
                    if con[iP][iS].path_H.id.handle > 1:
                        segW[iS] = True # a cutout (through or not) -- always mark for machining
                    else:
                        v1 =con[iP][iS-1].tangent(0 if overt else 1)
                        v2 =con[iP][iS].tangent(1 if overt else 0)
                        cs =self.angleGet(v1,v2)
                        if Inf: print("угол между ", iS , " и ", iS+1, "= " , cs)
                        if type(con[iP][iS]) == machine.Line:
                            if not segW[iS]: # skip if already marked for machining
                                PlaneX =Xst-Xen
                                PlaneY =Yst-Yen
                                if PlaneX == 0: # vertical (in the Y plane)
                                    if Inf: print("В плоскости Y")
                                    if abs(Xst-minX) > thkns < abs(Xst-maxX):
                                        segW[iS] = True
                                        if Inf: print("Добавляем обработку")
                                if PlaneY == 0: # horizontal (in the X plane)
                                    if Inf:
                                        print("В плоскости X")
                                        print("Yst-maxY=",abs(Yst-maxY))
                                        print("Yst-minY=",abs(Yst-minY))
                                    if abs(Yst-minY) > thkns < abs(Yst-maxY):
                                    #if abs(Yst-minY) > thkns and abs(Yst-maxY) > thkns:
                                        segW[iS] = True
                                        if Inf: print("Добавляем обработку")
                                if PlaneY != 0 and PlaneX != 0 :
                                    if Inf: print("Ни горизонтальна, ни вертикальна")
                                    segW[iS] = True
                                    if Inf: print("Добавляем обработку")
                                if type(con[iP][iS-1]) == machine.Arc: # is the previous element a fillet?
                                    if Inf: print("Анализ предыдущего - Скругление")
                                    AngDir =con[iP][iS-1].orientation
#                                    if self.Ovtrn:
#                                        cs =180-cs
#                                        print "Ovrtn.cs=", cs
                                    if (AngDir == 0 or AngDir == 1) and (cs <= 45 or cs == 180) :
                                        segW[iS] =True # mark for machining
                                        if Inf: print("Добавляем обработку на линию")
                        if type(con[iP][iS]) == machine.Arc:
                            segW[iS] =True # arcs are always machined
                            if Inf:
                                print("Xen=", Xen)
                                print("Yen=", Yen)
                                print("start=", degrees(con[iP][iS].start))
                                print("end=", degrees(con[iP][iS].end))
                            # Determine the arc direction AngDir.
                            AngDir =con[iP][iS].orientation
                            if type(con[iP][iS-1]) == machine.Line: # is the previous element a line?
                                if Inf: print("Анализ предыдущего контура - Линия")
#                                    if self.Ovtrn:
#                                        cs =180-cs
#                                        print "Ovrtn.cs=", cs
                                if (AngDir == 0 or AngDir == 1) and (cs <= 45 or cs == 180):
                                    segW[iS-1] = True if not segmentsOutGab[iS-1] else False # mark the preceding line too
                                    if Inf: print("Добавляем обработку")
                        segment = con[iP][iS]
                        isWorkIds = 'work_ids' in list(segment.__dict__.keys())
                        if isWorkIds:
                            lenm = len(segment.work_ids)
                            for i in range(lenm):
                                numW = segment.work_ids[i].handle # machining-operation ID
                                if numW not in dictw and type(segment.works[i]) == machine.Milling:
                                    dictw.update({numW : []})
                # NOTE(review): the bare except silently skips any segment that
                # raises, masking real errors -- consider narrowing/logging.
                except:
                    pass
            conW[iP] = segW
            singleSetup.append(not (True in [log for log in self.millingTech[indexMillingTech].segments_cnc_key]))
        return conW, singleSetup
def angleGet(self, v1, v2):
cc = round((v1%v2)/(v1.length()*v2.length()),3)
cs = round(degrees(math.acos(cc)),2)
return cs
# def readerContours(self, panel):
# "Чтение внешнего контура и сквозных вырезов. Подготовка."
# #print "readerContours"
# p= panel.paths # Список контуров панели
# con = [] # Список для контуров
# #i=0
# for a in p:
# #print i
# #print a.is_tcuts
# #print a.is_plane_path
# if (not(a.is_tcuts) and a.is_plane_path) or (a.is_tcuts and not(a.is_plane_path)):
# segments = a.segments
# ttt=len(segments)
# if ttt!=0:
# if type(segments[0]) == machine.Circle:
# r=segments[0].path
# segments = segments[0].Divide(0.5)
# for s in segments:
# s.path_H=r
# for sg in segments:
# Utiles.SegmentReverse(sg)
# elif ttt==2:
# s0=segments[0]
# s1=segments[1]
# if s0== machine.Arc and s1== machine.Arc and s0.start_pt==s1.end_pt:
# for sg in segments:
# Utiles.SegmentReverse(sg)
# segm = [] # Инициализация списка сегментов
# for seg1 in segments:
# segm.append(seg1)
# #print seg1.params
# #print seg1.work_ids
# #print seg1.works
# con.append(segm)
# #i+=1
# # На выходе список контуров con со списками составляющих сегментов seg
# # Доступ по индексу con [i] [j], где i - номер контура, j - номер сегмента
# #if not self.keyFrez: # Обработка должна накладываться на одну сторону
# #self.keyFrez = True
# #con =self.FilterBetweenCNC(con) # Достаточно один раз, так как меняется только направление)
# return con
def findPathSize(self, VARIANTPATH):
'''находит контур с кромкой или без кромки
VARIANTPATH 0-без кромки 1-с кромкой
bounding_box нужного контура
'''
lSizePath = []
dSizePath = {}
lBand = True if VARIANTPATH ==0 else False
for ph in self.panel.paths:
if not ph.is_tcuts and ph.is_plane_path == lBand: #
b=ph.bounding_box
X, Y = self.getXY_bounding_box(b)
S = X * Y
dSizePath[S]=b
lSizePath = list(dSizePath.keys())
lSizePath.sort(reverse=True)
result = []
if len(lSizePath) > 0:
result = dSizePath[lSizePath[0]]
return result
def getXY_bounding_box(self, b):
'''возвращает габаритные размеры объекта b bounding_box2D'''
X=b.max.x-b.min.x
Y=b.max.y-b.min.y
return X, Y
# global boa
# boa=Boa()
| [
"alexandr69@gmail.com"
] | alexandr69@gmail.com |
b76d177d849e9129be32fb01f3a8b580988da26f | 4f956e9ad537a78eb536ff02ee691e7a54563800 | /app/components/MapBuilder.py | d64d3e08c3ae4edd3d4ebd04613edae63a69809f | [] | no_license | aemcgraw/Making_Maps | d311416c96c2cc50d4679a475d82a79e771487e1 | b8f988284a7d165149e281e8e61eb2f32f5d15c9 | refs/heads/master | 2020-09-09T04:39:43.961615 | 2020-03-31T03:36:12 | 2020-03-31T03:36:12 | 221,349,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from PIL import Image
from app.components.Map import Map
import app.util.util
def generate_random_map():
    """Create a 400x400 random map image, save it, and return its filename.

    The map starts as solid blue, is flooded, then seeded with 10 random
    islands and 5 random forests whose sizes are drawn from
    ``util.get_scaled_log``.
    """
    # ``import app.util.util`` at module level only binds the name ``app``,
    # so the ``util.get_scaled_log`` calls below raised NameError; bind
    # ``util`` explicitly here.
    from app.util import util

    new_image = Image.new('RGBA', (400, 400), (0, 0, 255, 255))
    new_im = Map(new_image)
    new_im.flood()
    for _ in range(10):
        new_im.draw_island(util.get_scaled_log(10000))
    for _ in range(5):
        new_im.draw_forest(util.get_scaled_log(4000))
    # TODO: make the output path configurable instead of hard-coding it.
    new_im.save("/Users/mcgraw/Making_Maps/static/test.bmp")
    return 'test.bmp'
| [
"mcgraw4@illinois.edu"
] | mcgraw4@illinois.edu |
80f2756291480b87078a382c9248203b282d775b | 95b5860cfc92d698987617b9f80e431e95c6b612 | /Core/utilities/results/success_result.py | cf4279305dbe32d7111b5fa0b1a92f629551bc5d | [] | no_license | cihatyalman/python_core | fd38a69f6ca6fe1f189d518b1aa2e70220879ebd | 2a22f225c681e1c16bacba1a60bdae058c9034b3 | refs/heads/master | 2023-06-05T05:44:51.886056 | 2021-06-29T20:40:10 | 2021-06-29T20:40:10 | 346,646,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from Core.utilities.results.result import Result
class SuccessResult(Result):
    """Result representing a successful operation (success flag fixed to True)."""
    def __init__(self, message="Successful"):
        # Delegate to Result with the success flag hard-wired to True.
        super().__init__(True, message)
| [
"noreply@github.com"
] | cihatyalman.noreply@github.com |
56118820b0c970baddf97586606dcfbf3d312aa4 | 6c17f49123dfd415df395c04478f7f07058d121b | /PySource/Chapter-4/6_membership_operators.py | a06c5cadc5135058ace2c24c9e93054ef6c9fa44 | [] | no_license | BrighterMyanmar/Python-Programming-Online-Course | a8c7a616385b2f82e1ac21c54006d7da5fda1277 | 178866881d8d5ae641cd4f2493c7f61a372c6ae7 | refs/heads/master | 2023-01-23T00:18:55.804031 | 2020-12-06T11:24:07 | 2020-12-06T11:24:07 | 319,015,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | para = "The is a need, there is a way!"
# NOTE: the first result is overwritten before anything is printed -- only
# the 'not in' check below is actually displayed.
bol = 'need' in para # membership test: is 'need' a substring of para?
bol = 'is' not in para # para contains 'is' (in "is a need"), so this is False
print(bol)
| [
"azphyo72@gmail.com"
] | azphyo72@gmail.com |
87e0bc597a20bab6fba49916144cdf8e29b6b3ee | 855b1e8e08ae5799e2de7211e21a623f2f3ca632 | /Python_Pharmacy_project/user.py | 7c74b1185884a7d0a86020e224075e8f03104c67 | [] | no_license | Navanithbs/Pharmacy-Management-system-project | 3f151c1069255cb3406b98c5fd4417d97920a082 | 514012236dba6a6cd0492b26535b3db1f54333a2 | refs/heads/master | 2022-11-24T14:25:10.197966 | 2020-08-02T16:32:03 | 2020-08-02T16:32:03 | 271,997,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,122 | py | import sys
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import user_support
import os.path
from tkinter import messagebox as mb
def vp_start_gui():
    '''Starting point when module is the main routine.'''
    # Module-level handles shared with the rest of the generated GUI code.
    global val, w, root
    global prog_location
    # Remember the directory this script was launched from (used to resolve
    # resource paths elsewhere).
    prog_call = sys.argv[0]
    prog_location = os.path.split(prog_call)[0]
    root = tk.Tk()
    top = Toplevel1 (root)
    user_support.init(root, top)
    root.mainloop()  # blocks until the window is closed
w = None
def create_Toplevel1(rt, *args, **kwargs):
    '''Starting point when module is imported by another module.
       Correct form of call: 'create_Toplevel1(root, *args, **kwargs)' .'''
    # Module-level handles shared with the rest of the generated GUI code.
    global w, w_win, root
    global prog_location
    prog_call = sys.argv[0]
    prog_location = os.path.split(prog_call)[0]
    #rt = root
    root = rt
    # Build the page in a secondary toplevel attached to the caller's root.
    w = tk.Toplevel (root)
    top = Toplevel1 (w)
    user_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Toplevel1():
    '''Destroy the secondary toplevel window and clear the module handle.'''
    global w
    w.destroy()
    w = None
class Toplevel1:
    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # GUI-builder palette constants; only _bgcolor/_fgcolor are used
        # below (for the menubar).
        _bgcolor = '#d9d9d9' # X11 color: 'gray85'
        _fgcolor = '#000000' # X11 color: 'black'
        _compcolor = '#d9d9d9' # X11 color: 'gray85'
        _ana1color = '#d9d9d9' # X11 color: 'gray85'
        _ana2color = '#ececec' # Closest X11 color: 'gray92'
        font10 = "-family {DejaVu Sans} -size 19 -weight bold " \
            "-underline 1"
        font11 = "-family {DejaVu Sans} -size 11 -weight bold"
        font12 = "-family {DejaVu Sans} -size 10 -weight bold"
        # Fixed window geometry and title.
        top.geometry("600x450+364+181")
        top.minsize(1, 1)
        top.maxsize(1351, 738)
        top.resizable(1, 1)
        top.title("User Page")
        top.configure(background="#727272")
        # Heading label.
        self.Label1 = tk.Label(top)
        self.Label1.place(relx=0.083, rely=0.067, height=41, width=489)
        self.Label1.configure(font=font10)
        self.Label1.configure(text='''Pharmacy Management System''')
        # Banner image label.
        self.Label2 = tk.Label(top)
        self.Label2.place(relx=0.183, rely=0.2, height=141, width=349)
        # NOTE(review): the second argument is absolute, so os.path.join
        # discards prog_location entirely -- the image loads only on the
        # original author's machine.  The global presumably keeps a
        # reference so the PhotoImage is not dropped -- confirm.
        photo_location = os.path.join(prog_location,"/home/navanith/Documents/Python_Pharmacy_project/med (1).png")
        global _img0
        _img0 = tk.PhotoImage(file=photo_location)
        self.Label2.configure(image=_img0)
        self.Label2.configure(text='''Label''')
        self.menubar = tk.Menu(top,font="TkMenuFont",bg=_bgcolor,fg=_fgcolor)
        top.configure(menu = self.menubar)
        # "Tablet Name" caption and entry field.
        self.Label3 = tk.Label(top)
        self.Label3.place(relx=0.117, rely=0.6, height=31, width=129)
        self.Label3.configure(background="#727272")
        self.Label3.configure(font=font11)
        self.Label3.configure(text='''Tablet Name :''')
        # "Dosage" caption and entry field.
        self.Label4 = tk.Label(top)
        self.Label4.place(relx=0.117, rely=0.667, height=31, width=129)
        self.Label4.configure(background="#727272")
        self.Label4.configure(font=font11)
        self.Label4.configure(text='''Dosage :''')
        self.Entry1 = tk.Entry(top)
        self.Entry1.place(relx=0.35, rely=0.6,height=23, relwidth=0.41)
        self.Entry1.configure(background="white")
        self.Entry1.configure(font="TkFixedFont")
        self.Entry2 = tk.Entry(top)
        self.Entry2.place(relx=0.35, rely=0.689,height=23, relwidth=0.41)
        self.Entry2.configure(background="white")
        self.Entry2.configure(font="TkFixedFont")
        self.Button1 = tk.Button(top)
        self.Button1.place(relx=0.283, rely=0.822, height=31, width=86)
        self.Button1.configure(borderwidth="4")
        self.Button1.configure(font=font12)
        self.Button1.configure(text='''Search''')
        # Search-button handler: a tablet exists if '<name>.txt' is present
        # in the hard-coded files directory.
        def search1(self):
            first=self.Entry1.get()+".txt"
            flist=os.listdir('/home/navanith/Documents/Python_Pharmacy_project/files')
            # NOTE(review): this flags an error whenever a name IS entered but
            # the dosage is empty, while an empty name falls through to the
            # "Tablet not found" branch -- confirm the intended validation.
            # ("confiration" is a typo in the displayed title string.)
            if self.Entry1.get() and self.Entry2.get()=="":
                mb.showinfo(title="Error",message="Enter valid details..!!!!")
            elif first not in flist:
                mb.showinfo(title="Error",message="Tablet not found!!!")
            else:
                mb.showinfo(title="confiration",message="Tablet is in cart!!!")
        self.Button1.configure(command=lambda: search1(self))
        self.Button2 = tk.Button(top)
        self.Button2.place(relx=0.567, rely=0.822, height=31, width=152)
        self.Button2.configure(borderwidth="4")
        self.Button2.configure(font=font12)
        self.Button2.configure(text='''Proceed To Buy''')
        # "Proceed To Buy" handler (the local name shadows the builtin
        # next(), but only inside __init__).  Imports the payment page
        # lazily, then replaces the current window with it.
        def next():
            if self.Entry1.get()=="":
                mb.showinfo(title="Error",message="Enter valid details..!!!!")
            else:
                import paymentpage
                root.destroy()
                paymentpage.vp_start_gui()
        self.Button2.configure(command=next)
if __name__ == '__main__':
vp_start_gui()
| [
"navanithbharadwaj@gmail.com"
] | navanithbharadwaj@gmail.com |
7f7f1b8389ee33a45136a0cb2a2310540f960168 | c04f97f87bdab326e33541f6fc24064d46670183 | /Models_Morpheus/Codes/gen_SA_plot_Fig5.py | 837de8b5093fca5516b5f07015b7c78dc74b83cf | [] | no_license | HFooladi/Self_Organization | cf6f1dc3836a0376c63d402d50cf71571ceeba37 | 3514050c142cdb9be85554867874a368eae4d70c | refs/heads/master | 2022-11-13T06:29:29.930918 | 2022-11-05T13:18:11 | 2022-11-05T13:18:11 | 135,709,222 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,699 | py | import numpy as np
from matplotlib import pyplot as plt
import os
# Sensitivity-analysis figure: one 2x4 grid of sub-plots per species
# (BMP, then Noggin), one sub-plot per swept parameter.
sweep_file_names = ['r1', 'r2', 'beta1', 'beta2', 'lambda', 'a1', 'a2', 'n']
# Human-readable sweep ranges shown in the sub-plot titles.
sweep_param_range = [
    '(0 ~ 0.8)', '(0 ~ 2)', '(10 ~ 30)', '(10 ~ 30)', '(0 ~ 1)', '(0 ~ 1)',
    '(0 ~ 1)', '(1 ~ 4)'
]
# LaTeX symbols for the titles (order matches sweep_file_names).
title_tex = [
    r'$r_1$', r'$r_2$', r'$\tilde{\beta}_B$', r'$\tilde{\beta}_N$',
    r'$\lambda$', r'$\tilde{a}_B$', r'$\tilde{a}_N$', r'$n$'
]
# Row index of the "Proposed" (baseline) run within each sweep file.
base_id = [6, 24, 10, 10, 20, 5, 5, 1]
# (row, col) position of each parameter's sub-plot in the 2x4 grid.
plot_id = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
radius_res = 200  # samples per radial profile in the sweep files
# SA plot for BMP
fig, axes = plt.subplots(nrows=2, ncols=4)
for sweep_file_name, idx, id_p, param_range, tex in zip(sweep_file_names,
                                                        base_id, plot_id,
                                                        sweep_param_range,
                                                        title_tex):
    # Each file is a flat float16 array of (n_runs * radius_res) values;
    # reshape to one radial profile per run.
    # NOTE(review): np.uint8 truncates the run count above 255 -- confirm
    # the sweep sizes stay below that.
    r_BMP = np.loadtxt(os.path.join("sweep_array_" + sweep_file_name,
                                    "r_BMP.txt"),
                       dtype=np.float16)
    f_BMP = np.loadtxt(os.path.join("sweep_array_" + sweep_file_name,
                                    "f_BMP.txt"),
                       dtype=np.float16)
    r_BMP = np.reshape(r_BMP, (np.uint8(r_BMP.shape[0] / radius_res), radius_res))
    f_BMP = np.reshape(f_BMP, (np.uint8(f_BMP.shape[0] / radius_res), radius_res))
    r = r_BMP[0, :]
    mu1 = f_BMP.mean(axis=0)
    prop = f_BMP[idx, :]
    sigma1 = f_BMP.std(axis=0)
    # plot it!  Baseline curve in red, mean +/- 2 std band in yellow.
    axes[id_p[0], id_p[1]].plot(r * 2, prop, lw=2, color='red', label="Proposed")
    axes[id_p[0], id_p[1]].fill_between(r * 2,
                                        mu1 + 2 * sigma1,
                                        mu1 - 2 * sigma1,
                                        facecolor='yellow',
                                        alpha=0.5)
    axes[id_p[0], id_p[1]].legend(loc='lower right')
    axes[id_p[0], id_p[1]].set_xlabel('Distance to center(' + r'$\mu$' + 'm)')
    axes[id_p[0], id_p[1]].set_ylabel('Concentration')
    axes[id_p[0], id_p[1]].set_title('Local SA for ' + tex + param_range)
plt.subplots_adjust(wspace=0.5, hspace=0.5)
# Same plot for Noggin (Nog) -- duplicated structure, only the file names
# and the legend position differ.
fig, axes = plt.subplots(nrows=2, ncols=4)
for sweep_file_name, idx, id_p, param_range, tex in zip(sweep_file_names,
                                                        base_id, plot_id,
                                                        sweep_param_range,
                                                        title_tex):
    r_Nog = np.loadtxt(os.path.join("sweep_array_" + sweep_file_name,
                                    "r_Nog.txt"),
                       dtype=np.float16)
    f_Nog = np.loadtxt(os.path.join("sweep_array_" + sweep_file_name,
                                    "f_Nog.txt"),
                       dtype=np.float16)
    r_Nog = np.reshape(r_Nog, (np.uint8(r_Nog.shape[0] / radius_res), radius_res))
    f_Nog = np.reshape(f_Nog, (np.uint8(f_Nog.shape[0] / radius_res), radius_res))
    r = r_Nog[0, :]
    mu1 = f_Nog.mean(axis=0)
    sigma1 = f_Nog.std(axis=0)
    prop = f_Nog[idx, :]
    # plot it!
    axes[id_p[0], id_p[1]].plot(r * 2, prop, lw=2, color='red', label="Proposed")
    axes[id_p[0], id_p[1]].fill_between(r * 2,
                                        mu1 + 2 * sigma1,
                                        mu1 - 2 * sigma1,
                                        facecolor='yellow',
                                        alpha=0.5)
    axes[id_p[0], id_p[1]].legend(loc='lower left')
    axes[id_p[0], id_p[1]].set_xlabel('Distance to center(' + r'$\mu$' + 'm)')
    axes[id_p[0], id_p[1]].set_ylabel('Concentration')
    axes[id_p[0], id_p[1]].set_title('Local SA for ' + tex + param_range)
plt.subplots_adjust(wspace=0.5, hspace=0.5)
plt.show()
| [
"fooladi.hosein@gmail.com"
] | fooladi.hosein@gmail.com |
4b75ff79b24443ddeed62399a83225f95c3e3c06 | 79fd11f27993880d4485b41b58efb063317d1b57 | /cam.py | 90dcb396357d673ba1b8bc0eafa07d5ad35aee17 | [] | no_license | jlcruz2318/one_files | 96421f1e66e979eda51325fc343acce97d2400e7 | f235fffeb5845b33ddfedb97f1bbd889e1468a34 | refs/heads/master | 2021-03-02T20:18:58.038421 | 2020-03-08T23:03:09 | 2020-03-08T23:03:09 | 245,902,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | import cv2
# import imutils # For rotating the frame
# import numpy as numpy
#
# car_cascade = cv2.CascadeClassifier('/home/checkdev/Documents/dev/Environments/open_spot/open_spot/haar_cascade_creation_process/data/cascade.xml')
# Capture from the default local camera (index 0); the commented-out URL is
# an alternative network MJPEG stream source.
cap = cv2.VideoCapture(0) #'http://85.237.63.165/mjpg/video.mjpg'
while True:
    ret, img = cap.read()
    # Stop cleanly when the capture fails (camera unplugged / stream ended)
    # instead of handing a None frame to imshow(), which would raise.
    if not ret:
        break
    # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # rotated_img = imutils.rotate(gray, -32)
    # cropped_img = rotated_img[200:310, 0:600]
    # cars = car_cascade.detectMultiScale(cropped_img, 2, 5)
    # for (x,y,w,h) in cars:
    #     cv2.rectangle(cropped_img, (x,y), (x+w, y+h), (0,0,255), 2)
    cv2.imshow('Source', img) #cropped_img)
    # Quit on 'q' (waitKey also services the GUI event loop).
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | jlcruz2318.noreply@github.com |
1cf3a8923943392ca3536149d1a7031a3a1e27ec | abad1268fa6e080f9e4202a42f13c1204ca32bff | /dict-list/langages.py | f9f626bd77bf5b7ccc980ff90bdbf9e2eae942b9 | [] | no_license | cKouT/pythonCourse | 11c7fd5c440eb1a5b25cbd4fe415fdd9f78fe621 | 6a2ba09e4b529644324418d26b83ff96b6aa5e27 | refs/heads/master | 2020-04-01T05:05:22.807427 | 2019-01-08T05:06:29 | 2019-01-08T05:06:29 | 152,889,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | favorite_languages = {
'jen': ['python', 'ruby'],
'sarah': ['c'],
'edward': ['ruby', 'go'],
'phil': ['python', 'haskell'],
}
# Print each person's favourite languages as an indented, title-cased list.
# ("langages" is the original author's spelling; the printed strings are
# runtime output and therefore left untouched.)
for name, langages in favorite_languages.items():
    print('\n' + name.title() + ' favorite langages are :')
    for langage in langages :
        print('\t- ' + langage.title())
"mm@MacBook-Air-de-MM.local"
] | mm@MacBook-Air-de-MM.local |
92574ce1c0c0561b2041e3bb666b9bf94681e71e | 8d45a9919651c57947d1e556100bd04016f5a30e | /nnpacktry1.py | 9d29282ac5ddc32936728c0d31cc2daf2d2cfb12 | [] | no_license | Adhi15/NN_pack_Numpy | 2daff6cbd15d8ad72bcc57b66213507dfadbd42d | 4e4dd87298bd4cd714df7c1d2123e4ead9c7c2da | refs/heads/master | 2021-01-03T00:04:18.863918 | 2020-02-11T17:46:43 | 2020-02-11T17:46:43 | 239,826,217 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | from Neural import Neural_class
import numpy as np
#x = np.array(([2, 9], [1, 5], [3, 6], [5, 10]), dtype=float) # input data
#y = np.array(([92], [86], [89]), dtype=float) # output
x=np.array(([1,0,1],[0,0,1]), dtype=float)
y=np.array([[1,1]]).T
# scale units
#x = x/np.amax(x, axis=0)
#y = y/100
# split data
#traindata = np.split(x, [3])[0] # training data
#testdata = np.split(x, [3])[1] # testing dat
traindata=x
testdata=np.array(([1,1,1]), dtype=float)
NN = Neural_class()
for i in range(1000):
print("# " + str(i) + "\n")
print("Input (scaled): \n" + str(traindata))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" + str(NN.forward(traindata)))
#print("Loss: \n" + str(np.mean(np.square(y - NN.forward(traindata)))))
print("\n")
NN.train(traindata, y)
NN.saveWeights()
print("Predicted data based on trained weights: ")
print("Input (scaled): \n" + str(testdata))
print("Output: \n" + str(NN.forward(testdata)))
| [
"noreply@github.com"
] | Adhi15.noreply@github.com |
0ccc2ecbd5e71c2b2e398add5c4b7f3d50bf6a8a | 21d2dd5267345a661d19240c8e45b7d2228ad992 | /imagedist/tools/utils.py | 0ce949c190242a4e23fac993b5eaa2f0906a2385 | [] | no_license | rd37/glintify | cff31b81c2a97d52032345fe288503cd494cb2b0 | 08b11bdffdbea700a89940267f0657d30ba8c6b7 | refs/heads/master | 2016-09-05T15:55:20.933827 | 2015-07-15T21:40:27 | 2015-07-15T21:40:27 | 38,902,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | '''
Created on May 15, 2014
@author: rd
'''
import base64
def encode(string, key):
    """Obfuscate *string* by prefixing *key* and base64-encoding the pair.

    NOTE(review): this is reversible obfuscation, not encryption -- anyone
    can base64-decode the token and read both key and payload.  Returns
    MIME-wrapped base64 bytes; ``encodebytes()`` replaces
    ``encodestring()``, which was removed in Python 3.9.
    """
    msg = "%s %s" % (key, string)
    return base64.encodebytes(msg.encode('utf-8'))
def decode(string, key):
    """Reverse :func:`encode`: base64-decode *string* and strip the key.

    *key* is accepted for interface symmetry but is not needed to decode.
    A single maxsplit keeps payloads containing spaces intact (the old
    ``split()[1]`` truncated "hello world" to "hello"); ``decodebytes()``
    replaces ``decodestring()``, which was removed in Python 3.9.
    """
    raw = string if isinstance(string, bytes) else string.encode('ascii')
    msg = base64.decodebytes(raw).decode('utf-8')
    return msg.split(' ', 1)[1]
| [
"ron.desmarais@gmail.com"
] | ron.desmarais@gmail.com |
e400441cfd426f16de3ef88f40728258f17a3346 | 044723b6f2968bbe45b2f985ec169ade910782ff | /Redis with Python/1-Shirt-Store/4-shirt-store/main.py | 277424201cb4a73f0319a0aa0b57ea28e7d38069 | [] | no_license | hikarihust/redis | ca9a60289de006ae9842b430c265c9920e328310 | 2e79d8c2dcc23c0818623e98f85bf1545dd0b66a | refs/heads/master | 2023-03-22T18:26:53.959482 | 2021-03-18T16:43:06 | 2021-03-18T16:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | import redis
r = redis.Redis()
def scan_keys(pattern, pos: int = 0) -> list:
    """Return every key matching *pattern* via cursor-based SCAN.

    Pages through the module-level connection ``r`` starting at cursor
    *pos* until the server reports cursor 0, accumulating each page's
    keys.
    """
    shirts = []
    while True:
        pos, val = r.scan(cursor=pos, match=pattern)
        shirts.extend(val)  # extend() instead of rebuilding the list each page
        if pos == 0:
            break
    return shirts
def buy_items(r: "redis.Redis", itemid) -> None:
    """Buy one unit of *itemid*: decrement quantity, bump nPurchased.

    Both counter updates go through a pipeline so they are sent to the
    server as one batch.  NOTE(review): there is no WATCH/MULTI around the
    read-check-update, so two concurrent buyers can oversell; the
    commented-out ``while True`` hints an optimistic-locking retry loop
    was removed.  (The parameter annotation is a string so the function
    definition does not evaluate the ``redis`` name.)
    """
    pipe = r.pipeline()
    # while True:
    nleft = r.hget(itemid, "quantity")
    # hget() returns None when the item/field is missing; compare
    # numerically -- the old byte comparison (nleft > b"0") was
    # lexicographic and crashed with TypeError on None.
    if nleft is not None and int(nleft) > 0:
        pipe.hincrby(itemid, "quantity", -1)
        pipe.hincrby(itemid, "nPurchased", 1)
        pipe.execute()
    else:
        print("Sorry ", itemid, "out of stock")
    return None
# Demo run: list every shirt key, then buy one unit of the first match.
# NOTE(review): shirts[0] raises IndexError when no 'shirt:*' keys exist.
shirts = scan_keys("shirt:*")
# print(shirts)
buy_items(r, shirts[0])
| [
"vudinhquangk53@gmail.com"
] | vudinhquangk53@gmail.com |
0d3b7ba30d941e178d3f4b5ccff2bae920b29186 | 3a012f9ebb2ce44f163ff257ea7ae6082631ac1c | /Algorithm_basic/venv/bin/pip3 | 1663f6eac26421771ec3ca1f2dca4a87044c5774 | [] | no_license | IMSOYUL/Algorithm | 39a967c1b2153d6a091912053c8651320d1823e8 | 193c03f6ada1f133e22f3e1e03c81c01767ce3ee | refs/heads/master | 2023-03-22T12:41:07.070930 | 2021-03-22T15:27:44 | 2021-03-22T15:27:44 | 347,569,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | #!/Users/soyulim/Documents/Repository/Algorithm/Algorithm/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ('-script.py'/'-script.pyw'/'.exe')
    # from argv[0] so pip reports a clean program name, then run pip's CLI
    # and exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"soyulim26@naver.com"
] | soyulim26@naver.com | |
1a5ec117ab6068d2068acf95633fa13c7cd79e8d | 6ad61ba6e0ed188ffcda11bff93f94f4f86b2b4f | /values/src/models/python/run_test_doors.py | 458a69b30295163eec02bf3bd296651230d6ab3d | [] | no_license | gestom/metro_models | c9dff150b82d805ab67c5f244952ab9e1820413e | b1a7fbb2bc2a1987551fed6c262009689f9f3995 | refs/heads/master | 2022-08-01T05:49:02.070168 | 2020-05-06T06:56:29 | 2020-05-06T06:56:29 | 261,673,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | #!/usr/bin/env python2
import numpy as np
import python_module as pm
#import tested_doors_python_module as pm
import dataset_io as dio
#c = dio.loading_data('../data/training_two_weeks_01.txt')
#c = dio.loading_data('../data/10_weeks_doors.txt')
#c = dio.loading_data('../data/training_data.txt')
c = dio.loading_data('../../../data/witham/training_data.txt')
#a = np.array([0, 7200, 14400, 21600, 28800, 36000, 43200, 50400, 57600, 64800, 72000, 79200])
#b = np.array([0,0,1,1,0,0,1,1,0, 0,1, 1])
#c = np.c_[a, b]
"""
with open('../data/data.txt', 'r') as f:
i = 0
for line in f:
#print(line)
b = np.array(map(float, list(line[1:-2].split(', '))))
a = np.arange(len(b)) * 600
#print('a: ' + str(np.shape(a)))
#print('b: ' + str(np.shape(b)))
c = np.c_[a, b]
#print('c: ' + str(np.shape(c)))
#print(c)
i += 1
model = pm.python_function_update(c)
if i == 14:
break
else:
print('feature no.: ' + str(i))
#print(list(b))
"""
# Exercise the model round trip: update (train) on the loaded data, save to
# disk, reload, flatten to an array and rebuild -- each step is printed so
# the failure point is visible ('prosel' is Czech for 'passed').
model = pm.python_function_update(c)
print('update prosel')
pm.python_function_save(model, 'save_pokus')
print('save prosel')
model = pm.python_function_load('save_pokus')
print('load prosel')
out_array = pm.python_function_model_to_array(model)
print('array prosel')
model = pm.python_function_array_to_model(out_array)
print('model zrekonstruovan')
# Dump the model components -- presumably (C, COV, DI, structure, k), per
# the commented example further below; confirm against python_module.
print(model[0])
print(model[1])
print(model[2])
print(model[3])
print(model[4])
# Predict at ten timestamps past the end of the training data.
# (xrange: this script targets Python 2, per its shebang.)
for i in xrange(10):
    print(pm.python_function_estimate(model, c[-1, 0] + i * 80000))
"""
C = np.arange(20).reshape((5, 4))
COV = np.arange(20).reshape((5, 2, 2))
DI = np.arange(5).reshape((5, 1))
structure = [0, [1.0, 1.0], [86400.0, 640000.0]]
k = 6
out_array = pm.python_function_model_to_array((C, COV, DI, structure, k))
out_model = pm.python_function_array_to_model(out_array)
print(C)
print(out_model[0])
print(COV)
print(out_model[1])
print(DI)
print(out_model[2])
print(structure)
print(out_model[3])
print(k)
print(out_model[4])
"""
| [
"tomas.krajnik@agents.fel.cvut.cz"
] | tomas.krajnik@agents.fel.cvut.cz |
55f487b60d2251c28e008b922dfcafb4607080cd | 8506f0a22ef4edf03627951ced530b921ff4d383 | /tools/route/vehicle2flow.py | ff7551f7e8f38a076b69a09947de2cfebd87b2c8 | [] | no_license | deepak728/Traffic-Optimization- | fb0ac074fa601e524eb0d79defc7e8b84ab03138 | 85bc54de2e318f36bdcc5bb6f05badde0fb35ffe | refs/heads/master | 2020-03-29T23:29:36.740048 | 2018-11-12T09:19:17 | 2018-11-12T09:19:17 | 150,475,374 | 1 | 1 | null | 2018-11-12T09:19:19 | 2018-09-26T18:57:35 | Java | UTF-8 | Python | false | false | 2,522 | py | #!/usr/bin/env python
"""
@file vehicle2flow.py
@author Michael Behrisch
@date 2012-11-15
@version $Id: vehicle2flow.py 18096 2015-03-17 09:50:59Z behrisch $
This script replaces all vehicle definitions in a route file by
flow definitions, adding an XML entity for the repeat interval for
easy later modification.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2013-2015 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
import sys
import os
import re
from optparse import OptionParser
def parse_args():
USAGE = "Usage: " + sys.argv[0] + " <routefile> [options]"
optParser = OptionParser()
optParser.add_option("-o", "--outfile", help="name of output file")
optParser.add_option(
"-r", "--repeat", default=1000, type="float", help="repeater interval")
optParser.add_option(
"-e", "--end", default=2147483, type="float", help="end of the flow")
optParser.add_option("-w", "--with-entities", action="store_true",
default=False, help="store repeat and end as entities")
options, args = optParser.parse_args()
try:
options.routefile = args[0]
except:
sys.exit(USAGE)
if options.outfile is None:
options.outfile = options.routefile + ".rou.xml"
return options
def main():
options = parse_args()
with open(options.routefile) as f:
with open(options.outfile, 'w') as outf:
headerSeen = False
for line in f:
if options.with_entities:
if "<routes " in line or "<routes>" in line:
outf.write("""<!DOCTYPE routes [
<!ENTITY RepeatInterval "%s">
<!ENTITY RepeatEnd "%s">
]>
""" % (options.repeat, options.end))
line = re.sub(
r'<vehicle(.*)depart( ?= ?"[^"]*")', r'<flow\1begin\2 end="&RepeatEnd;" period="&RepeatInterval;"', line)
else:
line = re.sub(
r'<vehicle(.*)depart( ?= ?"[^"]*")', r'<flow\1begin\2 end="%s" period="%s"' % (options.end, options.repeat), line)
line = re.sub(r'</vehicle>', '</flow>', line)
outf.write(line)
if __name__ == "__main__":
main()
| [
"deepak711998@gmail.com"
] | deepak711998@gmail.com |
dea1826bd3fe548eecfeb6264920558ad7b023ad | d177addc1830153404c71fa115a5584f94a392c3 | /N1021_RemoveOutermostParentheses.py | 56fd46aaf5a5ba63d4953f3d11c86731dafb6c96 | [] | no_license | zerghua/leetcode-python | 38a84452f60a360e991edf90c8156de03a949000 | 02726da394971ef02616a038dadc126c6ff260de | refs/heads/master | 2022-10-25T11:36:22.712564 | 2022-10-02T19:56:52 | 2022-10-02T19:56:52 | 61,502,010 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | #
# Create by Hua on 5/4/22.
#
"""
A valid parentheses string is either empty "", "(" + A + ")", or A + B, where A and B are valid parentheses strings, and + represents string concatenation.
For example, "", "()", "(())()", and "(()(()))" are all valid parentheses strings.
A valid parentheses string s is primitive if it is nonempty, and there does not exist a way to split it into s = A + B, with A and B nonempty valid parentheses strings.
Given a valid parentheses string s, consider its primitive decomposition: s = P1 + P2 + ... + Pk, where Pi are primitive valid parentheses strings.
Return s after removing the outermost parentheses of every primitive string in the primitive decomposition of s.
Example 1:
Input: s = "(()())(())"
Output: "()()()"
Explanation:
The input string is "(()())(())", with primitive decomposition "(()())" + "(())".
After removing outer parentheses of each part, this is "()()" + "()" = "()()()".
Example 2:
Input: s = "(()())(())(()(()))"
Output: "()()()()(())"
Explanation:
The input string is "(()())(())(()(()))", with primitive decomposition "(()())" + "(())" + "(()(()))".
After removing outer parentheses of each part, this is "()()" + "()" + "()(())" = "()()()()(())".
Example 3:
Input: s = "()()"
Output: ""
Explanation:
The input string is "()()", with primitive decomposition "()" + "()".
After removing outer parentheses of each part, this is "" + "" = "".
Constraints:
1 <= s.length <= 105
s[i] is either '(' or ')'.
s is a valid parentheses string.
"""
class Solution(object):
def removeOuterParentheses(self, s):
"""
:type s: str
:rtype: str
thought: find the primitive decomposition(by using a counter),
and remove its outer parentheses of each part and return.
(())
0123
05/04/2022 10:29 Accepted 35 ms 13.8 MB python
easy 5 - 10min. need to understand the problem
"""
i, j = 0,0
counter = 0
ret = list()
for j in range(len(s)):
if s[j] == "(":
counter += 1
else:
counter -= 1
if counter == 0:
ret.append(s[i+1:j]) # remove outer parentheses
i = j+1
return "".join(ret)
| [
"zerghua@gmail.com"
] | zerghua@gmail.com |
c58789f7cc681e3c317353239849905c06561d1d | 637955280b2740f73d4fba5c01f0d6cf740b914b | /makeVideoFull/htmlToImageMulti.py | b19055577178510a27b2d0890087e4c14c5cf452 | [] | no_license | dereklowlind/pythonCreateVideo | 8c908dbd119841cb872a2833718e0b2ab6c6a606 | e147ba302d9cb8bb3f51879f5272e6c850c59eb7 | refs/heads/master | 2022-04-14T13:30:59.095220 | 2020-04-11T18:59:35 | 2020-04-11T18:59:35 | 231,717,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,461 | py | # https://pypi.org/project/imgkit/
import imgkit
import os
import json
import multiprocessing
currentDir = os.getcwd() + "/"
header='<html><body>'
footer='</body></html>'
css='''<link rel="stylesheet" href="''' + currentDir + "htmlToImage.css" + '''">'''
def subHTML(subAuthor, subTitle, subScore):
subHTML = header + css + '''
<div class="textbox-wrapper">
<div class="flex-container">
<div class="arrows-and-line">
<img src="''' + currentDir + "reddit-uparrow.png" + '''" class="votearrow"/>
<a class="scorestyle">''' + subScore + '''</a>
<img src="''' + currentDir + "reddit-downarrow.png" + '''" class="votearrow"/>
</div>
<div>
<a class="subAuthorstyle">Posted by u/'''+ subAuthor +'''</a>
<p class="subTitlesyle">''' + subTitle + '''</p>
</div>
</div>
</div>
''' + footer
return subHTML
def comHTML(entryAuthor, entryBody, level):
extraLevelTabs = '<div class="linestyle"></div>' * (level -1)
commentHTML= '''
<div>
<div class="flex-container">
''' + extraLevelTabs + '''
<div class="arrows-and-line">
<img src="''' + currentDir + "reddit-uparrow.png" + '''" class="votearrow"/>
<img src="''' + currentDir + "reddit-downarrow.png" + '''" class="votearrow"/>
<div class="linestyle"></div>
</div>
<div>
<a class="comAuthorstyle">'''+ entryAuthor +'''</a>
<p class="paragraphstyle">'''+ entryBody +''' </p>
</div>
</div>
<div>
'''
return commentHTML
def createSubmissionImage(sub, directoryName):
fileName = "./" + directoryName + "/submission.jpg"
subScoreInt = int(sub["score"])
subScoreStr = sub["score"]
if subScoreInt >= 1000:
subScoreInt = format((subScoreInt/1000), '.1f')
subScoreStr = str(subScoreInt) +"K"
make = subHTML(sub["author"],sub["title_vis"],subScoreStr)
imgkit.from_string(make, fileName)
def callImgkit(make, fileName):
imgkit.from_string(make, fileName)
print("made: ", fileName, "\n")
def createCommentImages(com, directoryName):
jobs = []
for i in range(len(com)):
if com[i]["isChecked"]:
fileName = "./" + directoryName + "/" + str(i) + ".jpg"
if com[i]["level"] > 1:
make = header + css + '<div class="textbox-wrapper">'
start = i - com[i]["level"] + 1 # step up the comment tree to hopefully get the parent comment
while(com[start]["level"] > 1): # if comment tree goes 1,2,3,4,2 this is needed
start -= 1 # step up the tree
end = i + 1 # so range hits i
for j in range(start, end):
if com[j]["isChecked"]:
make += comHTML(com[j]["author"],com[j]["body_vis"],com[j]["level"])
make += '</div>' + footer
else:
make = header + css + '<div class="textbox-wrapper">' + comHTML(com[i]["author"],com[i]["body_vis"],com[i]["level"]) + '</div>' + footer
# imgkit.from_string(make, fileName)
p = multiprocessing.Process(target=callImgkit, args=(make, fileName))
jobs.append(p)
p.start()
# make sure all the jobs finish before going on to the next task
for job in jobs:
job.join()
def fix_string(data_str):
# fix the unicode quote
data_str = data_str.replace("’", "'")
data_str = data_str.replace("‘", "'")
data_str = bytes(data_str, 'utf-8').decode('utf-8', 'ignore')
return data_str
# fix the weird unicode quotes and maybe later swearing
def fix_data(data):
data["submissionData"]["title_vis"] = fix_string(data["submissionData"]["title_vis"])
data["submissionData"]["body_vis"] = fix_string(data["submissionData"]["body_vis"])
for com in data["commentsData"]:
com["body_vis"] = fix_string(com["body_vis"])
return data
def createImages(data, directoryName):
fix_data(data)
# make sure directory exists
if not os.path.exists(directoryName):
os.makedirs(directoryName)
# create submission image
createSubmissionImage(data["submissionData"], directoryName)
# create comment images
createCommentImages(data["commentsData"], directoryName)
| [
"dereklowlind@gmail.com"
] | dereklowlind@gmail.com |
a1b10101485d4c56b605723ab150bbf36fd6426a | ac2e77c80245ec149a313ab8199548004ca8e077 | /ejercicio1/src/ejercicio1.py | d11d0bbfd3d7cce8b462a829ceb71f86998b0027 | [] | no_license | terly18/ejercicio2 | 9e876f722013d9ff4ca619cadb30081af18edc8b | fdd91ed67adf9513675db840406d2cb15b4643d8 | refs/heads/master | 2020-12-31T04:29:20.481756 | 2015-10-07T20:17:29 | 2015-10-07T20:17:29 | 43,841,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
__author__ = "TERLY"
__date__ = "$07/10/2015 03:16:35 PM$"
if __name__ == "__main__":
print "Hello World"
ht = input('Introduzca horas trabajadas: ')
if ht > 40:
he = ht - 40
ss = he * 20 + 40 * 16
else:
ss = ht*16
print ((' el salario semanal es :'), ss) | [
"TERLY@Internet.home"
] | TERLY@Internet.home |
e1281032648d87d9251e40bff75574afcb0cf2a7 | 4bdd93a191923f31f5b6e6a5a58161e36dcb5f5b | /HTTP Web Servers & Clients/8_ParallelServer.py | 5d9f585e048930ebdc61b52e239f61b0db59511b | [] | no_license | starryxy/Full-Stack-Web-Development | 5c9e22eea92326fd57b440126740716714315a02 | ab009740960f36ec0be2fa6668f48eb59e8129a7 | refs/heads/master | 2023-03-08T03:49:06.015043 | 2021-02-25T08:12:43 | 2021-02-25T08:12:43 | 256,141,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,582 | py | #!/usr/bin/env python3
#
# A web server that demonstrates the number of simultaneous connections (up to 6) your browser opens to a web server.
# Similar to: https://http2.golang.org/gophertiles
#
# Run Parallelometer.py in your terminal, then open http://localhost:8000.
# You should see 16 small frames in the window, each of which displays the number of active connections that the server is handling at that moment.
import http.server
from socketserver import ThreadingMixIn
import threading
import time
import random
# HTML main page. This page has 16 iframes, each of which causes
# the browser to send an additional request to this server.
# Each iframe will display the number of currently active requests.
html = '''<!DOCTYPE html>
<title>Things!</title>
<style>iframe { width: 23%; border: 0 }</style>
<iframe src="/frame0"></iframe> <iframe src="/frame1"></iframe>
<iframe src="/frame2"></iframe> <iframe src="/frame3"></iframe>
<iframe src="/frame4"></iframe> <iframe src="/frame5"></iframe>
<iframe src="/frame6"></iframe> <iframe src="/frame7"></iframe>
<iframe src="/frame8"></iframe> <iframe src="/frame9"></iframe>
<iframe src="/framea"></iframe> <iframe src="/frameb"></iframe>
<iframe src="/framec"></iframe> <iframe src="/framed"></iframe>
<iframe src="/framee"></iframe> <iframe src="/framef"></iframe>
'''
# Track the number of requests that are in progress.
# This variable will get +1 every time a handler starts processing
# a request, and -1 every time it finishes.
inflight = 0
# To protect the inflight variable from being changed from multiple
# request handlers at once, we need to use a lock.
lock = threading.Lock()
class Parallelometer(http.server.BaseHTTPRequestHandler):
def do_GET(self):
global inflight, lock
with lock:
# starting to handle a request
inflight += 1
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
if self.path.startswith('/frame'):
# iframe contents
time.sleep(random.random()) # Slow down by 0-1 seconds
self.wfile.write('{} requests in flight'.format(inflight).encode())
else:
# main page
self.wfile.write(html.encode())
self.wfile.flush()
with lock:
# finish a request
inflight -= 1
class ThreadHTTPServer(ThreadingMixIn, http.server.HTTPServer):
pass
if __name__ == '__main__':
address = ('', 8000)
httpd = ThreadHTTPServer(address, Parallelometer)
httpd.serve_forever()
| [
"starryxy311@gmail.com"
] | starryxy311@gmail.com |
5ba4b13c2b6ab53cfc4147be0638833f789d1d25 | 317f62189c63646f81198d1692bed708d8f18497 | /tools/tests.py | 627914a5e3681894fa1df2afd57d3a6fa2b32ba3 | [
"MIT"
] | permissive | mit-carbon/Graphite-Cycle-Level | 8fb41d5968e0a373fd4adbf0ad400a9aa5c10c90 | db3f1e986ddc10f3e5f3a5d4b68bd6a9885969b3 | refs/heads/master | 2021-01-25T07:08:46.628355 | 2011-11-23T08:53:18 | 2011-11-23T08:53:18 | 1,930,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,190 | py | #!/usr/bin/env python
import sys
import os
sys.path.append("./tools/")
from schedule import *
# job info
machines = [
"cagnode1",
"cagnode1",
"cagnode2",
"cagnode2",
"cagnode3",
"cagnode3",
"cagnode4",
"cagnode4",
"cagnode8",
"cagnode8",
"cagnode9",
"cagnode9"
]
results_dir = "./results/64-core"
cfg_file = "carbon_sim_64.cfg"
benchmarks = ["fft", "radix", "barnes", "ocean_contiguous", "fmm", "lu_contiguous"]
commands = ["./tests/benchmarks/fft/fft -p64 -m16",
"./tests/benchmarks/radix/radix -p64",
"./tests/benchmarks/barnes/barnes \< ./tests/benchmarks/barnes/input",
"./tests/benchmarks/ocean_contiguous/ocean_contiguous -p64",
"./tests/benchmarks/fmm/fmm \< ./tests/benchmarks/fmm/inputs/input.65536",
"./tests/benchmarks/lu_contiguous/lu_contiguous -p64"]
protocols = ["ackwise","full_map","limited_no_broadcast"]
networks = ["emesh","atac"]
hardware_sharer_count = 4
jobs = []
jobs.append(LocalJob(12, "echo Starting"))
for benchmark, command in zip(benchmarks, commands):
for protocol in protocols:
for network in networks:
sim_flags = "-c %s/%s --general/total_cores=64 --general/enable_shared_mem=true --network/memory_model_1=finite_buffer_%s --perf_model/dram_directory/directory_type=%s --perf_model/dram_directory/max_hw_sharers=%i" % (results_dir, cfg_file, network, protocol, hardware_sharer_count)
sub_dir = "%s-%s-%s" % (benchmark, protocol, network)
jobs.append(MakeJob(1, command, results_dir, sub_dir, sim_flags, "pin"))
jobs.append(LocalJob(12, "echo Finished"))
# jobs = [
# LocalJob(1, "echo Starting..."),
# MakeJob(1, command, results_dir, "1", sim_flags),
# MakeJob(1, command, results_dir, "2", sim_flags),
# MakeJob(1, command, results_dir, "3", sim_flags),
# MakeJob(1, command, results_dir, "4", sim_flags),
# LocalJob(1, "echo Finished."),
# ]
#
# uncomment this line to kill all the fmms
#jobs = [ SpawnJob(12, "killall -9 fmm") ]
# init
try:
os.makedirs(results_dir)
except OSError:
pass
shutil.copy(cfg_file, results_dir)
# go!
schedule(machines, jobs)
| [
"gkurian@mit.edu"
] | gkurian@mit.edu |
4be0387bdd3dcbb7b273306ff07ab6ce8ab18706 | c1830dfe57134bb80f249799e68ecb7e549e51ee | /setup.py | 33ae6f9b878789850cc43a3856fdd35606aa1b7b | [
"MIT"
] | permissive | NIRISS/CARBS | e649f977601fe622fbfcfa713bc29e8cac2b8d70 | 3e5dba75057a1a08217f0a5270e8e5becea1f547 | refs/heads/master | 2021-01-03T01:46:37.540664 | 2020-02-24T19:29:46 | 2020-02-24T19:29:46 | 239,862,996 | 3 | 1 | MIT | 2020-02-24T19:29:48 | 2020-02-11T20:57:33 | Python | UTF-8 | Python | false | false | 896 | py | from setuptools import setup
import glob
import os
setup(
name="CARBS",
version="0.0.1",
author="CANUCS team",
author_email="kartheik.iyer@dunlap.utoronto.ca",
url = "https://github.com/NIRISS/CARBS",
packages=["CARBS"],
description="CANUCS Adaptive Resolved Bayesian SED-Fitting",
long_description=open("README.md").read(),
package_data={"": ["README.md", "LICENSE"], "CARBS": ["filters/*.*", "data/*.*"]},
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
],
install_requires=["matplotlib", "numpy", "scipy", "george", "sklearn", "dense_basis", "grizli", "emcee"]
)
| [
"iyer@protopotato.dunlap.utoronto.ca"
] | iyer@protopotato.dunlap.utoronto.ca |
238aa80ab45b7cae0526a4d89d3fbf976d2fda9f | 4c0241f2190117ed0738d2edf429ed8bc7a711a7 | /main_app/views.py | 69e34f746f420366a12fe213487c0640402be85c | [] | no_license | MTnes/ResearchSiteAPI | ba3359abb1d2179d8b1fb37b93f02df15df60349 | f6b31cd89d60a95ee8bcc721cbed46997fce89a9 | refs/heads/master | 2023-04-07T23:00:35.038134 | 2021-04-21T03:16:12 | 2021-04-21T03:16:12 | 360,005,831 | 0 | 0 | null | 2021-04-21T02:35:18 | 2021-04-21T02:12:19 | Python | UTF-8 | Python | false | false | 1,086 | py | from django.shortcuts import render
from rest_framework import viewsets
from main_app.models import Research, Member, Contact, Publication, People, Faculty
from main_app.serializers import ResearchSerializer, MemberSerializer, ContactSerializer, PublicationSerializer, PeopleSerializer, FacultySerializer
# Create your views here.
class ResearchViewSet(viewsets.ModelViewSet):
queryset = Research.objects.all()
serializer_class = ResearchSerializer
class MemberViewSet(viewsets.ModelViewSet):
queryset = Member.objects.all()
serializer_class = MemberSerializer
class ContactViewSet(viewsets.ModelViewSet):
queryset = Contact.objects.all()
serializer_class = ContactSerializer
class PublicationViewSet(viewsets.ModelViewSet):
queryset = Publication.objects.all()
serializer_class = PublicationSerializer
class PeopleViewSet(viewsets.ModelViewSet):
queryset = People.objects.all()
serializer_class = PeopleSerializer
class FacultyViewSet(viewsets.ModelViewSet):
queryset = Faculty.objects.all()
serializer_class = FacultySerializer
| [
"mayank.nitdgp@gmail.com"
] | mayank.nitdgp@gmail.com |
6e09631d948fcdde8b155374989c53c9c54e3a86 | 16f79ae730b4a25c583d07993c6ee6f7b3b4ff32 | /posthog/models/filters/mixins/session_recordings.py | afa9a359e1d917e309518f2eecfb57f072ba75e7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dmt3o/posthog | 36c8833fcc996d912a535ba00d2a80192d0a46a1 | b19a6be9a882e784657018c873692805388f65b8 | refs/heads/master | 2023-08-24T00:05:12.773630 | 2021-10-12T10:11:39 | 2021-10-12T10:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 905 | py | import json
from typing import List, Optional
from posthog.constants import PERSON_UUID_FILTER, SESSION_RECORDINGS_FILTER_TYPE_DURATION
from posthog.models.entity import Entity
from posthog.models.filters.mixins.common import BaseParamMixin
from posthog.models.filters.mixins.utils import cached_property
from posthog.models.property import Property
class PersonUUIDMixin(BaseParamMixin):
@cached_property
def person_uuid(self) -> Optional[str]:
return self._data.get(PERSON_UUID_FILTER, None)
class SessionRecordingsMixin(BaseParamMixin):
@cached_property
def recording_duration_filter(self) -> Optional[Property]:
duration_filter_data_str = self._data.get(SESSION_RECORDINGS_FILTER_TYPE_DURATION, None)
if duration_filter_data_str:
filter_data = json.loads(duration_filter_data_str)
return Property(**filter_data)
return None
| [
"noreply@github.com"
] | dmt3o.noreply@github.com |
5e61cef6781fe583d65c2d7c29335a830508df84 | b03ba4b0a41b0fea2b8e7eb7a08a386391c6784a | /data_processing.py | 7cd690a0def911dac07d2ab495c7cf10aa688359 | [] | no_license | twistedmove/speech-music-noise-classification-using-pytorch | 9f599ac5ff5d11dcbddca0ec1ebeb85f840f58e9 | efe4c13991de7539f57447da2ca845ef85365884 | refs/heads/master | 2022-03-27T18:20:11.084188 | 2020-01-10T06:01:48 | 2020-01-10T06:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 14:27:49 2019
@author: Krishna
"""
import glob
import os
import soundfile as sf
import random
class_id = {'music':0,'speech':1,'noise':2}
test_data=[]
train_data=[]
###
root_data = '/Users/apple/Downloads/musan/'
all_folders = glob.glob(root_data+'/*/')
for class_folder in all_folders:
class_name = class_folder.split('/')[-2]
sub_folders = sorted(glob.glob(class_folder+'/*/'))
per_class_files = []
for sub_folder in sub_folders:
all_files = sorted(glob.glob(sub_folder+'/*.wav'))
for file_path in all_files:
per_class_files.append(file_path)
test_samples = random.sample(range(len(per_class_files)),int(len(per_class_files)*0.1))
for i in test_samples:
test_data.append(per_class_files[i])
for i in range(len(per_class_files)):
if i in test_samples:
continue
else:
train_data.append(per_class_files[i])
#######
fid_train = open('training.txt','w')
fid_test = open('testing.txt','w')
for filepath in train_data:
audio_data,fs = sf.read(filepath)
dur = len(audio_data)/float(fs)
if dur <0.5:
continue
class_name = filepath.split('/')[-3]
to_write = filepath+' '+str(class_id[filepath.split('/')[-3]])
fid_train.write(to_write+'\n')
fid_train.close()
for filepath in test_data:
audio_data,fs = sf.read(filepath)
dur = len(audio_data)/float(fs)
if dur <0.5:
continue
class_name = filepath.split('/')[-3]
to_write = filepath+' '+str(class_id[filepath.split('/')[-3]])
fid_test.write(to_write+'\n')
fid_test.close()
| [
"noreply@github.com"
] | twistedmove.noreply@github.com |
0534dcf3b1418023ed435cd619280991e22fba4a | 83662b44e34e6ea7f6f6b1aefc19376430518eaf | /2017/January/January 2017 Prob 3/2017_January_3_cowtip.py | ef45ee8870fecb2d34ce12091f2efc7b4a975aea | [] | no_license | DaChosens1/usaco | 830d2db9674e1589c5d2e411a4fb68416db9316f | ad757ebbb4c58cfdf15d2d21f7f296860bd1aca1 | refs/heads/master | 2020-06-12T22:15:22.269144 | 2020-04-23T01:16:31 | 2020-04-23T01:16:31 | 194,443,230 | 0 | 1 | null | 2020-03-29T13:29:40 | 2019-06-29T19:51:20 | Python | UTF-8 | Python | false | false | 985 | py | # time = 38 min
def open_file():
fin = open('cowtip.in')
grid = []
count = 1
for line in fin:
line = line.strip()
if count != 1:
grid.append(map(lambda x: int(x), [char for char in line]))
count += 1
fin.close()
return grid
def follow_through(grid):
usages = 0
for row in map(lambda x: -x, range(1, len(grid)+1)):
for column in map(lambda x: -x, range(1, len(grid[row]) + 1)):
if grid[row][column] == 1:
for r in range(len(grid) + 1 + row):
for c in range(len(grid) + 1 + column):
if grid[r][c] == 0:
grid[r][c] = 1
elif grid[r][c] == 1:
grid[r][c] = 0
usages += 1
return usages
def close_file(answer):
fout = open("cowtip.out", "w")
fout.write("{}".format(answer))
fout.close()
close_file(follow_through(open_file()))
| [
"48633888+DaChosens1@users.noreply.github.com"
] | 48633888+DaChosens1@users.noreply.github.com |
28c23d84801732864e713cf2e32b5b18fe8b64d5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03263/s797363058.py | 11d76295358e004207ab28a722a47f8e9f184035 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | h,w=map(int,input().split())
a=[list(map(int,input().split()))for i in range(h)]
l=[]
for i in range(h):
for j in range(w):
if a[i][j]%2:
if i+1<h:a[i][j]-=1;a[i+1][j]+=1;l.append((i+1,j+1,i+2,j+1))
elif j+1<w:a[i][j]-=1;a[i][j+1]+=1;l.append((i+1,j+1,i+1,j+2))
print(len(l))
for i in l:print(*i) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8cf5ce892f428e48e3abfb5625ed1b6dd0cb9d3f | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /kernel/components/lr/vertlr/sync/paillier_keygen_sync.py | da41304b45aa8e8d503bb7710a61eb00a54f7891 | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,255 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kernel.security.encrypt import PaillierEncrypt
from kernel.utils import consts
class Promoter(object):
# noinspection PyAttributeOutsideInit
def register_paillier_keygen(self, transfer_variables):
self._pubkey_transfer = transfer_variables.paillier_pubkey
def paillier_keygen_and_broadcast(self, key_length, suffix=tuple()):
cipher = PaillierEncrypt()
cipher.generate_key(key_length)
pub_key = cipher.get_public_key()
self._pubkey_transfer.remote(obj=pub_key, role=consts.PROVIDER, idx=-1, suffix=suffix)
return cipher
class Provider(object):
# noinspection PyAttributeOutsideInit
def register_paillier_keygen(self, transfer_variables):
self._pubkey_transfer = transfer_variables.paillier_pubkey
def gen_paillier_cipher(self, suffix=tuple(), member_id_list=None):
pubkey = self._pubkey_transfer.get(idx=0, suffix=suffix, member_id_list=member_id_list)
cipher = PaillierEncrypt()
cipher.set_public_key(pubkey)
return cipher
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
c9a7bef6c5fef4bc8b0f998418d8c79811ff9f88 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1419+081/sdB_PG_1419+081_lc.py | 054636153e0b4714c718289e48573f4b4fcb18da | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[215.409417,7.888806], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1419+081 /sdB_PG_1419+081_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
01f0c7fff0e04ce63816a718517b6e29fac55a26 | e1d4aa59acbd920e729cd9ab6c42c522d726593a | /lists.py | e03d67782b8c17a9a8995ad01b9471570604784d | [] | no_license | karinasamohvalova/OneMonth_python | 25ebc34635f2fef73b7128ca5c29deb5a280929c | 0f6eb0e5fbf4b941e96336cc4aa40440d7678f7d | refs/heads/main | 2023-02-24T19:38:28.010057 | 2021-01-25T21:29:07 | 2021-01-25T21:29:07 | 332,419,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | the_count = [1, 2, 3, 4, 5, 6]
the_count.append(7)
the_count.append(8)
the_count.append(9)
the_count.append(10)
# for figure in range(1, 11):
for figure in the_count:
print("this is count", figure * figure)
stocks = ["APPL", "GOOG", "TSLA"]
for stock in stocks:
print("Stock ticker:", stock)
#we can go through mixed lists too
random_things = ["puppies", 35, "ivan", 1/2, ["On no", "A list inside the list"]]
for i in random_things:
print("Here's a random thing:", i)
people = []
people.append("Mattan")
people.append("Sarah")
people.append("Chris")
print(people)
people.remove("Sarah")
print(people)
for person in people:
print("Person is:", person)
animals = ['bear', 'tiger', 'penguin', 'zebra']
first_animal = animals[0]
print(first_animal)
third_animal = animals[2]
print(third_animal)
print("There are this many things:", len(random_things))
print("this is a:", type(random_things))
another_list = random_things[0]
print(another_list)
print(type(another_list))
another_list_one = random_things[-1]
print(another_list_one)
print(type(another_list_one)) | [
"noreply@github.com"
] | karinasamohvalova.noreply@github.com |
8e48fe4165aa9ba87947dfc5d2f95c57560f0c2f | 79ef92e78582078ab7c3b262a2040b343f15eaf2 | /cs61a/project/hog/hog.py | 341dca1f11e1c08e6fd48115336490e785a006a6 | [] | no_license | markencoder/CS61A | bac3b07dc94ceffa342181e2ce2db664b9bbad03 | 324f92169cf3bf2f502b77958578dd1c14dcbe62 | refs/heads/master | 2020-04-29T20:38:31.242257 | 2019-03-19T00:14:17 | 2019-03-19T00:16:41 | 176,389,526 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,885 | py | """CS 61A Presents The Game of Hog."""
from dice import six_sided, four_sided, make_test_dice
from ucb import main, trace, interact
GOAL_SCORE = 100 # The goal of Hog is to score 100 points.
######################
# Phase 1: Simulator #
######################
def roll_dice(num_rolls, dice=six_sided):
    """Simulate rolling the DICE exactly NUM_ROLLS > 0 times and return the
    sum of the outcomes, unless any individual outcome is a 1, in which case
    return 1 (the Pig Out rule).

    num_rolls: The number of dice rolls that will be made.
    dice: A function that simulates a single dice roll outcome.
    """
    # These assert statements ensure that num_rolls is a positive integer.
    assert type(num_rolls) == int, 'num_rolls must be an integer.'
    assert num_rolls > 0, 'Must roll at least once.'
    # BEGIN PROBLEM 1
    # Roll every die up front: all NUM_ROLLS outcomes must be consumed even
    # when a 1 appears, so a stateful (test) dice function advances correctly.
    outcomes = [dice() for _ in range(num_rolls)]
    return 1 if 1 in outcomes else sum(outcomes)
    # END PROBLEM 1
def free_bacon(score):
    """Return the points scored from rolling 0 dice (Free Bacon): one more
    than the smaller of the two digits of the opponent's score.  A score
    below 10 has a tens digit of 0, so the award is then always 1.

    score: The opponent's current score.
    """
    assert score < 100, 'The game should be over.'
    # BEGIN PROBLEM 2
    # score < 100, so score // 10 is the tens digit and score % 10 the ones.
    return min(score % 10, score // 10) + 1
    # END PROBLEM 2
def take_turn(num_rolls, opponent_score, dice=six_sided):
    """Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free Bacon).
    Return the points scored for the turn by the current player.

    num_rolls: The number of dice rolls that will be made.
    opponent_score: The total score of the opponent.
    dice: A function that simulates a single dice roll outcome.
    """
    # Leave these assert statements here; they help check for errors.
    assert type(num_rolls) == int, 'num_rolls must be an integer.'
    assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'
    assert num_rolls <= 10, 'Cannot roll more than 10 dice.'
    assert opponent_score < 100, 'The game should be over.'
    # BEGIN PROBLEM 3
    # Rolling zero dice triggers Free Bacon; otherwise roll normally.
    if num_rolls > 0:
        return roll_dice(num_rolls, dice)
    return free_bacon(opponent_score)
    # END PROBLEM 3
def is_swap(player_score, opponent_score):
    """Return whether the two scores should be swapped.

    The scores are compared digit by digit starting from the ones place, for
    as many positions as the shorter score has digits; the scores swap when
    any aligned pair of digits is equal.  Two zero scores never swap.
    """
    # BEGIN PROBLEM 4
    a, b = player_score, opponent_score
    while a > 0 and b > 0:
        if a % 10 == b % 10:
            return True
        a, b = a // 10, b // 10
    return False
    # END PROBLEM 4
def other(player):
    """Return the opponent of PLAYER, who is numbered 0 or 1.

    >>> other(0)
    1
    >>> other(1)
    0
    """
    return 1 if player == 0 else 0
def silence(score0, score1):
    """Announce nothing (see Phase 2).

    Default commentary function for ``play``: it ignores both scores and
    returns itself, so the game loop can rebind and call the result after
    every turn without ever printing anything.
    """
    return silence
def play(strategy0, strategy1, score0=0, score1=0, dice=six_sided,
         goal=GOAL_SCORE, say=silence):
    """Simulate a game and return the final scores of both players, with Player
    0's score first, and Player 1's score second.

    A strategy is a function that takes two total scores as arguments (the
    current player's score, and the opponent's score), and returns a number of
    dice that the current player will roll this turn.

    strategy0: The strategy function for Player 0, who plays first.
    strategy1: The strategy function for Player 1, who plays second.
    score0: Starting score for Player 0
    score1: Starting score for Player 1
    dice: A function of zero arguments that simulates a dice roll.
    goal: The game ends and someone wins when this score is reached.
    say: The commentary function to call at the end of the first turn.
    """
    player = 0  # Which player is about to take a turn, 0 (first) or 1 (second)
    # BEGIN PROBLEM 5
    "*** YOUR CODE HERE ***"
    while score0 < goal and score1 < goal:
        # The current player's strategy chooses a dice count from the two
        # totals (their own score first), and their score grows by the turn.
        if player == 0:
            score0 += take_turn(strategy0(score0, score1), score1, dice)
        else:
            score1 += take_turn(strategy1(score1, score0), score0, dice)
        # Apply the swap rule after every turn.
        if is_swap(score0, score1):
            score0, score1 = score1, score0
        player = other(player)
        # Each commentary call returns the commentary function to use on
        # the next turn.
        say = say(score0, score1)
    # END PROBLEM 5
    # (note that the indentation for the problem 6 prompt (***YOUR CODE HERE***) might be misleading)
    # BEGIN PROBLEM 6
    "*** YOUR CODE HERE ***"
    # END PROBLEM 6
    return score0, score1
#######################
# Phase 2: Commentary #
#######################
def say_scores(score0, score1):
    """A commentary function that announces the score for each player.

    Returns itself so the same announcement repeats on every later turn.
    """
    print("Player 0 now has", score0, "and Player 1 now has", score1)
    return say_scores
def announce_lead_changes(previous_leader=None):
    """Return a commentary function that announces lead changes.

    >>> f0 = announce_lead_changes()
    >>> f1 = f0(5, 0)
    Player 0 takes the lead by 5
    >>> f2 = f1(5, 12)
    Player 1 takes the lead by 7
    >>> f3 = f2(8, 12)
    >>> f4 = f3(8, 13)
    >>> f5 = f4(15, 13)
    Player 0 takes the lead by 2
    """
    def say(score0, score1):
        # Work out who currently leads; a tie means nobody does.
        if score0 == score1:
            leader = None
        else:
            leader = 0 if score0 > score1 else 1
        # Announce only when a player holds the lead and it just changed.
        if leader is not None and leader != previous_leader:
            print('Player', leader, 'takes the lead by', abs(score0 - score1))
        return announce_lead_changes(leader)
    return say
def both(f, g):
    """Return a commentary function that says what f says, then what g says.

    NOTE: the following game is not possible under the rules, it's just
    an example for the sake of the doctest

    >>> h0 = both(say_scores, announce_lead_changes())
    >>> h1 = h0(10, 0)
    Player 0 now has 10 and Player 1 now has 0
    Player 0 takes the lead by 10
    >>> h2 = h1(10, 6)
    Player 0 now has 10 and Player 1 now has 6
    >>> h3 = h2(6, 17)
    Player 0 now has 6 and Player 1 now has 17
    Player 1 takes the lead by 11
    """
    def say(score0, score1):
        # Run f first, then g; each returns its successor commentary
        # function, and the pair is recombined for the next turn.
        next_f = f(score0, score1)
        next_g = g(score0, score1)
        return both(next_f, next_g)
    return say
def announce_highest(who, previous_high=0, previous_score=0):
    """Return a commentary function that announces when WHO's score
    increases by more than ever before in the game.

    NOTE: the following game is not possible under the rules, it's just
    an example for the sake of the doctest

    >>> f0 = announce_highest(1) # Only announce Player 1 score gains
    >>> f1 = f0(12, 0)
    >>> f2 = f1(12, 11)
    11 point(s)! That's the biggest gain yet for Player 1
    >>> f3 = f2(20, 11)
    >>> f4 = f3(13, 20)
    >>> f5 = f4(20, 35)
    15 point(s)! That's the biggest gain yet for Player 1
    >>> f6 = f5(20, 47) # Player 1 gets 12 points; not enough for a new high
    >>> f7 = f6(21, 47)
    >>> f8 = f7(21, 77)
    30 point(s)! That's the biggest gain yet for Player 1
    >>> f9 = f8(77, 22) # Swap!
    >>> f10 = f9(33, 77) # Swap!
    55 point(s)! That's the biggest gain yet for Player 1
    """
    assert who == 0 or who == 1, 'The who argument should indicate a player.'
    # BEGIN PROBLEM 7
    def say(score0, score1):
        # Track only the watched player's score; announce whenever this
        # turn's gain beats every previous gain.
        current = score0 if who == 0 else score1
        best = previous_high
        if current - previous_score > best:
            best = current - previous_score
            if who == 0:
                print(best, "point(s)! That's the biggest gain yet for Player 0")
            else:
                print(best, "point(s)! That's the biggest gain yet for Player 1")
        return announce_highest(who, best, current)
    return say
    # END PROBLEM 7
#######################
# Phase 3: Strategies #
#######################
def always_roll(n):
    """Return a strategy that always rolls N dice.

    A strategy is a function that takes two total scores as arguments (the
    current player's score, and the opponent's score), and returns a number of
    dice that the current player will roll this turn.

    >>> strategy = always_roll(5)
    >>> strategy(0, 0)
    5
    >>> strategy(99, 99)
    5
    """
    # Normalize once up front; the returned strategy ignores both scores.
    rolls = int(n)
    def strategy(score, opponent_score):
        return rolls
    return strategy
def make_averaged(fn, num_samples=1000):
    """Return a function that returns the average value of FN when called.

    To implement this function, you will have to use *args syntax, a new Python
    feature introduced in this project. See the project description.

    >>> dice = make_test_dice(4, 2, 5, 1)
    >>> averaged_dice = make_averaged(dice, 1000)
    >>> averaged_dice()
    3.0
    """
    # BEGIN PROBLEM 8
    def average(*args):
        # Call fn num_samples times with the same arguments and return
        # the arithmetic mean of the results.
        total = sum(fn(*args) for _ in range(num_samples))
        return total / num_samples
    return average
    # END PROBLEM 8
def max_scoring_num_rolls(dice=six_sided, num_samples=1000):
    """Return the number of dice (1 to 10) that gives the highest average turn
    score by calling roll_dice with the provided DICE over NUM_SAMPLES times.

    Assume that the dice always return positive outcomes.

    >>> dice = make_test_dice(1, 6)
    >>> max_scoring_num_rolls(dice)
    1
    """
    # BEGIN PROBLEM 9
    # Average roll_dice over num_samples trials for every candidate dice
    # count.  Strict '>' keeps the smallest num_rolls on a tie, matching
    # the original behavior.  (The original shadowed the builtin `max`
    # and used a manual while-loop counter; both are fixed here.)
    averaged_roll_dice = make_averaged(roll_dice, num_samples)
    best_num_rolls, best_average = 0, 0
    for num_rolls in range(1, 11):
        average_score = averaged_roll_dice(num_rolls, dice)
        if average_score > best_average:
            best_average = average_score
            best_num_rolls = num_rolls
    return best_num_rolls
    # END PROBLEM 9
def winner(strategy0, strategy1):
    """Return 0 if strategy0 wins against strategy1, and 1 otherwise."""
    # Play one full game and report which side finished ahead.
    final0, final1 = play(strategy0, strategy1)
    return 0 if final0 > final1 else 1
def average_win_rate(strategy, baseline=always_roll(4)):
    """Return the average win rate of STRATEGY against BASELINE. Averages the
    winrate when starting the game as player 0 and as player 1.
    """
    # winner() returns 0 when player 0 wins, so its average is the
    # fraction of games player 0 *loses*; invert for the first seat.
    averaged_winner = make_averaged(winner)
    rate_as_player_0 = 1 - averaged_winner(strategy, baseline)
    rate_as_player_1 = averaged_winner(baseline, strategy)
    return (rate_as_player_0 + rate_as_player_1) / 2
def run_experiments():
    """Run a series of strategy experiments and report results.

    Each flag below toggles one experiment; flip it as the corresponding
    strategy is implemented.
    """
    if True:  # Change to False when done finding max_scoring_num_rolls
        six_sided_max = max_scoring_num_rolls(six_sided)
        print('Max scoring num rolls for six-sided dice:', six_sided_max)

    if False:  # Change to True to test always_roll(8)
        print('always_roll(6) win rate:', average_win_rate(always_roll(6)))

    if False:  # Change to True to test bacon_strategy
        print('bacon_strategy win rate:', average_win_rate(bacon_strategy))

    if False:  # Change to True to test swap_strategy
        print('swap_strategy win rate:', average_win_rate(swap_strategy))

    if False:  # Change to True to test final_strategy
        print('final_strategy win rate:', average_win_rate(final_strategy))

    "*** You may add additional experiments as you wish ***"
def bacon_strategy(score, opponent_score, margin=8, num_rolls=4):
    """This strategy rolls 0 dice if that gives at least MARGIN points, and
    rolls NUM_ROLLS otherwise.
    """
    # BEGIN PROBLEM 10
    return 0 if free_bacon(opponent_score) >= margin else num_rolls
    # END PROBLEM 10
def swap_strategy(score, opponent_score, margin=8, num_rolls=4):
    """This strategy rolls 0 dice when it triggers a beneficial swap. It also
    rolls 0 dice if it gives at least MARGIN points and does not trigger a
    non-beneficial swap. Otherwise, it rolls NUM_ROLLS.
    """
    # BEGIN PROBLEM 11
    # Bug fixes vs. the previous version:
    #  * a NON-beneficial swap must always roll NUM_ROLLS (the old code
    #    still rolled 0 when the bacon points met the margin);
    #  * when no swap triggers, rolling 0 only needs bacon >= margin (the
    #    old code always rolled NUM_ROLLS in that branch).
    bacon = free_bacon(opponent_score)      # points gained by rolling 0
    score_after_bacon = score + bacon
    if is_swap(score_after_bacon, opponent_score):
        # Take the swap only when it raises our score.
        return 0 if opponent_score > score_after_bacon else num_rolls
    # No swap triggered: roll 0 whenever Free Bacon alone meets the margin.
    return 0 if bacon >= margin else num_rolls
    # END PROBLEM 11
def final_strategy(score, opponent_score):
    """Write a brief description of your final strategy.

    *** YOUR DESCRIPTION HERE ***
    """
    # BEGIN PROBLEM 12
    # Placeholder: always roll 4 dice until a real strategy is written.
    return 4  # Replace this statement
    # END PROBLEM 12
##########################
# Command Line Interface #
##########################
# NOTE: Functions in this section do not need to be changed. They use features
# of Python not yet covered in the course.
@main
def run(*args):
    """Read in the command-line argument and calls corresponding functions.

    This function uses Python syntax/techniques not yet covered in this course.
    """
    # NOTE(review): @main presumably comes from the course's ucb.py helper
    # and invokes this function when the file runs as a script -- confirm.
    import argparse
    parser = argparse.ArgumentParser(description="Play Hog")
    parser.add_argument('--run_experiments', '-r', action='store_true',
                        help='Runs strategy experiments')
    args = parser.parse_args()

    if args.run_experiments:
        run_experiments()
| [
"sxtythmpf@163.com"
] | sxtythmpf@163.com |
50295750e6b4eae3c26c855ee974282d7bbdf490 | a561edea05246c086297a570f0d411699dfa0cdc | /test_other.py | 1a126618107832bb65f05f5b237832cae3addf82 | [
"MIT"
] | permissive | skeswa/litecast | 7115a88b800d6993fd26548395621e2f70e1948b | c56e2e7ddafb655613055e88b4a61a08f3d76719 | refs/heads/master | 2016-09-06T03:12:59.414174 | 2015-04-19T18:05:21 | 2015-04-19T18:05:21 | 34,179,811 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | import socket
import time
from stream import Connection
from message_builder import build_init_message, build_call_message
# Open the streaming connection and announce this client to the server.
conn = Connection()
conn.write(build_init_message("Dan Cadden", "shikkic", "+12158723266"))
# Python 2 style pause: keep the process (and connection) alive until the
# user presses Enter.
raw_input("Press Enter to continue...")
| [
"me@sandile.io"
] | me@sandile.io |
319af3de3604e1642f8013f8581ef4a1489e4148 | c1f142c1bf0f5f29e3dc7e4625cada67f3db8e41 | /mGTTs.py | 6a1c79a34150187dbdaecbfdfa7273614bc12e49 | [] | no_license | develwon/Addpushdevice | dd939a036c5ff690e87ed8762eab2d29d9593329 | 03f5e836c223a168f6ac7af0fa258972c0cbc28b | refs/heads/master | 2020-04-02T13:40:20.881785 | 2018-10-24T12:03:02 | 2018-10-24T12:03:02 | 154,491,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from gtts import gTTS
from pygame import mixer
from pygame import time
from tempfile import TemporaryFile
class mGTTs:
    """Thin wrapper around gTTS + pygame.mixer that synthesizes Korean
    speech (lang='ko') and plays it back synchronously."""

    def __init__(self):
        pass

    def run(self, text):
        """Synthesize `text` via Google TTS, stream the audio into a
        temporary file and block until playback finishes."""
        # pygame.init()
        mixer.init()
        sf = TemporaryFile()
        tts = gTTS(text=text, lang='ko')
        clock = time.Clock()
        tts.write_to_fp(sf)
        # Rewind so the mixer reads the audio from the beginning.
        sf.seek(0)
        mixer.music.load(sf)
        mixer.music.play()
        # Busy-wait (throttled by the clock) until playback ends; the
        # print traces the busy flag each tick.
        while mixer.music.get_busy()==True:
            clock.tick(1000)
            print( mixer.music.get_busy())

    def stop(self):
        """Stop any in-progress playback immediately."""
        mixer.music.stop()
| [
"noreply@github.com"
] | develwon.noreply@github.com |
0fcb6044cf85be934c79500443249d365652b303 | 0f70b8f37aad845ad53ac8cc3cb76b88ca87882e | /tagifai/eval.py | 84191b35056859400167c5af1b824ccd14ebd059 | [] | no_license | jaswant7/end_to_end_ML | 40f2fc3caeb2e41889522a3d9e064150f558f344 | f542fe7da874711fc23aad00f750c7a0f1539705 | refs/heads/main | 2023-05-20T04:00:41.957301 | 2021-06-11T14:28:14 | 2021-06-11T14:28:14 | 376,011,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,657 | py | # eval.py
# Evaluation components.
import itertools
from typing import Dict, List
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import precision_recall_fscore_support
from snorkel.slicing import PandasSFApplier, slice_dataframe, slicing_function
from tagifai import predict, train
@slicing_function()
def cv_transformers(x):
    """Projects with the `computer-vision` and `transformers` tags."""
    # Membership spelled out explicitly instead of all() over a listcomp.
    return "computer-vision" in x.tags and "transformers" in x.tags
@slicing_function()
def short_text(x):
    """Projects with short titles and descriptions."""
    word_count = len(x.text.split())
    return word_count < 7  # fewer than 7 words
def get_performance(
    y_true: np.ndarray, y_pred: np.ndarray, classes: List, df: pd.DataFrame = None
) -> Dict:
    """Per-class performance metrics.

    Args:
        y_true (np.ndarray): True class labels.
        y_pred (np.ndarray): Predicted class labels.
        classes (List): List of all unique classes.
        df (pd.DataFrame, optional): dataframe used for slicing.

    Returns:
        Dictionary of overall, per-class and (when ``df`` is given)
        per-slice performance metrics.
    """
    performance = {"overall": {}, "class": {}}

    # Overall performance
    metrics = precision_recall_fscore_support(y_true, y_pred, average="weighted")
    performance["overall"]["precision"] = metrics[0]
    performance["overall"]["recall"] = metrics[1]
    performance["overall"]["f1"] = metrics[2]
    performance["overall"]["num_samples"] = np.float64(len(y_true))

    # Per-class performance
    metrics = precision_recall_fscore_support(y_true, y_pred, average=None)
    for i, class_ in enumerate(classes):
        performance["class"][class_] = {
            "precision": metrics[0][i],
            "recall": metrics[1][i],
            "f1": metrics[2][i],
            "num_samples": np.float64(metrics[3][i]),
        }

    # Slicing performance
    if df is not None:
        # Slices
        slicing_functions = [cv_transformers, short_text]
        applier = PandasSFApplier(slicing_functions)
        slices = applier.apply(df)

        # Score slices
        # Use snorkel.analysis.Scorer for multiclass tasks
        # Naive implementation for our multilabel task
        # based on snorkel.analysis.Scorer
        performance["slices"] = {}
        for slice_name in slices.dtype.names:
            mask = slices[slice_name].astype(bool)
            metrics = precision_recall_fscore_support(y_true[mask], y_pred[mask], average="micro")
            performance["slices"][slice_name] = {
                "precision": metrics[0],
                "recall": metrics[1],
                "f1": metrics[2],
                "num_samples": len(y_true[mask]),
            }

        # Sample-weighted average f1 across slices, computed directly
        # instead of materializing one list element per sample (the old
        # approach was O(total samples) in time and memory).
        slice_scores = list(performance["slices"].values())
        total_samples = sum(s["num_samples"] for s in slice_scores)
        if total_samples:
            performance["slices"]["f1"] = (
                sum(s["f1"] * s["num_samples"] for s in slice_scores) / total_samples
            )
        else:
            performance["slices"]["f1"] = float("nan")  # np.mean([]) was nan too

    return performance
def compare_tags(texts: List, tags: List, artifacts: Dict, test_type: str) -> Dict:
    """Compare ground truth with predicted tags.

    Args:
        texts (List): List of input texts to predict on.
        tags (List): List of ground truth tags for each input.
        artifacts (Dict): Artifacts needed for inference.
        test_type (str): Type of test (INV, DIR, MFT, etc.)

    Returns:
        Dict: Results keyed by "passed"/"failed"; each entry records the
        input, the prediction and the test type.  (The old annotations
        claimed `texts: str` and a `List` return, both wrong.)
    """
    # Predict
    predictions = predict.predict(texts=texts, artifacts=artifacts)

    # Evaluate: a case passes iff every ground-truth tag was predicted.
    results = {"passed": [], "failed": []}
    for i, prediction in enumerate(predictions):
        result = {
            "input": {"text": texts[i], "tags": tags[i]},
            "prediction": prediction,
            "type": test_type,
        }
        if all(tag in prediction["predicted_tags"] for tag in tags[i]):
            results["passed"].append(result)
        else:
            results["failed"].append(result)
    return results
def get_behavioral_report(artifacts: Dict) -> Dict:
    """Assess failure rate by performing
    behavioral tests on our trained model.

    Args:
        artifacts (Dict): Artifacts needed for inference.

    Returns:
        Dict: Behavioral score plus the per-case pass/fail results.
    """
    # Each case: (test type, input texts, expected tags per text).  The
    # four formerly copy-pasted stanzas are consolidated into data + one
    # loop; case order (and hence result order) is unchanged.
    test_cases = []

    # INVariance via verb injection (changes should not affect outputs)
    tokens = ["revolutionized", "disrupted", "accelerated"]
    test_cases.append((
        "INV",
        [f"Transformers have {token} the ML field." for token in tokens],
        [["transformers"] for _ in tokens],
    ))

    # INVariance via misspelling
    tokens = ["generative adverseril network", "generated adversarial networks"]
    test_cases.append((
        "INV",
        [f"{token} are very popular in machine learning projects." for token in tokens],
        [["generative-adversarial-networks"] for _ in tokens],
    ))

    # DIRectional expectations (changes with known outputs)
    tokens = ["TensorFlow", "Huggingface"]
    test_cases.append((
        "DIR",
        [f"A {token} implementation of transformers." for token in tokens],
        [["tensorflow", "transformers"], ["huggingface", "transformers"]],
    ))

    # Minimum Functionality Tests (simple input/output pairs)
    tokens = ["transformers", "graph neural networks"]
    test_cases.append((
        "MFT",
        [f"{token} have revolutionized machine learning." for token in tokens],
        [["transformers"], ["graph-neural-networks"]],
    ))

    results = {"passed": [], "failed": []}
    for test_type, texts, tags in test_cases:
        for status, items in compare_tags(
            texts=texts, tags=tags, artifacts=artifacts, test_type=test_type
        ).items():
            results[status].extend(items)

    # Behavioral score
    score = len(results["passed"]) / float(len(results["passed"]) + len(results["failed"]))
    return {"score": score, "results": results}
def evaluate(
    artifacts: Dict,
    dataloader: torch.utils.data.DataLoader,
    df: pd.DataFrame,
    device: torch.device,
) -> Dict:
    """Evaluate performance on data.

    Args:
        artifacts (Dict): Artifacts needed for inference.
        dataloader (torch.utils.data.DataLoader): Dataloader with the data your want to evaluate.
        df (pd.DataFrame): dataframe (used for slicing).
        device (torch.device): Device to run model on. Defaults to CPU.

    Returns:
        Tuple of (performance metrics, behavioral report).
        NOTE(review): annotated as Dict but actually returns a 2-tuple.
    """
    # Artifacts
    args = artifacts["args"]
    model = artifacts["model"]
    label_encoder = artifacts["label_encoder"]
    model = model.to(device)
    classes = label_encoder.classes

    # Determine predictions using threshold: a label is predicted iff its
    # probability is at least args.threshold.
    trainer = train.Trainer(model=model, device=device)
    y_true, y_prob = trainer.predict_step(dataloader=dataloader)
    y_pred = np.array([np.where(prob >= float(args.threshold), 1, 0) for prob in y_prob])

    # Evaluate performance (overall, per-class and per-slice via df)
    performance = get_performance(df=df, y_true=y_true, y_pred=y_pred, classes=classes)

    # Behavior tests
    behavioral_report = get_behavioral_report(artifacts=artifacts)

    return performance, behavioral_report
if __name__ == "__main__": # pragma: no cover, playground for eval components
import json
from argparse import Namespace
from pathlib import Path
import numpy as np
import pandas as pd
from tagifai import config, data, main, utils
from tagifai.config import logger
# Set experiment and start run
args_fp = Path(config.CONFIG_DIR, "args.json")
args = Namespace(**utils.load_dict(filepath=args_fp))
# 1. Set seed
utils.set_seed(seed=args.seed)
# 2. Set device
device = utils.set_device(cuda=args.cuda)
# 3. Load data
projects_fp = Path(config.DATA_DIR, "projects.json")
tags_fp = Path(config.DATA_DIR, "tags.json")
projects = utils.load_dict(filepath=projects_fp)
tags_dict = utils.list_to_dict(utils.load_dict(filepath=tags_fp), key="tag")
df = pd.DataFrame(projects)
if args.shuffle:
df = df.sample(frac=1).reset_index(drop=True)
df = df[: args.num_samples] # None = all samples
# 4. Clean data
df, tags_above_frequency = data.clean(
df=df,
include=list(tags_dict.keys()),
exclude=config.EXCLUDE,
min_tag_freq=args.min_tag_freq,
)
# 5. Preprocess data
df.text = df.text.apply(data.preprocess, lower=args.lower, stem=args.stem)
# 6. Encode labels
labels = df.tags
label_encoder = data.MultiLabelLabelEncoder()
label_encoder.fit(labels)
y = label_encoder.encode(labels)
# Class weights
all_tags = list(itertools.chain.from_iterable(labels.values))
counts = np.bincount([label_encoder.class_to_index[class_] for class_ in all_tags])
class_weights = {i: 1.0 / count for i, count in enumerate(counts)}
# 7. Split data
utils.set_seed(seed=args.seed) # needed for skmultilearn
X = df.text.to_numpy()
X_train, X_, y_train, y_ = data.iterative_train_test_split(X=X, y=y, train_size=args.train_size)
X_val, X_test, y_val, y_test = data.iterative_train_test_split(X=X_, y=y_, train_size=0.5)
# View slices
test_df = pd.DataFrame({"text": X_test, "tags": label_encoder.decode(y_test)})
cv_transformers_df = slice_dataframe(test_df, cv_transformers)
print(f"{len(cv_transformers_df)} projects")
print(cv_transformers_df[["text", "tags"]].head())
short_text_df = slice_dataframe(test_df, short_text)
print(f"{len(short_text_df)} projects")
print(short_text_df[["text", "tags"]].head())
# 8. Tokenize inputs
tokenizer = data.Tokenizer(char_level=args.char_level)
tokenizer.fit_on_texts(texts=X_train)
X_train = np.array(tokenizer.texts_to_sequences(X_train), dtype=object)
X_val = np.array(tokenizer.texts_to_sequences(X_val), dtype=object)
X_test = np.array(tokenizer.texts_to_sequences(X_test), dtype=object)
# 9. Create dataloaders
train_dataset = data.CNNTextDataset(X=X_train, y=y_train, max_filter_size=args.max_filter_size)
val_dataset = data.CNNTextDataset(X=X_val, y=y_val, max_filter_size=args.max_filter_size)
test_dataset = data.CNNTextDataset(X=X_test, y=y_test, max_filter_size=args.max_filter_size)
train_dataloader = train_dataset.create_dataloader(batch_size=args.batch_size)
val_dataloader = val_dataset.create_dataloader(batch_size=args.batch_size)
test_dataloader = test_dataset.create_dataloader(batch_size=args.batch_size)
# Load artifacts
runs = utils.get_sorted_runs(experiment_name="best", order_by=["metrics.f1 DESC"])
run_ids = [run["run_id"] for run in runs]
artifacts = main.load_artifacts(run_id=run_ids[0], device=torch.device("cpu"))
# Evaluation
device = torch.device("cpu")
performance, behavioral_report = evaluate(
artifacts=artifacts,
dataloader=test_dataloader,
df=test_df,
device=device,
)
logger.info(json.dumps(performance, indent=2))
logger.info(json.dumps(behavioral_report, indent=2))
| [
"jaswantjassie7@gmail.com"
] | jaswantjassie7@gmail.com |
2f3614f40cfda72df52d769788467fde9d3f5985 | 9489f6ecd75885e1af4bcc35e2df45d4f44fdf38 | /egs/madcat_arabic/v1/local/segment.py | c371ed235d32028f185370df103f0a1b148c807f | [
"Apache-2.0"
] | permissive | adelra/waldo | b87bb17bcb501e280716640b7a9b08b6b8c32c30 | aaa878c655768218fd0a15967b8abdc9a9a1a610 | refs/heads/master | 2020-03-16T16:33:56.895841 | 2018-05-26T04:41:19 | 2018-05-26T04:41:19 | 132,793,784 | 0 | 0 | null | 2018-05-09T17:52:47 | 2018-05-09T17:52:47 | null | UTF-8 | Python | false | false | 4,035 | py | #!/usr/bin/env python3
import torch
import argparse
import os
import sys
import random
import numpy as np
from models.Unet import UNet
from train import sample
from waldo.segmenter import ObjectSegmenter
from waldo.core_config import CoreConfig
#from waldo.data_visualization import visualize_mask
from waldo.data_io import WaldoDataset
from unet_config import UnetConfig
# Command-line interface for the segmentation script.
parser = argparse.ArgumentParser(description='Pytorch MADCAT Arabic setup')
parser.add_argument('test_data', default='./data', type=str,
                    help='Path to processed validation data')
parser.add_argument('dir', type=str,
                    help='Directory to store segmentation results. '
                    'It is assumed that <dir> is a sub-directory of '
                    'the model directory.')
parser.add_argument('--model', type=str, default='model_best.pth.tar',
                    help='Name of the model file to use for segmenting.')
parser.add_argument('--train-image-size', default=128, type=int,
                    help='The size of the parts of training images that we'
                    'train on (in order to form a fixed minibatch size).'
                    'These are derived from the input images'
                    ' by padding and then random cropping.')

# Fix the RNG seeds so segmentation runs are reproducible.
random.seed(0)
np.random.seed(0)
def main():
    """Load the trained UNet checkpoint, run inference on one test image
    and segment the predicted class/offset maps into object masks."""
    global args
    args = parser.parse_args()
    args.batch_size = 1  # only segment one image for experiment
    core_config_path = os.path.join(args.dir, 'configs/core.config')
    unet_config_path = os.path.join(args.dir, 'configs/unet.config')

    core_config = CoreConfig()
    core_config.read(core_config_path)
    print('Using core configuration from {}'.format(core_config_path))

    # loading Unet configuration
    unet_config = UnetConfig()
    unet_config.read(unet_config_path, args.train_image_size)
    print('Using unet configuration from {}'.format(unet_config_path))

    offset_list = core_config.offsets
    print("offsets are: {}".format(offset_list))

    # model configurations from core config
    num_classes = core_config.num_classes
    num_colors = core_config.num_colors
    num_offsets = len(core_config.offsets)
    # model configurations from unet config
    start_filters = unet_config.start_filters
    up_mode = unet_config.up_mode
    merge_mode = unet_config.merge_mode
    depth = unet_config.depth
    model = UNet(num_classes, num_offsets,
                 in_channels=num_colors, depth=depth,
                 start_filts=start_filters,
                 up_mode=up_mode,
                 merge_mode=merge_mode)

    model_path = os.path.join(args.dir, args.model)
    if os.path.isfile(model_path):
        print("=> loading checkpoint '{}'".format(model_path))
        # map_location keeps GPU-trained weights loadable on CPU-only hosts
        checkpoint = torch.load(model_path,
                                map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'])
        print("loaded.")
    else:
        # NOTE(review): execution continues with random weights here --
        # confirm that is intended rather than exiting with an error.
        print("=> no checkpoint found at '{}'".format(model_path))

    model.eval()  # convert the model into evaluation mode

    testset = WaldoDataset(args.test_data, core_config, args.train_image_size)
    print('Total samples in the test set: {0}'.format(len(testset)))
    dataloader = torch.utils.data.DataLoader(
        testset, num_workers=1, batch_size=args.batch_size)

    segment_dir = '{}/segment'.format(args.dir)
    if not os.path.exists(segment_dir):
        os.makedirs(segment_dir)

    # sample() yields the input image plus per-pixel class and adjacency
    # predictions; the segmenter turns those into an object mask.
    img, class_pred, adj_pred = sample(
        model, dataloader, segment_dir, core_config)
    seg = ObjectSegmenter(class_pred[0].detach().numpy(),
                          adj_pred[0].detach().numpy(),
                          num_classes, offset_list)
    mask_pred, object_class = seg.run_segmentation()
    x = {}
    # from (color, height, width) to (height, width, color)
    x['img'] = np.moveaxis(img[0].numpy(), 0, -1)
    x['mask'] = mask_pred.astype(int)
    x['object_class'] = object_class
    #visualize_mask(x, core_config)
if __name__ == '__main__':
main()
| [
"dpovey@gmail.com"
] | dpovey@gmail.com |
15857387ce4e8a9fe4f75f9aa63044338cf4941e | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma818.py | 60fcaefd18733cab6288e5987405aed537f785b0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,185 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=28
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
    """Point optimizer that rewrites every CZPowGate operation as an exact
    CZ followed by two layers of X on the same qubits.

    NOTE(review): the two X layers cancel each other, so the net effect is
    replacing a CZPowGate (any exponent) with a plain CZ -- confirm that
    dropping the exponent is intentional in this generated benchmark.
    """
    def optimization_at(
            self,
            circuit: 'cirq.Circuit',
            index: int,
            op: 'cirq.Operation'
    ) -> Optional[cirq.PointOptimizationSummary]:
        # Only rewrite gate operations whose gate is a CZPowGate; a None
        # return (implicit fall-through) leaves the operation untouched.
        if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
            return cirq.PointOptimizationSummary(
                clear_span=1,
                clear_qubits=op.qubits,
                new_operations=[
                    cirq.CZ(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                    cirq.X.on_each(*op.qubits),
                ]
            )
#thatsNoCode
def make_circuit(n: int, input_qubit):
    """Build the auto-generated benchmark circuit and append a final
    measurement of all qubits under the key 'result'.

    n: qubit count (the hard-coded gate list below assumes 4 qubits).
    input_qubit: sequence of cirq qubits the gates act on.

    The `# number=k` comments are bookkeeping from the circuit generator.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    c.append(cirq.H.on(input_qubit[3]))  # number=9
    c.append(cirq.rx(-3.1258846903218442).on(input_qubit[1]))  # number=8
    c.append(cirq.Y.on(input_qubit[3]))  # number=7
    c.append(cirq.H.on(input_qubit[2]))  # number=15
    c.append(cirq.H.on(input_qubit[1]))  # number=2
    c.append(cirq.H.on(input_qubit[2]))  # number=3
    c.append(cirq.H.on(input_qubit[3]))  # number=4
    c.append(cirq.rx(-1.2220795422464295).on(input_qubit[1]))  # number=16
    c.append(cirq.Y.on(input_qubit[3]))  # number=5
    c.append(cirq.Y.on(input_qubit[3]))  # number=6
    c.append(cirq.H.on(input_qubit[3]))  # number=20
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3]))  # number=21
    c.append(cirq.H.on(input_qubit[3]))  # number=22
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3]))  # number=25
    c.append(cirq.X.on(input_qubit[3]))  # number=26
    c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3]))  # number=27
    c.append(cirq.H.on(input_qubit[3]))  # number=17
    c.append(cirq.CZ.on(input_qubit[0],input_qubit[3]))  # number=18
    c.append(cirq.H.on(input_qubit[3]))  # number=19
    c.append(cirq.X.on(input_qubit[3]))  # number=11
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0]))  # number=23
    c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0]))  # number=24
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))

    return c
def bitstring(bits):
    """Collapse an iterable of bit-like values into a '0'/'1' string."""
    chars = [str(int(b)) for b in bits]
    return ''.join(chars)
if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile for Google's Sycamore device using the sqrt-iswap gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =4040

    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    # Histogram of measurement outcomes, keyed by their bitstring.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # NOTE(review): the file is not closed if a print fails; a `with`
    # block would be safer.
    writefile = open("../data/startCirq_pragma818.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)

    writefile.close()
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
3589c1a7752436b724acb8292a0800b27a1f80f5 | 31918d62c8004324a186a98e30b6f192f3c33899 | /nonMainFunctions/pullTifFilesFromHPC.py | c51a798f7ed80ce15a1b60b83a697c9adebd6acf | [] | no_license | JonoSax/3DHistologicalReconstruction | eb7685b01366f519a2c8c1dba67bdf764d3574c2 | d07e0c9a9c866c8a74c4c02c741a202d4093e52a | refs/heads/master | 2023-09-02T06:29:13.227389 | 2021-09-09T06:22:19 | 2021-09-09T06:22:19 | 268,755,112 | 3 | 2 | null | 2021-11-20T04:48:43 | 2020-06-02T09:16:05 | Python | UTF-8 | Python | false | false | 2,632 | py | '''
Little script which pulls files from HPC
'''
from glob import glob
from HelperFunctions.Utilities import nameFromPath, dirMaker
import os
import multiprocessing
from itertools import repeat
import time
import numpy as np
def getFiles(s, size, dataHome):
# copy the tif files from HPC
print("Starting " + s)
path = '/Volumes/USB/' + s + str(size) + '/tifFiles/'
dirMaker(path)
imgs = sorted(glob(dataHome + s + '/3/tifFiles/*.tif'))
for i in imgs:
print(" Copying " + nameFromPath(i))
os.system('scp -r ' + i + ' ' + path)
print("Finished " + s)
def pushFiles(s, size, dataHome):
# send the featExtracted files to
name = s.split("/")[-2]
print("Starting " + name)
dataDest = dataHome + "SpecimenSections/" + name + "/"
dirMaker(dataDest)
feats = s + str(size) + "/FeatureSectionsFinal/linearSect/"
os.system('nohup scp -r ' + feats + '* ' + dataDest + " &")
while True:
dirsMade = len(os.listdir(dataDest))
dirsThere = len(os.listdir(feats))
print(name + ": " + str(dirsMade) + "/" + str(dirsThere))
time.sleep(20)
if dirsMade == dirsThere:
break
print("Finished " + s)
if __name__ == "__main__":
dataHome = '/Volumes/resabi201900003-uterine-vasculature-marsden135/BoydCollection/'
# dataHome = 'jres129@hpc2.bioeng.auckland.ac.nz:/people/jres129/eresearch/uterine/jres129/BoydCollection/'
samples = ['H1029A_8.4', 'H671A_18.5', 'H671B_18.5', 'H673A_7.6', 'H710B_6.1', 'H710C_6.1', 'H750A_7.0' ]
'''
samples = [
'/Volumes/Storage/H710C_6.1/',
'/Volumes/USB/H671A_18.5/',
'/Volumes/Storage/H653A_11.3/',
'/Volumes/USB/H750A_7.0/',
'/Volumes/USB/H710B_6.1/',
'/Volumes/USB/H671B_18.5/']
'''
size = 3
multiprocessing.set_start_method('spawn')
cpuNo = 4
action = "pull"
# pull files from HPC
if action == "pull":
if cpuNo != 1:
with multiprocessing.Pool(processes = cpuNo) as Pool:
Pool.starmap(getFiles, zip(samples, repeat(size), repeat(dataHome)))
else:
for s in samples:
getFiles(s, size, dataHome)
"PID = 47606"
elif action == "push":
# push files to HPC
if cpuNo != 1:
jobs = {}
for s in samples:
jobs[s] = Process(target = pushFiles, args = (s, size, dataHome))
jobs[s].start()
for s in samples:
jobs[s].join()
else:
for s in samples:
pushFiles(s, size, dataHome) | [
"jono.reshef@gmail.com"
] | jono.reshef@gmail.com |
0d28cea2bb40645e423e6e59f1507f409bf285ff | 6b0fb026e8d8dad8c7653d3026691434f5077fac | /initirl/irl/irl/urls.py | fc72da328b520072de4fabb440f1231cc9be5b71 | [] | no_license | jcpyun/initirl_django_rough | 7f2159d176fa12d7b507a2fd23c94ded19effb7f | a5c12ec50f9b5bdbd7894112981ac137250a567f | refs/heads/master | 2021-01-14T08:50:32.672364 | 2015-07-26T20:02:56 | 2015-07-26T20:02:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | """irl URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# Route table (Django 1.8-era string-based view references: 'joins.views.home').
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^$', 'joins.views.home', name='home'),
    ########## THE ONE BELOW MUST BE AT VERY BOTTOM
    # Catch-all: captures any remaining path as ``ref_id`` for referral
    # sharing; it must stay last or it would shadow every other route.
    url(r'^(?P<ref_id>.*)$', 'joins.views.share', name='share'), ## THIS MUST BE AT VERY BOTTOM
]
| [
"johnchpyun@gmail.com"
] | johnchpyun@gmail.com |
0ecdb03ed89ea22925524a35347d0e0b2bfab8fc | e22b46b0934e0d951371c35523062081012418cc | /cross_validation.py | 8495ad7ff5e410bb2089f08ac37b38c8081ed8c5 | [] | no_license | GoGoGoDoge/learn-python-ml | 4cffb0bde2ba06105758f7936dd3e013b20f1432 | 0cfc4ad5fb62ea85b07e6450126fe206cab2f2cb | refs/heads/master | 2020-12-02T06:21:35.523514 | 2017-11-05T03:57:31 | 2017-11-05T03:57:31 | 96,821,174 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,135 | py | #!/usr/bin/python
import re
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES;
# from pyclustering.cluster import cluster_visualizer;
# from kmeans_fgy import kmeans # , splitting_type
from kmeans_marco import kmeans # , splitting_type
from pyclustering.utils import read_sample, timedcall;
import sympy
# from sklearn.model_selection import cross_val_score
# from sklearn import svm
# import numpy as np
import numpy
import GPy
import GPyOpt
def insert_ast(s):
    """Make implicit products explicit for SymPy: '3x' -> '3*x', 'xy' -> 'x*y'.

    Uses the module-level compiled patterns ``p`` (digit followed by x/y)
    and ``q`` (the literal pair 'xy').
    """
    with_digit_products = re.sub(p, r'\1*\2', s)
    return re.sub(q, r'x*y', with_digit_products)
def set_elmnt(i, j, s):
    """Store a SymPy expression for the unordered pair (i, j) in gram_mat_func."""
    key = (i, j) if i <= j else (j, i)
    gram_mat_func[key] = sympy.sympify(s)
def get_elmnt(i, j):
    """Return the stored SymPy expression for the pair (i, j), or None if absent."""
    key = (min(i, j), max(i, j))
    return gram_mat_func.get(key)
def get_elmnt_and_sub(i, j, alpha, beta):
    """Return the (i, j) kernel expression with x=alpha, y=beta substituted.

    Returns None when no expression is stored for the pair.
    """
    expr = gram_mat_func.get((min(i, j), max(i, j)))
    if expr is None:
        return None
    return expr.subs([(x, alpha), (y, beta)])
def innerP2distance(_gm):
    """Convert an inner-product (Gram) matrix into a pairwise Euclidean
    distance matrix using ||a-b||^2 = <a,a> + <b,b> - 2<a,b>.

    The dimension is taken from ``len(_gm)`` itself instead of the
    module-level global ``d``, so the function now works for any square
    Gram matrix (backward compatible: callers always pass a d x d matrix).

    :param _gm: square list-of-lists Gram matrix of inner products
    :returns: square list-of-lists of pairwise distances
    """
    n = len(_gm)
    _dm = [[0.0 for _ in range(n)] for _ in range(n)]
    for i in range(n):
        for j in range(n):
            _dm[i][j] = numpy.sqrt(_gm[i][i] + _gm[j][j] - 2 * _gm[i][j])
    return _dm
def partition2train(_dm, _i):
    """Return the training sub-matrix of ``_dm`` with fold ``_i``'s rows and
    columns removed.

    Relies on module globals cv, d, len_portion, remain_portion_len and on
    get_testing_indices returning a *contiguous* index range (the filter
    below compares against the first/last test index only).
    """
    # The last fold may be smaller, so size the output accordingly.
    if _i < cv-1:
        _dm_train = [[0 for aa in range(d-len_portion)] for bb in range(d-len_portion)]
    else:
        _dm_train = [[0 for aa in range(d-remain_portion_len)] for bb in range(d-remain_portion_len)]
    _testing_indices = get_testing_indices(_i)
    lt = len(_testing_indices)
    iii = 0
    jjj = 0
    # Copy every entry whose row and column both fall outside the test range.
    for ii in range(0,d):
        if ii < _testing_indices[0] or ii > _testing_indices[lt-1]:
            jjj = 0
            for jj in range(0,d):
                if jj < _testing_indices[0] or jj > _testing_indices[lt-1]:
                    _dm_train[iii][jjj] = _dm[ii][jj]
                    jjj = jjj + 1
                    #print("jjj: ", jjj)
            iii = iii + 1
            #print("iii: ", iii)
    return _dm_train
def get_testing_indices(_i):
    """Index range of the held-out fold ``_i``; the final fold absorbs any
    remainder up to ``d`` (uses module globals cv, d, len_portion)."""
    start = _i * len_portion
    end = start + len_portion if _i < cv - 1 else d
    return range(start, end)  # half-open: start <= idx <= end-1
def cv_score(alpha=1., beta=0., k=5):
    '''
    k-fold cross-validated F-measure for one (alpha, beta, k) setting.

    Substitutes (alpha, beta) into the symbolic Gram matrix, clusters with
    k-means, labels each cluster by majority vote of the known labels,
    assigns every held-out point to its nearest cluster, accumulates a
    single unified confusion matrix over all folds, and returns the
    F-measure. Relies on module globals: d, cv, labels, numpy, kmeans.
    '''
    print("This is neg_cv_score, alpha = ", alpha, "beta = ", beta, "k = ", k)
    # Evaluate the symbolic kernel at (alpha, beta) to get a numeric Gram matrix.
    gm = [[0 for aa in range(d)] for bb in range(d)]
    for i in range(d):
        for j in range(d):
            gm[i][j] = float(get_elmnt_and_sub(i, j, alpha, beta))
#            gm[i][j] = float(get_elmnt(i, j).subs([(x,alpha), (y,beta)]))
#            gm[i][j] = get_elmnt(i, j).subs([(x,alpha), (y,beta)])
    #print(gm)
    dm = innerP2distance(gm) # this is the pairwise distance matrix
    print( "size of dm:", len(dm[0][:]), len(dm) )
    confusion_mat = {}
    confusion_mat_sum = [[0,0],[0,0]]
    for i in range(0,cv):
        print("No. i fold: ", i)
        dm_train = partition2train(dm, i) # a sub matrix extracted from the dm
        print("size of dm_train: ", len(dm_train[0][:]), len(dm_train) )
        test_indices = get_testing_indices(i) # array of indices of the testing data points
        confusion_mat[i] = [[0,0],[0,0]] # [ [TN, FP], [FN, TP] ]
        #print(float_dm_train)
        #kmeans_instance = kmeans(dm_train, None, 2*k, 0.025)a
        print(gm)
        # NOTE(review): clustering runs on the full Gram matrix ``gm`` (all
        # points, test fold included), not on ``dm_train`` — confirm this
        # leakage is intended before trusting the CV score.
        kmeans_instance = kmeans(gm, None, int(k), 0.025)
        #kmeans_instance = kmeans(float_dm_train, None, 2*k, 0.025)
        kmeans_instance.process()
        clusters = kmeans_instance.get_clusters()
        print("clusters: ", clusters)
        #input("finished clustering, enter something...")
        #print("clusters[1][2]: ", clusters[1][2])
        #print("clusters[1][3]: ", clusters[1][3])
        # use known labels to vote for the label of the cluster
        nClusters = len(clusters)
        print("Number of cluster is: ", nClusters)
        cluster_labels = [0 for x in range(nClusters)]
        for ic in range(0, nClusters):
            print("For the ith cluster: ", ic)
            nPoints = len(clusters[ic])
            print(" Number of points in current cluster: ", nPoints)
            nPos = 0
            nNeg = 0
            for jc in range(0, nPoints):
                if labels[clusters[ic][jc]] == "+1":
                    nPos = nPos + 1
                else:
                    nNeg = nNeg + 1
            print("For i th cluster, +1 v.s. -1 is: ", ic, nPos, nNeg)
            # Ties (nPos == nNeg) fall through to the negative label.
            if nPos > nNeg:
                cluster_labels[ic] = 1
            else:
                cluster_labels[ic] = -1
        print("Labels of the clusters: ", cluster_labels)
        # do testing by finding the nearest cluster for the remaining points
        print("The test indices are: ", test_indices)
        for test_i in test_indices:
            min_distance = numpy.Inf
            min_dist_cluster_idx = -1
            # NOTE(review): ``dist_2_cluster`` is reset per test point but NOT
            # per cluster ``ii``, so values accumulate across clusters (and
            # sqrt is applied repeatedly) — confirm this is intended.
            dist_2_cluster = 0
            for ii in range(0, nClusters):
                cur_cluster = clusters[ii][:];
                nPointsInCluster = len(cur_cluster)
                for jj in range(0, nPointsInCluster):
                    tmp = dm[test_i][cur_cluster[jj]]
                    tmp = tmp*tmp
                    dist_2_cluster = dist_2_cluster + tmp
                dist_2_cluster = numpy.sqrt(dist_2_cluster)
                dist_2_cluster = dist_2_cluster/nPointsInCluster
                if dist_2_cluster < min_distance:
                    min_distance = dist_2_cluster
                    min_dist_cluster_idx = ii
            # determine whether this test data is fp, fn, tp, tn and add to the confusion_mat
            print(test_i)
            print("Label of this test point: ", labels[test_i])
            print("Idx of the nearest cluster: ", min_dist_cluster_idx)
            print("Label of the nearest cluster: ", cluster_labels[min_dist_cluster_idx])
            #print(confusion_mat[i])
            if labels[test_i] == "+1":
                if cluster_labels[min_dist_cluster_idx] == 1:
                    confusion_mat[i][1][1] = confusion_mat[i][1][1] + 1 # TP = confusion_mat[i][1][1]
                else:
                    confusion_mat[i][1][0] = confusion_mat[i][1][0] + 1 # FN = confusion_mat[i][1][0]
            else:
                if cluster_labels[min_dist_cluster_idx] == 1:
                    confusion_mat[i][0][1] = confusion_mat[i][0][1] + 1 # FP = confusion_mat[i][0][1]
                else:
                    confusion_mat[i][0][0] = confusion_mat[i][0][0] + 1 # TN = confusion_mat[i][0][0]
        print("Confusion mat of current fold: ", confusion_mat[i])
        #input("enter sth to conntinue next fold...")
        # after all folds are done, add up the confusion_mat
        ##confusion_mat_sum = confusion_mat_sum + confusion_mat[i]
        for sum_i in range(0,2):
            for sum_j in range(0,2):
                confusion_mat_sum[sum_i][sum_j] = confusion_mat_sum[sum_i][sum_j] + confusion_mat[i][sum_i][sum_j]
        print("End of i th fold. ", i)
    # then compute the score using the combined confusion matrix, e.g. use accuracy.
    TN = confusion_mat_sum[0][0]
    FP = confusion_mat_sum[0][1]
    FN = confusion_mat_sum[1][0]
    TP = confusion_mat_sum[1][1]
#    accuracy = (confusion_mat_sum[0][0]+confusion_mat_sum[1][1])/(confusion_mat_sum[0][0]+confusion_mat_sum[0][1]+confusion_mat_sum[1][0]+confusion_mat_sum[1][1])
#    print("Final accuracy for this set of parameter is: ", accuracy)
    # NOTE(review): ZeroDivisionError when TP == 0 (Precision + TPR == 0).
    TPR = TP/(FN+TP)
    Precision = TP/(TP+FP)
    F = 2*(Precision*TPR)/(Precision+TPR)
    print("Final F-measure for this set of parameter is: ", F)
    return F
'''
To obtain a “unified” matrix, you have only to perform addition of matrices.
That is, individual fold confusion matrix are 2x2, and the unified confusion matrix is 2x2 as well.
Note that every datum in a dataset is tested exactly one time through the entire folds,
and hence, appears exactly one in some element of the unified matrix.
Therefore, the unified confusion matrix looks as if the entire dataset were used as a test dataset.
'''
def neg_cv_score(x):
    """Negated CV score for a batch of candidate points, for GPyOpt.

    ``x`` is an (n, 3) array whose columns are (alpha, beta, k); GPyOpt
    minimises, so each cross-validation F-measure is negated.
    """
    alphas, betas, ks = x[:, 0], x[:, 1], x[:, 2]
    num_rows = x.shape[0]
    scores = numpy.zeros(num_rows)
    for row in range(num_rows):
        print("PARAS: i, alpha, beta, k ", row, alphas[row], betas[row], ks[row])
        scores[row] = -cv_score(alphas[row], betas[row], ks[row])
    return scores
# Regexes used by insert_ast to make implicit multiplication explicit.
p=re.compile(r'(\d)([xy])')
q=re.compile(r'xy')
# Symbolic kernel hyper-parameters substituted at evaluation time.
x,y=sympy.symbols("x y")
file_name = 'colon-cancer.kernel'
d = 134; # Dimension of the gram matrix = Number of samples
labels = {} # Key: Sample ID; Value: Class label
gram_mat_func = {} # Key: Pair of sample IDs; Value: SymPy object of kernel value
# head_p's label group is optional so it also matches a bare row head "i:".
head_p=re.compile(r'(\d+):(\S+)?')
elmnt_p=re.compile(r'(\d+):(\S+)')
# Parse the kernel file: "i:label" tokens record class labels; a bare "i:"
# head starts a kernel row of "j:expr" entries stored symbolically.
for line in open(file_name, 'r'):
    tokens = insert_ast(line.rstrip()).split()
    m = re.match(head_p, tokens[0])
    if m:
        g = m.groups()
        if g[1] != None:
            i = int(g[0])
            d = max([d,i+1])
            labels[i] = g[1]
        else:
            i = int(g[0])
            for t in tokens[1:]:
                g = re.match(elmnt_p, t).groups()
                j = int(g[0])
                set_elmnt(i,j,sympy.sympify(g[1]))
# The dictionary 'labels' is converted into a list object
labels=[labels[i] for i in range(0,d)]
'''
************************ End of kernel file processing *************************
'''
cv = 5
modulus = d % cv
len_portion = d / cv
if modulus > 0:
    len_portion = int(len_portion + 1)
#    len_portion = int((float(d)/cv + 0.9999999) # e.g. 134/5=27
# NOTE(review): if d were divisible by cv, len_portion would stay a float
# (true division) and range() in get_testing_indices would raise — confirm
# d % cv != 0 for every dataset this runs on.
remain_portion_len = d - (cv-1)*len_portion # e.g. 26
alpha = 0.5
beta = 0.3
number_cluster = 5
#cv_score(alpha, beta, number_cluster)
# Search space for Bayesian optimisation over the kernel/cluster parameters.
domain=[{'name':'alpha', 'type':'continuous', 'domain':(0,1)},
        {'name':'beta', 'type':'continuous', 'domain':(0,1)},
        {'name':'k', 'type':'discrete', 'domain':(2,10)}]
#        {'name':'normal', 'type':'discrete', 'domain':(1,1)},
#        {'name':'kernel', 'type':'discrete', 'domain':(0,1)},
#        {'name':'gamma', 'type':'continuous', 'domain':(1.0e-3,1.0e3)}]
bo=GPyOpt.methods.BayesianOptimization(f=neg_cv_score,domain=domain)
#bo=GPyOpt.methods.BayesianOptimization(f=neg_cv_score,domain=domain,acquisition_type='LCB')
#bo.run_optimization(max_iter=30)
bo.run_optimization(max_iter=30)
bo.x_opt # Optimal solutions.
bo.fx_opt # Found minimum values.
print(bo.x_opt)
print(bo.fx_opt)
| [
"gogogodoge@gmail.com"
] | gogogodoge@gmail.com |
51af854c33c493f2da6d4bec9f504111f37653d6 | 091bbb206ce62120e11d721b7f34743551e7e556 | /dcc_sra/serialize.py | f49d64a3b304a21985dd678bb2c1cd19819130ab | [] | no_license | biobakery/dcc_sra | 6cfa34dbb24d6796f2263ad61ab2b1b38aaf5675 | a359e2087eadcdea2612c2a477c5c76f4d2f2333 | refs/heads/master | 2022-07-17T06:00:03.396972 | 2020-05-21T03:33:30 | 2020-05-21T03:33:30 | 265,746,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,174 | py | from os.path import basename
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import SubElement as sub
from dateutil.parser import parse as dateparse
from . import geo
def prep_subtype(p):
    """Return the prep's subtype string from its raw OSDF document."""
    raw_doc = p._get_raw_doc()
    return raw_doc['meta']['subtype']
def eld(tagname, attrs=None, text=None, children=None):
    """Build a lightweight element-description dict consumed by hier_sub.

    Only truthy fields are included, exactly as before. ``attrs`` and
    ``children`` now default to None instead of the shared mutable ``{}``
    / ``[]`` defaults (the classic mutable-default pitfall); since empty
    dict/list are falsy, callers see identical results.

    :param tagname: XML tag name (always present in the result)
    :param attrs: optional attribute dict
    :param text: optional element text
    :param children: optional list of child descriptions (same shape)
    :returns: dict with 'tagname' and any truthy optional fields
    """
    ret = {"tagname": tagname}
    if attrs:
        ret['attrs'] = attrs
    if text:
        ret['text'] = text
    if children:
        ret['children'] = children
    return ret
def hier_sub(parent, tagname, attrs=None, text=None, children=None):
    """Create an ElementTree SubElement under ``parent`` and recurse into
    ``children`` (each a dict of hier_sub keyword args, as built by eld).

    ``attrs``/``children`` default to None rather than the shared mutable
    ``{}`` / ``[]`` defaults; behavior is unchanged for all existing calls.

    :returns: a nested list: the created element followed by one sub-list
        per child (the same shape the original produced).
    """
    el = sub(parent, tagname, attrs or {})
    if text:
        el.text = text
    ret = [el]
    for kwargs in (children or []):
        ret.append(hier_sub(el, **kwargs))
    return ret
def very_last(nested_list):
    """Follow the last element of each nesting level and return the final
    non-iterable value (e.g. [1, [2, [3, 4]]] -> 4)."""
    item = nested_list[-1]
    while hasattr(item, "__iter__"):
        item = item[-1]
    return item
def spuid(obj):
    """SPUID element description in the 'hmp2' namespace for ``obj``'s id."""
    namespace_attrs = {"spuid_namespace": "hmp2"}
    return eld("SPUID", attrs=namespace_attrs, text=obj.id)
def flatten_list(l):
    """Flatten arbitrarily nested iterables into a single flat list."""
    def _walk(items):
        for item in items:
            if hasattr(item, "__iter__"):
                for inner in _walk(item):
                    yield inner
            else:
                yield item
    return list(_walk(l))
# Thanks, http://stackoverflow.com/a/4590052
def indent(elem, level=0):
    """In-place pretty-printer: inserts newline+indent whitespace into an
    ElementTree subtree (recipe from stackoverflow.com/a/4590052)."""
    i = "\n" + level*"  "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        # NOTE: the loop variable deliberately shadows ``elem``; after the
        # loop ``elem`` is the *last child*, whose tail is dedented below.
        for elem in elem:
            indent(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
def reg_text(t):
    """Collapse all runs of whitespace in ``t`` into single spaces and trim."""
    words = t.split()
    return u" ".join(words)
def reg_sample(s):
    """Normalize the sample's lat_lon in place via geo.cardinal and return it."""
    cardinal_parts = geo.cardinal(s.mixs['lat_lon'])
    s.mixs['lat_lon'] = " ".join(cardinal_parts)
    return s
def _add_description(root, st, release_date=None):
    """Append the NCBI submission <Description> block under ``root``:
    a comment, the owner organization/contact, and an optional <Hold>
    with a normalized release date. Returns ``root``."""
    children = [
        eld("Comment", text="iHMP project "+st.name),
        eld("Organization", attrs={"role":"owner", "type":"institute"},
            children=[
                eld("Name", text="iHMP DCC"),
                eld("Contact", attrs={"email":"schwager@hsph.harvard.edu"},
                    children=[ eld("Name", children=[
                        eld("First", text="Randall"),
                        eld("Last", text="Schwager")
                    ])
                    ])
            ]
        )
    ]
    if release_date:
        # Normalize whatever date string was provided to YYYY-MM-DD.
        d = dateparse(release_date).strftime("%Y-%m-%d")
        children.append( eld("Hold", attrs={"release_date": d}) )
    hier_sub(root, "Description", children=children)
    return root
def _add_bioproject(root, st, bioproject_id=None):
    """Append a BioProject AddData <Action> for study ``st`` under ``root``.

    Skipped entirely when an existing ``bioproject_id`` is supplied (the
    submission then references that project instead of creating one).
    Returns ``root``.
    """
    if bioproject_id:
        return root
    ret = hier_sub(root, "Action", children=[
        eld("AddData", attrs={"target_db":"BioProject"}, children=[
            eld("Data", attrs={"content_type":"xml"}, children=[
                eld("XmlContent", children=[
                    eld("Project", attrs={"schema_version":"2.0"})
                ])
            ]),
            eld("Identifier", children=[spuid(st)]),
        ])
    ])
    # flatten_list(ret)[-3] is the <Project> element created above; the
    # project metadata is attached directly beneath it.
    prj = flatten_list(ret)[-3]
    hier_sub(prj, "ProjectID", children=[spuid(st)])
    hier_sub(prj, "Descriptor", children=[
        eld("Title", text="iHMP "+st.name),
        eld("Description", text=reg_text(st.description)),
        eld("Relevance", children=[ eld("Medical", text="Yes") ])
    ])
    pts_attrs = {"sample_scope":"eEnvironment"}
    hier_sub(prj, "ProjectType", children=[
        eld("ProjectTypeSubmission", attrs=pts_attrs, children=[
            eld("IntendedDataTypeSet", children=[
                eld("DataType", text="metagenome")
            ])
        ])
    ])
    return root
def _add_biosample(root, st, sample, prep, release_date=None,
                   bioproject_id=None):
    """Append a BioSample AddData <Action> for ``sample`` under ``root``.

    Normalizes the sample's lat/lon, links the sample to either the given
    ``bioproject_id`` or the study SPUID, and emits the MIMS
    human-associated attribute set ("missing" for absent values).
    Returns ``root``.  NOTE(review): ``release_date`` is accepted but
    unused here — confirm whether a <Hold> was intended.
    """
    sample = reg_sample(sample)
    ret = hier_sub(root, "Action", children=[
        eld("AddData", attrs={"target_db":"BioSample"}, children=[
            eld("Data", attrs={"content_type":"xml"}, children=[
                eld("XmlContent", children=[
                    eld("BioSample", attrs={"schema_version":"2.0"})
                ])
            ]),
            eld("Identifier", children=[spuid(sample)])
        ])
    ])
    # flatten_list(ret)[-3] is the <BioSample> element created above.
    bs_node = flatten_list(ret)[-3]
    hier_sub(bs_node, "SampleId", children=[spuid(sample)])
    hier_sub(bs_node, "Descriptor", children=[
        eld("Title", text=sample.name),
    ])
    if bioproject_id:
        hier_sub(bs_node, "BioProject",
                 children=[eld("PrimaryId", attrs={"name": "BioProject"},
                               text=bioproject_id)])
    else:
        hier_sub(bs_node, "BioProject", children=[spuid(st)])
    hier_sub(bs_node, "Organism",
             attrs={"taxonomy_id": prep.ncbi_taxon_id},
             children=[eld("OrganismName", text="Metagenome")])
    hier_sub(bs_node, "Package", text="MIMS.me.human-associated.4.0")
    # kv builds one <Attribute>; get fetches a mixs value, mapping empty or
    # absent entries to the literal "missing" required by the package.
    kv = lambda k, v: eld("Attribute", attrs={"attribute_name": k}, text=v)
    get = lambda v: sample.mixs.get(v, "missing").strip() or "missing"
    hier_sub(bs_node, "Attributes", children=[
        kv("env_biome", get("biome")),
        kv("collection_date", get("collection_date")),
        kv("env_feature", get("feature")),
        kv("env_material", get("material")),
        kv("geo_loc_name", get("geo_loc_name")),
        kv("host", "Homo sapiens"),
        kv("lat_lon", get("lat_lon"))
    ]+[kv(k, get(k)) for k in ("rel_to_oxygen", "samp_collect_device",
                               "samp_mat_process", "samp_size")
       if bool(sample.mixs.get(k, None))]
    )
    return root
def _add_sra(root, st, sample, prep, seq, files_sizes,
             bioproject_id=None):
    """Append an SRA AddFiles <Action> for sequence set ``seq`` under ``root``.

    Emits one <File> per (name, size) pair in ``files_sizes``, library
    attributes derived from the prep (AMPLICON for 16s preps, WGS
    otherwise), and RefIds linking back to the BioProject and BioSample.
    Skips the action when there are no files and the sequence set is empty.
    Returns ``root``.
    """
    kv = lambda k, v: eld("Attribute", attrs={"name": k}, text=v)
    strategy = "AMPLICON" if prep_subtype(prep) == "16s" else "WGS"
    # 16s preps carry MIMARKS metadata; WGS preps carry MIMS.
    mims_or_mimarks = prep.mimarks if prep_subtype(prep) == "16s" else prep.mims
    file_nodes = [
        eld("File", attrs={"file_path":basename(name)},
            children=[eld("DataType", text="sra-run-fastq")])
        for name, _ in files_sizes
    ]
    if not file_nodes and seq.size == 0:
        return root
    if bioproject_id:
        st_spuid = eld("PrimaryId", attrs={"db": "BioProject"},
                       text=bioproject_id)
    else:
        st_spuid = spuid(st)
    hier_sub(root, "Action", children=[
        eld("AddFiles", attrs={"target_db": "SRA"}, children=file_nodes+[
            kv("instrument_model",seq.seq_model),
            kv("library_strategy",strategy),
            kv("library_source", "GENOMIC"),
            kv("library_selection", prep.lib_selection.upper()),
            kv("library_layout", "FRAGMENT"),
            kv("library_construction_protocol",
               reg_text(mims_or_mimarks['lib_const_meth'])),
            eld("AttributeRefId", attrs={"name": "BioProject"}, children=[
                eld("RefId", children=[st_spuid])
            ]),
            eld("AttributeRefId", attrs={"name": "BioSample"}, children=[
                eld("RefId", children=[spuid(sample)])
            ]),
            eld("Identifier", children=[spuid(seq)])
        ])
    ])
    return root
def to_xml(st, samples, tardict, release_date=None, bioproject_id=None):
    """Build the full NCBI <Submission> tree for study ``st``.

    Adds the description and (optionally) a new BioProject, then one
    BioSample per distinct sample id and one SRA AddFiles action per
    prep/sequence pair. ``tardict`` maps (tar basename, '16s'|'wgs') to
    the (name, size) file list for that sequence set. Returns the root
    Element.
    """
    root = ET.Element('Submission')
    root = _add_description(root, st, release_date)
    root = _add_bioproject(root, st, bioproject_id)
    # Guard against emitting the same BioSample twice when a sample has
    # multiple prep/sequence pairs.
    sample_cache = set()
    for sample in samples:
        if not sample.prepseqs:
            continue
        for prep, seq in sample.prepseqs:
            if sample.sample.id not in sample_cache:
                root = _add_biosample(root, st, sample.sample, prep,
                                      bioproject_id=bioproject_id)
                sample_cache.add(sample.sample.id)
            is_16s = seq._get_raw_doc()['node_type'].startswith("16s")
            seqtype = "16s" if is_16s else "wgs"
            tarkey = (basename(seq.urls[0]), seqtype)
            root = _add_sra(root, st, sample.sample, prep, seq, tardict[tarkey],
                            bioproject_id)
    return root
| [
"schwager@hsph.harvard.edu"
] | schwager@hsph.harvard.edu |
58e87ea132e7ce1ff005f595e4011a4a692911f8 | 392f3b2181405085095ab2b3ed00b296b0ee6659 | /backend/controllers/__init__.py | 0606d163d193019fee5e73e0a1a2f73a3f30f486 | [
"MIT"
] | permissive | brianchul/SWOOSH | e69346810f2e28d8e260554fb1625a1ad7b7c077 | ce6cc16e83013fbfe594bba0cc69e5b005c40b22 | refs/heads/master | 2023-05-15T06:57:37.792410 | 2021-08-09T08:33:19 | 2021-08-09T08:33:19 | 153,815,261 | 0 | 0 | MIT | 2023-05-01T20:25:40 | 2018-10-19T16:58:36 | JavaScript | UTF-8 | Python | false | false | 116 | py | from sqlalchemy import create_engine, MetaData
from config.DBindex import init_db, engine
metadata=MetaData(engine) | [
"404411711@gms.tku.edu.tw"
] | 404411711@gms.tku.edu.tw |
c2f7e1106bc81458786e2a7da426763804d8d2cf | e67e80bde33c4aaaae013aa77f1d939d961935c7 | /config/hass_sonarr_search_by_voice.py | 7c8609c5741c262080864b2a30cecd556ece484c | [
"Unlicense"
] | permissive | unofficialmatt/home-assistant | 4a457fc9680088704972741a24e3b29c763169b9 | 52d23c103f0de0e348c7b4b24117af7f34e19781 | refs/heads/master | 2023-04-10T01:11:29.469747 | 2021-04-17T17:34:55 | 2021-04-17T17:34:55 | 172,801,406 | 1 | 0 | null | 2021-04-11T21:21:49 | 2019-02-26T22:35:10 | Python | UTF-8 | Python | false | false | 15,876 | py | import datetime
import requests
import json
import sys
import os
import configparser
# ------------------------------------
class ShowDownloader:
    '''
    Searches Sonarr for a TV show, adds it to the download list, and
    announces the outcome over a Home Assistant TTS speaker.

    :param str show: Title of the tv show, or the option number if mode 2 is used
    :param int mode: 0 | 1 | 2
            mode 0 - takes the tv show string and downloads the best guess from upcoming and recent years.
            mode 1 - searches the tv show string and offers 3 options to choose from.
            mode 2 - downloads the option chosen from a previous search.
    :param str monitor: missing | future
            missing - Monitors episodes that do not have files or have not aired yet.
            future - Monitors episodes that have not aired yet.
    '''

    def __init__(self, show, mode=0, monitor='future'):
        self.monitor = monitor
        self.loadParameters()
        year = datetime.datetime.now().year
        term = show
        search_term = term.replace(" ", "%20")
        # Accept shows dated from two years ahead back to 68 years ago.
        current_years = [year, year+1, year+2]
        for i in range(1,69):
            current_years .append(year-i)
        if mode == 0 or mode == 1: # we are making a search by series title
            # search
            r = requests.get(self.SONARR_SERVER+"/api/series/lookup?term="+search_term+"&apikey="+self.SONARR_API)
            if r.status_code == requests.codes.ok:
                media_list = r.json()
                if len(media_list) > 0:
                    if mode == 0: # download best guess
                        # add first occurrence to downloads
                        # we search for newish show (recent and upcoming show only)
                        i = 0
                        found = False
                        while i < len(media_list) and found == False:
                            year = media_list[i]['year']
                            if year in current_years:
                                found = True
                                data = self.prepare_show_json(media_list[i])
                                self.add_show(data)
                                break;
                            i += 1
                        if found == False:
                            self.tts_google("I didn't find the tv show. Try again with the search option.")
                    elif mode == 1: # search tv show and give 3 options
                        # add to download_options file and read them out loud
                        i = 0
                        show = []
                        while i < len(media_list) and i < 3:
                            data = self.prepare_show_json(media_list[i])
                            show.append(data)
                            i += 1
                        msg = self.save_options_found_and_compose_msg(show)
                        self.tts_google(msg)
        # elif mode == 3: # search latest show by Actor/Actress and offers 5 options to choose from.
        #     actor_id = self.get_actor_id(search_term)
        #     if actor_id > 0:
        #         show = []
        #         show = self.get_actors_latest_show(actor_id, year)
        #         if len(show) > 0:
        #             msg = self.save_options_found_and_compose_msg(show)
        #             self.tts_google(msg)
        #         else:
        #             self.tts_google("Your tv show was not found.")
        #     else:
        #         self.tts_google("The actor was not found.")
        else:
            # add to downloads from download_options file
            # ``show`` here is the spoken option number (1-based).
            download_option = int(show)-1
            data = {}
            with open(self.HASS_SCRIPTS_PATH+'/download_tvshow_options.txt') as json_data:
                show = json.load(json_data)
            if download_option > -1 and len(show) >= download_option:
                m = show[download_option]
                # A barebone option (profileId == -1) only carries a tvdbId;
                # re-fetch the full series record before adding it.
                if m['profileId'] == -1 and m['tvdbId'] > 0:
                    r = requests.get(self.SONARR_SERVER+"/api/series/lookup?term=tvdb:"+str(m['tvdbId'])+"&apikey="+self.SONARR_API)
                    if r.status_code == requests.codes.ok:
                        media_list = r.json()
                        # print(media_list)
                        # if len(media_list) > 0:
                        data = self.prepare_show_json(media_list)
                else:
                    data = self.prepare_show_json(m)
                self.add_show(data)
            else:
                self.tts_google("There are no options.")

    def prepare_show_json(self, media):
        """Build the Sonarr /api/series POST payload from a lookup result."""
        data = {}
        addOptions = {}
        if self.monitor == "missing":
            addOptions['ignoreEpisodesWithFiles'] = True
            addOptions['ignoreEpisodesWithoutFiles'] = False
            addOptions['searchForMissingEpisodes'] = True
        elif self.monitor == "future":
            addOptions['ignoreEpisodesWithFiles'] = True
            addOptions['ignoreEpisodesWithoutFiles'] = True
            addOptions['searchForMissingEpisodes'] = False
        data['title'] = media['title']
        data['profileId'] = self.SONARR_QUALITY_PROFILE_ID
        data['titleSlug'] = media['titleSlug']
        data['images'] = media['images']
        data['seasons'] = media['seasons']
        data['imdbId'] = media['imdbId']
        data['tvdbId'] = media['tvdbId']
        data['seasonFolder'] = True
        data['rootFolderPath'] = self.SONARR_DOWNLOAD_PATH
        data['addOptions'] = addOptions
        data['year'] = media['year']
        data['cast'] = self.get_cast(data['imdbId'])
        return data

    def prepare_barebone_show_json(self, tvdbId, title):
        """Placeholder payload holding only tvdbId/title; profileId == -1
        marks it as needing a full lookup before it can be added."""
        data = {}
        data['title'] = title
        data['profileId'] = -1
        data['titleSlug'] = 0
        data['images'] = 0
        data['seasons'] = 0
        data['imdbId'] = ""
        data['tvdbId'] = tvdbId
        data['seasonFolder'] = 0
        data['rootFolderPath'] = 0
        data['year'] = 0
        data['cast'] = ""
        return data

    def add_show(self, data):
        """POST the show to Sonarr, announce the result, and record the new
        series id in last_tvshow_download_added.txt on success."""
        r = requests.post(self.SONARR_SERVER+"/api/series?apikey="+self.SONARR_API,json.dumps(data))
        if r.status_code == 201:
            if str(data['cast']) == "":
                self.tts_google("I added the tv show "+str(data['title'])+" "+str(data['year'])+" to your list.")
            else:
                self.tts_google("I added the tv show "+str(data['title'])+" "+str(data['year'])+" with, "+str(data['cast'])+" to your list.")
            show = r.json()
            with open(self.HASS_SCRIPTS_PATH+"/last_tvshow_download_added.txt", "w") as myfile:
                myfile.write("show:"+str(show['id'])+"\n")
        else:
            # Non-201: distinguish "already in list" from other failures.
            res = self.is_show_already_added(data)
            if res >= 0:
                if res == 0:
                    self.tts_google("I found your tv show but I was not able to add it to your list.")
                else:
                    if str(data['cast']) == "":
                        self.tts_google("The tv show, "+str(data['title'])+" "+str(data['year'])+" is already in your list.")
                    else:
                        self.tts_google("The tv show, "+str(data['title'])+" "+str(data['year'])+" with, "+str(data['cast'])+" is already in your list.")
            else:
                self.tts_google("Something wrong occured when trying to add the tv show to your list.")

    def is_show_already_added(self, data):
        """Return 1 if the tvdbId is already in Sonarr, 0 if not, -1 on a
        failed API call."""
        # print("http://"+self.SONARR_SERVER+"/api/movie?apikey="+self.SONARR_API)
        r = requests.get(self.SONARR_SERVER+"/api/series?apikey="+self.SONARR_API)
        found = False
        # print(data['tvdbId'])
        # print(r.status_code)
        if r.status_code == 200:
            media_list = r.json()
            # print(media_list)
            if len(media_list) > 0:
                i = 0
                while i < len(media_list) and found == False:
                    tvdbId = media_list[i]['tvdbId']
                    if tvdbId == data['tvdbId']:
                        found = True
                        break;
                    i += 1
            return 1 if found == True else 0
        else:
            return -1

    def get_cast(self, imdbId):
        """Return up to two lead actor names from OMDb for the speaker
        announcements, or "" when OMDb is unconfigured/unavailable."""
        if self.OMDB_API:
            r = requests.get("http://www.omdbapi.com/?i="+str(imdbId)+"&apikey="+self.OMDB_API)
            if r.status_code == requests.codes.ok:
                movie = r.json()
                cast = movie['Actors'].split(',')
                if len(cast) > 0:
                    if len(cast) > 1:
                        return(cast[0]+" and"+cast[1])
                    else:
                        if cast[0] == "N/A":
                            return("")
                        else:
                            return(cast[0])
                else:
                    return("")
            else:
                return("")
        else:
            return("")
        # TVDB_API
        # r = requests.get("https://api.thetvdb.com/series/"+str(tvdbId)+"/actors")
        # if r.status_code == requests.codes.ok:
        #     series = r.json()
        #     data = series['data']
        #     if len(data) > 1:
        #         return(data[0]['name']+" et "+data[1]['name'])
        #     else:
        #         return(data[0]['name'])
        # else:
        #     return("")

    # POSSIBLE FUTURE FEATURE IF TVDB_API BECOMES FREE
    # def get_actor_id(self, actor_name):
    #     r = requests.get("https://api.thetvdb.com/3/search/person?language=en-US&page=1&include_adult=false&api_key="+TVDB_API+"&query="+actor_name)
    #     if r.status_code == requests.codes.ok:
    #         results = r.json()
    #         if int(results['total_results']) > 0:
    #             return(int(results['results'][0]['id']))
    #         else:
    #             return(-1)
    #     else:
    #         return(-1)
    #
    # def get_actors_latest_show(self, actor_id, year):
    #     latest_years = [ year+1, year, year-1, year-2]
    #     i = 0
    #     shows = []
    #     while i < len(latest_years) and len(show) < 5:
    #         r = requests.get("https://api.thetvdb.com/3/discover/series?language=en-US&page=1&sort_by=release_date.desc&include_adult=false&include_video=false&page=1&primary_release_year=&api_key="+TVDB_API+"&primary_release_year="+str(latest_years[i])+"&with_cast="+str(actor_id))
    #         if r.status_code == requests.codes.ok:
    #             results = r.json()
    #             if int(results['total_results']) > 0:
    #                 for show in results['results']:
    #                     if len(show) < 5:
    #                         data = self.prepare_barebone_show_json(int(show["id"]), show["title"])
    #                         show.append(data)
    #         i += 1
    #     return(show)

    def save_options_found_and_compose_msg(self, show):
        """Persist search options to download_tvshow_options.txt and return
        the spoken message enumerating them."""
        msg=""
        with open(self.HASS_SCRIPTS_PATH+"/download_tvshow_options.txt", "w") as myfile:
            json.dump(show, myfile)
        i = 0
        if len(show) > 1:
            msg = "I found, "+str(len(show))+" options.\n"
        else:
            msg = "I found, "+str(len(show))+" option.\n"
        while i < len(show):
            m = show[i]
            if str(m['cast']) != "":
                msg = msg+"Option "+str(i+1)+", "+str(m['title'])+" with "+str(m['cast'])+".\n"
            else:
                msg = msg+"Option "+str(i+1)+", "+str(m['title'])+". "
            i += 1
        return msg

    def tts_google(self, msg):
        """Speak ``msg`` via the Home Assistant TTS service, using a
        long-lived token when configured, else the legacy api_password."""
        data = {"entity_id": self.HASS_SPEAKER_ENTITY, "message": msg}
        if self.HASS_API == "" and self.HASS_TOKEN != "":
            headers = {
                'Authorization': 'Bearer '+self.HASS_TOKEN
            }
            r = requests.post(self.HASS_SERVER+"/api/services/tts/"+self.HASS_TTS_SERVICE,json.dumps(data), headers=headers)
        else:
            r = requests.post(self.HASS_SERVER+"/api/services/tts/"+self.HASS_TTS_SERVICE+"?api_password="+self.HASS_API,json.dumps(data))
        # assistant-relay
        # command_data = {"command": msg}
        # r = requests.post("http://"+self.HASS_SERVER+"/api/services/rest_command/gh_broadcast?api_password="+self.HASS_API,json.dumps(command_data))
        print(msg)

    def loadParameters(self):
        """Load ha_radarr_sonarr.conf (next to this script) into instance
        attributes, then validate them via checkConfig."""
        config=configparser.ConfigParser()
        dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
        configFile = os.path.join(dirname, 'ha_radarr_sonarr.conf')
        config.read(configFile)
        self.HASS_SERVER = config.get('HomeAssistant', 'server_url')
        self.HASS_API = config.get('HomeAssistant', 'api_key')
        self.HASS_TOKEN = config.get('HomeAssistant', 'token')
        self.HASS_SCRIPTS_PATH = config.get('HomeAssistant', 'scripts_path')
        self.HASS_SPEAKER_ENTITY = config.get('HomeAssistant', 'speaker_entity')
        self.HASS_TTS_SERVICE = config.get('HomeAssistant', 'tts_service')
        self.SONARR_SERVER = config.get('Sonarr', 'server_url')
        self.SONARR_API = config.get('Sonarr', 'api_key')
        self.SONARR_DOWNLOAD_PATH = config.get('Sonarr', 'root_directory')
        self.SONARR_QUALITY_PROFILE_ID = int(config.get('Sonarr', 'profile_id'))
        self.OMDB_API = config.get('Services', 'omdb_api_key')
        # print(self.HASS_SERVER)
        # print(self.HASS_API)
        # print(self.HASS_TOKEN)
        # print(self.HASS_SCRIPTS_PATH)
        # print(self.HASS_SPEAKER_ENTITY)
        # print(self.HASS_TTS_SERVICE)
        #
        # print(self.SONARR_SERVER)
        # print(self.SONARR_API)
        # print(self.SONARR_DOWNLOAD_PATH)
        # print(self.SONARR_QUALITY_PROFILE_ID)
        #
        # print(self.OMDB_API)
        self.checkConfig()

    def checkConfig(self):
        """Print every missing/invalid setting and exit(1) if any required
        setting is absent; missing OMDb key only warns."""
        error_messages = []
        warning_messages = []
        if not self.HASS_SERVER:
            error_messages.append('Home Assistant url with port (usually localhost:8123) must be defined')
        if not self.HASS_API and not self.HASS_TOKEN:
            error_messages.append('A Long-lived token or HA API password (legacy) must be defined')
        if not self.HASS_SCRIPTS_PATH:
            error_messages.append('Path were this script is located. must be defined. eg. /users/vr/.homeassistant/scripts or for container (e.g HA SUPERVISED) /config/scripts')
        if not self.HASS_SPEAKER_ENTITY:
            error_messages.append('Home assistant speaker entity_id must be specified. eg. media_player.family_room_speaker')
        if not self.HASS_TTS_SERVICE:
            error_messages.append('Home assistant text-to-speech service must be specified.')
        if not self.SONARR_SERVER:
            error_messages.append('Sonarr url with port (usually :8989) must be defined')
        if not self.SONARR_API:
            error_messages.append('Sonarr API Key must be defined')
        if not self.SONARR_DOWNLOAD_PATH:
            error_messages.append('Sonarr root_directory also knwon as rootFolderPath must be defined')
        if self.SONARR_QUALITY_PROFILE_ID == 0:
            error_messages.append('Sonarr quality profile id must be defined. Default value is 4 (1080p)')
        if not self.OMDB_API:
            warning_messages.append("Warning. omdb_api_key (optional)(recommended) is not set. Your speaker's feedback will miss cast details for tvshows. http://www.omdbapi.com/apikey.aspx'")
        if len(error_messages) > 0:
            print('Problem(s) in configuration file :')
            for m in error_messages:
                print(m)
        if len(warning_messages) > 0:
            for m in warning_messages:
                print(m)
        if len(error_messages) > 0:
            exit(1)
# CLI entry: <show title or option number> <mode 0|1|2> <monitor missing|future>
# NOTE(review): no argument-count check and no ``if __name__ == '__main__'``
# guard — importing this module triggers a download immediately.
query = sys.argv[1]
mode = sys.argv[2]
monitor = sys.argv[3]
print(query)
print(mode)
print(monitor)
downloader = ShowDownloader(query, int(mode), monitor)
| [
"hello@mattweet.com"
] | hello@mattweet.com |
594c4bc3e795302aee87576ea12bf341eb45bed7 | 954a84ba67245d438a00055b62a5a8d9e0bacc6a | /oa_v1/urls.py | 1234081370b7906d89328a4e6e182f647d7c4aa9 | [] | no_license | disenQF/901oa | c292b6951f455e348c16f131044f5644f46a9a9f | 2ecd607c2edaac4d045cc87bf48ffdf58f7f47bc | refs/heads/master | 2020-06-10T10:01:18.303122 | 2019-07-04T09:18:37 | 2019-07-04T09:18:37 | 193,632,138 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """oa_v1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
import xadmin as admin
# Admin routes are served by xadmin (imported above as ``admin``).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| [
"610039018@qq.com"
] | 610039018@qq.com |
ea9c16f95c8c149fefe64f5e6fe092a7816803c5 | bb707a6b345c5505ceaf1d9af43439e14925870b | /train_model/train_gbdtlr_classifer.py | cfb71c92692b26776def562a8178d314bd8520a8 | [] | no_license | xiaolinpeter/TextMatch | e02859e8908b03f6d338ccb86d75bad76e48a293 | 78607c98292a047c1fd15813c3453fb9c58d1bc6 | refs/heads/master | 2022-11-10T15:46:08.082680 | 2020-06-26T02:27:15 | 2020-06-26T02:27:15 | 275,122,996 | 2 | 0 | null | 2020-06-26T09:48:54 | 2020-06-26T09:48:53 | null | UTF-8 | Python | false | false | 2,600 | py | # -*- coding:utf-8 -*-
'''
-------------------------------------------------
Description : DNN trainer
Author : machinelp
Date : 2020-06-06
-------------------------------------------------
'''
import json
import numpy as np
from textmatch.models.text_embedding.model_factory_sklearn import ModelFactory
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from textmatch.models.ml.lgb import LGB
from textmatch.models.ml.lr import LR
from textmatch.models.ml.gbdt import GBDT
from textmatch.models.ml.gbdt_lr import GBDTLR
from textmatch.models.ml.xgb import XGB
if __name__ == '__main__':
    # Smoke-test script: embed a handful of toy Chinese queries with several
    # text-embedding models, then train and evaluate a GBDT+LR classifier.
    doc_dict = {"0":"我去玉龙雪山并且喜欢玉龙雪山玉龙雪山", "1":"我在玉龙雪山并且喜欢玉龙雪山", "2":"我在九寨沟", "3":"你好"} #["我去玉龙雪山并且喜欢玉龙雪山玉龙雪山","我在玉龙雪山并且喜欢玉龙雪山","我在九寨沟"]
    #doc_dict = {"0":"This is the first document.", "1":"This is the second second document.", "2":"And the third one."}
    #query = "This is the second second document."
    query = [ "我在玉龙雪山并且喜欢玉龙雪山", "我在玉龙雪山并且喜欢玉龙雪山", "我在玉龙雪山并且喜欢玉龙雪山","我在玉龙雪山并且喜欢玉龙雪山", "我在九寨沟,很喜欢", "我在九寨沟,很喜欢","我在九寨沟,很喜欢", "我在九寨沟,很喜欢", "我在九寨沟,很喜欢","我在九寨沟,很喜欢", "我在九寨沟,很喜欢", "我在玉龙雪山并且喜欢玉龙雪山"]
    train_labels = [0,0,0,0,1,1,1,1,1,1,1,0]
    # Embedding models: bag-of-words, tf-idf, n-gram tf-idf and ALBERT.
    mf = ModelFactory( match_models=['bow', 'tfidf', 'ngram_tfidf', 'albert'] )
    #mf.init(words_dict=doc_dict, update=True)
    mf.init(update=False)
    # Build one flat feature vector per query by concatenating the embeddings
    # produced by every model in the factory.
    train_sample = []
    for per_query in query:
        bow_pre = mf.predict_emb(per_query)
        # print ('pre>>>>>', bow_pre)
        per_train_sample=[]
        for per_v in bow_pre.values():
            per_train_sample.extend( per_v )
        train_sample.append(per_train_sample)
    print ('train_sample, train_labels', train_sample, train_labels)
    #print ('train_sample:::::', len(train_sample[0]))
    # First 10 samples for training, last 2 held out for a tiny sanity check.
    train_x = np.array( train_sample[:10] )
    train_y = train_labels[:10]
    val_x = np.array( train_sample[10:12] )
    val_y = train_labels[10:12]
    print ('val_y:', val_y)
    gbdtlr = GBDTLR()
    gbdtlr.fit( train_x, train_y )
    res = gbdtlr.predict(val_x)
    print ('>>>>', res)
| [
"liupeng@qudian.com"
] | liupeng@qudian.com |
1fcbb481ea3790719543ee34ea0da17133e7038c | 77e63ec062ca1902842b58b2024a033e72b44a98 | /users/views.py | aa98ab69dd7529c8241c2d70bd0bf60114654679 | [] | no_license | aziz2719/Book_house | e0daef94ceb9d1067abebca527f7ab7fa79b6868 | ff3a59d53ad46f5c2ab8d2f5815255181bf0aca5 | refs/heads/master | 2023-04-13T19:00:14.665825 | 2021-04-15T14:03:10 | 2021-04-15T14:03:10 | 358,277,402 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet

from books.models import Book

from .models import User, FavoriteBook
from .permissions import IsUserOwnerOrReadOnly
from .serializers import UserSerializer, FavoriteBookSerializer
class UserView(ModelViewSet):
    """CRUD endpoints for users; each user's favorite books are prefetched."""

    serializer_class = UserSerializer
    permission_classes = (IsUserOwnerOrReadOnly, )
    lookup_field = 'pk'
    queryset = User.objects.prefetch_related('user_favorite_book')
class UserFavoriteBookView(APIView):
    """Read-only listing of the names of one user's favorite books."""

    def get(self, request, pk):
        # `pk` identifies the user being inspected, not the requester.
        user = User.objects.get(id=pk)
        # Flat list of book names reached through FavoriteBook.books.
        favorite = FavoriteBook.objects.values_list('books__name', flat=True).filter(user=user)
        # NOTE(review): requires rest_framework.response.Response to be
        # imported at file level -- confirm the import exists.
        return Response(favorite)
class FavoriteBookView(APIView):
    """Toggle the authenticated user's "favorite" mark on a book.

    GET /.../<pk>/ deletes the FavoriteBook link if it exists, otherwise
    creates it. (The original body referenced undefined names `books_name`,
    `Favorite` and `publication` and would raise NameError; it also returned
    201 for the delete and 200 for the create, which are swapped here.)
    """

    def get(self, request, pk):
        user = request.user
        # Raises Book.DoesNotExist for an unknown pk (surfaces as 500 unless
        # handled upstream) -- same failure mode as the original lookup.
        book = Book.objects.get(id=pk)
        # NOTE(review): assumes FavoriteBook has a relation field named
        # `books` (as used by UserFavoriteBookView's `books__name` filter).
        favorites = FavoriteBook.objects.filter(user=user, books=book)
        if favorites.exists():
            favorites.delete()
            return Response('FavoriteBook Deleted', status=status.HTTP_200_OK)
        FavoriteBook.objects.create(user=user, books=book)
        return Response('FavoriteBook Created', status=status.HTTP_201_CREATED)
"davletovaziz511@gmail.com"
] | davletovaziz511@gmail.com |
0ed003f9b982f9b4613357383628ec5e7651618d | af22b473dfa87642c02bd494340a4eb5c82f9c42 | /OrderTango/wsgi.py | f26a1383116a6dbfd166d022f1878e89efc20434 | [] | no_license | OrderTango/ProjectDRF | fcee4c2525f3ebb7e09e88063f69b2b483217120 | 8fb213849961851e5428a0a6ccbd50f2820884fb | refs/heads/master | 2021-01-05T13:58:42.523693 | 2020-03-11T11:05:10 | 2020-03-11T11:05:10 | 241,040,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | """
WSGI config for OrderTango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
import sys

from django.core.wsgi import get_wsgi_application

# Make the project package importable when the WSGI server (e.g. mod_wsgi)
# starts from outside the project directory.
path = '/var/www/ordertango'
sys.path.append(path)

# Point Django at the settings module before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OrderTango.settings')

application = get_wsgi_application()
"43664603+marimuthuinchennai@users.noreply.github.com"
] | 43664603+marimuthuinchennai@users.noreply.github.com |
a9e4ba1e247f2dbc1572b9401e6dedfd6ae4a75b | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/easy-money_20210129090524.py | 44f43e04000f82d449dec107ef59214db3f8ec15 | [] | no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,489 | py | # 东方财富网 首发申报
from datetime import datetime,timedelta
from urllib.parse import urlencode
import pandas as pd
import requests
import re
import time
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def date_gen():
    """Scrape the list of report dates from the Eastmoney sbqy page.

    NOTE(review): this is a generator that yields the *entire* list exactly
    once, so callers must use ``next(date_gen())`` (or iterate once) to get
    the list -- confirm whether a plain ``return`` was intended.
    It also relies on BeautifulSoup, which this file imports further down.
    """
    r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',headers=headers)
    # The page is GBK-encoded; set it explicitly so .text decodes correctly.
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text,'html.parser')
    dateList = [i.text for i in soup.findAll('option')]
    yield dateList
def get_eastmoneyData(dateList):
    """Download the weekly 首发申报 (first-application) tables for every date
    in *dateList*, combine them into one DataFrame and write it to CSV.

    The endpoint answers with a JS assignment; the slice between the fixed
    prefix and the trailing brace is a Python-parsable list of comma-joined
    record strings.
    """
    query = {'type': 'NS',
            'sty' : 'NSFR',
            'st' : '1',
            'sr' : '-1',
            'p' : '1',
            'ps' : '5000',
            'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
            'mkt' : '1',
            'rt' : '53721774'
            }
    main_data = []
    for date in dateList:
        # BUG FIX: the request must carry the current date, not the whole
        # list (the original assigned `dateList` here).
        query['fd'] = date
        url = base_url + urlencode(query)
        rs = requests.get(url,headers=headers)
        if rs.text == '':
            # No data published for this date.
            continue
        js = rs.text.split('var IBhynDx={pages:1,data:')[1]
        # NOTE(review): eval() of remote content is unsafe; kept for parity
        # with the rest of this module -- prefer ast.literal_eval/json.
        data = eval(js[:-1])
        main_data.extend(data)
        time.sleep(2)  # throttle requests
    temp = [i.split(',') for i in main_data]
    columns = ['会计师事务所','保荐代表人','保荐机构','xxx','律师事务所','日期','所属行业','板块','是否提交财务自查报告',
           '注册地','类型','机构名称','签字会计师','签字律师','时间戳','简称']
    df = pd.DataFrame(temp,columns=columns)
    df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
    # NOTE(review): '保荐机构' appears twice in the selection below, producing
    # a duplicated column in the CSV -- confirm whether that is intended.
    df = df[['机构名称', '类型', '板块', '注册地', '保荐机构','保荐代表人', '律师事务所', '签字律师','会计师事务所',
         '签字会计师', '是否提交财务自查报告', '所属行业','日期','xxx', '时间戳', '保荐机构','文件链接']]
    # ChiNext (创业板) listings are excluded from this report.
    df = df[df['板块'] != '创业板']
    df.to_csv('C:/Users/chen/Desktop/IPO_info/eastmoney_raw_data.csv',index=False,encoding='utf-8-sig')
# for i in ['2','4']:
# query = {'type': 'NS',
# 'sty' : 'NSSH',
# 'st' : '1',
# 'sr' : '-1',
# 'p' : '1',
# 'ps' : '5000',
# 'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
# 'mkt' : i,
# 'rt' : '53723990'
# }
# url = base_url + urlencode(query)
# rss = requests.get(url,headers=headers)
# jss = rss.text.split('var KIBhynDx={pages:1,data:')[1]
# data = eval(jss[:-1])
# temp = [j.split(',') for j in data]
# columns = ['时间戳','yyy','公司代码','机构名称','详情链接','申报日期','上会日期','申购日期','上市日期','9','拟发行数量','发行前总股本','发行后总股本','13','占发行后总股本比例','当前状态','上市地点','主承销商','承销方式','发审委委员','网站','简称']
# df = pd.DataFrame(temp,columns=columns)
# df['文件链接'] = df['时间戳'].apply(lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf")
# df['详情链接'] = df['公司代码'].apply(lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
# df = df[['机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期','上会日期', '申购日期', '上市日期', '主承销商','承销方式', '9', '发行前总股本','发行后总股本','13','占发行后总股本比例','发审委委员','网站','公司代码','yyy','时间戳', '简称', '详情链接','文件链接']]
# df.to_csv('C:/Users/chen/Desktop/IPO_info/easymoney_data_{}_onmeeting.csv'.format(i),index=False,encoding='utf-8-sig')
from urllib.parse import urlencode
import time
import requests
from bs4 import BeautifulSoup
import pandas as pd
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
def get_zzscData(dateList):
    """Collect 终止审查 (terminated-review) records for every date in
    *dateList* and write them to eastmoneyzzsc.csv.

    Builds a mapping of company name -> termination date; the first date
    seen for a company wins.
    """
    terminated = {}
    for report_date in dateList:
        params = {'type': 'NS',
                'sty' : 'NSSE',
                'st' : '1',
                'sr' : '-1',
                'p' : '1',
                'ps' : '500',
                'js' : 'var IBhynDx={pages:(pc),data:[(x)]}',
                'mkt' : '4',
                'stat':'zzsc',
                'fd' : report_date,
                'rt' : '53727636'
                }
        response = requests.get(base_url + urlencode(params), headers=headers)
        # The endpoint signals "no rows" with this fixed sentinel payload.
        if response.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
            continue
        payload = response.text.split('var IBhynDx={pages:1,data:')[1]
        # NOTE(review): eval() on remote content is unsafe; kept unchanged
        # for behavioral parity with the rest of this module.
        rows = eval(payload[:-1])
        for row in rows:
            fields = row.split(',')
            # Only record the first termination date seen per company.
            terminated.setdefault(fields[1], fields[2])
        time.sleep(2)
    frame = pd.DataFrame(terminated.items(), columns=['机构名称', '决定终止审查时间'])
    frame.to_csv('C:/Users/chen/Desktop/IPO_info/eastmoneyzzsc.csv', encoding='utf-8-sig', index=False)
| [
"chenjiajun.jason@outlook.com"
] | chenjiajun.jason@outlook.com |
cda3d756f3b20c93887903067560970a4827e4e6 | 196d70bc1da67be81805f8f302b0a8af5a01b0e9 | /Def/Graph/BoxPlot.py | 4f509a9f9091c1ac3101c852f3c7697767689421 | [] | no_license | Pabloc98/Regresion_Predictiva | 7894e5f798281d20ee53264f6a91449c89b688de | b4ff9dabee86e375fc817e6d707e11bf612a5db9 | refs/heads/main | 2023-05-04T21:30:59.335242 | 2021-05-26T01:17:54 | 2021-05-26T01:17:54 | 370,846,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | import matplotlib.pyplot as plt
import seaborn as sns
def BoxPlot(data, remover=None):
    """Draw one box-and-whisker plot per column of *data*.

    Parameters
    ----------
    data : pandas.DataFrame
        Data whose (numeric) columns are plotted.
    remover : list, optional
        Column names to exclude from the plot. When None, every column
        is plotted.

    Returns
    -------
    None
        The plot is rendered on a new matplotlib figure.
    """
    # Select once, plot once: the original duplicated the plotting calls
    # in both branches and compared with `== None`.
    if remover is None:
        plot_data = data
    else:
        plot_data = data[[col for col in data.columns if col not in remover]]
    plt.figure(figsize=(10, 6))
    sns.boxplot(data=plot_data)
    plt.xticks(rotation=90)
| [
"pfcifuentesd@unal.edu.co"
] | pfcifuentesd@unal.edu.co |
45a1aa6c555bcce64959313e9919168a40eec0fe | ba694353a3cb1cfd02a6773b40f693386d0dba39 | /sdk/python/pulumi_google_native/healthcare/v1/get_hl7_v2_store.py | 1abfe0c8835e7629b7050b2ac2f9fa0ed5ff4766 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-google-native | cc57af8bd3d1d6b76f1f48333ed1f1b31d56f92b | 124d255e5b7f5440d1ef63c9a71e4cc1d661cd10 | refs/heads/master | 2023-08-25T00:18:00.300230 | 2023-07-20T04:25:48 | 2023-07-20T04:25:48 | 323,680,373 | 69 | 16 | Apache-2.0 | 2023-09-13T00:28:04 | 2020-12-22T16:39:01 | Python | UTF-8 | Python | false | false | 6,311 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetHl7V2StoreResult',
'AwaitableGetHl7V2StoreResult',
'get_hl7_v2_store',
'get_hl7_v2_store_output',
]
@pulumi.output_type
class GetHl7V2StoreResult:
    """Result payload of the getHl7V2Store invoke (auto-generated by the
    Pulumi SDK generator -- keep edits to documentation only)."""
    def __init__(__self__, labels=None, name=None, notification_configs=None, parser_config=None, reject_duplicate_message=None):
        # Each argument is shape-checked before being registered as a pulumi
        # output property on this object.
        if labels and not isinstance(labels, dict):
            raise TypeError("Expected argument 'labels' to be a dict")
        pulumi.set(__self__, "labels", labels)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if notification_configs and not isinstance(notification_configs, list):
            raise TypeError("Expected argument 'notification_configs' to be a list")
        pulumi.set(__self__, "notification_configs", notification_configs)
        if parser_config and not isinstance(parser_config, dict):
            raise TypeError("Expected argument 'parser_config' to be a dict")
        pulumi.set(__self__, "parser_config", parser_config)
        if reject_duplicate_message and not isinstance(reject_duplicate_message, bool):
            raise TypeError("Expected argument 'reject_duplicate_message' to be a bool")
        pulumi.set(__self__, "reject_duplicate_message", reject_duplicate_message)

    @property
    @pulumi.getter
    def labels(self) -> Mapping[str, str]:
        """
        User-supplied key-value pairs used to organize HL7v2 stores. Label keys must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: \\p{Ll}\\p{Lo}{0,62} Label values are optional, must be between 1 and 63 characters long, have a UTF-8 encoding of maximum 128 bytes, and must conform to the following PCRE regular expression: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} No more than 64 labels can be associated with a given store.
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name of the HL7v2 store, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/hl7V2Stores/{hl7v2_store_id}`.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="notificationConfigs")
    def notification_configs(self) -> Sequence['outputs.Hl7V2NotificationConfigResponse']:
        """
        A list of notification configs. Each configuration uses a filter to determine whether to publish a message (both Ingest & Create) on the corresponding notification destination. Only the message name is sent as part of the notification. Supplied by the client.
        """
        return pulumi.get(self, "notification_configs")

    @property
    @pulumi.getter(name="parserConfig")
    def parser_config(self) -> 'outputs.ParserConfigResponse':
        """
        The configuration for the parser. It determines how the server parses the messages.
        """
        return pulumi.get(self, "parser_config")

    @property
    @pulumi.getter(name="rejectDuplicateMessage")
    def reject_duplicate_message(self) -> bool:
        """
        Determines whether to reject duplicate messages. A duplicate message is a message with the same raw bytes as a message that has already been ingested/created in this HL7v2 store. The default value is false, meaning that the store accepts the duplicate messages and it also returns the same ACK message in the IngestMessageResponse as has been returned previously. Note that only one resource is created in the store. When this field is set to true, CreateMessage/IngestMessage requests with a duplicate message will be rejected by the store, and IngestMessageErrorDetail returns a NACK message upon rejection.
        """
        return pulumi.get(self, "reject_duplicate_message")
class AwaitableGetHl7V2StoreResult(GetHl7V2StoreResult):
    """Awaitable wrapper so the invoke result can be used with ``await``.

    ``__await__`` never actually suspends (the ``if False: yield`` only
    marks it as a generator); it immediately returns a plain result copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetHl7V2StoreResult(
            labels=self.labels,
            name=self.name,
            notification_configs=self.notification_configs,
            parser_config=self.parser_config,
            reject_duplicate_message=self.reject_duplicate_message)
def get_hl7_v2_store(dataset_id: Optional[str] = None,
                     hl7_v2_store_id: Optional[str] = None,
                     location: Optional[str] = None,
                     project: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHl7V2StoreResult:
    """
    Gets the specified HL7v2 store.

    :param dataset_id: Dataset that contains the HL7v2 store.
    :param hl7_v2_store_id: ID of the HL7v2 store to fetch.
    :param location: Location of the dataset.
    :param project: Project to query.
    :param opts: Options controlling the invoke.
    """
    __args__ = dict()
    __args__['datasetId'] = dataset_id
    __args__['hl7V2StoreId'] = hl7_v2_store_id
    __args__['location'] = location
    __args__['project'] = project
    # Merge provider-level invoke defaults with the caller's options before
    # dispatching the engine invoke.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('google-native:healthcare/v1:getHl7V2Store', __args__, opts=opts, typ=GetHl7V2StoreResult).value
    return AwaitableGetHl7V2StoreResult(
        labels=pulumi.get(__ret__, 'labels'),
        name=pulumi.get(__ret__, 'name'),
        notification_configs=pulumi.get(__ret__, 'notification_configs'),
        parser_config=pulumi.get(__ret__, 'parser_config'),
        reject_duplicate_message=pulumi.get(__ret__, 'reject_duplicate_message'))
reject_duplicate_message=pulumi.get(__ret__, 'reject_duplicate_message'))
@_utilities.lift_output_func(get_hl7_v2_store)
def get_hl7_v2_store_output(dataset_id: Optional[pulumi.Input[str]] = None,
hl7_v2_store_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetHl7V2StoreResult]:
"""
Gets the specified HL7v2 store.
"""
...
| [
"noreply@github.com"
] | pulumi.noreply@github.com |
eacb9972d19a4f022f58229c15cf2f95c755de2e | c67681f332a74ff8287f8a995fa67d6ffdb22ee2 | /transfer/resnet_pre_trained.py | 6c2dfc20b3cf1cac164e4de6f5c8fbe48898982f | [
"MIT"
] | permissive | mingewang/deep_learning_with_keras_from_scratch | 68144bd23e55b147743dbe38e54f9c0fa793f305 | 67fad8d875c789c53979919cd15d492271dfe005 | refs/heads/master | 2021-07-12T02:26:26.757961 | 2020-06-24T04:55:55 | 2020-06-24T04:55:55 | 169,931,438 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | # copy from https://keras.io/applications/#usage-examples-for-image-classification-models
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
import matplotlib.pyplot as plt
# Load a ResNet50 pre-trained on ImageNet (downloads weights on first use).
model = ResNet50(weights='imagenet')
# Sample image: wget https://en.wikipedia.org/wiki/File:African_Bush_Elephant.jpg
img_path = 'elephant.jpg'
# ResNet50 expects 224x224 RGB input.
img = image.load_img(img_path, target_size=(224, 224))
# Convert the PIL image object to a numpy array.
x = image.img_to_array(img)
# Show the image; imshow expects values in [0, 1], so divide by 255 here.
plt.imshow(x/255.)
plt.show()
# Add a leading batch axis: the model expects shape (batch, 224, 224, 3).
x = np.expand_dims(x, axis=0)
# Subtract the ImageNet mean RGB value from each pixel (model preprocessing).
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
decoded = decode_predictions(preds, top=3)
print( decoded )
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
| [
"mingewang@gmail.com"
] | mingewang@gmail.com |
2cac8f96e274fdc42afb5cb02b0e9bba09365556 | 6fe84d02172d8c5d1ad6701d99adad960911bbb2 | /EIS.py | ddce19bba29fcdffd178201901bfffae22ff703d | [
"WTFPL"
] | permissive | greatwallisme/eis_analysis | fe6e7c7a7bdc6052c8ad028b1c45b8198242639c | fc8c5b2e1155f96313943e3641881dd621b3a118 | refs/heads/master | 2020-04-16T03:39:01.816735 | 2013-06-27T11:19:19 | 2013-06-27T11:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,278 | py | import sys
from PyQt4 import QtGui, QtCore
import fra
import os
import proj
class MainWindow(QtGui.QMainWindow):
    """Main EIS window: import a measurement directory, then export the
    selected derived tables (P00, impedance, phase, Mott-Schottky)."""

    def __init__(self, parent=None):
        """Build the static widget layout, menus and signal connections."""
        QtGui.QMainWindow.__init__(self)
        self.resize(500, 200)
        self.setWindowTitle('EIS')
        self.setWindowIcon(QtGui.QIcon('gui/img/logo.png'))
        # Path to the imported files (read-only display).
        self.label1 = QtGui.QLabel("Input Path", self)
        self.label1.move(10, 30)
        self.pp = QtGui.QLineEdit(self)
        self.pp.setGeometry(100, 30, 200, 20)
        self.pp.setEnabled(False)
        # Target path for the P00 export (read-only display).
        self.label2 = QtGui.QLabel("P00 Path", self)
        self.label2.move(10, 60)
        self.p00 = QtGui.QLineEdit(self)
        self.p00.setGeometry(100, 60, 200, 20)
        self.p00.setEnabled(False)
        self.label3 = QtGui.QLabel("Analyse Path", self)
        self.label3.move(10, 90)
        self.anal = QtGui.QLineEdit(self)
        self.anal.setGeometry(100, 90, 200, 20)
        self.anal.setEnabled(False)
        # Sample area in cm^2; editable, used by the export calculations.
        self.label4 = QtGui.QLabel("Area", self)
        self.label4.move(10, 120)
        self.area = QtGui.QLineEdit(self)
        self.area.setGeometry(100, 120, 200, 20)
        self.area.setEnabled(True)
        self.area.setText('0.196')
        # Checkboxes selecting which export steps run.
        self.cbp00 = QtGui.QCheckBox('Make P00', self)
        self.cbp00.setFocusPolicy(QtCore.Qt.NoFocus)
        self.cbp00.move(350, 30)
        self.cbp00.setChecked(True)
        self.cbimp = QtGui.QCheckBox('Make Impedancetable', self)
        self.cbimp.setFocusPolicy(QtCore.Qt.NoFocus)
        self.cbimp.move(350, 60)
        self.cbimp.setChecked(True)
        self.cbphi = QtGui.QCheckBox('Make Phasetable', self)
        self.cbphi.setFocusPolicy(QtCore.Qt.NoFocus)
        self.cbphi.move(350, 90)
        self.cbphi.setChecked(True)
        self.cbmsy = QtGui.QCheckBox('Make Mott Schottky', self)
        self.cbmsy.setFocusPolicy(QtCore.Qt.NoFocus)
        self.cbmsy.move(350, 120)
        self.cbmsy.setChecked(True)
        # Buttons
        self.ExportButton = QtGui.QPushButton('Export', self)
        self.ExportButton.setCheckable(True)
        self.ExportButton.move(350, 150)
        self.connect(self.ExportButton, QtCore.SIGNAL('clicked()'), self.exportFile)
        self.ImportButton = QtGui.QPushButton('Import', self)
        self.ImportButton.setCheckable(True)
        self.ImportButton.move(200, 150)
        self.connect(self.ImportButton, QtCore.SIGNAL('clicked()'), self.openDir)
        # File menu actions
        exit = QtGui.QAction(QtGui.QIcon(''), 'Exit', self)
        exit.setShortcut('Ctrl+Q')
        exit.setStatusTip('Exit application')
        self.connect(exit, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))
        openDirAction = QtGui.QAction('Open Directory', self)
        openDirAction.setStatusTip('Open a directory')
        openDirAction.triggered.connect(self.openDir)
        exportAction = QtGui.QAction('Export', self)
        exportAction.setShortcut('Ctrl+E')
        exportAction.setStatusTip('Export as P00 file')
        exportAction.triggered.connect(self.exportFile)
        # About menu action
        openAboutWidget = QtGui.QAction ('About', self)
        openAboutWidget.triggered.connect(self.aboutWidget)
        # Assemble the menu bar
        menubar = self.menuBar()
        file = menubar.addMenu('&File')
        file.addAction(exit)
        file.addAction(openDirAction)
        file.addAction(exportAction)
        about = menubar.addMenu('&About')
        about.addAction(openAboutWidget)
        self.statusBar()

    def exportFile(self):
        """Run the export jobs selected via the checkboxes."""
        # Guard: without a prior import there is no self.Data and the export
        # would crash with an AttributeError.
        if not hasattr(self, 'Data'):
            self.statusBar().showMessage('Import a directory first')
            return
        if self.cbp00.checkState():
            self.Data.makeP00s()
        if self.cbimp.checkState():
            self.Data.makeImpTable(float(self.area.text()))
        if self.cbphi.checkState():
            self.Data.makePhaseTable()
        if self.cbmsy.checkState():
            self.Data.makeMottSchottky(float(self.area.text()))
        self.Data.makeFittingTable(float(self.area.text()))

    def openDir(self):
        """Ask for a directory and import every .dfr/.P00 file it contains."""
        dirname = QtGui.QFileDialog.getExistingDirectory(self, 'Open Directory', os.getenv(''))
        try:
            self.Data = proj.analyse()
            self.Data.setFilelist(dirname)
            for files in os.listdir(dirname):
                if os.path.splitext(files)[1] == '.dfr':
                    self.Data.openFile(dirname + '\\' +files)
                if os.path.splitext(files)[1] == '.P00':
                    self.Data.openFile(dirname + '\\' +files)
            self.pp.setText(dirname)
            self.p00.setText(dirname + '\P00')
            self.anal.setText(dirname + '\analyse')
        except Exception as e:
            # Was a bare `except:` that hid every failure, including typos.
            print ('Import error:', e)
        print (self.Data)

    def aboutWidget(self):
        """Show the About dialog (imported lazily to keep startup light)."""
        import gui.about.aboutbox
        gui.about.aboutbox.main()
if __name__ == '__main__':
    # Start the Qt event loop with the main EIS window.
    app = QtGui.QApplication(sys.argv)
    main = MainWindow()
    main.show()
    sys.exit(app.exec_())
| [
"moritz370@googlemail.com"
] | moritz370@googlemail.com |
6da10e3d988849e0846a8f5c72f9ec9bdfc4ad8b | c201479b35a950b94376e0e73e9675a6cb837a86 | /input_custom_pyip.py | a0e7eb8e9abe4f86cc6dc07e8e117de92fef77ea | [] | no_license | josephcardillo/boring-stuff | 7527297065e6476450fcc1442bd3a67c84367111 | 4f5ac15ebfa98d77d54c226613df9d6413edde7b | refs/heads/main | 2023-03-20T22:30:33.826295 | 2021-03-08T19:11:13 | 2021-03-08T19:11:13 | 327,089,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | import pyinputplus as pyip
def adds_up_to_ten(numbers):
    """Validator for pyip.inputCustom: accept *numbers* only if its digits
    sum to 10.

    :param numbers: the raw string typed by the user (digits only).
    :raises Exception: when the digit sum is not 10 (message shows the sum).
    :return: the input converted to int, so inputCustom yields an integer.
    """
    digit_sum = sum(int(digit) for digit in numbers)
    if digit_sum != 10:
        raise Exception('The digits must add up to 10, not %s.' % (digit_sum))
    return int(numbers)
response = pyip.inputCustom(adds_up_to_ten) | [
"joseph.s.cardillo@gmail.com"
] | joseph.s.cardillo@gmail.com |
e3c71b9961f93435dbc2c0a817fb9f89d4d871a1 | d53a176d74e5f75a227cf7857139cca7d62ed644 | /data_uploader/index_component_data_uploader.py | 4e2567f59b731624764605c0ae9beb06da0d4563 | [] | no_license | luoqing222/trading_analysis | 9a089c0aa877156caf74c3490fb77482e9d2e7ad | e44e66250ccf573f0cff57f7664348f311944354 | refs/heads/master | 2021-03-27T14:34:30.037258 | 2018-08-03T04:42:37 | 2018-08-03T04:42:37 | 31,827,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | __author__ = 'Qing'
import MySQLdb
import models
import logging
import os
import datetime
logger = logging.getLogger(__name__)
class IndexComponentDataUploader:
    """Uploads index-component CSV files into the indexcomponentdata MySQL
    table. NOTE: this file is Python 2 (`except Exception, e:` syntax)."""
    def __init__(self, host, database, user, password):
        # Connection settings only; connections are opened per operation.
        self.host = host
        self.database = database
        self.user = user
        self.password = password
    def create_index_component_table(self):
        """Create the IndexComponentData table (via the models module) if it
        does not exist yet."""
        models.db.init(host=self.host, database=self.database, user=self.user, passwd=self.password)
        models.db.connect()
        if not models.IndexComponentData.table_exists():
            models.db.create_table(models.IndexComponentData)
    def upload_index_component_to_db(self,file_name, folder):
        """Parse folder/file_name (CSV with one header row) and bulk-insert
        its rows.

        Expected row layout: transaction date (m/d/Y), index symbol, stock
        symbol, company name. Rows with a different field count are skipped.
        The transaction date is normalized to ISO (Y-m-d) before insert.
        """
        full_file_name = folder + "/" + file_name
        if not os.path.exists(full_file_name):
            logger.warning(full_file_name + " does not exist!")
            return
        self.create_index_component_table()
        records = []
        with open(full_file_name) as fp:
            # Skip the CSV header line.
            next(fp)
            for line in fp:
                line = line.strip()
                splited_item = line.split(',')
                if len(splited_item) == 4:
                    [transaction_date, index, symbol, name] = splited_item
                    transaction_date = datetime.datetime.strptime(transaction_date,"%m/%d/%Y").date().strftime("%Y-%m-%d")
                    try:
                        records.append((transaction_date, index, symbol, name))
                    # NOTE(review): a bare except around a plain append can
                    # only hide bugs -- confirm whether it can be removed.
                    except:
                        pass
        db = MySQLdb.connect(host=self.host, db=self.database, user=self.user, passwd=self.password)
        cursor = db.cursor()
        sql_statement = "insert into indexcomponentdata(transaction_date, index_symbol, symbol, company_name) values(%s,%s,%s,%s)"
        try:
            # executemany keeps the whole file in one transaction: either all
            # rows land or the insert is rolled back below.
            cursor.executemany(sql_statement, records)
            db.commit()
            logger.info("%s is successfully uploaded", full_file_name)
        except Exception, e:
            logger.warning("exception is thrown when upload " + full_file_name + ":" + str(e))
            db.rollback()
            raise
        finally:
            cursor.close()
            db.close()
    def run(self, file_name, folder):
        """Entry point: upload a single file from *folder*."""
        self.upload_index_component_to_db(file_name, folder)
| [
"luoqing222@gmail.com"
] | luoqing222@gmail.com |
f52b56536574c892378a2844bbaf8eb21c558d6d | eb8e00c8ae80e1fd6aca6500a67bda9940f86aa0 | /Solutions/Task2/lab2-1.2/lab2_setupable/lab2_decorator.py | 4c2ece07f313b0338648b398cfa4d1fa965996df | [] | no_license | KseniyaTonko/BSUIR-PYTHON-2020 | 9b9e2e6ebbbaf620f5d7bbf3a22d5cbb78d6a168 | 4a46b5787ecc800620d04763f5dc82c742c8c7ea | refs/heads/master | 2022-11-11T06:49:10.510148 | 2020-06-25T10:18:26 | 2020-06-25T10:18:26 | 256,189,528 | 0 | 0 | null | 2020-04-16T11:02:48 | 2020-04-16T11:02:47 | null | UTF-8 | Python | false | false | 657 | py | class CacheDecorator:
_func_values_ = dict()
def cache(function):
def wrapper(*args, **kwargs):
if CacheDecorator._func_values_.get(function.__name__):
if CacheDecorator._func_values_[function.__name__][0] == args and \
CacheDecorator._func_values_[function.__name__][1] == kwargs:
print("Decorator works!")
return CacheDecorator._func_values_[function.__name__][2]
result = function(*args, **kwargs)
CacheDecorator._func_values_[function.__name__] = (args, kwargs, result)
return result
return wrapper
| [
"andrey.karpyza.steam@gmail.com"
] | andrey.karpyza.steam@gmail.com |
a67be5f16d594f2be32e3bd5612b587e152569f6 | 7326c2589e6dc250083415d65e28754ea1e9968e | /11_Container_Loop_1.py | e13b704a088ecc6c305ef84903081890ed4a9cc5 | [] | no_license | nanoyslee/Python_Calulator-Login | 23aa1dd2135dafaa43de5c1d9ce12268e18eab19 | 5a4be881c96dd4495d58160f176d1c07283dd8bb | refs/heads/master | 2020-03-28T12:03:32.493326 | 2018-09-27T22:32:23 | 2018-09-27T22:32:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | members = ['nanoyslee', 'leezche', 'graphittie', 'k8805']
# Iterate the list directly; no manual index bookkeeping needed.
for member in members:
    print(member)
| [
"noreply@github.com"
] | nanoyslee.noreply@github.com |
a86017ca0d8e95b6333457b55604ad6c7d6a39c3 | 4b7c036dd2dbc374e5635c586532f94bed5d3ab9 | /venv/Scripts/easy_install-3.5-script.py | c75e3ca7cd0042abb656472327e80fea7aac2e49 | [] | no_license | JayWu7/Genetic-algorithm-for-TSP-problem | 22b0827d5e0a676036f20e062fe6b3a0742681bf | 4d2db0677d2a7338bb05a19ae3e5cce3daf00920 | refs/heads/master | 2020-04-29T06:59:18.664059 | 2019-03-18T15:02:28 | 2019-03-18T15:02:28 | 175,937,348 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | #!D:\Atm\venv\Scripts\python3.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.5'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix so argv[0] matches the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # Resolve and invoke the console-script entry point declared by setuptools.
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.5')()
    )
| [
"jaywu16@163.com"
] | jaywu16@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.