Dataset schema (33 columns, listed as column: dtype, value range; ⌀ marks a nullable column). One record per source file follows; within each record the max_stars, max_issues and max_forks column groups are listed one group per line.

- hexsha: string, length 40
- size: int64, 7 to 1.04M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 247
- max_stars_repo_name: string, length 4 to 125
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 368k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 247
- max_issues_repo_name: string, length 4 to 125
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 116k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 247
- max_forks_repo_name: string, length 4 to 125
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 1 to 1.04M
- avg_line_length: float64, 1.77 to 618k
- max_line_length: int64, 1 to 1.02M
- alphanum_fraction: float64, 0 to 1
- original_content: string, length 7 to 1.04M
- filtered:remove_function_no_docstring: int64, -102 to 942k
- filtered:remove_class_no_docstring: int64, -354 to 977k
- filtered:remove_delete_markers: int64, 0 to 60.1k
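The last four numeric columns are derived from `content`. The exact formulas used when this table was built are not stated here, so the sketch below assumes the obvious definitions (mean and maximum line length over the file's lines, and the fraction of alphanumeric characters); the `line_stats` helper and the sample record are illustrative only, not part of the dataset.

```python
# Minimal sketch (not the pipeline that produced this table): recomputing the
# derived columns from a record's `content` field under assumed definitions.

def line_stats(content: str) -> dict:
    """Return avg_line_length, max_line_length and alphanum_fraction for a text blob."""
    lines = content.splitlines()
    return {
        "avg_line_length": (sum(len(l) for l in lines) / len(lines)) if lines else 0.0,
        "max_line_length": max((len(l) for l in lines), default=0),
        "alphanum_fraction": (sum(c.isalnum() for c in content) / len(content)) if content else 0.0,
    }

# Illustrative record, not taken from the table:
record = {"hexsha": "0" * 40, "ext": "py", "lang": "Python", "content": "import re\nx = 1\n"}
print(line_stats(record["content"]))
```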
hexsha: e1d3533156e4de3f0f953303a7aa03e8959c6889 | size: 990 | ext: py | lang: Python
max_stars: path=base/site-packages/jpush/push/audience.py, repo_name=edisonlz/fastor, head_hexsha=342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3, licenses=["Apache-2.0"], count=285, event_min=2019-12-23T09:50:21.000Z, event_max=2021-12-08T09:08:49.000Z
max_issues: path=base/site-packages/jpush/push/audience.py, repo_name=jeckun/fastor, head_hexsha=342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null
max_forks: path=base/site-packages/jpush/push/audience.py, repo_name=jeckun/fastor, head_hexsha=342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3, licenses=["Apache-2.0"], count=9, event_min=2019-12-23T12:59:25.000Z, event_max=2022-03-15T05:12:11.000Z
content:
import re
# Value selectors; aliases, tags, etc.
def tag(*tags):
"""Select a (list of) tag(s)."""
vtag = [t for t in tags]
return {"tag": vtag}
def tag_and(*tag_ands):
"""Select a (list of) tag_and(s)."""
vtag_and = [t for t in tag_ands]
return {"tag_and": vtag_and}
def tag_not(*tag_nots):
"""Select a (list of) tag_not(s)."""
vtag_not = [t for t in tag_nots]
return {"tag_not": vtag_not}
def alias(*alias):
"""Select a (list of) alias(es)."""
valias = [t for t in alias]
return {"alias": valias}
def registration_id(*reg_ids):
"""Select a (list of) registration_id(s)."""
vregistration_id = [t for t in reg_ids]
return {"registration_id": vregistration_id}
def segment(*segments):
"""Select a (list of) segment(s)."""
vsegment = [t for t in segments]
return {"segment": vsegment}
def abtest(*abtests):
"""Select a (list of) abtest(s)."""
vabtest = [t for t in abtests]
return {"abtest": vabtest}
avg_line_length: 25.384615 | max_line_length: 48 | alphanum_fraction: 0.611111
original_content:
import re
# Value selectors; aliases, tags, etc.
def tag(*tags):
"""Select a (list of) tag(s)."""
vtag = [t for t in tags]
return {"tag": vtag}
def tag_and(*tag_ands):
"""Select a (list of) tag_and(s)."""
vtag_and = [t for t in tag_ands]
return {"tag_and": vtag_and}
def tag_not(*tag_nots):
"""Select a (list of) tag_not(s)."""
vtag_not = [t for t in tag_nots]
return {"tag_not": vtag_not}
def alias(*alias):
"""Select a (list of) alias(es)."""
valias = [t for t in alias]
return {"alias": valias}
def registration_id(*reg_ids):
"""Select a (list of) registration_id(s)."""
vregistration_id = [t for t in reg_ids]
return {"registration_id": vregistration_id}
def segment(*segments):
"""Select a (list of) segment(s)."""
vsegment = [t for t in segments]
return {"segment": vsegment}
def abtest(*abtests):
"""Select a (list of) abtest(s)."""
vabtest = [t for t in abtests]
return {"abtest": vabtest}
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 24ae06873ec69434dce877fd511455ca4e400e69 | size: 525 | ext: py | lang: Python
max_stars: path=shop/migrations/0027_item_offer_price.py, repo_name=Zex0n/django-simple-cms, head_hexsha=097098dcea218697a53f9c04005c86a7680ee4e1, licenses=["MIT"], count=1, event_min=2021-04-03T09:29:13.000Z, event_max=2021-04-03T09:29:13.000Z
max_issues: path=shop/migrations/0027_item_offer_price.py, repo_name=Zex0n/django-simple-cms, head_hexsha=097098dcea218697a53f9c04005c86a7680ee4e1, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=shop/migrations/0027_item_offer_price.py, repo_name=Zex0n/django-simple-cms, head_hexsha=097098dcea218697a53f9c04005c86a7680ee4e1, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-07-30 14:11
from __future__ import unicode_literals
from django.db import migrations, models
avg_line_length: 25 | max_line_length: 126 | alphanum_fraction: 0.64
original_content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-07-30 14:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0026_auto_20180716_1626'),
]
operations = [
migrations.AddField(
model_name='item',
name='offer_price',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Цена со скидкой'),
),
]
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 359 | filtered:remove_delete_markers: 23
hexsha: 1ace981d823bbe67c690c75ab2bed1f4bfa3b8f6 | size: 1,143 | ext: py | lang: Python
max_stars: path=2019-10-23-ex-11.py, repo_name=mpassosbr/python3, head_hexsha=ff83f1f6f787206e49696134a99d68190606ed4f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=2019-10-23-ex-11.py, repo_name=mpassosbr/python3, head_hexsha=ff83f1f6f787206e49696134a99d68190606ed4f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=2019-10-23-ex-11.py, repo_name=mpassosbr/python3, head_hexsha=ff83f1f6f787206e49696134a99d68190606ed4f, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
num_vezes = 0
soma_total = 0
maior_numero = None
menor_numero = None
while True:
num = input("Digite um número ou \"sair\" para encerrar o programa: ")
if num == "sair":
break
try:
numero = int(num)
num_vezes += 1
soma_total += numero
if maior_numero is None or numero > maior_numero:
maior_numero = numero
if menor_numero is None or numero < menor_numero:
menor_numero = numero
except:
print("Digite apenas números ou a palavra \"sair\", por favor.")
if maior_numero == None or menor_numero == None:
print("Você não digitou nenhum número. Portanto é impossível calcular o número de vezes, o somatório, o menor e o maior.")
print("Obrigado por utilizar o meu programa!")
else:
print("Números foram digitados " + str(num_vezes) + " vezes.")
print("A soma total dos números digitados é " + str(int(soma_total)) + ".")
print("O menor número digitado foi o número " + str(int(menor_numero)) + ".")
print("O maior número digitado foi o número " + str(int(maior_numero)) + ".")
print("Obrigado por utilizar o meu programa!")
avg_line_length: 40.821429 | max_line_length: 126 | alphanum_fraction: 0.648294
original_content:
num_vezes = 0
soma_total = 0
maior_numero = None
menor_numero = None
while True:
num = input("Digite um número ou \"sair\" para encerrar o programa: ")
if num == "sair":
break
try:
numero = int(num)
num_vezes += 1
soma_total += numero
if maior_numero is None or numero > maior_numero:
maior_numero = numero
if menor_numero is None or numero < menor_numero:
menor_numero = numero
except:
print("Digite apenas números ou a palavra \"sair\", por favor.")
if maior_numero == None or menor_numero == None:
print("Você não digitou nenhum número. Portanto é impossível calcular o número de vezes, o somatório, o menor e o maior.")
print("Obrigado por utilizar o meu programa!")
else:
print("Números foram digitados " + str(num_vezes) + " vezes.")
print("A soma total dos números digitados é " + str(int(soma_total)) + ".")
print("O menor número digitado foi o número " + str(int(menor_numero)) + ".")
print("O maior número digitado foi o número " + str(int(maior_numero)) + ".")
print("Obrigado por utilizar o meu programa!")
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: ad9d7a94f217bd44bed61f7bae257db19128308f | size: 1,307 | ext: py | lang: Python
max_stars: path=modules/WEATHER_command.py, repo_name=linnil1/slack_emoji_bot, head_hexsha=b31b465cdab5ecdcbfc987000e182724e86a83c8, licenses=["MIT"], count=4, event_min=2016-07-25T12:07:19.000Z, event_max=2020-11-25T15:09:16.000Z
max_issues: path=modules/WEATHER_command.py, repo_name=linnil1/slack_emoji_bot, head_hexsha=b31b465cdab5ecdcbfc987000e182724e86a83c8, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=modules/WEATHER_command.py, repo_name=linnil1/slack_emoji_bot, head_hexsha=b31b465cdab5ecdcbfc987000e182724e86a83c8, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
from selenium import webdriver
import time
avg_line_length: 37.342857 | max_line_length: 76 | alphanum_fraction: 0.556236
original_content:
from selenium import webdriver
import time
class WEATHER:
def require():
return [{"name": "Imgur", "module": True}]
def __init__(self, slack, custom):
self.slack = slack
self.imgur = custom['Imgur']
self.colorPrint = custom['colorPrint']
self.driver = webdriver.PhantomJS(
executable_path="./common/node_modules/phantomjs/bin/phantomjs")
def main(self, datadict):
if not datadict['type'] == 'message' or 'subtype' in datadict:
return
if datadict['text'] == "weather":
payload = {
"username": "Weather Brocaster",
"icon_emoji": ":_e6_b0_a3:",
"thread_ts": datadict.get("thread_ts") or '',
"channel": datadict['channel']}
# set the window size that you need
self.driver.set_window_size(1024, 768)
self.driver.get('http://weather.ntustudents.org/')
path = "data/tmp/weather_" + time.strftime("%s")
self.colorPrint("Store Image", path)
self.driver.save_screenshot(path)
text = "<{}|{}>".format(self.imgur.pathUpload(path),
time.strftime("%c"))
self.slack.api_call("chat.postMessage", **payload, text=text)
filtered:remove_function_no_docstring: 1,167 | filtered:remove_class_no_docstring: -7 | filtered:remove_delete_markers: 103
hexsha: 89e8813cf7f6c8a6eea7821b86c6ff1ab918f535 | size: 5,124 | ext: py | lang: Python
max_stars: path=cpix/filters.py, repo_name=ArrisMultiTrust/pycpix, head_hexsha=e250b113e8496c892a78d9a4a55f1c9a0886e4dd, licenses=["MIT"], count=15, event_min=2018-09-25T14:52:04.000Z, event_max=2022-03-01T13:37:38.000Z
max_issues: path=cpix/filters.py, repo_name=ArrisMultiTrust/pycpix, head_hexsha=e250b113e8496c892a78d9a4a55f1c9a0886e4dd, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=cpix/filters.py, repo_name=ArrisMultiTrust/pycpix, head_hexsha=e250b113e8496c892a78d9a4a55f1c9a0886e4dd, licenses=["MIT"], count=5, event_min=2019-02-19T10:58:59.000Z, event_max=2021-02-01T03:46:56.000Z
content:
"""
Filter classes
"""
from . import etree
from .base import CPIXComparableBase
def encode_bool(value):
"""Encode booleans to produce valid XML"""
if value:
return "true"
return "false"
class KeyPeriodFilter(CPIXComparableBase):
"""
KeyPeriodFilter element
Has single required attribute:
periodId
"""
def element(self):
"""Returns XML element"""
el = etree.Element("KeyPeriodFilter")
el.set("periodId", str(self.period_id))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return KeyPeriodFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
period_id = xml.attrib["periodId"]
return KeyPeriodFilter(period_id)
class LabelFilter(CPIXComparableBase):
"""
LabelFilter element
Not yet implemented
"""
class VideoFilter(CPIXComparableBase):
"""
VideoFilter element
Has optional attributes:
minPixels
maxPixels
hdr
wcg
minFps
maxFps
"""
def element(self):
"""Returns XML element"""
el = etree.Element("VideoFilter")
if self.min_pixels is not None:
el.set("minPixels", str(self.min_pixels))
if self.max_pixels is not None:
el.set("maxPixels", str(self.max_pixels))
if self.hdr is not None:
el.set("hdr", encode_bool(self.hdr))
if self.wcg is not None:
el.set("wcg", encode_bool(self.wcg))
if self.min_fps is not None:
el.set("minFps", str(self.min_fps))
if self.max_fps is not None:
el.set("maxFps", str(self.max_fps))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return VideoFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
min_pixels = None
max_pixels = None
hdr = None
wcg = None
min_fps = None
max_fps = None
if "minPixels" in xml.attrib:
min_pixels = xml.attrib["minPixels"]
if "maxPixels" in xml.attrib:
max_pixels = xml.attrib["maxPixels"]
if "hdr" in xml.attrib:
hdr = xml.attrib["hdr"]
if "wcg" in xml.attrib:
wcg = xml.attrib["wcg"]
if "minFps" in xml.attrib:
min_fps = xml.attrib["minFps"]
if "maxFps" in xml.attrib:
max_fps = xml.attrib["maxFps"]
return VideoFilter(min_pixels, max_pixels, hdr, wcg, min_fps, max_fps)
class AudioFilter(CPIXComparableBase):
"""
AudioFilter element
Has optional attributes:
minChannels
maxChannels
"""
def element(self):
"""Returns XML element"""
el = etree.Element("AudioFilter")
if self.min_channels:
el.set("minChannels", str(self.min_channels))
if self.max_channels:
el.set("maxChannels", str(self.max_channels))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return AudioFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
min_channels = None
max_channels = None
if "minChannels" in xml.attrib:
min_channels = xml.attrib["minChannels"]
if "maxChannels" in xml.attrib:
max_channels = xml.attrib["maxChannels"]
return AudioFilter(min_channels, max_channels)
class BitrateFilter(CPIXComparableBase):
"""
BitrateFilter element
Has optional attributes:
minBitrate
maxBitrate
"""
def element(self):
"""Returns XML element"""
el = etree.Element("BitrateFilter")
if self.min_bitrate:
el.set("minBitrate", str(self.min_bitrate))
if self.max_bitrate:
el.set("maxBitrate", str(self.max_bitrate))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return BitrateFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
min_bitrate = None
max_bitrate = None
if "minBitrate" in xml.attrib:
min_bitrate = xml.attrib["minBitrate"]
if "maxBitrate" in xml.attrib:
max_bitrate = xml.attrib["maxBitrate"]
return BitrateFilter(min_bitrate, max_bitrate)
avg_line_length: 25.878788 | max_line_length: 78 | alphanum_fraction: 0.583724
original_content:
"""
Filter classes
"""
from . import etree
from .base import CPIXComparableBase
def encode_bool(value):
"""Encode booleans to produce valid XML"""
if value:
return "true"
return "false"
class KeyPeriodFilter(CPIXComparableBase):
"""
KeyPeriodFilter element
Has single required attribute:
periodId
"""
def __init__(self, period_id):
self.period_id = period_id
def element(self):
"""Returns XML element"""
el = etree.Element("KeyPeriodFilter")
el.set("periodId", str(self.period_id))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return KeyPeriodFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
period_id = xml.attrib["periodId"]
return KeyPeriodFilter(period_id)
class LabelFilter(CPIXComparableBase):
"""
LabelFilter element
Not yet implemented
"""
class VideoFilter(CPIXComparableBase):
"""
VideoFilter element
Has optional attributes:
minPixels
maxPixels
hdr
wcg
minFps
maxFps
"""
def __init__(self, min_pixels=None, max_pixels=None, hdr=None, wcg=None,
min_fps=None, max_fps=None):
self.min_pixels = min_pixels
self.max_pixels = max_pixels
self.hdr = hdr
self.wcg = wcg
self.min_fps = min_fps
self.max_fps = max_fps
def element(self):
"""Returns XML element"""
el = etree.Element("VideoFilter")
if self.min_pixels is not None:
el.set("minPixels", str(self.min_pixels))
if self.max_pixels is not None:
el.set("maxPixels", str(self.max_pixels))
if self.hdr is not None:
el.set("hdr", encode_bool(self.hdr))
if self.wcg is not None:
el.set("wcg", encode_bool(self.wcg))
if self.min_fps is not None:
el.set("minFps", str(self.min_fps))
if self.max_fps is not None:
el.set("maxFps", str(self.max_fps))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return VideoFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
min_pixels = None
max_pixels = None
hdr = None
wcg = None
min_fps = None
max_fps = None
if "minPixels" in xml.attrib:
min_pixels = xml.attrib["minPixels"]
if "maxPixels" in xml.attrib:
max_pixels = xml.attrib["maxPixels"]
if "hdr" in xml.attrib:
hdr = xml.attrib["hdr"]
if "wcg" in xml.attrib:
wcg = xml.attrib["wcg"]
if "minFps" in xml.attrib:
min_fps = xml.attrib["minFps"]
if "maxFps" in xml.attrib:
max_fps = xml.attrib["maxFps"]
return VideoFilter(min_pixels, max_pixels, hdr, wcg, min_fps, max_fps)
class AudioFilter(CPIXComparableBase):
"""
AudioFilter element
Has optional attributes:
minChannels
maxChannels
"""
def __init__(self, min_channels=None, max_channels=None):
self.min_channels = min_channels
self.max_channels = max_channels
def element(self):
"""Returns XML element"""
el = etree.Element("AudioFilter")
if self.min_channels:
el.set("minChannels", str(self.min_channels))
if self.max_channels:
el.set("maxChannels", str(self.max_channels))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return AudioFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
min_channels = None
max_channels = None
if "minChannels" in xml.attrib:
min_channels = xml.attrib["minChannels"]
if "maxChannels" in xml.attrib:
max_channels = xml.attrib["maxChannels"]
return AudioFilter(min_channels, max_channels)
class BitrateFilter(CPIXComparableBase):
"""
BitrateFilter element
Has optional attributes:
minBitrate
maxBitrate
"""
def __init__(self, min_bitrate=None, max_bitrate=None):
self.min_bitrate = min_bitrate
self.max_bitrate = max_bitrate
def element(self):
"""Returns XML element"""
el = etree.Element("BitrateFilter")
if self.min_bitrate:
el.set("minBitrate", str(self.min_bitrate))
if self.max_bitrate:
el.set("maxBitrate", str(self.max_bitrate))
return el
@staticmethod
def parse(xml):
"""
Parse XML and return BitrateFilter
"""
if isinstance(xml, (str, bytes)):
xml = etree.fromstring(xml)
min_bitrate = None
max_bitrate = None
if "minBitrate" in xml.attrib:
min_bitrate = xml.attrib["minBitrate"]
if "maxBitrate" in xml.attrib:
max_bitrate = xml.attrib["maxBitrate"]
return BitrateFilter(min_bitrate, max_bitrate)
filtered:remove_function_no_docstring: 553 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 108
hexsha: 7ec647a60c71377740d89e88489c6c77a19977d5 | size: 1,001 | ext: py | lang: Python
max_stars: path=Proyecto/Obtener El calendario/Correo.py, repo_name=luisgerardoperalestorres/EstanciasI, head_hexsha=2d9add096919d9911a0d40ad8bcabce4f6a24b40, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=Proyecto/Obtener El calendario/Correo.py, repo_name=luisgerardoperalestorres/EstanciasI, head_hexsha=2d9add096919d9911a0d40ad8bcabce4f6a24b40, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=Proyecto/Obtener El calendario/Correo.py, repo_name=luisgerardoperalestorres/EstanciasI, head_hexsha=2d9add096919d9911a0d40ad8bcabce4f6a24b40, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import base64
import httplib2
from email.mime.text import MIMEText
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow
from google_auth_oauthlib.flow import InstalledAppFlow
permiso = ['https://www.googleapis.com/auth/gmail.send']
memoria = Storage('gmail.storage')
IDOAuth = InstalledAppFlow.from_client_secrets_file("secreto_cliente_Gmail.json", scopes=permiso)
http = httplib2.Http()
credentials = memoria.get()
if credentials is None or credentials.invalid:
credentials = run_flow(IDOAuth, memoria, http=http)
Servicio=build('gmail', 'v1', credentials=credentials)
http = credentials.authorize(credentials)
message = MIMEText("Message")
message['to'] = "correousertb@gmail.com"
message['from'] = "estanciaupv@gmail.com"
message['subject'] = "Subject"
body = {'raw': base64.b64encode(message.as_bytes())}
Servicio.users().messages().send(userId="me",body=body).execute()
avg_line_length: 34.517241 | max_line_length: 97 | alphanum_fraction: 0.791209
original_content:
import base64
import httplib2
from email.mime.text import MIMEText
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow
from google_auth_oauthlib.flow import InstalledAppFlow
permiso = ['https://www.googleapis.com/auth/gmail.send']
memoria = Storage('gmail.storage')
IDOAuth = InstalledAppFlow.from_client_secrets_file("secreto_cliente_Gmail.json", scopes=permiso)
http = httplib2.Http()
credentials = memoria.get()
if credentials is None or credentials.invalid:
credentials = run_flow(IDOAuth, memoria, http=http)
Servicio=build('gmail', 'v1', credentials=credentials)
http = credentials.authorize(credentials)
message = MIMEText("Message")
message['to'] = "correousertb@gmail.com"
message['from'] = "estanciaupv@gmail.com"
message['subject'] = "Subject"
body = {'raw': base64.b64encode(message.as_bytes())}
Servicio.users().messages().send(userId="me",body=body).execute()
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: e8d66f7590965688803098f582bdd62de5164810 | size: 1,760 | ext: py | lang: Python
max_stars: path=zcmds/cmds/common/obs_organize.py, repo_name=zackees/zcmds, head_hexsha=7868b078bc0a3489f0de8435c19f0c3c7913f50f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=zcmds/cmds/common/obs_organize.py, repo_name=zackees/zcmds, head_hexsha=7868b078bc0a3489f0de8435c19f0c3c7913f50f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=zcmds/cmds/common/obs_organize.py, repo_name=zackees/zcmds, head_hexsha=7868b078bc0a3489f0de8435c19f0c3c7913f50f, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import os
import shutil
import traceback
HOME_DIR = os.path.abspath(os.path.expanduser("~"))
PATH_DEFAULT_OBS = os.path.join(HOME_DIR, "videos", "obs")
DRY_RUN = False
def _is_video_file(file_path: str) -> bool:
"""Returns True if the given file is a video file."""
_, ext = os.path.splitext(file_path.lower())
return ext in [".mp4", ".mkv"]
def makedirs(new_dir: str, exist_ok: bool = False) -> None:
"""Make the given directory."""
print(f"make_dirs: {new_dir}")
if DRY_RUN:
return
os.makedirs(new_dir, exist_ok=exist_ok)
def movefile(src: str, dst: str) -> None:
"""Move the given file."""
print(f"movefile: {src} -> {dst}")
if DRY_RUN:
return
shutil.move(src, dst)
def organize(path: str = PATH_DEFAULT_OBS) -> None:
"""Organize the given path."""
paths = [os.path.join(path, p) for p in os.listdir(path) if _is_video_file(p)]
for p in paths:
try:
name_ext = os.path.basename(p)
name = os.path.splitext(name_ext)[0]
ext = os.path.splitext(name_ext)[1]
date_time = name.replace(" ", "_").split("_")
new_dir = os.path.join(path, date_time[0])
new_path = os.path.join(new_dir, f"{date_time[1]}{ext}")
makedirs(os.path.dirname(new_path), exist_ok=True)
movefile(p, new_path)
except Exception as e:
traceback.print_exc()
print(f"Could not process {p} because of {e}")
def main() -> None:
"""Main entry point."""
reply = input(
f"WARNING! This will organize all your videos in the obs path:\n {PATH_DEFAULT_OBS}\ncontinue? [y/n]: "
)
    if reply.lower() == "y":
organize()
if __name__ == "__main__":
main()
avg_line_length: 28.852459 | max_line_length: 112 | alphanum_fraction: 0.599432
original_content:
import os
import shutil
import traceback
HOME_DIR = os.path.abspath(os.path.expanduser("~"))
PATH_DEFAULT_OBS = os.path.join(HOME_DIR, "videos", "obs")
DRY_RUN = False
def _is_video_file(file_path: str) -> bool:
"""Returns True if the given file is a video file."""
_, ext = os.path.splitext(file_path.lower())
return ext in [".mp4", ".mkv"]
def makedirs(new_dir: str, exist_ok: bool = False) -> None:
"""Make the given directory."""
print(f"make_dirs: {new_dir}")
if DRY_RUN:
return
os.makedirs(new_dir, exist_ok=exist_ok)
def movefile(src: str, dst: str) -> None:
"""Move the given file."""
print(f"movefile: {src} -> {dst}")
if DRY_RUN:
return
shutil.move(src, dst)
def organize(path: str = PATH_DEFAULT_OBS) -> None:
"""Organize the given path."""
paths = [os.path.join(path, p) for p in os.listdir(path) if _is_video_file(p)]
for p in paths:
try:
name_ext = os.path.basename(p)
name = os.path.splitext(name_ext)[0]
ext = os.path.splitext(name_ext)[1]
date_time = name.replace(" ", "_").split("_")
new_dir = os.path.join(path, date_time[0])
new_path = os.path.join(new_dir, f"{date_time[1]}{ext}")
makedirs(os.path.dirname(new_path), exist_ok=True)
movefile(p, new_path)
except Exception as e:
traceback.print_exc()
print(f"Could not process {p} because of {e}")
def main() -> None:
"""Main entry point."""
reply = input(
f"WARNING! This will organize all your videos in the obs path:\n {PATH_DEFAULT_OBS}\ncontinue? [y/n]: "
)
    if reply.lower() == "y":
organize()
if __name__ == "__main__":
main()
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: a004be6db5b89fa71ce5391401e2071c48d2e7ca | size: 276 | ext: py | lang: Python
max_stars: path=Mundo1/aula8C.py, repo_name=OliveiraVasconcelos/Python-CursoemVideo, head_hexsha=bcca20d24b6af7bde9ce290b011cfa5abc0c1cc3, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=Mundo1/aula8C.py, repo_name=OliveiraVasconcelos/Python-CursoemVideo, head_hexsha=bcca20d24b6af7bde9ce290b011cfa5abc0c1cc3, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=Mundo1/aula8C.py, repo_name=OliveiraVasconcelos/Python-CursoemVideo, head_hexsha=bcca20d24b6af7bde9ce290b011cfa5abc0c1cc3, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import math
x = float(input('Digite um ângulo: '))
tangente = math.tan(math.radians(x))
cos = math.cos(math.radians(x))
seno = math.sin(math.radians(x))
print(f'O cosseno de {x} é {cos:.2f}')
print(f'O seno de {x} é {seno:.2f}')
print(f'A tangente de {x} é {tangente:.2f}')
avg_line_length: 34.5 | max_line_length: 45 | alphanum_fraction: 0.65942
original_content:
import math
x = float(input('Digite um ângulo: '))
tangente = math.tan(math.radians(x))
cos = math.cos(math.radians(x))
seno = math.sin(math.radians(x))
print(f'O cosseno de {x} é {cos:.2f}')
print(f'O seno de {x} é {seno:.2f}')
print(f'A tangente de {x} é {tangente:.2f}')
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: e2412a6280e8cd18645880fc44e80ad1de79dc52 | size: 2,482 | ext: py | lang: Python
max_stars: path=recipes/Python/273844_Minimal_http_upload_cgi/recipe-273844.py, repo_name=tdiprima/code, head_hexsha=61a74f5f93da087d27c70b2efe779ac6bd2a3b4f, licenses=["MIT"], count=2,023, event_min=2017-07-29T09:34:46.000Z, event_max=2022-03-24T08:00:45.000Z
max_issues: path=recipes/Python/273844_Minimal_http_upload_cgi/recipe-273844.py, repo_name=unhacker/code, head_hexsha=73b09edc1b9850c557a79296655f140ce5e853db, licenses=["MIT"], count=32, event_min=2017-09-02T17:20:08.000Z, event_max=2022-02-11T17:49:37.000Z
max_forks: path=recipes/Python/273844_Minimal_http_upload_cgi/recipe-273844.py, repo_name=unhacker/code, head_hexsha=73b09edc1b9850c557a79296655f140ce5e853db, licenses=["MIT"], count=780, event_min=2017-07-28T19:23:28.000Z, event_max=2022-03-25T20:39:41.000Z
content:
#!/usr/local/bin/python
"""This demonstrates a minimal http upload cgi.
This allows a user to upload up to three files at once.
It is trivial to change the number of files uploaded.
This script has security risks. A user could attempt to fill
a disk partition with endless uploads.
If you have a system open to the public you would obviously want
to limit the size and number of files written to the disk.
"""
import cgi
import cgitb; cgitb.enable()
import os, sys
try: # Windows needs stdio set for binary mode.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
UPLOAD_DIR = "/tmp"
HTML_TEMPLATE = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><title>File Upload</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head><body><h1>File Upload</h1>
<form action="%(SCRIPT_NAME)s" method="POST" enctype="multipart/form-data">
File name: <input name="file_1" type="file"><br>
File name: <input name="file_2" type="file"><br>
File name: <input name="file_3" type="file"><br>
<input name="submit" type="submit">
</form>
</body>
</html>"""
def print_html_form ():
"""This prints out the html form. Note that the action is set to
the name of the script which makes this is a self-posting form.
In other words, this cgi both displays a form and processes it.
"""
print "content-type: text/html\n"
print HTML_TEMPLATE % {'SCRIPT_NAME':os.environ['SCRIPT_NAME']}
def save_uploaded_file (form_field, upload_dir):
"""This saves a file uploaded by an HTML form.
The form_field is the name of the file input field from the form.
For example, the following form_field would be "file_1":
<input name="file_1" type="file">
The upload_dir is the directory where the file will be written.
If no file was uploaded or if the field does not exist then
this does nothing.
"""
form = cgi.FieldStorage()
if not form.has_key(form_field): return
fileitem = form[form_field]
if not fileitem.file: return
fout = file (os.path.join(upload_dir, fileitem.filename), 'wb')
while 1:
chunk = fileitem.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
save_uploaded_file ("file_1", UPLOAD_DIR)
save_uploaded_file ("file_2", UPLOAD_DIR)
save_uploaded_file ("file_3", UPLOAD_DIR)
print_html_form ()
avg_line_length: 35.971014 | max_line_length: 82 | alphanum_fraction: 0.699436
original_content:
#!/usr/local/bin/python
"""This demonstrates a minimal http upload cgi.
This allows a user to upload up to three files at once.
It is trivial to change the number of files uploaded.
This script has security risks. A user could attempt to fill
a disk partition with endless uploads.
If you have a system open to the public you would obviously want
to limit the size and number of files written to the disk.
"""
import cgi
import cgitb; cgitb.enable()
import os, sys
try: # Windows needs stdio set for binary mode.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
UPLOAD_DIR = "/tmp"
HTML_TEMPLATE = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><title>File Upload</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head><body><h1>File Upload</h1>
<form action="%(SCRIPT_NAME)s" method="POST" enctype="multipart/form-data">
File name: <input name="file_1" type="file"><br>
File name: <input name="file_2" type="file"><br>
File name: <input name="file_3" type="file"><br>
<input name="submit" type="submit">
</form>
</body>
</html>"""
def print_html_form ():
"""This prints out the html form. Note that the action is set to
the name of the script which makes this is a self-posting form.
In other words, this cgi both displays a form and processes it.
"""
print "content-type: text/html\n"
print HTML_TEMPLATE % {'SCRIPT_NAME':os.environ['SCRIPT_NAME']}
def save_uploaded_file (form_field, upload_dir):
"""This saves a file uploaded by an HTML form.
The form_field is the name of the file input field from the form.
For example, the following form_field would be "file_1":
<input name="file_1" type="file">
The upload_dir is the directory where the file will be written.
If no file was uploaded or if the field does not exist then
this does nothing.
"""
form = cgi.FieldStorage()
if not form.has_key(form_field): return
fileitem = form[form_field]
if not fileitem.file: return
fout = file (os.path.join(upload_dir, fileitem.filename), 'wb')
while 1:
chunk = fileitem.file.read(100000)
if not chunk: break
fout.write (chunk)
fout.close()
save_uploaded_file ("file_1", UPLOAD_DIR)
save_uploaded_file ("file_2", UPLOAD_DIR)
save_uploaded_file ("file_3", UPLOAD_DIR)
print_html_form ()
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 86c7c2703b64373394ab93187cb0ba9fcade85a4 | size: 16,837 | ext: py | lang: Python
max_stars: path=tools/converter/integration.py, repo_name=zypeh/utf8rewind, head_hexsha=46dd600a4716f80debbe4bbbe4a5b19daf0e2cb2, licenses=["MIT"], count=5, event_min=2021-02-07T08:19:59.000Z, event_max=2021-11-18T01:17:01.000Z
max_issues: path=tools/converter/integration.py, repo_name=zypeh/utf8rewind, head_hexsha=46dd600a4716f80debbe4bbbe4a5b19daf0e2cb2, licenses=["MIT"], count=1, event_min=2020-05-25T08:59:35.000Z, event_max=2020-05-25T14:59:09.000Z
max_forks: path=tools/converter/integration.py, repo_name=zypeh/utf8rewind, head_hexsha=46dd600a4716f80debbe4bbbe4a5b19daf0e2cb2, licenses=["MIT"], count=1, event_min=2020-06-06T10:30:33.000Z, event_max=2020-06-06T10:30:33.000Z
content:
import argparse
import datetime
import os
import re
import sys
import unicodedata
import libs.header
import libs.unicode
import libs.utf8
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse Unicode codepoint database and write integration tests.')
parser.add_argument(
'-v', '--verbose',
dest = 'verbose',
action = 'store_true',
help = 'verbose output')
parser.add_argument(
'--casemapping',
dest = 'casemapping',
action = 'store_true',
help = 'write case mapping tests')
parser.add_argument(
'--normalization',
dest = 'normalization',
action = 'store_true',
help = 'write normalization tests')
parser.add_argument(
'--is-normalized',
dest = 'isnormalized',
action = 'store_true',
help = 'write is-normalized tests')
parser.add_argument(
'--casefolding',
dest = 'casefolding',
action = 'store_true',
help = 'write casefolding tests')
args = parser.parse_args()
if not args.casemapping and not args.normalization and not args.isnormalized and not args.casefolding:
all = True
else:
all = False
db = unicodedata.Database()
db.loadFromFiles(None)
if all or args.casemapping:
suite = CaseMappingIntegrationSuite(db)
suite.execute()
if all or args.normalization:
suite = NormalizationIntegrationSuite(db)
suite.execute()
if all or args.isnormalized:
suite = IsNormalizedIntegrationSuite(db)
suite.execute()
if all or args.casefolding:
suite = CaseFoldingIntegrationSuite(db)
suite.execute()
avg_line_length: 28.440878 | max_line_length: 231 | alphanum_fraction: 0.639247
original_content:
import argparse
import datetime
import os
import re
import sys
import unicodedata
import libs.header
import libs.unicode
import libs.utf8
def codepointToUnicode(codepoint):
return "U+%04X" % codepoint
class IntegrationSuite:
def __init__(self, db):
self.db = db
def open(self, filepath):
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
self.header = libs.header.Header(script_path + filepath)
self.header.generatedNotice()
self.header.newLine()
self.header.writeLine("#include \"tests-base.hpp\"")
self.header.newLine()
def close(self):
self.header.close()
def execute(self):
pass
class CaseMappingIntegrationSuite(IntegrationSuite):
def execute(self):
print('Executing case mapping tests...')
valid_blocks = []
print('Checking for valid blocks...')
for b in db.blocks:
for u in range(b.start, b.end + 1):
if u in db.records:
r = db.records[u]
if r.uppercase or r.lowercase or r.titlecase:
valid_blocks.append(b)
break
print('Writing case mapping tests...')
self.open('/../../source/tests/integration-casemapping.cpp')
self.header.writeLine("#include \"../helpers/helpers-casemapping.hpp\"")
self.header.write("#include \"../helpers/helpers-locale.hpp\"")
for b in valid_blocks:
self.writeTest(range(b.start, b.end + 1), b.name)
self.close()
def writeTest(self, codepointRange, name):
name = re.sub('[ \-]', '', name)
if len(codepointRange) > 4000:
for i in range(0, len(codepointRange), 4000):
chunk = codepointRange[i:i + 4000]
self.writeTest(chunk, name + "Part" + str((i / 4000) + 1))
return
records = []
for i in codepointRange:
if i not in self.db.records:
continue
records.append(self.db.records[i])
if len(records) == 0:
return
print("Writing tests " + codepointToUnicode(codepointRange[0]) + " - " + codepointToUnicode(codepointRange[len(codepointRange) - 1]) + " \"" + name + "\"")
self.header.newLine()
self.header.newLine()
self.header.writeLine("TEST(CaseMapping, " + name + ")")
self.header.writeLine("{")
self.header.indent()
for r in records:
converted_codepoint = "0x%08X" % r.codepoint
if r.codepoint == 0:
self.header.writeLine("EXPECT_CASEMAPPING_CODEPOINT_NUL_EQ(" + converted_codepoint + ", \"" + r.name + "\", UTF8_LOCALE_DEFAULT);")
else:
if r.lowercase:
converted_lowercase = libs.utf8.unicodeToUtf8(r.lowercase)
else:
converted_lowercase = libs.utf8.codepointToUtf8(r.codepoint)[0]
if r.uppercase:
converted_uppercase = libs.utf8.unicodeToUtf8(r.uppercase)
else:
converted_uppercase = libs.utf8.codepointToUtf8(r.codepoint)[0]
if r.titlecase:
converted_titlecase = libs.utf8.unicodeToUtf8(r.titlecase)
else:
converted_titlecase = libs.utf8.codepointToUtf8(r.codepoint)[0]
self.header.writeLine("EXPECT_CASEMAPPING_CODEPOINT_EQ(" + converted_codepoint + ", \"" + converted_lowercase + "\", \"" + converted_uppercase + "\", \"" + converted_titlecase + "\", \"" + r.name + "\", UTF8_LOCALE_DEFAULT);")
self.header.outdent()
self.header.write("}")
class NormalizationEntry:
def __init__(self):
self.codepoint = 0
self.source = ""
self.nfc = ""
self.nfd = ""
self.nfkc = ""
self.nfkd = ""
def __str__(self):
return "{ codepoint " + hex(self.codepoint) + " source " + self.source + " nfc " + self.nfc + " nfd " + self.nfd + " nfkc " + self.nfkc + " nfkd " + self.nfkd + " }";
def parse(self, entry):
if len(entry.matches[0]) == 1:
self.codepoint = int(entry.matches[0][0], 16)
self.source = self.matchToString(entry.matches[0])
self.nfc = self.matchToString(entry.matches[1])
self.nfd = self.matchToString(entry.matches[2])
self.nfkc = self.matchToString(entry.matches[3])
self.nfkd = self.matchToString(entry.matches[4])
def matchToString(self, match):
codepoints = []
for m in match:
codepoints.append(int(m, 16))
return libs.utf8.unicodeToUtf8(codepoints)
class NormalizationGroup:
def __init__(self, block):
self.block = block
self.entries = []
class NormalizationSection:
def __init__(self, identifier, title):
self.identifier = identifier
self.title = title
self.entries = []
class NormalizationIntegrationSuite(IntegrationSuite):
def __init__(self, db):
self.db = db
self.current = None
self.sections = []
self.blockGroups = dict()
self.exclusions = CompositionExclusionIntegrationSuite(db)
def execute(self):
print('Executing normalization tests...')
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
document_normalization = libs.unicode.UnicodeDocument()
document_normalization.parse(script_path + '/data/NormalizationTest.txt')
document_normalization.accept(self)
self.exclusions.execute()
print('Writing normalization tests...')
self.open('/../../source/tests/integration-normalization.cpp')
self.header.writeLine("#include \"../helpers/helpers-normalization.hpp\"")
self.header.write("#include \"../helpers/helpers-strings.hpp\"")
section_mapping = {
'Part0': self.writeSpecificCasesSection,
'Part1': self.writeBlockGroupsSection,
'Part2': self.writeDefaultSection,
'Part3': self.writeDefaultSection,
}
for s in self.sections:
print(s.title + " (" + s.identifier + "):")
section_mapping[s.identifier](s)
self.writeNormalizationTest(self.exclusions.entries, "Composition exclusions", 100)
self.close()
def writeDefaultSection(self, section):
if len(section.entries) == 0:
return
self.writeNormalizationTest(section.entries, section.title, 100)
def writeSpecificCasesSection(self, section):
if len(section.entries) == 0:
return
normalization = []
ordering = []
for e in section.entries:
if e.codepoint == 0:
ordering.append(e)
else:
normalization.append(e)
self.writeNormalizationTest(normalization, section.title + " Codepoints")
self.writeNormalizationTest(ordering, section.title + " Ordering")
def writeBlockGroupsSection(self, section):
groups = dict()
for u in section.entries:
e = self.db.records[u.codepoint]
if e.block.name in groups:
group = groups[e.block.name]
else:
group = NormalizationGroup(e.block)
groups[e.block.name] = group
group.entries.append(u)
block_groups = sorted(groups.items(), key = lambda item: item[1].block.start)
for g in block_groups:
if g[1].block.start == 0xAC00 and g[1].block.end == 0xD7AF:
# ignore hangul syllables
continue
self.writeNormalizationTest(g[1].entries, "Characters " + g[0])
def writeNormalizationTest(self, entries, title, limit = 2000):
if len(entries) > limit:
for i in range(0, len(entries), limit):
chunk = entries[i:i + limit]
self.writeNormalizationTest(chunk, title + " Part" + str(int(i / limit) + 1), limit)
return
title = re.sub('[^\w ]', '', title.title()).replace(' ', '')
print("Writing tests \"" + title + "\"")
self.header.newLine()
self.header.newLine()
self.header.writeLine("TEST(Normalization, " + title + ")")
self.header.writeLine("{")
self.header.indent()
for e in entries:
self.header.writeIndentation()
if e.codepoint == 0:
self.header.write("CHECK_NORMALIZE_SEQUENCE")
self.header.write("(\"" + e.source + "\", \"" + e.nfd + "\", \"" + e.nfc + "\", \"" + e.nfkd + "\", \"" + e.nfkc + "\");")
else:
self.header.write("CHECK_NORMALIZE_CODEPOINT")
self.header.write("(0x" + format(e.codepoint, '08X') + ", \"" + e.nfd + "\", \"" + e.nfc + "\", \"" + e.nfkd + "\", \"" + e.nfkc + "\", \"" + self.db.records[e.codepoint].name + "\");")
self.header.newLine()
self.header.outdent()
self.header.write("}")
def visitDocument(self, document):
print('Parsing normalization tests...')
return True
def visitSection(self, section):
self.current = NormalizationSection(section.identifier, section.title)
self.sections.append(self.current)
return True
def visitEntry(self, entry):
normalization = NormalizationEntry()
normalization.parse(entry)
self.current.entries.append(normalization)
return True
class CompositionExclusionIntegrationSuite(IntegrationSuite):
def __init__(self, db):
self.db = db
self.entries = []
def execute(self):
print('Executing composition exclusion tests...')
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
document_exclusions = libs.unicode.UnicodeDocument()
document_exclusions.parse(script_path + '/data/CompositionExclusions.txt')
document_exclusions.accept(self)
def visitDocument(self, document):
return True
def visitSection(self, section):
return True
def visitEntry(self, entry):
codepoint = int(entry.matches[0][0], 16)
if codepoint in self.db.records:
record = db.records[codepoint]
entry = NormalizationEntry()
entry.codepoint = record.codepoint
entry.source = libs.utf8.codepointToUtf8Hex(record.codepoint)
entry.nfd = libs.utf8.unicodeToUtf8(record.decomposedNFD)
entry.nfc = entry.nfd
entry.nfkd = libs.utf8.unicodeToUtf8(record.decomposedNFD)
entry.nfkc = entry.nfkd
self.entries.append(entry)
return True
class QuickCheckEntry:
def __init__(self, codepoint):
self.codepoint = codepoint
self.nfc = "YES"
self.nfd = "YES"
self.nfkc = "YES"
self.nfkd = "YES"
class QuickCheckGroup:
def __init__(self, block):
self.block = block
self.entries = []
class IsNormalizedIntegrationSuite(IntegrationSuite):
def __init__(self, db):
self.db = db
self.entries = dict()
self.groups = dict()
def execute(self):
print('Parsing quickcheck records...')
records_list = [
{
"record": "qcNFCRecords",
"target": "nfc"
},
{
"record": "qcNFDRecords",
"target": "nfd"
},
{
"record": "qcNFKCRecords",
"target": "nfkc"
},
{
"record": "qcNFKDRecords",
"target": "nfkd"
},
]
value_map = {
1: "MAYBE",
2: "NO"
}
for i in records_list:
for r in db.__dict__[i["record"]]:
str_value = value_map[r.value]
for c in range(r.start, r.start + r.count + 1):
if c in self.entries:
e = self.entries[c]
else:
e = QuickCheckEntry(c)
self.entries[c] = e
e.__dict__[i["target"]] = str_value
for e in self.entries.values():
block = self.db.getBlockByCodepoint(e.codepoint)
if block in self.groups:
group = self.groups[block]
else:
group = QuickCheckGroup(block)
self.groups[block] = group
group.entries.append(e)
print('Writing is-normalized tests...')
self.open('/../../source/tests/integration-isnormalized.cpp')
self.header.write("#include \"../helpers/helpers-normalization.hpp\"")
for key, value in sorted(self.groups.items(), key = lambda block: block[0].start):
if key.start == 0xAC00 and key.end == 0xD7AF:
# ignore hangul syllables
continue
self.writeBlockSection(sorted(value.entries, key = lambda entry: entry.codepoint), key.name)
def writeBlockSection(self, entries, title, limit = 2000):
if len(entries) > limit:
for i in range(0, len(entries), limit):
chunk = entries[i:i + limit]
self.writeBlockSection(chunk, title + " Part" + str((i / limit) + 1), limit)
return
title = re.sub('[^\w ]', '', title.title()).replace(' ', '')
print("Writing tests \"" + title + "\"")
self.header.newLine()
self.header.newLine()
self.header.writeLine("TEST(IsNormalized, " + title + ")")
self.header.writeLine("{")
self.header.indent()
for e in entries:
self.header.writeLine("CHECK_IS_NORMALIZED(0x" + format(e.codepoint, '08X') + ", " + e.nfd + ", " + e.nfc + ", " + e.nfkd + ", " + e.nfkc + ", \"" + self.db.records[e.codepoint].name + "\");")
self.header.outdent()
self.header.write("}")
class CaseFoldingRecord():
def __init__(self):
self.codePoint = 0
self.type = ''
self.folded = []
def __str__(self):
return '{ codePoint ' + hex(self.codePoint) + ' type ' + self.type + ' folded ' + str(self.folded) + ' }'
def parse(self, entry):
self.codePoint = int(entry.matches[0][0], 16)
types = {
'C': 'Common',
'F': 'Full',
'S': 'Simple',
'T': 'Turkish'
}
self.type = types[entry.matches[1][0]]
for m in entry.matches[2]:
self.folded.append(int(m, 16))
class CaseFoldingIntegrationSuite(IntegrationSuite):
def __init__(self, db):
self.db = db
self.records = []
def execute(self):
print('Parsing case folding records...')
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
document_exclusions = libs.unicode.UnicodeDocument()
document_exclusions.parse(script_path + '/data/CaseFolding.txt')
document_exclusions.accept(self)
self.open('/../../source/tests/integration-casefolding.cpp')
self.header.writeLine("#include \"../helpers/helpers-casemapping.hpp\"")
self.header.write("#include \"../helpers/helpers-locale.hpp\"")
tests_turkish = []
for b in db.blocks:
tests = []
for r in self.records:
if r.codePoint >= b.start and r.codePoint <= b.end:
if r.type in ['Common', 'Full']:
tests.append(r)
elif r.type == 'Turkish':
tests_turkish.append(r)
if len(tests) > 0:
self.writeTest(tests, b.name)
if len(tests_turkish) > 0:
print("Writing tests to \"TurkishLocale\"")
self.header.newLine()
self.header.newLine()
self.header.writeLine("TEST(CaseFolding, TurkishAndAzeriLatinLocale)")
self.header.writeLine("{")
self.header.indent()
for r in tests_turkish:
self.header.writeLine("EXPECT_CASEFOLDING_EQ(0x" + format(r.codePoint, '08X') + ", \"" + libs.utf8.unicodeToUtf8(r.folded) + "\", \"" + self.db.records[r.codePoint].name + "\", UTF8_LOCALE_TURKISH_AND_AZERI_LATIN);")
self.header.outdent()
self.header.write("}")
def writeTest(self, records, name):
name = re.sub('[ \-]', '', name)
if len(records) > 4000:
for i in range(0, len(records), 4000):
chunk = records[i:i + 4000]
self.writeTest(chunk, name + "Part" + str((i / 4000) + 1))
return
print("Writing tests to \"" + name + "\"")
self.header.newLine()
self.header.newLine()
self.header.writeLine("TEST(CaseFolding, " + name + ")")
self.header.writeLine("{")
self.header.indent()
for r in records:
self.header.writeLine("EXPECT_CASEFOLDING_EQ(0x" + format(r.codePoint, '08X') + ", \"" + libs.utf8.unicodeToUtf8(r.folded) + "\", \"" + self.db.records[r.codePoint].name + "\", UTF8_LOCALE_DEFAULT);")
self.header.outdent()
self.header.write("}")
def visitDocument(self, document):
return True
def visitSection(self, section):
return True
def visitEntry(self, entry):
record = CaseFoldingRecord()
record.parse(entry)
self.records.append(record)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse Unicode codepoint database and write integration tests.')
parser.add_argument(
'-v', '--verbose',
dest = 'verbose',
action = 'store_true',
help = 'verbose output')
parser.add_argument(
'--casemapping',
dest = 'casemapping',
action = 'store_true',
help = 'write case mapping tests')
parser.add_argument(
'--normalization',
dest = 'normalization',
action = 'store_true',
help = 'write normalization tests')
parser.add_argument(
'--is-normalized',
dest = 'isnormalized',
action = 'store_true',
help = 'write is-normalized tests')
parser.add_argument(
'--casefolding',
dest = 'casefolding',
action = 'store_true',
help = 'write casefolding tests')
args = parser.parse_args()
if not args.casemapping and not args.normalization and not args.isnormalized and not args.casefolding:
all = True
else:
all = False
db = unicodedata.Database()
db.loadFromFiles(None)
if all or args.casemapping:
suite = CaseMappingIntegrationSuite(db)
suite.execute()
if all or args.normalization:
suite = NormalizationIntegrationSuite(db)
suite.execute()
if all or args.isnormalized:
suite = IsNormalizedIntegrationSuite(db)
suite.execute()
if all or args.casefolding:
suite = CaseFoldingIntegrationSuite(db)
suite.execute()
filtered:remove_function_no_docstring: 13,717 | filtered:remove_class_no_docstring: 190 | filtered:remove_delete_markers: 1,369
hexsha: abefb5b34cf7c2c8dc3c3cbfe3542add3720d435 | size: 803 | ext: py | lang: Python
max_stars: path=notebooks/__code/__all/config_handler.py, repo_name=mabrahamdevops/python_notebooks, head_hexsha=6d5e7383b60cc7fd476f6e85ab93e239c9c32330, licenses=["BSD-3-Clause"], count=null, event_min=null, event_max=null
max_issues: path=notebooks/__code/__all/config_handler.py, repo_name=mabrahamdevops/python_notebooks, head_hexsha=6d5e7383b60cc7fd476f6e85ab93e239c9c32330, licenses=["BSD-3-Clause"], count=null, event_min=null, event_max=null
max_forks: path=notebooks/__code/__all/config_handler.py, repo_name=mabrahamdevops/python_notebooks, head_hexsha=6d5e7383b60cc7fd476f6e85ab93e239c9c32330, licenses=["BSD-3-Clause"], count=null, event_min=null, event_max=null
content:
try:
from PyQt4.QtCore import QSettings
except ImportError:
from PyQt5.QtCore import QSettings
avg_line_length: 24.333333 | max_line_length: 61 | alphanum_fraction: 0.603985
original_content:
try:
from PyQt4.QtCore import QSettings
except ImportError:
from PyQt5.QtCore import QSettings
def init_config():
settings = QSettings('settings.ini', QSettings.IniFormat)
def save_config(key='', value='', group=''):
settings = QSettings('settings.ini')
if not (group == ''):
settings.beginGroup(group)
if value == '':
value = None
settings.setValue(key, value)
if not (group == ''):
settings.endGroup()
def load_config(key='', default_value='', group=''):
settings = QSettings('settings.ini')
if not (group == ''):
settings.beginGroup(group)
value = settings.value(key)
settings.endGroup()
if (value is None) or (value == 'None'):
return default_value
else:
return value
filtered:remove_function_no_docstring: 614 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 81
hexsha: 15e381d0418ee10c4a423de63fd47788b4712bed | size: 245 | ext: py | lang: Python
max_stars: path=app/service_validators/base_validator.py, repo_name=OPEN-NEXT/import-export, head_hexsha=db3e720f29cdc30846667f7cd6ba3cc653146fc4, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=app/service_validators/base_validator.py, repo_name=OPEN-NEXT/import-export, head_hexsha=db3e720f29cdc30846667f7cd6ba3cc653146fc4, licenses=["MIT"], count=25, event_min=2021-03-09T15:27:44.000Z, event_max=2021-06-09T10:09:43.000Z
max_forks: path=app/service_validators/base_validator.py, repo_name=wikifactory/import-export, head_hexsha=f7775d52d23b06a47cdaad13ae48e7727bb850fd, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
from re import search
from typing import List, Optional, Pattern
avg_line_length: 27.222222 | max_line_length: 79 | alphanum_fraction: 0.730612
original_content:
from re import search
from typing import List, Optional, Pattern
def regex_validator(
url: str, *, service_id: str, regexes: List[Pattern]
) -> Optional[str]:
return service_id if any(search(regex, url) for regex in regexes) else None
filtered:remove_function_no_docstring: 156 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 23
hexsha: 0a8a2003d5158da4a6b7bb11a36556ab218b3b4b | size: 1,475 | ext: py | lang: Python
max_stars: path=sila_library/sila2lib/fdl_parser/data_type_response.py, repo_name=lemmi25/sila2lib, head_hexsha=ac4db8ee7fe6c99bde498151a539b25be2021d2f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_issues: path=sila_library/sila2lib/fdl_parser/data_type_response.py, repo_name=lemmi25/sila2lib, head_hexsha=ac4db8ee7fe6c99bde498151a539b25be2021d2f, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=sila_library/sila2lib/fdl_parser/data_type_response.py, repo_name=lemmi25/sila2lib, head_hexsha=ac4db8ee7fe6c99bde498151a539b25be2021d2f, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
"""
__________________________________________________________________________________________________
:project: SiLA2_python
:details: Response data type in a SiLA Command, Property, Intermediate, ...
:file: data_type_response.py
:authors: Timm Severin
:date: (creation) 20190820
:date: (last modification) 20190820
__________________________________________________________________________________________________
**Copyright**:
This file is provided "AS IS" with NO WARRANTY OF ANY KIND,
INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
For further Information see LICENSE file that comes with this distribution.
__________________________________________________________________________________________________
"""
# import library packages
from .data_type_parameter import ParameterDataType
class ResponseDataType(ParameterDataType):
"""
The class for responses.
This is essentially identical to a :class:`~.ParameterDataType`, however can be handled differently in the final
application and thus exists as its own class/object.
.. note:: When checking whether an object is a response or a parameter, note that
:func:`isinstance(obj, ParameterDataType)` will also return true if the object is a
:class:`ResponseDataType`, since they are derived from each other. Use ``type(obj) is ParameterDataType``
for a precise check.
"""
avg_line_length: 37.820513 | max_line_length: 120 | alphanum_fraction: 0.772203
original_content:
"""
__________________________________________________________________________________________________
:project: SiLA2_python
:details: Response data type in a SiLA Command, Property, Intermediate, ...
:file: data_type_response.py
:authors: Timm Severin
:date: (creation) 20190820
:date: (last modification) 20190820
__________________________________________________________________________________________________
**Copyright**:
This file is provided "AS IS" with NO WARRANTY OF ANY KIND,
INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
For further Information see LICENSE file that comes with this distribution.
__________________________________________________________________________________________________
"""
# import library packages
from .data_type_parameter import ParameterDataType
class ResponseDataType(ParameterDataType):
"""
The class for responses.
This is essentially identical to a :class:`~.ParameterDataType`, however can be handled differently in the final
application and thus exists as its own class/object.
.. note:: When checking whether an object is a response or a parameter, note that
:func:`isinstance(obj, ParameterDataType)` will also return true if the object is a
:class:`ResponseDataType`, since they are derived from each other. Use ``type(obj) is ParameterDataType``
for a precise check.
"""
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 5c9e3dccd4c10b46579b853910c08110a0cbf6bd | size: 1,939 | ext: py | lang: Python
max_stars: path=full/api_reader.py, repo_name=PythonDayMX/MLaaS, head_hexsha=3a109f1e83edabb4044c47e1cfa2a39c6227685c, licenses=["MIT"], count=8, event_min=2018-11-30T05:05:10.000Z, event_max=2021-04-05T07:02:53.000Z
max_issues: path=full/api_reader.py, repo_name=PythonDayMX/MLaaS, head_hexsha=3a109f1e83edabb4044c47e1cfa2a39c6227685c, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=full/api_reader.py, repo_name=PythonDayMX/MLaaS, head_hexsha=3a109f1e83edabb4044c47e1cfa2a39c6227685c, licenses=["MIT"], count=2, event_min=2018-11-30T17:09:05.000Z, event_max=2018-11-30T19:20:05.000Z
content:
# ===============================================================
# Author: Rodolfo Ferro
# Email: ferro@cimat.mx
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Rodolfo Ferro, for
# his workshop in PythonDay Mexico 2018 at CUCEA in Gdl, Mx.
# Any explicit usage of this script or its contents is granted
# according to the license provided and its conditions.
# ===============================================================
# -*- coding: utf-8 -*-
import requests
import pprint
import json
def get_json(url, filename):
"""
Download JSON response url for testing.
"""
# Get response:
response = requests.get(url)
# If response's status is 200:
if response.status_code == requests.codes.ok:
# Pretty print response:
pprint.pprint(response.json())
# Save response into a JSON file:
with open(filename, 'wt') as output:
output.write(response.text)
return
def get_prediction(url, filename):
"""
Download JSON response url for prediction.
"""
# Set metadata:
headers = {'Content-type': 'application/json'}
input_values = {'sepal_length': 6.4,
'sepal_width': 3.2,
'petal_length': 4.5,
'petal_width': 1.5}
# Get response:
response = requests.post(url, json=input_values, headers=headers)
# If response's status is 200:
if response.status_code == requests.codes.ok:
# Pretty print response:
pprint.pprint(response.json())
# Save response into a JSON file:
with open(filename, 'wt') as output:
output.write(response.text)
return
if __name__ == '__main__':
# Try out our JSON response downloader:
get_json('http://localhost:5000/api/v0.0', 'response.json')
get_prediction('http://localhost:5000/api/v0.0/predict', 'response.json')
avg_line_length: 28.101449 | max_line_length: 77 | alphanum_fraction: 0.598762
original_content:
# ===============================================================
# Author: Rodolfo Ferro
# Email: ferro@cimat.mx
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Rodolfo Ferro, for
# his workshop in PythonDay Mexico 2018 at CUCEA in Gdl, Mx.
# Any explicit usage of this script or its contents is granted
# according to the license provided and its conditions.
# ===============================================================
# -*- coding: utf-8 -*-
import requests
import pprint
import json
def get_json(url, filename):
"""
Download JSON response url for testing.
"""
# Get response:
response = requests.get(url)
# If response's status is 200:
if response.status_code == requests.codes.ok:
# Pretty print response:
pprint.pprint(response.json())
# Save response into a JSON file:
with open(filename, 'wt') as output:
output.write(response.text)
return
def get_prediction(url, filename):
"""
Download JSON response url for prediction.
"""
# Set metadata:
headers = {'Content-type': 'application/json'}
input_values = {'sepal_length': 6.4,
'sepal_width': 3.2,
'petal_length': 4.5,
'petal_width': 1.5}
# Get response:
response = requests.post(url, json=input_values, headers=headers)
# If response's status is 200:
if response.status_code == requests.codes.ok:
# Pretty print response:
pprint.pprint(response.json())
# Save response into a JSON file:
with open(filename, 'wt') as output:
output.write(response.text)
return
if __name__ == '__main__':
# Try out our JSON response downloader:
get_json('http://localhost:5000/api/v0.0', 'response.json')
get_prediction('http://localhost:5000/api/v0.0/predict', 'response.json')
filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 0
hexsha: 1647294741a7ef54149b7e1a9153b9c05311ea75 | size: 229 | ext: py | lang: Python
max_stars: path=PyBullet_experiments/experiments/laber_sac_halfchee.py, repo_name=AnonymousLaBER/LaBER, head_hexsha=af9da8ffda0654e2021de20cb162ef71dc9b9d6c, licenses=["MIT"], count=3, event_min=2021-10-11T22:25:02.000Z, event_max=2022-03-04T20:00:56.000Z
max_issues: path=PyBullet_experiments/experiments/laber_sac_halfchee.py, repo_name=AnonymousLaBER/LaBER, head_hexsha=af9da8ffda0654e2021de20cb162ef71dc9b9d6c, licenses=["MIT"], count=null, event_min=null, event_max=null
max_forks: path=PyBullet_experiments/experiments/laber_sac_halfchee.py, repo_name=AnonymousLaBER/LaBER, head_hexsha=af9da8ffda0654e2021de20cb162ef71dc9b9d6c, licenses=["MIT"], count=null, event_min=null, event_max=null
content:
import pybullet_envs
from stable_baselines3 import SAC_LABER
model = SAC_LABER('MlpPolicy', 'HalfCheetahBulletEnv-v0', verbose=1, tensorboard_log="results/long_SAC_LABER_HalfCheetahBullet/")
model.learn(total_timesteps=3000000)
avg_line_length: 38.166667 | max_line_length: 129 | alphanum_fraction: 0.847162
original_content:
import pybullet_envs
from stable_baselines3 import SAC_LABER
model = SAC_LABER('MlpPolicy', 'HalfCheetahBulletEnv-v0', verbose=1, tensorboard_log="results/long_SAC_LABER_HalfCheetahBullet/")
model.learn(total_timesteps=3000000)
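The run above trains for three million steps but never persists the agent. A small follow-up sketch, assuming the LaBER fork keeps stable-baselines3's usual save/load and evaluation interface (model.save, SAC_LABER.load and evaluate_policy are assumptions here, not shown in the original snippet):

import gym
import pybullet_envs  # noqa: F401  (registers the Bullet environments)
from stable_baselines3.common.evaluation import evaluate_policy

model.save("sac_laber_halfcheetah")  # persist the trained agent
eval_env = gym.make("HalfCheetahBulletEnv-v0")
loaded = SAC_LABER.load("sac_laber_halfcheetah", env=eval_env)
mean_reward, std_reward = evaluate_policy(loaded, eval_env, n_eval_episodes=5)
print(f"mean reward over 5 episodes: {mean_reward:.1f} +/- {std_reward:.1f}")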
| 0
| 0
| 0
|
f0ed52cc13fd5b72d59358509be5b900dc005beb
| 8,030
|
py
|
Python
|
testapplehealthdata.py
|
jclark017/applehealthdata
|
da825124011fa46f8309203b8a25b80ab5f062e8
|
[
"MIT"
] | 56
|
2016-06-05T15:41:02.000Z
|
2022-03-02T21:51:50.000Z
|
testapplehealthdata.py
|
jclark017/applehealthdata
|
da825124011fa46f8309203b8a25b80ab5f062e8
|
[
"MIT"
] | 1
|
2021-12-30T14:06:18.000Z
|
2021-12-30T14:06:18.000Z
|
testapplehealthdata.py
|
jclark017/applehealthdata
|
da825124011fa46f8309203b8a25b80ab5f062e8
|
[
"MIT"
] | 21
|
2017-02-04T13:06:20.000Z
|
2021-12-29T18:56:42.000Z
|
# -*- coding: utf-8 -*-
"""
testapplehealthdata.py: tests for the applehealthdata.py
Copyright (c) 2016 Nicholas J. Radcliffe
Licence: MIT
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import unittest
from collections import Counter
from applehealthdata import (HealthDataExtractor,
format_freqs, format_value,
abbreviate, encode)
CLEAN_UP = True
VERBOSE = False
def get_base_dir():
"""
Return the directory containing this test file,
    which will (normally) be the applehealthdata directory
also containing the testdata dir.
"""
return os.path.split(os.path.abspath(__file__))[0]
def get_testdata_dir():
"""Return the full path to the testdata directory"""
return os.path.join(get_base_dir(), 'testdata')
def get_tmp_dir():
"""Return the full path to the tmp directory"""
return os.path.join(get_base_dir(), 'tmp')
def remove_any_tmp_dir():
"""
Remove the temporary directory if it exists.
Returns its location either way.
"""
tmp_dir = get_tmp_dir()
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
return tmp_dir
def make_tmp_dir():
"""
Remove any existing tmp directory.
    Create empty tmp directory.
Return the location of the tmp dir.
"""
tmp_dir = remove_any_tmp_dir()
os.mkdir(tmp_dir)
return tmp_dir
def copy_test_data():
"""
Copy the test data export6s3sample.xml from testdata directory
to tmp directory.
"""
tmp_dir = make_tmp_dir()
name = 'export6s3sample.xml'
in_xml_file = os.path.join(get_testdata_dir(), name)
out_xml_file = os.path.join(get_tmp_dir(), name)
shutil.copyfile(in_xml_file, out_xml_file)
return out_xml_file
if __name__ == '__main__':
unittest.main()
| 31.490196
| 76
| 0.573101
|
# -*- coding: utf-8 -*-
"""
testapplehealthdata.py: tests for the applehealthdata.py
Copyright (c) 2016 Nicholas J. Radcliffe
Licence: MIT
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import shutil
import sys
import unittest
from collections import Counter
from applehealthdata import (HealthDataExtractor,
format_freqs, format_value,
abbreviate, encode)
CLEAN_UP = True
VERBOSE = False
def get_base_dir():
"""
Return the directory containing this test file,
    which will (normally) be the applehealthdata directory
also containing the testdata dir.
"""
return os.path.split(os.path.abspath(__file__))[0]
def get_testdata_dir():
"""Return the full path to the testdata directory"""
return os.path.join(get_base_dir(), 'testdata')
def get_tmp_dir():
"""Return the full path to the tmp directory"""
return os.path.join(get_base_dir(), 'tmp')
def remove_any_tmp_dir():
"""
Remove the temporary directory if it exists.
Returns its location either way.
"""
tmp_dir = get_tmp_dir()
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
return tmp_dir
def make_tmp_dir():
"""
Remove any existing tmp directory.
    Create empty tmp directory.
Return the location of the tmp dir.
"""
tmp_dir = remove_any_tmp_dir()
os.mkdir(tmp_dir)
return tmp_dir
def copy_test_data():
"""
Copy the test data export6s3sample.xml from testdata directory
to tmp directory.
"""
tmp_dir = make_tmp_dir()
name = 'export6s3sample.xml'
in_xml_file = os.path.join(get_testdata_dir(), name)
out_xml_file = os.path.join(get_tmp_dir(), name)
shutil.copyfile(in_xml_file, out_xml_file)
return out_xml_file
class TestAppleHealthDataExtractor(unittest.TestCase):
@classmethod
def tearDownClass(cls):
"""Clean up by removing the tmp directory, if it exists."""
if CLEAN_UP:
remove_any_tmp_dir()
def check_file(self, filename):
expected_output = os.path.join(get_testdata_dir(), filename)
actual_output = os.path.join(get_tmp_dir(), filename)
with open(expected_output) as f:
expected = f.read()
with open(actual_output) as f:
actual = f.read()
self.assertEqual((filename, expected), (filename, actual))
def test_tiny_reference_extraction(self):
path = copy_test_data()
data = HealthDataExtractor(path, verbose=VERBOSE)
data.extract()
for kind in ('StepCount', 'DistanceWalkingRunning',
'Workout', 'ActivitySummary'):
self.check_file('%s.csv' % kind)
def test_format_freqs(self):
counts = Counter()
self.assertEqual(format_freqs(counts), '')
counts['one'] += 1
self.assertEqual(format_freqs(counts), 'one: 1')
counts['one'] += 1
self.assertEqual(format_freqs(counts), 'one: 2')
counts['two'] += 1
counts['three'] += 1
self.assertEqual(format_freqs(counts),
'''one: 2
three: 1
two: 1''')
def test_format_null_values(self):
for dt in ('s', 'n', 'd', 'z'):
# Note: even an illegal type, z, produces correct output for
# null values.
# Questionable, but we'll leave as a feature
self.assertEqual(format_value(None, dt), '')
def test_format_numeric_values(self):
cases = {
'0': '0',
'3': '3',
'-1': '-1',
'2.5': '2.5',
}
for (k, v) in cases.items():
self.assertEqual((k, format_value(k, 'n')), (k, v))
def test_format_date_values(self):
hearts = 'any string not need escaping or quoting; even this: ♥♥'
cases = {
'01/02/2000 12:34:56': '01/02/2000 12:34:56',
hearts: hearts,
}
for (k, v) in cases.items():
self.assertEqual((k, format_value(k, 'd')), (k, v))
def test_format_string_values(self):
cases = {
'a': '"a"',
'': '""',
'one "2" three': r'"one \"2\" three"',
r'1\2\3': r'"1\\2\\3"',
}
for (k, v) in cases.items():
self.assertEqual((k, format_value(k, 's')), (k, v))
def test_abbreviate(self):
changed = {
'HKQuantityTypeIdentifierHeight': 'Height',
'HKQuantityTypeIdentifierStepCount': 'StepCount',
'HK*TypeIdentifierStepCount': 'StepCount',
'HKCharacteristicTypeIdentifierDateOfBirth': 'DateOfBirth',
'HKCharacteristicTypeIdentifierBiologicalSex': 'BiologicalSex',
'HKCharacteristicTypeIdentifierBloodType': 'BloodType',
'HKCharacteristicTypeIdentifierFitzpatrickSkinType':
'FitzpatrickSkinType',
}
unchanged = [
'',
'a',
'aHKQuantityTypeIdentifierHeight',
'HKQuantityTypeIdentityHeight',
]
for (k, v) in changed.items():
self.assertEqual((k, abbreviate(k)), (k, v))
self.assertEqual((k, abbreviate(k, False)), (k, k))
for k in unchanged:
self.assertEqual((k, abbreviate(k)), (k, k))
def test_encode(self):
        # This test looks strange, but because of the import statements
# from __future__ import unicode_literals
# in Python 2, type('a') is unicode, and the point of the encode
# function is to ensure that it has been converted to a UTF-8 string
# before writing to file.
self.assertEqual(type(encode('a')), str)
def test_extracted_reference_stats(self):
path = copy_test_data()
data = HealthDataExtractor(path, verbose=VERBOSE)
self.assertEqual(data.n_nodes, 20)
expectedRecordCounts = [
('DistanceWalkingRunning', 5),
('StepCount', 10),
]
self.assertEqual(sorted(data.record_types.items()),
expectedRecordCounts)
self.assertEqual(data.n_nodes, 20)
expectedOtherCounts = [
('ActivitySummary', 2),
('Workout', 1),
]
self.assertEqual(sorted(data.other_types.items()),
expectedOtherCounts)
expectedTagCounts = [
('ActivitySummary', 2),
('ExportDate', 1),
('Me', 1),
('Record', 15),
('Workout', 1),
]
self.assertEqual(sorted(data.tags.items()),
expectedTagCounts)
expectedFieldCounts = [
('HKCharacteristicTypeIdentifierBiologicalSex', 1),
('HKCharacteristicTypeIdentifierBloodType', 1),
('HKCharacteristicTypeIdentifierDateOfBirth', 1),
('HKCharacteristicTypeIdentifierFitzpatrickSkinType', 1),
('activeEnergyBurned', 2),
('activeEnergyBurnedGoal', 2),
('activeEnergyBurnedUnit', 2),
('appleExerciseTime', 2),
('appleExerciseTimeGoal', 2),
('appleStandHours', 2),
('appleStandHoursGoal', 2),
('creationDate', 16),
('dateComponents', 2),
('duration', 1),
('durationUnit', 1),
('endDate', 16),
('sourceName', 16),
('sourceVersion', 1),
('startDate', 16),
('totalDistance', 1),
('totalDistanceUnit', 1),
('totalEnergyBurned', 1),
('totalEnergyBurnedUnit', 1),
('type', 15),
('unit', 15),
('value', 16),
('workoutActivityType', 1)
]
self.assertEqual(sorted(data.fields.items()),
expectedFieldCounts)
if __name__ == '__main__':
unittest.main()
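The format_value tests above pin down the quoting and escaping rules exactly. A minimal re-implementation consistent with those expectations is sketched below; it is inferred from the tests, not copied from applehealthdata.py, whose real implementation may differ.

def format_value_sketch(value, datatype):
    # Nulls become the empty string for every datatype (even the bogus 'z').
    if value is None:
        return ''
    # Numeric ('n') and date ('d') values pass through unchanged.
    if datatype in ('n', 'd'):
        return value
    # Strings ('s') are backslash-escaped and double-quoted;
    # backslashes must be escaped before quotes.
    if datatype == 's':
        escaped = value.replace('\\', '\\\\').replace('"', '\\"')
        return '"%s"' % escaped
    return value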
| 5,578
| 470
| 23
|
afb0bae78a861d61cb7bff15ba2ae3ac74bd95d9
| 4,024
|
py
|
Python
|
tests/test_books.py
|
FilippoPisello/Books-Read-DB
|
f68b33861dc66222d23d37b03e1abf1d9623c0ba
|
[
"MIT"
] | null | null | null |
tests/test_books.py
|
FilippoPisello/Books-Read-DB
|
f68b33861dc66222d23d37b03e1abf1d9623c0ba
|
[
"MIT"
] | null | null | null |
tests/test_books.py
|
FilippoPisello/Books-Read-DB
|
f68b33861dc66222d23d37b03e1abf1d9623c0ba
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import date
from controller.books import Book, BookRead
if __name__ == "__main__":
unittest.main()
| 33.815126
| 87
| 0.582256
|
import unittest
from datetime import date
from controller.books import Book, BookRead
class TestBooks(unittest.TestCase):
def setUp(self) -> None:
self.book1 = Book("Title", "Lorem", "Ipsum", 120, "Genre", False, None)
self.book2 = Book("title", "lorem", "ipsum", 120, "genre", True, ["Yes", "nO"])
def test_capitalization(self):
self.assertEqual(self.book1.title, "Title")
self.assertEqual(self.book2.title, "Title")
self.assertEqual(self.book1.author_name, "Lorem")
self.assertEqual(self.book2.author_name, "Lorem")
self.assertEqual(self.book1.author_surname, "Ipsum")
self.assertEqual(self.book2.author_surname, "Ipsum")
self.assertEqual(self.book1.genre, "Genre")
self.assertEqual(self.book2.genre, "Genre")
self.assertIsNone(self.book1.tags)
self.assertEqual(self.book2.tags, ["yes", "no"])
def test_ownedint(self):
self.assertEqual(self.book1.owned, 0)
self.assertEqual(self.book2.owned, 1)
def test_author(self):
self.assertEqual(self.book1.author, "Lorem Ipsum")
self.assertEqual(self.book2.author, "Lorem Ipsum")
def test_fromdict(self):
"""Test that Book class instance is correctly created from dict"""
dict1 = {
"Book title": "Lorem",
"Author Name": "Ipsum",
"Author Surname": "Dolor",
"Pages": "1",
"Genre": "Crime",
"Owned": True,
"Tags": None,
"Starting date": "2021-10-18",
"Ending date": "2021-10-20",
"Score": "3",
"Comment": None,
}
book1 = Book.from_gui_dict(dict1)
self.assertEqual(book1.title, "Lorem")
self.assertEqual(book1.author_name, "Ipsum")
self.assertEqual(book1.author_surname, "Dolor")
self.assertEqual(book1.pages, 1)
self.assertEqual(book1.genre, "Crime")
self.assertEqual(book1.owned, True)
self.assertEqual(book1.tags, None)
dict2 = {
"Book title": "Lorem",
"Author Name": "Ipsum",
"Author Surname": "Dolor",
"Pages": "1",
"Genre": "Crime",
"Owned": True,
"Tags": "Lorem,Ipsum, Dolor ",
}
book2 = Book.from_gui_dict(dict2)
self.assertEqual(book2.tags, ["lorem", "ipsum", "dolor"])
def test_strtags(self):
self.assertEqual(self.book1.string_tags, None)
self.assertEqual(self.book2.string_tags, "yes, no")
class TestBookRead(unittest.TestCase):
def setUp(self) -> None:
date1 = date(2020, 1, 1)
date2 = date(2020, 1, 30)
date3 = date(2020, 2, 15)
self.read1 = BookRead(1, date1, date2, 8, "Lorem")
self.read2 = BookRead(2, date2, date3, 1, None)
def test_bookreadid(self):
self.assertEqual(self.read1.bookread_id, "1-20200101")
self.assertEqual(self.read2.bookread_id, "2-20200130")
def test_daystoread(self):
self.assertEqual(self.read1.days_to_read, 30)
self.assertEqual(self.read2.days_to_read, 17)
def test_fromdict(self):
"""Test that Bookread class instance is correctly created from dict"""
dict1 = {
"Book title": "Lorem",
"Author Name": "Ipsum",
"Author Surname": "Dolor",
"Pages": "1",
"Genre": "Crime",
"Owned": True,
"Tags": None,
"Starting date": "2021-10-18",
"Ending date": "2021-10-20",
"Score": "3",
"Comment": None,
}
bookread1 = BookRead.from_gui_dict(dict1, 1)
self.assertEqual(bookread1.book_id, 1)
self.assertEqual(bookread1.start_date, date(2021, 10, 18))
self.assertEqual(bookread1.end_date, date(2021, 10, 20))
self.assertEqual(bookread1.out_of_ten_score, 3)
self.assertEqual(bookread1.comment, None)
if __name__ == "__main__":
unittest.main()
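As a usage note, the two GUI-dict constructors exercised above can be chained to build both records from one form submission. The sketch below only uses calls covered by these tests; the form dict mirrors the fixtures, and the book id 1 is hypothetical.

from controller.books import Book, BookRead

form = {
    "Book title": "Lorem",
    "Author Name": "Ipsum",
    "Author Surname": "Dolor",
    "Pages": "1",
    "Genre": "Crime",
    "Owned": True,
    "Tags": "Lorem,Ipsum, Dolor ",
    "Starting date": "2021-10-18",
    "Ending date": "2021-10-20",
    "Score": "3",
    "Comment": None,
}
book = Book.from_gui_dict(form)         # normalises capitalisation and tags
read = BookRead.from_gui_dict(form, 1)  # 1 stands in for the stored book id
print(book.author)                      # -> "Ipsum Dolor"
print(book.tags)                        # -> ["lorem", "ipsum", "dolor"]
print(read.bookread_id)                 # -> "1-20211018"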
| 1,525
| 2,315
| 46
|
28488c57149aff560b88dcc2034960548dbef7d0
| 5,430
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/realTimeTransportControlProtocol1733_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/realTimeTransportControlProtocol1733_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/realTimeTransportControlProtocol1733_template.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| 36.689189
| 113
| 0.68895
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RealTimeTransportControlProtocol1733(Base):
__slots__ = ()
_SDM_NAME = 'realTimeTransportControlProtocol1733'
_SDM_ATT_MAP = {
'RtcpHeaderVersion': 'realTimeTransportControlProtocol1733.rtcpHeader.version-1',
'RtcpHeaderPaddingBit': 'realTimeTransportControlProtocol1733.rtcpHeader.paddingBit-2',
'RtcpHeaderSubtype': 'realTimeTransportControlProtocol1733.rtcpHeader.subtype-3',
'RtcpHeaderPacketType': 'realTimeTransportControlProtocol1733.rtcpHeader.packetType-4',
'RtcpHeaderMessageLength': 'realTimeTransportControlProtocol1733.rtcpHeader.messageLength-5',
'RtcpHeaderSsrc/csrc': 'realTimeTransportControlProtocol1733.rtcpHeader.ssrc/csrc-6',
'RtcpHeaderName': 'realTimeTransportControlProtocol1733.rtcpHeader.name-7',
'RtcpHeaderGmTimeBaseIndicator': 'realTimeTransportControlProtocol1733.rtcpHeader.gmTimeBaseIndicator-8',
'RtcpHeaderGmIdentity': 'realTimeTransportControlProtocol1733.rtcpHeader.gmIdentity-9',
'RtcpHeaderStream_id': 'realTimeTransportControlProtocol1733.rtcpHeader.stream_id-10',
'RtcpHeaderAsTimestamp': 'realTimeTransportControlProtocol1733.rtcpHeader.asTimestamp-11',
'RtcpHeaderRtpTimestamp': 'realTimeTransportControlProtocol1733.rtcpHeader.rtpTimestamp-12',
}
def __init__(self, parent, list_op=False):
super(RealTimeTransportControlProtocol1733, self).__init__(parent, list_op)
@property
def RtcpHeaderVersion(self):
"""
Display Name: Version
Default Value: 0x2
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderVersion']))
@property
def RtcpHeaderPaddingBit(self):
"""
Display Name: Padding
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderPaddingBit']))
@property
def RtcpHeaderSubtype(self):
"""
Display Name: Subtype
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderSubtype']))
@property
def RtcpHeaderPacketType(self):
"""
Display Name: Packet Type
Default Value: 208
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderPacketType']))
@property
def RtcpHeaderMessageLength(self):
"""
Display Name: Message Length
Default Value: 0x9
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderMessageLength']))
@property
def RtcpHeaderSsrccsrc(self):
"""
Display Name: SSRC/CSRC
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderSsrc/csrc']))
@property
def RtcpHeaderName(self):
"""
Display Name: Name
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderName']))
@property
def RtcpHeaderGmTimeBaseIndicator(self):
"""
Display Name: gm Time Base Indicator
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderGmTimeBaseIndicator']))
@property
def RtcpHeaderGmIdentity(self):
"""
Display Name: gm Identity
Default Value: 0x00
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderGmIdentity']))
@property
def RtcpHeaderStream_id(self):
"""
Display Name: Stream Id
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderStream_id']))
@property
def RtcpHeaderAsTimestamp(self):
"""
Display Name: As Timestamp
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderAsTimestamp']))
@property
def RtcpHeaderRtpTimestamp(self):
"""
Display Name: RTP Timestamp
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RtcpHeaderRtpTimestamp']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
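Since the class above is essentially a declarative field map, the quickest way to see which RTCP/1733 header fields it exposes is to walk _SDM_ATT_MAP directly. The snippet below only touches the mapping defined above and does not require a live IxNetwork session.

from ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.configelement.stack.realTimeTransportControlProtocol1733_template import RealTimeTransportControlProtocol1733

# Print the friendly attribute names next to their SDM attribute paths.
for friendly, sdm_path in sorted(RealTimeTransportControlProtocol1733._SDM_ATT_MAP.items()):
    print(f"{friendly:35s} -> {sdm_path}")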
| 173
| 5,153
| 23
|
37db3f8e0076e46058d8fe443f9070145cfe0c5a
| 896
|
py
|
Python
|
tests/resources/project/conftest.py
|
gitter-badger/charybdis
|
1505258db0a43c5cb6e64e0513c74c6651df92fd
|
[
"MIT"
] | null | null | null |
tests/resources/project/conftest.py
|
gitter-badger/charybdis
|
1505258db0a43c5cb6e64e0513c74c6651df92fd
|
[
"MIT"
] | null | null | null |
tests/resources/project/conftest.py
|
gitter-badger/charybdis
|
1505258db0a43c5cb6e64e0513c74c6651df92fd
|
[
"MIT"
] | 1
|
2019-10-10T23:46:33.000Z
|
2019-10-10T23:46:33.000Z
|
import pytest
@pytest.yield_fixture(scope="module")
@pytest.yield_fixture(scope="module")
@pytest.yield_fixture(scope="module")
| 20.363636
| 49
| 0.691964
|
import pytest
@pytest.yield_fixture(scope="module")
def test_domain_project(test_admin_client):
domain = test_admin_client.Domain()
domain.slug = "test-project"
domain.name = "Test Domain"
domain.description = "Test" * 10
domain.save()
yield domain
domain.destroy()
@pytest.yield_fixture(scope="module")
def test_domain(test_admin_client):
domain = test_admin_client.Domain()
domain.slug = "test"
domain.name = "Test Domain"
domain.description = "Test" * 10
domain.save()
yield domain
domain.destroy()
@pytest.yield_fixture(scope="module")
def test_project(test_admin_client, test_domain):
project = test_admin_client.Project()
project.slug = "test"
project.name = "Test"
project.description = project.name * 10
project.domain_id = test_domain.id
project.save()
yield project
project.destroy()
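A hypothetical test consuming these fixtures (not part of the original file) only needs to request them by name; pytest resolves the dependency chain, and the project fixture above wires the domain in through domain_id.

def test_project_belongs_to_domain(test_project, test_domain):
    # The test_project fixture sets domain_id = test_domain.id before yielding.
    assert test_project.domain_id == test_domain.id
    assert test_project.slug == "test"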
| 696
| 0
| 66
|
ec861e6dda12fb8898b391b55324a0cbbdef264e
| 1,973
|
py
|
Python
|
ProjectDemo/scripts/test/threadScheduler.py
|
ryrynoryry/Immersive-Illumination
|
d0e28fc246850648ffc2eca81b0da3daed994d07
|
[
"MIT"
] | null | null | null |
ProjectDemo/scripts/test/threadScheduler.py
|
ryrynoryry/Immersive-Illumination
|
d0e28fc246850648ffc2eca81b0da3daed994d07
|
[
"MIT"
] | null | null | null |
ProjectDemo/scripts/test/threadScheduler.py
|
ryrynoryry/Immersive-Illumination
|
d0e28fc246850648ffc2eca81b0da3daed994d07
|
[
"MIT"
] | null | null | null |
import threading
import os.path
import time
from blueThread import MainBlue
# class myThread (threading.Thread):
# def __init__(self, threadID, name, counter):
# threading.Thread.__init__(self)
# self.threadID = threadID
# self.name = name
# self.counter = counter
# def run(self):
# print("Starting " + self.name)
# print_time(self.name, 5, self.counter)
# print("Exiting " + self.name)
run = True
foo = [False]
fileName = ""
def LookForFile(strToFind, path):
"""
    Repeatedly run MainBlue with the shared foo flag until run is cleared.
"""
while run:
MainBlue(foo)
time.sleep(1)
print("exiting file thread!")
def LookForStop(strToFind, path):
"""
    Repeatedly look for a stop file until it appears or run is cleared.
"""
global run
count = 0
filePath = path + strToFind
while run:
count += 1
if os.path.exists(filePath):
run = False
print("{0} FOUND {1} at {2} [{3}]".format(t2.getName(), strToFind, filePath, count))
else:
print("{0} not found {1} at {2} [{3}]".format(t2.getName(), strToFind, filePath, count))
time.sleep(1)
print("exiting stop thread!")
if __name__ == "__main__":
# creating thread
t1 = threading.Thread(target=LookForFile, name="THREAD_Finder", args=("rain","../"), daemon=True)
# t2 = threading.Thread(name="THREAD_Stopper", target=LookForStop, args=("stop","../"), daemon=True)
# starting thread 1
t1.start()
# starting thread 2
# t2.start()
# while run:
# print("doing nothing...")
# time.sleep(10)
input("Press Enter to flip foo")
if foo[0]:
foo[0] = False
else:
foo[0] = True
input("Press Enter to exit")
run = False
# wait until thread 1 is completely executed
t1.join()
# wait until thread 2 is completely executed
# t2.join()
# both threads completely executed
print("Done!")
| 26.662162
| 105
| 0.583376
|
import threading
import os.path
import time
from blueThread import MainBlue
# class myThread (threading.Thread):
# def __init__(self, threadID, name, counter):
# threading.Thread.__init__(self)
# self.threadID = threadID
# self.name = name
# self.counter = counter
# def run(self):
# print("Starting " + self.name)
# print_time(self.name, 5, self.counter)
# print("Exiting " + self.name)
run = True
foo = [False]
fileName = ""
def LookForFile(strToFind, path):
"""
    Repeatedly run MainBlue with the shared foo flag until run is cleared.
"""
while run:
MainBlue(foo)
time.sleep(1)
print("exiting file thread!")
def LookForStop(strToFind, path):
"""
    Repeatedly look for a stop file until it appears or run is cleared.
"""
global run
count = 0
filePath = path + strToFind
while run:
count += 1
if os.path.exists(filePath):
run = False
print("{0} FOUND {1} at {2} [{3}]".format(t2.getName(), strToFind, filePath, count))
else:
print("{0} not found {1} at {2} [{3}]".format(t2.getName(), strToFind, filePath, count))
time.sleep(1)
print("exiting stop thread!")
if __name__ == "__main__":
# creating thread
t1 = threading.Thread(target=LookForFile, name="THREAD_Finder", args=("rain","../"), daemon=True)
# t2 = threading.Thread(name="THREAD_Stopper", target=LookForStop, args=("stop","../"), daemon=True)
# starting thread 1
t1.start()
# starting thread 2
# t2.start()
# while run:
# print("doing nothing...")
# time.sleep(10)
input("Press Enter to flip foo")
if foo[0]:
foo[0] = False
else:
foo[0] = True
input("Press Enter to exit")
run = False
# wait until thread 1 is completely executed
t1.join()
# wait until thread 2 is completely executed
# t2.join()
# both threads completely executed
print("Done!")
| 0
| 0
| 0
|
d22c2da0d5387f8d7a44708da0325afb1a53d6ac
| 78,592
|
py
|
Python
|
test/db/__init__.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 150
|
2016-10-05T11:09:36.000Z
|
2022-03-06T16:24:41.000Z
|
test/db/__init__.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 27
|
2017-03-02T03:37:02.000Z
|
2022-02-10T04:59:54.000Z
|
test/db/__init__.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 21
|
2016-11-11T07:51:48.000Z
|
2020-04-26T21:38:33.000Z
|
import time
from datetime import datetime
from datetime import timedelta
from uuid import uuid4 as uuid
from activitystreams import parse
from dino import environ
from dino.auth.redis import AuthRedis
from dino.cache.redis import CacheRedis
from dino.config import ApiActions, RedisKeys
from dino.config import ConfigKeys
from dino.config import SessionKeys
from dino.config import UserKeys
from dino.db.rdbms.handler import DatabaseRdbms
from dino.environ import ConfigDict
from dino.environ import GNEnvironment
from dino.exceptions import ChannelExistsException
from dino.exceptions import ChannelNameExistsException
from dino.exceptions import EmptyChannelNameException
from dino.exceptions import EmptyRoomNameException
from dino.exceptions import InvalidAclTypeException
from dino.exceptions import InvalidApiActionException
from dino.exceptions import NoSuchChannelException
from dino.exceptions import NoSuchRoomException
from dino.exceptions import NoSuchUserException
from dino.exceptions import RoomExistsException
from dino.exceptions import RoomNameExistsForChannelException
from dino.exceptions import UserExistsException
from dino.exceptions import ValidationException
from dino.validation.acl import AclDisallowValidator
from dino.validation.acl import AclIsAdminValidator
from dino.validation.acl import AclIsSuperUserValidator
from dino.validation.acl import AclRangeValidator
from dino.validation.acl import AclSameChannelValidator
from dino.validation.acl import AclSameRoomValidator
from dino.validation.acl import AclStrInCsvValidator
from test.base import BaseTest
| 42.852781
| 143
| 0.693607
|
import time
from datetime import datetime
from datetime import timedelta
from uuid import uuid4 as uuid
from activitystreams import parse
from dino import environ
from dino.auth.redis import AuthRedis
from dino.cache.redis import CacheRedis
from dino.config import ApiActions, RedisKeys
from dino.config import ConfigKeys
from dino.config import SessionKeys
from dino.config import UserKeys
from dino.db.rdbms.handler import DatabaseRdbms
from dino.environ import ConfigDict
from dino.environ import GNEnvironment
from dino.exceptions import ChannelExistsException
from dino.exceptions import ChannelNameExistsException
from dino.exceptions import EmptyChannelNameException
from dino.exceptions import EmptyRoomNameException
from dino.exceptions import InvalidAclTypeException
from dino.exceptions import InvalidApiActionException
from dino.exceptions import NoSuchChannelException
from dino.exceptions import NoSuchRoomException
from dino.exceptions import NoSuchUserException
from dino.exceptions import RoomExistsException
from dino.exceptions import RoomNameExistsForChannelException
from dino.exceptions import UserExistsException
from dino.exceptions import ValidationException
from dino.validation.acl import AclDisallowValidator
from dino.validation.acl import AclIsAdminValidator
from dino.validation.acl import AclIsSuperUserValidator
from dino.validation.acl import AclRangeValidator
from dino.validation.acl import AclSameChannelValidator
from dino.validation.acl import AclSameRoomValidator
from dino.validation.acl import AclStrInCsvValidator
from test.base import BaseTest
class BaseDatabaseTest(BaseTest):
class FakeRequest(object):
def __init__(self):
self.sid = str(uuid())
class FakeEnv(GNEnvironment):
def __init__(self):
super(BaseDatabaseTest.FakeEnv, self).__init__(None, ConfigDict(), skip_init=True)
self.config = ConfigDict()
self.cache = CacheRedis(self, 'mock')
self.session = dict()
self.node = 'test'
self.auth = AuthRedis(env=self, host='mock')
self.request = BaseDatabaseTest.FakeRequest()
MESSAGE_ID = str(uuid())
def set_up_env(self, db):
self.env = BaseDatabaseTest.FakeEnv()
self.env.config.set(ConfigKeys.TESTING, False)
all_acls = [
'age',
'gender',
'membership',
'group',
'country',
'city',
'image',
'has_webcam',
'fake_checked',
'owner',
'admin',
'moderator',
'superuser',
'crossroom',
'samechannel',
'sameroom',
'disallow'
]
self.env.config.set(ConfigKeys.ACL, {
'room': {
'join': {
'acls': all_acls
},
'message': {
'acls': all_acls
},
'history': {
'acls': all_acls
},
'crossroom': {
'acls': all_acls
}
},
'channel': {
'message': {
'acls': all_acls
},
'list': {
'acls': all_acls
},
'crossroom': {
'acls': all_acls
}
},
'available': {
'acls': all_acls
},
'validation': {
'samechannel': {
'type': 'samechannel',
'value': AclSameChannelValidator()
},
'sameroom': {
'type': 'sameroom',
'value': AclSameRoomValidator()
},
'country': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator()
},
'disallow': {
'type': 'disallow',
'value': AclDisallowValidator()
},
'gender': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator('m,f')
},
'membership': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator()
},
'city': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator()
},
'has_webcam': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator('y,n')
},
'fake_checked': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator('y,n')
},
'image': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator('y,n')
},
'group': {
'type': 'str_in_csv',
'value': AclStrInCsvValidator('')
},
'age': {
'type': 'range',
'value': AclRangeValidator()
},
'admin': {
'type': 'is_admin',
'value': AclIsAdminValidator()
},
'superuser': {
'type': 'is_super_user',
'value': AclIsSuperUserValidator()
}
}
}
)
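        # The 'validation' block above maps each ACL key to its validator:
        # 'age' uses AclRangeValidator, 'gender' is limited to the CSV set
        # 'm,f', and 'samechannel'/'sameroom' back the cross-room checks
        # used by _set_allow_cross_group() further down.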
self.env.session[SessionKeys.user_name.value] = BaseTest.USER_NAME
if db == 'postgres':
self.env.config.set(ConfigKeys.DRIVER, 'postgres+psycopg2', domain=ConfigKeys.DATABASE)
self.env.config.set(ConfigKeys.HOST, 'localhost', domain=ConfigKeys.DATABASE)
self.env.config.set(ConfigKeys.PORT, 5432, domain=ConfigKeys.DATABASE)
self.env.config.set(ConfigKeys.DB, 'dinotest', domain=ConfigKeys.DATABASE)
self.env.config.set(ConfigKeys.USER, 'dinouser', domain=ConfigKeys.DATABASE)
self.env.config.set(ConfigKeys.PASSWORD, 'dinopass', domain=ConfigKeys.DATABASE)
self.db = DatabaseRdbms(self.env)
elif db == 'sqlite':
self.env.config.set(ConfigKeys.DRIVER, 'sqlite', domain=ConfigKeys.DATABASE)
self.db = DatabaseRdbms(self.env)
elif db == 'redis':
from dino.db.redis import DatabaseRedis
self.db = DatabaseRedis(self.env, 'mock', db=99)
self.db.redis.flushall()
else:
raise ValueError('unknown type %s' % db)
environ.env.config = self.env.config
environ.env.db = self.db
environ.env.db.create_user(BaseDatabaseTest.USER_ID, BaseDatabaseTest.USER_NAME)
def act_message(self):
data = self.activity_for_message()
data['id'] = BaseDatabaseTest.MESSAGE_ID
data['target']['objectType'] = 'room'
data['published'] = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%SZ')
return parse(data)
def act_create(self):
data = self.activity_for_create()
data['target']['id'] = BaseTest.ROOM_ID
data['published'] = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%SZ')
return parse(data)
def _test_room_exists(self):
self.assertFalse(self._room_exists())
def _test_create_room_no_channel(self):
self.assertRaises(NoSuchChannelException, self._create_room)
def _test_create_existing_channel(self):
self._create_channel()
self.assertRaises(ChannelExistsException, self._create_channel)
def _test_leave_room_before_create(self):
self._create_channel()
self.assertRaises(NoSuchRoomException, self.db.leave_room, BaseTest.USER_ID, BaseTest.ROOM_ID)
def _test_leave_room_not_joined(self):
self._create_channel()
self._create_room()
rooms = self._rooms_for_user()
self.assertEqual(0, len(rooms))
self._leave()
rooms = self._rooms_for_user()
self.assertEqual(0, len(rooms))
def _test_leave_room_joined(self):
self._create_channel()
self._create_room()
rooms = self._rooms_for_user()
self.assertEqual(0, len(rooms))
self._join()
rooms = self._rooms_for_user()
self.assertEqual(1, len(rooms))
self._leave()
rooms = self._rooms_for_user()
self.assertEqual(0, len(rooms))
def _test_set_moderator_no_room(self):
self.assertRaises(NoSuchRoomException, self._set_moderator)
self.assertFalse(self._is_moderator())
def _test_set_moderator_with_room(self):
self._create_channel()
self._create_room()
self._set_moderator()
self.assertTrue(self._is_moderator())
def _test_set_room_owner_no_room(self):
self.assertRaises(NoSuchRoomException, self._set_owner_room)
self.assertFalse(self._is_owner_room())
def _test_set_room_owner_with_room(self):
self._create_channel()
self._create_room()
self._set_owner_room()
self.assertTrue(self._is_owner_room())
def _test_set_channel_owner_no_channel(self):
self.assertRaises(NoSuchChannelException, self._set_owner_channel)
self.assertFalse(self._is_owner_channel())
def _test_set_channel_owner_with_channel(self):
self._create_channel()
self._set_owner_channel()
self.assertTrue(self._is_owner_channel())
def _test_create_room(self):
self.assertFalse(self._room_exists())
self._create_channel()
self._create_room()
self.assertTrue(self._room_exists())
rooms = self.db.rooms_for_channel(BaseDatabaseTest.CHANNEL_ID)
self.assertEqual(1, len(rooms))
def _test_create_room_blank_name(self):
self._create_channel()
self.assertRaises(
EmptyRoomNameException, self.db.create_room,
'', BaseTest.ROOM_ID, BaseTest.CHANNEL_ID, BaseTest.USER_ID, BaseTest.USER_NAME)
def _test_create_existing_room(self):
self._create_channel()
self._create_room()
self.assertRaises(RoomExistsException, self._create_room)
def _test_create_existing_room_name(self):
self._create_channel()
self._create_room()
self.assertRaises(RoomNameExistsForChannelException, self._create_room, str(uuid()))
def _test_channel_exists_after_create(self):
self._create_channel()
self.assertTrue(self._channel_exists())
def _test_channel_exists_before_create(self):
self.assertFalse(self._channel_exists())
def _test_room_name_exists_before_create(self):
self.assertFalse(self._room_name_exists())
def _test_room_name_exists_after_create(self):
self._create_channel()
self._create_room()
self.assertTrue(self._room_name_exists())
def _test_room_name_exists_from_cache_after_create(self):
self._create_channel()
self._create_room()
self.assertTrue(self._room_name_exists())
self.assertTrue(self._room_name_exists())
def _test_get_channels_before_create(self):
self.assertEqual(0, len(self._get_channels()))
def _test_get_channels_after_create(self):
self._create_channel()
channels = self._get_channels()
self.assertEqual(1, len(channels))
self.assertTrue(BaseTest.CHANNEL_ID in channels.keys())
self.assertTrue(any((BaseTest.CHANNEL_NAME == channel_info[0] for channel_info in channels.values())))
def _test_rooms_for_channel_before_create_channel(self):
self.assertEqual(0, len(self._rooms_for_channel()))
def _test_rooms_for_channel_after_create_channel_before_create_room(self):
self._create_channel()
self.assertEqual(0, len(self._rooms_for_channel()))
def _test_rooms_for_channel_after_create_channel_after_create_room(self):
self._create_channel()
self._create_room()
rooms = self._rooms_for_channel()
self.assertEqual(1, len(rooms))
self.assertTrue(BaseTest.ROOM_ID in rooms.keys())
self.assertTrue(BaseTest.ROOM_NAME == list(rooms.values())[0]['name'])
def _test_rooms_for_user_before_joining(self):
self._create_channel()
self._create_room()
self.assertEqual(0, len(self.rooms_for_user()))
def _test_rooms_for_user_after_joining(self):
self._create_channel()
self._create_room()
self._join()
rooms = self.rooms_for_user()
self.assertEqual(1, len(rooms))
self.assertTrue(BaseTest.ROOM_ID in rooms.keys())
self.assertTrue(BaseTest.ROOM_NAME in rooms.values())
def _test_remove_current_rooms_for_user_before_joining(self):
self.db.remove_current_rooms_for_user(BaseTest.USER_ID)
self.assertEqual(0, len(self._rooms_for_user()))
def _test_remove_current_rooms_for_user_after_joining(self):
self._create_channel()
self._create_room()
self._join()
rooms = self._rooms_for_user()
self.assertEqual(1, len(rooms))
self.assertTrue(BaseTest.ROOM_ID in rooms.keys())
self.assertTrue(BaseTest.ROOM_NAME in rooms.values())
self.db.remove_sid_for_user_in_room(BaseTest.USER_ID, BaseTest.ROOM_ID, self.env.request.sid)
self.db.remove_current_rooms_for_user(BaseTest.USER_ID)
self.assertEqual(0, len(self._rooms_for_user()))
def _test_get_user_status_before_set(self, status):
self.assertEqual(status, self._user_status())
def _test_set_user_offline(self, status):
self._set_offline()
self.assertEqual(status, self._user_status())
def _test_room_contains_before_create_channel(self):
self.assertRaises(NoSuchRoomException, self.db.room_contains, BaseTest.ROOM_ID, BaseTest.USER_ID)
def _test_room_contains_before_create_room(self):
self.assertRaises(NoSuchRoomException, self.db.room_contains, BaseTest.ROOM_ID, BaseTest.USER_ID)
def _test_room_contains_after_create(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.room_contains(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_room_contains_after_join(self):
self._create_channel()
self._create_room()
self._join()
self.assertTrue(self.db.room_contains(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_set_user_offline_after_online(self):
self._set_online()
self._set_offline()
self.assertEqual(UserKeys.STATUS_UNAVAILABLE, self._user_status())
def _test_create_channel(self):
self.assertFalse(self.db.channel_exists(BaseTest.CHANNEL_ID))
self.db.create_channel(BaseTest.CHANNEL_NAME, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.channel_exists(BaseTest.CHANNEL_ID))
def _test_create_channel_blank_name(self):
self.assertFalse(self.db.channel_exists(BaseTest.CHANNEL_ID))
self.assertRaises(EmptyChannelNameException, self.db.create_channel, '', BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.channel_exists(BaseTest.CHANNEL_ID))
def _test_create_channel_exists(self):
self.assertFalse(self.db.channel_exists(BaseTest.CHANNEL_ID))
self.db.create_channel(BaseTest.CHANNEL_NAME, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.channel_exists(BaseTest.CHANNEL_ID))
self.assertRaises(ChannelExistsException, self.db.create_channel, BaseTest.CHANNEL_NAME, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _test_set_user_online(self, status):
self._set_online()
self.assertEqual(status, self._user_status())
def _test_set_user_invisible(self, status):
self._set_invisible()
self.assertEqual(status, self._user_status())
def _test_is_admin_before_create(self):
self.assertFalse(self._is_admin())
def _test_is_admin_after_create(self):
self._create_channel()
self.assertFalse(self._is_admin())
def _test_is_admin_after_create_set_admin(self):
self._create_channel()
self._set_admin()
self.assertTrue(self._is_admin())
def _test_is_moderator_before_create(self):
self.assertFalse(self._is_moderator())
def _test_is_moderator_after_create(self):
self._create_channel()
self._create_room()
self.assertFalse(self._is_moderator())
def _test_is_moderator_after_create_set_moderator(self):
self._create_channel()
self._create_room()
self._set_moderator()
        self.assertTrue(self._is_moderator())
def _test_channel_for_room_no_channel(self):
self.assertRaises(NoSuchRoomException, self._channel_for_room)
def _test_channel_for_room_with_channel_without_room(self):
self._create_channel()
self.assertRaises(NoSuchRoomException, self._channel_for_room)
def _test_channel_for_room_with_channel_with_room(self):
self._create_channel()
self._create_room()
self._channel_for_room()
def _test_channel_for_room_from_cache(self):
self._create_channel()
self._create_room()
channel_id_1 = self._channel_for_room()
channel_id_2 = self._channel_for_room()
self.assertIsNotNone(channel_id_1)
self.assertEqual(channel_id_1, channel_id_2)
def _channel_for_room(self):
return self.db.channel_for_room(BaseTest.ROOM_ID)
def _set_moderator(self):
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
def _set_admin(self):
self.db.set_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _is_moderator(self):
return self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
def _is_admin(self):
return self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _user_status(self):
return self.db.get_user_status(BaseTest.USER_ID)
def _set_owner_room(self):
self.db.set_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
def _set_owner_channel(self):
self.db.set_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _is_owner_room(self):
return self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
def _is_owner_channel(self):
return self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _set_offline(self):
self.db.set_user_offline(BaseTest.USER_ID)
def _set_online(self):
self.db.set_user_online(BaseTest.USER_ID)
def _set_invisible(self):
self.db.set_user_invisible(BaseTest.USER_ID)
def _rooms_for_user(self):
return self.db.rooms_for_user(BaseTest.USER_ID)
def _get_user_name_for(self):
return self.db.get_user_name_for(BaseTest.USER_ID)
def _join(self):
self.db.join_room(BaseTest.USER_ID, BaseTest.USER_NAME, BaseTest.ROOM_ID, BaseTest.ROOM_NAME)
def _leave(self):
self.db.leave_room(BaseTest.USER_ID, BaseTest.ROOM_ID)
def rooms_for_user(self):
return self.db.rooms_for_user(BaseTest.USER_ID)
def _rooms_for_channel(self):
return self.db.rooms_for_channel(BaseTest.CHANNEL_ID)
def _get_channels(self):
return self.db.get_channels()
def _channel_exists(self):
return self.db.channel_exists(BaseTest.CHANNEL_ID)
def _room_exists(self):
return self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID)
def _create_channel(self):
self.db.create_channel(BaseTest.CHANNEL_NAME, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _create_room(self, room_id=BaseTest.ROOM_ID):
self.db.create_room(
BaseTest.ROOM_NAME, room_id, BaseTest.CHANNEL_ID, BaseTest.USER_ID, BaseTest.USER_NAME)
def _room_name_exists(self):
return self.db.room_name_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_NAME)
def _test_delete_one_non_existing_acl(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, acls)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
self.db.delete_acl_in_room_for_action(BaseTest.ROOM_ID, 'image', ApiActions.JOIN)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
def _test_add_one_extra_acl(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, acls)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, {'image': 'y'})
acls['image'] = 'y'
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
def _test_update_acl(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, acls)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, {'gender': 'f'})
acls['gender'] = 'f'
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
def _test_get_all_acls_channel(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST, acls)
fetched = self.db.get_all_acls_channel(BaseTest.CHANNEL_ID)
self.assertIn(ApiActions.LIST, fetched.keys())
self.assertEqual(1, len(list(fetched.keys())))
self.assertEqual(fetched, {ApiActions.LIST: acls})
def _test_get_all_acls_channel_before_create(self):
self.assertRaises(NoSuchChannelException, self.db.get_all_acls_channel, BaseTest.CHANNEL_ID)
def _test_get_all_acls_room(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, acls)
fetched = self.db.get_all_acls_room(BaseTest.ROOM_ID)
self.assertIn(ApiActions.JOIN, fetched.keys())
self.assertEqual(1, len(list(fetched.keys())))
self.assertEqual(fetched, {ApiActions.JOIN: acls})
def _test_get_all_acls_room_before_create(self):
self.assertRaises(NoSuchRoomException, self.db.get_all_acls_room, BaseTest.ROOM_ID)
def _test_get_acl(self):
self._create_channel()
self._create_room()
self.assertEqual(0, len(self.db.get_all_acls_room(BaseTest.ROOM_ID)))
def _test_set_acl(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, acls)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
def _test_delete_one_acl(self):
self._create_channel()
self._create_room()
acls = {
'gender': 'm,f',
'membership': '0,1,2'
}
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, acls)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
del acls['gender']
self.db.delete_acl_in_room_for_action(BaseTest.ROOM_ID, 'gender', ApiActions.JOIN)
fetched = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(fetched.items(), acls.items())
def _test_set_room_allows_cross_group_messaging(self):
self._create_channel()
self._create_room()
self._set_allow_cross_group()
def _test_get_room_allows_cross_group_messaging_no_room(self):
self._create_channel()
self.assertRaises(NoSuchRoomException, self._room_allows_cross_group_messaging)
def _test_get_room_allows_cross_group_messaging(self):
self._create_channel()
self._create_room()
self._set_allow_cross_group()
self.assertTrue(self._room_allows_cross_group_messaging())
def _test_get_room_does_not_allow_cross_group_messaging(self):
self._create_channel()
self._create_room()
self.assertFalse(self._room_allows_cross_group_messaging())
def _test_room_allows_cross_group_messaging_no_channel(self):
self.assertRaises(NoSuchChannelException, self._room_allows_cross_group_messaging)
def _test_room_allows_cross_group_messaging_no_room(self):
self._create_channel()
self.assertRaises(NoSuchRoomException, self._room_allows_cross_group_messaging)
def _test_room_allows_cross_group_messaging(self):
self._create_channel()
self._create_room()
self._set_allow_cross_group()
self.assertTrue(self._room_allows_cross_group_messaging())
def _test_room_does_not_allow_cross_group_messaging_no_room(self):
self._create_channel()
self._create_room()
self.assertFalse(self._room_allows_cross_group_messaging())
def _set_allow_cross_group(self):
self.db.add_acls_in_channel_for_action(
BaseTest.CHANNEL_ID, ApiActions.CROSSROOM, {'samechannel': ''})
self.db.add_acls_in_room_for_action(
BaseTest.ROOM_ID, ApiActions.CROSSROOM, {'samechannel': ''})
def _room_allows_cross_group_messaging(self):
channel_acls = self.db.get_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.CROSSROOM)
if 'disallow' in channel_acls.keys():
return False
room_acls = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.CROSSROOM)
if 'disallow' in room_acls.keys():
return False
return 'samechannel' in channel_acls or 'samechannel' in room_acls
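    # Note on the helper above: a 'disallow' ACL on either the channel or the
    # room blocks cross-room messaging outright; otherwise it is allowed as
    # soon as either level carries the 'samechannel' ACL.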
def _test_create_admin_room(self):
self._create_channel()
self.db.create_admin_room()
room_id = self.db.get_admin_room()
self.assertIsNotNone(room_id)
def _test_admin_room_before_exists(self):
self._create_channel()
room_uuid = self.db.get_admin_room()
self.assertIsNone(room_uuid)
def _test_admin_room_get_from_cache(self):
self._create_channel()
self.db.create_admin_room()
room_uuid_1 = self.db.get_admin_room()
self.assertIsNotNone(room_uuid_1)
room_uuid_2 = self.db.get_admin_room()
self.assertIsNotNone(room_uuid_2)
self.assertEqual(room_uuid_1, room_uuid_2)
def _test_get_user_status_after_set(self):
self.assertEqual(UserKeys.STATUS_UNAVAILABLE, self.db.get_user_status(BaseTest.USER_ID))
self.db.set_user_online(BaseTest.USER_ID)
self.assertEqual(UserKeys.STATUS_AVAILABLE, self.db.get_user_status(BaseTest.USER_ID))
def _test_set_user_invisible_twice_ignores_second(self):
self.db.set_user_invisible(BaseTest.USER_ID)
self.db.set_user_invisible(BaseTest.USER_ID)
self.assertEqual(UserKeys.STATUS_INVISIBLE, self.db.get_user_status(BaseTest.USER_ID))
def _test_set_user_offline_twice_ignores_second(self):
self.db.set_user_offline(BaseTest.USER_ID)
self.db.set_user_offline(BaseTest.USER_ID)
self.assertEqual(UserKeys.STATUS_UNAVAILABLE, self.db.get_user_status(BaseTest.USER_ID))
def _test_set_user_online_twice_ignores_second(self):
self.db.set_user_online(BaseTest.USER_ID)
self.db.set_user_online(BaseTest.USER_ID)
self.assertEqual(UserKeys.STATUS_AVAILABLE, self.db.get_user_status(BaseTest.USER_ID))
def _test_room_exists_from_cache(self):
self._create_channel()
self._create_room()
exists_1 = self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID)
exists_2 = self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID)
self.assertEqual(exists_1, exists_2)
self.assertTrue(exists_1)
self.assertFalse(self.db.room_exists(str(uuid()), str(uuid())))
def _test_get_user_status_from_cache(self):
status_1 = self.db.get_user_status(BaseTest.USER_ID)
status_2 = self.db.get_user_status(BaseTest.USER_ID)
self.assertEqual(UserKeys.STATUS_UNAVAILABLE, status_1)
self.assertEqual(status_1, status_2)
def _test_is_super_user(self):
self.assertFalse(self.db.is_super_user(BaseTest.USER_ID))
self.db.set_super_user(BaseTest.USER_ID)
self.assertTrue(self.db.is_super_user(BaseTest.USER_ID))
def _test_get_admin_room(self):
self._create_channel()
room_id = self.db.create_admin_room()
self.assertIsNotNone(room_id)
def _test_set_owner_channel_after_removing_owner(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.set_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_set_owner_and_moderator(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.db.set_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_remove_channel_role(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_remove_room_role(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
        self.assertFalse(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_get_super_users(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_super_user(BaseTest.USER_ID))
self.db.set_super_user(BaseTest.USER_ID)
self.assertTrue(self.db.is_super_user(BaseTest.USER_ID))
super_users = self.db.get_super_users()
self.assertEqual(1, len(super_users))
self.assertIn(BaseTest.USER_ID, super_users.keys())
self.assertIn(BaseTest.USER_NAME, super_users.values())
def _test_remove_super_user(self):
self.assertFalse(self.db.is_super_user(BaseTest.USER_ID))
self.db.set_super_user(BaseTest.USER_ID)
self.assertTrue(self.db.is_super_user(BaseTest.USER_ID))
self.db.remove_super_user(BaseTest.USER_ID)
self.assertFalse(self.db.is_super_user(BaseTest.USER_ID))
def _test_remove_owner(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_remove_channel_owner(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_remove_admin(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.set_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_remove_moderator(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_remove_moderator_twice(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_remove_moderator_no_such_room(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.assertRaises(NoSuchRoomException, self.db.remove_moderator, str(uuid()), BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_set_owner_is_unique(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
users = self.db.get_owners_room(BaseTest.ROOM_ID)
self.assertEqual(1, len(users))
self.assertIn(BaseTest.USER_ID, users.keys())
self.assertIn(BaseTest.USER_NAME, users.values())
def _test_set_owner_channel_is_unique(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.set_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
users = self.db.get_owners_channel(BaseTest.CHANNEL_ID)
self.assertEqual(1, len(users))
self.assertIn(BaseTest.USER_ID, users.keys())
self.assertIn(BaseTest.USER_NAME, users.values())
def _test_set_moderator_is_unique(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
users = self.db.get_moderators_room(BaseTest.ROOM_ID)
self.assertEqual(1, len(users))
self.assertIn(BaseTest.USER_ID, users.keys())
self.assertIn(BaseTest.USER_NAME, users.values())
def _test_set_admin_is_unique(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.set_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.set_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
users = self.db.get_admins_channel(BaseTest.CHANNEL_ID)
self.assertEqual(1, len(users))
self.assertIn(BaseTest.USER_ID, users.keys())
self.assertIn(BaseTest.USER_NAME, users.values())
def _test_set_super_user_is_unique(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_super_user(BaseTest.USER_ID))
self.db.set_super_user(BaseTest.USER_ID)
self.assertTrue(self.db.is_super_user(BaseTest.USER_ID))
self.db.set_super_user(BaseTest.USER_ID)
self.assertTrue(self.db.is_super_user(BaseTest.USER_ID))
users = self.db.get_super_users()
self.assertEqual(1, len(users))
self.assertIn(BaseTest.USER_ID, users.keys())
self.assertIn(BaseTest.USER_NAME, users.values())
def _test_remove_super_user_without_setting(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_super_user(BaseTest.OTHER_USER_ID))
self.db.remove_super_user(BaseTest.OTHER_USER_ID)
self.assertFalse(self.db.is_super_user(BaseTest.OTHER_USER_ID))
def _test_remove_owner_without_setting(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.OTHER_USER_ID))
self.db.remove_owner(BaseTest.ROOM_ID, BaseTest.OTHER_USER_ID)
self.assertFalse(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.OTHER_USER_ID))
def _test_remove_channel_owner_without_setting(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.OTHER_USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.OTHER_USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.OTHER_USER_ID))
def _test_remove_admin_without_setting(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.OTHER_USER_ID))
self.db.remove_admin(BaseTest.CHANNEL_ID, BaseTest.OTHER_USER_ID)
self.assertFalse(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.OTHER_USER_ID))
def _test_remove_moderator_without_setting(self):
self._create_channel()
self._create_room()
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.OTHER_USER_ID))
self.db.remove_moderator(BaseTest.ROOM_ID, BaseTest.OTHER_USER_ID)
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.OTHER_USER_ID))
def _test_remove_other_role_channel(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.assertFalse(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.set_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.assertTrue(self.db.is_admin(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_remove_other_role_room(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.assertFalse(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.set_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.db.remove_owner(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner(BaseTest.ROOM_ID, BaseTest.USER_ID))
self.assertTrue(self.db.is_moderator(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_set_admin_no_such_channel(self):
self.assertRaises(NoSuchChannelException, self.db.set_admin, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _test_remove_admin_no_such_room(self):
self.assertRaises(NoSuchChannelException, self.db.remove_admin, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _test_channel_name_exists(self):
self.assertFalse(self.db.channel_name_exists(BaseTest.CHANNEL_NAME))
self._create_channel()
self.assertTrue(self.db.channel_name_exists(BaseTest.CHANNEL_NAME))
def _test_channel_exists(self):
self.assertFalse(self.db.channel_exists(''))
self.assertFalse(self.db.channel_exists(None))
self.assertFalse(self.db.channel_exists(BaseTest.CHANNEL_ID))
self._create_channel()
self.assertTrue(self.db.channel_exists(BaseTest.CHANNEL_ID))
def _test_create_user(self):
self.assertRaises(NoSuchUserException, self.db.get_user_name, BaseTest.OTHER_USER_ID)
self.db.create_user(BaseTest.OTHER_USER_ID, BaseTest.OTHER_USER_NAME)
self.assertEqual(BaseTest.OTHER_USER_NAME, self.db.get_user_name(BaseTest.OTHER_USER_ID))
def _test_users_in_room(self):
self.assertEqual(0, len(self.db.users_in_room(BaseTest.ROOM_ID)))
self._create_channel()
self.assertEqual(0, len(self.db.users_in_room(BaseTest.ROOM_ID)))
self._create_room()
self.assertEqual(0, len(self.db.users_in_room(BaseTest.ROOM_ID)))
def _test_users_in_room_after_join(self):
self._create_channel()
self._create_room()
self._join()
users = self.db.users_in_room(BaseTest.ROOM_ID)
self.assertEqual(1, len(users))
self.assertIn(BaseTest.USER_NAME, users.values())
def _test_delete_acl_in_room_for_action(self):
self.assertRaises(NoSuchRoomException, self.db.delete_acl_in_room_for_action, BaseTest.ROOM_ID, 'gender', ApiActions.JOIN)
self._create_channel()
self.assertRaises(NoSuchRoomException, self.db.delete_acl_in_room_for_action, BaseTest.ROOM_ID, 'gender', ApiActions.JOIN)
self._create_room()
self.db.delete_acl_in_room_for_action(BaseTest.ROOM_ID, 'gender', ApiActions.JOIN)
def _test_delete_acl_in_room_for_action_invalid_action(self):
self._create_channel()
self._create_room()
self.assertRaises(InvalidApiActionException, self.db.delete_acl_in_room_for_action, BaseTest.ROOM_ID, 'gender', 'invalid-action')
def _test_delete_acl_in_room_for_action_after_create(self):
self._create_channel()
self._create_room()
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, {'age': '25:35'})
acls = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertIn('age', acls.keys())
self.db.delete_acl_in_room_for_action(BaseTest.ROOM_ID, 'age', ApiActions.JOIN)
acls = self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)
self.assertEqual(0, len(acls))
def _test_delete_acl_in_channel_for_action_after_create(self):
self._create_channel()
self._create_room()
self.db.add_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST, {'age': '25:35'})
acls = self.db.get_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST)
self.assertIn('age', acls.keys())
self.db.delete_acl_in_channel_for_action(BaseTest.CHANNEL_ID, 'age', ApiActions.LIST)
acls = self.db.get_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST)
self.assertEqual(0, len(acls))
def _test_delete_acl_in_channel_for_action_invalid_action(self):
self._create_channel()
self._create_room()
self.assertRaises(InvalidApiActionException, self.db.delete_acl_in_channel_for_action, BaseTest.CHANNEL_ID, 'gender', 'invalid-action')
def _test_delete_acl_in_channel_for_action(self):
self.assertRaises(NoSuchChannelException, self.db.delete_acl_in_channel_for_action, BaseTest.CHANNEL_ID, 'gender', ApiActions.JOIN)
self._create_channel()
self.db.delete_acl_in_channel_for_action(BaseTest.CHANNEL_ID, 'gender', ApiActions.LIST)
def _test_remove_owner_channel_no_channel(self):
self.assertRaises(NoSuchChannelException, self.db.remove_owner_channel, BaseTest.CHANNEL_ID, BaseTest.USER_ID)
def _test_remove_owner_channel_not_owner(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_remove_owner_channel_is_owner(self):
self._create_channel()
self._create_room()
self._set_owner_channel()
self.assertTrue(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
self.db.remove_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(self.db.is_owner_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID))
def _test_set_two_owners_on_room(self):
self._create_channel()
self._create_room()
try:
self.db.create_user(BaseTest.USER_ID, BaseTest.USER_NAME)
        except UserExistsException:
pass
try:
self.db.create_user(BaseTest.OTHER_USER_ID, BaseTest.OTHER_USER_NAME)
        except UserExistsException:
pass
for user_id in [BaseTest.USER_ID, BaseTest.OTHER_USER_ID]:
self.db.set_owner(BaseTest.ROOM_ID, user_id)
for user_id in [BaseTest.USER_ID, BaseTest.OTHER_USER_ID]:
self.assertTrue(self.db.is_owner(BaseTest.ROOM_ID, user_id))
owners = self.db.get_owners_room(BaseTest.ROOM_ID)
        self.assertEqual(2, len(owners))
self.assertIn(BaseTest.USER_ID, owners)
self.assertIn(BaseTest.OTHER_USER_ID, owners)
def _test_create_user_exists(self):
user_id = str(uuid())
self.db.create_user(user_id, BaseTest.USER_NAME)
self.assertRaises(UserExistsException, self.db.create_user, user_id, BaseTest.USER_NAME)
def _test_update_acl_in_room_for_action_no_channel(self):
self.assertRaises(NoSuchChannelException, self.db.update_acl_in_room_for_action,
BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, ApiActions.JOIN, 'age', '25:40')
def _test_update_acl_in_room_for_action_no_room(self):
self._create_channel()
self.assertRaises(NoSuchRoomException, self.db.update_acl_in_room_for_action,
BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, ApiActions.JOIN, 'age', '25:40')
def _test_update_acl_in_room_for_action_invalid_action(self):
self._create_channel()
self._create_room()
self.assertRaises(InvalidApiActionException, self.db.update_acl_in_room_for_action,
BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, 'some-invalid-action', 'age', '25:40')
def _test_update_acl_in_room_for_action_invalid_type(self):
self._create_channel()
self._create_room()
self.assertRaises(InvalidAclTypeException, self.db.update_acl_in_room_for_action,
BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, ApiActions.JOIN, 'something-invalid', '25:40')
def _test_update_acl_in_room_for_action_invalid_value(self):
self._create_channel()
self._create_room()
self.assertRaises(ValidationException, self.db.update_acl_in_room_for_action,
BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, ApiActions.JOIN, 'age', 'something-invalid')
def _test_update_acl_in_room_for_action(self):
self._create_channel()
self._create_room()
self.assertEqual(0, len(self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN)))
self.db.update_acl_in_room_for_action(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, ApiActions.JOIN, 'age', '25:40')
self.assertIn('age', self.db.get_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN))
def _test_update_acl_in_channel_for_action(self):
self._create_channel()
self.assertEqual(0, len(self.db.get_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST)))
self.db.update_acl_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST, 'age', '25:40')
self.assertIn('age', self.db.get_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST))
def _test_update_acl_in_channel_for_action_invalid_action(self):
self._create_channel()
self.assertRaises(InvalidApiActionException, self.db.update_acl_in_channel_for_action,
BaseTest.CHANNEL_ID, 'some-invalid-action', 'age', '25:40')
def _test_update_acl_in_channel_for_action_invalid_type(self):
self._create_channel()
self.assertRaises(InvalidAclTypeException, self.db.update_acl_in_channel_for_action,
BaseTest.CHANNEL_ID, ApiActions.LIST, 'something-invalid', '25:40')
def _test_update_acl_in_channel_for_action_invalid_value(self):
self._create_channel()
self.assertRaises(ValidationException, self.db.update_acl_in_channel_for_action,
BaseTest.CHANNEL_ID, ApiActions.LIST, 'age', 'something-invalid')
def _test_update_acl_in_channel_for_action_no_channel(self):
self.assertRaises(NoSuchChannelException, self.db.update_acl_in_channel_for_action,
BaseTest.CHANNEL_ID, ApiActions.LIST, 'age', '25:40')
def _test_is_banned_from_channel(self):
self._create_channel()
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(is_banned)
def _test_is_banned_from_room(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(is_banned)
def _test_is_banned_globally(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertTrue(is_banned)
def _test_remove_global_ban(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertTrue(is_banned)
self.db.remove_global_ban(BaseTest.USER_ID)
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_remove_channel_ban(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(is_banned)
self.db.remove_channel_ban(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_remove_room_ban(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(is_banned)
self.db.remove_room_ban(BaseTest.ROOM_ID, BaseTest.USER_ID)
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_was_banned_from_channel(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_was_banned_from_room(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_was_banned_globally(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_get_user_ban_status_channel(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
ban_status = self.db.get_user_ban_status(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertEqual('', ban_status['channel'])
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
ban_status = self.db.get_user_ban_status(BaseTest.ROOM_ID, BaseTest.USER_ID)
        self.assertNotEqual('', ban_status['channel'])
def _test_get_user_ban_status_room(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
ban_status = self.db.get_user_ban_status(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertEqual('', ban_status['room'])
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
ban_status = self.db.get_user_ban_status(BaseTest.ROOM_ID, BaseTest.USER_ID)
        self.assertNotEqual('', ban_status['room'])
def _test_get_user_ban_status_global(self):
self._create_channel()
self._create_room()
is_banned, time_left = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
ban_status = self.db.get_user_ban_status(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertEqual('', ban_status['global'])
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
ban_status = self.db.get_user_ban_status(BaseTest.ROOM_ID, BaseTest.USER_ID)
        self.assertNotEqual('', ban_status['global'])
def _test_get_banned_users_global_is_empty(self):
self._create_channel()
self._create_room()
self.assertEqual(0, len(self.db.get_banned_users_global()))
def _test_get_banned_users_global_is_empty_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
self.assertEqual(0, len(self.db.get_banned_users_global()))
def _test_get_banned_users_global_not_empty_after_ban(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
self.assertIn(BaseTest.USER_ID, self.db.get_banned_users_global())
def _test_get_banned_users_channel_is_empty(self):
self._create_channel()
self._create_room()
self.assertEqual(0, len(self.db.get_banned_users_for_channel(BaseTest.CHANNEL_ID)))
def _test_get_banned_users_channel_is_empty_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
self.assertEqual(0, len(self.db.get_banned_users_for_channel(BaseTest.CHANNEL_ID)))
def _test_get_banned_users_channel_not_empty_after_ban(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
self.assertIn(BaseTest.USER_ID, self.db.get_banned_users_for_channel(BaseTest.CHANNEL_ID))
def _test_get_banned_users_room_is_empty(self):
self._create_channel()
self._create_room()
self.assertEqual(0, len(self.db.get_banned_users_for_room(BaseTest.ROOM_ID)))
def _test_get_banned_users_room_is_empty_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
self.assertEqual(0, len(self.db.get_banned_users_for_room(BaseTest.ROOM_ID)))
def _test_get_banned_users_room_not_empty_after_ban(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
self.assertIn(BaseTest.USER_ID, self.db.get_banned_users_for_room(BaseTest.ROOM_ID))
def _test_get_banned_users_is_empty(self):
self._create_channel()
self._create_room()
banned = self.db.get_banned_users()
self.assertEqual(0, len(banned['global']))
self.assertEqual(0, len(banned['channels']))
self.assertEqual(0, len(banned['rooms']))
def _test_get_banned_users_for_room(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
banned = self.db.get_banned_users()
self.assertEqual(0, len(banned['global']))
self.assertEqual(0, len(banned['channels']))
self.assertIn(BaseTest.USER_ID, banned['rooms'][BaseTest.ROOM_ID]['users'])
def _test_get_banned_users_for_channel(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
banned = self.db.get_banned_users()
self.assertEqual(0, len(banned['global']))
self.assertEqual(0, len(banned['rooms']))
self.assertIn(BaseTest.USER_ID, banned['channels'][BaseTest.CHANNEL_ID]['users'])
def _test_get_banned_users_globally(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
banned = self.db.get_banned_users()
self.assertEqual(0, len(banned['channels']))
self.assertEqual(0, len(banned['rooms']))
self.assertIn(BaseTest.USER_ID, banned['global'])
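The three _test_get_banned_users_* cases above pin down the shape that get_banned_users() is expected to return. A minimal sketch of that structure, as implied by the assertions only (placeholder ids; nested values beyond what is asserted are not guaranteed):

# Shape implied by the assertions; '<...>' keys and '...' values are placeholders.
banned = {
    'global': {'<user_id>': '...'},
    'channels': {'<channel_id>': {'users': {'<user_id>': '...'}}},
    'rooms': {'<room_id>': {'users': {'<user_id>': '...'}}},
}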
def _test_get_global_ban_timestamp_is_none(self):
self._create_channel()
self._create_room()
ban, timestamp, name = self.db.get_global_ban_timestamp(BaseTest.USER_ID)
self.assertIsNone(ban)
self.assertIsNone(timestamp)
self.assertIsNone(name)
def _test_get_global_ban_timestamp_not_none(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
ban_duration, timestamp, name = self.db.get_global_ban_timestamp(BaseTest.USER_ID)
self.assertEqual('5m', ban_duration)
self.assertIsNotNone(timestamp)
def _test_get_global_ban_timestamp_not_empty_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
ban_duration, timestamp, name = self.db.get_global_ban_timestamp(BaseTest.USER_ID)
self.assertEqual('5m', ban_duration)
self.assertIsNotNone(timestamp)
def _test_get_channel_ban_timestamp_is_none(self):
self._create_channel()
self._create_room()
ban, timestamp, name = self.db.get_channel_ban_timestamp(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertIsNone(ban)
self.assertIsNone(timestamp)
self.assertIsNone(name)
def _test_get_channel_ban_timestamp_not_none(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
ban_duration, timestamp, name = self.db.get_channel_ban_timestamp(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertEqual('5m', ban_duration)
self.assertIsNotNone(timestamp)
def _test_get_channel_ban_timestamp_not_empty_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
ban_duration, timestamp, name = self.db.get_channel_ban_timestamp(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertEqual('5m', ban_duration)
self.assertIsNotNone(timestamp)
def _test_get_room_ban_timestamp_is_none(self):
self._create_channel()
self._create_room()
ban_duration, timestamp, name = self.db.get_room_ban_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertIsNone(ban_duration)
self.assertIsNone(timestamp)
self.assertIsNone(name)
def _test_get_room_ban_timestamp_not_none(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
ban_duration, timestamp, name = self.db.get_room_ban_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertEqual('5m', ban_duration)
self.assertIsNotNone(timestamp)
def _test_get_room_ban_timestamp_not_empty_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
ban_duration, timestamp, name = self.db.get_room_ban_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertEqual('5m', ban_duration)
self.assertIsNotNone(timestamp)
def _test_get_acls_in_channel_for_action_no_channel(self):
self.assertRaises(
NoSuchChannelException, self.db.get_acls_in_channel_for_action, BaseTest.CHANNEL_ID, ApiActions.LIST)
def _test_get_acls_in_channel_for_action_no_room(self):
self._create_channel()
self.assertRaises(
NoSuchRoomException, self.db.get_acls_in_room_for_action, BaseTest.ROOM_ID, ApiActions.JOIN)
def _test_get_all_acls_channel_is_empty(self):
self._create_channel()
self._create_room()
acls = self.db.get_all_acls_channel(BaseTest.CHANNEL_ID)
self.assertEqual(0, len(acls))
def _test_get_all_acls_channel_not_empty(self):
self._create_channel()
self._create_room()
self.db.add_acls_in_channel_for_action(BaseTest.CHANNEL_ID, ApiActions.LIST, {'age': '25:35'})
acls = self.db.get_all_acls_channel(BaseTest.CHANNEL_ID)
self.assertEqual(1, len(acls))
def _test_get_all_acls_room_is_empty(self):
self._create_channel()
self._create_room()
acls = self.db.get_all_acls_room(BaseTest.ROOM_ID)
self.assertEqual(0, len(acls))
def _test_get_all_acls_room_not_empty(self):
self._create_channel()
self._create_room()
self.db.add_acls_in_room_for_action(BaseTest.ROOM_ID, ApiActions.JOIN, {'age': '25:35'})
acls = self.db.get_all_acls_room(BaseTest.ROOM_ID)
self.assertEqual(1, len(acls))
def _test_channel_for_room_blank_room_id(self):
self.assertRaises(NoSuchRoomException, self.db.channel_for_room, '')
def _test_channel_for_room_before_create(self):
self.assertRaises(NoSuchRoomException, self.db.channel_for_room, BaseTest.ROOM_ID)
def _test_channel_for_room_after_create(self):
self._create_channel()
self._create_room()
channel_id = self.db.channel_for_room(BaseTest.ROOM_ID)
self.assertEqual(BaseTest.CHANNEL_ID, channel_id)
def _test_channel_for_room_cache(self):
self._create_channel()
self._create_room()
self.db.channel_for_room(BaseTest.ROOM_ID)
channel_id = self.db.channel_for_room(BaseTest.ROOM_ID)
self.assertEqual(BaseTest.CHANNEL_ID, channel_id)
def _test_get_username_before_set(self):
self.assertRaises(NoSuchUserException, self.db.get_user_name, str(uuid()))
def _test_get_username_after_set(self):
self.db.set_user_name(BaseTest.USER_ID, BaseTest.USER_NAME)
username = self.db.get_user_name(BaseTest.USER_ID)
self.assertEqual(BaseTest.USER_NAME, username)
def _test_rename_channel(self):
self._create_channel()
self._create_room()
self.db.rename_channel(BaseTest.CHANNEL_ID, 'new-name')
self.assertEqual('new-name', self.db.get_channel_name(BaseTest.CHANNEL_ID))
def _test_rename_channel_before_create(self):
self.assertRaises(NoSuchChannelException, self.db.rename_channel, BaseTest.CHANNEL_ID, BaseTest.CHANNEL_NAME)
def _test_rename_channel_empty_name(self):
self._create_channel()
self._create_room()
self.assertRaises(EmptyChannelNameException, self.db.rename_channel, BaseTest.CHANNEL_ID, '')
def _test_rename_channel_exists(self):
self._create_channel()
self._create_room()
self.assertRaises(ChannelNameExistsException, self.db.rename_channel, BaseTest.CHANNEL_ID, BaseTest.CHANNEL_NAME)
def _test_rename_room(self):
self._create_channel()
self._create_room()
self.db.rename_room(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, 'new-name')
self.assertEqual('new-name', self.db.get_room_name(BaseTest.ROOM_ID))
def _test_rename_room_before_create_channel(self):
self.assertRaises(NoSuchChannelException, self.db.rename_room, BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, 'new-name')
def _test_rename_room_before_create_room(self):
self._create_channel()
self.assertRaises(NoSuchRoomException, self.db.rename_room, BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, 'new-name')
def _test_rename_room_empty_name(self):
self._create_channel()
self._create_room()
self.assertRaises(EmptyRoomNameException, self.db.rename_room, BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, '')
def _test_rename_room_already_exists(self):
self._create_channel()
self._create_room()
self.assertRaises(RoomNameExistsForChannelException, self.db.rename_room, BaseTest.CHANNEL_ID, BaseTest.ROOM_ID, BaseTest.ROOM_NAME)
def _test_remove_room(self):
self._create_channel()
self._create_room()
self.assertTrue(self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID))
self.db.remove_room(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID)
self.assertFalse(self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID))
def _test_remove_room_before_create_room(self):
self._create_channel()
self.assertFalse(self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID))
self.assertRaises(NoSuchRoomException, self.db.remove_room, BaseTest.CHANNEL_ID, BaseTest.ROOM_ID)
def _test_remove_room_before_create_channel(self):
self.assertFalse(self.db.room_exists(BaseTest.CHANNEL_ID, BaseTest.ROOM_ID))
self.assertRaises(NoSuchChannelException, self.db.remove_room, BaseTest.CHANNEL_ID, BaseTest.ROOM_ID)
def _test_update_last_read_for(self):
self._create_channel()
self._create_room()
timestamp = int(datetime.utcnow().timestamp())
self.db.update_last_read_for({BaseTest.USER_ID}, BaseTest.ROOM_ID, timestamp)
timestamp_fetched = self.db.get_last_read_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertIsNotNone(timestamp_fetched)
self.assertEqual(timestamp, timestamp_fetched)
def _test_get_last_read_timestamp_before_set(self):
self._create_channel()
self._create_room()
self.assertIsNone(self.db.get_last_read_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID))
def _test_update_username(self):
self._create_channel()
self._create_room()
self.db.set_user_name(BaseTest.USER_ID, BaseTest.USER_NAME)
self.assertEqual(BaseTest.USER_NAME, self.db.get_user_name(BaseTest.USER_ID))
self.db.set_user_name(BaseTest.USER_ID, 'Batman')
self.assertEqual('Batman', self.db.get_user_name(BaseTest.USER_ID))
def _test_get_room_name_from_cache(self):
self._create_channel()
self._create_room()
room_name = self.db.get_room_name(BaseTest.ROOM_ID)
self.assertEqual(BaseTest.ROOM_NAME, room_name)
room_name = self.db.get_room_name(BaseTest.ROOM_ID)
self.assertEqual(BaseTest.ROOM_NAME, room_name)
def _test_get_channel_name_from_cache(self):
self._create_channel()
self._create_room()
channel_name = self.db.get_channel_name(BaseTest.CHANNEL_ID)
self.assertEqual(BaseTest.CHANNEL_NAME, channel_name)
channel_name = self.db.get_channel_name(BaseTest.CHANNEL_ID)
self.assertEqual(BaseTest.CHANNEL_NAME, channel_name)
def _test_is_banned_globally_after_clearing_cache(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
self.env.cache.set_global_ban_timestamp(BaseTest.USER_ID, '', '', '')
is_banned, duration = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertTrue(is_banned)
def _test_is_banned_globally_after_clearing_cache_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_global(BaseTest.USER_ID, timestamp, duration)
self.env.cache.set_global_ban_timestamp(BaseTest.USER_ID, '', '', '')
is_banned, duration = self.db.is_banned_globally(BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_is_banned_from_channel_after_clearing_cache(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
self.env.cache.set_channel_ban_timestamp(BaseTest.CHANNEL_ID, BaseTest.USER_ID, '', '', '')
is_banned, duration = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertTrue(is_banned)
def _test_is_banned_from_channel_after_clearing_cache_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_channel(BaseTest.USER_ID, timestamp, duration, BaseTest.CHANNEL_ID)
self.env.cache.set_channel_ban_timestamp(BaseTest.CHANNEL_ID, BaseTest.USER_ID, '', '', '')
is_banned, duration = self.db.is_banned_from_channel(BaseTest.CHANNEL_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
def _test_is_banned_from_room_after_clearing_cache(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
self.env.cache.set_room_ban_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID, '', '', '')
is_banned, duration = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertTrue(is_banned)
def _test_is_banned_from_room_after_clearing_cache_if_expired(self):
self._create_channel()
self._create_room()
timestamp = str(int((datetime.utcnow() + timedelta(minutes=-5)).timestamp()))
duration = '5m'
self.db.ban_user_room(BaseTest.USER_ID, timestamp, duration, BaseTest.ROOM_ID)
self.env.cache.set_room_ban_timestamp(BaseTest.ROOM_ID, BaseTest.USER_ID, '', '', '')
is_banned, duration = self.db.is_banned_from_room(BaseTest.ROOM_ID, BaseTest.USER_ID)
self.assertFalse(is_banned)
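The was_banned_* and *_if_expired cases above all rely on the same rule: a ban whose end timestamp lies in the past must be reported as not banned. A minimal sketch of that check, assuming the stored value is a unix-epoch timestamp string as used in these tests (the helper name _ban_is_active is hypothetical, not part of the tested API):

from datetime import datetime

def _ban_is_active(ban_timestamp: str) -> bool:
    # Hypothetical helper: an empty value means no ban; otherwise compare the
    # stored unix-epoch string against the current UTC time.
    if not ban_timestamp:
        return False
    return int(ban_timestamp) > int(datetime.utcnow().timestamp())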
| 70,519
| 6,459
| 23
|
7c24e0e7bd87da7dcf0d17237156e3ed8a9c914a
| 682
|
py
|
Python
|
packages/postgres-database/src/simcore_postgres_database/migration/versions/cfd1c43b5d33_migrate_workbench_state_enum.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 25
|
2018-04-13T12:44:12.000Z
|
2022-03-12T15:01:17.000Z
|
packages/postgres-database/src/simcore_postgres_database/migration/versions/cfd1c43b5d33_migrate_workbench_state_enum.py
|
colinRawlings/osparc-simcore
|
bf2f18d5bc1e574d5f4c238d08ad15156184c310
|
[
"MIT"
] | 2,553
|
2018-01-18T17:11:55.000Z
|
2022-03-31T16:26:40.000Z
|
packages/postgres-database/src/simcore_postgres_database/migration/versions/cfd1c43b5d33_migrate_workbench_state_enum.py
|
mrnicegyu11/osparc-simcore
|
b6fa6c245dbfbc18cc74a387111a52de9b05d1f4
|
[
"MIT"
] | 20
|
2018-01-18T19:45:33.000Z
|
2022-03-29T07:08:47.000Z
|
"""migrate workbench state enum
Revision ID: cfd1c43b5d33
Revises: c8a7073deebb
Create Date: 2020-11-17 16:42:32.511722+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cfd1c43b5d33'
down_revision = 'c8a7073deebb'
branch_labels = None
depends_on = None
| 19.485714
| 84
| 0.653959
|
"""migrate workbench state enum
Revision ID: cfd1c43b5d33
Revises: c8a7073deebb
Create Date: 2020-11-17 16:42:32.511722+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cfd1c43b5d33'
down_revision = 'c8a7073deebb'
branch_labels = None
depends_on = None
def upgrade():
op.execute(
sa.DDL(
"""
UPDATE projects
SET workbench = (regexp_replace(workbench::text, '"FAILURE"', '"FAILED"'))::json
WHERE workbench::text LIKE '%%FAILURE%%'
"""
)
)
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
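The auto-generated downgrade() above is a no-op. If a reversible migration were wanted, one option (a sketch only, not part of the original revision) would be to mirror the substitution done in upgrade() in the opposite direction:

def downgrade_sketch():
    # Hypothetical reverse of upgrade(): rename the enum value back.
    op.execute(
        sa.DDL(
            """
            UPDATE projects
            SET workbench = (regexp_replace(workbench::text, '"FAILED"', '"FAILURE"'))::json
            WHERE workbench::text LIKE '%%FAILED%%'
            """
        )
    )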
| 316
| 0
| 46
|
e0da0a1af0b2f9dd2f59c9caf104f7566ccc85e2
| 270
|
py
|
Python
|
individual_work/justin/sandbox/build_alderman.py
|
marquettecomputationalsocialscience/clusteredcrimemaps
|
4fb4ad1ad2474f854dabc1a72fbefcbb700d601a
|
[
"MIT"
] | 2
|
2017-11-16T23:22:53.000Z
|
2018-02-05T21:04:03.000Z
|
individual_work/justin/sandbox/build_alderman.py
|
marquettecomputationalsocialscience/clusteredcrimemaps
|
4fb4ad1ad2474f854dabc1a72fbefcbb700d601a
|
[
"MIT"
] | null | null | null |
individual_work/justin/sandbox/build_alderman.py
|
marquettecomputationalsocialscience/clusteredcrimemaps
|
4fb4ad1ad2474f854dabc1a72fbefcbb700d601a
|
[
"MIT"
] | 3
|
2017-08-06T22:44:45.000Z
|
2018-10-28T02:11:56.000Z
|
import json
fn = open('../static/alderman.js', 'w')
#add alderman boundaries variable
json_file = open('../maps/alderman.geojson')
geo_json = json.load(json_file)
fn.write('var alderman_boundaries = ')
fn.write(json.dumps(geo_json))
fn.write(';\n\n')
json_file.close()
| 24.545455
| 44
| 0.722222
|
import json
fn = open('../static/alderman.js', 'w')
#add alderman boundaries variable
json_file = open('../maps/alderman.geojson')
geo_json = json.load(json_file)
fn.write('var alderman_boundaries = ')
fn.write(json.dumps(geo_json))
fn.write(';\n\n')
json_file.close()
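The script above never closes the output handle fn. A functionally equivalent sketch using context managers, so both files are closed deterministically (same paths as the original):

import json

# Same steps as above, with both file handles managed by `with`.
with open('../maps/alderman.geojson') as json_file, \
        open('../static/alderman.js', 'w') as fn:
    geo_json = json.load(json_file)
    fn.write('var alderman_boundaries = ')
    fn.write(json.dumps(geo_json))
    fn.write(';\n\n')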
| 0
| 0
| 0
|
1685444ea08d378d0e2dd4b7a518820677952a30
| 5,760
|
py
|
Python
|
heavymodel/tables.py
|
lewisfogden/heavymodel
|
53507be8b7ce099740126d524f84e8cf84214995
|
[
"MIT"
] | 3
|
2020-05-22T22:38:56.000Z
|
2021-11-23T19:38:44.000Z
|
heavymodel/tables.py
|
lewisfogden/heavymodel
|
53507be8b7ce099740126d524f84e8cf84214995
|
[
"MIT"
] | 2
|
2020-05-23T21:08:22.000Z
|
2020-07-10T21:35:06.000Z
|
heavymodel/tables.py
|
lewisfogden/heavymodel
|
53507be8b7ce099740126d524f84e8cf84214995
|
[
"MIT"
] | 2
|
2021-01-31T10:37:39.000Z
|
2022-03-02T09:07:06.000Z
|
# tables.py
class MortalityTable:
"""mortalitytable is a matrix, by age and duration."""
class MortalityImprovementTable:
"""MortalityImprovementTable is a matrix, by age and year."""
class RangeTable:
"""range table"""
| 36.687898
| 129
| 0.485243
|
# tables.py
class MortalityTable:
"""mortalitytable is a matrix, by age and duration."""
def __init__(self, csv_filename, name, select_period, pc_of_base=1):
self.filename = csv_filename
self.name = name
self.select_period = select_period
self.pc_of_base = pc_of_base
self.load_csv(self.filename, self.select_period)
def load_csv(self, filename, select_period):
with open(filename, 'r') as csv_file:
header = None
self.q = dict()
for raw_line in csv_file:
line_array = raw_line.strip("\n").split(",")
if header is None:
header = line_array
if len(header) != select_period + 2:
raise ValueError("csv file does not have correct number of columns for select period.")
else:
age = int(line_array[0])
values = line_array[1:]
for duration, value in enumerate(values):
if value != "":
self.q[age, duration] = float(value) * self.pc_of_base
def get(self, age, duration):
if duration > self.select_period:
return self.q[(age, self.select_period)]
else:
return self.q[(age, duration)]
def __getitem__(self, key):
age, duration = key
return self.get(age, duration)
class MortalityImprovementTable:
"""MortalityImprovementTable is a matrix, by age and year."""
def __init__(self, csv_filename):
self.filename = csv_filename
self.load_csv(self.filename)
def load_csv(self, filename):
with open(filename, 'r') as csv_file:
header = None
self.q = dict()
for raw_line in csv_file:
line_array = raw_line.strip("\n").split(",")
if header is None:
header = line_array
years = [int(year) for year in header[1:]]
else:
age = int(line_array[0])
values = line_array[1:]
for year, value in zip(years, values):
if value != "":
self.q[age, year] = float(value)
def get(self, age, year):
return self.q[(age, year)]
def __getitem__(self, key):
age, year = key
return self.get(age, year)
class RangeTable:
"""range table"""
def __init__(self, filename=None):
with open(filename, 'r') as csv_file:
header = None
self.data = dict()
for raw_line in csv_file:
line_array = raw_line.strip("\n").split(",")
if header is None:
header = line_array
else:
key = int(line_array[0])
value = float(line_array[1])
self.data[key] = value
self.max = max(self.data)
self.min = min(self.data)
def __getitem__(self, key):
if key > self.max:
return 1
elif key < self.min:
return 0
else:
return self.data[key]
class YieldCurve:
def __init__(self, filename, key_period="annual", rate_type="spot_rate"):
self.filename = filename
self.key_period = key_period
self.rate_type = rate_type
with open(self.filename, 'r') as csv_file:
header = None
self.spot_annual = dict()
for raw_line in csv_file:
line_array = raw_line.strip("\n").split(",")
if header is None:
header = line_array
else:
key = int(line_array[0])
value = float(line_array[1])
self.spot_annual[key] = value
self.max_t = key * 12
self.max_t_years = key
self._build_tables()
def _build_tables(self):
self.v = dict() # discount factor, monthly
self.s = dict() # spot rate, monthly
for t in range(self.max_t):
t_years = int(t/12)
self.s[t] = (1 + self.spot_annual[t_years]) ** (1/12) - 1
self.v[t] = (1 + self.s[t])**(-t)
def npv(self, cashflow, proj_len):
pv = 0.0
for t in range(0, proj_len):
pv += self.v[t] * cashflow(t)
return pv
class ModelPoints:
header_types = {"str":str,
"int":int,
"float":float,
"bool":bool
}
def __init__(self, filename):
self.filename = filename
self.mps = []
with open(self.filename, 'r') as csv_file:
header = None
for raw_line in csv_file:
line_array = raw_line.strip("\n").split(",")
if header is None:
header = line_array
self.col_names = []
self.col_types = []
for col in line_array:
self.col_types.append(self.header_types[col.split(":")[0]])
self.col_names.append(col.split(":")[1])
else:
mp = {col_name:col_type(col) for col, col_name, col_type in zip(line_array, self.col_names, self.col_types)}
self.mps.append(mp)
def __iter__(self):
return iter(self.mps)
def __getitem__(self, key):
return self.mps[key]
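A brief usage sketch for the table classes above. The file name and constructor arguments are illustrative only; the CSV layout must match what load_csv expects (an age column followed by one column per select duration):

# Hypothetical usage; 'mortality.csv' and the arguments are examples only.
qx = MortalityTable('mortality.csv', name='example', select_period=2)
rate = qx[40, 1]          # age 40, duration 1 (within the select period)
ultimate = qx.get(40, 5)  # durations beyond select_period fall back to it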
| 4,814
| 237
| 450
|
9c9bdde05fd6b005c418fb0f2df26af899a1b5ee
| 457
|
py
|
Python
|
pysimgame/utils/maths.py
|
ScienceGamez/pysimgame
|
6c89280441358722efbc63b6d8aa914cbe21575e
|
[
"WTFPL"
] | null | null | null |
pysimgame/utils/maths.py
|
ScienceGamez/pysimgame
|
6c89280441358722efbc63b6d8aa914cbe21575e
|
[
"WTFPL"
] | null | null | null |
pysimgame/utils/maths.py
|
ScienceGamez/pysimgame
|
6c89280441358722efbc63b6d8aa914cbe21575e
|
[
"WTFPL"
] | null | null | null |
"""Mathematical helper functions."""
def normalize(array):
"""Normalize the array.
Set all the values betwwen 0 and 1.
0 corresponds to the min value and 1 the max.
If the normalization cannot occur, will return the array.
"""
min_ = min(array)
max_ = max(array)
return (
(array - min_) / (max_ - min_) # Normalize
if min_ != max_ else
array / (max_ if max_ > 0 else 1) # Avoid divide by 0
)
| 25.388889
| 62
| 0.601751
|
"""Mathematical helper functions."""
def normalize(array):
"""Normalize the array.
Set all the values betwwen 0 and 1.
0 corresponds to the min value and 1 the max.
If the normalization cannot occur, will return the array.
"""
min_ = min(array)
max_ = max(array)
return (
(array - min_) / (max_ - min_) # Normalize
if min_ != max_ else
array / (max_ if max_ > 0 else 1) # Avoid divide by 0
)
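A short usage sketch for normalize. It assumes an array type with element-wise arithmetic, such as a NumPy array, since plain Python lists do not support the vectorised operations used above:

import numpy as np

values = np.array([2.0, 4.0, 6.0])
print(normalize(values))  # -> [0., 0.5, 1.]: min maps to 0, max to 1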
| 0
| 0
| 0
|
1767230b91ddc9a5770836dbc0935ed70a2e4b8d
| 105
|
py
|
Python
|
files/Strings/d25.py
|
heltonricardo/estudo-python
|
e82eb8ebc15378175b03d367a6eeea66e8858cff
|
[
"MIT"
] | null | null | null |
files/Strings/d25.py
|
heltonricardo/estudo-python
|
e82eb8ebc15378175b03d367a6eeea66e8858cff
|
[
"MIT"
] | null | null | null |
files/Strings/d25.py
|
heltonricardo/estudo-python
|
e82eb8ebc15378175b03d367a6eeea66e8858cff
|
[
"MIT"
] | null | null | null |
nome = input('Insira nome completo: ').strip()
print('Possui "Silva"?', 'silva' in nome.lower())
input()
| 26.25
| 49
| 0.657143
|
nome = input('Insira nome completo: ').strip()
print('Possui "Silva"?', 'silva' in nome.lower())
input()
| 0
| 0
| 0
|
de22e77706673d86f80049a8a5df73132c478cf4
| 5,266
|
py
|
Python
|
models/resnet.py
|
akanametov/classification-pytorch
|
1090d6e794a789150c3e8f0f68080dd208d340c1
|
[
"MIT"
] | null | null | null |
models/resnet.py
|
akanametov/classification-pytorch
|
1090d6e794a789150c3e8f0f68080dd208d340c1
|
[
"MIT"
] | null | null | null |
models/resnet.py
|
akanametov/classification-pytorch
|
1090d6e794a789150c3e8f0f68080dd208d340c1
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
def conv3x3(in_planes, out_planes, **kwargs):
"""3x3 convolution with padding"""
kwargs['kernel_size'] = 3
kwargs['padding'] = 1
kwargs['bias'] = False
return nn.Conv2d(in_planes, out_planes, **kwargs)
def conv1x1(in_planes, out_planes, **kwargs):
"""1x1 convolution"""
kwargs['kernel_size'] = 1
kwargs['bias'] = False
return nn.Conv2d(in_planes, out_planes, **kwargs)
class BasicBlock(nn.Module):
"""BasicBlock"""
expansion = 1
class Bottleneck(nn.Module):
"""Bottleneck"""
expansion = 4
class ResNet(nn.Module):
"""ResNet"""
def resnet18(num_classes=1000, **kwargs):
"""resnet18"""
return ResNet([2, 2, 2, 2], num_classes, BasicBlock)
def resnet34(num_classes=1000, **kwargs):
"""resnet34"""
return ResNet([3, 4, 6, 3], num_classes, BasicBlock)
def resnet50(num_classes=1000, **kwargs):
"""resnet50"""
return ResNet([3, 4, 6, 3], num_classes, Bottleneck)
def resnet101(num_classes=1000, **kwargs):
"""resnet101"""
return ResNet([3, 4, 23, 3], num_classes, Bottleneck)
def resnet152(num_classes=1000, **kwargs):
"""resnet152"""
return ResNet([3, 8, 36, 3], num_classes, Bottleneck)
| 30.616279
| 96
| 0.590201
|
import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
def conv3x3(in_planes, out_planes, **kwargs):
"""3x3 convolution with padding"""
kwargs['kernel_size'] = 3
kwargs['padding'] = 1
kwargs['bias'] = False
return nn.Conv2d(in_planes, out_planes, **kwargs)
def conv1x1(in_planes, out_planes, **kwargs):
"""1x1 convolution"""
kwargs['kernel_size'] = 1
kwargs['bias'] = False
return nn.Conv2d(in_planes, out_planes, **kwargs)
class BasicBlock(nn.Module):
"""BasicBlock"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
fx = x
fx = self.conv1(fx)
fx = self.bn1(fx)
fx = self.relu(fx)
fx = self.conv2(fx)
fx = self.bn2(fx)
if self.downsample is not None:
x = self.downsample(x)
fx = fx + x
fx = self.relu(fx)
return fx
class Bottleneck(nn.Module):
"""Bottleneck"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
fx = x
fx = self.conv1(fx)
fx = self.bn1(fx)
fx = self.relu(fx)
fx = self.conv2(fx)
fx = self.bn2(fx)
fx = self.relu(fx)
fx = self.conv3(fx)
fx = self.bn3(fx)
if self.downsample is not None:
x = self.downsample(x)
fx = fx + x
fx = self.relu(fx)
return fx
class ResNet(nn.Module):
"""ResNet"""
def __init__(self, layers, num_classes=1000, block=BasicBlock):
super(ResNet, self).__init__()
self.layers=layers
self.num_classes=num_classes
self.inplanes=64
# input
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# base
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# output
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
# init weights
self.initialize_weights()
def initialize_weights(self,):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
return None
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride=stride),
nn.BatchNorm2d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet18(num_classes=1000, **kwargs):
"""resnet18"""
return ResNet([2, 2, 2, 2], num_classes, BasicBlock)
def resnet34(num_classes=1000, **kwargs):
"""resnet34"""
return ResNet([3, 4, 6, 3], num_classes, BasicBlock)
def resnet50(num_classes=1000, **kwargs):
"""resnet50"""
return ResNet([3, 4, 6, 3], num_classes, Bottleneck)
def resnet101(num_classes=1000, **kwargs):
"""resnet101"""
return ResNet([3, 4, 23, 3], num_classes, Bottleneck)
def resnet152(num_classes=1000, **kwargs):
"""resnet152"""
return ResNet([3, 8, 36, 3], num_classes, Bottleneck)
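A quick smoke-test sketch for the factory functions above (randomly initialised weights, shape check only):

# Sketch: forward a dummy batch through resnet18 and check the output shape.
model = resnet18(num_classes=10)
dummy = torch.randn(2, 3, 224, 224)
logits = model(dummy)
assert logits.shape == (2, 10)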
| 3,726
| 0
| 221
|
41d120032ce70ba4e8b62faacb4efbcdbe973dd7
| 12,787
|
py
|
Python
|
tests/impact/client/test_experiment_definition.py
|
iakov-test/impact-client-python
|
c1f0f91fc15c3b8c80408cd0c0c1afc5dc67e827
|
[
"BSD-3-Clause"
] | 3
|
2020-09-30T12:08:58.000Z
|
2021-09-22T08:42:14.000Z
|
tests/impact/client/test_experiment_definition.py
|
iakov-test/impact-client-python
|
c1f0f91fc15c3b8c80408cd0c0c1afc5dc67e827
|
[
"BSD-3-Clause"
] | 11
|
2020-09-24T14:25:41.000Z
|
2022-02-17T03:48:26.000Z
|
tests/impact/client/test_experiment_definition.py
|
iakov-test/impact-client-python
|
c1f0f91fc15c3b8c80408cd0c0c1afc5dc67e827
|
[
"BSD-3-Clause"
] | 1
|
2022-01-10T13:50:51.000Z
|
2022-01-10T13:50:51.000Z
|
from modelon.impact.client import (
SimpleFMUExperimentDefinition,
SimpleModelicaExperimentDefinition,
Range,
Choices,
SimpleExperimentExtension,
)
import pytest
from modelon.impact.client import exceptions
from tests.impact.client.fixtures import *
| 37.498534
| 93
| 0.574255
|
from modelon.impact.client import (
SimpleFMUExperimentDefinition,
SimpleModelicaExperimentDefinition,
Range,
Choices,
SimpleExperimentExtension,
)
import pytest
from modelon.impact.client import exceptions
from tests.impact.client.fixtures import *
class TestSimpleFMUExperimentDefinition:
def test_experiment_definition_default_options(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param
)
config = definition.to_dict()
assert config == {
"experiment": {
"version": 2,
"base": {
"model": {"fmu": {"id": "Test"}},
"modifiers": {'variables': {}},
"analysis": {
"type": "dynamic",
"parameters": {},
"simulationOptions": {"ncp": 500},
"solverOptions": {},
"simulationLogLevel": "WARNING",
},
},
"extensions": [],
}
}
def test_experiment_definition_with_options(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu,
custom_function=custom_function_no_param,
simulation_options=custom_function_no_param.get_simulation_options().with_values(
ncp=2000, rtol=0.0001
),
solver_options={'a': 1},
)
config = definition.to_dict()
assert config["experiment"]["base"]["analysis"]["simulationOptions"] == {
"ncp": 2000,
"rtol": 0.0001,
}
assert config["experiment"]["base"]["analysis"]["solverOptions"] == {"a": 1}
def test_experiment_definition_with_modifier(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
).with_modifiers({'h0': Range(0.1, 0.5, 3)}, v=Choices(0.1, 0.5, 3))
config = definition.to_dict()
assert config["experiment"]["base"]["modifiers"]["variables"] == {
'h0': 'range(0.1,0.5,3)',
'v': 'choices(0.1, 0.5, 3)',
}
def test_experiment_definition_with_choices_1_input_modifier(
self, fmu, custom_function_no_param
):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
).with_modifiers(v=Choices(0.1))
config = definition.to_dict()
assert config["experiment"]["base"]["modifiers"]["variables"] == {
'v': 'choices(0.1)',
}
def test_experiment_definition_with_fmu_modifiers(
self, fmu_with_modifiers, custom_function_no_param
):
definition = SimpleFMUExperimentDefinition(
fmu_with_modifiers, custom_function=custom_function_no_param,
)
config = definition.to_dict()
assert config["experiment"]["base"]["modifiers"]["variables"] == {'PI.K': 20}
def test_experiment_definition_with_extensions(self, fmu, custom_function_no_param):
ext1 = SimpleExperimentExtension().with_modifiers(p=2)
ext2 = SimpleExperimentExtension({'final_time': 10}).with_modifiers(p=3)
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
).with_extensions([ext1, ext2])
config = definition.to_dict()
assert config["experiment"]["extensions"] == [
{"modifiers": {"variables": {"p": 2}}},
{
"modifiers": {"variables": {"p": 3}},
"analysis": {"parameters": {'final_time': 10}},
},
]
def test_experiment_definition_with_cases(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
).with_cases([{'p': 2}, {'p': 3}])
config = definition.to_dict()
assert config["experiment"]["extensions"] == [
{"modifiers": {"variables": {"p": 2}}},
{"modifiers": {"variables": {"p": 3}}},
]
def test_failed_compile_exp_def(
self,
fmu_compile_failed,
custom_function_no_param,
solver_options,
simulation_options,
):
pytest.raises(
exceptions.OperationFailureError,
SimpleFMUExperimentDefinition,
fmu_compile_failed,
custom_function_no_param,
solver_options,
simulation_options,
)
def test_cancelled_compile_exp_def(
self,
fmu_compile_cancelled,
custom_function_no_param,
solver_options,
simulation_options,
):
pytest.raises(
exceptions.OperationFailureError,
SimpleFMUExperimentDefinition,
fmu_compile_cancelled,
custom_function_no_param,
solver_options,
simulation_options,
)
def test_invalid_option_input(self, custom_function, custom_function_no_param):
pytest.raises(
TypeError,
SimpleFMUExperimentDefinition,
custom_function,
custom_function_no_param,
{},
)
def test_invalid_fmu_input(self, fmu, custom_function_no_param):
pytest.raises(
TypeError,
SimpleFMUExperimentDefinition,
fmu,
custom_function_no_param,
"",
"",
)
class TestSimpleExperimentExtension:
def test_experiment_extension_default_options(self):
ext = SimpleExperimentExtension()
config = ext.to_dict()
assert config == {}
def test_experiment_extension_with_options(self, custom_function_no_param):
ext = SimpleExperimentExtension(
{'stop_time': 5},
{'a': 1},
custom_function_no_param.get_simulation_options().with_values(
ncp=2000, rtol=0.0001
),
)
config = ext.to_dict()
assert config == {
"analysis": {
"parameters": {'stop_time': 5},
"simulationOptions": {'ncp': 2000, 'rtol': 0.0001},
"solverOptions": {'a': 1},
},
}
def test_experiment_extension_with_modifiers(self):
ext = SimpleExperimentExtension().with_modifiers({'PI.k': 10}, P=5, d=15)
config = ext.to_dict()
assert config == {
"modifiers": {"variables": {'PI.k': 10, 'P': 5, 'd': 15}},
}
def test_experiment_extension_with_range_modifier(self):
ext = SimpleExperimentExtension()
pytest.raises(ValueError, ext.with_modifiers, {'h0': Range(0.1, 0.5, 3)})
def test_experiment_extension_with_choices_modifier(self):
ext = SimpleExperimentExtension()
pytest.raises(ValueError, ext.with_modifiers, {'h0': Choices(0.1, 0.5, 3)})
def test_invalid_with_extensions_input(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
)
pytest.raises(TypeError, definition.with_extensions, {})
def test_invalid_with_extensions_list_input(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
)
pytest.raises(TypeError, definition.with_extensions, [{}])
def test_invalid_with_cases_input(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
)
pytest.raises(TypeError, definition.with_cases, {})
def test_invalid_with_cases_list_input(self, fmu, custom_function_no_param):
definition = SimpleFMUExperimentDefinition(
fmu, custom_function=custom_function_no_param,
)
pytest.raises(TypeError, definition.with_cases, [[]])
class TestSimpleModelicaExperimentDefinition:
def test_experiment_definition_default_options(
self, model, custom_function_no_param
):
definition = SimpleModelicaExperimentDefinition(
model, custom_function=custom_function_no_param
)
config = definition.to_dict()
assert config == {
"experiment": {
"version": 2,
"base": {
"model": {
"modelica": {
"className": "Test.PID",
"compilerOptions": {"c_compiler": "gcc"},
"runtimeOptions": {},
"compilerLogLevel": 'warning',
"fmiTarget": 'me',
"fmiVersion": '2.0',
"platform": 'auto',
}
},
"modifiers": {'variables': {}},
"analysis": {
"type": "dynamic",
"parameters": {},
"simulationOptions": {"ncp": 500},
"solverOptions": {},
"simulationLogLevel": "WARNING",
},
},
"extensions": [],
}
}
def test_experiment_definition_with_options(self, model, custom_function_no_param):
definition = SimpleModelicaExperimentDefinition(
model,
custom_function=custom_function_no_param,
compiler_options=custom_function_no_param.get_compiler_options().with_values(
a=2, b=1
),
runtime_options={'d': 1},
simulation_options=custom_function_no_param.get_simulation_options().with_values(
ncp=2000, rtol=0.0001
),
solver_options={'a': 1},
)
config = definition.to_dict()
assert config["experiment"]["base"]["model"]["modelica"]["compilerOptions"] == {
"a": 2,
"b": 1,
'c_compiler': 'gcc',
}
assert config["experiment"]["base"]["model"]["modelica"]["runtimeOptions"] == {
"d": 1,
}
assert config["experiment"]["base"]["analysis"]["simulationOptions"] == {
"ncp": 2000,
"rtol": 0.0001,
}
assert config["experiment"]["base"]["analysis"]["solverOptions"] == {"a": 1}
def test_experiment_definition_with_modifier(self, model, custom_function_no_param):
definition = SimpleModelicaExperimentDefinition(
model, custom_function=custom_function_no_param,
).with_modifiers({'h0': Range(0.1, 0.5, 3), 'v': Choices(0.1, 0.5, 3)})
config = definition.to_dict()
assert config["experiment"]["base"]["modifiers"]["variables"] == {
'h0': 'range(0.1,0.5,3)',
'v': 'choices(0.1, 0.5, 3)',
}
def test_experiment_definition_with_extensions(
self, model, custom_function_no_param
):
ext1 = SimpleExperimentExtension().with_modifiers(p=2)
ext2 = SimpleExperimentExtension({'final_time': 10}).with_modifiers(p=3)
definition = SimpleModelicaExperimentDefinition(
model, custom_function=custom_function_no_param,
).with_extensions([ext1, ext2])
config = definition.to_dict()
assert config["experiment"]["extensions"] == [
{"modifiers": {"variables": {"p": 2}}},
{
"modifiers": {"variables": {"p": 3}},
"analysis": {"parameters": {'final_time': 10}},
},
]
def test_experiment_definition_with_cases(self, model, custom_function_no_param):
definition = SimpleModelicaExperimentDefinition(
model, custom_function=custom_function_no_param,
).with_cases([{'p': 2}, {'p': 3}])
config = definition.to_dict()
assert config["experiment"]["extensions"] == [
{"modifiers": {"variables": {"p": 2}}},
{"modifiers": {"variables": {"p": 3}}},
]
def test_invalid_option_input(self, custom_function, custom_function_no_param):
pytest.raises(
TypeError,
SimpleModelicaExperimentDefinition,
custom_function,
custom_function_no_param,
{},
)
def test_invalid_model_input(self, fmu, model, custom_function_no_param):
pytest.raises(
TypeError,
SimpleModelicaExperimentDefinition,
fmu,
custom_function_no_param,
)
| 11,660
| 58
| 795
|
b9fcee270a8071595b5e1f5162f790c169294d8f
| 3,537
|
py
|
Python
|
database.py
|
shikanon/sota-website
|
3a49fe5e91de72a5d7c7a07ce8b76637953c8194
|
[
"MIT"
] | null | null | null |
database.py
|
shikanon/sota-website
|
3a49fe5e91de72a5d7c7a07ce8b76637953c8194
|
[
"MIT"
] | null | null | null |
database.py
|
shikanon/sota-website
|
3a49fe5e91de72a5d7c7a07ce8b76637953c8194
|
[
"MIT"
] | null | null | null |
#coding:utf-8
import pymongo
import records
| 34.009615
| 125
| 0.615493
|
#coding:utf-8
import pymongo
import records
class DB:
def __init__(self):
        ''' Initialize the database connections.
        '''
self.init_mongo()
self.init_record()
def init_mongo(self):
        # MongoDB client
mongo_client = pymongo.MongoClient('mongodb://10.10.6.85:27017/')
api_db = mongo_client["sotaapi"]
self.api_col = api_db["apiGroup"]
self.api_col.create_index([('name', pymongo.ASCENDING)], unique=True)
def init_record(self):
        # Relational (MySQL) database
self.record_db = records.Database('mysql+pymysql://root:shikanon@10.10.6.85:3306/sota', encoding='utf-8', echo=True)
def create_relation_table(self):
        '''Create the relational tables.'''
create_index_table = '''CREATE TABLE IF NOT EXISTS sotaindex(
id int(4) NOT NULL AUTO_INCREMENT,
            name varchar(64) NOT NULL comment 'category shown in the search hall',
            TypeID int(4) NOT NULL comment '0 = model API, 1 = industry-solution model API, 2 = dataset API',
            APIType varchar(16) NOT NULL comment 'type name',
            Image TEXT comment 'image path',
PRIMARY KEY ( `id` )
)DEFAULT CHARSET=utf8 ;
'''
create_api_table = '''CREATE TABLE IF NOT EXISTS api(
id int AUTO_INCREMENT,
            name varchar(255) NOT NULL comment 'API name',
            APIGroup varchar(32) NOT NULL comment 'API group: a sub-class of an API class, representing a set of APIs',
            APIGroupDescription TEXT,
            DeploymentTime TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP comment 'deployment time',
            Description TEXT comment 'API description',
            APIClass varchar(32) NOT NULL comment 'API class, e.g. image generation, face recognition',
APIClassDescription TEXT,
`index_id` int(4) NOT NULL,
PRIMARY KEY ( `id` ),
INDEX groupname (APIGroup),
INDEX classname (APIClass),
FOREIGN KEY (`index_id`) REFERENCES `sotaindex` (`id`)
)DEFAULT CHARSET=utf8 ;
'''
self.record_db.query(create_index_table)
self.record_db.query(create_api_table)
def reset(self):
        # Drop the relational tables
delete_table = '''DROP TABLE IF EXISTS api'''
self.record_db.query(delete_table)
delete_table = '''DROP TABLE IF EXISTS sotaindex'''
self.record_db.query(delete_table)
        # Remove all documents from the mongo collection
        self.api_col.delete_many({})
        # Recreate the tables
self.create_relation_table()
def insert_sotaindex(self, records):
insert_sql = '''insert ignore into sotaindex(name, APIType, TypeID, Image)
values(:name, :APIType, :TypeID, :Image)
'''
self.record_db.bulk_query(insert_sql, *records)
def get_sotaindex(self):
search_sql = "select * from sotaindex;"
return self.record_db.query(search_sql)
def insert_api(self, records):
insert_sql = '''
insert ignore into api (
name, index_id, APIGroup, APIGroupDescription, APIClass, DeploymentTime, APIClassDescription, Description
)
values (
:name, :index_id, :APIGroup, :APIGroupDescription, :APIClass, :DeploymentTime, :APIClassDescription, :Description
)
'''
self.record_db.bulk_query(insert_sql, *records)
def search_api_from_sotaindex(self, index_id):
search_sql = "select * from api where index_id=:index_id"
return self.record_db.query(search_sql,index_id=index_id)
# def insert_or_update_sdk(self, sdk):
# self.api_col.insert_one(sdk)
# def get_sdk(self, name):
# self.api_col.find_one(name)
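# --- Illustrative usage sketch (added for documentation; not part of the original module).
# --- It needs the MongoDB/MySQL services configured above, and the sample row is made up.
def _example_populate_index():
    db = DB()
    db.create_relation_table()
    db.insert_sotaindex([
        {"name": "Face Recognition", "APIType": "model", "TypeID": 0, "Image": "imgs/face.png"},
    ])
    for row in db.get_sotaindex():
        print(row.id, row.name, row.APIType)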
| 1,646
| 2,043
| 23
|
36557163a13310fca57cbdee6a3ecf79cac43fbc
| 27,785
|
py
|
Python
|
tm351_nb_utils.py
|
innovationOUtside/notebook_processing
|
7444235112c2c0677f0873db54f080138c78ad95
|
[
"MIT"
] | 2
|
2020-02-12T06:34:03.000Z
|
2021-09-30T04:55:10.000Z
|
tm351_nb_utils.py
|
innovationOUtside/notebook_processing
|
7444235112c2c0677f0873db54f080138c78ad95
|
[
"MIT"
] | 8
|
2020-07-04T21:35:31.000Z
|
2022-01-10T17:13:40.000Z
|
tm351_nb_utils.py
|
innovationOUtside/notebook_processing
|
7444235112c2c0677f0873db54f080138c78ad95
|
[
"MIT"
] | null | null | null |
# ou-tm351 - `nb_pub_utils`
#GOTCHA - Python on Mac logging in to Github: https://stackoverflow.com/a/42098127/454773
import click
import os
import shutil
import zipfile
import humanize
import datetime
import github
from tabulate import tabulate
from shlex import quote
import subprocess
def listify(item):
''' If presented with a string and a list is required, make a list... '''
item = [] if item is None else item
#We may be passed a tuple - in which case, listify...
item = list(item) if isinstance(item,(list,tuple)) else [item]
return item
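# Illustrative behaviour of listify (derived from the code above):
#   listify('a.ipynb')  -> ['a.ipynb']
#   listify(('a', 'b')) -> ['a', 'b']
#   listify(None)       -> []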
def exclude_hidden_items(itemlist, exclude_hidden=True):
''' Exclude hidden items from ziplist '''
if exclude_hidden:
rmlist=[]
for x in itemlist:
if x.startswith('.'):
rmlist.append(x)
for x in rmlist:
itemlist.remove(x)
def exclude_items(itemlist, excludes, exclude_hidden=True, ipynb_only=False):
''' Exclude items from ziplist '''
for xd in set(itemlist).intersection(excludes):
itemlist.remove(xd)
if ipynb_only:
for i in [_i for _i in itemlist if not _i.endswith("ipynb")]:
itemlist.remove(i)
if exclude_hidden: exclude_hidden_items(itemlist)
def notebookTest(path=None, filename=None, dir_excludes=None, file_excludes=None):
''' Run notebook tests over explicitly named files and directories.
'''
    #Could probably define this recursively to handle multiple paths/filenames...
sanitiser = """[regex1]
regex: <graphviz.files.Source at [^>]*>
replace: <graphviz.files.Source>
[regex2]
regex: CPU times: .*
replace: CPU times: CPUTIME
[regex3]
regex: Wall time: .*
replace: Wall time: WALLTIME
[regex4]
regex: .* per loop \(mean ± std. dev. of .* runs, .* loops each\)
replace: TIMEIT_REPORT
"""
#tmp_fn = "_sanitise_cfg.cfg"
#with open(tmp_fn, "w") as f:
# f.write(sanitiser)
#cmd=f'py.test --nbval-sanitize-with {tmp_fn} '
cmd=f'py.test '
file_excludes = listify(file_excludes)
for d in listify(dir_excludes):
cmd = cmd + ' --ignore={} '.format(quote(d))
print("*Not testing in directory: {}*".format(d))
cmd = cmd+' --nbval '
## WARNING - TO DO - if we are running this from a notebook, also exclude path=='.'
if path is None and filename is None:
#Process current directory
return cli_command(cmd)
elif filename:
#Process file(s) in directory
if isinstance(filename, list):
for _filename in filename:
cmd = '{cmd} {filename}'.format(cmd=cmd, filename=pathmaker(path, quote(_filename)))
resp=cli_command(cmd)
else:
cmd = '{cmd} {filename}'.format(cmd=cmd, filename=pathmaker(path, quote(filename)))
resp=cli_command(cmd)
return resp
else:
#Process files in path
#If we pass a directory name in then the test will be run over all files in the directory
#py.test accumulates the test responses
resps = []
for singlepath in listify(path):
for dirname, subdirs, files in os.walk(singlepath):
exclude_items(subdirs, dir_excludes)
exclude_items(files, file_excludes, ipynb_only=True)
print('Processing directory: {}'.format(dirname))
with click.progressbar(files) as bar:
for filename in bar:
filepathname=os.path.join(dirname, filename)
cmd = '{cmd} {path}'.format(cmd=cmd, path=quote(filepathname))
resps.append( cli_command(cmd) )
#for singlepath in listify(path):
# print("\nTesting in directory: {}".format(singlepath))
# if singlepath=='.':
# print('**DO NOT test in current directory from a notebook**')
# cmd = '{cmd} {path}'.format(cmd=cmd, path=quote(singlepath))
# resps.append( cli_command(cmd) )
        #os.unlink(tmp_fn)  # tmp_fn is only created by the commented-out sanitiser block above
return resps
def notebookProcessor(notebook, mode=None, outpath=None, outfile=None, inplace=True):
''' Clear notebook output cells.
Process a single notebook, clearing cell outputs running cells until
a warning, or running all cells despite warnings.
Processed notebooks can be written to a specified directory or rendered inplace.
'''
if mode is None: return (-1, 'Mode not specified.')
if outpath is not None and not os.path.exists(outpath):
os.makedirs(outpath)
if outfile is not None:
outpath = '/'.join([outpath,outfile]) if outpath is not None else outfile
cmd='jupyter nbconvert --to notebook'
if mode in ['clearOutput', 'clearOutputTest' ]:
cmd = '{cmd} --ClearOutputPreprocessor.enabled=True'.format(cmd=cmd)
elif mode == 'run':
cmd = '{cmd} --execute'.format(cmd=cmd)
elif mode == 'runWithErrors':
cmd = '{cmd} --ExecutePreprocessor.allow_errors=True --execute'.format(cmd=cmd)
else: return (-1, 'Mode not specified correctly.')
if outpath is None and inplace:
cmd='{cmd} --inplace'.format(cmd=cmd)
#Select file
cmd='{cmd} {notebook}'.format(cmd=cmd,notebook=quote(notebook))
#If output path not set, and --inplace is not set,
# nbformat will create a new file with same name ending: .nbformat.ipynb
if outpath is not None:
cmd ='{cmd} --output-dir {outpath}'.format(cmd=cmd, outpath=quote(outpath))
return cli_command(cmd)
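# Illustrative sketch (not part of the original module): clear the outputs of a single
# notebook in place; 'notebooks/example.ipynb' is an assumed path.
def _example_clear_outputs():
    resp = notebookProcessor('notebooks/example.ipynb', mode='clearOutput', inplace=True)
    if resp and resp[0] != 0:
        print(resp[1])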
def directoryProcessor(path,
mode=None, outpath=None, inplace=True,
include_hidden=False,
dir_excludes=None,
file_excludes=None, rmdir=False, currdir=False, subdirs=True,
reportlevel=1, logfile=None):
''' Process all the notebooks in one or more directories and
(optionally) in associated subdirectories.
Processed notebooks can be written to a specified directory or rendered inplace.
Path hierarchies to notebooks in multiple directories or subdirectories are
respected when writing to a specified output directory.
'''
def _process(outpath):
''' Process files associated with a particular directory '''
processfiles=[f for f in files if f.endswith('.ipynb')]
if subdirs:
print(dirname)
if outpath is not None:
outpath='/'.join([outpath, dirname])
if not os.path.exists(outpath):
os.makedirs(outpath)
if not mode == 'tests':
#print('About to process {}'.format(processfiles))
with click.progressbar(processfiles) as bar:
for filename in bar:
if not currdir and dirname=='.': continue
if reportlevel>1:
print("Processing >{}<".format('/'.join([dirname,filename])))
resp = notebookProcessor('/'.join([dirname,filename]), mode=mode, outpath=outpath, inplace=inplace )
if reportlevel>0 and resp and resp[0]!=0:
print("Error with {}".format('/'.join([dirname,filename])))
if logfile:
with open(logfile, "a") as out:
out.write(resp[1])
#if mode in ['tests', 'clearOutputTest']:
# #Tests need to run in original dir in case of file dependencies
# testreport = notebookTest(path=dirname,dir_excludes=dir_excludes)
# print('tested:',dirname)
# print(testreport[1])
#if mode == 'clearOutputTest':
# #If we are testing for warnings, need to test in original directory
# # in case there are file dependencies
# outpath=None
# inplace=True
if mode is None: return
if isinstance(path, list):
if rmdir:
shutil.rmtree(outpath, ignore_errors=True)
#Make sure we only delete the directory on the way in...
rmdir=False
for _path in path:
#When provided with multiple directories, process each one separately
#Note that subdirs for each directory can be handled automatically
directoryProcessor(_path, mode, '/'.join([outpath, _path]), inplace,
include_hidden, dir_excludes, file_excludes,
rmdir, currdir, subdirs, reportlevel, logfile)
return
#TO DO - simplify this so we just pass one exclusion type then detect if file or dir?
file_excludes = listify(file_excludes)
dir_excludes = listify(dir_excludes)
if outpath is not None and os.path.exists(outpath):
if rmdir:
print('\n***Deleting directory `{}` and all its contents....***\n\n'.format(outpath))
shutil.rmtree(outpath, ignore_errors=True)
else:
print('\nOutput directory `{}` already exists. Remove it first by setting: rmdir=True\n'.format(outpath))
#dir_excludes = [] if dir_excludes is None else dir_excludes
#file_excludes = [] if file_excludes is None else file_excludes
if os.path.isfile(path):
notebookProcessor(path, mode=mode, outpath=outpath, inplace=inplace )
elif subdirs:
for dirname, subdirs, files in os.walk(path):
exclude_items(subdirs, dir_excludes, not include_hidden)
exclude_items(files, file_excludes, not include_hidden)
_process(outpath)
# if passed a single file rather than directory path
else:
files=os.listdir(path)
exclude_items(files, file_excludes, not include_hidden)
dirname=path
_process(outpath)
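# Illustrative sketch (not part of the original module): run every notebook under an
# assumed 'Part 01' directory, writing processed copies into 'dist/Part 01'.
def _example_run_directory():
    directoryProcessor('Part 01', mode='runWithErrors', outpath='dist', rmdir=True,
                       reportlevel=2, logfile='run.log')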
#Running zipper with a file_processor will change the cell state in current dir
#That is, notebooks are processed in place then zipped
#The notebooks as seen in the dir will reflect those in the zipfile
#We could modify this behaviour so it does not affect original notebooks?
def zipper(dirtozip, zipfilename,
include_hidden=False,
dir_excludes=None,
file_excludes=None,
file_processor=None,
reportlevel=1, rmdir=False,
zip_append=False):
''' Zip the contents of a directory and its subdirectories '''
file_excludes = listify(file_excludes)
dir_excludes = listify(dir_excludes)
zip_permission = "a" if zip_append else "w"
#Create a new/replacement zip file, rather than append if zipfile already exists
zf = zipfile.ZipFile(zipfilename, zip_permission, compression=zipfile.ZIP_DEFLATED)
#Don't zip files of same name as the zip file we are creating
file_excludes.append(zipfilename)
# if we have just a single file to zip and not a dir, zip that
if os.path.isfile(dirtozip):
zf.write(dirtozip)
elif os.path.isdir(dirtozip):
#https://stackoverflow.com/a/31779538/454773
for dirname, subdirs, files in os.walk(dirtozip):
exclude_items(subdirs, dir_excludes, not include_hidden)
exclude_items(files, file_excludes, not include_hidden)
print('Processing directory: {}'.format(dirname))
zf.write(dirname)
with click.progressbar(files) as bar:
for filename in bar:
if reportlevel>1:print(filename)
filepathname=os.path.join(dirname, filename)
#There is no point using 'run': if there is an error, nbconvert will fail
if file_processor in ['clearOutput', 'runWithErrors'] and filename.endswith('.ipynb'):
#This introduces side effects - notebooks are processed in current path
#Should we do this in a tmpfile?
notebookProcessor(filepathname, mode=file_processor, inplace=True)
zf.write(filepathname)
zf.close()
#Is this too risky?!
#if rmdir: shutil.rmtree(dirtozip, ignore_errors=True)
return zipfilename
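# Illustrative sketch (not part of the original module): zip an assumed 'Part 01'
# directory, clearing notebook outputs as files are added.
def _example_zip_directory():
    print(zipper('Part 01', 'Part_01.zip', file_processor='clearOutput'))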
def insideZip(zfn, report=True):
''' Look inside a zip file.
The report contains four columns: file_size, file compressed size, datetime and filename.
    Setting report=True also prints a pretty-printed table of the contents. '''
if not os.path.isfile(zfn):
print("\nHmm... {} doesn't seem to be a file?\n".format(zfn))
return
print('\nLooking inside zipfile: {}\n'.format(zfn))
fz=zipfile.ZipFile(zfn)
txt=[]
for fn in fz.infolist():
txt.append( [fn.file_size,
fn.compress_size,
datetime.datetime(*fn.date_time).isoformat(),
fn.filename] )
print('{}, {}, {}, {}'.format(fn.file_size,
fn.compress_size,
datetime.datetime(*fn.date_time).isoformat(),
fn.filename))
    if report:
        print(tabulate(txt, headers=['Full','Zip','Datetime','Path'], tablefmt="simple"))
return txt
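# Illustrative sketch (not part of the original module): list what ended up in an
# assumed 'Part_01.zip' archive.
def _example_list_zip():
    for full, compressed, when, name in insideZip('Part_01.zip') or []:
        print(name, full, compressed, when)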
@click.command()
@click.option('--file-processor','-r', type=click.Choice(['clearOutput', 'runWithErrors']))
@click.option('--include-hiddenfiles', '-H', is_flag=True, help='Include hidden files')
@click.option('--exclude-dir', '-X', multiple=True, type=click.Path(resolve_path=False), help='Exclude specified directory')
@click.option('--exclude-file','-x', multiple=True,type=click.Path(resolve_path=False), help='Exclude specified file')
@click.option('--zip_append','-a', is_flag=True, help='Add to existing zip file')
@click.argument('path', type=click.Path(resolve_path=False))
#@click.argument('zipfile', type=click.File('wb'))
@click.argument('zipfile', type=click.Path())
def cli_zip(file_processor, include_hiddenfiles, exclude_dir, exclude_file, zip_append, path, zipfile):
"""Create a zip file from the contents of a specified directory.
The zipper can optionally run a notebook processor on notebooks before zipping them to check that all cells are run or all cells are cleared.
"""
print('You must be crazy using this...')
if not zip_append:
print(f"\nOverwriting any previous {zipfile} file\n")
else:
print(f"\nAppending zipped files to: {zipfile}\n")
fn = zipper(path, zipfile,
include_hidden=include_hiddenfiles,
dir_excludes=exclude_dir,
file_excludes=exclude_file,
file_processor=file_processor,
zip_append=zip_append)
print(f"\nZip file: {fn}\n")
@click.command()
@click.option('--quiet', '-q', is_flag=True, help='Suppress the report.')
@click.option('--warnings', '-w', is_flag=True, help='Display warnings')
@click.argument('filename', type=click.Path(resolve_path=True),nargs=-1)
def cli_zipview(filename, warnings, quiet):
"""List the contents of one or more specified zipfiles.
"""
zip_contents = []
for f in listify(filename):
zip_contents.append((f, insideZip(f)))
if warnings and zip_contents:
for (zn, item) in zip_contents:
print(f"\n\n====== Zip file quality report: {zn} ======\n")
for record in item:
if record[1] > 1e6:
print(f"WARNING: \"{record[3]}\" looks quite large file ({humanize.naturalsize(record[0])} unzipped, {humanize.naturalsize(record[1])} compressed)")
for _path in record[3].split('/'):
if len(_path) > 50:
print(f"ERROR: the filepath element \"{_path}\" in \"{record[3]}\" is too long (max. 50 chars)")
if _path.startswith("."):
print(f"WARNING: \"{record[3]}\" is a hidden file/directory (do you really need it in the zip file?)")
print("\n===========================\n\n")
@click.command()
@click.option('--exclude-dir','-X', multiple=True,type=click.Path(resolve_path=False), help='Do not recurse through specified directory when assembling tests.')
@click.option('--exclude-file','-x', multiple=True,type=click.Path(resolve_path=False), help='Exclude specified file')
@click.option('--outfile','-o', type=click.Path(resolve_path=False), help='Output report file. Leave this blank to display report on command line.')
@click.argument('testitems', type=click.Path(resolve_path=False),nargs=-1)
def cli_nbtest( exclude_dir, exclude_file, outfile, testitems):
"""Test specified notebooks and/or the notebooks in a specified directory or directories (`TESTITEMS`) using the `nbdime` plugin for `py.test`.
Running `tm351nbtest` without any specified directory or file will assemble tests recursively from the current directory down."""
testitems = testitems or '.'
_notebookTest(testitems, outfile, exclude_dir, exclude_file)
@click.command()
@click.option('--file-processor','-r', type=click.Choice(['clearOutput', 'runWithErrors']), help='File processor actions that can be applied to notebooks using `nbconvert`')
@click.option('--outpath', '-O', type=click.Path(resolve_path=False), help='path to output directory')
@click.option('--inplace/--no-inplace',default=True, help='Run processors on notebooks inplace')
@click.option('--exclude-dir', '-X', multiple=True, type=click.Path(resolve_path=False), help='Exclude specified directory')
@click.option('--exclude-file','-x', multiple=True,type=click.Path(resolve_path=False), help='Exclude specified file')
@click.option('--include-hidden/--no-include-hidden',default=False, help='Include hidden files')
@click.option('--rmdir/--no-rmdir',default=False, help='Check the output directory is empty before we use it')
@click.option('--currdir/--no-currdir',default=False, help='Process files in current directory')
@click.option('--subdirs/--no-subdirs',default=True, help='Process files in subdirectories')
@click.option('--reportlevel', default=1, help='Reporting level')
@click.argument('path',type=click.Path(resolve_path=False))
def cli_nbrun(file_processor, outpath, inplace, exclude_dir, exclude_file, include_hidden, rmdir, currdir, subdirs, reportlevel, path):
"""Directory processor for notebooks - allows the user to run nbconvert operations on notebooks, such as running all cells or clearing all cells.
To run tests, use: tm351nbtest
To zip folders (with the option or running notebook processors on zipped files), use: tm351zip
"""
directoryProcessor(path,
mode=file_processor, outpath=outpath, inplace=inplace,
include_hidden=include_hidden,
dir_excludes=exclude_dir,
file_excludes=exclude_file, rmdir=rmdir, currdir=currdir,
subdirs=subdirs,reportlevel=reportlevel)
from github import Github
import getpass
import base64
import logging
from github.GithubException import GithubException
def get_sha_for_tag(repository, tag):
"""
Returns a commit PyGithub object for the specified repository and tag.
"""
branches = repository.get_branches()
matched_branches = [match for match in branches if match.name == tag]
if matched_branches:
return matched_branches[0].commit.sha
tags = repository.get_tags()
matched_tags = [match for match in tags if match.name == tag]
if not matched_tags:
raise ValueError('No Tag or Branch exists with that name')
return matched_tags[0].commit.sha
def download_directory(repository, sha, server_path, outpath='gh_downloads', file_processor=None):
"""
Download all contents at server_path with commit tag sha in
the repository.
"""
contents = repository.get_dir_contents(server_path, ref=sha)
if not os.path.exists(outpath):
os.makedirs(outpath)
for content in contents:
print("Downloading: %s" % content.path)
if content.type == 'dir':
download_directory(repository, sha, content.path, '/'.join([outpath,content.name]))
else:
try:
path = content.path
file_content = repository.get_contents(path, ref=sha)
file_data = base64.b64decode(file_content.content)
outpathfile='/'.join([outpath,content.name])
file_out = open(outpathfile, "wb")
file_out.write(file_data)
file_out.close()
except (IOError, github.GithubException) as exc:
                #If we fail over because of a large blob, use the data api for the download
ret,error=exc.args
if 'message' in error and error['message']=='Not Found':
print('Hmm... file not found? {}'.format(path))
elif 'errors' in error and error['errors'][0]['code']=='too_large':
#print('...large file, trying blob download instead...')
file_content = repository.get_git_blob(content.sha)
file_data = base64.b64decode(file_content.content)
file_out = open('/'.join([outpath,content.name]), "wb")
file_out.write(file_data)
file_out.close()
#logging.error('Error processing %s: %s', content.path, exc)
#if content.name.endswith('.ipynb') and file_processor in ['clearOutput', 'clearOutputTest','runWithErrors' ]:
# notebookProcessor(outpathfile, file_processor)
DEFAULT_REPO='undercertainty/tm351'
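# Illustrative sketch (not part of the original module): anonymously fetch one directory
# from the default repository; the 'master' branch and 'notebooks' directory are assumptions.
def _example_download_notebooks():
    repository = Github().get_repo(DEFAULT_REPO)
    sha = get_sha_for_tag(repository, 'master')
    download_directory(repository, sha, 'notebooks', outpath='gh_downloads')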
@click.command()
@click.option('--github-user', '-u', help="Your Github username.")
@click.option('--password', hide_input=True,
confirmation_prompt=False)
@click.option('--repo','-r', prompt='Repository ({})'.format(DEFAULT_REPO),
help='Repository name')
@click.option('--branch','-b',help='Branch or tag to download')
@click.option('--directory', help='Directory to download (or: all)')
@click.option('--savedir',type=click.Path(resolve_path=False),
help='Directory to download repo / repo dir into; default is dir name')
@click.option('--file-processor', type=click.Choice(['clearOutput', 'runWithErrors']), help='Optionally specify a file processor to be run against downloaded notebooks.')
@click.option('--zip/--no-zip', default=False, help='Optionally create a zip file of the downloaded repository/directory with the same name as the repository/directory.')
@click.option('--auth/--no-auth', default=True, help="By default, run with auth (prompt for credentials)")
@click.option('--with-tests','-t',is_flag=True, help="Run tests on notebooks after download")
@click.option('--logfile',type=click.Path(resolve_path=False), help='Path to logfile')
def cli_gitrepos(github_user, password, repo, branch, directory, savedir, file_processor, zip, auth, with_tests, logfile):
"""Download files from a specified branch in a particular git repository.
The download can also be limited to just the contents of a specified directory.
Don't worry that there look to be a lot of arguments - you will be prompted for them if you just run: tm351gitrepos
"""
if auth or github_user:
if not github_user: github_user = click.prompt('\nGithub username')
if not password: password = click.prompt('\nGithub password', hide_input=True)
github = Github(github_user, password)
#Show we're keeping no password...
password = None
auth = True
else: github = Github()
if auth:
user = github.get_user()
#organisations = github.get_user().get_orgs()
print('Logging into git as {} ({})'.format(github_user, user.name))
repo = repo or DEFAULT_REPO
repository = github.get_repo(repo)
if not branch:
print('\nBranches available:\n\t{}'.format('\n\t'.join(github_repo_branches(repository)) ))
branch = click.prompt('\nWhich branch? (master)')
branch_or_tag_to_download = branch or 'master'
sha = get_sha_for_tag(repository, branch_or_tag_to_download)
another = ''
while another!='-':
if not directory:
if branch!='master':
contents = repository.get_dir_contents('.', ref=sha)
else:
contents = repository.get_dir_contents('.')
print('\nYou can download all directories from this repo (all) or select one:\n\t{}'.format('\n\t'.join(github_repo_topdirs(contents))))
directory = click.prompt('Which directory? (all)')
directory_to_download = '.' if (not directory or directory=='all') else directory
outpath = savedir or directory_to_download
if outpath == '.' and savedir !='.': outpath=repo.replace('/','_')+'_files'
msg='\nOkay... downloading {}/{}'.format(repo,directory_to_download )
if file_processor is not None:
msg = msg + ' using notebook processor: {}'.format(file_processor)
else: msg = msg + ' with no notebook processing'
print(msg)
download_directory(repository, sha, directory_to_download, outpath,file_processor )
if file_processor in ['clearOutput', 'clearOutputTest','runWithErrors' ]:
click.echo('\nRunning notebook processor: {}'.format(file_processor))
directoryProcessor(outpath, mode=file_processor, subdirs=True,
reportlevel=1, logfile=logfile)
if logfile:
click.echo('\nLog written to {}'.format(logfile))
if with_tests:
click.echo('\nRunning notebook tests over: {}'.format(outpath))
if not logfile: logfile = 'tests.log'
_notebookTest(outpath, logfile )
click.echo('\nLog written to {}'.format(logfile))
        if zip:
            print('\nZipping into: {}\nYou may also want to delete the working directory ({}).'.format(repository.name, outpath))
            zipper(outpath, repository.name)
else:
print('\n\nTo zip the downloaded directory, run something like: {}'.format('tm351zip {o} {z}\n\nTo run a notebook processor (OPTIONS: runWithErrors, clearOutput) while zipping: tm351zip "{o}" {z} --file-processor OPTION\n'.format(o=outpath,z=repository.name)))
directory=''
        another = click.prompt('\nDownload another directory from this branch? (To quit: -)')
#TODO
#print('\n\nTo run this command again: {}'.format())
| 44.670418
| 272
| 0.633831
|
# ou-tm351 - `nb_pub_utils`
#GOTCHA - Python on Mac logging in to Github: https://stackoverflow.com/a/42098127/454773
import click
import os
import shutil
import zipfile
import humanize
import datetime
import github
from tabulate import tabulate
from shlex import quote
import subprocess
def cli_command(cmd):
try:
out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode('utf-8')
except subprocess.CalledProcessError as error:
return (error.returncode, error.output.decode('utf-8'))
if out!='': return (0, out)
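# Illustrative sketch (not part of the original module): cli_command returns a
# (returncode, output) tuple, or None when the command produced no output.
def _example_cli_command():
    resp = cli_command('echo hello')
    if resp:
        print(resp[0], resp[1])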
def listify(item):
''' If presented with a string and a list is required, make a list... '''
item = [] if item is None else item
#We may be passed a tuple - in which case, listify...
item = list(item) if isinstance(item,(list,tuple)) else [item]
return item
def exclude_hidden_items(itemlist, exclude_hidden=True):
''' Exclude hidden items from ziplist '''
if exclude_hidden:
rmlist=[]
for x in itemlist:
if x.startswith('.'):
rmlist.append(x)
for x in rmlist:
itemlist.remove(x)
def exclude_items(itemlist, excludes, exclude_hidden=True, ipynb_only=False):
''' Exclude items from ziplist '''
for xd in set(itemlist).intersection(excludes):
itemlist.remove(xd)
if ipynb_only:
for i in [_i for _i in itemlist if not _i.endswith("ipynb")]:
itemlist.remove(i)
if exclude_hidden: exclude_hidden_items(itemlist)
def notebookTest(path=None, filename=None, dir_excludes=None, file_excludes=None):
''' Run notebook tests over explicitly named files and directories.
'''
    #Could probably define this recursively to handle multiple paths/filenames...
def pathmaker(path,filename):
if not path or path in ['.']: return filename
if not isinstance(path,list):
return '/'.join([path,filename])
sanitiser = """[regex1]
regex: <graphviz.files.Source at [^>]*>
replace: <graphviz.files.Source>
[regex2]
regex: CPU times: .*
replace: CPU times: CPUTIME
[regex3]
regex: Wall time: .*
replace: Wall time: WALLTIME
[regex4]
regex: .* per loop \(mean ± std. dev. of .* runs, .* loops each\)
replace: TIMEIT_REPORT
"""
#tmp_fn = "_sanitise_cfg.cfg"
#with open(tmp_fn, "w") as f:
# f.write(sanitiser)
#cmd=f'py.test --nbval-sanitize-with {tmp_fn} '
cmd=f'py.test '
file_excludes = listify(file_excludes)
for d in listify(dir_excludes):
cmd = cmd + ' --ignore={} '.format(quote(d))
print("*Not testing in directory: {}*".format(d))
cmd = cmd+' --nbval '
## WARNING - TO DO - if we are running this from a notebook, also exclude path=='.'
if path is None and filename is None:
#Process current directory
return cli_command(cmd)
elif filename:
#Process file(s) in directory
if isinstance(filename, list):
for _filename in filename:
cmd = '{cmd} {filename}'.format(cmd=cmd, filename=pathmaker(path, quote(_filename)))
resp=cli_command(cmd)
else:
cmd = '{cmd} {filename}'.format(cmd=cmd, filename=pathmaker(path, quote(filename)))
resp=cli_command(cmd)
return resp
else:
#Process files in path
#If we pass a directory name in then the test will be run over all files in the directory
#py.test accumulates the test responses
resps = []
for singlepath in listify(path):
for dirname, subdirs, files in os.walk(singlepath):
exclude_items(subdirs, dir_excludes)
exclude_items(files, file_excludes, ipynb_only=True)
print('Processing directory: {}'.format(dirname))
with click.progressbar(files) as bar:
for filename in bar:
filepathname=os.path.join(dirname, filename)
cmd = '{cmd} {path}'.format(cmd=cmd, path=quote(filepathname))
resps.append( cli_command(cmd) )
#for singlepath in listify(path):
# print("\nTesting in directory: {}".format(singlepath))
# if singlepath=='.':
# print('**DO NOT test in current directory from a notebook**')
# cmd = '{cmd} {path}'.format(cmd=cmd, path=quote(singlepath))
# resps.append( cli_command(cmd) )
        #os.unlink(tmp_fn)  # tmp_fn is only created by the commented-out sanitiser block above
return resps
def notebookProcessor(notebook, mode=None, outpath=None, outfile=None, inplace=True):
''' Clear notebook output cells.
Process a single notebook, clearing cell outputs running cells until
a warning, or running all cells despite warnings.
Processed notebooks can be written to a specified directory or rendered inplace.
'''
if mode is None: return (-1, 'Mode not specified.')
if outpath is not None and not os.path.exists(outpath):
os.makedirs(outpath)
if outfile is not None:
outpath = '/'.join([outpath,outfile]) if outpath is not None else outfile
cmd='jupyter nbconvert --to notebook'
if mode in ['clearOutput', 'clearOutputTest' ]:
cmd = '{cmd} --ClearOutputPreprocessor.enabled=True'.format(cmd=cmd)
elif mode == 'run':
cmd = '{cmd} --execute'.format(cmd=cmd)
elif mode == 'runWithErrors':
cmd = '{cmd} --ExecutePreprocessor.allow_errors=True --execute'.format(cmd=cmd)
else: return (-1, 'Mode not specified correctly.')
if outpath is None and inplace:
cmd='{cmd} --inplace'.format(cmd=cmd)
#Select file
cmd='{cmd} {notebook}'.format(cmd=cmd,notebook=quote(notebook))
#If output path not set, and --inplace is not set,
# nbformat will create a new file with same name ending: .nbformat.ipynb
if outpath is not None:
cmd ='{cmd} --output-dir {outpath}'.format(cmd=cmd, outpath=quote(outpath))
return cli_command(cmd)
def directoryProcessor(path,
mode=None, outpath=None, inplace=True,
include_hidden=False,
dir_excludes=None,
file_excludes=None, rmdir=False, currdir=False, subdirs=True,
reportlevel=1, logfile=None):
''' Process all the notebooks in one or more directories and
(optionally) in associated subdirectories.
Processed notebooks can be written to a specified directory or rendered inplace.
Path hierarchies to notebooks in multiple directories or subdirectories are
respected when writing to a specified output directory.
'''
def _process(outpath):
''' Process files associated with a particular directory '''
processfiles=[f for f in files if f.endswith('.ipynb')]
if subdirs:
print(dirname)
if outpath is not None:
outpath='/'.join([outpath, dirname])
if not os.path.exists(outpath):
os.makedirs(outpath)
if not mode == 'tests':
#print('About to process {}'.format(processfiles))
with click.progressbar(processfiles) as bar:
for filename in bar:
if not currdir and dirname=='.': continue
if reportlevel>1:
print("Processing >{}<".format('/'.join([dirname,filename])))
resp = notebookProcessor('/'.join([dirname,filename]), mode=mode, outpath=outpath, inplace=inplace )
if reportlevel>0 and resp and resp[0]!=0:
print("Error with {}".format('/'.join([dirname,filename])))
if logfile:
with open(logfile, "a") as out:
out.write(resp[1])
#if mode in ['tests', 'clearOutputTest']:
# #Tests need to run in original dir in case of file dependencies
# testreport = notebookTest(path=dirname,dir_excludes=dir_excludes)
# print('tested:',dirname)
# print(testreport[1])
#if mode == 'clearOutputTest':
# #If we are testing for warnings, need to test in original directory
# # in case there are file dependencies
# outpath=None
# inplace=True
if mode is None: return
if isinstance(path, list):
if rmdir:
shutil.rmtree(outpath, ignore_errors=True)
#Make sure we only delete the directory on the way in...
rmdir=False
for _path in path:
#When provided with multiple directories, process each one separately
#Note that subdirs for each directory can be handled automatically
directoryProcessor(_path, mode, '/'.join([outpath, _path]), inplace,
include_hidden, dir_excludes, file_excludes,
rmdir, currdir, subdirs, reportlevel, logfile)
return
#TO DO - simplify this so we just pass one exclusion type then detect if file or dir?
file_excludes = listify(file_excludes)
dir_excludes = listify(dir_excludes)
if outpath is not None and os.path.exists(outpath):
if rmdir:
print('\n***Deleting directory `{}` and all its contents....***\n\n'.format(outpath))
shutil.rmtree(outpath, ignore_errors=True)
else:
print('\nOutput directory `{}` already exists. Remove it first by setting: rmdir=True\n'.format(outpath))
#dir_excludes = [] if dir_excludes is None else dir_excludes
#file_excludes = [] if file_excludes is None else file_excludes
if os.path.isfile(path):
notebookProcessor(path, mode=mode, outpath=outpath, inplace=inplace )
elif subdirs:
for dirname, subdirs, files in os.walk(path):
exclude_items(subdirs, dir_excludes, not include_hidden)
exclude_items(files, file_excludes, not include_hidden)
_process(outpath)
# if passed a single file rather than directory path
else:
files=os.listdir(path)
exclude_items(files, file_excludes, not include_hidden)
dirname=path
_process(outpath)
#Running zipper with a file_processor will change the cell state in current dir
#That is, notebooks are processed in place then zipped
#The notebooks as seen in the dir will reflect those in the zipfile
#We could modify this behaviour so it does not affect original notebooks?
def zipper(dirtozip, zipfilename,
include_hidden=False,
dir_excludes=None,
file_excludes=None,
file_processor=None,
reportlevel=1, rmdir=False,
zip_append=False):
''' Zip the contents of a directory and its subdirectories '''
file_excludes = listify(file_excludes)
dir_excludes = listify(dir_excludes)
zip_permission = "a" if zip_append else "w"
#Create a new/replacement zip file, rather than append if zipfile already exists
zf = zipfile.ZipFile(zipfilename, zip_permission, compression=zipfile.ZIP_DEFLATED)
#Don't zip files of same name as the zip file we are creating
file_excludes.append(zipfilename)
# if we have just a single file to zip and not a dir, zip that
if os.path.isfile(dirtozip):
zf.write(dirtozip)
elif os.path.isdir(dirtozip):
#https://stackoverflow.com/a/31779538/454773
for dirname, subdirs, files in os.walk(dirtozip):
exclude_items(subdirs, dir_excludes, not include_hidden)
exclude_items(files, file_excludes, not include_hidden)
print('Processing directory: {}'.format(dirname))
zf.write(dirname)
with click.progressbar(files) as bar:
for filename in bar:
if reportlevel>1:print(filename)
filepathname=os.path.join(dirname, filename)
#There is no point using 'run': if there is an error, nbconvert will fail
if file_processor in ['clearOutput', 'runWithErrors'] and filename.endswith('.ipynb'):
#This introduces side effects - notebooks are processed in current path
#Should we do this in a tmpfile?
notebookProcessor(filepathname, mode=file_processor, inplace=True)
zf.write(filepathname)
zf.close()
#Is this too risky?!
#if rmdir: shutil.rmtree(dirtozip, ignore_errors=True)
return zipfilename
def insideZip(zfn, report=True):
''' Look inside a zip file.
The report contains four columns: file_size, file compressed size, datetime and filename.
    Setting report=True also prints a pretty-printed table of the contents. '''
if not os.path.isfile(zfn):
print("\nHmm... {} doesn't seem to be a file?\n".format(zfn))
return
print('\nLooking inside zipfile: {}\n'.format(zfn))
fz=zipfile.ZipFile(zfn)
txt=[]
for fn in fz.infolist():
txt.append( [fn.file_size,
fn.compress_size,
datetime.datetime(*fn.date_time).isoformat(),
fn.filename] )
print('{}, {}, {}, {}'.format(fn.file_size,
fn.compress_size,
datetime.datetime(*fn.date_time).isoformat(),
fn.filename))
    if report:
        print(tabulate(txt, headers=['Full','Zip','Datetime','Path'], tablefmt="simple"))
return txt
@click.command()
@click.option('--file-processor','-r', type=click.Choice(['clearOutput', 'runWithErrors']))
@click.option('--include-hiddenfiles', '-H', is_flag=True, help='Include hidden files')
@click.option('--exclude-dir', '-X', multiple=True, type=click.Path(resolve_path=False), help='Exclude specified directory')
@click.option('--exclude-file','-x', multiple=True,type=click.Path(resolve_path=False), help='Exclude specified file')
@click.option('--zip_append','-a', is_flag=True, help='Add to existing zip file')
@click.argument('path', type=click.Path(resolve_path=False))
#@click.argument('zipfile', type=click.File('wb'))
@click.argument('zipfile', type=click.Path())
def cli_zip(file_processor, include_hiddenfiles, exclude_dir, exclude_file, zip_append, path, zipfile):
"""Create a zip file from the contents of a specified directory.
The zipper can optionally run a notebook processor on notebooks before zipping them to check that all cells are run or all cells are cleared.
"""
print('You must be crazy using this...')
if not zip_append:
print(f"\nOverwriting any previous {zipfile} file\n")
else:
print(f"\nAppending zipped files to: {zipfile}\n")
fn = zipper(path, zipfile,
include_hidden=include_hiddenfiles,
dir_excludes=exclude_dir,
file_excludes=exclude_file,
file_processor=file_processor,
zip_append=zip_append)
print(f"\nZip file: {fn}\n")
@click.command()
@click.option('--quiet', '-q', is_flag=True, help='Suppress the report.')
@click.option('--warnings', '-w', is_flag=True, help='Display warnings')
@click.argument('filename', type=click.Path(resolve_path=True),nargs=-1)
def cli_zipview(filename, warnings, quiet):
"""List the contents of one or more specified zipfiles.
"""
zip_contents = []
for f in listify(filename):
zip_contents.append((f, insideZip(f)))
if warnings and zip_contents:
for (zn, item) in zip_contents:
print(f"\n\n====== Zip file quality report: {zn} ======\n")
for record in item:
if record[1] > 1e6:
print(f"WARNING: \"{record[3]}\" looks quite large file ({humanize.naturalsize(record[0])} unzipped, {humanize.naturalsize(record[1])} compressed)")
for _path in record[3].split('/'):
if len(_path) > 50:
print(f"ERROR: the filepath element \"{_path}\" in \"{record[3]}\" is too long (max. 50 chars)")
if _path.startswith("."):
print(f"WARNING: \"{record[3]}\" is a hidden file/directory (do you really need it in the zip file?)")
print("\n===========================\n\n")
def _notebookTest(testitems, outfile=None, dir_excludes=None, file_excludes=None):
path=[]
filename=[]
for i in listify(testitems):
if os.path.isdir(i):
path.append(i)
else:
filename.append(i)
resps = notebookTest(path=path, filename=filename, dir_excludes=dir_excludes, file_excludes=file_excludes)
if isinstance(resps, tuple): resps = [resps]
for resp in resps:
if outfile:
with open(outfile, "a") as out:
out.write(resp[1])
print('\nTest report written to {}'.format(outfile))
else:
print(resp[1])
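# Illustrative sketch (not part of the original module): test one assumed directory and
# one assumed notebook, appending the combined report to 'tests.log'.
def _example_run_tests():
    _notebookTest(['Part 01', 'extras/demo.ipynb'], outfile='tests.log')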
@click.command()
@click.option('--exclude-dir','-X', multiple=True,type=click.Path(resolve_path=False), help='Do not recurse through specified directory when assembling tests.')
@click.option('--exclude-file','-x', multiple=True,type=click.Path(resolve_path=False), help='Exclude specified file')
@click.option('--outfile','-o', type=click.Path(resolve_path=False), help='Output report file. Leave this blank to display report on command line.')
@click.argument('testitems', type=click.Path(resolve_path=False),nargs=-1)
def cli_nbtest( exclude_dir, exclude_file, outfile, testitems):
"""Test specified notebooks and/or the notebooks in a specified directory or directories (`TESTITEMS`) using the `nbdime` plugin for `py.test`.
Running `tm351nbtest` without any specified directory or file will assemble tests recursively from the current directory down."""
testitems = testitems or '.'
_notebookTest(testitems, outfile, exclude_dir, exclude_file)
@click.command()
@click.option('--file-processor','-r', type=click.Choice(['clearOutput', 'runWithErrors']), help='File processor actions that can be applied to notebooks using `nbconvert`')
@click.option('--outpath', '-O', type=click.Path(resolve_path=False), help='path to output directory')
@click.option('--inplace/--no-inplace',default=True, help='Run processors on notebooks inplace')
@click.option('--exclude-dir', '-X', multiple=True, type=click.Path(resolve_path=False), help='Exclude specified directory')
@click.option('--exclude-file','-x', multiple=True,type=click.Path(resolve_path=False), help='Exclude specified file')
@click.option('--include-hidden/--no-include-hidden',default=False, help='Include hidden files')
@click.option('--rmdir/--no-rmdir',default=False, help='Check the output directory is empty before we use it')
@click.option('--currdir/--no-currdir',default=False, help='Process files in current directory')
@click.option('--subdirs/--no-subdirs',default=True, help='Process files in subdirectories')
@click.option('--reportlevel', default=1, help='Reporting level')
@click.argument('path',type=click.Path(resolve_path=False))
def cli_nbrun(file_processor, outpath, inplace, exclude_dir, exclude_file, include_hidden, rmdir, currdir, subdirs, reportlevel, path):
"""Directory processor for notebooks - allows the user to run nbconvert operations on notebooks, such as running all cells or clearing all cells.
To run tests, use: tm351nbtest
To zip folders (with the option or running notebook processors on zipped files), use: tm351zip
"""
directoryProcessor(path,
mode=file_processor, outpath=outpath, inplace=inplace,
include_hidden=include_hidden,
dir_excludes=exclude_dir,
file_excludes=exclude_file, rmdir=rmdir, currdir=currdir,
subdirs=subdirs,reportlevel=reportlevel)
from github import Github
import getpass
import base64
import logging
from github.GithubException import GithubException
def get_sha_for_tag(repository, tag):
"""
Returns a commit PyGithub object for the specified repository and tag.
"""
branches = repository.get_branches()
matched_branches = [match for match in branches if match.name == tag]
if matched_branches:
return matched_branches[0].commit.sha
tags = repository.get_tags()
matched_tags = [match for match in tags if match.name == tag]
if not matched_tags:
raise ValueError('No Tag or Branch exists with that name')
return matched_tags[0].commit.sha
def download_directory(repository, sha, server_path, outpath='gh_downloads', file_processor=None):
"""
Download all contents at server_path with commit tag sha in
the repository.
"""
contents = repository.get_dir_contents(server_path, ref=sha)
if not os.path.exists(outpath):
os.makedirs(outpath)
for content in contents:
print("Downloading: %s" % content.path)
if content.type == 'dir':
download_directory(repository, sha, content.path, '/'.join([outpath,content.name]))
else:
try:
path = content.path
file_content = repository.get_contents(path, ref=sha)
file_data = base64.b64decode(file_content.content)
outpathfile='/'.join([outpath,content.name])
file_out = open(outpathfile, "wb")
file_out.write(file_data)
file_out.close()
except (IOError, github.GithubException) as exc:
                #If we fail over because of a large blob, use the data api for the download
ret,error=exc.args
if 'message' in error and error['message']=='Not Found':
print('Hmm... file not found? {}'.format(path))
elif 'errors' in error and error['errors'][0]['code']=='too_large':
#print('...large file, trying blob download instead...')
file_content = repository.get_git_blob(content.sha)
file_data = base64.b64decode(file_content.content)
file_out = open('/'.join([outpath,content.name]), "wb")
file_out.write(file_data)
file_out.close()
#logging.error('Error processing %s: %s', content.path, exc)
#if content.name.endswith('.ipynb') and file_processor in ['clearOutput', 'clearOutputTest','runWithErrors' ]:
# notebookProcessor(outpathfile, file_processor)
def github_repo_branches(repository):
return [br.name for br in repository.get_branches()]
def github_repo_topdirs(contents):
return [i.name for i in contents if i.type=='dir']
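# Illustrative sketch (not part of the original module): list the branches and top-level
# directories of an assumed public repository, without authentication.
def _example_repo_overview():
    repository = Github().get_repo('undercertainty/tm351')
    print(github_repo_branches(repository))
    print(github_repo_topdirs(repository.get_dir_contents('.')))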
DEFAULT_REPO='undercertainty/tm351'
@click.command()
@click.option('--github-user', '-u', help="Your Github username.")
@click.option('--password', hide_input=True,
confirmation_prompt=False)
@click.option('--repo','-r', prompt='Repository ({})'.format(DEFAULT_REPO),
help='Repository name')
@click.option('--branch','-b',help='Branch or tag to download')
@click.option('--directory', help='Directory to download (or: all)')
@click.option('--savedir',type=click.Path(resolve_path=False),
help='Directory to download repo / repo dir into; default is dir name')
@click.option('--file-processor', type=click.Choice(['clearOutput', 'runWithErrors']), help='Optionally specify a file processor to be run against downloaded notebooks.')
@click.option('--zip/--no-zip', default=False, help='Optionally create a zip file of the downloaded repository/directory with the same name as the repository/directory.')
@click.option('--auth/--no-auth', default=True, help="By default, run with auth (prompt for credentials)")
@click.option('--with-tests','-t',is_flag=True, help="Run tests on notebooks after download")
@click.option('--logfile',type=click.Path(resolve_path=False), help='Path to logfile')
def cli_gitrepos(github_user, password, repo, branch, directory, savedir, file_processor, zip, auth, with_tests, logfile):
"""Download files from a specified branch in a particular git repository.
The download can also be limited to just the contents of a specified directory.
Don't worry that there look to be a lot of arguments - you will be prompted for them if you just run: tm351gitrepos
"""
if auth or github_user:
if not github_user: github_user = click.prompt('\nGithub username')
if not password: password = click.prompt('\nGithub password', hide_input=True)
github = Github(github_user, password)
#Show we're keeping no password...
password = None
auth = True
else: github = Github()
if auth:
user = github.get_user()
#organisations = github.get_user().get_orgs()
print('Logging into git as {} ({})'.format(github_user, user.name))
repo = repo or DEFAULT_REPO
repository = github.get_repo(repo)
if not branch:
print('\nBranches available:\n\t{}'.format('\n\t'.join(github_repo_branches(repository)) ))
branch = click.prompt('\nWhich branch? (master)')
branch_or_tag_to_download = branch or 'master'
sha = get_sha_for_tag(repository, branch_or_tag_to_download)
another = ''
while another!='-':
if not directory:
if branch!='master':
contents = repository.get_dir_contents('.', ref=sha)
else:
contents = repository.get_dir_contents('.')
print('\nYou can download all directories from this repo (all) or select one:\n\t{}'.format('\n\t'.join(github_repo_topdirs(contents))))
directory = click.prompt('Which directory? (all)')
directory_to_download = '.' if (not directory or directory=='all') else directory
outpath = savedir or directory_to_download
if outpath == '.' and savedir !='.': outpath=repo.replace('/','_')+'_files'
msg='\nOkay... downloading {}/{}'.format(repo,directory_to_download )
if file_processor is not None:
msg = msg + ' using notebook processor: {}'.format(file_processor)
else: msg = msg + ' with no notebook processing'
print(msg)
download_directory(repository, sha, directory_to_download, outpath,file_processor )
if file_processor in ['clearOutput', 'clearOutputTest','runWithErrors' ]:
click.echo('\nRunning notebook processor: {}'.format(file_processor))
directoryProcessor(outpath, mode=file_processor, subdirs=True,
reportlevel=1, logfile=logfile)
if logfile:
click.echo('\nLog written to {}'.format(logfile))
if with_tests:
click.echo('\nRunning notebook tests over: {}'.format(outpath))
if not logfile: logfile = 'tests.log'
_notebookTest(outpath, logfile )
click.echo('\nLog written to {}'.format(logfile))
        if zip:
            print('\nZipping into: {}\nYou may also want to delete the working directory ({}).'.format(repository.name, outpath))
            zipper(outpath, repository.name)
else:
print('\n\nTo zip the downloaded directory, run something like: {}'.format('tm351zip {o} {z}\n\nTo run a notebook processor (OPTIONS: runWithErrors, clearOutput) while zipping: tm351zip "{o}" {z} --file-processor OPTION\n'.format(o=outpath,z=repository.name)))
directory=''
        another = click.prompt('\nDownload another directory from this branch? (To quit: -)')
#TODO
#print('\n\nTo run this command again: {}'.format())
| 1,166
| 0
| 122
|
9753d7bfd9a6580628e5db08bdd5fcda2b36ed62
| 42,974
|
py
|
Python
|
plots/instrument_plot.py
|
shaunwbell/EcoFOCI_MooringAnalysis
|
369617cac20b41c1606aeac83fbd9131b77e1c15
|
[
"MIT"
] | 3
|
2017-03-23T16:52:44.000Z
|
2022-03-08T16:53:29.000Z
|
plots/instrument_plot.py
|
shaunwbell/EcoFOCI_MooringAnalysis
|
369617cac20b41c1606aeac83fbd9131b77e1c15
|
[
"MIT"
] | 43
|
2017-04-03T23:12:52.000Z
|
2022-01-28T16:04:34.000Z
|
plots/instrument_plot.py
|
shaunwbell/EcoFOCI_MooringAnalysis
|
369617cac20b41c1606aeac83fbd9131b77e1c15
|
[
"MIT"
] | 2
|
2017-03-30T22:01:25.000Z
|
2019-10-17T17:30:29.000Z
|
#!/usr/bin/env python
"""
class definitions for standard 1 variable plots
class definitions for standard 2 variable plots
class definitions for standard 3 variable plots
History:
--------
2019-05-21: fixed an error in the calculation that used the corrected udata to correct vdata
"""
# System Stack
import datetime
# science stack
import numpy as np
# Visual Stack
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.dates import (
YearLocator,
WeekdayLocator,
MonthLocator,
DayLocator,
HourLocator,
DateFormatter,
)
import matplotlib.ticker as ticker
class TimeseriesPorpertyPropertyPlot(object):
""" class to plot property vs property plots with density iso-contours"""
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(
self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="seaborn-whitegrid"
):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
@staticmethod
| 36.635976
| 119
| 0.588472
|
#!/usr/bin/env python
"""
class definitions for standard 1 variable plots
class definitions for standard 2 variable plots
class definitions for standard 3 variable plots
History:
--------
2019-05-21: fixed an error in the calculation that used the corrected udata to correct vdata
"""
# System Stack
import datetime
# science stack
import numpy as np
import seawater as sw  # EOS-80 density (sw.dens0) used for the T-S iso-contours below
# Visual Stack
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.dates import (
YearLocator,
WeekdayLocator,
MonthLocator,
DayLocator,
HourLocator,
DateFormatter,
)
import matplotlib.ticker as ticker
class Timeseries1varPlot(object):
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="bmh"):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(self, xdata=None, ydata=None, ylabel=None, **kwargs):
fig = plt.figure(1)
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
p1 = ax1.plot(xdata, ydata, self.plotstyle, markersize=2)
ax1.set_ylim([np.nanmin(ydata), np.nanmax(ydata)])
ax1.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel)
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
return plt, fig
class Timeseries2varPlot(object):
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="bmh"):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(
self, xdata=None, ydata=None, ydata2=None, ylabel=None, ylabel2=None, **kwargs
):
fig = plt.figure(1)
ax1 = plt.subplot2grid((2, 1), (0, 0), colspan=1, rowspan=1)
p1 = ax1.plot(xdata, ydata, self.plotstyle, markersize=2)
ax1.set_ylim([np.nanmin(ydata), np.nanmax(ydata)])
ax1.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel)
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax1.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax1.spines["bottom"].set_visible(False)
ax2 = plt.subplot2grid((2, 1), (1, 0), colspan=1, rowspan=1)
p2 = ax2.plot(xdata, ydata2, self.plotstyle, markersize=2)
ax2.set_ylim([np.nanmin(ydata2), np.nanmax(ydata2)])
ax2.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel2)
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax2.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax2.tick_params(axis="x", which="both", top="off")
ax2.spines["top"].set_visible(False)
return plt, fig
class Timeseries3varPlot(object):
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="bmh"):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(
self,
xdata=None,
ydata=None,
ydata2=None,
ydata3=None,
ylabel=None,
ylabel2=None,
ylabel3=None,
**kwargs
):
fig = plt.figure(1)
ax1 = plt.subplot2grid((3, 1), (0, 0), colspan=1, rowspan=1)
p1 = ax1.plot(xdata, ydata, self.plotstyle, markersize=2)
ax1.set_ylim([np.nanmin(ydata), np.nanmax(ydata)])
ax1.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel)
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax1.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax1.spines["bottom"].set_visible(False)
ax2 = plt.subplot2grid((3, 1), (1, 0), colspan=1, rowspan=1)
p2 = ax2.plot(xdata, ydata2, self.plotstyle, markersize=2)
ax2.set_ylim([np.nanmin(ydata2), np.nanmax(ydata2)])
ax2.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel2)
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax2.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax2.tick_params(
axis="x", which="both", top="off", bottom="off", labelbottom="off"
)
ax2.spines["top"].set_visible(False)
ax2.spines["bottom"].set_visible(False)
ax3 = plt.subplot2grid((3, 1), (2, 0), colspan=1, rowspan=1)
p2 = ax3.plot(xdata, ydata3, self.plotstyle, markersize=2)
ax3.set_ylim([np.nanmin(ydata3), np.nanmax(ydata3)])
ax3.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel3)
ax3.xaxis.set_major_locator(MonthLocator())
ax3.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax3.xaxis.set_major_formatter(ticker.NullFormatter())
ax3.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax3.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax3.tick_params(axis="x", which="both", top="off")
ax3.spines["top"].set_visible(False)
return plt, fig
class Timeseries1dStickPlot(object):
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="bmh"):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(self, timedata=None, udata=None, vdata=None, ylabel=None, **kwargs):
if kwargs["rotate"] != 0.0:
# when rotating vectors - positive(+) rotation is equal to ccw of the axis (cw of vector)
# - negative(-) rotation is equal to cw of the axis (ccw of the vector)
print("rotating vectors {} degrees".format(kwargs["rotate"]))
angle_offset_rad = np.deg2rad(kwargs["rotate"])
# 2019/05 fix: compute the rotated v-component from the original udata, not the already-rotated value
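# Rotation implemented below (standard 2-D rotation of the velocity vector by angle theta):
#   u' =  u*cos(theta) + v*sin(theta)
#   v' = -u*sin(theta) + v*cos(theta)
# both computed from the original (unrotated) u and v before either is overwritten.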
uprime = udata * np.cos(angle_offset_rad) + vdata * np.sin(angle_offset_rad)
vprime = -1.0 * udata * np.sin(angle_offset_rad) + vdata * np.cos(
angle_offset_rad
)
udata = uprime
vdata = vprime
magnitude = np.sqrt(udata ** 2 + vdata ** 2)
fig = plt.figure(1)
ax1 = plt.subplot2grid((2, 1), (0, 0), colspan=1, rowspan=1)
ax2 = plt.subplot2grid((2, 1), (1, 0), colspan=1, rowspan=1)
# Plot u and v components
# Plot quiver
ax1.set_ylim(-1 * np.nanmax(magnitude), np.nanmax(magnitude))
fill1 = ax1.fill_between(timedata, magnitude, 0, color="k", alpha=0.1)
# Fake 'box' to be able to insert a legend for 'Magnitude'
p = ax1.add_patch(plt.Rectangle((1, 1), 1, 1, fc="k", alpha=0.1))
leg1 = ax1.legend([p], ["Current magnitude [cm/s]"], loc="lower right")
leg1._drawFrame = False
# 1D Quiver plot
q = ax1.quiver(
timedata,
0,
udata,
vdata,
color="r",
units="y",
scale_units="y",
scale=1,
headlength=1,
headaxislength=1,
width=0.04,
alpha=0.95,
)
qk = plt.quiverkey(
q,
0.2,
0.05,
5,
r"$5 \frac{cm}{s}$",
labelpos="W",
fontproperties={"weight": "bold"},
)
# Plot u and v components
ax1.set_xticklabels(ax1.get_xticklabels(), visible=False)
ax2.set_xticklabels(ax2.get_xticklabels(), visible=True)
ax1.axes.get_xaxis().set_visible(False)
ax1.set_xlim(timedata.min() - 0.5, timedata.max() + 0.5)
ax1.set_ylabel("Velocity (cm/s)")
ax2.plot(timedata, vdata, "b-", linewidth=0.25)
ax2.plot(timedata, udata, "g-", linewidth=0.25)
ax2.set_xlim(timedata.min() - 0.5, timedata.max() + 0.5)
ax2.set_xlabel("Date (UTC)")
ax2.set_ylabel("Velocity (cm/s)")
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonth=range(1, 13), bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax1.spines["bottom"].set_visible(False)
ax2.spines["top"].set_visible(False)
ax1.xaxis.set_ticks_position("top")
ax2.xaxis.set_ticks_position("bottom")
ax2.yaxis.set_ticks_position("both")
ax2.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
# manual time limit sets
# ax1.set_xlim([datetime.datetime(2016,2,1),datetime.datetime(2016,9,15)])
# ax2.set_xlim([datetime.datetime(2016,2,1),datetime.datetime(2016,9,15)])
# Set legend location - See: http://matplotlib.org/users/legend_guide.html#legend-location
leg2 = plt.legend(["v", "u"], loc="upper left")
leg2._drawFrame = False
return plt, fig
class Timeseries1dStickPlot_2params(object):
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="bmh"):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(self, timedata=None, udata=None, vdata=None, ylabel=None, **kwargs):
if kwargs["rotate"] != 0.0:
print("rotating vectors")
angle_offset_rad = np.deg2rad(kwargs["rotate"])
# compute both rotated components from the original u and v (same 2019-05 fix as above)
uprime = udata * np.cos(angle_offset_rad) + vdata * np.sin(angle_offset_rad)
vprime = -1.0 * udata * np.sin(angle_offset_rad) + vdata * np.cos(
angle_offset_rad
)
udata = uprime
vdata = vprime
magnitude = np.sqrt(udata ** 2 + vdata ** 2)
fig = plt.figure(1)
ax2 = plt.subplot2grid((2, 1), (0, 0), colspan=1, rowspan=1)
ax1 = plt.subplot2grid((2, 1), (1, 0), colspan=1, rowspan=1)
# Plot u and v components
# Plot quiver
ax1.set_ylim(-1 * np.nanmax(magnitude), np.nanmax(magnitude))
fill1 = ax1.fill_between(timedata, magnitude, 0, color="k", alpha=0.1)
# Fake 'box' to be able to insert a legend for 'Magnitude'
p = ax1.add_patch(plt.Rectangle((1, 1), 1, 1, fc="k", alpha=0.1))
leg1 = ax1.legend([p], ["Current magnitude [cm/s]"], loc="lower right")
leg1._drawFrame = False
# 1D Quiver plot
q = ax1.quiver(
timedata,
0,
udata,
vdata,
color="r",
units="y",
scale_units="y",
scale=1,
headlength=1,
headaxislength=1,
width=0.04,
alpha=0.95,
)
qk = plt.quiverkey(
q,
0.2,
0.05,
25,
r"$25 \frac{cm}{s}$",
labelpos="W",
fontproperties={"weight": "bold"},
)
# Plot u and v components
ax1.set_xticklabels(ax1.get_xticklabels(), visible=True)
ax2.set_xticklabels(ax2.get_xticklabels(), visible=True)
ax1.axes.get_xaxis().set_visible(True)
ax1.set_xlim(timedata.min() - 0.5, timedata.max() + 0.5)
ax1.set_ylabel("Velocity (cm/s)")
ax2.plot(kwargs["timedata2"], kwargs["data2"][:, 0, 0, 0], "k-", linewidth=1)
ax2.set_xlim(timedata.min() - 0.5, timedata.max() + 0.5)
ax2.set_xlabel("Date (UTC)")
ax2.set_ylabel("Salinity (PSU)")
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonth=range(1, 13), bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(ticker.NullFormatter())
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonth=range(1, 13), bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax2.spines["bottom"].set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.xaxis.set_ticks_position("top")
ax2.xaxis.set_ticks_position("bottom")
ax2.yaxis.set_ticks_position("both")
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax2.tick_params(axis="both", which="minor", labelsize=self.labelsize)
return plt, fig
class Timeseries2dStickPlot(object):
# TODO
pass
class TimeseriesImagePlot(object):
pass
class TimeseriesWPAK(object):
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="bmh"):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(self, xdata=None, ydata=None, **kwargs):
"""
Purpose
--------
Plot all sfc met observations as one image
"""
TC = ydata["AT_21"][:, 0, 0, 0]
TD = ydata["RH_910"][:, 0, 0, 0]
Press = ydata["BP_915"][:, 0, 0, 0]
WindU = ydata["WU_422"][:, 0, 0, 0]
WindV = ydata["WV_423"][:, 0, 0, 0]
Rad = ydata["Qs_133"][:, 0, 0, 0]
Teq = ydata["Teq_1800"][:, 0, 0, 0]
bat = ydata["BAT_106"][:, 0, 0, 0]
comp = ydata["comp_1404"][:, 0, 0, 0]
fig = plt.figure()
# text locations
right = 0.05
top = 0.95
# TC, TD
ax = fig.add_subplot(911)
tplot = ax.plot(xdata, TC)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(tplot, "color", "r", "linestyle", "-", "linewidth", 0.5)
ax.text(
right,
top,
"Air Temperature ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
plt.ylabel("(Deg C)")
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax.spines["bottom"].set_visible(False)
ax.yaxis.set_ticks_position("both")
# RH
ax = fig.add_subplot(912)
tplot = ax.plot(xdata, TD)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(tplot, "color", "g", "linestyle", "-", "linewidth", 0.5)
ax.text(
right,
top,
"Relative Humidity ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
plt.ylabel("%")
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.yaxis.set_ticks_position("both")
# Press
ax = fig.add_subplot(913)
tplot = ax.plot(xdata, Press)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(tplot, "color", "k", "linestyle", "-", "linewidth", 0.5)
ax.text(
right,
top,
"Pressure ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
plt.ylabel("(mb)")
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.yaxis.set_ticks_position("both")
# Plot quiver
# WindU[WindU == np.nan] = 0
# WindV[WindV == np.nan] = 0
ax1 = fig.add_subplot(914)
ax2 = fig.add_subplot(915)
magnitude = (WindU ** 2 + WindV ** 2) ** 0.5
ax1.set_ylim(-1 * np.nanmax(magnitude), np.nanmax(magnitude))
ax1.set_xlim(min(xdata), max(xdata))
fill1 = ax1.fill_between(xdata, magnitude, 0, color="k", alpha=0.1)
# Fake 'box' to be able to insert a legend for 'Magnitude'
p = ax1.add_patch(plt.Rectangle((1, 1), 1, 1, fc="k", alpha=0.5))
leg1 = ax1.legend([p], ["Wind magnitude [m/s]"], loc="lower right")
leg1._drawFrame = False
# 1D Quiver plot
q = ax1.quiver(
xdata,
0,
WindU,
WindV,
color="r",
units="y",
scale_units="y",
scale=1,
headlength=1,
headaxislength=1,
width=0.04,
alpha=0.75,
)
qk = plt.quiverkey(
q,
0.2,
0.05,
2,
r"$2 \frac{m}{s}$",
labelpos="W",
fontproperties={"weight": "bold"},
)
# Plot u and v components
ax1.set_ylabel("Velocity (m/s)")
ax2.plot(xdata, WindV, "b-")
ax2.plot(xdata, WindU, "g-")
ax2.set_xlim(min(xdata), max(xdata))
ax2.set_ylabel("Velocity (m/s)")
ax2.yaxis.set_ticks_position("both")
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(ticker.NullFormatter())
ax2.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax2.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax1.set_ylabel("Velocity (m/s)")
ax1.yaxis.set_ticks_position("both")
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(ticker.NullFormatter())
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax1.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax1.axes.get_xaxis().set_visible(False)
ax1.spines["top"].set_visible(False)
ax1.spines["bottom"].set_visible(False)
ax2.axes.get_xaxis().set_visible(False)
ax2.spines["top"].set_visible(False)
ax2.spines["bottom"].set_visible(False)
# Set legend location - See: http://matplotlib.org/users/legend_guide.html#legend-location
leg2 = plt.legend(["v", "u"], loc="upper left")
leg2._drawFrame = False
# Rad
ax = fig.add_subplot(916)
tplot = ax.plot(xdata, Rad)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(tplot, "color", "k", "linestyle", "-", "linewidth", 0.5)
ax.fill_between(xdata, 0, Rad, facecolor="yellow")
ax.text(
right,
top,
"Shortwave Radiation ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
plt.ylabel("(W*m^-2)")
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.yaxis.set_ticks_position("both")
# system vars - equil temp, battery, compass
ax = fig.add_subplot(917)
tplot = ax.plot(xdata, Teq)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(tplot, "color", "k", "linestyle", "-", "linewidth", 0.5)
ax.text(
right,
top,
"Teq ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.yaxis.set_ticks_position("both")
ax = fig.add_subplot(918)
tplot = ax.plot(xdata, bat)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(tplot, "color", "k", "linestyle", "-", "linewidth", 0.5)
ax.text(
right,
top,
"battery ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
plt.ylabel("volts")
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.yaxis.set_ticks_position("both")
ax = fig.add_subplot(919)
tplot = ax.plot(xdata, comp)
ax.set_xlim(min(xdata), max(xdata))
plt.setp(
tplot, "color", "k", "linestyle", "None", "marker", ".", "markersize", 2.5
)
ax.text(
right,
top,
"compass ",
horizontalalignment="left",
verticalalignment="top",
transform=ax.transAxes,
)
plt.ylabel("degrees")
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax.tick_params(axis="y", which="major", labelsize=self.labelsize)
ax.tick_params(axis="x", which="both", top="off")
ax.spines["top"].set_visible(False)
ax.set_xlabel("Date (UTC)")
return (plt, fig)
def plot_rad(
self,
xdata=None,
ydata=None,
ydata2=None,
ylabel=None,
textlabel=None,
textlabel2=None,
**kwargs
):
# Shortwave Radiation
fig = plt.figure(1)
ax1 = plt.subplot2grid((2, 1), (0, 0), colspan=1, rowspan=1)
p1 = ax1.plot(xdata, ydata, "k", linewidth=0.25)
ax1.fill_between(xdata, 0, ydata, facecolor="yellow")
ax1.set_ylim([np.nanmin(ydata), np.nanmax(ydata)])
ax1.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel)
ax1.xaxis.set_major_locator(MonthLocator())
ax1.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax1.xaxis.set_major_formatter(ticker.NullFormatter())
ax1.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax1.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax1.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
ax1.spines["bottom"].set_visible(False)
ax1.text(
0.05,
0.95,
textlabel,
horizontalalignment="left",
verticalalignment="top",
transform=ax1.transAxes,
)
# Shortwave Radiation Estimated (1000Wm^-2 incident)
ax2 = plt.subplot2grid((2, 1), (1, 0), colspan=1, rowspan=1)
p2 = ax2.plot(xdata, ydata2, "k", linewidth=0.25)
ax2.fill_between(xdata, 0, ydata2, facecolor="yellow")
ax2.set_ylim([np.nanmin(ydata2), np.nanmax(ydata2)])
ax2.set_xlim([np.nanmin(xdata), np.nanmax(xdata)])
plt.ylabel(ylabel)
ax2.xaxis.set_major_locator(MonthLocator())
ax2.xaxis.set_minor_locator(MonthLocator(bymonthday=15))
ax2.xaxis.set_major_formatter(ticker.NullFormatter())
ax2.xaxis.set_minor_formatter(DateFormatter("%b %y"))
ax2.tick_params(axis="both", which="minor", labelsize=self.labelsize)
ax2.tick_params(axis="x", which="both", top="off")
ax2.spines["top"].set_visible(False)
ax2.text(
0.05,
0.95,
textlabel2,
horizontalalignment="left",
verticalalignment="top",
transform=ax2.transAxes,
)
ax2.set_xlabel("Date (UTC)")
return (plt, fig)
class TimeseriesPorpertyPropertyPlot(object):
""" class to plot property vs property plots with density iso-contours"""
mpl.rcParams["svg.fonttype"] = "none"
mpl.rcParams["ps.fonttype"] = 42
mpl.rcParams["pdf.fonttype"] = 42
def __init__(
self, fontsize=10, labelsize=10, plotstyle="k-.", stylesheet="seaborn-whitegrid"
):
"""Initialize the timeseries with items that do not change.
This sets up the axes and station locations. The `fontsize` and `spacing`
are also specified here to ensure that they are consistent between individual
station elements.
Parameters
----------
fontsize : int
The fontsize to use for drawing text
labelsize : int
The fontsize to use for labels
stylesheet : str
Choose a mpl stylesheet [u'seaborn-darkgrid',
u'seaborn-notebook', u'classic', u'seaborn-ticks',
u'grayscale', u'bmh', u'seaborn-talk', u'dark_background',
u'ggplot', u'fivethirtyeight', u'seaborn-colorblind',
u'seaborn-deep', u'seaborn-whitegrid', u'seaborn-bright',
u'seaborn-poster', u'seaborn-muted', u'seaborn-paper',
u'seaborn-white', u'seaborn-pastel', u'seaborn-dark',
u'seaborn-dark-palette']
"""
self.fontsize = fontsize
self.labelsize = labelsize
self.plotstyle = plotstyle
plt.style.use(stylesheet)
@staticmethod
def add_title(mooringid="", lat=-99.9, lon=-99.9, depth=9999, instrument=""):
"""Pass parameters to annotate the title of the plot
This sets the standard plot title using common meta information from PMEL/EPIC style netcdf files
Parameters
----------
mooringid : str
Mooring Identifier
lat : float
The latitude of the mooring
lon : float
The longitude of the mooring
depth : int
Nominal depth of the instrument
instrument : str
Name/identifier of the instrument plotted
"""
ptitle = (
"Plotted on: {time:%Y/%m/%d %H:%M} \n from {mooringid} Lat: {latitude:3.3f} Lon: {longitude:3.3f}"
" Depth: {depth}\n : {instrument}"
).format(
time=datetime.datetime.now(),
mooringid=mooringid,
latitude=lat,
longitude=lon,
depth=depth,
instrument=instrument,
)
return ptitle
def plot(
self, var1, var2, var3=None, var1range=[0, 1], var2range=[0, 10], ptitle=""
):
# Calculate how many gridcells we need in the x and y dimensions
xdim = round((var1range[1] - var1range[0]) / 0.1 + 1, 0)
ydim = round((var2range[1] - var2range[0]) + 2, 0)
# print 'ydim: ' + str(ydim) + ' xdim: ' + str(xdim) + ' \n'
if (xdim > 10000) or (ydim > 10000):
print(
"To many dimensions for grid in "
+ cruise
+ cast
+ " file. Likely missing data \n"
)
return
# Create empty grid of zeros
dens = np.zeros((int(ydim), int(xdim)))
# Create temperature and salinity vectors of appropriate dimensions
ti = np.linspace(0, ydim - 1, int(ydim)) + var2range[0]
si = np.linspace(0, xdim - 1, int(xdim)) * 0.1 + var1range[0]
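# Grid resolution: salinity (si) in 0.1 PSU steps across var1range, temperature (ti)
# in 1 degC steps across var2range.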
# Loop to fill in grid with densities
for j in range(0, int(ydim)):
for i in range(0, int(xdim)):
dens[j, i] = sw.dens0(si[i], ti[j])
# Subtract 1000 to convert to sigma-t
dens = dens - 1000
# Plot data ***********************************************
fig = plt.figure(1)
ax1 = plt.subplot2grid((1, 1), (0, 0), colspan=1, rowspan=1)
CS = plt.contour(si, ti, dens, linestyles="dashed", colors="k")
plt.clabel(CS, fontsize=12, inline=1, fmt="%1.1f")  # Label the density contours
ts = ax1.scatter(var1, var2, s=25, c=var3, marker=".", cmap="Blues")
plt.colorbar(ts, label="DOY")
plt.ylim(var2range[0], var2range[1])
plt.xlim(var1range[0], var1range[1])
ax1.set_xlabel("Salinity (PSU)")
ax1.set_ylabel("Temperature (C)")
return plt, fig
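# --- Illustrative usage sketch (hypothetical; not part of the original module) ---
# A minimal example of driving one of the plot classes above. The mooring metadata,
# synthetic data, and output filename are assumptions made purely for illustration.
#
#   import numpy as np
#   import datetime
#   from matplotlib.dates import date2num
#
#   times = date2num([datetime.datetime(2016, 1, 1) + datetime.timedelta(days=d)
#                     for d in range(365)])
#   temps = 5.0 + 3.0 * np.sin(2.0 * np.pi * np.arange(365) / 365.0)
#
#   plotter = Timeseries1varPlot(stylesheet="bmh")
#   plt_handle, fig = plotter.plot(xdata=times, ydata=temps, ylabel="Temperature (C)")
#   fig.suptitle(plotter.add_title(mooringid="hypothetical-mooring", lat=56.87,
#                                  lon=-164.05, depth=20, instrument="hypothetical SBE-37"),
#                fontsize=plotter.fontsize)
#   fig.savefig("timeseries_1var.png")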
| 15,568
| 24,092
| 210
|
1e8a2fa776f5bd96393450257a891c27824bdb82
| 14,904
|
py
|
Python
|
tests/test_interactions/test_omit_interactions.py
|
radifar/PyPLIF-HIPPOS
|
95fc5dd81b900e84ae6f3368c4b70a08d17257dd
|
[
"HPND"
] | 13
|
2020-07-29T12:19:56.000Z
|
2022-02-07T04:48:19.000Z
|
tests/test_interactions/test_omit_interactions.py
|
radifar/PyPLIF-HIPPOS
|
95fc5dd81b900e84ae6f3368c4b70a08d17257dd
|
[
"HPND"
] | 5
|
2021-03-30T01:11:49.000Z
|
2021-09-13T11:49:35.000Z
|
tests/test_interactions/test_omit_interactions.py
|
radifar/PyPLIF-HIPPOS
|
95fc5dd81b900e84ae6f3368c4b70a08d17257dd
|
[
"HPND"
] | 5
|
2020-08-06T07:26:06.000Z
|
2021-10-30T17:16:39.000Z
|
"""
Tests for the omit-interaction feature.
"""
import os
import sys
from collections import namedtuple
from pyplif_hippos import ParseConfig, hippos, similarity
def test_configuration_single_omit_interaction(tmpdir):
"""Test configuration for omitting specific interaction"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG223
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction = hippos_config.omit_interaction[0]
# Assert
assert omit_interaction.interaction_type == "hydrophobic"
assert omit_interaction.res_name == ["ARG223"]
def test_configuration_omit_multiple_residue_interaction(tmpdir):
"""Test configuration for omitting multiple residue interaction"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG150 TRP177 ARG223
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction = hippos_config.omit_interaction[0]
# Assert
assert omit_interaction.interaction_type == "hydrophobic"
assert omit_interaction.res_name == ["ARG150", "TRP177", "ARG223"]
def test_configuration_omit_multiple_interaction_type(tmpdir):
"""Test configuration for omitting multiple interaction type"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG223
omit_interaction h_bond ARG292
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction_1 = hippos_config.omit_interaction[0]
omit_interaction_2 = hippos_config.omit_interaction[1]
# Assert
assert omit_interaction_1.interaction_type == "hydrophobic"
assert omit_interaction_1.res_name == ["ARG223"]
assert omit_interaction_2.interaction_type == "h_bond"
assert omit_interaction_2.res_name == ["ARG292"]
def test_configuration_long_interaction_type(tmpdir):
"""Test configuration checking all long interaction_type"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG116
omit_interaction aromatic GLU117
omit_interaction h_bond LEU132
omit_interaction electrostatic LYS148
omit_interaction h_bond_donor ASP149
omit_interaction h_bond_acceptor ARG150
omit_interaction electrostatic_positive ARG154
omit_interaction electrostatic_negative TRP177
omit_interaction aromatic_facetoface SER178
omit_interaction aromatic_edgetoface ILE221
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction_1 = hippos_config.omit_interaction[0]
omit_interaction_2 = hippos_config.omit_interaction[1]
omit_interaction_3 = hippos_config.omit_interaction[2]
omit_interaction_4 = hippos_config.omit_interaction[3]
omit_interaction_5 = hippos_config.omit_interaction[4]
omit_interaction_6 = hippos_config.omit_interaction[5]
omit_interaction_7 = hippos_config.omit_interaction[6]
omit_interaction_8 = hippos_config.omit_interaction[7]
omit_interaction_9 = hippos_config.omit_interaction[8]
omit_interaction_10 = hippos_config.omit_interaction[9]
# Assert
assert omit_interaction_1.interaction_type == "hydrophobic"
assert omit_interaction_1.res_name == ["ARG116"]
assert omit_interaction_2.interaction_type == "aromatic"
assert omit_interaction_2.res_name == ["GLU117"]
assert omit_interaction_3.interaction_type == "h_bond"
assert omit_interaction_3.res_name == ["LEU132"]
assert omit_interaction_4.interaction_type == "electrostatic"
assert omit_interaction_4.res_name == ["LYS148"]
assert omit_interaction_5.interaction_type == "h_bond_donor"
assert omit_interaction_5.res_name == ["ASP149"]
assert omit_interaction_6.interaction_type == "h_bond_acceptor"
assert omit_interaction_6.res_name == ["ARG150"]
assert omit_interaction_7.interaction_type == "electrostatic_positive"
assert omit_interaction_7.res_name == ["ARG154"]
assert omit_interaction_8.interaction_type == "electrostatic_negative"
assert omit_interaction_8.res_name == ["TRP177"]
assert omit_interaction_9.interaction_type == "aromatic_facetoface"
assert omit_interaction_9.res_name == ["SER178"]
assert omit_interaction_10.interaction_type == "aromatic_edgetoface"
assert omit_interaction_10.res_name == ["ILE221"]
def test_configuration_short_interaction_type(tmpdir):
"""Test configuration checking all short interaction_type"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction HPB ARG116
omit_interaction ARM GLU117
omit_interaction HBD LEU132
omit_interaction ELE LYS148
omit_interaction HBD_DON ASP149
omit_interaction HBD_ACC ARG150
omit_interaction ELE_POS ARG154
omit_interaction ELE_NEG TRP177
omit_interaction ARM_F2F SER178
omit_interaction ARM_E2F ILE221
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction_1 = hippos_config.omit_interaction[0]
omit_interaction_2 = hippos_config.omit_interaction[1]
omit_interaction_3 = hippos_config.omit_interaction[2]
omit_interaction_4 = hippos_config.omit_interaction[3]
omit_interaction_5 = hippos_config.omit_interaction[4]
omit_interaction_6 = hippos_config.omit_interaction[5]
omit_interaction_7 = hippos_config.omit_interaction[6]
omit_interaction_8 = hippos_config.omit_interaction[7]
omit_interaction_9 = hippos_config.omit_interaction[8]
omit_interaction_10 = hippos_config.omit_interaction[9]
# Assert
assert omit_interaction_1.interaction_type == "hydrophobic"
assert omit_interaction_1.res_name == ["ARG116"]
assert omit_interaction_2.interaction_type == "aromatic"
assert omit_interaction_2.res_name == ["GLU117"]
assert omit_interaction_3.interaction_type == "h_bond"
assert omit_interaction_3.res_name == ["LEU132"]
assert omit_interaction_4.interaction_type == "electrostatic"
assert omit_interaction_4.res_name == ["LYS148"]
assert omit_interaction_5.interaction_type == "h_bond_donor"
assert omit_interaction_5.res_name == ["ASP149"]
assert omit_interaction_6.interaction_type == "h_bond_acceptor"
assert omit_interaction_6.res_name == ["ARG150"]
assert omit_interaction_7.interaction_type == "electrostatic_positive"
assert omit_interaction_7.res_name == ["ARG154"]
assert omit_interaction_8.interaction_type == "electrostatic_negative"
assert omit_interaction_8.res_name == ["TRP177"]
assert omit_interaction_9.interaction_type == "aromatic_facetoface"
assert omit_interaction_9.res_name == ["SER178"]
assert omit_interaction_10.interaction_type == "aromatic_edgetoface"
assert omit_interaction_10.res_name == ["ILE221"]
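# The asserts above imply that ParseConfig expands the short interaction codes to the
# long names. A plausible lookup (an assumption; the real mapping lives inside
# pyplif_hippos.ParseConfig) would be:
#   SHORT_TO_LONG = {
#       "HPB": "hydrophobic", "ARM": "aromatic", "HBD": "h_bond", "ELE": "electrostatic",
#       "HBD_DON": "h_bond_donor", "HBD_ACC": "h_bond_acceptor",
#       "ELE_POS": "electrostatic_positive", "ELE_NEG": "electrostatic_negative",
#       "ARM_F2F": "aromatic_facetoface", "ARM_E2F": "aromatic_edgetoface",
#   }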
def test_replace_bit_char():
"""Test bit replacement function for omitted residue"""
# Arrange
bitstring = "1000001"
omit_hydrophobic = [1, 0, 0, 0, 0, 0, 0]
omit_aromatic = [0, 1, 1, 0, 0, 0, 0]
omit_h_bond = [0, 0, 0, 1, 1, 0, 0]
omit_electrostatic = [0, 0, 0, 0, 0, 1, 1]
omit_h_bond_donor = [0, 0, 0, 1, 0, 0, 0]
omit_h_bond_acceptor = [0, 0, 0, 0, 1, 0, 0]
omit_electrostatic_positive = [0, 0, 0, 0, 0, 1, 0]
omit_electrostatic_negative = [0, 0, 0, 0, 0, 0, 1]
omit_aromatic_facetoface = [0, 1, 0, 0, 0, 0, 0]
omit_aromatic_edgetoface = [0, 0, 1, 0, 0, 0, 0]
# Act
bitstring_1 = hippos.replace_bit_char(bitstring, omit_hydrophobic)
bitstring_2 = hippos.replace_bit_char(bitstring, omit_aromatic)
bitstring_3 = hippos.replace_bit_char(bitstring, omit_h_bond)
bitstring_4 = hippos.replace_bit_char(bitstring, omit_electrostatic)
bitstring_5 = hippos.replace_bit_char(bitstring, omit_h_bond_donor)
bitstring_6 = hippos.replace_bit_char(bitstring, omit_h_bond_acceptor)
bitstring_7 = hippos.replace_bit_char(bitstring, omit_electrostatic_positive)
bitstring_8 = hippos.replace_bit_char(bitstring, omit_electrostatic_negative)
bitstring_9 = hippos.replace_bit_char(bitstring, omit_aromatic_facetoface)
bitstring_10 = hippos.replace_bit_char(bitstring, omit_aromatic_edgetoface)
# Assert
assert bitstring_1 == "n000001"
assert bitstring_2 == "1nn0001"
assert bitstring_3 == "100nn01"
assert bitstring_4 == "10000nn"
assert bitstring_5 == "100n001"
assert bitstring_6 == "1000n01"
assert bitstring_7 == "10000n1"
assert bitstring_8 == "100000n"
assert bitstring_9 == "1n00001"
assert bitstring_10 == "10n0001"
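# The expected strings above imply that hippos.replace_bit_char writes the character 'n'
# at every position flagged in the omit mask and leaves the other bits untouched.
# A minimal reference sketch (an assumption; not the actual PyPLIF-HIPPOS implementation):
#   def replace_bit_char(bitstring, omit_mask):
#       return "".join("n" if flag else bit for bit, flag in zip(bitstring, omit_mask))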
def test_cleanup_omitted_interaction():
"""Test for bitstring preparation prior to similarity calculation"""
# Arrange
refbit = "000001000101"
tgtbit = "11n00n000011"
# Act
clean_refbit, clean_tgtbit = similarity.clean_omitted_interactions(refbit, tgtbit)
# Assert
assert clean_refbit == "0000000101"
assert clean_tgtbit == "1100000011"
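# The expected values above imply that similarity.clean_omitted_interactions drops, from
# both fingerprints, every position where the target bitstring carries an 'n'.
# A minimal reference sketch (an assumption about the real implementation):
#   def clean_omitted_interactions(refbit, tgtbit):
#       keep = [i for i, bit in enumerate(tgtbit) if bit != "n"]
#       return ("".join(refbit[i] for i in keep), "".join(tgtbit[i] for i in keep))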
| 39.638298
| 495
| 0.790526
|
"""
Tests for the omit-interaction feature.
"""
import os
import sys
from collections import namedtuple
from pyplif_hippos import ParseConfig, hippos, similarity
def test_configuration_single_omit_interaction(tmpdir):
"""Test configuration for omitting specific interaction"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG223
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction = hippos_config.omit_interaction[0]
# Assert
assert omit_interaction.interaction_type == "hydrophobic"
assert omit_interaction.res_name == ["ARG223"]
def test_configuration_omit_multiple_residue_interaction(tmpdir):
"""Test configuration for omitting multiple residue interaction"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG150 TRP177 ARG223
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction = hippos_config.omit_interaction[0]
# Assert
assert omit_interaction.interaction_type == "hydrophobic"
assert omit_interaction.res_name == ["ARG150", "TRP177", "ARG223"]
def test_configuration_omit_multiple_interaction_type(tmpdir):
"""Test configuration for omitting multiple interaction type"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG223
omit_interaction h_bond ARG292
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction_1 = hippos_config.omit_interaction[0]
omit_interaction_2 = hippos_config.omit_interaction[1]
# Assert
assert omit_interaction_1.interaction_type == "hydrophobic"
assert omit_interaction_1.res_name == ["ARG223"]
assert omit_interaction_2.interaction_type == "h_bond"
assert omit_interaction_2.res_name == ["ARG292"]
def test_configuration_long_interaction_type(tmpdir):
"""Test configuration checking all long interaction_type"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction hydrophobic ARG116
omit_interaction aromatic GLU117
omit_interaction h_bond LEU132
omit_interaction electrostatic LYS148
omit_interaction h_bond_donor ASP149
omit_interaction h_bond_acceptor ARG150
omit_interaction electrostatic_positive ARG154
omit_interaction electrostatic_negative TRP177
omit_interaction aromatic_facetoface SER178
omit_interaction aromatic_edgetoface ILE221
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction_1 = hippos_config.omit_interaction[0]
omit_interaction_2 = hippos_config.omit_interaction[1]
omit_interaction_3 = hippos_config.omit_interaction[2]
omit_interaction_4 = hippos_config.omit_interaction[3]
omit_interaction_5 = hippos_config.omit_interaction[4]
omit_interaction_6 = hippos_config.omit_interaction[5]
omit_interaction_7 = hippos_config.omit_interaction[6]
omit_interaction_8 = hippos_config.omit_interaction[7]
omit_interaction_9 = hippos_config.omit_interaction[8]
omit_interaction_10 = hippos_config.omit_interaction[9]
# Assert
assert omit_interaction_1.interaction_type == "hydrophobic"
assert omit_interaction_1.res_name == ["ARG116"]
assert omit_interaction_2.interaction_type == "aromatic"
assert omit_interaction_2.res_name == ["GLU117"]
assert omit_interaction_3.interaction_type == "h_bond"
assert omit_interaction_3.res_name == ["LEU132"]
assert omit_interaction_4.interaction_type == "electrostatic"
assert omit_interaction_4.res_name == ["LYS148"]
assert omit_interaction_5.interaction_type == "h_bond_donor"
assert omit_interaction_5.res_name == ["ASP149"]
assert omit_interaction_6.interaction_type == "h_bond_acceptor"
assert omit_interaction_6.res_name == ["ARG150"]
assert omit_interaction_7.interaction_type == "electrostatic_positive"
assert omit_interaction_7.res_name == ["ARG154"]
assert omit_interaction_8.interaction_type == "electrostatic_negative"
assert omit_interaction_8.res_name == ["TRP177"]
assert omit_interaction_9.interaction_type == "aromatic_facetoface"
assert omit_interaction_9.res_name == ["SER178"]
assert omit_interaction_10.interaction_type == "aromatic_edgetoface"
assert omit_interaction_10.res_name == ["ILE221"]
def test_configuration_short_interaction_type(tmpdir):
"""Test configuration checking all short interaction_type"""
# Arrange
config_file = tmpdir.mkdir("sub").join("config.txt")
config_file.write(
"""
docking_method plants # plants or vina
docking_conf plants-003.conf
similarity_coef tanimoto mcconnaughey
full_ref 00000100000000000000000000000000000100000000000001000000000000010000001000000000000000000001000000000000000000000000000000101000000000000000000101000000000010000 00010101000000000000000000000000000100000000000001010000000000010000001000000000000010000000000000000000000001011000001000001000000000000000000101000000000000000 00010101000000100000000000000000000100000000000001010100100000010000001000000000000010000001000000000000010000000000100000101010000000000000000001000000000000000
residue_name ARG116 GLU117 LEU132 LYS148 ASP149 ARG150 ARG154 TRP177 SER178 ILE221 ARG223 THR224 GLU226 ALA245 HIS273 GLU275 GLU276 ARG292 ASP294 GLY347 ARG374 TRP408 TYR409
residue_number 40 41 56 72 73 74 78 101 102 145 147 148 150 169 197 199 200 216 218 271 298 332 333
omit_interaction HPB ARG116
omit_interaction ARM GLU117
omit_interaction HBD LEU132
omit_interaction ELE LYS148
omit_interaction HBD_DON ASP149
omit_interaction HBD_ACC ARG150
omit_interaction ELE_POS ARG154
omit_interaction ELE_NEG TRP177
omit_interaction ARM_F2F SER178
omit_interaction ARM_E2F ILE221
full_outfile plants_full_ifp.csv
sim_outfile plants_similarity.csv
logfile plants.log
"""
)
arg = os.path.join(config_file.dirname, config_file.basename)
if len(sys.argv) > 1:
sys.argv[1] = arg
else:
sys.argv.append(arg)
# Act
hippos_config = ParseConfig()
hippos_config.parse_config()
omit_interaction_1 = hippos_config.omit_interaction[0]
omit_interaction_2 = hippos_config.omit_interaction[1]
omit_interaction_3 = hippos_config.omit_interaction[2]
omit_interaction_4 = hippos_config.omit_interaction[3]
omit_interaction_5 = hippos_config.omit_interaction[4]
omit_interaction_6 = hippos_config.omit_interaction[5]
omit_interaction_7 = hippos_config.omit_interaction[6]
omit_interaction_8 = hippos_config.omit_interaction[7]
omit_interaction_9 = hippos_config.omit_interaction[8]
omit_interaction_10 = hippos_config.omit_interaction[9]
# Assert
assert omit_interaction_1.interaction_type == "hydrophobic"
assert omit_interaction_1.res_name == ["ARG116"]
assert omit_interaction_2.interaction_type == "aromatic"
assert omit_interaction_2.res_name == ["GLU117"]
assert omit_interaction_3.interaction_type == "h_bond"
assert omit_interaction_3.res_name == ["LEU132"]
assert omit_interaction_4.interaction_type == "electrostatic"
assert omit_interaction_4.res_name == ["LYS148"]
assert omit_interaction_5.interaction_type == "h_bond_donor"
assert omit_interaction_5.res_name == ["ASP149"]
assert omit_interaction_6.interaction_type == "h_bond_acceptor"
assert omit_interaction_6.res_name == ["ARG150"]
assert omit_interaction_7.interaction_type == "electrostatic_positive"
assert omit_interaction_7.res_name == ["ARG154"]
assert omit_interaction_8.interaction_type == "electrostatic_negative"
assert omit_interaction_8.res_name == ["TRP177"]
assert omit_interaction_9.interaction_type == "aromatic_facetoface"
assert omit_interaction_9.res_name == ["SER178"]
assert omit_interaction_10.interaction_type == "aromatic_edgetoface"
assert omit_interaction_10.res_name == ["ILE221"]
def test_replace_bit_char():
"""Test bit replacement function for omitted residue"""
# Arrange
bitstring = "1000001"
omit_hydrophobic = [1, 0, 0, 0, 0, 0, 0]
omit_aromatic = [0, 1, 1, 0, 0, 0, 0]
omit_h_bond = [0, 0, 0, 1, 1, 0, 0]
omit_electrostatic = [0, 0, 0, 0, 0, 1, 1]
omit_h_bond_donor = [0, 0, 0, 1, 0, 0, 0]
omit_h_bond_acceptor = [0, 0, 0, 0, 1, 0, 0]
omit_electrostatic_positive = [0, 0, 0, 0, 0, 1, 0]
omit_electrostatic_negative = [0, 0, 0, 0, 0, 0, 1]
omit_aromatic_facetoface = [0, 1, 0, 0, 0, 0, 0]
omit_aromatic_edgetoface = [0, 0, 1, 0, 0, 0, 0]
# Act
bitstring_1 = hippos.replace_bit_char(bitstring, omit_hydrophobic)
bitstring_2 = hippos.replace_bit_char(bitstring, omit_aromatic)
bitstring_3 = hippos.replace_bit_char(bitstring, omit_h_bond)
bitstring_4 = hippos.replace_bit_char(bitstring, omit_electrostatic)
bitstring_5 = hippos.replace_bit_char(bitstring, omit_h_bond_donor)
bitstring_6 = hippos.replace_bit_char(bitstring, omit_h_bond_acceptor)
bitstring_7 = hippos.replace_bit_char(bitstring, omit_electrostatic_positive)
bitstring_8 = hippos.replace_bit_char(bitstring, omit_electrostatic_negative)
bitstring_9 = hippos.replace_bit_char(bitstring, omit_aromatic_facetoface)
bitstring_10 = hippos.replace_bit_char(bitstring, omit_aromatic_edgetoface)
# Assert
assert bitstring_1 == "n000001"
assert bitstring_2 == "1nn0001"
assert bitstring_3 == "100nn01"
assert bitstring_4 == "10000nn"
assert bitstring_5 == "100n001"
assert bitstring_6 == "1000n01"
assert bitstring_7 == "10000n1"
assert bitstring_8 == "100000n"
assert bitstring_9 == "1n00001"
assert bitstring_10 == "10n0001"
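# A minimal sketch of the behaviour the assertions above imply for
# hippos.replace_bit_char; the packaged implementation may differ in detail.
def _reference_replace_bit_char(bitstring, omit_flags):
    # Positions flagged with 1 are masked with the placeholder character 'n'.
    return "".join("n" if flag else bit for bit, flag in zip(bitstring, omit_flags))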
def test_cleanup_omitted_interaction():
"""Test for bitstring preparation prior to similarity calculation"""
# Arrange
refbit = "000001000101"
tgtbit = "11n00n000011"
# Act
clean_refbit, clean_tgtbit = similarity.clean_omitted_interactions(refbit, tgtbit)
# Assert
assert clean_refbit == "0000000101"
assert clean_tgtbit == "1100000011"
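# A minimal sketch of what similarity.clean_omitted_interactions appears to do,
# judging from the expected values above; the packaged implementation may differ.
def _reference_clean_omitted_interactions(refbit, tgtbit):
    # Drop every position where the target bitstring carries the placeholder 'n',
    # keeping both strings aligned for the similarity calculation.
    keep = [i for i, bit in enumerate(tgtbit) if bit != "n"]
    return "".join(refbit[i] for i in keep), "".join(tgtbit[i] for i in keep)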
| 0
| 0
| 0
|
18b43d93d77ebe67e06100b1f3fc186f79af79fd
| 3,413
|
py
|
Python
|
lib/demucs_service.py
|
avillalobos/demucs_service
|
0ef2c45983620cd9522a5953387ec19662283456
|
[
"MIT"
] | 2
|
2021-03-05T11:25:23.000Z
|
2022-01-03T19:42:28.000Z
|
lib/demucs_service.py
|
avillalobos/demucs_service
|
0ef2c45983620cd9522a5953387ec19662283456
|
[
"MIT"
] | null | null | null |
lib/demucs_service.py
|
avillalobos/demucs_service
|
0ef2c45983620cd9522a5953387ec19662283456
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
from lib.demucs import demucs
from lib.demucs.demucs import model
from lib.demucs.demucs.audio import AudioFile
from lib.demucs.demucs.utils import apply_model, load_model
from pathlib import Path
from scipy.io import wavfile
# within the demucs directory
sys.modules['demucs.model'] = model
sys.modules['demucs'] = demucs
class DemucsService():
"""
def encode_mp3(wav, path, bitrate=320, verbose=False):
try:
import lameenc
except ImportError:
print("Failed to call lame encoder. Maybe it is not installed? "
"On windows, run `python.exe -m pip install -U lameenc`, "
"on OSX/Linux, run `python3 -m pip install -U lameenc`, "
"then try again.", file=sys.stderr)
sys.exit(1)
encoder = lameenc.Encoder()
encoder.set_bit_rate(bitrate)
encoder.set_in_sample_rate(44100)
encoder.set_channels(2)
encoder.set_quality(2) # 2-highest, 7-fastest
if not verbose:
encoder.silence()
mp3_data = encoder.encode(wav.tostring())
mp3_data += encoder.flush()
with open(path, "wb") as f:
f.write(mp3_data)
"""
| 36.698925
| 79
| 0.612364
|
#!/usr/bin/python3
import sys
from lib.demucs import demucs
from lib.demucs.demucs import model
from lib.demucs.demucs.audio import AudioFile
from lib.demucs.demucs.utils import apply_model, load_model
from pathlib import Path
from scipy.io import wavfile
# within the demucs directory
sys.modules['demucs.model'] = model
sys.modules['demucs'] = demucs
class DemucsService():
def __init__(self, model, device):
# This will require all the parameters to build and split the song.
# Get from the arguments the model that we want to use,
# by default demucs
self.model_path = f"lib/demucs/models/{model}.th"
# Get the device that we want to use to split the song, by default cpu
self.device = device # it can be cuda if NVIDIA available
# Number of random shifts for equivariant stabilization.
# Increase separation time but improves quality for Demucs. 10 was used
self.shifts = 0
# Apply the model to the entire input at once rather than
# first splitting it in chunks of 10 seconds
self.split = True
# This hack is to be able to load a pickled class from
self.model = load_model(self.model_path)
# default location for the service
self.out = Path(f'separated/{model}')
self.out.mkdir(parents=True, exist_ok=True)
# default tracks:
self.source_names = ["drums", "bass", "other", "vocals"]
self.mp3_bitrate = 320
"""
def encode_mp3(wav, path, bitrate=320, verbose=False):
try:
import lameenc
except ImportError:
print("Failed to call lame encoder. Maybe it is not installed? "
"On windows, run `python.exe -m pip install -U lameenc`, "
"on OSX/Linux, run `python3 -m pip install -U lameenc`, "
"then try again.", file=sys.stderr)
sys.exit(1)
encoder = lameenc.Encoder()
encoder.set_bit_rate(bitrate)
encoder.set_in_sample_rate(44100)
encoder.set_channels(2)
encoder.set_quality(2) # 2-highest, 7-fastest
if not verbose:
encoder.silence()
mp3_data = encoder.encode(wav.tostring())
mp3_data += encoder.flush()
with open(path, "wb") as f:
f.write(mp3_data)
"""
def split_song(self, track_path):
track = Path(track_path)
wav = AudioFile(track).read(
streams=0, samplerate=44100, channels=2).to(self.device)
wav = (wav * 2**15).round() / 2**15
ref = wav.mean(0)
wav = (wav - ref.mean()) / ref.std()
sources = apply_model(self.model, wav, shifts=self.shifts,
split=self.split, progress=True)
sources = sources * ref.std() + ref.mean()
track_folder = self.out / track.name.replace(track.suffix, '')
track_folder.mkdir(exist_ok=True)
for source, name in zip(sources, self.source_names):
source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short()
source = source.cpu().transpose(0, 1).numpy()
# I can't install lameenc so I'm skipping mp3 for now
# stem = str(track_folder / name)
# self.encode_mp3(source, stem + ".mp3", self.mp3_bitrate)
wavname = str(track_folder / f"{name}.wav")
wavfile.write(wavname, 44100, source)
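# Hypothetical driver for the service above; the model name, device string and
# track path are placeholders, not values taken from this repository.
if __name__ == "__main__":
    service = DemucsService(model="demucs", device="cpu")
    # Writes drums/bass/other/vocals WAV stems under separated/demucs/<track name>/
    service.split_song("songs/example_track.mp3")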
| 2,122
| 0
| 54
|
781b2eb6394e77875786319670d0805d9e8efe89
| 596
|
py
|
Python
|
100days/day80/query_demo.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | null | null | null |
100days/day80/query_demo.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | 16
|
2020-02-12T03:09:30.000Z
|
2022-03-12T00:08:59.000Z
|
100days/day80/query_demo.py
|
chainren/python-learn
|
5e48e96c4bb212806b9ae0954fdb368abdcf9ba3
|
[
"Apache-2.0"
] | null | null | null |
import pymysql
conn = pymysql.Connection(
host = '192.168.160.33',
port = 3306,
user = 'develop',
password='xs_dev',
database='test',
charset='utf8'
)
cursor = conn.cursor()
sql = """
select * from user1
"""
try:
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
id = row[0]
fname=row[1]
lname=row[2]
age =row[3]
sex=row[4]
income=row[5]
print("id=%s,fname=%s,lname=%s,age=%s,sex=%s,income=%s" % (id, fname, lname, age, sex, income))
except Exception as e:
print(e)
# Close the connection
conn.close()
| 18.060606
| 103
| 0.553691
|
import pymysql
conn = pymysql.Connection(
host = '192.168.160.33',
port = 3306,
user = 'develop',
password='xs_dev',
database='test',
charset='utf8'
)
cursor = conn.cursor()
sql = """
select * from user1
"""
try:
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
id = row[0]
fname=row[1]
lname=row[2]
age =row[3]
sex=row[4]
income=row[5]
print("id=%s,fname=%s,lname=%s,age=%s,sex=%s,income=%s" % (id, fname, lname, age, sex, income))
except Exception as e:
print(e)
# Close the connection
conn.close()
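# Illustrative variant of the same query (an addition, not part of the original
# script): a parameterized statement with a context-managed cursor so resources
# are released even if the query fails. The age filter is only an example.
conn = pymysql.connect(host='192.168.160.33', port=3306, user='develop',
                       password='xs_dev', database='test', charset='utf8')
try:
    with conn.cursor() as cursor:
        # %s placeholders let the driver escape values safely
        cursor.execute("select * from user1 where age > %s", (18,))
        for row in cursor.fetchall():
            print(row)
finally:
    conn.close()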
| 0
| 0
| 0
|
22bac7328f5707f686d5d1081d9a304673121b3f
| 2,245
|
py
|
Python
|
nfmanagementapi/resources/ServiceObjectCollectionResource.py
|
nfirewall/nfmapi
|
7232975711ad01b031ed50d7f26936afcfe5312a
|
[
"MIT"
] | null | null | null |
nfmanagementapi/resources/ServiceObjectCollectionResource.py
|
nfirewall/nfmapi
|
7232975711ad01b031ed50d7f26936afcfe5312a
|
[
"MIT"
] | null | null | null |
nfmanagementapi/resources/ServiceObjectCollectionResource.py
|
nfirewall/nfmapi
|
7232975711ad01b031ed50d7f26936afcfe5312a
|
[
"MIT"
] | null | null | null |
from nfmanagementapi.models import ServiceObject
from nfmanagementapi.schemata import ServiceObjectSchema
from marshmallow.exceptions import ValidationError
from .BaseResource import BaseResource
from flask import request
from app import db
from uuid import uuid4
path = 'service_objects'
endpoint = 'service_objects'
| 29.155844
| 72
| 0.561247
|
from nfmanagementapi.models import ServiceObject
from nfmanagementapi.schemata import ServiceObjectSchema
from marshmallow.exceptions import ValidationError
from .BaseResource import BaseResource
from flask import request
from app import db
from uuid import uuid4
path = 'service_objects'
endpoint = 'service_objects'
class ServiceObjectCollectionResource(BaseResource):
def get(self):
"""List Service Objects
---
description: List all service objects
tags:
- Service Objects
responses:
200:
content:
application/json:
schema:
type: array
items: ServiceObjectSchema
"""
objects = ServiceObject.query.all()
schema = ServiceObjectSchema(many = True)
return schema.dump(objects)
def post(self):
"""Create service object
---
description: Create a service object
tags:
- Service Objects
requestBody:
content:
application/json:
schema: ServiceObjectSchema
responses:
201:
description: Created
content:
application/json:
schema: ServiceObjectSchema
422:
description: Unprocessable Entity
content:
application/json:
schema: MessageSchema
"""
messages = []
json_data = request.get_json()
try:
data = ServiceObjectSchema().load(json_data)
except ValidationError as err:
for msg in err.messages:
messages.append("{}: {}".format(msg, err.messages[msg]))
return {"messages": messages}, 422
object = ServiceObject()
error = False
for key in data:
try:
setattr(object, key, data[key])
except ValueError as e:
error = True
messages.append(e.args[0])
if error:
return {"messages": messages}, 422
db.session.add(object)
db.session.commit()
db.session.refresh(object)
return ServiceObjectSchema().dump(object)
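# Rough illustration (not from this repository) of how a client might exercise
# the collection endpoint once it is mounted at the module-level `path` above;
# the payload fields are invented placeholders:
#
#   import requests
#   resp = requests.post("http://localhost:5000/service_objects",
#                        json={"name": "https", "protocol": "tcp", "port": 443})
#   print(resp.status_code, resp.json())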
| 0
| 1,903
| 23
|
0e99d00e9c38f4e19afb900ddb0a47445848a8c2
| 3,824
|
py
|
Python
|
code/EthicAssessmentSoftware/migrations/0001_initial.py
|
FelixOliverLange/ethikbackend
|
3fd2ad6c2953966254841fb770b482c7f7741791
|
[
"BSD-3-Clause"
] | null | null | null |
code/EthicAssessmentSoftware/migrations/0001_initial.py
|
FelixOliverLange/ethikbackend
|
3fd2ad6c2953966254841fb770b482c7f7741791
|
[
"BSD-3-Clause"
] | 2
|
2021-06-10T20:36:01.000Z
|
2021-09-22T19:44:05.000Z
|
code/EthicAssessmentSoftware/migrations/0001_initial.py
|
FelixOliverLange/ethikbackend
|
3fd2ad6c2953966254841fb770b482c7f7741791
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-02-12 00:15
from django.db import migrations, models
import django.db.models.deletion
| 50.986667
| 138
| 0.599895
|
# Generated by Django 3.1.6 on 2021-02-12 00:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Anwendung',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=70, unique=True)),
],
),
migrations.CreateModel(
name='Stakeholder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=70, unique=True)),
('beschreibung', models.CharField(blank=True, default='', max_length=200)),
('anwendung', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.anwendung')),
],
),
migrations.CreateModel(
name='Motivation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=70, unique=True)),
('beschreibung', models.CharField(blank=True, default='', max_length=200)),
('schutzklasse', models.CharField(default='', max_length=25)),
('prioritaet', models.IntegerField(default=1)),
('ist_recht', models.BooleanField(default=False)),
('anwendung', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.anwendung')),
],
),
migrations.CreateModel(
name='Konsequenz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=70, unique=True)),
('beschreibung', models.CharField(blank=True, default='', max_length=200)),
('bewertung', models.IntegerField()),
('betroffener', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.stakeholder')),
('motivation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.motivation')),
],
),
migrations.CreateModel(
name='Ansatz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=70, unique=True)),
('beschreibung', models.CharField(blank=True, default='', max_length=200)),
('auswirkung', models.IntegerField(default=0)),
('adressiert', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.konsequenz')),
('anwendung', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.anwendung')),
],
),
migrations.CreateModel(
name='Anforderung',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=70, unique=True)),
('beschreibung', models.CharField(blank=True, default='', max_length=200)),
('ansatz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='EthicAssessmentSoftware.ansatz')),
],
),
]
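# Hedged sketch (not part of this migration) of how the generated models might
# be used once the migration has been applied; the import path and field values
# are assumptions:
#
#   from EthicAssessmentSoftware.models import Anwendung, Stakeholder
#   app = Anwendung.objects.create(name="Beispielanwendung")
#   Stakeholder.objects.create(name="Nutzer", beschreibung="Endanwender",
#                              anwendung=app)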
| 0
| 3,677
| 23
|
b0c7f1929a58731db1dd66c0222e1c9b375dbf86
| 1,339
|
py
|
Python
|
old/v1/config_v1.py
|
apollo-heidal/Sound-Synth
|
c392291ee83d391333acaaff7ff3448da758bdbb
|
[
"MIT"
] | null | null | null |
old/v1/config_v1.py
|
apollo-heidal/Sound-Synth
|
c392291ee83d391333acaaff7ff3448da758bdbb
|
[
"MIT"
] | null | null | null |
old/v1/config_v1.py
|
apollo-heidal/Sound-Synth
|
c392291ee83d391333acaaff7ff3448da758bdbb
|
[
"MIT"
] | null | null | null |
config = {
"frequency": 440.0,
"duration": 20.0,
"sampling_rate": 44100,
"filename": "test_v1.wav",
"overtones": [
[
440.0,
1.0
],
[
12447.350408741928,
0.1098108639573242
],
[
12465.3571923053,
0.843727285302496
],
[
21539.57505590213,
0.17496422223017305
],
[
14675.669378957353,
0.013028474684831037
],
[
20577.216573422433,
0.23529784971612777
],
[
21425.497754119715,
0.6436550795219932
],
[
11410.89145988607,
0.011826877382886125
]
],
"amp_ctrl_points": [
[
0.0,
0.0
],
[
20.0,
100.0
],
[
33.0,
20.0
],
[
47.0,
88.0
],
[
56.0,
45.0
],
[
76.0,
80.0
],
[
90.0,
5.0
],
[
100.0,
20.0
]
]
}
| 17.853333
| 33
| 0.285288
|
config = {
"frequency": 440.0,
"duration": 20.0,
"sampling_rate": 44100,
"filename": "test_v1.wav",
"overtones": [
[
440.0,
1.0
],
[
12447.350408741928,
0.1098108639573242
],
[
12465.3571923053,
0.843727285302496
],
[
21539.57505590213,
0.17496422223017305
],
[
14675.669378957353,
0.013028474684831037
],
[
20577.216573422433,
0.23529784971612777
],
[
21425.497754119715,
0.6436550795219932
],
[
11410.89145988607,
0.011826877382886125
]
],
"amp_ctrl_points": [
[
0.0,
0.0
],
[
20.0,
100.0
],
[
33.0,
20.0
],
[
47.0,
88.0
],
[
56.0,
45.0
],
[
76.0,
80.0
],
[
90.0,
5.0
],
[
100.0,
20.0
]
]
}
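# The layout above suggests additive synthesis: "overtones" holds
# (frequency, amplitude) pairs and "amp_ctrl_points" an amplitude envelope in
# percent over the duration. A minimal rendering sketch (an assumption, not code
# from this repository) that ignores the envelope:
if __name__ == "__main__":
    import numpy as np
    from scipy.io import wavfile

    t = np.linspace(0.0, config["duration"],
                    int(config["sampling_rate"] * config["duration"]),
                    endpoint=False)
    signal = sum(amp * np.sin(2.0 * np.pi * freq * t)
                 for freq, amp in config["overtones"])
    signal /= np.max(np.abs(signal))  # normalise to [-1, 1]
    wavfile.write(config["filename"], config["sampling_rate"],
                  (signal * 32767).astype(np.int16))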
| 0
| 0
| 0
|
66cc98c8d9319502ca072034ba7112b3f7e1b2f6
| 67,679
|
py
|
Python
|
pyspectools/ftmw_analysis.py
|
aowen-uwmad/PySpecTools
|
3fd0b68352910df1e653370797a8edd46d92fa1c
|
[
"MIT"
] | 22
|
2018-03-14T10:44:17.000Z
|
2022-01-10T15:02:37.000Z
|
pyspectools/ftmw_analysis.py
|
aowen-uwmad/PySpecTools
|
3fd0b68352910df1e653370797a8edd46d92fa1c
|
[
"MIT"
] | 21
|
2019-07-27T01:43:50.000Z
|
2021-11-15T14:57:15.000Z
|
pyspectools/ftmw_analysis.py
|
aowen-uwmad/PySpecTools
|
3fd0b68352910df1e653370797a8edd46d92fa1c
|
[
"MIT"
] | 3
|
2020-08-03T16:22:00.000Z
|
2021-11-01T15:31:55.000Z
|
import datetime
import re
import os
import struct
from dataclasses import dataclass, field
from itertools import combinations, product
from typing import List, Dict
import pandas as pd
import numpy as np
import peakutils
from matplotlib import pyplot as plt
from scipy import signal as spsig
import plotly.graph_objs as go
from tqdm.autonotebook import tqdm
import networkx as nx
from ipywidgets import interactive, VBox, HBox
from lmfit.models import LinearModel
from pyspectools import routines
from pyspectools import figurefactory as ff
from pyspectools import fitting
from pyspectools.spectra import analysis
from pyspectools import parsers
def parse_spectrum(filename, threshold=20.0):
""" Function to read in a blackchirp or QtFTM spectrum from file """
dataframe = pd.read_csv(
filename, delimiter="\t", names=["Frequency", "Intensity"], skiprows=1
)
return dataframe[dataframe["Intensity"] <= threshold]
def center_cavity(dataframe, thres=0.3, verbose=True):
""" Finds the center frequency of a Doppler pair in cavity FTM measurements
and provides a column of offset frequencies.
Sometimes the peak finding threshold has to be tweaked to get the center
frequency correctly.
"""
# Find the peak intensities
center_indexes = peakutils.indexes(dataframe["Intensity"], thres=thres)
peak_frequencies = dataframe.iloc[center_indexes]["Frequency"]
# Calculate the center frequency as the average
center = np.average(peak_frequencies)
if verbose is True:
print("Center frequency at " + str(center))
dataframe["Offset Frequency"] = dataframe["Frequency"] - center
@dataclass
class Scan:
"""
DataClass for a Scan. Holds all of the relevant information that
describes a FT scan, such as the ID, what machine it was collected
on, and the experimental settings.
Has a few class methods that will make look ups easily such as
the date the scan was collected and the gases used.
"""
id: int
machine: str
fid: np.array
date: datetime.datetime
shots: int = 0
cavity_voltage: int = 0
cavity_atten: int = 0
cavity_frequency: float = 0.0
dr_frequency: float = 0.0
dr_power: int = 0
fid_points: int = 0
fid_spacing: float = 0.0
discharge: bool = False
magnet: bool = False
gases: Dict = field(default_factory=dict)
filter: List = field(default_factory=list)
exp: float = 0.0
zeropad: bool = False
window: str = ""
def __post_init__(self):
"""
Functions called after __init__ is called.
"""
# Perform FFT
self.process_fid()
def __deepcopy__(self):
"""
Dunder method to produce a deep copy - this will be used when
manipulating multiple Scan objects.
:return: A deep copy of the current Scan object
"""
        # Shell subclass used to skip __init__/__post_init__ when building the copy
        class Empty(self.__class__):
            def __init__(self):
                pass

        new_scan = Empty()
new_scan.__class__ = self.__class__
new_scan.__dict__.update(self.__dict__)
return new_scan
def average(self, others):
"""
Method to co-average two or more Scans in the time domain.
:param other: Scan object, or tuple/list
:return: A new Scan object with the co-added FID
"""
new_scan = self.__deepcopy__()
try:
new_scan.fid = np.average(others.extend(new_scan.fid), axis=0)
new_scan.average_ids = [scan.id for scan in others]
# If there is no extend method, then assume we're working with a
# single Scan
except AttributeError:
new_scan.fid = np.average([new_scan.fid, others.fid], axis=0)
new_scan.average_ids = [others.id]
new_scan.process_fid()
return new_scan
def __add__(self, other):
"""
Dunder method to co-add two or more Scans in the time domain.
:param other: Scan object, or tuple/list
:return: A new Scan object with the co-added FID
"""
new_scan = self.__deepcopy__()
new_scan.fid = np.sum([new_scan.fid, other.fid], axis=0)
new_scan.process_fid()
return new_scan
def __sub__(self, other):
"""
Dunder method to subtract another Scan from the current Scan in the time domain.
i.e. this scan - other scan
:param other: Scan object, or tuple/list
:return: A new Scan object with the subtracted FID
"""
new_scan = self.__deepcopy__()
new_scan.fid = np.subtract(new_scan.fid, other.fid)
new_scan.process_fid()
return new_scan
def subtract_frequency(self, other):
"""
Method to subtract another Scan from the current in the frequency domain.
:param other: Scan object to subtract with
:return: A new Scan object with the subtracted spectrum
"""
new_scan = self.__deepcopy__()
new_scan.spectrum["Intensity"] = (
new_scan.spectrum["Intensity"] - other.spectrum["Intensity"]
)
new_scan.subtracted = other.id
return new_scan
def add_frequency(self, other):
"""
Method to add another Scan from the current in the frequency domain.
:param other: Scan object to add with
:return: A new Scan object with the co-added spectrum
"""
new_scan = self.__deepcopy__()
new_scan.spectrum["Intensity"] = (
new_scan.spectrum["Intensity"] + other.spectrum["Intensity"]
)
new_scan.subtracted = other.id
return new_scan
@classmethod
def from_dict(cls, data_dict):
"""
Function to initialize a Scan object from a dictionary
of FT scan data collected from `parse_scan`.
:param data_dict: dict containing parsed data from FT
:return: Scan object
"""
scan_obj = cls(**data_dict)
return scan_obj
@classmethod
def from_qtftm(cls, filepath):
"""
Method to initialize a Scan object from a FT scan file.
Will load the lines into memory and parse the data into
a dictionary, which then gets passed into a Scan object.
:param filepath: str path to FID file
:return: Scan object
"""
with open(filepath) as read_file:
data_dict = parse_scan(read_file.readlines())
scan_obj = cls(**data_dict)
return scan_obj
@classmethod
def from_pickle(cls, filepath):
"""
Method to create a Scan object from a previously pickled
Scan.
:param filepath: path to the Scan pickle
:return: instance of the Scan object
"""
scan_obj = routines.read_obj(filepath)
if isinstance(scan_obj, Scan) is False:
raise Exception("File is not a Scan object; {}".format(type(scan_obj)))
else:
return scan_obj
@classmethod
def from_remote(cls, remote_path, ssh_obj=None):
"""
Method to initialize a Scan object from a remote server.
Has the option to pass an instance of a paramiko SSHClient, which would be
useful in a Batch. If none is supplied, an instance will be created.
:param remote_path: str remote path to the file
:param ssh_obj: optional argument to supply a paramiko SSHClient object
:return: Scan object from remote QtFTM file
"""
if ssh_obj is None:
default_keypath = os.path.join(os.path.expanduser("~"), ".ssh/id_rsa.pub")
hostname = input("Please provide remote hostname: ")
username = input("Please provide login: ")
ssh_settings = {"hostname": hostname, "username": username}
if os.path.isfile(default_keypath) is True:
ssh_settings["key_filename"] = default_keypath
else:
password = input("Please provide password: ")
ssh_settings["password"] = password
ssh_obj = routines.RemoteClient(**ssh_settings)
# Parse the scan data from remote file
data_dict = parse_scan(ssh_obj.open_remote(remote_path))
scan_obj = cls(**data_dict)
return scan_obj
def to_file(self, filepath, format="yaml"):
""" Method to dump data to YAML format.
Extensions are automatically decided, but
can also be supplied.
parameters:
--------------------
:param filepath - str path to yaml file
:param format - str denoting the syntax used for dumping. Defaults to YAML.
"""
if "." not in filepath:
if format == "json":
filepath += ".json"
else:
filepath += ".yml"
if format == "json":
writer = routines.dump_json
else:
writer = routines.dump_yaml
writer(filepath, self.__dict__)
def to_pickle(self, filepath=None, **kwargs):
"""
Pickles the Scan object with the joblib wrapper implemented
in routines.
:param filepath: optional argument to pickle to. Defaults to the id.pkl
:param kwargs: additional settings for the pickle operation
"""
if filepath is None:
filepath = "{}.pkl".format(self.id)
routines.save_obj(self, filepath, **kwargs)
def process_fid(self, **kwargs):
"""
Perform an FFT on the FID to yield the frequency domain spectrum.
Kwargs are passed into the FID processing, which will override the
Scan attributes.
:param kwargs: Optional keyword arguments for processing the FID
"""
# Calculate the frequency bins
frequencies = np.linspace(
self.cavity_frequency, self.cavity_frequency + 1.0, len(self.fid)
)
# Calculate the time bins
time = np.linspace(0.0, self.fid_spacing * self.fid_points, self.fid_points)
process_list = ["window", "filter", "exp", "zeropad"]
process_dict = {
key: value for key, value in self.__dict__.items() if key in process_list
}
# Override with user settings
process_dict.update(**kwargs)
temp_fid = np.copy(self.fid)
self.spectrum = fid2fft(
temp_fid, 1.0 / self.fid_spacing, frequencies, **process_dict
)
self.fid_df = pd.DataFrame({"Time (us)": time * 1e6, "FID": temp_fid})
def within_time(self, date_range):
"""
Function for determining whether the scan was taken between
a specified date range in month/day/year, in the format
04/09/08 for April 9th, 2008.
:param date_range: list containing the beginning and end date strings
:return: bool - True if within range, False otherwise
"""
try:
early = datetime.datetime.strptime(date_range[0], "%m/%d/%y")
except:
early = datetime.datetime(1, 1, 1)
try:
late = datetime.datetime.strptime(date_range[1], "%m/%d/%y")
except:
late = datetime.datetime(9999, 1, 1)
return early <= self.date <= late
def is_depleted(self, ref, roi=None, depletion=None):
"""
Function for determining if the signal in this Scan is less
than that of another scan. This is done by a simple comparison
of the average of 10 largest intensities in the two spectra. If
the current scan is less intense than the reference by the
expected depletion percentage, then it is "depleted".
This function can be used to determine if a scan if depleted
in DR/magnet/discharge assays.
TODO - implement a chi squared test of sorts to determine if a
depletion is statistically significant
:param ref: second Scan object for comparison
:param depletion: percentage of depletion expected of the reference
:return: bool - True if signal in this Scan is less intense than the reference
"""
y_ref = ref.spectrum["Intensity"].values
y_obs = self.spectrum["Intensity"].values
self.ref_freq = ref.fit.frequency
self.ref_id = ref.id
if roi:
y_ref = y_ref[roi]
y_obs = y_obs[roi]
# This doesn't work, or is not particularly discriminating.
# chisq, p_value = chisquare(
# y_obs, y_ref
# )
if depletion is None:
sigma = np.std(y_obs, axis=0) * 16.0
else:
sigma = depletion
expected = np.sum(y_ref, axis=0) - sigma
return np.sum(y_obs, axis=0) <= expected
def scatter_trace(self):
"""
Create a Plotly Scattergl trace. Called by the Batch function, although
performance-wise it takes forever to plot up ~3000 scans.
:return trace: Scattergl object
"""
text = "Scan ID: {}<br>Cavity: {}<br>DR: {}<br>Magnet: {}<br>Attn: {}".format(
self.id,
self.cavity_frequency,
self.dr_frequency,
self.magnet,
self.cavity_atten,
)
trace = go.Scattergl(
x=np.linspace(self.id, self.id + 1, len(self.spectrum["Intensity"])),
y=self.spectrum["Intensity"],
text=text,
marker={"color": "rgb(43,140,190)"},
hoverinfo="text",
)
return trace
def fit_cavity(self, plot=True, verbose=False):
"""
Perform a fit to the cavity spectrum. Uses a paired Gaussian model
that minimizes the number of fitting parameters.
:param plot: bool specify whether a Plotly figure is made
:return: Model Fit result
"""
y = self.spectrum["Intensity"].dropna().values
x = self.spectrum["Frequency (MHz)"].dropna().values
model = fitting.PairGaussianModel()
result = model.fit_pair(x, y, verbose=verbose)
self.spectrum["Fit"] = result.best_fit
self.fit = result
self.fit.frequency = self.fit.best_values["x0"]
if plot is True:
fig = go.FigureWidget()
fig.layout["xaxis"]["title"] = "Frequency (MHz)"
fig.layout["xaxis"]["tickformat"] = ".2f"
fig.add_scatter(x=x, y=y, name="Observed")
fig.add_scatter(x=x, y=result.best_fit, name="Fit")
return result, fig
else:
return result
def parse_scan(filecontents):
"""
Function for extracting the FID data from an FT scan. The data
is returned as a dictionary, which can be used to initialize a
Scan object.
:param filecontents: list of lines from an FID file
:return: dict containing parsed data from FID
"""
data = {"gases": dict()}
# FID regex
fid_regex = re.compile(r"^fid\d*", re.M)
# Regex to find gas channels
gas_regex = re.compile(r"^#Gas \d name", re.M)
flow_regex = re.compile(r"^#Gas \d flow", re.M)
# Regex to detect which channel is set to the discharge
dc_regex = re.compile(r"^#Pulse ch \d name\s*DC", re.M)
dc_channel = None
for index, line in enumerate(filecontents):
if "#Scan" in line:
split_line = line.split()
data["id"] = int(split_line[1])
try:
data["machine"] = split_line[2]
except IndexError:
data["machine"] = "FT1"
if "#Probe freq" in line:
data["cavity_frequency"] = float(line.split()[2])
if "#Shots" in line:
data["shots"] = int(line.split()[-1])
if "#Date" in line:
strip_targets = ["#Date", "\t", "\n"]
data["date"] = datetime.datetime.strptime(
re.sub("|".join(strip_targets), "", line), "%a %b %d %H:%M:%S %Y"
)
if "#Cavity Voltage" in line:
data["cavity_voltage"] = int(line.split()[2])
if "#Attenuation" in line:
data["cavity_atten"] = int(line.split()[1])
if "#DR freq" in line:
data["dr_frequency"] = float(line.split()[2])
if "#DR power" in line:
data["dr_power"] = int(line.split()[2])
if "#FID spacing" in line:
data["fid_spacing"] = float(re.findall(r"\de[+-]?\d\d", line)[0])
if "#FID points" in line:
data["fid_points"] = int(line.split()[-1])
# Get the name of the gas
if gas_regex.match(line):
split_line = line.split()
# Only bother parsing if the channel is used
gas_index = int(split_line[1])
try:
data["gases"][gas_index] = {"gas": " ".join(split_line[3:])}
except IndexError:
data["gases"][gas_index] = {"gas": ""}
# Get the flow rate for channel
if flow_regex.match(line):
split_line = line.split()
gas_index = int(split_line[1])
data["gases"][gas_index]["flow"] = float(split_line[3])
if "#Magnet enabled" in line:
data["magnet"] = bool(int(line.split()[2]))
# Find the channel the discharge is set to and compile a regex
# to look for the channel
if dc_regex.match(line):
dc_index = line.split()[2]
dc_channel = re.compile(r"^#Pulse ch {} enabled".format(dc_index), re.M)
# Once the discharge channel index is known, start searching for it
if dc_channel:
if dc_channel.match(line):
data["discharge"] = bool(int(line.split()[-1]))
# Find when the FID lines start popping up
if fid_regex.match(line):
fid = filecontents[index + 1 :]
fid = [float(value) for value in fid]
data["fid"] = np.array(fid)
return data
def perform_fft(fid, spacing, start=0, stop=-1, window="boxcar"):
"""
Perform an FFT on an FID to get the frequency domain spectrum.
All of the arguments are optional, and provide control over how the FFT is performed, as well as post-processing
parameters like window functions and zero-padding.
This is based on the FFT code by Kyle Crabtree, with modifications to fit this dataclass.
Parameters
----------
fid - Numpy 1D array
Array holding the values of the FID
spacing - float
Time spacing between FID points in microseconds
start - int, optional
Starting index for the FID array to perform the FFT
stop - int, optional
End index for the FID array to perform the FFT
zpf - int, optional
Pad the FID with zeros to nth nearest power of 2
window - str
Specify the window function used to process the FID. Defaults to boxcar, which is effectively no filtering.
The names of the window functions available can be found at:
https://docs.scipy.org/doc/scipy/reference/signal.windows.html
Returns
-------
"""
fid = np.copy(fid)
if window is not None and window in spsig.windows.__all__:
window_f = spsig.windows.get_window(window, fid.size)
fid *= window_f
else:
raise Exception("Specified window function is not implemented in SciPy!")
# Set values to zero up to starting index
fid[:start] = 0.0
if stop < 0:
# If we're using negative indexes
fid[fid.size + stop :] = 0.0
else:
# Otherwise, index with a positive number
fid[stop:] = 0.0
# Perform the FFT
fft = np.fft.rfft(fid)
read_length = len(fid) // 2 + 1
df = 1.0 / fid.size / spacing
# Generate the frequency array
frequency = np.linspace(0.0, self.header["sideband"] * df, read_length)
frequency += self.header["probe_freq"]
fft[(frequency >= f_max) & (frequency <= f_min)] = 0.0
fft *= 1000.0
return frequency, fft
def fid2fft(fid, rate, frequencies, **kwargs):
"""
Process an FID by performing an FFT to yield the frequency domain
information. Kwargs are passed as additional processing options,
and are implemented as some case statements to ensure the settings
are valid (e.g. conforms to sampling rate, etc.)
:param fid: np.array corresponding to the FID intensity
:param rate: sampling rate in Hz
:param frequencies: np.array corresponding to the frequency bins
:param kwargs: signal processing options:
delay - delays the FID processing by setting the start
of the FID to zero
zeropad - Toggles whether or not the number of sampled
points is doubled to get artificially higher
resolution in the FFT
window - Various window functions provided by `scipy.signal`
exp - Specifies an exponential filter
filter - 2-tuple specifying the frequency cutoffs for a
band pass filter
:return: freq_df - pandas dataframe with the FFT spectrum
"""
# Remove DC
new_fid = fid - np.average(fid)
if "delay" in kwargs:
delay = int(kwargs["delay"] / (1.0 / rate) / 1e6)
new_fid[:delay] = 0.0
# Zero-pad the FID
if "zeropad" in kwargs:
if kwargs["zeropad"] is True:
# Pad the FID with zeros to get higher resolution
fid = np.append(new_fid, np.zeros(len(new_fid)))
# Since we've padded with zeros, we'll have to update the
# frequency array
frequencies = spsig.resample(frequencies, len(frequencies) * 2)
# Apply a window function to the FID
if "window" in kwargs:
if kwargs["window"] in spsig.windows.__all__:
new_fid *= spsig.get_window(kwargs["window"], new_fid.size)
# Apply an exponential filter on the FID
if "exp" in kwargs:
if kwargs["exp"] > 0.0:
new_fid *= spsig.exponential(len(new_fid), tau=kwargs["exp"])
# Apply a bandpass filter on the FID
if ("filter" in kwargs) and (len(kwargs["filter"]) == 2):
low, high = sorted(kwargs["filter"])
if low < high:
new_fid = apply_butter_filter(new_fid, low, high, rate)
# Perform the FFT
fft = np.fft.rfft(new_fid)
# Get the real part of the FFT, and only the non-duplicated side
real_fft = np.abs(fft[: int(len(new_fid) / 2)]) / len(new_fid) * 1e3
frequencies = spsig.resample(frequencies, real_fft.size)
# For some reason, resampling screws up the frequency ordering...
real_fft = real_fft[np.argsort(frequencies)]
frequencies = np.sort(frequencies)
# Package into a pandas dataframe
freq_df = pd.DataFrame({"Frequency (MHz)": frequencies, "Intensity": real_fft})
return freq_df
def butter_bandpass(low, high, rate, order=1):
"""
A modified version of the Butterworth bandpass filter described here,
adapted for use with the FID signal.
http://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
The arguments are:
:param low The low frequency cut-off, given in kHz.
:param high The high frequency cut-off, given in kHz.
:param rate The sampling rate, given in Hz. From the FIDs, this means that
the inverse of the FID spacing is used.
:return bandpass window
"""
# Calculate the Nyquist frequency
nyq = 0.5 * (rate / (2.0 * np.pi))
low = (low * 1e3) / nyq
high = (high * 1e3) / nyq
if high > 1.0:
raise Exception("High frequency cut-off exceeds the Nyquist frequency.")
b, a = spsig.butter(order, [low, high], btype="band", analog=False)
return b, a
def apply_butter_filter(data, low, high, rate, order=1):
"""
A modified Butterworth bandpass filter, adapted from the Scipy cookbook.
The argument data supplies the FID, which then uses the scipy signal
processing function to apply the digital filter, and returns the filtered
FID.
See the `butter_bandpass` function for additional arguments.
"""
b, a = butter_bandpass(low, high, rate, order=order)
y = spsig.lfilter(b, a, data)
return y
def generate_ftb_line(frequency, shots, **kwargs):
""" Function that generates an FTB file for a list of
frequencies, plus categorization tests.
kwargs are passed as additional options for the ftb
batch. Keywords are:
magnet: bool
dipole: float
atten: int
skiptune: bool
drfreq: float
drpower: int
cal
parameters:
---------------
:param frequency: float for frequency in MHz
:param shots: int number of shots to integrate for
returns:
---------------
:return ftbline: str
"""
line = "ftm:{:.4f} shots:{}".format(frequency, shots)
for key, value in kwargs.items():
line += " {}:{}".format(key, value)
line += "\n"
return line
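# Example (illustrative values): generate_ftb_line(8332.1234, 100, dipole=1.0,
# skiptune="false") returns 'ftm:8332.1234 shots:100 dipole:1.0 skiptune:false\n'.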
def neu_categorize_frequencies(frequencies, intensities=None, nshots=50, **kwargs):
"""
Routine to generate an FTB batch file for performing a series of tests
on frequencies.
"""
ftb_string = ""
if intensities:
norm_int = intensities / np.max(intensities)
shotcounts = np.round(nshots / norm_int).astype(int)
else:
shotcounts = np.full(len(frequencies), nshots, dtype=int)
# default settings for all stuff
param_dict = {
"dipole": 1.0,
"magnet": "false",
"drpower": "10",
"skiptune": "false",
}
param_dict.update(kwargs)
for freq, shot in zip(frequencies, shotcounts):
        # generate_ftb_str does not exist in this module; the intended helper is generate_ftb_line
        ftb_string += generate_ftb_line(freq, shot, **param_dict)
        if "magnet" in kwargs:
            param_dict["magnet"] = "true"
            ftb_string += generate_ftb_line(freq, shot, **param_dict)
    return ftb_string
def categorize_frequencies(
frequencies,
nshots=50,
intensities=None,
power=None,
attn_list=None,
dipole=None,
attn=None,
magnet=False,
dr=False,
discharge=False,
):
"""
Function that will format an FT batch file to perform categorization
tests, with some flexibility on how certain tests are performed.
"""
ftb_str = ""
if intensities is None:
shots = np.full(len(frequencies), nshots, dtype=int)
else:
shots = np.sqrt(nshots / intensities).astype(int)
if dipole:
if attn is None:
# If dipole test requested, but no attenuation
# supplied do the default sweep
dipole_test = [0.01, 0.1, 1.0, 3.0, 5.0]
dipole_flag = "dipole"
else:
# Otherwise run specific attenuations
dipole_test = attn_list
dipole_flag = "atten"
if dr is True:
freq_list = combinations(frequencies, 2)
print(list(freq_list))
else:
freq_list = frequencies
# loop over each frequency and number of shots
for value, shotcount in zip(freq_list, shots):
if dr is True:
freq, dr_freq = value
else:
freq = value
# Generate normal observation
try:
freq = float(freq)
shotcount = int(shotcount)
if dr is True:
dr_freq = float(dr_freq)
ftb_str += generate_ftb_line(freq, shotcount, **{"skiptune": "false"})
if dr is True:
ftb_str += generate_ftb_line(
freq, shotcount, **{"skiptune": "true", "drfreq": dr_freq}
)
if dipole is True:
for dipole_value in dipole_test:
ftb_str += generate_ftb_line(
freq, shotcount, **{dipole_flag: dipole_value}
)
if magnet is True:
ftb_str += generate_ftb_line(freq, shotcount, **{"magnet": "true"})
if discharge is True:
# Toggle the discharge stack on and off
ftb_str += generate_ftb_line(
freq, shotcount, **{"pulse,1,enabled": "false"}
)
ftb_str += generate_ftb_line(
freq, shotcount, **{"pulse,1,enabled": "true"}
)
except ValueError:
print("Error with " + str(value))
return ftb_str
def calculate_integration_times(intensity, nshots=50):
"""
Method for calculating the expected integration time
in shot counts based on the intensity; either theoretical
line strengths or SNR.
parameters:
---------------
intensity - array of intensity metric; e.g. SNR
nshots - optional int number of shots used for the strongest line
returns:
---------------
shot_counts - array of shot counts for each frequency
"""
norm_int = intensity / np.max(intensity)
shot_counts = np.round(nshots / norm_int).astype(int)
return shot_counts
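# Example (illustrative values): SNR weights of [10., 5., 2.] with nshots=50
# give integration times of [50, 100, 250] shots, so weaker lines are averaged
# proportionally longer.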
| 37.516075
| 120
| 0.576604
|
import datetime
import re
import os
import struct
from dataclasses import dataclass, field
from itertools import combinations, product
from typing import List, Dict
import pandas as pd
import numpy as np
import peakutils
from matplotlib import pyplot as plt
from scipy import signal as spsig
import plotly.graph_objs as go
from tqdm.autonotebook import tqdm
import networkx as nx
from ipywidgets import interactive, VBox, HBox
from lmfit.models import LinearModel
from pyspectools import routines
from pyspectools import figurefactory as ff
from pyspectools import fitting
from pyspectools.spectra import analysis
from pyspectools import parsers
def parse_specdata(filename):
# For reading the output of a SPECData analysis
return pd.read_csv(filename, skiprows=4)
def parse_spectrum(filename, threshold=20.0):
""" Function to read in a blackchirp or QtFTM spectrum from file """
dataframe = pd.read_csv(
filename, delimiter="\t", names=["Frequency", "Intensity"], skiprows=1
)
return dataframe[dataframe["Intensity"] <= threshold]
def center_cavity(dataframe, thres=0.3, verbose=True):
""" Finds the center frequency of a Doppler pair in cavity FTM measurements
and provides a column of offset frequencies.
Sometimes the peak finding threshold has to be tweaked to get the center
frequency correctly.
"""
# Find the peak intensities
center_indexes = peakutils.indexes(dataframe["Intensity"], thres=thres)
peak_frequencies = dataframe.iloc[center_indexes]["Frequency"]
# Calculate the center frequency as the average
center = np.average(peak_frequencies)
if verbose is True:
print("Center frequency at " + str(center))
dataframe["Offset Frequency"] = dataframe["Frequency"] - center
@dataclass
class Batch:
assay: str
id: int
machine: str
date: datetime.datetime
scans: List = field(default_factory=list)
filter: List = field(default_factory=list)
exp: float = 0.0
zeropad: bool = False
window: str = ""
@classmethod
def from_qtftm(cls, filepath, assay, machine):
"""
Create a Batch object from a QtFTM scan file.
:param filepath:
:param assay:
:param machine:
:return:
"""
assays = ["dr", "magnet", "discharge", "dipole"]
assay = assay.lower()
if assay not in assays:
raise Exception(
"Not a valid assay type; choose dr, magnet, discharge, dipole."
)
with open(filepath) as read_file:
batch_df, batch_data = parse_batch(read_file.readlines())
batch_data["assay"] = assay
batch_data["machine"] = machine.upper()
batch_obj = cls(**batch_data)
batch_obj.details = batch_df
return batch_obj
@classmethod
def from_remote(cls, root_path, batch_id, assay, machine, ssh_obj=None):
"""
Create a Batch object by retrieving a QtFTM batch file from a remote
location. The user must provide a path to the root directory of the
batch type, e.g. /home/data/QtFTM/batch/, and the corresponding
batch type ID. Additionally, the type of batch must be specified to
determine the type of analysis required.
Optionally, the user can provide a reference to a RemoteClient object
from `pyspectools.routines`. If none is provided, a RemoteClient object
will be created with public key authentication (if available), otherwise
the user will be queried for a hostname, username, and password.
Keep in mind this method can be slow as every scan is downloaded. I
recommend creating this once, then saving a local copy for subsequent
analysis.
:param root_path: str path to the batch type root directory
:param batch_id: int or str value for the batch number
:param assay: str the batch type
:param machine: str reference the machine used for the batch
:param ssh_obj: `RemoteClient` object
:return:
"""
if ssh_obj is None:
default_keypath = os.path.join(os.path.expanduser("~"), ".ssh/id_rsa.pub")
hostname = input("Please provide remote hostname: ")
username = input("Please provide login: ")
ssh_settings = {"hostname": hostname, "username": username}
if os.path.isfile(default_keypath) is True:
ssh_settings["key_filename"] = default_keypath
else:
password = input("Please provide password: ")
ssh_settings["password"] = password
ssh_obj = routines.RemoteClient(**ssh_settings)
# Parse the scan data from remote file
remote_path = ssh_obj.ls(
os.path.join(root_path, "*", "*", str(batch_id) + ".txt")
)
batch_df, batch_data = parse_batch(ssh_obj.open_remote(remote_path[0]))
batch_data["assay"] = assay
batch_data["machine"] = machine.upper()
batch_obj = cls(**batch_data)
batch_obj.details = batch_df
batch_obj.remote = ssh_obj
batch_obj.get_scans(root_path, batch_df.id.values)
return batch_obj
@classmethod
def from_pickle(cls, filepath):
"""
Method to create a Scan object from a previously pickled
Scan.
:param filepath: path to the Scan pickle
:return: instance of the Scan object
"""
batch_obj = routines.read_obj(filepath)
if isinstance(batch_obj, Batch) is False:
raise Exception("File is not a Scan object; {}".format(type(batch_obj)))
else:
return batch_obj
def __repr__(self):
return "{}-Batch {}".format(self.machine, self.id)
def __copy__(self):
batch_obj = Batch(**self.__dict__)
return batch_obj
def find_scan(self, id):
scans = [scan for scan in self.scans if scan.id == id]
if len(scans) == 0:
raise Exception("No scans were found.")
else:
return scans[0]
def get_scans(self, root_path, ids):
"""
Function to create Scan objects for all of the scans in
a QtFTM batch.
:param root_path: str scans root path
:param ids: list scan ids
:param src: str optional specifying whether a remote or local path is used
"""
root_path = root_path.replace("batch", "scans")
path_list = tqdm(
[
os.path.join(root_path, "*", "*", str(scan_id) + ".txt")
for scan_id in ids
]
)
if hasattr(self, "remote") is True:
scans = [Scan.from_remote(path, self.remote) for path in path_list]
else:
scans = [Scan.from_qtftm(path) for path in path_list]
self.scans = scans
def process_dr(self, significance=16.0):
"""
Function to batch process all of the DR measurements.
:param global_depletion: float between 0. and 1. specifying the expected depletion for any line
without a specific expected value.
:param depletion_dict: dict with keys corresponding to cavity frequencies, and values the expected
depletion value between 0. and 1.
:return dr_dict: dict with keys corresponding to cavity frequency, with each value
a dict of the DR frequencies, scan IDs and Scan objects.
"""
if self.assay != "dr":
raise Exception(
"Batch is not a DR test! I think it's {}".format(self.assay)
)
# Find the cavity frequencies that DR was performed on
progressions = self.split_progression_batch()
dr_dict = dict()
counter = 0
for index, progression in tqdm(progressions.items()):
ref = progression.pop(0)
try:
ref_fit = ref.fit_cavity(plot=False)
roi, ref_x, ref_y = ref.get_line_roi()
signal = [np.sum(ref_y, axis=0)]
sigma = (
np.average(
[
np.std(scan.spectrum["Intensity"].iloc[roi])
for scan in progression
]
)
* significance
)
connections = [
scan for scan in progression if scan.is_depleted(ref, roi, sigma)
]
if len(connections) > 1:
counter += len(connections)
signal.extend(
[
np.sum(scan.spectrum["Intensity"].iloc[roi], axis=0)
for scan in connections
]
)
dr_dict[index] = {
"frequencies": [scan.dr_frequency for scan in connections],
"ids": [scan.id for scan in connections],
"cavity": ref.fit.frequency,
"signal": signal,
"expected": np.sum(ref_y) - sigma,
}
except ValueError:
print("Progression {} could not be fit; ignoring.".format(index))
print(
"Possible depletions detected in these indexes: {}".format(
list(dr_dict.keys())
)
)
print("There are {} possible depletions.".format(counter))
return dr_dict
def split_progression_batch(self):
"""
Split up a DR batch into individual progressions based on the cavity frequency
and whether or not the scan IDs are consecutive.
:return progressions: dict with keys corresponding to progression index and values are lists of Scans
"""
counter = 0
progressions = dict()
self.details["id"] = self.details["id"].apply(int)
for freq in self.details["ftfreq"].unique():
slice_df = self.details.loc[self.details["ftfreq"] == freq]
chunks = routines.group_consecutives(slice_df["id"])
for chunk in chunks:
progressions[counter] = [
scan for scan in self.scans if scan.id in chunk
]
counter += 1
return progressions
def interactive_dr_batch(self):
"""
Create an interactive widget slider with a Plotly figure. The batch will be split
up into "subbatches" by the cavity frequency and whether or not the scan IDs are
consecutive.
:return vbox: VBox object with the Plotly figure and slider objects
"""
progressions = self.split_progression_batch()
fig = go.FigureWidget()
fig.layout["width"] = 900.0
fig.layout["showlegend"] = False
def update_figure(index):
fig.data = []
fig.add_traces([scan.scatter_trace() for scan in progressions[index]])
index_slider = interactive(update_figure, index=(0, len(progressions) - 1, 1))
vbox = VBox((fig, index_slider))
vbox.layout.align_items = "center"
return vbox
def plot_scans(self):
"""
Create a plotly figure of all of the Scans within a Batch.
:return:
"""
fig = go.FigureWidget()
fig.layout["title"] = "{} Batch {}".format(self.machine, self.id)
fig.layout["showlegend"] = False
fig.add_traces([scan.scatter_trace() for scan in self.scans])
return fig
def reprocess_fft(self, **kwargs):
"""
Reprocess all of the FIDs with specified settings. The default values
are taken from the Batch attributes, and kwargs provided will override
the defaults.
:param kwargs:
"""
param_list = ["filter", "exp", "zeropad", "window"]
params = {
key: value for key, value in self.__dict__.items() if key in param_list
}
params.update(**kwargs)
_ = [scan.process_fid(**params) for scan in tqdm(self.scans)]
def to_pickle(self, filepath=None, **kwargs):
"""
Pickles the Batch object with the joblib wrapper implemented
in routines.
:param filepath: optional argument to pickle to. Defaults to the {assay}-{id}.pkl
:param kwargs: additional settings for the pickle operation
"""
if filepath is None:
filepath = "{}-{}.pkl".format(self.assay, self.id)
# the RemoteClient object has some thread locking going on that prevents
# pickling TODO - figure out why paramiko doesn't allow pickling
if hasattr(self, "remote"):
delattr(self, "remote")
routines.save_obj(self, filepath, **kwargs)
def create_dr_network(self, scans):
"""
Take a list of scans, and generate a NetworkX Graph object
for analysis and plotting.
:param scans: list of scan IDs to connect
:return fig: Plotly FigureWidget object
"""
connections = [
[np.floor(scan.cavity_frequency), np.floor(scan.dr_frequency)]
for scan in self.scans
if scan.id in scans
]
fig, self.progressions = ff.dr_network_diagram(connections)
return fig
def find_optimum_scans(self, thres=0.8):
"""
:param thres:
:return:
"""
progressions = self.split_progression_batch()
data = list()
for index, progression in tqdm(progressions.items()):
snrs = [scan.calc_snr(thres=thres) for scan in progression]
best_scan = progression[np.argmax(snrs)]
try:
fit_result = best_scan.fit_cavity(plot=False)
if fit_result.best_values["w"] < 0.049:
data.append(
{
"frequency": np.round(fit_result.best_values["x0"], 4),
"snr": np.max(snrs),
"scan": best_scan.id,
"attenuation": best_scan.cavity_atten,
"index": index,
}
)
except ValueError:
print("Index {} failed to fit!".format(index))
opt_df = pd.DataFrame(data)
return opt_df
def search_frequency(self, frequency, tol=0.001):
"""
Search the Batch scans for a particular frequency, and return
any scans that lie within the tolerance window
:param frequency: float specifying frequency to search
:param tol: float decimal percentage to use for the search tolerance
:return new_batch: a new Batch object with selected scans
"""
upper = frequency * (1 + tol)
lower = frequency * (1 - tol)
scans = [scan for scan in self.scans if lower <= scan.cavity_frequency <= upper]
# new_batch = deepcopy(self)
# new_batch.scans = scans
return scans
@dataclass
class Scan:
"""
DataClass for a Scan. Holds all of the relevant information that
describes a FT scan, such as the ID, what machine it was collected
on, and the experimental settings.
Has a few class methods that will make look ups easily such as
the date the scan was collected and the gases used.
"""
id: int
machine: str
fid: np.array
date: datetime.datetime
shots: int = 0
cavity_voltage: int = 0
cavity_atten: int = 0
cavity_frequency: float = 0.0
dr_frequency: float = 0.0
dr_power: int = 0
fid_points: int = 0
fid_spacing: float = 0.0
discharge: bool = False
magnet: bool = False
gases: Dict = field(default_factory=dict)
filter: List = field(default_factory=list)
exp: float = 0.0
zeropad: bool = False
window: str = ""
def __post_init__(self):
"""
Functions called after __init__ is called.
"""
# Perform FFT
self.process_fid()
def __deepcopy__(self):
"""
Dunder method to produce a deep copy - this will be used when
manipulating multiple Scan objects.
:return: A deep copy of the current Scan object
"""
class Empty(self.__class__):
def __init__(self):
pass
new_scan = Empty()
new_scan.__class__ = self.__class__
new_scan.__dict__.update(self.__dict__)
return new_scan
def __repr__(self):
return str(f"Scan {self.id}")
def average(self, others):
"""
Method to co-average two or more Scans in the time domain.
:param other: Scan object, or tuple/list
:return: A new Scan object with the co-added FID
"""
new_scan = self.__deepcopy__()
try:
new_scan.fid = np.average(others.extend(new_scan.fid), axis=0)
new_scan.average_ids = [scan.id for scan in others]
# If there is no extend method, then assume we're working with a
# single Scan
except AttributeError:
new_scan.fid = np.average([new_scan.fid, others.fid], axis=0)
new_scan.average_ids = [others.id]
new_scan.process_fid()
return new_scan
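    # Example (illustrative): co-averaging one scan with a list of others in
    # the time domain, assuming scan_a, scan_b, and scan_c are Scan objects:
    #   averaged = scan_a.average([scan_b, scan_c])
    #   averaged.spectrum.head()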
def __add__(self, other):
"""
Dunder method to co-add two or more Scans in the time domain.
:param other: Scan object, or tuple/list
:return: A new Scan object with the co-added FID
"""
new_scan = self.__deepcopy__()
new_scan.fid = np.sum([new_scan.fid, other.fid], axis=0)
new_scan.process_fid()
return new_scan
def __sub__(self, other):
"""
Dunder method to subtract another Scan from the current Scan in the time domain.
i.e. this scan - other scan
:param other: Scan object, or tuple/list
:return: A new Scan object with the subtracted FID
"""
new_scan = self.__deepcopy__()
new_scan.fid = np.subtract(new_scan.fid, other.fid)
new_scan.process_fid()
return new_scan
def subtract_frequency(self, other):
"""
Method to subtract another Scan from the current in the frequency domain.
:param other: Scan object to subtract with
:return: A new Scan object with the subtracted spectrum
"""
new_scan = self.__deepcopy__()
new_scan.spectrum["Intensity"] = (
new_scan.spectrum["Intensity"] - other.spectrum["Intensity"]
)
new_scan.subtracted = other.id
return new_scan
def add_frequency(self, other):
"""
Method to add another Scan from the current in the frequency domain.
:param other: Scan object to add with
:return: A new Scan object with the co-added spectrum
"""
new_scan = self.__deepcopy__()
new_scan.spectrum["Intensity"] = (
new_scan.spectrum["Intensity"] + other.spectrum["Intensity"]
)
new_scan.subtracted = other.id
return new_scan
@classmethod
def from_dict(cls, data_dict):
"""
Function to initialize a Scan object from a dictionary
of FT scan data collected from `parse_scan`.
:param data_dict: dict containing parsed data from FT
:return: Scan object
"""
scan_obj = cls(**data_dict)
return scan_obj
@classmethod
def from_qtftm(cls, filepath):
"""
Method to initialize a Scan object from a FT scan file.
Will load the lines into memory and parse the data into
a dictionary, which then gets passed into a Scan object.
:param filepath: str path to FID file
:return: Scan object
"""
with open(filepath) as read_file:
data_dict = parse_scan(read_file.readlines())
scan_obj = cls(**data_dict)
return scan_obj
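    # Example (illustrative; the path is hypothetical):
    #   scan = Scan.from_qtftm("/path/to/qtftm/123456.txt")
    #   result = scan.fit_cavity(plot=False)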
@classmethod
def from_pickle(cls, filepath):
"""
Method to create a Scan object from a previously pickled
Scan.
:param filepath: path to the Scan pickle
:return: instance of the Scan object
"""
scan_obj = routines.read_obj(filepath)
if isinstance(scan_obj, Scan) is False:
raise Exception("File is not a Scan object; {}".format(type(scan_obj)))
else:
return scan_obj
@classmethod
def from_remote(cls, remote_path, ssh_obj=None):
"""
Method to initialize a Scan object from a remote server.
Has the option to pass an instance of a paramiko SSHClient, which would be
useful in a Batch. If none is supplied, an instance will be created.
:param remote_path: str remote path to the file
:param ssh_obj: optional argument to supply a paramiko SSHClient object
:return: Scan object from remote QtFTM file
"""
if ssh_obj is None:
default_keypath = os.path.join(os.path.expanduser("~"), ".ssh/id_rsa.pub")
hostname = input("Please provide remote hostname: ")
username = input("Please provide login: ")
ssh_settings = {"hostname": hostname, "username": username}
if os.path.isfile(default_keypath) is True:
ssh_settings["key_filename"] = default_keypath
else:
password = input("Please provide password: ")
ssh_settings["password"] = password
ssh_obj = routines.RemoteClient(**ssh_settings)
# Parse the scan data from remote file
data_dict = parse_scan(ssh_obj.open_remote(remote_path))
scan_obj = cls(**data_dict)
return scan_obj
def to_file(self, filepath, format="yaml"):
""" Method to dump data to YAML format.
Extensions are automatically decided, but
can also be supplied.
parameters:
--------------------
:param filepath - str path to yaml file
:param format - str denoting the syntax used for dumping. Defaults to YAML.
"""
if "." not in filepath:
if format == "json":
filepath += ".json"
else:
filepath += ".yml"
if format == "json":
writer = routines.dump_json
else:
writer = routines.dump_yaml
writer(filepath, self.__dict__)
def to_pickle(self, filepath=None, **kwargs):
"""
Pickles the Scan object with the joblib wrapper implemented
in routines.
:param filepath: optional argument to pickle to. Defaults to the id.pkl
:param kwargs: additional settings for the pickle operation
"""
if filepath is None:
filepath = "{}.pkl".format(self.id)
routines.save_obj(self, filepath, **kwargs)
def process_fid(self, **kwargs):
"""
Perform an FFT on the FID to yield the frequency domain spectrum.
Kwargs are passed into the FID processing, which will override the
Scan attributes.
:param kwargs: Optional keyword arguments for processing the FID
"""
# Calculate the frequency bins
frequencies = np.linspace(
self.cavity_frequency, self.cavity_frequency + 1.0, len(self.fid)
)
# Calculate the time bins
time = np.linspace(0.0, self.fid_spacing * self.fid_points, self.fid_points)
process_list = ["window", "filter", "exp", "zeropad"]
process_dict = {
key: value for key, value in self.__dict__.items() if key in process_list
}
# Override with user settings
process_dict.update(**kwargs)
temp_fid = np.copy(self.fid)
self.spectrum = fid2fft(
temp_fid, 1.0 / self.fid_spacing, frequencies, **process_dict
)
self.fid_df = pd.DataFrame({"Time (us)": time * 1e6, "FID": temp_fid})
def within_time(self, date_range):
"""
        Function for determining whether the scan was taken within
        a specified date range, given in month/day/year format;
        e.g. 04/09/08 for April 9th, 2008.
:param date_range: list containing the beginning and end date strings
:return: bool - True if within range, False otherwise
"""
try:
early = datetime.datetime.strptime(date_range[0], "%m/%d/%y")
except:
early = datetime.datetime(1, 1, 1)
try:
late = datetime.datetime.strptime(date_range[1], "%m/%d/%y")
except:
late = datetime.datetime(9999, 1, 1)
return early <= self.date <= late
def is_depleted(self, ref, roi=None, depletion=None):
"""
Function for determining if the signal in this Scan is less
than that of another scan. This is done by a simple comparison
of the average of 10 largest intensities in the two spectra. If
the current scan is less intense than the reference by the
expected depletion percentage, then it is "depleted".
        This function can be used to determine if a scan is depleted
in DR/magnet/discharge assays.
TODO - implement a chi squared test of sorts to determine if a
depletion is statistically significant
:param ref: second Scan object for comparison
:param depletion: percentage of depletion expected of the reference
:return: bool - True if signal in this Scan is less intense than the reference
"""
y_ref = ref.spectrum["Intensity"].values
y_obs = self.spectrum["Intensity"].values
self.ref_freq = ref.fit.frequency
self.ref_id = ref.id
if roi:
y_ref = y_ref[roi]
y_obs = y_obs[roi]
# This doesn't work, or is not particularly discriminating.
# chisq, p_value = chisquare(
# y_obs, y_ref
# )
if depletion is None:
sigma = np.std(y_obs, axis=0) * 16.0
else:
sigma = depletion
expected = np.sum(y_ref, axis=0) - sigma
return np.sum(y_obs, axis=0) <= expected
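    # Example (illustrative): flagging a DR depletion against a reference scan,
    # assuming both Scan objects have already been fit with fit_cavity:
    #   roi, _, _ = ref_scan.get_line_roi()
    #   depleted = dr_scan.is_depleted(ref_scan, roi=roi)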
def scatter_trace(self):
"""
Create a Plotly Scattergl trace. Called by the Batch function, although
performance-wise it takes forever to plot up ~3000 scans.
:return trace: Scattergl object
"""
text = "Scan ID: {}<br>Cavity: {}<br>DR: {}<br>Magnet: {}<br>Attn: {}".format(
self.id,
self.cavity_frequency,
self.dr_frequency,
self.magnet,
self.cavity_atten,
)
trace = go.Scattergl(
x=np.linspace(self.id, self.id + 1, len(self.spectrum["Intensity"])),
y=self.spectrum["Intensity"],
text=text,
marker={"color": "rgb(43,140,190)"},
hoverinfo="text",
)
return trace
def fit_cavity(self, plot=True, verbose=False):
"""
Perform a fit to the cavity spectrum. Uses a paired Gaussian model
that minimizes the number of fitting parameters.
:param plot: bool specify whether a Plotly figure is made
:return: Model Fit result
"""
y = self.spectrum["Intensity"].dropna().values
x = self.spectrum["Frequency (MHz)"].dropna().values
model = fitting.PairGaussianModel()
result = model.fit_pair(x, y, verbose=verbose)
self.spectrum["Fit"] = result.best_fit
self.fit = result
self.fit.frequency = self.fit.best_values["x0"]
if plot is True:
fig = go.FigureWidget()
fig.layout["xaxis"]["title"] = "Frequency (MHz)"
fig.layout["xaxis"]["tickformat"] = ".2f"
fig.add_scatter(x=x, y=y, name="Observed")
fig.add_scatter(x=x, y=result.best_fit, name="Fit")
return result, fig
else:
return result
def get_line_roi(self):
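        """
        Determine a region of interest around the fitted cavity line, taken as
        the Doppler doublet plus four linewidths on either side.
        :return: tuple of (list of indices, frequency slice, intensity slice)
        """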
if hasattr(self, "fit") is False:
raise Exception("Auto peak fitting has not been run yet!")
# Get one of the Doppler horns plus 4sigma
params = self.fit.best_values
x = self.spectrum["Frequency (MHz)"].values
y = self.spectrum["Intensity"].values
_, low_end = routines.find_nearest(
x, params["x0"] - params["xsep"] - params["w"] * 4.0
)
_, high_end = routines.find_nearest(
x, params["x0"] + params["xsep"] + params["w"] * 4.0
)
index = list(range(low_end, high_end))
return index, x[low_end:high_end], y[low_end:high_end]
def calc_snr(self, noise=None, thres=0.6):
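        """
        Estimate the signal-to-noise ratio of the scan by comparing the
        detected peak intensities against the baseline noise, which is taken
        from the first and last 10 points of the spectrum if not supplied.
        :param noise: optional float noise level; estimated from the baseline if None
        :param thres: absolute intensity threshold used for peak detection
        :return: float signal-to-noise ratio
        """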
if noise is None:
# Get the last 10 points at the end and at the beginning
noise = np.average(
[
self.spectrum["Intensity"].iloc[-10:],
self.spectrum["Intensity"].iloc[:10],
]
)
peaks = (
self.spectrum["Intensity"]
.iloc[
peakutils.indexes(
self.spectrum["Intensity"], thres=thres, thres_abs=True
)
]
.values
)
        # Average the two strongest detected peaks as the signal estimate
        signal = np.average(np.sort(peaks)[-2:])
return signal / noise
def parse_scan(filecontents):
"""
Function for extracting the FID data from an FT scan. The data
is returned as a dictionary, which can be used to initialize a
Scan object.
:param filecontents: list of lines from an FID file
:return: dict containing parsed data from FID
"""
data = {"gases": dict()}
# FID regex
fid_regex = re.compile(r"^fid\d*", re.M)
# Regex to find gas channels
gas_regex = re.compile(r"^#Gas \d name", re.M)
flow_regex = re.compile(r"^#Gas \d flow", re.M)
# Regex to detect which channel is set to the discharge
dc_regex = re.compile(r"^#Pulse ch \d name\s*DC", re.M)
dc_channel = None
for index, line in enumerate(filecontents):
if "#Scan" in line:
split_line = line.split()
data["id"] = int(split_line[1])
try:
data["machine"] = split_line[2]
except IndexError:
data["machine"] = "FT1"
if "#Probe freq" in line:
data["cavity_frequency"] = float(line.split()[2])
if "#Shots" in line:
data["shots"] = int(line.split()[-1])
if "#Date" in line:
strip_targets = ["#Date", "\t", "\n"]
data["date"] = datetime.datetime.strptime(
re.sub("|".join(strip_targets), "", line), "%a %b %d %H:%M:%S %Y"
)
if "#Cavity Voltage" in line:
data["cavity_voltage"] = int(line.split()[2])
if "#Attenuation" in line:
data["cavity_atten"] = int(line.split()[1])
if "#DR freq" in line:
data["dr_frequency"] = float(line.split()[2])
if "#DR power" in line:
data["dr_power"] = int(line.split()[2])
if "#FID spacing" in line:
data["fid_spacing"] = float(re.findall(r"\de[+-]?\d\d", line)[0])
if "#FID points" in line:
data["fid_points"] = int(line.split()[-1])
# Get the name of the gas
if gas_regex.match(line):
split_line = line.split()
# Only bother parsing if the channel is used
gas_index = int(split_line[1])
try:
data["gases"][gas_index] = {"gas": " ".join(split_line[3:])}
except IndexError:
data["gases"][gas_index] = {"gas": ""}
# Get the flow rate for channel
if flow_regex.match(line):
split_line = line.split()
gas_index = int(split_line[1])
data["gases"][gas_index]["flow"] = float(split_line[3])
if "#Magnet enabled" in line:
data["magnet"] = bool(int(line.split()[2]))
# Find the channel the discharge is set to and compile a regex
# to look for the channel
if dc_regex.match(line):
dc_index = line.split()[2]
dc_channel = re.compile(r"^#Pulse ch {} enabled".format(dc_index), re.M)
# Once the discharge channel index is known, start searching for it
if dc_channel:
if dc_channel.match(line):
data["discharge"] = bool(int(line.split()[-1]))
# Find when the FID lines start popping up
if fid_regex.match(line):
fid = filecontents[index + 1 :]
fid = [float(value) for value in fid]
data["fid"] = np.array(fid)
return data
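# Example (illustrative; the filename is hypothetical):
#   with open("123456.txt") as fid_file:
#       scan_data = parse_scan(fid_file.readlines())
#   scan = Scan.from_dict(scan_data)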
def perform_fft(fid, spacing, start=0, stop=-1, window="boxcar", sideband=1.0, probe_freq=0.0, f_min=None, f_max=None):
"""
Perform an FFT on an FID to get the frequency domain spectrum.
All of the arguments are optional, and provide control over how the FFT is performed, as well as post-processing
parameters like window functions and zero-padding.
This is based on the FFT code by Kyle Crabtree, with modifications to fit this dataclass.
Parameters
----------
fid - Numpy 1D array
Array holding the values of the FID
spacing - float
Time spacing between FID points in microseconds
start - int, optional
Starting index for the FID array to perform the FFT
stop - int, optional
End index for the FID array to perform the FFT
zpf - int, optional
Pad the FID with zeros to nth nearest power of 2
window - str
Specify the window function used to process the FID. Defaults to boxcar, which is effectively no filtering.
The names of the window functions available can be found at:
https://docs.scipy.org/doc/scipy/reference/signal.windows.html
    Returns
    -------
    frequency, fft - numpy 1D arrays
        The frequency axis and the scaled complex FFT of the FID
    """
fid = np.copy(fid)
if window is not None and window in spsig.windows.__all__:
window_f = spsig.windows.get_window(window, fid.size)
fid *= window_f
else:
raise Exception("Specified window function is not implemented in SciPy!")
# Set values to zero up to starting index
fid[:start] = 0.0
if stop < 0:
# If we're using negative indexes
fid[fid.size + stop :] = 0.0
else:
# Otherwise, index with a positive number
fid[stop:] = 0.0
# Perform the FFT
fft = np.fft.rfft(fid)
read_length = len(fid) // 2 + 1
df = 1.0 / fid.size / spacing
    # Generate the frequency array; the sideband and probe_freq defaults are
    # assumptions, since this module-level version has no FID header to read
    frequency = np.linspace(0.0, sideband * df, read_length)
    frequency += probe_freq
    # Zero out everything below f_min and above f_max, if a window is given
    if f_min is not None and f_max is not None:
        fft[(frequency >= f_max) | (frequency <= f_min)] = 0.0
    fft *= 1000.0
    return frequency, fft
def fid2fft(fid, rate, frequencies, **kwargs):
"""
Process an FID by performing an FFT to yield the frequency domain
information. Kwargs are passed as additional processing options,
and are implemented as some case statements to ensure the settings
are valid (e.g. conforms to sampling rate, etc.)
:param fid: np.array corresponding to the FID intensity
:param rate: sampling rate in Hz
:param frequencies: np.array corresponding to the frequency bins
:param kwargs: signal processing options:
delay - delays the FID processing by setting the start
of the FID to zero
zeropad - Toggles whether or not the number of sampled
points is doubled to get artificially higher
resolution in the FFT
window - Various window functions provided by `scipy.signal`
exp - Specifies an exponential filter
filter - 2-tuple specifying the frequency cutoffs for a
band pass filter
:return: freq_df - pandas dataframe with the FFT spectrum
"""
# Remove DC
new_fid = fid - np.average(fid)
if "delay" in kwargs:
delay = int(kwargs["delay"] / (1.0 / rate) / 1e6)
new_fid[:delay] = 0.0
# Zero-pad the FID
if "zeropad" in kwargs:
if kwargs["zeropad"] is True:
# Pad the FID with zeros to get higher resolution
            new_fid = np.append(new_fid, np.zeros(len(new_fid)))
# Since we've padded with zeros, we'll have to update the
# frequency array
frequencies = spsig.resample(frequencies, len(frequencies) * 2)
# Apply a window function to the FID
if "window" in kwargs:
if kwargs["window"] in spsig.windows.__all__:
new_fid *= spsig.get_window(kwargs["window"], new_fid.size)
# Apply an exponential filter on the FID
if "exp" in kwargs:
if kwargs["exp"] > 0.0:
new_fid *= spsig.exponential(len(new_fid), tau=kwargs["exp"])
# Apply a bandpass filter on the FID
if ("filter" in kwargs) and (len(kwargs["filter"]) == 2):
low, high = sorted(kwargs["filter"])
if low < high:
new_fid = apply_butter_filter(new_fid, low, high, rate)
# Perform the FFT
fft = np.fft.rfft(new_fid)
# Get the real part of the FFT, and only the non-duplicated side
real_fft = np.abs(fft[: int(len(new_fid) / 2)]) / len(new_fid) * 1e3
frequencies = spsig.resample(frequencies, real_fft.size)
# For some reason, resampling screws up the frequency ordering...
real_fft = real_fft[np.argsort(frequencies)]
frequencies = np.sort(frequencies)
# Package into a pandas dataframe
freq_df = pd.DataFrame({"Frequency (MHz)": frequencies, "Intensity": real_fft})
return freq_df
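# Example (illustrative): FFT of an FID sampled at 10 MHz, applying a
# Blackman-Harris window and a 10-40 kHz band-pass filter; `fid` and `freqs`
# are assumed to be NumPy arrays of the FID and frequency bins:
#   spectrum = fid2fft(fid, 1e7, freqs, window="blackmanharris", filter=[10.0, 40.0])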
def butter_bandpass(low, high, rate, order=1):
"""
A modified version of the Butterworth bandpass filter described here,
adapted for use with the FID signal.
http://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
The arguments are:
:param low The low frequency cut-off, given in kHz.
:param high The high frequency cut-off, given in kHz.
:param rate The sampling rate, given in Hz. From the FIDs, this means that
the inverse of the FID spacing is used.
:return bandpass window
"""
# Calculate the Nyquist frequency
nyq = 0.5 * (rate / (2.0 * np.pi))
low = (low * 1e3) / nyq
high = (high * 1e3) / nyq
if high > 1.0:
raise Exception("High frequency cut-off exceeds the Nyquist frequency.")
b, a = spsig.butter(order, [low, high], btype="band", analog=False)
return b, a
def apply_butter_filter(data, low, high, rate, order=1):
"""
A modified Butterworth bandpass filter, adapted from the Scipy cookbook.
The argument data supplies the FID, which then uses the scipy signal
processing function to apply the digital filter, and returns the filtered
FID.
See the `butter_bandpass` function for additional arguments.
"""
b, a = butter_bandpass(low, high, rate, order=order)
y = spsig.lfilter(b, a, data)
return y
def parse_batch(filecontents):
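    """
    Parse the contents of a QtFTM batch file into a pandas DataFrame of the
    individual scan entries, along with a dictionary of batch metadata.
    :param filecontents: list of lines from a QtFTM batch file
    :return: tuple of (pandas DataFrame of scan details, dict of batch metadata)
    """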
data = dict()
for index, line in enumerate(filecontents):
if "#Batch scan" in line:
data["id"] = int(line.split()[2])
if "#Date" in line:
strip_targets = ["#Date", "\t", "\n"]
data["date"] = datetime.datetime.strptime(
re.sub("|".join(strip_targets), "", line), "%a %b %d %H:%M:%S %Y"
)
if line.startswith("batchscan"):
scan_details = filecontents[index + 1 :]
scan_details = [scan.split() for scan in scan_details]
headers = [
"id",
"max",
"iscal",
"issat",
"ftfreq",
"attn",
"drfreq",
"drpower",
"pulses",
"shots",
"autofitpair_freq",
"autofitpair_int",
"autofitfreq",
"autofitint",
]
df = pd.DataFrame(scan_details, columns=headers)
return df, data
def generate_ftb_line(frequency, shots, **kwargs):
""" Function that generates an FTB file for a list of
frequencies, plus categorization tests.
kwargs are passed as additional options for the ftb
batch. Keywords are:
magnet: bool
dipole: float
atten: int
skiptune: bool
drfreq: float
drpower: int
cal
parameters:
---------------
:param frequency: float for frequency in MHz
:param shots: int number of shots to integrate for
returns:
---------------
:return ftbline: str
"""
line = "ftm:{:.4f} shots:{}".format(frequency, shots)
for key, value in kwargs.items():
line += " {}:{}".format(key, value)
line += "\n"
return line
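# Example (illustrative): a single FTB entry with DR settings added as kwargs
#   line = generate_ftb_line(11234.5, 50, **{"drfreq": 15678.9, "skiptune": "true"})
#   # -> "ftm:11234.5000 shots:50 drfreq:15678.9 skiptune:true\n"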
def neu_categorize_frequencies(frequencies, intensities=None, nshots=50, **kwargs):
"""
Routine to generate an FTB batch file for performing a series of tests
on frequencies.
"""
ftb_string = ""
if intensities:
norm_int = intensities / np.max(intensities)
shotcounts = np.round(nshots / norm_int).astype(int)
else:
shotcounts = np.full(len(frequencies), nshots, dtype=int)
# default settings for all stuff
param_dict = {
"dipole": 1.0,
"magnet": "false",
"drpower": "10",
"skiptune": "false",
}
param_dict.update(kwargs)
    for freq, shot in zip(frequencies, shotcounts):
        ftb_string += generate_ftb_line(freq, shot, **param_dict)
        if "magnet" in kwargs:
            # Repeat the measurement with the magnet on, then reset the flag
            param_dict["magnet"] = "true"
            ftb_string += generate_ftb_line(freq, shot, **param_dict)
            param_dict["magnet"] = "false"
    return ftb_string
def categorize_frequencies(
frequencies,
nshots=50,
intensities=None,
power=None,
attn_list=None,
dipole=None,
attn=None,
magnet=False,
dr=False,
discharge=False,
):
"""
Function that will format an FT batch file to perform categorization
tests, with some flexibility on how certain tests are performed.
"""
ftb_str = ""
if intensities is None:
shots = np.full(len(frequencies), nshots, dtype=int)
else:
shots = np.sqrt(nshots / intensities).astype(int)
if dipole:
if attn is None:
# If dipole test requested, but no attenuation
# supplied do the default sweep
dipole_test = [0.01, 0.1, 1.0, 3.0, 5.0]
dipole_flag = "dipole"
else:
# Otherwise run specific attenuations
dipole_test = attn_list
dipole_flag = "atten"
if dr is True:
freq_list = combinations(frequencies, 2)
print(list(freq_list))
else:
freq_list = frequencies
# loop over each frequency and number of shots
for value, shotcount in zip(freq_list, shots):
if dr is True:
freq, dr_freq = value
else:
freq = value
# Generate normal observation
try:
freq = float(freq)
shotcount = int(shotcount)
if dr is True:
dr_freq = float(dr_freq)
ftb_str += generate_ftb_line(freq, shotcount, **{"skiptune": "false"})
if dr is True:
ftb_str += generate_ftb_line(
freq, shotcount, **{"skiptune": "true", "drfreq": dr_freq}
)
if dipole is True:
for dipole_value in dipole_test:
ftb_str += generate_ftb_line(
freq, shotcount, **{dipole_flag: dipole_value}
)
if magnet is True:
ftb_str += generate_ftb_line(freq, shotcount, **{"magnet": "true"})
if discharge is True:
# Toggle the discharge stack on and off
ftb_str += generate_ftb_line(
freq, shotcount, **{"pulse,1,enabled": "false"}
)
ftb_str += generate_ftb_line(
freq, shotcount, **{"pulse,1,enabled": "true"}
)
except ValueError:
print("Error with " + str(value))
return ftb_str
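# Example (illustrative): generate a DR cross-test batch for three lines,
# measuring every pairwise combination at 100 shots each:
#   ftb = categorize_frequencies([11234.5, 12345.6, 13456.7], nshots=100, dr=True)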
def calculate_integration_times(intensity, nshots=50):
"""
Method for calculating the expected integration time
in shot counts based on the intensity; either theoretical
line strengths or SNR.
parameters:
---------------
intensity - array of intensity metric; e.g. SNR
nshots - optional int number of shots used for the strongest line
returns:
---------------
shot_counts - array of shot counts for each frequency
"""
norm_int = intensity / np.max(intensity)
shot_counts = np.round(nshots / norm_int).astype(int)
return shot_counts
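# Worked example (illustrative): for relative intensities [10, 5, 1] and
# nshots=50, the normalized intensities are [1.0, 0.5, 0.1], giving shot
# counts of [50, 100, 500].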
class AssayBatch:
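    """
    Class for organizing cavity assays (dipole, magnet, DR, and discharge
    tests) from an initial list of frequencies and intensities, typically
    taken from a broadband survey. Provides methods for generating FTB batch
    files and for analyzing the resulting QtFTM batches.
    """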
@classmethod
def from_csv(cls, filepath, exp_id):
"""
Create an AssayBatch session by providing a
filepath to a file containing the frequency and
intensity information, as well as the experiment
ID from which it was based.
parameters:
--------------
filepath - path to a CSV file with Frequency/Intensity
columns
exp_id - experiment ID; typically the chirp experiment number
"""
df = pd.read_csv(filepath)
return cls(df, exp_id)
@classmethod
def load_session(cls, filepath):
"""
Loads a previously saved AssayBatch session.
"""
session = routines.read_obj(filepath)
obj = cls(**session)
return obj
def __init__(self, data, exp_id, freq_col="Frequency", int_col="Intensity"):
folders = ["assays", "ftbfiles"]
for folder in folders:
if os.path.isdir(folder) is False:
os.mkdir(folder)
if os.path.isdir("assays/plots") is False:
os.mkdir("./assays/plots")
self.data = data
self.exp_id = exp_id
if freq_col not in self.data.columns:
self.freq_col = self.data.columns[0]
else:
self.freq_col = freq_col
if int_col not in self.data.columns:
            self.int_col = self.data.columns[1]
else:
self.int_col = int_col
def calc_scan_SNR(self, peak_int, noise_array):
"""
Calculate the signal-to-noise ratio of a peak
for a given reference noise intensity array
and peak intensity.
"""
noise = np.mean(noise_array)
return peak_int / noise
def dipole_analysis(
self,
batch_path,
thres=0.5,
        dipoles=[0.01, 0.1, 1.0, 3.0, 5.0],
snr_thres=5.0,
):
"""
Method for determining the optimal dipole moment to use
for each frequency in a given exported batch of dipole tests.
In addition to returning a pandas dataframe, it is also
stored as the object attribute dipole_df.
parameters:
----------------
batch_path - filepath to the exported XY file from a QtFTM batch
thres - threshold in absolute intensity for peak detection (in Volts)
dipoles - list of dipole moments used in the screening
snr_thres - threshold in signal-to-noise for line detection
returns:
----------------
optimal_df - pandas dataframe containing the optimal dipole moments
"""
batch_df = pd.read_csv(batch_path, sep="\t")
batch_df.columns = ["Scan", "Intensity"]
# Reference scan numbers from the batch
scan_numbers = np.unique(np.around(batch_df["Scan"]).astype(int))
# Make a dataframe for what we expect everything should be
full_df = pd.DataFrame(
data=list(product(self.data["Frequency"], dipoles)),
columns=["Frequency", "Dipole"],
)
full_df["Scan"] = scan_numbers
# Loop over each scan, and determine whether or not there is a peak
# If there is sufficiently strong feature, add it to the list with the
# scan number
detected_scans = list()
for index, scan in enumerate(scan_numbers):
scan_slice = batch_df.loc[
(batch_df["Scan"] >= scan - 0.5) & (batch_df["Scan"] <= scan + 0.5)
]
# Find the peaks based on absolute signal intensity
# Assumption is that everything is integrated for the same period of time
peaks = scan_slice.iloc[
peakutils.indexes(scan_slice["Intensity"], thres=thres, thres_abs=True)
].sort_values(["Intensity"], ascending=False)
peak_int = np.average(peaks["Intensity"][:2])
snr = self.calc_scan_SNR(peak_int, scan_slice["Intensity"][-10:])
if snr >= snr_thres:
detected_scans.append([scan, snr])
snr_df = pd.DataFrame(data=detected_scans, columns=["Scan", "SNR"])
# Merge the dataframes based on scan number. This will identify
# scans where we observe a line
obs_df = full_df.merge(snr_df, on=["Scan"], copy=False)
optimal_data = list()
# Loop over each frequency in order to determine the optimal
# dipole moment to use
for frequency in np.unique(obs_df["Frequency"]):
slice_df = obs_df.loc[obs_df["Frequency"] == frequency]
# Sort the best response dipole moment at the top
slice_df.sort_values(["SNR"], inplace=True, ascending=False)
slice_df.index = np.arange(len(slice_df))
optimal_data.append(slice_df.iloc[0].values)
optimal_df = pd.DataFrame(
optimal_data, columns=["Frequency", "Dipole", "Scan", "SNR"]
)
optimal_df.sort_values(["SNR"], ascending=False, inplace=True)
self.dipole_df = optimal_df
# Generate histogram of dipole moments
with plt.style.context("publication"):
fig, ax = plt.subplots()
self.dipole_df["Dipole"].hist(ax=ax)
ax.set_xlabel("Dipole (D)")
ax.set_ylabel("Counts")
fig.savefig(
"./assays/plots/{}-dipole.pdf".format(self.exp_id),
format="pdf",
transparent=True,
)
return optimal_df
def generate_magnet_test(self, dataframe=None, cal=False, nshots=50, **kwargs):
"""
Generate an FT batch file for performing magnetic tests.
parameters:
-----------------
dataframe - dataframe used for the frequency information.
By default the dipole dataframe will be used.
cal - bool denoting whether a calibration line is used.
A user can provide it in kwargs, or use the default
which is the first line in the dataframe (strongest
if sorted by intensity)
nshots - optional int specifying number of shots for each
line, or if SNR is in the dataframe, the number
of shots for the strongest line.
kwargs - passed into the calibration settings, which are
cal_freq, cal_rate, and cal_shots
"""
if dataframe is None:
dataframe = self.dipole_df
dataframe["Shots"] = calculate_integration_times(
dataframe["SNR"].values, nshots
)
ftb_str = ""
        # If there are no shots determined, fall back to a fixed shot count
if "Shots" not in dataframe:
dataframe["Shots"] = nshots
# If calibration is requested
# cal_rate sets the number of scans per calibration
if cal is True:
cal_settings = {
"cal_freq": dataframe["Frequency"][0],
"cal_rate": 100,
"cal_shots": dataframe["Shots"][0],
}
if "Dipole" in dataframe:
cal_settings["cal_dipole"] = dataframe["Dipole"][0]
cal_settings.update(kwargs)
            # Generate the calibration string; the dipole falls back to 1.0 D
            # (an assumed default) if none was carried through the dataframe
            cal_str = generate_ftb_line(
                cal_settings["cal_freq"],
                cal_settings["cal_shots"],
                **{
                    "dipole": cal_settings.get("cal_dipole", 1.0),
                    "skiptune": "false",
                },
            )
            cal_str = cal_str.replace("\n", " cal\n")
        # generate the batch file
        for index, row in dataframe.iterrows():
            # If calibration was requested, add a calibration line every
            # cal_rate entries
            if cal is True and index % cal_settings["cal_rate"] == 0:
                ftb_str += cal_str
            param_dict = {}
            if "Dipole" in row:
                param_dict["dipole"] = row["Dipole"]
            # Make sure the magnet is off for the reference measurement
            param_dict["magnet"] = "false"
            param_dict["skiptune"] = "false"
            ftb_str += generate_ftb_line(row["Frequency"], row["Shots"], **param_dict)
            # Turn on the magnet and skip tuning for the comparison measurement
            param_dict["skiptune"] = "true"
            param_dict["magnet"] = "true"
            ftb_str += generate_ftb_line(row["Frequency"], row["Shots"], **param_dict)
with open("./ftbfiles/{}.ftb".format(self.exp_id), "w+") as write_file:
write_file.write(ftb_str)
def generate_bruteforce_dr(self, nshots=10, dr_channel=3):
"""
Brute force double resonance test on every single frequency
observed in the initial dipole test.
This method will perform DR measurements on sequentially
weaker lines, if the dipole_df is sorted by SNR.
Optionally, the
"""
ftb_str = ""
combos = combinations(self.dipole_df[["Frequency", "Dipole"]].values, 2)
for index, combo in enumerate(combos):
# For the first time a cavity frequency is used
# we will measure it once without DR
freq = combo[0][0]
if (index == 0) or (last_freq != freq):
ftb_str += generate_ftb_line(
freq,
nshots,
**{
"dipole": combo[0][1],
"pulse,{},enable".format(dr_channel): "false",
"skiptune": "false",
},
)
# Only do DR if the DR frequency is significantly different from
# the cavity frequency
if np.abs(combo[1][0] - freq) >= 100.0:
ftb_str += generate_ftb_line(
freq,
nshots,
**{
"dipole": combo[0][1],
"drfreq": combo[1][0],
"pulse,{},enable".format(dr_channel): "true",
"skiptune": "true",
},
)
last_freq = combo[0][0]
print("There are {} combinations to measure.".format(index))
with open("./ftbfiles/{}-bruteDR.ftb".format(self.exp_id), "w+") as write_file:
write_file.write(ftb_str)
print("FTB file saved to ./ftbfiles/{}-bruteDR.ftb".format(self.exp_id))
def find_progressions(self, **kwargs):
"""
Uses the dipole assay data to look for harmonic
progression.
Kwargs are passed into the affinity propagation
clustering; usually this means "preference" should
be set to tune the number of clusters.
returns:
--------------
cluster_dict - dictionary containing all of the clustered
progressions
"""
progressions = analysis.harmonic_finder(self.dipole_df["Frequency"].values)
self.progression_df = fitting.harmonic_fitter(progressions)
data, ap_obj = analysis.cluster_AP_analysis(
self.progression_df, True, False, **kwargs
)
self.cluster_dict = data
self.cluster_obj = ap_obj
return self.cluster_dict
def generate_progression_test(self, nshots=50, dr_channel=3):
"""
Take the dipole moment dataframe and generate a DR batch.
If possible, we will instead use the progressions predicted
by the cluster model.
"""
ftb_str = ""
count = 0
# Loop over progressions
for index, sub_dict in self.cluster_dict.items():
# Take only frequencies that are in the current progression
slice_df = self.dipole_df.loc[
self.dipole_df["Frequency"].isin(sub_dict["Frequencies"])
]
prog_data = slice_df[["Frequency", "Dipole", "Shots"]].values
for sub_index, pair in enumerate(combinations(prog_data, 2)):
count += 1
if (sub_index == 0) or (last_freq != pair[0][0]):
ftb_str += generate_ftb_line(
pair[0][0],
10,
**{
"dipole": pair[0][1],
"pulse,{},enable".format(dr_channel): "false",
"skiptune": "false",
},
)
# Perform the DR measurement
ftb_str += generate_ftb_line(
pair[0][0],
10,
**{
"dipole": pair[0][1],
"pulse,{},enable".format(dr_channel): "true",
"skiptune": "true",
},
)
last_freq = pair[0][0]
print("There are {} combinations to test.".format(count))
with open(
"./ftbfiles/{}-progressionDR.ftb".format(self.exp_id), "w+"
) as write_file:
write_file.write(ftb_str)
print("FTB file saved to ./ftbfiles/{}-progressionDR.ftb".format(self.exp_id))
def plot_scan(self, scan_number=None):
"""
Quick method for plotting up a strip to highlight a particular
scan in a batch.
parameters:
---------------
scan_number - float corresponding to the scan
"""
fig = go.FigureWidget()
fig.add_scatter(x=self.data["Scan"], y=self.data["Intensity"])
if scan_number is not None:
fig.add_bar(x=[scan_number], y=[np.max(self.data["Intensity"])])
return fig
def static_plot(self, scan_number, dataframe=None):
"""
Produce a static plot with matplotlib of a particular
scan from a batch.
Saves the plot to ./assays/plots/
By default, the full dataframe will be used for the plotting.
Other dataframes (such as from other assays) can also be used.
parameters:
---------------
scan_number - float corresponding to the scan of interest
dataframe - optional arg; pandas dataframe with Scan/Intensity
"""
if dataframe is None:
dataframe = self.data
slice_df = dataframe.loc[
(dataframe["Scan"] >= scan_number - 1.5)
& (dataframe["Scan"] <= scan_number + 1.5)
]
scan_numbers = np.unique(np.round(slice_df["Scan"]))
with plt.style.context("publication"):
fig, ax = plt.subplots()
ax.plot(slice_df["Scan"], slice_df["Intensity"])
ax.set_xticks(scan_numbers)
# Makes the x axis not scientific notation
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.set_xlabel("Scan number")
ax.set_ylabel("Intensity")
fig.savefig(
"./assays/plots/{}.pdf".format(scan_number),
format="pdf",
transparent=True,
)
def save_session(self, filepath=None):
"""
Method to save the current assay analysis session to disk.
The data can be reloaded using the AssayBatch.load_session
class method.
parameters:
---------------
filepath - path to save the data to. By default, the path will
be the experiment ID.
"""
if filepath is None:
filepath = "./assays/{}-assay-analysis.dat".format(self.exp_id)
routines.save_obj(self.__dict__, filepath)
print("Saved session to {}".format(filepath))
def predict_prolate_series(progressions, J_thres=0.1):
fit_df, fits = fitting.harmonic_fitter(progressions, J_thres)
J_model = LinearModel()
BJ_model = fitting.BJModel()
predictions = dict()
for index, row in fit_df.iterrows():
row = row.dropna()
J_values = row[[col for col in row.keys() if "J" in str(col)]].values
if len(J_values) > 2:
J_fit = J_model.fit(data=J_values, x=np.arange(len(J_values)))
J_predicted = J_fit.eval(x=np.arange(-10, 10, 1))
BJ_params = row[["B", "D"]].values
freq_predicted = BJ_model.eval(
J=J_predicted, B=BJ_params[0], D=BJ_params[1]
)
elif len(J_values) == 2:
frequencies = row[[2, 4]].values
approx_B = np.abs(np.diff(frequencies))
next_freq = np.max(frequencies) + approx_B
low_freq = np.min(frequencies) - approx_B
freq_predicted = np.concatenate(
(frequencies, [next_freq, low_freq]), axis=None
)
freq_predicted = np.sort(freq_predicted)
J_predicted = freq_predicted / approx_B
# Filter out negative frequencies
freq_predicted = freq_predicted[0.0 < freq_predicted]
predictions[index] = {
"predicted_freq": freq_predicted,
"predicted_J": J_predicted,
}
return predictions
@dataclass
class BlackchirpExperiment:
exp_id: int
fid_start: int = 0
fid_end: int = -1
ft_min: float = 0.0
ft_max: float = 40000.0
ft_filter: str = "boxcar"
freq_offset: float = 0.0
fids: List = field(default_factory=list)
header: Dict = field(default_factory=dict)
@classmethod
def from_dir(cls, filepath):
exp_id, header, fids, timedata = parsers.parse_blackchirp(filepath)
exp_obj = cls(exp_id=exp_id, header=header, fids=fids)
return exp_obj
def process_ffts(self, weighting=None):
"""
Batch perform FFTs on all of the FIDs. The end result is a Pandas DataFrame with the Frequency and Intensity
data, where the intensity is just the weighted co-average of all the FFTs. By default, every FID is equally
weighted.
Parameters
----------
weighting
Returns
-------
"""
weight_factors = {index: 1.0 for index in range(len(self.fids))}
if weighting:
weight_factors.update(**weighting)
# Work out the frequency bins
frequency = self.fids[0].determine_frequencies()
# Weight the FIDs
        weighted_fids = [
            self.fids[index].xy_data[1] * weight
            for index, weight in weight_factors.items()
        ]
        averaged = np.sum(weighted_fids, axis=0) / np.sum(
            [weight for weight in weight_factors.values()]
        )
        # Calculate the sample rate; inverse of the spacing, converted back to seconds
        rate = 1.0 / self.header["spacing"] / 1e6
        fft_df = fid2fft(averaged, rate, frequency)
        spectrum_df = pd.DataFrame(
            {
                "Frequency": fft_df["Frequency (MHz)"] + self.freq_offset,
                "Intensity": fft_df["Intensity"],
            }
        )
self.spectrum = spectrum_df
return spectrum_df
@dataclass
class BlackChirpFid:
xy_data: np.array
header: Dict = field(default_factory=dict)
@classmethod
def from_binary(cls, filepath):
"""
Create a BlackChirp FID object from a binary BlackChirp FID file.
Parameters
----------
filepath - str
Filepath to the BlackChirp .fid file
Returns
-------
BlackChirpFid object
"""
param_dict, xy_data, _ = parsers.read_binary_fid(filepath)
fid_obj = cls(xy_data, param_dict)
return fid_obj
def to_pickle(self, filepath, **kwargs):
"""
Save the Blackchirp FID to a pickle file.
Parameters
----------
filepath - str
Filepath to save the FID to
kwargs - dict-like
Additional keyword arguments that are passed to the
pickle function.
"""
routines.save_obj(self, filepath, **kwargs)
def perform_fft(self, start=0, stop=-1, window="boxcar", f_min=0.0, f_max=30000.0):
"""
Perform an FFT on the current FID to get the frequency domain spectrum.
All of the arguments are optional, and provide control over how the FFT is performed, as well as post-processing
parameters like window functions and zero-padding.
This is based on the FFT code by Kyle Crabtree, with modifications to fit this dataclass.
Parameters
----------
start - int, optional
Starting index for the FID array to perform the FFT
stop - int, optional
End index for the FID array to perform the FFT
zpf - int, optional
Pad the FID with zeros to nth nearest power of 2
window - str
Specify the window function used to process the FID. Defaults to boxcar, which is effectively no filtering.
The names of the window functions available can be found at:
https://docs.scipy.org/doc/scipy/reference/signal.windows.html
f_min - float
Specify the minimum frequency in the spectrum; everything below this value is set to zero
f_max - float
Specify the maximum frequency in the spectrum; everything above this value is set to zero
        Returns
        -------
        frequency, fft - numpy 1D arrays
            The frequency axis (offset by the probe frequency) and the scaled
            complex FFT of the FID
        """
fid = np.copy(self.xy_data[1])
if window is not None and window in spsig.windows.__all__:
window_f = spsig.windows.get_window(window, fid.size)
fid *= window_f
else:
raise Exception("Specified window function is not implemented in SciPy!")
# Set values to zero up to starting index
fid[:start] = 0.0
if stop < 0:
# If we're using negative indexes
fid[fid.size + stop :] = 0.0
else:
# Otherwise, index with a positive number
fid[stop:] = 0.0
# Perform the FFT
fft = np.fft.rfft(fid)
read_length = len(fid) // 2 + 1
df = 1.0 / fid.size / self.header["spacing"]
# Generate the frequency array
frequency = np.linspace(0.0, self.header["sideband"] * df, read_length)
frequency += self.header["probe_freq"]
        # Zero out everything below f_min and above f_max, per the docstring
        fft[(frequency >= f_max) | (frequency <= f_min)] = 0.0
fft *= 1000.0
return frequency, fft
def determine_frequencies(self):
"""
Calculate the frequency bins for the FFT.
Returns
-------
frequency - numpy 1D array
Array containing the frequency bins (x values)
"""
fid = self.xy_data[1]
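        # Bin spacing of the FFT follows from the FID length and point
        # spacing: df = 1 / (N * dt)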
df = 1.0 / fid.size / self.header["spacing"]
read_length = len(fid) // 2 + 1
# Generate the frequency array
frequency = np.linspace(0.0, self.header["sideband"] * df, read_length)
frequency += self.header["probe_freq"]
return frequency
| 4,983
| 33,421
| 304
|
cf478e66633fb9947f670d728f5489d1a7e3d900
| 999
|
py
|
Python
|
dnsimple/struct/domain_renewal.py
|
mherrmann/dnsimple-python
|
a89127f0bafb2a001c902206fba87cbc4f3bc2d1
|
[
"MIT"
] | 12
|
2020-06-18T17:16:03.000Z
|
2022-03-23T08:35:49.000Z
|
dnsimple/struct/domain_renewal.py
|
mherrmann/dnsimple-python
|
a89127f0bafb2a001c902206fba87cbc4f3bc2d1
|
[
"MIT"
] | 129
|
2020-06-25T12:15:51.000Z
|
2022-03-23T09:42:16.000Z
|
dnsimple/struct/domain_renewal.py
|
mherrmann/dnsimple-python
|
a89127f0bafb2a001c902206fba87cbc4f3bc2d1
|
[
"MIT"
] | 6
|
2020-07-03T09:34:01.000Z
|
2021-12-20T04:29:59.000Z
|
import json
from dataclasses import dataclass
import omitempty
from dnsimple.struct import Struct
class DomainRenewRequest(dict):
"""DomainRenewRequest represents the attributes you can pass to a renew API request."""
@dataclass
class DomainRenewal(Struct):
"""Represents the result of a domain renewal call."""
id = None
"""The domain registration ID in DNSimple"""
domain_id = None
"""The associated domain ID"""
state = None
"""The state of the renewal"""
period = None
"""The number of years the domain was registered for"""
created_at = None
"""When the domain renewal was created in DNSimple"""
updated_at = None
"""When the domain renewal was last updated in DNSimple"""
| 27
| 91
| 0.692693
|
import json
from dataclasses import dataclass
import omitempty
from dnsimple.struct import Struct
class DomainRenewRequest(dict):
"""DomainRenewRequest represents the attributes you can pass to a renew API request."""
def __init__(self, period=None, premium_price=None):
dict.__init__(self, period=period, premium_price=premium_price)
def to_json(self):
return json.dumps(omitempty(self))
@dataclass
class DomainRenewal(Struct):
"""Represents the result of a domain renewal call."""
id = None
"""The domain registration ID in DNSimple"""
domain_id = None
"""The associated domain ID"""
state = None
"""The state of the renewal"""
period = None
"""The number of years the domain was registered for"""
created_at = None
"""When the domain renewal was created in DNSimple"""
updated_at = None
"""When the domain renewal was last updated in DNSimple"""
def __init__(self, data):
super().__init__(data)
| 178
| 0
| 80
|
ab2b1e286f0be8439a38f4a5f410e09790e73292
| 898
|
py
|
Python
|
transmute_core/framework/request_adapter.py
|
yunstanford/transmute-core
|
a8e5dd055f0f3d39327d71dd61bf0ee147f59ebe
|
[
"MIT"
] | null | null | null |
transmute_core/framework/request_adapter.py
|
yunstanford/transmute-core
|
a8e5dd055f0f3d39327d71dd61bf0ee147f59ebe
|
[
"MIT"
] | null | null | null |
transmute_core/framework/request_adapter.py
|
yunstanford/transmute-core
|
a8e5dd055f0f3d39327d71dd61bf0ee147f59ebe
|
[
"MIT"
] | null | null | null |
class RequestAdapter(object):
"""
RequestAdapters bridge transmute's
representation of a request, with the framework's
implementation.
implement the unimplemented methods.
"""
@property
def body(self):
""" return the request body. """
raise NotImplementedError()
def _get_framework_args(self):
"""
often, a framework provides specific variables that are passed
into the handler function (e.g. the request object in
aiohttp). return a dictionary of these arguments, which will be
added to the function arguments if they appear.
"""
raise NotImplementedError()
| 28.0625
| 71
| 0.662584
|
class RequestAdapter(object):
"""
RequestAdapters bridge transmute's
representation of a request, with the framework's
implementation.
implement the unimplemented methods.
"""
@property
def body(self):
""" return the request body. """
raise NotImplementedError()
def _get_framework_args(self):
"""
often, a framework provides specific variables that are passed
into the handler function (e.g. the request object in
aiohttp). return a dictionary of these arguments, which will be
added to the function arguments if they appear.
"""
raise NotImplementedError()
def _query_argument(self, key, is_list):
raise NotImplementedError()
def _header_argument(self, key):
raise NotImplementedError()
def _path_argument(self, key):
raise NotImplementedError()
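# Sketch (illustrative, hypothetical adapter): a concrete framework adapter
# subclasses RequestAdapter and maps the accessors onto its own request object:
#   class DictRequestAdapter(RequestAdapter):
#       def __init__(self, request):
#           self._request = request
#       @property
#       def body(self):
#           return self._request["body"]
#       def _query_argument(self, key, is_list):
#           values = self._request["query"].get(key, [])
#           return values if is_list else values[0]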
| 147
| 0
| 81
|
2d8767d69cb3fe7dd0333a69a100b095f51eda10
| 942
|
py
|
Python
|
week4_divide_and_conquer/4_number_of_inversions/inversions.py
|
thegautamkumarjaiswal/Algorithm-s_ToolBox_Solutions
|
bb265647ed183f44e0d56f14a4b8b966af73dfd2
|
[
"Apache-2.0"
] | null | null | null |
week4_divide_and_conquer/4_number_of_inversions/inversions.py
|
thegautamkumarjaiswal/Algorithm-s_ToolBox_Solutions
|
bb265647ed183f44e0d56f14a4b8b966af73dfd2
|
[
"Apache-2.0"
] | null | null | null |
week4_divide_and_conquer/4_number_of_inversions/inversions.py
|
thegautamkumarjaiswal/Algorithm-s_ToolBox_Solutions
|
bb265647ed183f44e0d56f14a4b8b966af73dfd2
|
[
"Apache-2.0"
] | null | null | null |
# python3
""" Task: Count the number of inversions of a given sequence """
tot_count = 0
n = int ( input () )
seq = [ int ( i ) for i in input ().split () ]
mergesort ( seq )
print ( tot_count )
| 21.409091
| 64
| 0.505308
|
# python3
""" Task: Count the number of inversions of a given sequence """
def merge(left , right) :
i , j , inversion_counter = 0 , 0 , 0
final = list ()
while i < len ( left ) and j < len ( right ) :
if left [ i ] <= right [ j ] :
final.append ( left [ i ] )
i += 1
else :
final.append ( right [ j ] )
inversion_counter += len ( left ) - i
j += 1
final += left [ i : ]
final += right [ j : ]
return final , inversion_counter
def mergesort(arr) :
global tot_count
if len ( arr ) <= 1 :
return arr
mid = len ( arr ) // 2
left = mergesort ( arr [ :mid ] )
right = mergesort ( arr [ mid : ] )
sorted_arr , temp = merge ( left , right )
tot_count += temp
return sorted_arr
tot_count = 0
n = int ( input () )
seq = [ int ( i ) for i in input ().split () ]
mergesort ( seq )
print ( tot_count )
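# Worked example (illustrative): for the input sequence "2 3 9 2 9" the
# inverted pairs are (3, 2) and (9, 2), so the program prints 2.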
| 696
| 0
| 46
|
7672fabf7a8b7a3f323dedbf8df709a757105afa
| 3,646
|
py
|
Python
|
py3/tflib/lsun256.py
|
fr42k/gap-wgan-gp
|
4e373c43d606a1b83f76893d93f9cf8be8cd460d
|
[
"MIT"
] | null | null | null |
py3/tflib/lsun256.py
|
fr42k/gap-wgan-gp
|
4e373c43d606a1b83f76893d93f9cf8be8cd460d
|
[
"MIT"
] | null | null | null |
py3/tflib/lsun256.py
|
fr42k/gap-wgan-gp
|
4e373c43d606a1b83f76893d93f9cf8be8cd460d
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.misc
import os
import time
# from PIL import Image
DATA_DIR = '/home/ubuntu/lsun/bedrooms/'
NEW_DATA_DIR = '/home/ubuntu/lsun/bedrooms_128/'
# with open(DATA_DIR+'files.txt', 'r') as f:
# files = [l[:-1] for l in f]
# # images = np.zeros((batch_size, 3, 256, 256), dtype='int32')
# random_state = np.random.RandomState(42)
# random_state.shuffle(files)
# z = 1729468
# for i, path in enumerate(files):
# if i < 1729500:
# continue
# try:
# image = scipy.misc.imread(
# os.path.normpath(os.path.join(DATA_DIR, path))
# )
# # try:
# # image = image.transpose(2,0,1)
# offset_y = (image.shape[0]-256)/2
# offset_x = (image.shape[1]-256)/2
# image = image[offset_y:offset_y+256, offset_x:offset_x+256]
# image = image[::2,::2]+image[1::2,::2]+image[::2,1::2]+image[1::2,1::2]
# image = image / 4
# # image = image.astype('int32')
# # im = Image.fromarray(image)
# # p = os.path.normpath(os.path.join(NEW_DATA_DIR, path))
# # try:
# # os.makedirs(os.path.dirname(p))
# # except:
# # pass
# scipy.misc.imsave(NEW_DATA_DIR+'{}.jpg'.format(z), image)
# # im.save(p[:-4]+'jpg')
# if z % 100 == 0:
# print z
# z += 1
# except:
# print "skip"
# # if i > 0 and i % batch_size == 0:
# # if downscale:
# # downscaled_images = images[:,:,::2,::2] + images[:,:,1::2,::2] + images[:,:,::2,1::2] + images[:,:,1::2,1::2]
# # downscaled_images = downscaled_images / 4.
# # yield (downscaled_images.astype('int32'),)
# # else:
# # yield (images,)
# # except Exception as ex:
# # print ex
# # print "warning data preprocess failed for path {}".format(path)
if __name__ == '__main__':
train_gen = load(64)
t0 = time.time()
for i, batch in enumerate(train_gen(), start=1):
print "{}\t{}".format(str(time.time() - t0), batch[0][0,0,0,0])
if i == 1000:
break
t0 = time.time()
| 36.828283
| 133
| 0.507131
|
import numpy as np
import scipy.misc
import os
import time
# from PIL import Image
DATA_DIR = '/home/ubuntu/lsun/bedrooms/'
NEW_DATA_DIR = '/home/ubuntu/lsun/bedrooms_128/'
# with open(DATA_DIR+'files.txt', 'r') as f:
# files = [l[:-1] for l in f]
# # images = np.zeros((batch_size, 3, 256, 256), dtype='int32')
# random_state = np.random.RandomState(42)
# random_state.shuffle(files)
# z = 1729468
# for i, path in enumerate(files):
# if i < 1729500:
# continue
# try:
# image = scipy.misc.imread(
# os.path.normpath(os.path.join(DATA_DIR, path))
# )
# # try:
# # image = image.transpose(2,0,1)
# offset_y = (image.shape[0]-256)/2
# offset_x = (image.shape[1]-256)/2
# image = image[offset_y:offset_y+256, offset_x:offset_x+256]
# image = image[::2,::2]+image[1::2,::2]+image[::2,1::2]+image[1::2,1::2]
# image = image / 4
# # image = image.astype('int32')
# # im = Image.fromarray(image)
# # p = os.path.normpath(os.path.join(NEW_DATA_DIR, path))
# # try:
# # os.makedirs(os.path.dirname(p))
# # except:
# # pass
# scipy.misc.imsave(NEW_DATA_DIR+'{}.jpg'.format(z), image)
# # im.save(p[:-4]+'jpg')
# if z % 100 == 0:
# print z
# z += 1
# except:
# print "skip"
# # if i > 0 and i % batch_size == 0:
# # if downscale:
# # downscaled_images = images[:,:,::2,::2] + images[:,:,1::2,::2] + images[:,:,::2,1::2] + images[:,:,1::2,1::2]
# # downscaled_images = downscaled_images / 4.
# # yield (downscaled_images.astype('int32'),)
# # else:
# # yield (images,)
# # except Exception as ex:
# # print ex
# # print "warning data preprocess failed for path {}".format(path)
def load(batch_size, downscale=False):
def generator():
with open(DATA_DIR+'files.txt', 'r') as f:
files = [l[:-1] for l in f]
images = np.zeros((batch_size, 3, 256, 256), dtype='int32')
random_state = np.random.RandomState(42)
random_state.shuffle(files)
for i, path in enumerate(files):
try:
image = scipy.misc.imread(
os.path.normpath(os.path.join(DATA_DIR, path))
)
except Exception as ex:
print ex
print "warning data load failed for path {}".format(path)
try:
image = image.transpose(2,0,1)
offset_y = (image.shape[1]-256)/2
offset_x = (image.shape[2]-256)/2
images[i % batch_size] = image[:, offset_y:offset_y+256, offset_x:offset_x+256]
if i > 0 and i % batch_size == 0:
if downscale:
downscaled_images = images[:,:,::2,::2] + images[:,:,1::2,::2] + images[:,:,::2,1::2] + images[:,:,1::2,1::2]
downscaled_images = downscaled_images / 4
yield (downscaled_images.astype('int32'),)
else:
yield (images,)
except Exception as ex:
print ex
print "warning data preprocess failed for path {}".format(path)
return generator
if __name__ == '__main__':
train_gen = load(64)
t0 = time.time()
for i, batch in enumerate(train_gen(), start=1):
print "{}\t{}".format(str(time.time() - t0), batch[0][0,0,0,0])
if i == 1000:
break
t0 = time.time()
| 1,458
| 0
| 23
|
0980ee7ae89d6028f2daf449114f96ec122ba0e5
| 3,461
|
py
|
Python
|
server/favorite_thing/serializers.py
|
omobosteven/favorite-things
|
c5fdf307f9a0c5d2ab659d0cc826f0f7201df141
|
[
"MIT"
] | null | null | null |
server/favorite_thing/serializers.py
|
omobosteven/favorite-things
|
c5fdf307f9a0c5d2ab659d0cc826f0f7201df141
|
[
"MIT"
] | 15
|
2019-12-04T23:16:58.000Z
|
2022-02-27T05:06:03.000Z
|
server/favorite_thing/serializers.py
|
omobosteven/favorite-things
|
c5fdf307f9a0c5d2ab659d0cc826f0f7201df141
|
[
"MIT"
] | 1
|
2019-07-25T20:35:24.000Z
|
2019-07-25T20:35:24.000Z
|
from django.db.utils import IntegrityError
from django.db.models import Q
from rest_framework import serializers
from core.models import FavoriteThing
from core.models import Category
from .helper import reorder_rankings, reorder_rankings_subtract
| 39.781609
| 78
| 0.645478
|
from django.db.utils import IntegrityError
from django.db.models import Q
from rest_framework import serializers
from core.models import FavoriteThing
from core.models import Category
from .helper import reorder_rankings, reorder_rankings_subtract
class FavoriteThingSerializer(serializers.ModelSerializer):
category = serializers.SlugRelatedField(
queryset=Category.objects.all(), slug_field='name')
class Meta:
model = FavoriteThing
fields = ('favorite_thing_id', 'title', 'description', 'ranking',
'metadata', 'category', 'user', 'created_at', 'modified_at')
read_only_fields = (
'favorite_thing_id', 'user', 'created_at', 'modified_at')
extra_kwargs = {
'description': {'required': False, 'min_length': 10},
}
def create(self, validated_data):
ranking = validated_data.get('ranking')
category = validated_data.get('category')
user = validated_data.get('user')
rankings_queryset = FavoriteThing.objects.order_by('ranking')\
.filter(Q(category=category) & Q(user=user.user_id))
existing_ranking = rankings_queryset.filter(ranking=ranking)
if not rankings_queryset and ranking > 1:
ranking = 1
validated_data = {**validated_data, 'ranking': ranking}
if rankings_queryset and \
ranking > rankings_queryset.last().ranking + 1:
ranking = rankings_queryset.last().ranking + 1
validated_data = {**validated_data, 'ranking': ranking}
if existing_ranking:
next_rankings = rankings_queryset.filter(ranking__gte=ranking)
reorder_rankings(next_rankings)
try:
favorite_thing = FavoriteThing.objects.create(**validated_data)
return favorite_thing
except IntegrityError:
raise serializers.ValidationError('Favorite thing already exist')
def update(self, instance, validated_data):
ranking = validated_data.get('ranking', instance.ranking)
category = validated_data.get('category', instance.category)
user = instance.user
favorite_thing_id = instance.favorite_thing_id
rankings_queryset = FavoriteThing.objects.order_by('ranking')\
.filter(Q(category=category) & Q(user=user.user_id))
if ranking > rankings_queryset.last().ranking + 1:
ranking = rankings_queryset.last().ranking
validated_data = {**validated_data, 'ranking': ranking}
existing_ranking = rankings_queryset.filter(ranking=ranking)
if existing_ranking and \
existing_ranking.first().favorite_thing_id != \
favorite_thing_id:
if ranking > instance.ranking:
next_rankings = rankings_queryset.filter(
ranking__range=(instance.ranking+1, ranking))
reorder_rankings_subtract(next_rankings)
else:
next_rankings = rankings_queryset.filter(
ranking__range=(ranking, instance.ranking-1))
reorder_rankings(next_rankings)
for attr, value in validated_data.items():
setattr(instance, attr, value)
try:
instance.save()
return instance
except IntegrityError:
raise serializers.ValidationError(
'Favorite thing already exist in database')
| 2,592
| 597
| 23
|
52f33d0f97e1c3d24a976315266ee6737b9b34db
| 14,048
|
py
|
Python
|
tests/cron_job_tests/test_genomic_data_quality_pipeline.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
tests/cron_job_tests/test_genomic_data_quality_pipeline.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
tests/cron_job_tests/test_genomic_data_quality_pipeline.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
# Tests for the Genomics Data Quality Pipeline
import mock, datetime, pytz
from rdr_service import clock
from rdr_service.api_util import open_cloud_file
from rdr_service.genomic_enums import GenomicJob, GenomicSubProcessStatus, GenomicSubProcessResult, \
GenomicManifestTypes, GenomicIncidentCode
from tests.helpers.unittest_base import BaseTestCase
from rdr_service.genomic.genomic_job_controller import DataQualityJobController
from rdr_service.genomic.genomic_data_quality_components import ReportingComponent
| 42.313253
| 105
| 0.670487
|
# Tests for the Genomics Data Quality Pipeline
import mock, datetime, pytz
from rdr_service import clock
from rdr_service.api_util import open_cloud_file
from rdr_service.genomic_enums import GenomicJob, GenomicSubProcessStatus, GenomicSubProcessResult, \
GenomicManifestTypes, GenomicIncidentCode
from tests.helpers.unittest_base import BaseTestCase
from rdr_service.genomic.genomic_job_controller import DataQualityJobController
from rdr_service.genomic.genomic_data_quality_components import ReportingComponent
class GenomicDataQualityJobControllerTest(BaseTestCase):
def setUp(self, with_data=True, with_consent_codes=False) -> None:
super().setUp()
@mock.patch('rdr_service.genomic.genomic_job_controller.genomic_job_run_update')
@mock.patch('rdr_service.genomic.genomic_job_controller.bq_genomic_job_run_update')
@mock.patch('rdr_service.dao.genomics_dao.GenomicJobRunDao.insert_run_record')
@mock.patch('rdr_service.dao.genomics_dao.GenomicJobRunDao.update_run_record')
def test_data_quality_job_controller_creation(self, job_update_mock, job_insert_mock,
bq_update_mock, resource_update_mock):
new_run = mock.Mock()
new_run.id = 1
job_insert_mock.return_value = new_run
# Test context manager works correctly
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_JOB_RUNS):
pass
job_insert_mock.assert_called_with(GenomicJob.DAILY_SUMMARY_REPORT_JOB_RUNS)
job_update_mock.assert_called_with(new_run.id,
GenomicSubProcessResult.UNSET,
GenomicSubProcessStatus.COMPLETED)
bq_update_mock.assert_called()
resource_update_mock.assert_called()
@mock.patch('rdr_service.genomic.genomic_job_controller.DataQualityJobController.get_report')
def test_controller_job_registry(self, report_job_mock):
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_JOB_RUNS) as controller:
controller.execute_workflow()
report_job_mock.assert_called_once()
class GenomicDataQualityComponentTest(BaseTestCase):
def setUp(self, with_data=True, with_consent_codes=False) -> None:
super().setUp()
self.fake_time = datetime.datetime(2021, 2, 1, 0, 0, 0, 0, tzinfo=pytz.timezone("UTC"))
def test_reporting_component_get_report_def_from_date(self):
rc = ReportingComponent()
with clock.FakeClock(self.fake_time):
query_module = 'rdr_service.genomic.genomic_data.GenomicQueryClass.dq_report_runs_summary'
with mock.patch(query_module) as query_def:
query_def.return_value = ("", {})
# Report defs for reports
report_def_d = rc.set_report_def(level="SUMMARY", target="RUNS", time_frame="D")
report_def_w = rc.set_report_def(level="SUMMARY", target="RUNS", time_frame="W")
exp_from_date_d = self.fake_time - datetime.timedelta(days=1)
exp_from_date_w = self.fake_time - datetime.timedelta(days=7)
self.assertEqual(exp_from_date_d, report_def_d.from_date)
self.assertEqual(exp_from_date_w, report_def_w.from_date)
def test_reporting_component_get_report_def_query(self):
rc = ReportingComponent()
# Report defs to test (QUERY, LEVEL, TARGET, TIME_FRAME)
test_definitions = (
("dq_report_runs_summary", {"level": "SUMMARY", "target": "RUNS", "time_frame": "D"}),
)
for test_def in test_definitions:
query_class = 'rdr_service.genomic.genomic_data.GenomicQueryClass'
query_class += f".{test_def[0]}"
with mock.patch(query_class) as query_mock:
query_mock.return_value = ("", {})
rc.set_report_def(**test_def[1])
query_mock.assert_called()
def test_reporting_component_summary_runs_query(self):
# set up genomic job runs for report
def_fields = ("jobId", "startTime", "runResult")
run_defs = (
(GenomicJob.METRICS_INGESTION, self.fake_time, GenomicSubProcessResult.SUCCESS),
(GenomicJob.METRICS_INGESTION, self.fake_time, GenomicSubProcessResult.SUCCESS),
(GenomicJob.METRICS_INGESTION, self.fake_time, GenomicSubProcessResult.ERROR),
(GenomicJob.AW1_MANIFEST, self.fake_time, GenomicSubProcessResult.SUCCESS),
(GenomicJob.AW1_MANIFEST, self.fake_time, GenomicSubProcessResult.ERROR),
(GenomicJob.AW1_MANIFEST, self.fake_time, GenomicSubProcessResult.ERROR),
)
for run_def in run_defs:
def_dict = dict(zip(def_fields, run_def))
self.data_generator.create_database_genomic_job_run(**def_dict)
# Generate report with ReportingComponent
rc = ReportingComponent()
report_ran_time = self.fake_time + datetime.timedelta(hours=6)
rc.set_report_def(level="SUMMARY", target="RUNS", time_frame="D")
with clock.FakeClock(report_ran_time):
report_data = rc.get_report_data()
# Get the genomic_job_run records
for row in report_data:
if row['job_id'] == 1:
self.assertEqual(0, row['UNSET'])
self.assertEqual(2, row['SUCCESS'])
self.assertEqual(1, row['ERROR'])
if row['job_id'] == 8:
self.assertEqual(0, row['UNSET'])
self.assertEqual(1, row['SUCCESS'])
self.assertEqual(2, row['ERROR'])
self.assertEqual(0, row['NO_FILES'])
self.assertEqual(0, row['INVALID_FILE_NAME'])
self.assertEqual(0, row['INVALID_FILE_STRUCTURE'])
class GenomicDataQualityReportTest(BaseTestCase):
def setUp(self, with_data=False, with_consent_codes=False) -> None:
super().setUp()
def test_daily_ingestion_summary(self):
# Set up test data
bucket_name = "test-bucket"
aw1_file_name = "AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv"
aw1_manifest_path = f"{bucket_name}/{aw1_file_name}"
aw2_file_name = "AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv"
aw2_manifest_path = f"{bucket_name}/{aw2_file_name}"
# Create AW1 job_run
aw1_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# Create AW2 job_run
aw2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_INGESTION,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# Create genomic_aw1_raw record
self.data_generator.create_database_genomic_aw1_raw(
file_path=aw1_manifest_path,
package_id="PKG-2104-026571",
biobank_id="A10001",
)
# Create genomic_aw2_raw record
self.data_generator.create_database_genomic_aw2_raw(
file_path=aw2_manifest_path,
biobank_id="A10001",
sample_id="100001",
biobankidsampleid="A10001_100001",
)
# Create AW1 genomic_manifest_file record
aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.BIOBANK_GC,
filePath=aw1_manifest_path,
fileName=aw1_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW2 genomic_manifest_file record
aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.GC_DRC,
filePath=aw2_manifest_path,
fileName=aw2_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW1 file_processed
self.data_generator.create_database_genomic_file_processed(
runId=aw1_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw1_manifest_file.id,
filePath=f"/{aw1_manifest_path}",
bucketName=bucket_name,
fileName=aw1_file_name,
)
# Create AW2 file_processed
self.data_generator.create_database_genomic_file_processed(
runId=aw2_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw2_manifest_file.id,
filePath=f"/{aw2_manifest_path}",
bucketName=bucket_name,
fileName=aw2_file_name,
)
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_INGESTIONS) as controller:
report_output = controller.execute_workflow()
expected_report = "```Daily Ingestions Summary\n"
expected_report += "record_count ingested_count incident_count "
expected_report += "file_type gc_site_id genome_type file_path\n"
expected_report += "1 0 0 aw1 rdr aou_wgs "
expected_report += f"{aw1_manifest_path}\n"
expected_report += "1 0 0 aw2 rdr aou_wgs "
expected_report += f"{aw2_manifest_path}"
expected_report += "\n```"
self.assertEqual(expected_report, report_output)
@mock.patch('rdr_service.services.slack_utils.SlackMessageHandler.send_message_to_webhook')
@mock.patch('rdr_service.genomic.genomic_data_quality_components.ReportingComponent.get_report_data')
@mock.patch('rdr_service.genomic.genomic_data_quality_components.ReportingComponent.format_report')
def test_report_slack_integration(self, format_mock, report_data_mock, slack_handler_mock):
# Mock the generated report
expected_report = "record_count ingested_count incident_count "
expected_report += "file_type gc_site_id genome_type file_path\n"
expected_report += "1 0 0 aw1 rdr aou_wgs "
expected_report += "test-bucket/AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv"
expected_report += "\n"
report_data_mock.return_value = None # skip running the report query
format_mock.return_value = expected_report
# Run the workflow
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_INGESTIONS) as controller:
controller.execute_workflow(slack=True)
# Test the slack API was called correctly
slack_handler_mock.assert_called_with(message_data={'text': expected_report})
def test_daily_ingestion_summary_no_files(self):
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_INGESTIONS) as controller:
report_output = controller.execute_workflow()
expected_report = "No data to display for Daily Ingestions Summary"
self.assertEqual(expected_report, report_output)
@mock.patch('rdr_service.genomic.genomic_data_quality_components.ReportingComponent.format_report')
def test_daily_ingestion_summary_long_report(self, format_mock):
format_mock.return_value = "test\n" * 30
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_INGESTIONS) as controller:
report_output = controller.execute_workflow(slack=True)
expected_report = "test\n" * 30
with open_cloud_file(report_output, 'r') as report_file:
report_file_data = report_file.read()
self.assertEqual(expected_report, report_file_data)
def test_daily_incident_report(self):
# timeframes
time_1 = datetime.datetime(2021, 5, 13, 0, 0, 0, 0)
time_2 = time_1 - datetime.timedelta(days=2)
# Set up test data
# Create AW1 job_run
aw1_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=time_1,
runResult=GenomicSubProcessResult.SUCCESS
)
with clock.FakeClock(time_1):
# Incident included in report
self.data_generator.create_database_genomic_incident(
code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
message='test message',
source_job_run_id=aw1_job_run.id,
biobank_id="10001",
sample_id="20001",
collection_tube_id="30001",
)
with clock.FakeClock(time_2):
# Incident excluded from report
self.data_generator.create_database_genomic_incident(
code=GenomicIncidentCode.UNABLE_TO_FIND_MEMBER.name,
message='test message 2',
source_job_run_id=aw1_job_run.id,
biobank_id="10002",
sample_id="20002",
collection_tube_id="30002",
)
with clock.FakeClock(time_1):
with DataQualityJobController(GenomicJob.DAILY_SUMMARY_REPORT_INCIDENTS) as controller:
report_output = controller.execute_workflow()
expected_report = "```Daily Incidents Summary\n"
expected_report += "code created biobank_id genomic_set_member_id " \
"source_job_run_id source_file_processed_id\n"
expected_report += "UNABLE_TO_FIND_MEMBER 2021-05-13 00:00:00 10001 None 1 None"
expected_report += "\n```"
self.assertEqual(expected_report, report_output)
| 12,168
| 1,182
| 176
|
431ef6db152840abe39b59d1b1a27606e88c65d6
| 716
|
py
|
Python
|
Tried/Strings/1237.py
|
LorranSutter/URI-Online-Judge
|
aef885b9a7caa83484cf172e29eea8ec92fc3627
|
[
"MIT"
] | null | null | null |
Tried/Strings/1237.py
|
LorranSutter/URI-Online-Judge
|
aef885b9a7caa83484cf172e29eea8ec92fc3627
|
[
"MIT"
] | null | null | null |
Tried/Strings/1237.py
|
LorranSutter/URI-Online-Judge
|
aef885b9a7caa83484cf172e29eea8ec92fc3627
|
[
"MIT"
] | null | null | null |
# Time limit exceeded
while True:
try:
A = input()
B = input()
lA = len(A)
lB = len(B)
biggest = ""
shortest = ""
lshortest = 0
if max(lA, lB) == lA:
biggest, shortest = A, B
lbiggest = lA
lshortest = lB
else:
biggest, shortest = B, A
lbiggest = lB
lshortest = lA
bSub = 0
currentSub = 0
for k in range(lshortest):
for w in range(lbiggest):
if shortest[k] == biggest[w]:
currentSub = 1
q = w+1
for p in range(k+1,lshortest):
if q >= lbiggest:
break
if shortest[p] == biggest[q]:
currentSub += 1
q += 1
else:
break
if currentSub >= bSub:
bSub = currentSub
print(bSub)
except:
break
| 14.916667
| 35
| 0.539106
|
# Time limit exceeded
while True:
try:
A = input()
B = input()
lA = len(A)
lB = len(B)
biggest = ""
shortest = ""
lshortest = 0
if max(lA, lB) == lA:
biggest, shortest = A, B
lbiggest = lA
lshortest = lB
else:
biggest, shortest = B, A
lbiggest = lB
lshortest = lA
bSub = 0
currentSub = 0
for k in range(lshortest):
for w in range(lbiggest):
if shortest[k] == biggest[w]:
currentSub = 1
q = w+1
for p in range(k+1,lshortest):
if q >= lbiggest:
break
if shortest[p] == biggest[q]:
currentSub += 1
q += 1
else:
break
if currentSub >= bSub:
bSub = currentSub
print(bSub)
except:
break
| 0
| 0
| 0
|
32ed63da26a1030fea2213fe16ac945a18313b1b
| 1,490
|
py
|
Python
|
galvatron_lib/core/location_handlers/chrome_handler.py
|
viking333/galvatron
|
c2dc9c94ad64e79ad234c4a04b039f6cf429e0f3
|
[
"MIT"
] | null | null | null |
galvatron_lib/core/location_handlers/chrome_handler.py
|
viking333/galvatron
|
c2dc9c94ad64e79ad234c4a04b039f6cf429e0f3
|
[
"MIT"
] | null | null | null |
galvatron_lib/core/location_handlers/chrome_handler.py
|
viking333/galvatron
|
c2dc9c94ad64e79ad234c4a04b039f6cf429e0f3
|
[
"MIT"
] | 7
|
2019-08-22T15:17:44.000Z
|
2020-01-03T17:18:06.000Z
|
import os, urllib, requests, json
priority = 1
| 37.25
| 230
| 0.672483
|
import os, urllib, requests, json
priority = 1
def get_extension_id(arg1):
package = "\"{}\"".format(arg1.replace("chrome://", ""))
package = urllib.quote_plus(package)
url = "https://chrome.google.com/webstore/ajax/item?hl=en&gl=GB&pv=20181009&count=2&searchTerm={}".format(package)
resp = requests.post(url)
data = json.loads(resp.text.replace(")]}'\n\n", ""))
items = [x[0] for x in data[1][1] if x[1] == arg1.replace("chrome://", "")]
if len(items):
return items[0]
return None
def download_extension(app, extension_id, browser_version="49.0"):
url = "https://clients2.google.com/service/update2/crx?response=redirect&acceptformat=crx3&prodversion={version}&x=id%3D{extension_id}%26installsource%3Dondemand%26uc".format(version=browser_version, extension_id=extension_id)
dest_location = os.path.join(os.sep, "tmp", "{}.zip".format(extension_id))
print(dest_location)
try:
urllib.urlretrieve(url, dest_location)
except Exception as ex:
app.output(ex)
return None
return dest_location
def handles(location):
return location.lower().startswith("chrome://")
def handle(app, location):
extension_id = get_extension_id(location)
if extension_id:
app.output("Downloading chrome extension {}".format(extension_id))
return (download_extension(app, extension_id), None)
else:
app.error("Chrome extension not found")
return (None, None)
| 1,351
| 0
| 92
|
eb509f37c00b60320a88eccf0074acfe172aadc6
| 124
|
py
|
Python
|
Code/lecture 17/code6_list.py
|
capacitybuilding/Introduction-to-Computng
|
77639e53ed53896581c4a8431f32198237ac70dc
|
[
"MIT"
] | null | null | null |
Code/lecture 17/code6_list.py
|
capacitybuilding/Introduction-to-Computng
|
77639e53ed53896581c4a8431f32198237ac70dc
|
[
"MIT"
] | null | null | null |
Code/lecture 17/code6_list.py
|
capacitybuilding/Introduction-to-Computng
|
77639e53ed53896581c4a8431f32198237ac70dc
|
[
"MIT"
] | null | null | null |
list1 = [1, 4, 8, 2, 9]
print len(list1)
print max(list1), min(list1)
print list1[-2]
print list1[-5:3]
print list1[-3:]
| 12.4
| 28
| 0.629032
|
list1 = [1, 4, 8, 2, 9]
print len(list1)
print max(list1), min(list1)
print list1[-2]
print list1[-5:3]
print list1[-3:]
| 0
| 0
| 0
|
698015ddf789237819b0eaa04da727793abc9f2a
| 2,079
|
py
|
Python
|
plot/histogram.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
plot/histogram.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
plot/histogram.py
|
IvoryLu/data-processing
|
65d91537dea777d037e9a419a355a0c8493aa19c
|
[
"BSD-3-Clause"
] | null | null | null |
from matplotlib import pyplot as plt
from script import sales_times1
from script import sales_times2
# normed=True This command divides the height of each column by
# a constant such that the total shaded area of the histogram sums
# to 1
plt.hist(sales_times1, bins=20, alpha=0.4, normed=True)
plt.hist(sales_times2, bins=20, alpha=0.4, normed=True)
plt.show()
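# Note: `normed` has been removed in newer matplotlib releases; a minimal equivalent
# sketch (assuming matplotlib >= 3.x) uses density=True instead:
# plt.hist(sales_times1, bins=20, alpha=0.4, density=True)
# plt.hist(sales_times2, bins=20, alpha=0.4, density=True)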
#%%
from matplotlib import pyplot as plt
exam_scores1 = [62.58, 67.63, 81.37, 52.53, 62.98, 72.15, 59.05, 73.85, 97.24, 76.81, 89.34, 74.44, 68.52, 85.13, 90.75, 70.29, 75.62, 85.38, 77.82, 98.31, 79.08, 61.72, 71.33, 80.77, 80.31, 78.16, 61.15, 64.99, 72.67, 78.94]
exam_scores2 = [72.38, 71.28, 79.24, 83.86, 84.42, 79.38, 75.51, 76.63, 81.48,78.81,79.23,74.38,79.27,81.07,75.42,90.35,82.93,86.74,81.33,95.1,86.57,83.66,85.58,81.87,92.14,72.15,91.64,74.21,89.04,76.54,81.9,96.5,80.05,74.77,72.26,73.23,92.6,66.22,70.09,77.2]
# Make your plot here
plt.figure(figsize=(10,8))
plt.hist(exam_scores1,bins=12,normed=True,
histtype='step',linewidth=2)
plt.hist(exam_scores2,bins=12,normed=True,
histtype='step',linewidth=2)
legends=["1st Yr Teaching","2nd Yr Teaching"]
plt.legend(legends)
plt.title("Final Exam Score Distribution")
plt.xlabel("Percentage")
plt.ylabel("Frequency")
plt.savefig("my_histogram.png")
#%%
import numpy as np
import pandas as pd
# Import matplotlib pyplot
from matplotlib import pyplot as plt
# Read in transactions data
greatest_books = pd.read_csv("top-hundred-books.csv")
# Save transaction times to a separate numpy array
author_ages = greatest_books['Ages']
# Use numpy to calculate the average age of the top 100 authors
average_age = np.average(author_ages)
print("The average age of the 100 greatest authors, according to Le Monde is: " + str(average_age))
# Plot the figure
plt.hist(author_ages, range=(10, 80), bins=14, edgecolor='black')
plt.title("Age of Top 100 Novel Authors at Publication")
plt.xlabel("Publication Age")
plt.ylabel("Count")
plt.axvline(average_age, color='r', linestyle='solid', linewidth=2, label="Mean")
plt.legend()
plt.show()
| 36.473684
| 259
| 0.721501
|
from matplotlib import pyplot as plt
from script import sales_times1
from script import sales_times2
# normed=True This command divides the height of each column by
# a constant such that the total shaded area of the histogram sums
# to 1
plt.hist(sales_times1, bins=20, alpha=0.4, normed=True)
plt.hist(sales_times2, bins=20, alpha=0.4, normed=True)
plt.show()
#%%
from matplotlib import pyplot as plt
exam_scores1 = [62.58, 67.63, 81.37, 52.53, 62.98, 72.15, 59.05, 73.85, 97.24, 76.81, 89.34, 74.44, 68.52, 85.13, 90.75, 70.29, 75.62, 85.38, 77.82, 98.31, 79.08, 61.72, 71.33, 80.77, 80.31, 78.16, 61.15, 64.99, 72.67, 78.94]
exam_scores2 = [72.38, 71.28, 79.24, 83.86, 84.42, 79.38, 75.51, 76.63, 81.48,78.81,79.23,74.38,79.27,81.07,75.42,90.35,82.93,86.74,81.33,95.1,86.57,83.66,85.58,81.87,92.14,72.15,91.64,74.21,89.04,76.54,81.9,96.5,80.05,74.77,72.26,73.23,92.6,66.22,70.09,77.2]
# Make your plot here
plt.figure(figsize=(10,8))
plt.hist(exam_scores1,bins=12,normed=True,
histtype='step',linewidth=2)
plt.hist(exam_scores2,bins=12,normed=True,
histtype='step',linewidth=2)
legends=["1st Yr Teaching","2nd Yr Teaching"]
plt.legend(legends)
plt.title("Final Exam Score Distribution")
plt.xlabel("Percentage")
plt.ylabel("Frequency")
plt.savefig("my_histogram.png")
#%%
import numpy as np
import pandas as pd
# Import matplotlib pyplot
from matplotlib import pyplot as plt
# Read in transactions data
greatest_books = pd.read_csv("top-hundred-books.csv")
# Save transaction times to a separate numpy array
author_ages = greatest_books['Ages']
# Use numpy to calculate the average age of the top 100 authors
average_age = np.average(author_ages)
print("The average age of the 100 greatest authors, according to Le Monde is: " + str(average_age))
# Plot the figure
plt.hist(author_ages, range=(10, 80), bins=14, edgecolor='black')
plt.title("Age of Top 100 Novel Authors at Publication")
plt.xlabel("Publication Age")
plt.ylabel("Count")
plt.axvline(average_age, color='r', linestyle='solid', linewidth=2, label="Mean")
plt.legend()
plt.show()
| 0
| 0
| 0
|
9f18495a49f1afc15c2ebfbfac14e88650c4cc8b
| 214
|
py
|
Python
|
basico/aula002/aula02.py
|
KaicPierre/Curso-de-Python3
|
6267ba92c298f0ffa103e374615f040b0db13339
|
[
"MIT"
] | 1
|
2020-09-04T07:46:45.000Z
|
2020-09-04T07:46:45.000Z
|
basico/aula002/aula02.py
|
KaicPierre/Curso-de-Python3
|
6267ba92c298f0ffa103e374615f040b0db13339
|
[
"MIT"
] | null | null | null |
basico/aula002/aula02.py
|
KaicPierre/Curso-de-Python3
|
6267ba92c298f0ffa103e374615f040b0db13339
|
[
"MIT"
] | null | null | null |
# print(123456)
# print('Kaic', 'Pierre', 'Outra Coisa')
# print('Kaic', 'Pierre', sep='-', end='')
# print('Testando', 'Outras', 'Coisas', sep='-', end='')
print('428', '330', '048', sep='.', end='-')
print('93')
| 30.571429
| 56
| 0.537383
|
# print(123456)
# print('Kaic', 'Pierre', 'Outra Coisa')
# print('Kaic', 'Pierre', sep='-', end='')
# print('Testando', 'Outras', 'Coisas', sep='-', end='')
print('428', '330', '048', sep='.', end='-')
print('93')
| 0
| 0
| 0
|
6dda831cadd31c8f32d1dc02a292c613ee99c7ef
| 6,245
|
py
|
Python
|
voc_classifier/bert_model.py
|
myeonghak/kobert-multi-label-VOC-classifier
|
983524e8331b5e833d85779dfe7521c21bf2d1cd
|
[
"Apache-2.0"
] | 6
|
2021-08-18T00:52:38.000Z
|
2021-12-03T12:37:18.000Z
|
voc_classifier/bert_model.py
|
myeonghak/kobert-multi-label-VOC-classifier
|
983524e8331b5e833d85779dfe7521c21bf2d1cd
|
[
"Apache-2.0"
] | null | null | null |
voc_classifier/bert_model.py
|
myeonghak/kobert-multi-label-VOC-classifier
|
983524e8331b5e833d85779dfe7521c21bf2d1cd
|
[
"Apache-2.0"
] | 1
|
2022-03-24T08:02:44.000Z
|
2022-03-24T08:02:44.000Z
|
import sys
sys.path.append("../")
# KoBERT model
import config
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
from tqdm import tqdm, tqdm_notebook
from KoBERT.kobert.utils import get_tokenizer
from KoBERT.kobert.pytorch_kobert import get_pytorch_kobert_model
from transformers import AdamW
# from transformers.optimization import WarmupLinearSchedule
from transformers import get_linear_schedule_with_warmup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
bertmodel, vocab = get_pytorch_kobert_model()
# Get the tokenizer method and assign it to `tokenizer`.
# The corpus is turned into tokens; the tokenizer comes from get_tokenizer() in the kobert package,
# and the word dictionary needed for tokenization is kobert's vocab.
# lower = False because the input has to be fed in uncased.
tokenizer = get_tokenizer()
tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower = False)
print(f'device using: {device}')
model_config=config.model_config
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt', trace_func=print):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
trace_func (function): trace print function.
Default: print
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
self.path = path
self.trace_func = trace_func
def save_checkpoint(self, val_loss, model):
'''Saves model when validation loss decrease.'''
if self.verbose:
self.trace_func(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
torch.save(model.state_dict(), self.path)
self.val_loss_min = val_loss
| 36.098266
| 151
| 0.63699
|
import sys
sys.path.append("../")
# KoBERT model
import config
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
from tqdm import tqdm, tqdm_notebook
from KoBERT.kobert.utils import get_tokenizer
from KoBERT.kobert.pytorch_kobert import get_pytorch_kobert_model
from transformers import AdamW
# from transformers.optimization import WarmupLinearSchedule
from transformers import get_linear_schedule_with_warmup
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
bertmodel, vocab = get_pytorch_kobert_model()
# Get the tokenizer method and assign it to `tokenizer`.
# The corpus is turned into tokens; the tokenizer comes from get_tokenizer() in the kobert package,
# and the word dictionary needed for tokenization is kobert's vocab.
# lower = False because the input has to be fed in uncased.
tokenizer = get_tokenizer()
tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower = False)
print(f'device using: {device}')
model_config=config.model_config
class Data_for_BERT(Dataset):
def __init__(self, dataset, max_len, pad, pair, label_cols):
        # Use the data.BERTSentenceTransform method from the gluonnlp package:
        # pass the tokenizer used for BERT as bert_tokenizer and give the maximum number
        # of tokens per sentence via the max_len argument, i.e. only max_len (sub-word) pieces are used.
        # The pad argument asks whether sentences shorter than max_len should be padded,
        # and the pair argument whether to transform single sentences or sentence pairs.
transform = nlp.data.BERTSentenceTransform(tok, max_seq_length = max_len, pad=pad, pair=pair)
self.sentences = [transform([txt]) for txt in dataset.text]
# self.sentences_Customer = [transform([txt]) for txt in dataset.Customer]
# self.labels = [np.int32(i) for i in dataset.label]
self.labels=dataset[label_cols].values
# ohe = OneHotEncoder().fit(pd.Series(self.labels).values.reshape(-1,1))
# self.labels = ohe.transform(pd.Series(self.labels).values.reshape(-1,1)).toarray()
# target.bcat
# self.labels = b_ohe.fit_transform(pd.Series(self.labels).values.reshape(-1,1))
def __getitem__(self,i):
return (self.sentences[i] + (self.labels[i],))
def __len__(self):
return(len(self.labels))
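# A minimal usage sketch (the dataframe and loader arguments below are assumptions,
# not taken from this repo):
# train_set = Data_for_BERT(train_df, max_len=64, pad=True, pair=False, label_cols=label_cols)
# train_loader = DataLoader(train_set, batch_size=32, shuffle=True)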
class BERTClassifier(nn.Module):
def __init__(self, hidden_size = 768, num_classes = 8, dr_rate = None, params = None):
        # Hand the class attributes down to BERTClassifier's children (super().__init__)?
        # Keep the bert model passed as input in the class's bert attribute and
        # store dr_rate (the drop-out rate) as dr_rate.
super(BERTClassifier, self).__init__()
self.bert = bertmodel
self.dr_rate = dr_rate
        # Here nn.Linear is a fully connected layer, like keras' Dense layer:
        # it takes the hidden layer size as input (768 here) and
        # sets the output layer size to the number given by num_classes.
# self.lstm_layer = nn.LSTM(512, 128, 2)
self.classifier = nn.Linear(hidden_size, num_classes)
        # If dr_rate is defined, drop out weights at the given rate.
if dr_rate:
self.dropout = nn.Dropout(p=dr_rate)
def generate_attention_mask(self, token_ids, valid_length):
        # Build the attention_mask used by the BERT model.
        # Takes token_ids as input and produces the attention mask.
        # torch.zeros_like() takes a torch tensor and returns a tensor of the same size filled with the scalar 0.
attention_mask = torch.zeros_like(token_ids)
for i,v in enumerate(valid_length):
attention_mask[i][:v] = 1
return attention_mask.float()
def forward(self, token_ids, valid_length, segment_ids):
        # Build the attention mask and feed it to the BERT model.
        attention_mask = self.generate_attention_mask(token_ids, valid_length)
        # .long() casts to int64; .to(token_ids.device) is what places the tensors on the device (GPU).
        # The pooled output is passed through classifier().
_, pooler = self.bert(input_ids = token_ids, token_type_ids = segment_ids.long(), attention_mask = attention_mask.float().to(token_ids.device))
if self.dr_rate:
out=self.dropout(pooler)
# output=self.lstm_layer(out)
return self.classifier(out)
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt', trace_func=print):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
trace_func (function): trace print function.
Default: print
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
self.path = path
self.trace_func = trace_func
def __call__(self, val_loss, model):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < self.best_score + self.delta:
self.counter += 1
self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
def save_checkpoint(self, val_loss, model):
'''Saves model when validation loss decrease.'''
if self.verbose:
self.trace_func(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
torch.save(model.state_dict(), self.path)
self.val_loss_min = val_loss
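# Sketch of the intended training-loop usage (loop and helper names are illustrative only):
# early_stopping = EarlyStopping(patience=7, verbose=True, path='checkpoint.pt')
# for epoch in range(num_epochs):
#     val_loss = validate(model)  # assumed validation helper
#     early_stopping(val_loss, model)
#     if early_stopping.early_stop:
#         break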
| 4,011
| 19
| 261
|
55ed52af5669483d0d12633c9132b7026ed87a23
| 2,061
|
py
|
Python
|
src/plugins/pic_searcher/model.py
|
Lycreal/qbot
|
e0cb5af8295efb1d58780ac4a420551e1183ba8b
|
[
"MIT"
] | 7
|
2019-10-09T07:09:37.000Z
|
2020-07-15T01:30:25.000Z
|
src/plugins/pic_searcher/model.py
|
Lycreal/cqbot
|
b189a17283a63e982bf7f99e529486af8d2bfb76
|
[
"MIT"
] | 59
|
2021-05-20T07:21:50.000Z
|
2022-03-25T21:17:07.000Z
|
src/plugins/pic_searcher/model.py
|
Lycreal/cqbot
|
b189a17283a63e982bf7f99e529486af8d2bfb76
|
[
"MIT"
] | 2
|
2020-12-31T06:23:10.000Z
|
2021-07-27T07:40:16.000Z
|
import urllib.parse
from .saucenao import get_saucenao_detail, SauceNAOError
| 32.203125
| 95
| 0.5541
|
import urllib.parse
from .saucenao import get_saucenao_detail, SauceNAOError
class PicSearcher:
@classmethod
async def do_search(cls, image: str, site: str = 'saucenao') -> str:
try:
if site == 'saucenao':
reply: str = await cls.do_search_saucenao(image)
else:
reply = await cls.do_search_saucenao(image)
except Exception as e:
import traceback
reply = '处理异常:{}'.format("\n".join(traceback.format_exception_only(type(e), e)))
return reply
@classmethod
async def do_search_saucenao(cls, img_url: str) -> str:
try:
results = await get_saucenao_detail(img_url)
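            # Prefer a pixiv hit with similarity above 0.9; otherwise fall back to
            # the top result if it clears 0.6, else report that nothing was found.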
for result in [result for result in results if cls.float(result.Similarity) > 0.9]:
if cls.check_pixiv_url(result.URL):
break
else:
if results and cls.float(results[0].Similarity) > 0.6:
result = results[0]
else:
result = None
if result:
result.URL = cls.shorten_pixiv_url(result.URL)
reply = str(result)
else:
reply = '未找到相似图片\n'
except SauceNAOError as e:
reply = str(e)
return reply.strip()
@staticmethod
def float(string: str) -> float:
"""百分数转为int"""
if string.endswith('%'):
return float(string.rstrip("%")) / 100
else:
return float(string)
@staticmethod
def check_pixiv_url(url: str) -> bool:
parse = urllib.parse.urlparse(url)
return bool('pixiv' in parse.hostname)
@staticmethod
def shorten_pixiv_url(url: str) -> str:
parse = urllib.parse.urlparse(url)
if 'pixiv' in parse.hostname:
querys = dict(urllib.parse.parse_qsl(parse.query))
illust_id = querys.get('illust_id')
if illust_id:
return f'https://www.pixiv.net/artworks/{illust_id}'
return url
| 1,599
| 394
| 23
|
47c13dd4c0d747a6ab363ae514e4d9c517553a49
| 48
|
py
|
Python
|
week1/logical1.py
|
pkoarmy/Learning-Python
|
79067de3c09240d26939ca23ec98e96304660e7c
|
[
"MIT"
] | null | null | null |
week1/logical1.py
|
pkoarmy/Learning-Python
|
79067de3c09240d26939ca23ec98e96304660e7c
|
[
"MIT"
] | null | null | null |
week1/logical1.py
|
pkoarmy/Learning-Python
|
79067de3c09240d26939ca23ec98e96304660e7c
|
[
"MIT"
] | null | null | null |
A = True
B = False
print(A and B)
print(A or B)
| 9.6
| 14
| 0.625
|
A = True
B = False
print(A and B)
print(A or B)
| 0
| 0
| 0
|
49326b7f76171a6e4c3018363dc191db9d2b6e72
| 8,565
|
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_function.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1
|
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_function.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1
|
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/gsl_function.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2
|
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
#!/usr/bin/env python
# Author : Pierre Schnizer
"""
Collection of callback systems for pygsl. They follow the GSL definitions as
closely as possible. Instead of a struct, python classes are used.
All solvers accept a C void pointer, which is passed to the callback. In Pygsl
this is an arbitrary python object. See the doc strings of the various classes
for further detail.
"""
from . import _callback
class gsl_function(_gsl_function):
"""
This class defines the callbacks known as gsl_function to
gsl.
    e.g. to supply the function f:
    def f(x, params):
        a = params[0]
        b = params[1]
        c = params[2]
return a * x ** 2 + b * x + c
to some solver, use
function = gsl_function(f, params)
"""
initfunc = _callback.gsl_function_init
freefunc = _callback.gsl_function_free
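# A minimal usage sketch (the quadratic and its parameter values are illustrative only):
# def f(x, params):
#     a, b, c = params
#     return a * x ** 2 + b * x + c
# func = gsl_function(f, (1.0, 2.0, 3.0))
# func.get_ptr() is what the pygsl solvers ultimately consume.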
class gsl_function_fdf(_gsl_function_fdf):
"""
This class defines the callbacks known as gsl_function_fdf to
gsl.
    e.g. to supply the function f:
    def f(x, params):
        return exp(2 * x)
    def df(x, params):
        return 2 * exp(2 * x)
    def fdf(x, params):
myf = f(x, None)
mydf = df(x, None)
return myf, mydf
to some solver, accepting gsl_function_fdf, use
function = gsl_function_fdf(f, df, fdf, params)
"""
initfunc = _callback.gsl_function_init_fdf
freefunc = _callback.gsl_function_free_fdf
class gsl_multiroot_function(_gsl_function):
"""
This class defines the callbacks for gsl_multiroot_function.
To supply the function rosenbrock define the function:
def rosenbrock_f(x, params):
a = params[0]
b = params[1]
y = copy.copy(x)
y[0] = a * (1 - x[0])
y[1] = b * (x[1] - x[0] * x[0])
return y
sys = multiroots.gsl_multiroot_function(rosenbrock_f, params, 2)
"""
initfunc = _callback.gsl_multiroot_function_init
freefunc = _callback.gsl_multiroot_function_free
class gsl_multiroot_function_fdf(_gsl_function_fdf):
"""
This class defines the callbacks for gsl_multiroot_function.
To supply the function rosenbrock define the functions:
def rosenbrock_f(x, params):
a = params[0]
b = params[1]
y = copy.copy(x)
y[0] = a * (1 - x[0])
y[1] = b * (x[1] - x[0] * x[0])
return y
def rosenbrock_df(x, params):
a = params[0]
b = params[1]
df = Numeric.zeros((x.shape[0], x.shape[0]), Numeric.Float)
df[0,0] = -a
df[0,1] = 0
df[1,0] = -2 * b * x[0]
df[1,1] = b
return df
def rosenbrock_fdf(x, params):
f = rosenbrock_f(x, params)
df = rosenbrock_df(x, params)
return f, df
# dimension of x
size = 2
sys = multiroots.gsl_multiroot_function(rosenbrock_f, rosenbrock_df,
rosenbrock_fdf, params, size)
"""
initfunc = _callback.gsl_multiroot_function_init_fdf
freefunc = _callback.gsl_multiroot_function_free_fdf
class gsl_multifit_function(_gsl_function):
"""
    This class defines the callbacks for gsl_multifit_function.
    To fit an exponential function to data, write the following function:
def exp_f(x, params):
A = x[0]
lambda_ = x[1]
b = x[2]
t= params[0]
yi = params[1]
sigma = params[2]
Yi = A * exp(-lambda_ * t) + b
f = yi - Yi / sigma
return f
# Number of data samples
n = len(data)
    # Number of parameters
p = 3
multifit_nlin.gsl_multifit_function(exp_f, data, n, p)
"""
initfunc = _callback.gsl_multifit_function_init
freefunc = _callback.gsl_multifit_function_free
class gsl_multifit_function_fdf(_gsl_function_fdf):
"""
    This class defines the callbacks for gsl_multifit_function_fdf.
def exp_f(x, params):
A = x[0]
lambda_ = x[1]
b = x[2]
t= params[0]
yi = params[1]
sigma = params[2]
Yi = A * exp(-lambda_ * t) + b
f = yi - Yi / sigma
return f
def exp_df(x, params):
A = x[0]
lambda_ = x[1]
b = x[2]
t= params[0]
yi = params[1]
sigma = params[2]
e = exp(-lambda_ * t)
e_s = e/sigma
df = Numeric.array((e_s, -t * A * e_s, 1/sigma))
df = Numeric.transpose(df)
print df.shape
return df
def exp_fdf(x, params):
f = exp_f(x, params)
df = exp_df(x, params)
return f, df
# Number of data samples
n = len(data)
    # Number of parameters
p = 3
multifit_nlin.gsl_multifit_function_fdf(exp_f, exp_df, exp_fdf, data, n, p)
"""
initfunc = _callback.gsl_multifit_function_init_fdf
freefunc = _callback.gsl_multifit_function_free_fdf
class gsl_multimin_function(gsl_multiroot_function):
"""
This class defines the callbacks for gsl_multimin_function.
The following example function defines a simple paraboloid with two
parameters.
To supply the system define the function:
def my_f(v, params):
x = v[0]
y = v[1]
dp = params
t1 = (x - dp[0])
t2 = (y - dp[1])
f = 10.0 * t1 * t1 + 20.0 * t2 * t2 + 30.0
return f
# dimension of x
size = 2
sys = multimin.gsl_multifit_function(my_f, params, 2)
"""
initfunc = _callback.gsl_multimin_function_init
freefunc = _callback.gsl_multimin_function_free
class gsl_multimin_function_fdf(gsl_multiroot_function_fdf):
"""
This class defines the callbacks for gsl_multimin_function_fdf.
The following example function defines a simple paraboloid with two
parameters.
To supply the system define the function:
def my_f(v, params):
x = v[0]
y = v[1]
dp = params
t1 = (x - dp[0])
t2 = (y - dp[1])
f = 10.0 * t1 * t1 + 20.0 * t2 * t2 + 30.0
return f
def my_df(v, params):
x = v[0]
y = v[1]
df = Numeric.zeros(v.shape, Numeric.Float)
dp = params
df[0] = 20. * (x - dp[0])
df[1] = 40. * (y - dp[1])
return df
def my_fdf(v, params):
f = my_f(v, params)
df = my_df(v,params)
return f, df
# dimension of x
size = 2
sys = multimin.gsl_multifit_function(my_f, my_df, my_fdf, params, size)
"""
initfunc = _callback.gsl_multimin_function_init_fdf
freefunc = _callback.gsl_multimin_function_free_fdf
class gsl_monte_function(gsl_multiroot_function):
"""
This class defines the callbacks for gsl_monte_function.
"""
initfunc = _callback.gsl_monte_function_init
freefunc = _callback.gsl_monte_function_free
| 26.682243
| 79
| 0.585873
|
#!/usr/bin/env python
# Author : Pierre Schnizer
"""
Collection of callback systems for pygsl. They follow the GSL definitions as
closely as possible. Instead of a struct, python classes are used.
All solvers accept a C void pointer, which is passed to the callback. In Pygsl
this is an arbitrary python object. See the doc strings of the various classes
for further detail.
"""
from . import _callback
class _gsl_function:
initfunc = None
freefunc = None
def __init__(self, func, args):
"""
input : func, args
func ... a callable Python object accepting a double
and args
args ... additional arguments. Supply None if not needed.
"""
self._ptr = None
assert(self.initfunc != None)
assert(self.freefunc != None)
self._ptr = self.initfunc((func, args))
def __del__(self,):
if hasattr(self, '_ptr'):
if self._ptr != None:
self.freefunc(self._ptr)
def get_ptr(self):
return self._ptr
class _gsl_function_fdf(_gsl_function):
def __init__(self, func, deriv, fdf, args):
self._ptr = None
assert(self.initfunc != None)
assert(self.freefunc != None)
self._ptr = self.initfunc((func, deriv, fdf, args))
class gsl_function(_gsl_function):
"""
This class defines the callbacks known as gsl_function to
gsl.
    e.g. to supply the function f:
    def f(x, params):
        a = params[0]
        b = params[1]
        c = params[2]
return a * x ** 2 + b * x + c
to some solver, use
function = gsl_function(f, params)
"""
initfunc = _callback.gsl_function_init
freefunc = _callback.gsl_function_free
class gsl_function_fdf(_gsl_function_fdf):
"""
This class defines the callbacks known as gsl_function_fdf to
gsl.
    e.g. to supply the function f:
    def f(x, params):
        return exp(2 * x)
    def df(x, params):
        return 2 * exp(2 * x)
    def fdf(x, params):
myf = f(x, None)
mydf = df(x, None)
return myf, mydf
to some solver, accepting gsl_function_fdf, use
function = gsl_function_fdf(f, df, fdf, params)
"""
initfunc = _callback.gsl_function_init_fdf
freefunc = _callback.gsl_function_free_fdf
class gsl_multiroot_function(_gsl_function):
"""
This class defines the callbacks for gsl_multiroot_function.
To supply the function rosenbrock define the function:
def rosenbrock_f(x, params):
a = params[0]
b = params[1]
y = copy.copy(x)
y[0] = a * (1 - x[0])
y[1] = b * (x[1] - x[0] * x[0])
return y
sys = multiroots.gsl_multiroot_function(rosenbrock_f, params, 2)
"""
initfunc = _callback.gsl_multiroot_function_init
freefunc = _callback.gsl_multiroot_function_free
def __init__(self, func, args, size):
self._ptr = None
assert(self.initfunc != None)
assert(self.freefunc != None)
self._ptr = self.initfunc((func, args, size))
class gsl_multiroot_function_fdf(_gsl_function_fdf):
"""
This class defines the callbacks for gsl_multiroot_function.
To supply the function rosenbrock define the functions:
def rosenbrock_f(x, params):
a = params[0]
b = params[1]
y = copy.copy(x)
y[0] = a * (1 - x[0])
y[1] = b * (x[1] - x[0] * x[0])
return y
def rosenbrock_df(x, params):
a = params[0]
b = params[1]
df = Numeric.zeros((x.shape[0], x.shape[0]), Numeric.Float)
df[0,0] = -a
df[0,1] = 0
df[1,0] = -2 * b * x[0]
df[1,1] = b
return df
def rosenbrock_fdf(x, params):
f = rosenbrock_f(x, params)
df = rosenbrock_df(x, params)
return f, df
# dimension of x
size = 2
sys = multiroots.gsl_multiroot_function(rosenbrock_f, rosenbrock_df,
rosenbrock_fdf, params, size)
"""
initfunc = _callback.gsl_multiroot_function_init_fdf
freefunc = _callback.gsl_multiroot_function_free_fdf
def __init__(self, f, df, fdf, args, size):
self._ptr = None
assert(self.initfunc != None)
assert(self.freefunc != None)
self._ptr = self.initfunc((f, df, fdf, args, size))
class gsl_multifit_function(_gsl_function):
"""
    This class defines the callbacks for gsl_multifit_function.
    To fit an exponential function to data, write the following function:
def exp_f(x, params):
A = x[0]
lambda_ = x[1]
b = x[2]
t= params[0]
yi = params[1]
sigma = params[2]
Yi = A * exp(-lambda_ * t) + b
f = yi - Yi / sigma
return f
# Number of data samples
n = len(data)
    # Number of parameters
p = 3
multifit_nlin.gsl_multifit_function(exp_f, data, n, p)
"""
initfunc = _callback.gsl_multifit_function_init
freefunc = _callback.gsl_multifit_function_free
def __init__(self, f, args, n, p):
self._ptr = None
assert(self.initfunc != None)
assert(self.freefunc != None)
self._ptr = self.initfunc((f, args, n, p))
class gsl_multifit_function_fdf(_gsl_function_fdf):
"""
    This class defines the callbacks for gsl_multifit_function_fdf.
def exp_f(x, params):
A = x[0]
lambda_ = x[1]
b = x[2]
t= params[0]
yi = params[1]
sigma = params[2]
Yi = A * exp(-lambda_ * t) + b
f = yi - Yi / sigma
return f
def exp_df(x, params):
A = x[0]
lambda_ = x[1]
b = x[2]
t= params[0]
yi = params[1]
sigma = params[2]
e = exp(-lambda_ * t)
e_s = e/sigma
df = Numeric.array((e_s, -t * A * e_s, 1/sigma))
df = Numeric.transpose(df)
print df.shape
return df
def exp_fdf(x, params):
f = exp_f(x, params)
df = exp_df(x, params)
return f, df
# Number of data samples
n = len(data)
    # Number of parameters
p = 3
multifit_nlin.gsl_multifit_function_fdf(exp_f, exp_df, exp_fdf, data, n, p)
"""
initfunc = _callback.gsl_multifit_function_init_fdf
freefunc = _callback.gsl_multifit_function_free_fdf
def __init__(self, f, df, fdf, args, n, p):
self._ptr = None
assert(self.initfunc != None)
assert(self.freefunc != None)
self._ptr = self.initfunc((f, df, fdf, args, n, p))
class gsl_multimin_function(gsl_multiroot_function):
"""
This class defines the callbacks for gsl_multimin_function.
The following example function defines a simple paraboloid with two
parameters.
To supply the system define the function:
def my_f(v, params):
x = v[0]
y = v[1]
dp = params
t1 = (x - dp[0])
t2 = (y - dp[1])
f = 10.0 * t1 * t1 + 20.0 * t2 * t2 + 30.0
return f
# dimension of x
size = 2
sys = multimin.gsl_multifit_function(my_f, params, 2)
"""
initfunc = _callback.gsl_multimin_function_init
freefunc = _callback.gsl_multimin_function_free
class gsl_multimin_function_fdf(gsl_multiroot_function_fdf):
"""
This class defines the callbacks for gsl_multimin_function_fdf.
The following example function defines a simple paraboloid with two
parameters.
To supply the system define the function:
def my_f(v, params):
x = v[0]
y = v[1]
dp = params
t1 = (x - dp[0])
t2 = (y - dp[1])
f = 10.0 * t1 * t1 + 20.0 * t2 * t2 + 30.0
return f
def my_df(v, params):
x = v[0]
y = v[1]
df = Numeric.zeros(v.shape, Numeric.Float)
dp = params
df[0] = 20. * (x - dp[0])
df[1] = 40. * (y - dp[1])
return df
def my_fdf(v, params):
f = my_f(v, params)
df = my_df(v,params)
return f, df
# dimension of x
size = 2
sys = multimin.gsl_multifit_function(my_f, my_df, my_fdf, params, size)
"""
initfunc = _callback.gsl_multimin_function_init_fdf
freefunc = _callback.gsl_multimin_function_free_fdf
class gsl_monte_function(gsl_multiroot_function):
"""
This class defines the callbacks for gsl_monte_function.
"""
initfunc = _callback.gsl_monte_function_init
freefunc = _callback.gsl_monte_function_free
| 1,015
| 520
| 186
|
25348b308d580d2c34e6c0dbc12c85b434a6ba49
| 441
|
py
|
Python
|
d05/main.py
|
burk3/aoc2020
|
541d6102276978ea5e4e7abbd25a8811268be148
|
[
"MIT"
] | null | null | null |
d05/main.py
|
burk3/aoc2020
|
541d6102276978ea5e4e7abbd25a8811268be148
|
[
"MIT"
] | null | null | null |
d05/main.py
|
burk3/aoc2020
|
541d6102276978ea5e4e7abbd25a8811268be148
|
[
"MIT"
] | null | null | null |
import argparse
if __name__ == "__main__":
main()
| 20.045455
| 61
| 0.562358
|
import argparse
def to_n(s: str) -> int:
s = s.replace("F", "0").replace("B", "1")
s = s.replace("L", "0").replace("R", "1")
return int(s, base=2)
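# Example: the seat string is read as binary with F/L -> 0 and B/R -> 1,
# so to_n("FBFBBFFRLR") == 0b0101100101 == 357.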
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input", type=argparse.FileType("r"))
args = parser.parse_args()
r = 0
for line in args.input:
r = max(r, to_n(line.strip()))
print(r)
if __name__ == "__main__":
main()
| 339
| 0
| 46
|
f7651a1cc1b8afbcfdaaac06947975fdaeb6345c
| 1,128
|
py
|
Python
|
Bio-StrongHold/src/Creating_a_Restriction_Map.py
|
crf1111/Bio-Informatics-Learning
|
2ccc02d7a23584c12aee44c5620160cdcaf70bd4
|
[
"MIT"
] | 1
|
2018-10-10T19:03:52.000Z
|
2018-10-10T19:03:52.000Z
|
Bio-StrongHold/src/Creating_a_Restriction_Map.py
|
crf1111/Bio-Informatics-Learning
|
2ccc02d7a23584c12aee44c5620160cdcaf70bd4
|
[
"MIT"
] | null | null | null |
Bio-StrongHold/src/Creating_a_Restriction_Map.py
|
crf1111/Bio-Informatics-Learning
|
2ccc02d7a23584c12aee44c5620160cdcaf70bd4
|
[
"MIT"
] | null | null | null |
from collections import Counter
def partial_digest(distances):
'''Returns a set whose positive pairwise differences generate 'distances'.'''
# Initialize variables.
X = {0}
width = max(distances)
# Create lambda functions for multiset operations.
new_dist = lambda y, S: Counter(abs(y-s) for s in S)
containment = lambda a, b: all(a[x] <= b[x] for x in a)
# Create the multiset which generates 'distances'.
while len(distances) > 0:
y = max(distances)
if containment(new_dist(y, X), distances):
X |= {y}
distances -= new_dist(y, X)
else:
X |= {width - y}
distances -= new_dist(width - y, X)
return X
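# Worked example (sketch): the pairwise differences of {0, 2, 4, 7, 10} form the
# multiset {2, 2, 3, 3, 4, 5, 6, 7, 8, 10}, so
# partial_digest(Counter([2, 2, 3, 3, 4, 5, 6, 7, 8, 10]))
# recovers the points up to reflection: {0, 2, 4, 7, 10} or its mirror {0, 3, 6, 8, 10}.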
def main():
'''Main call. Reads, runs, and saves problem specific data.'''
# Read the input data.
with open('data/data.dat') as input_data:
distances = Counter(map(int,input_data.read().strip().split()))
# Get the partial digest.
X = sorted(list(partial_digest(distances)))
    # Print the answer.
    print(' '.join(map(str, X)))
if __name__ == '__main__':
main()
| 26.857143
| 81
| 0.606383
|
from collections import Counter
def partial_digest(distances):
'''Returns a set whose positive pairwise differences generate 'distances'.'''
# Initialize variables.
X = {0}
width = max(distances)
# Create lambda functions for multiset operations.
new_dist = lambda y, S: Counter(abs(y-s) for s in S)
containment = lambda a, b: all(a[x] <= b[x] for x in a)
# Create the multiset which generates 'distances'.
while len(distances) > 0:
y = max(distances)
if containment(new_dist(y, X), distances):
X |= {y}
distances -= new_dist(y, X)
else:
X |= {width - y}
distances -= new_dist(width - y, X)
return X
def main():
'''Main call. Reads, runs, and saves problem specific data.'''
# Read the input data.
with open('data/data.dat') as input_data:
distances = Counter(map(int,input_data.read().strip().split()))
# Get the partial digest.
X = sorted(list(partial_digest(distances)))
    # Print the answer.
    print(' '.join(map(str, X)))
if __name__ == '__main__':
main()
| 0
| 0
| 0
|
5249edbb51e3631d816de207fdd0682d7f1d7da4
| 1,291
|
py
|
Python
|
exercises/exercise_10_25_16.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
exercises/exercise_10_25_16.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
exercises/exercise_10_25_16.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
text = input("Give words: ")
print(pig_latin(text))
| 35.861111
| 74
| 0.459334
|
def pig_latin(text_input):
final = "" # create final string, couples, and split text_input
couples = ['bl', 'br', 'ch', 'cl', 'cr', 'dr', 'fl', 'fr', 'gh', 'gl',
'gr', 'ph', 'pl', 'pr', 'qu', 'sh', 'sk', 'sl', 'sm', 'sn',
'sp', 'st', 'sw', 'th', 'tr', 'tw', 'wh', 'wr']
split_text = text_input.strip().lower().split()
for i in range(len(split_text)): # test for symbols and numbers
if not split_text[i].isalpha():
return "Invalid."
for i in range(len(split_text)): # test passed, convert to pig latin
word = split_text[i]
if word[0] in 'aeiou': # vowel rule
if i == 0:
final += word.title()
else:
final += word
elif word[0:2] in couples: # first two letters rule
if i == 0:
final += word[2:].title()
else:
final += word[2:]
final += word[0:2]
else: # regular rule
if i == 0:
final += word[1:].title()
else:
final += word[1:]
final += word[0]
final += 'ay ' # add 'ay'
return final.strip()
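# Example: pig_latin("hello world") returns "Ellohay orldway"; the first word is
# title-cased, a leading vowel stays put, a listed two-letter couple or otherwise
# the first letter moves to the end, and "ay" is appended to every word.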
if __name__ == '__main__':
text = input("Give words: ")
print(pig_latin(text))
| 1,180
| 0
| 22
|
8a17fe6998c6e108cf61b5bc941dda0925aaff14
| 5,144
|
py
|
Python
|
North Pacific/PacificTimestepDensities.py
|
OceanParcels/SKIM-garbagepatchlocations
|
3c028e3ceba902ff79f52e31b83bed811bde1133
|
[
"MIT"
] | 1
|
2021-07-13T12:55:20.000Z
|
2021-07-13T12:55:20.000Z
|
North Pacific/PacificTimestepDensities.py
|
OceanParcels/SKIM-garbagepatchlocations
|
3c028e3ceba902ff79f52e31b83bed811bde1133
|
[
"MIT"
] | null | null | null |
North Pacific/PacificTimestepDensities.py
|
OceanParcels/SKIM-garbagepatchlocations
|
3c028e3ceba902ff79f52e31b83bed811bde1133
|
[
"MIT"
] | 1
|
2022-02-28T14:03:13.000Z
|
2022-02-28T14:03:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 13:35:23 2018
@author: Victor Onink
Here we create a figure that has the 24h, and the 3h flow field densities
for the North Pacific
"""
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from scipy import io
import pandas as pd
# cbar=my_map.colorbar(density)
# cbar.ax.tick_params(labelsize=12)
# cbar.set_label("Plastic Counts ($10^{-3}$ # km$^{-2}$)", rotation=90,fontsize=12)
#%%
location='D:\Desktop\Thesis\ParcelsFigData\Data\North Pacific\OutputFiles\Onink et al\Densities/'
File=['NorthPacificTotalDensity24h','NorthPacificStokesTotalDensity24h',
'NorthPacificTotalDensity3h','NorthPacificStokesTotalDensity3h']
axeslabelsize=14
textsize=12
fig,axes=plt.subplots(nrows=2, ncols=1,figsize=(10*2,8*1))
for i in range(len(File)):
density=np.load(location+File[i])
density[np.isnan(density)]=0
meanFinalYear=np.sum(density[-183:,:,:]/density[-183:,:,:].shape[0],axis=0)
meanFinalYear[meanFinalYear==0]=np.nan
latD=np.linspace(-80,80,160)
lonD=np.linspace(0,359,360)
plt.subplot(2,2,i+1)
density=plotDensity(i,lonD,latD,meanFinalYear)
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.93, 0.12, 0.02, 0.74])
cbar=fig.colorbar(density,cax=cbar_ax)
cbar.ax.tick_params(labelsize=textsize)
cbar.set_label("Plastic Counts ($10^{-3}$ # km$^{-2}$)", rotation=90,fontsize=axeslabelsize)
cbar.ax.set_yticklabels(['<0.1','0.3','0.5','0.7','0.9','1.1','1.3','1.5','1.7','1.9<'])
plt.subplots_adjust(wspace=0.06)
plt.savefig('D:\Desktop\Thesis\ParcelsFigData\Data\North Pacific\Figures\NorthPacificTimeStepDensities.jpg',
bbox_inches='tight')
| 43.965812
| 108
| 0.666796
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 13:35:23 2018
@author: Victor Onink
Here we create a figure that has the 24h, and the 3h flow field densities
for the North Pacific
"""
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from scipy import io
import pandas as pd
def AreaCalc(sizeLat,sizeLon): #Calculate surface area of grid cells
deg2rd = np.pi/180.
r=6378.1
lon_bins = np.linspace(0,360., sizeLon+1)
lat_bins = np.linspace(-90, 90, sizeLat+1)
Area=np.array([[deg2rd*(lon_bins[i+1]-lon_bins[i])*(np.sin(deg2rd*lat_bins[j+1])
- np.sin(deg2rd*lat_bins[j])) for i in range(len(lon_bins)-1)]
for j in range(len(lat_bins)-1)])
Area=r*r*Area*1000*1000 #convert it to m^2 instead of km^2
return Area
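# For reference: AreaCalc(180, 360) returns a (180, 360) array of 1x1-degree cell
# areas in m^2, using the spherical band formula r^2 * dlon * (sin(lat_j+1) - sin(lat_j)).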
def AreaWeighAverage(dataset,coarseness):
AreaInitial=AreaCalc(dataset.shape[0],dataset.shape[1])
AreaFinal=AreaCalc(dataset.shape[0]/coarseness,dataset.shape[1]/coarseness)
dataset[np.isnan(dataset)]=0
DataSetArea=np.multiply(AreaInitial,dataset)
temp=DataSetArea.reshape((DataSetArea.shape[0] // coarseness,coarseness,
DataSetArea.shape[1] // coarseness,coarseness))
finalDataset = np.sum(temp,axis=(1,3))
finalDataset=np.divide(finalDataset,AreaFinal)
finalDataset[finalDataset==0]=np.nan
return finalDataset
def plotParticles(lon, lat):
latmin,latmax=-5,75
lonmin,lonmax=-100,20
my_map = Basemap(projection='cyl', llcrnrlon=lonmin,
urcrnrlon=lonmax,llcrnrlat=latmin,urcrnrlat=latmax,
resolution='l')
my_map.drawcoastlines()
my_map.fillcontinents(color = 'gray')
my_map.drawmapboundary()
my_map.drawmeridians(np.arange(0, 360, 30))
my_map.drawparallels(np.arange(-90, 90, 30))
my_map.plot(lon,lat,'r.',markersize=3)
def HistogramFunction(londata,latdata):
londata,latdata=londata.reshape(np.size(londata)),latdata.reshape(np.size(latdata))
binsLon=np.arange(-180,180)
binsLat=np.arange(-90,90)
density=np.zeros((len(binsLon),len(binsLat)))
for i in range(np.array(londata).shape[0]):
density[np.argmin(np.abs(londata[i]-binsLon)),np.argmin(np.abs(latdata[i]-binsLat))]+=1
#Now, normalize it by area
area=AreaCalc(len(binsLat),len(binsLon)).T
density/=area
density[density==0]=np.nan
return density
def plotDensity(typ,lon,lat,dens):
Lat,Lon=np.meshgrid(lat,lon)
# Lon,Lat=np.meshgrid(lon,lat)
latmin,latmax=0,75
lonmin,lonmax=95,285
my_map = Basemap(projection='cyl', llcrnrlon=lonmin,
urcrnrlon=lonmax,llcrnrlat=latmin,urcrnrlat=latmax,
resolution='l')
# my_map.drawcoastlines()
my_map.fillcontinents(color = 'gray')
my_map.drawmapboundary()
my_map.drawmeridians(np.arange(0, 360, 30),labels=[0,0,0,1],fontsize=12)
if (typ+1)%2==1:
my_map.drawparallels(np.arange(-90, 91, 30),labels=[1,0,0,0],fontsize=12)
else:
my_map.drawparallels(np.arange(-90, 91, 30),fontsize=12)
density=my_map.contourf(Lon,Lat,dens/1e-8,np.linspace(1e-1,2e0,20),
#norm=colors.LogNorm(1e-10,1e-9),
cmap='rainbow',extend='both')
# my_map.contour(LonSam,LatSam,sample,[1e4,1e5],colors='k',zorder=100)
# my_map.contour(LonP,LatP,CountPvS,[5e4],colors='k',zorder=100)
title=['(a) Total Currents ($\Delta$t=24h)','(b) Total Currents + Stokes Drift ($\Delta$t=24h)',
'(c) Total Currents ($\Delta$t=3h)','(d) Total Currents + Stokes Drift ($\Delta$t=3h)']
plt.title(title[typ],fontsize=14,fontweight='bold')
return density
# cbar=my_map.colorbar(density)
# cbar.ax.tick_params(labelsize=12)
# cbar.set_label("Plastic Counts ($10^{-3}$ # km$^{-2}$)", rotation=90,fontsize=12)
#%%
location='D:\Desktop\Thesis\ParcelsFigData\Data\North Pacific\OutputFiles\Onink et al\Densities/'
File=['NorthPacificTotalDensity24h','NorthPacificStokesTotalDensity24h',
'NorthPacificTotalDensity3h','NorthPacificStokesTotalDensity3h']
axeslabelsize=14
textsize=12
fig,axes=plt.subplots(nrows=2, ncols=1,figsize=(10*2,8*1))
for i in range(len(File)):
density=np.load(location+File[i])
density[np.isnan(density)]=0
meanFinalYear=np.sum(density[-183:,:,:]/density[-183:,:,:].shape[0],axis=0)
meanFinalYear[meanFinalYear==0]=np.nan
latD=np.linspace(-80,80,160)
lonD=np.linspace(0,359,360)
plt.subplot(2,2,i+1)
density=plotDensity(i,lonD,latD,meanFinalYear)
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.93, 0.12, 0.02, 0.74])
cbar=fig.colorbar(density,cax=cbar_ax)
cbar.ax.tick_params(labelsize=textsize)
cbar.set_label("Plastic Counts ($10^{-3}$ # km$^{-2}$)", rotation=90,fontsize=axeslabelsize)
cbar.ax.set_yticklabels(['<0.1','0.3','0.5','0.7','0.9','1.1','1.3','1.5','1.7','1.9<'])
plt.subplots_adjust(wspace=0.06)
plt.savefig('D:\Desktop\Thesis\ParcelsFigData\Data\North Pacific\Figures\NorthPacificTimeStepDensities.jpg',
bbox_inches='tight')
| 3,294
| 0
| 118
|
abd35021221bc3bef34a47edf3e936e59ef5dd0a
| 1,563
|
py
|
Python
|
openduty/settings_environment.py
|
pataquets/openduty
|
5604e22b6a1aec7406ccd94ab2c17c1a6abd56c7
|
[
"MIT"
] | null | null | null |
openduty/settings_environment.py
|
pataquets/openduty
|
5604e22b6a1aec7406ccd94ab2c17c1a6abd56c7
|
[
"MIT"
] | 2
|
2020-07-18T18:13:51.000Z
|
2020-07-18T18:14:43.000Z
|
openduty/settings_environment.py
|
pataquets/openduty
|
5604e22b6a1aec7406ccd94ab2c17c1a6abd56c7
|
[
"MIT"
] | 3
|
2016-10-24T17:36:00.000Z
|
2018-04-30T18:07:51.000Z
|
from settings import *
BASE_URL = os.getenv('OPENDUTY_BASE_URL', "http://localhost")
XMPP_SETTINGS = {
'user': os.getenv('OPENDUTY_XMPP_USER'),
'password': os.getenv('OPENDUTY_XMPP_PASS'),
'server': os.getenv('OPENDUTY_XMPP_SERVER', 'xmpp'),
'port': os.getenv('OPENDUTY_XMPP_PORT', 5222),
}
EMAIL_SETTINGS = {
'user': os.getenv('OPENDUTY_EMAIL_USER'),
'password': os.getenv('OPENDUTY_EMAIL_PASS'),
}
'''
TWILIO_SETTINGS = {
'SID': "TWILIO_ACCOUNT_SID",
'token': "TWILIO_ACCOUNT_TOKEN",
'phone_number': "your_twilio_phone_number",
'sms_number': "your_twilio_sms_number",
'twiml_url': "http://www.website.org/voice.xml"
}
'''
SLACK_SETTINGS = {
'apikey': os.getenv('OPENDUTY_SLACK_APIKEY', "YOUR_SLACK_API_KEY")
}
'''
PROWL_SETTINGS = {
'priority': 0
'application': 'openduty'
}
'''
DATABASES = {
'default': {
'ENGINE': os.getenv('OPENDUTY_DATABASE_ENGINE', 'django.db.backends.mysql'),
'NAME': os.getenv('OPENDUTY_DATABASE_NAME', 'openduty'),
'USER': os.getenv('OPENDUTY_DATABASE_USER', 'openduty'),
'PASSWORD': os.getenv('OPENDUTY_DATABASE_PASS', 'dutyfree'),
'HOST': os.getenv('OPENDUTY_DATABASE_HOST', 'db'),
'PORT': os.getenv('OPENDUTY_DATABASE_PORT', '3306')
}
}
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('OPENDUTY_SECRET_KEY', 'yoursecretkey')
ALLOWED_HOSTS = ['your.dutyfree.host']
DEBUG = os.getenv('OPENDUTY_DEBUG', False)
TEMPLATE_DEBUG = os.getenv('OPENDUTY_TEMPLATE_DEBUG', False)
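Note that os.getenv returns a string whenever the variable is set, so DEBUG = os.getenv('OPENDUTY_DEBUG', False) is truthy even for OPENDUTY_DEBUG=False. A minimal sketch of stricter parsing; the env_bool helper is illustrative, not part of openduty:
import os

def env_bool(name, default=False):
    # Treat common "false-y" spellings as False; everything else as True.
    raw = os.getenv(name)
    if raw is None:
        return default
    return raw.strip().lower() not in ("0", "false", "no", "off", "")

DEBUG = env_bool('OPENDUTY_DEBUG', False)
TEMPLATE_DEBUG = env_bool('OPENDUTY_TEMPLATE_DEBUG', False)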
| 27.421053
| 84
| 0.676903
|
from settings import *
BASE_URL = os.getenv('OPENDUTY_BASE_URL', "http://localhost")
XMPP_SETTINGS = {
'user': os.getenv('OPENDUTY_XMPP_USER'),
'password': os.getenv('OPENDUTY_XMPP_PASS'),
'server': os.getenv('OPENDUTY_XMPP_SERVER', 'xmpp'),
'port': os.getenv('OPENDUTY_XMPP_PORT', 5222),
}
EMAIL_SETTINGS = {
'user': os.getenv('OPENDUTY_EMAIL_USER'),
'password': os.getenv('OPENDUTY_EMAIL_PASS'),
}
'''
TWILIO_SETTINGS = {
'SID': "TWILIO_ACCOUNT_SID",
'token': "TWILIO_ACCOUNT_TOKEN",
'phone_number': "your_twilio_phone_number",
'sms_number': "your_twilio_sms_number",
'twiml_url': "http://www.website.org/voice.xml"
}
'''
SLACK_SETTINGS = {
'apikey': os.getenv('OPENDUTY_SLACK_APIKEY', "YOUR_SLACK_API_KEY")
}
'''
PROWL_SETTINGS = {
'priority': 0
'application': 'openduty'
}
'''
DATABASES = {
'default': {
'ENGINE': os.getenv('OPENDUTY_DATABASE_ENGINE', 'django.db.backends.mysql'),
'NAME': os.getenv('OPENDUTY_DATABASE_NAME', 'openduty'),
'USER': os.getenv('OPENDUTY_DATABASE_USER', 'openduty'),
'PASSWORD': os.getenv('OPENDUTY_DATABASE_PASS', 'dutyfree'),
'HOST': os.getenv('OPENDUTY_DATABASE_HOST', 'db'),
'PORT': os.getenv('OPENDUTY_DATABASE_PORT', '3306')
}
}
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('OPENDUTY_SECRET_KEY', 'yoursecretkey')
ALLOWED_HOSTS = ['your.dutyfree.host']
DEBUG = os.getenv('OPENDUTY_DEBUG', False)
TEMPLATE_DEBUG = os.getenv('OPENDUTY_TEMPLATE_DEBUG', False)
| 0
| 0
| 0
|
6df353ed1986cfdd80127c2f8b54ef1714e4f5d8
| 1,313
|
py
|
Python
|
NaiveBayes/src/test.py
|
quqixun/MLAlgorithms
|
1ad46a899a6280a08c196fb4eb0931408c8636c7
|
[
"MIT"
] | 2
|
2018-04-25T18:00:28.000Z
|
2018-08-08T09:39:18.000Z
|
NaiveBayes/src/test.py
|
quqixun/MLAlgorithms
|
1ad46a899a6280a08c196fb4eb0931408c8636c7
|
[
"MIT"
] | null | null | null |
NaiveBayes/src/test.py
|
quqixun/MLAlgorithms
|
1ad46a899a6280a08c196fb4eb0931408c8636c7
|
[
"MIT"
] | 2
|
2019-03-03T02:55:48.000Z
|
2021-01-21T04:50:46.000Z
|
# Conventional Machine Learning Algorithms
# Test Script for Class of "NaiveBayes".
# Author: Qixun Qu
# Create on: 2018/04/24
# Modify on: 2018/04/25
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
from __future__ import division
from __future__ import print_function
from utils import *
from NaiveBayes import *
from sklearn.datasets import make_hastie_10_2
# Basic settings
random_state = 9527
n_samples = 10000
test_size = 0.2
# Generate Dataset for training and testing
# Obtain all samples
X, y = make_hastie_10_2(n_samples=n_samples,
random_state=random_state)
# Split dataset
X_train, y_train, X_test, y_test = split_dataset(X, y, test_size,
random_state)
# Normalize dataset
X_train_scaled, X_test_scaled = scale_dataset(X_train, X_test)
# Train Gaussian Naive Bayes Classifier
nb = NaiveBayes(alpha=1)
nb.fit(X_train_scaled, y_train, cont_feat_idx="all")
# Predict test set and evaluate results
y_pred = nb.predict(X_test_scaled)
print("Accuracy of test set:", accuracy(y_pred, y_test))
# Accuracy can reach 0.9765.
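split_dataset and scale_dataset come from the local utils module, which is not shown here; a rough sketch of what the calls above assume, built on scikit-learn (only the call signatures are taken from the code above, the internals are assumptions):
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

def split_dataset(X, y, test_size, random_state):
    # Returns X_train, y_train, X_test, y_test in the order used above (assumed).
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state)
    return X_train, y_train, X_test, y_test

def scale_dataset(X_train, X_test):
    # Fit the scaler on the training split only, then apply it to both splits.
    scaler = StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)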
| 24.314815
| 65
| 0.603199
|
# Conventional Machine Learning Algorithms
# Test Script for Class of "NaiveBayes".
# Author: Qixun Qu
# Create on: 2018/04/24
# Modify on: 2018/04/25
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
from __future__ import division
from __future__ import print_function
from utils import *
from NaiveBayes import *
from sklearn.datasets import make_hastie_10_2
# Basic settings
random_state = 9527
n_samples = 10000
test_size = 0.2
# Generate Dataset for training and testing
# Obtain all samples
X, y = make_hastie_10_2(n_samples=n_samples,
random_state=random_state)
# Split dataset
X_train, y_train, X_test, y_test = split_dataset(X, y, test_size,
random_state)
# Normalize dataset
X_train_scaled, X_test_scaled = scale_dataset(X_train, X_test)
# Train Gaussian Naive Bayes Classifier
nb = NaiveBayes(alpha=1)
nb.fit(X_train_scaled, y_train, cont_feat_idx="all")
# Predict test set and evaluate results
y_pred = nb.predict(X_test_scaled)
print("Accuracy of test set:", accuracy(y_pred, y_test))
# Accuracy can reach 0.9765.
| 0
| 0
| 0
|
192178a5c7c75105a3ee01c6e1f52067461ee41b
| 2,921
|
py
|
Python
|
login_script.py
|
ThBlitz/Minecraft-Packet-Mapper-MPM-
|
a2583574d176b57df341ee3eb023b1f29f0f1183
|
[
"MIT"
] | null | null | null |
login_script.py
|
ThBlitz/Minecraft-Packet-Mapper-MPM-
|
a2583574d176b57df341ee3eb023b1f29f0f1183
|
[
"MIT"
] | null | null | null |
login_script.py
|
ThBlitz/Minecraft-Packet-Mapper-MPM-
|
a2583574d176b57df341ee3eb023b1f29f0f1183
|
[
"MIT"
] | null | null | null |
from Bridge import Proxy2Server
import os
from DataTypes import Packet, A_Packet_Class
from DataTypes import VarInt, Output_Streamer, Bytes_Streamer, Socket_Streamer
import time
output = Output_Streamer()
input = Bytes_Streamer()
login_packets = A_Packet_Class()
SOCK = Socket_Streamer('connect.2b2t.org', 25565, login_packets)
handshake = Packet(login_packets)
handshake.set(['VarInt', 'VarInt', 'String', 'Ushort', 'VarInt'])
status = Packet(login_packets)
status.set(['VarInt', 'String'])
request = Packet(login_packets)
request.set(['VarInt'])
ping_pong = Packet(login_packets)
ping_pong.set(['VarInt', 'Long'])
encryption_req = Packet(login_packets)
encryption_req.set(['VarInt', 'String', 'String', 'String'])
encryption_res = Packet(login_packets)
encryption_res.set(['VarInt', 'String', 'String'])
login_success = Packet(login_packets)
login_success.set(['VarInt', 'String', 'String'])
set_compression = Packet(login_packets)
set_compression.set(['VarInt', 'VarInt'])
def pack_0(self, input):
    if self.compression_enabled == True:
        self.compress(input)
    packet_len = input.add_len()
    # print(packet_len)
    return
def unpack_0(self, input, output):
    # if self.compression_enabled == True:
    #     self.decompress(input)
    output.reset()
    return
login_packets.map_pack(pack_0)
login_packets.map_unpack(unpack_0)
# data = handshake.pack([0x00, 340, b'2b2t.org', 25565, 1])
# server_sock.write(data)
# data = request.pack([0x00])
# server_sock.write(data)
# status.unpack(server_sock, output)
input.write(handshake.pack([0x00, 340, b'2b2t.org', 25565, 2]))
SOCK.write(input)
input.write(status.pack([0x00, b'ThBlitz']))
SOCK.write(input)
SOCK.read(input)
encryption_req.unpack(input, output)
print(f'encryption_req : {output.getvalue()}')
data = output.getvalue()
login_packets.server_id = data[1]
login_packets.server_public_key = data[2]
login_packets.verification_token = data[3]
import secrets
login_packets.aes_key = secrets.randbits(128).to_bytes(16, 'big')
hash , ver_token , shared_secret = login_packets.get_hash()
import mojang_api
uuid , name , token , login_data = mojang_api.login_through_microsoft()
res = mojang_api.join_server(token, uuid, hash)
print(f'response from mojang : {res}')
input.reset()
input.write(encryption_res.pack([0x01, shared_secret, ver_token]))
SOCK.write(input)
login_packets.encryption_enabled = True
SOCK.read(input)
set_compression.unpack(input, output)
login_packets.compression_threshold = output.getvalue()[1]
login_packets.compression_enabled = True
print(f'compression_packet : {output.getvalue()}')
SOCK.read(input)
login_success.unpack(input, output)
print(f'login_success : {output.getvalue()}')
SOCK.read(input)
status.unpack(input, output)
print(input.getvalue())
while True:
SOCK.read(input)
print(hex(VarInt.unpack(input)))
print(input.read())
time.sleep(1)
# t
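DataTypes.VarInt is not shown in this file; for illustration only, Minecraft-style VarInts encode an integer 7 bits per byte with the high bit as a continuation flag. A self-contained sketch, independent of the streamer classes used above (non-negative values only):
def varint_pack(value):
    # Encode a non-negative int, 7 bits per byte, MSB set while more bytes follow.
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def varint_unpack(data):
    # Decode from the start of a bytes object; returns (value, bytes_consumed).
    value, shift = 0, 0
    for i, byte in enumerate(data):
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return value, i + 1
        shift += 7
    raise ValueError("truncated VarInt")

# 340 is the protocol number used in the handshake above.
assert varint_pack(340) == b'\xd4\x02' and varint_unpack(b'\xd4\x02') == (340, 2)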
| 20.716312
| 78
| 0.740157
|
from Bridge import Proxy2Server
import os
from DataTypes import Packet, A_Packet_Class
from DataTypes import VarInt, Output_Streamer, Bytes_Streamer, Socket_Streamer
import time
output = Output_Streamer()
input = Bytes_Streamer()
login_packets = A_Packet_Class()
SOCK = Socket_Streamer('connect.2b2t.org', 25565, login_packets)
handshake = Packet(login_packets)
handshake.set(['VarInt', 'VarInt', 'String', 'Ushort', 'VarInt'])
status = Packet(login_packets)
status.set(['VarInt', 'String'])
request = Packet(login_packets)
request.set(['VarInt'])
ping_pong = Packet(login_packets)
ping_pong.set(['VarInt', 'Long'])
encryption_req = Packet(login_packets)
encryption_req.set(['VarInt', 'String', 'String', 'String'])
encryption_res = Packet(login_packets)
encryption_res.set(['VarInt', 'String', 'String'])
login_success = Packet(login_packets)
login_success.set(['VarInt', 'String', 'String'])
set_compression = Packet(login_packets)
set_compression.set(['VarInt', 'VarInt'])
def pack_0(self, input):
if self.compression_enabled == True:
self.compress(input)
packet_len = input.add_len()
# print(packet_len)
return
def unpack_0(self, input, output):
# if self.compression_enabled == True:
# self.decompress(input)
output.reset()
return
login_packets.map_pack(pack_0)
login_packets.map_unpack(unpack_0)
# data = handshake.pack([0x00, 340, b'2b2t.org', 25565, 1])
# server_sock.write(data)
# data = request.pack([0x00])
# server_sock.write(data)
# status.unpack(server_sock, output)
input.write(handshake.pack([0x00, 340, b'2b2t.org', 25565, 2]))
SOCK.write(input)
input.write(status.pack([0x00, b'ThBlitz']))
SOCK.write(input)
SOCK.read(input)
encryption_req.unpack(input, output)
print(f'encryption_req : {output.getvalue()}')
data = output.getvalue()
login_packets.server_id = data[1]
login_packets.server_public_key = data[2]
login_packets.verification_token = data[3]
import secrets
login_packets.aes_key = secrets.randbits(128).to_bytes(16, 'big')
hash , ver_token , shared_secret = login_packets.get_hash()
import mojang_api
uuid , name , token , login_data = mojang_api.login_through_microsoft()
res = mojang_api.join_server(token, uuid, hash)
print(f'response from mojang : {res}')
input.reset()
input.write(encryption_res.pack([0x01, shared_secret, ver_token]))
SOCK.write(input)
login_packets.encryption_enabled = True
SOCK.read(input)
set_compression.unpack(input, output)
login_packets.compression_threshold = output.getvalue()[1]
login_packets.compression_enabled = True
print(f'compression_packet : {output.getvalue()}')
SOCK.read(input)
login_success.unpack(input, output)
print(f'login_success : {output.getvalue()}')
SOCK.read(input)
status.unpack(input, output)
print(input.getvalue())
while True:
SOCK.read(input)
print(hex(VarInt.unpack(input)))
print(input.read())
time.sleep(1)
# t
| 264
| 0
| 46
|
fe59772b65a640da95a676113cf3e37dc12af265
| 4,836
|
py
|
Python
|
src/data_analysis/analyze_goodreads_reviews_cleaned.py
|
ejgenc/data-analysis_goodreads_translation_reviews
|
c00dc7b59d5e3295df89d080e7d49792c15fa760
|
[
"MIT"
] | 1
|
2021-09-04T15:11:20.000Z
|
2021-09-04T15:11:20.000Z
|
src/data_analysis/analyze_goodreads_reviews_cleaned.py
|
ejgenc/data-analysis_goodreads_translation_reviews
|
c00dc7b59d5e3295df89d080e7d49792c15fa760
|
[
"MIT"
] | null | null | null |
src/data_analysis/analyze_goodreads_reviews_cleaned.py
|
ejgenc/data-analysis_goodreads_translation_reviews
|
c00dc7b59d5e3295df89d080e7d49792c15fa760
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 13:35:39 2021
@author: ejgen
------ What is this file? ------
This script targets the files goodreads_reviews_cleaned.csv and
review_sentences_analyzed.csv, calculating summary statistics such as
review length and sentiment score.
This script targets the following files:
../../data/cleaned/goodreads_reviews_cleaned.csv
../../data/analysis_results/review_sentences_analyzed.csv
The resulting csv file is located at:
../../data/analysis_results/goodreads_reviews_analyzed.csv
"""
#%% --- Import required packages ---
import os
from pathlib import Path # To wrap around filepaths
import pandas as pd
#%% --- Set proper directory to assure integration with doit ---
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
#%% --- Import data ---
#goodreads_reviews_cleaned
import_fp = Path("../../data/cleaned/goodreads_reviews_cleaned.csv")
goodreads_reviews = pd.read_csv(import_fp, encoding = "utf-8", index_col = False)
#review_sentences_analyzed
import_fp = Path("../../data/analysis_results/review_sentences_analyzed.csv")
sentences_analyzed = pd.read_csv(import_fp, encoding = "utf-8")
#%% --- Prepare data ---
sentences_analyzed = sentences_analyzed.loc[:,["review_id",
"sentence_id",
"sent_mentions_original",
"sent_mentions_trans",
"length_in_words",
"VADER_score_compound"]]
# Take a subset of goodreads reviews to include only reviews whose review_id
# appears in sentences_analyzed.
rid_mask = goodreads_reviews["review_id"].isin(sentences_analyzed["review_id"])
goodreads_reviews = goodreads_reviews.loc[rid_mask, :]
#%% --- Analyze: review length in sentences and words. ---
length_per_review = (sentences_analyzed
.groupby("review_id")
["length_in_words"]
.agg(["sum","count"])
.rename({"sum" : "total_length_in_words",
"count" : "total_length_in_sentences"},
axis = 1))
goodreads_reviews = (goodreads_reviews
.merge(length_per_review,
how = "left",
on = "review_id"))
#%% --- Analyze: mention ratios for explicit translation/author mentions
orig_mention_mask = sentences_analyzed["sent_mentions_original"] == True
trans_mention_mask = sentences_analyzed["sent_mentions_trans"] == True
only_orig_mention_mask = (orig_mention_mask & ~trans_mention_mask)
only_trans_mention_mask = (~orig_mention_mask & trans_mention_mask)
both_mention_mask = (orig_mention_mask & trans_mention_mask)
masks = {"share_of_only_trans_mentions" : only_trans_mention_mask,
"share_of_trans_mentions" : trans_mention_mask,
"share_of_only_orig_mentions": only_orig_mention_mask,
"share_of_orig_mentions": orig_mention_mask}
for prefix, mask in masks.items():
calc = (sentences_analyzed[mask].
groupby("review_id")
["length_in_words"]
.agg(["count"])
.rename({"count": prefix},
axis = 1)
.reset_index())
goodreads_reviews = (goodreads_reviews.merge(calc,
how = "left",
on = "review_id")
.fillna(value = 0,
axis = 0))
goodreads_reviews[prefix] = ((goodreads_reviews[prefix]
/ goodreads_reviews["total_length_in_sentences"])
* 100)
#%% --- Analyze: VADER score for the whole review ---
VADER_score_per_review = (sentences_analyzed
.groupby("review_id")
["VADER_score_compound"]
.agg(["sum","count"])
.reset_index())
VADER_score_per_review["avg_VADER_score"] = (VADER_score_per_review["sum"]
/ VADER_score_per_review["count"])
VADER_score_per_review = VADER_score_per_review.drop(labels = ["sum","count"],
axis = "columns")
goodreads_reviews = goodreads_reviews.merge(VADER_score_per_review,
how = "left",
on = "review_id")
#%% --- Export data ---
export_fp = Path("../../data/analysis_results/goodreads_reviews_analyzed.csv")
goodreads_reviews.to_csv(export_fp, encoding = "utf-8", index = False)
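As a quick, self-contained illustration of the groupby/merge/fillna pattern used for the mention shares above (toy data and a hypothetical column subset, not the real review files):
import pandas as pd

sentences = pd.DataFrame({
    "review_id": [1, 1, 1, 2, 2],
    "sent_mentions_trans": [True, False, True, False, False],
    "length_in_words": [5, 8, 3, 6, 4],
})
reviews = pd.DataFrame({"review_id": [1, 2]})

totals = (sentences.groupby("review_id")["length_in_words"]
          .agg(["count"]).rename({"count": "total_sentences"}, axis=1)
          .reset_index())
trans = (sentences[sentences["sent_mentions_trans"]]
         .groupby("review_id")["length_in_words"]
         .agg(["count"]).rename({"count": "trans_sentences"}, axis=1)
         .reset_index())

reviews = (reviews.merge(totals, on="review_id")
                  .merge(trans, how="left", on="review_id")
                  .fillna(0))
reviews["share_of_trans_mentions"] = (reviews["trans_sentences"]
                                      / reviews["total_sentences"] * 100)
print(reviews)  # review 1 -> 66.7%, review 2 -> 0%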
| 38.07874
| 81
| 0.580645
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 13:35:39 2021
@author: ejgen
------ What is this file? ------
This script targets the files goodreads_reviews_cleaned.csv and
review_sentences_analyzed.csv, calculating summary statistics such as
review length and sentiment score.
This script targets the following files:
../../data/cleaned/goodreads_reviews_cleaned.csv
../../data/analysis_results/review_sentences_analyzed.csv
The resulting csv file is located at:
../../data/analysis_results/goodreads_reviews_analyzed.csv
"""
#%% --- Import required packages ---
import os
from pathlib import Path # To wrap around filepaths
import pandas as pd
#%% --- Set proper directory to assure integration with doit ---
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
#%% --- Import data ---
#goodreads_reviews_cleaned
import_fp = Path("../../data/cleaned/goodreads_reviews_cleaned.csv")
goodreads_reviews = pd.read_csv(import_fp, encoding = "utf-8", index_col = False)
#review_sentences_analyzed
import_fp = Path("../../data/analysis_results/review_sentences_analyzed.csv")
sentences_analyzed = pd.read_csv(import_fp, encoding = "utf-8")
#%% --- Prepare data ---
sentences_analyzed = sentences_analyzed.loc[:,["review_id",
"sentence_id",
"sent_mentions_original",
"sent_mentions_trans",
"length_in_words",
"VADER_score_compound"]]
# Take a subset of goodreads reviews to include only reviews whose review_id
# appears in sentences_analyzed.
rid_mask = goodreads_reviews["review_id"].isin(sentences_analyzed["review_id"])
goodreads_reviews = goodreads_reviews.loc[rid_mask, :]
#%% --- Analyze: review length in sentences and words. ---
length_per_review = (sentences_analyzed
.groupby("review_id")
["length_in_words"]
.agg(["sum","count"])
.rename({"sum" : "total_length_in_words",
"count" : "total_length_in_sentences"},
axis = 1))
goodreads_reviews = (goodreads_reviews
.merge(length_per_review,
how = "left",
on = "review_id"))
#%% --- Analyze: mention ratios for explicit translation/author mentions
orig_mention_mask = sentences_analyzed["sent_mentions_original"] == True
trans_mention_mask = sentences_analyzed["sent_mentions_trans"] == True
only_orig_mention_mask = (orig_mention_mask & ~trans_mention_mask)
only_trans_mention_mask = (~orig_mention_mask & trans_mention_mask)
both_mention_mask = (orig_mention_mask & trans_mention_mask)
masks = {"share_of_only_trans_mentions" : only_trans_mention_mask,
"share_of_trans_mentions" : trans_mention_mask,
"share_of_only_orig_mentions": only_orig_mention_mask,
"share_of_orig_mentions": orig_mention_mask}
for prefix, mask in masks.items():
calc = (sentences_analyzed[mask].
groupby("review_id")
["length_in_words"]
.agg(["count"])
.rename({"count": prefix},
axis = 1)
.reset_index())
goodreads_reviews = (goodreads_reviews.merge(calc,
how = "left",
on = "review_id")
.fillna(value = 0,
axis = 0))
goodreads_reviews[prefix] = ((goodreads_reviews[prefix]
/ goodreads_reviews["total_length_in_sentences"])
* 100)
#%% --- Analyze: VADER score for the whole review ---
VADER_score_per_review = (sentences_analyzed
.groupby("review_id")
["VADER_score_compound"]
.agg(["sum","count"])
.reset_index())
VADER_score_per_review["avg_VADER_score"] = (VADER_score_per_review["sum"]
/ VADER_score_per_review["count"])
VADER_score_per_review = VADER_score_per_review.drop(labels = ["sum","count"],
axis = "columns")
goodreads_reviews = goodreads_reviews.merge(VADER_score_per_review,
how = "left",
on = "review_id")
#%% --- Export data ---
export_fp = Path("../../data/analysis_results/goodreads_reviews_analyzed.csv")
goodreads_reviews.to_csv(export_fp, encoding = "utf-8", index = False)
| 0
| 0
| 0
|
14d38902ff34fd61ae460246a58b5f14f1a88d4b
| 2,614
|
py
|
Python
|
save_face.py
|
idea4good/ReadFace
|
0482befcfe6e2d232cae87fd9802aa80d037c907
|
[
"Apache-2.0"
] | 2
|
2018-10-13T10:42:04.000Z
|
2020-05-13T03:49:54.000Z
|
save_face.py
|
idea4good/ReadFace
|
0482befcfe6e2d232cae87fd9802aa80d037c907
|
[
"Apache-2.0"
] | 1
|
2018-05-03T06:09:21.000Z
|
2018-05-07T09:51:35.000Z
|
save_face.py
|
idea4good/ReadFace
|
0482befcfe6e2d232cae87fd9802aa80d037c907
|
[
"Apache-2.0"
] | null | null | null |
import cv2, json, sys, datetime
import tensorflow as tf
import numpy as np
from face_filter import c_face_filter
from mtcnn_detect import c_MTCNNDetect
from face_attr import c_face_attr_reader
standard_face_size = 160 # 160(width) * 160(height)
detect_resolution = 80 # 80(width) * 80(height)
the_face_attrs_reader = c_face_attr_reader(standard_face_size)
the_filter = c_face_filter()
face_detect = c_MTCNNDetect(tf.Graph(), scale_factor=2) #scale_factor, rescales image for faster detection
vs = cv2.VideoCapture(0)
ret = 0
while ret >= 0:
ret = record_single_face()
| 39.014925
| 121
| 0.594109
|
import cv2, json, sys, datetime
import tensorflow as tf
import numpy as np
from face_filter import c_face_filter
from mtcnn_detect import c_MTCNNDetect
from face_attr import c_face_attr_reader
standard_face_size = 160 # 160(width) * 160(height)
detect_resolution = 80 # 80(width) * 80(height)
def record_single_face():
face_imgs = {"Left" : [], "Right": [], "Center": []}
face_img_cnt = {"Left" : 0, "Right": 0, "Center": 0}
face_attrs = {"Left" : [], "Right": [], "Center": []}
while True:
_, frame = vs.read()
rects, landmarks = face_detect.detect_face(frame, detect_resolution);
tip = ""; tip_color = 0;
if(len(rects) <= 0):
tip = "No face found!"; tip_color = (0, 0, 255);
else:
face, direction = the_filter.filter_standard_face(frame, standard_face_size, landmarks[0]);# save 1 face only
if len(face) == standard_face_size and len(face[0]) == standard_face_size:
face_imgs[direction].append(face)
face_img_cnt[direction] += 1
tip = "Recording..."; tip_color = (0, 255, 0)
else:
tip = "Filter face failed!"; tip_color = (0, 255, 255)
if(face_img_cnt["Left"] > 0 and face_img_cnt["Right"] > 0 and face_img_cnt["Center"] > 0):
tip = "Press 's'/'c' to save/clear this record"; tip_color = (0, 255, 255)
cv2.putText(frame, tip, (0, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, tip_color, 2, cv2.LINE_AA)
cv2.imshow("Press 'q' to exit", frame)
key = cv2.waitKey(1) & 0xFF
if (key == ord("q")):
return -1
elif (key == ord("c")):
return 0
elif (key == ord("s")):
break
if(face_img_cnt["Left"] == 0 or face_img_cnt["Right"] == 0 or face_img_cnt["Center"] == 0):
return 0
file_name = './faces/Face.' + datetime.datetime.now().strftime("%Y-%m-%d.%H-%M-%S") + '.attr'
print('-------------- ' + file_name + ' --------------')
for key in face_img_cnt:
print(key + " image count: " + str(face_img_cnt[key]))
for key in face_imgs:
face_attrs[key] = [np.mean(the_face_attrs_reader.get_face_attr(face_imgs[key]),axis=0).tolist()]
f = open(file_name, 'w')
f.write(json.dumps(face_attrs))
return 1
the_face_attrs_reader = c_face_attr_reader(standard_face_size)
the_filter = c_face_filter()
face_detect = c_MTCNNDetect(tf.Graph(), scale_factor=2) #scale_factor, rescales image for faster detection
vs = cv2.VideoCapture(0)
ret = 0
while ret >= 0:
ret = record_single_face()
| 2,014
| 0
| 23
|
d5bd8672c6fe31af95abb4e2bcd7359d4196917f
| 15,661
|
py
|
Python
|
otp/level/LevelSpec.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 8
|
2017-10-10T11:41:01.000Z
|
2021-02-23T12:55:47.000Z
|
otp/level/LevelSpec.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
otp/level/LevelSpec.py
|
AnonymousDeveloper65535/open-toontown
|
3d05c22a7d960ad843dde231140447c46973dba5
|
[
"BSD-3-Clause"
] | 2
|
2019-04-06T16:18:23.000Z
|
2021-02-25T06:25:01.000Z
|
from pandac import PandaModules as PM
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import list2dict, uniqueElements
import string
import LevelConstants
import types
if __dev__:
import os
| 37.023641
| 162
| 0.534066
|
from pandac import PandaModules as PM
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import list2dict, uniqueElements
import string
import LevelConstants
import types
if __dev__:
import os
class LevelSpec:
notify = DirectNotifyGlobal.directNotify.newCategory('LevelSpec')
SystemEntIds = (LevelConstants.UberZoneEntId, LevelConstants.LevelMgrEntId, LevelConstants.EditMgrEntId)
def __init__(self, spec = None, scenario = 0):
newSpec = 0
if type(spec) is types.ModuleType:
if __dev__:
reload(spec)
self.specDict = spec.levelSpec
if __dev__:
self.setFilename(spec.__file__)
elif type(spec) is types.DictType:
self.specDict = spec
elif spec is None:
if __dev__:
newSpec = 1
self.specDict = {'globalEntities': {},
'scenarios': [{}]}
self.entId2specDict = {}
self.entId2specDict.update(list2dict(self.getGlobalEntIds(), value=self.privGetGlobalEntityDict()))
for i in range(self.getNumScenarios()):
self.entId2specDict.update(list2dict(self.getScenarioEntIds(i), value=self.privGetScenarioEntityDict(i)))
self.setScenario(scenario)
if __dev__:
if newSpec:
import EntityTypes
import EntityTypeRegistry
etr = EntityTypeRegistry.EntityTypeRegistry(EntityTypes)
self.setEntityTypeReg(etr)
entId = LevelConstants.UberZoneEntId
self.insertEntity(entId, 'zone')
self.doSetAttrib(entId, 'name', 'UberZone')
entId = LevelConstants.LevelMgrEntId
self.insertEntity(entId, 'levelMgr')
self.doSetAttrib(entId, 'name', 'LevelMgr')
entId = LevelConstants.EditMgrEntId
self.insertEntity(entId, 'editMgr')
self.doSetAttrib(entId, 'name', 'EditMgr')
return
def destroy(self):
del self.specDict
del self.entId2specDict
del self.scenario
if hasattr(self, 'level'):
del self.level
if hasattr(self, 'entTypeReg'):
del self.entTypeReg
def getNumScenarios(self):
return len(self.specDict['scenarios'])
def setScenario(self, scenario):
self.scenario = scenario
def getScenario(self):
return self.scenario
def getGlobalEntIds(self):
return self.privGetGlobalEntityDict().keys()
def getScenarioEntIds(self, scenario = None):
if scenario is None:
scenario = self.scenario
return self.privGetScenarioEntityDict(scenario).keys()
def getAllEntIds(self):
return self.getGlobalEntIds() + self.getScenarioEntIds()
def getAllEntIdsFromAllScenarios(self):
entIds = self.getGlobalEntIds()
for scenario in xrange(self.getNumScenarios()):
entIds.extend(self.getScenarioEntIds(scenario))
return entIds
def getEntitySpec(self, entId):
specDict = self.entId2specDict[entId]
return specDict[entId]
def getCopyOfSpec(self, spec):
specCopy = {}
if not isClient():
print 'EXECWARNING LevelSpec exec: %s' % self.getSpecImportsModuleName()
printStack()
exec 'from %s import *' % self.getSpecImportsModuleName()
for key in spec.keys():
specCopy[key] = eval(repr(spec[key]))
return specCopy
def getEntitySpecCopy(self, entId):
specDict = self.entId2specDict[entId]
return self.getCopyOfSpec(specDict[entId])
def getEntityType(self, entId):
return self.getEntitySpec(entId)['type']
def getEntityZoneEntId(self, entId):
spec = self.getEntitySpec(entId)
type = spec['type']
if type == 'zone':
return entId
return self.getEntityZoneEntId(spec['parentEntId'])
def getEntType2ids(self, entIds):
entType2ids = {}
for entId in entIds:
type = self.getEntityType(entId)
entType2ids.setdefault(type, [])
entType2ids[type].append(entId)
return entType2ids
def privGetGlobalEntityDict(self):
return self.specDict['globalEntities']
def privGetScenarioEntityDict(self, scenario):
return self.specDict['scenarios'][scenario]
def printZones(self):
allIds = self.getAllEntIds()
type2id = self.getEntType2ids(allIds)
zoneIds = type2id['zone']
if 0 in zoneIds:
zoneIds.remove(0)
zoneIds.sort()
for zoneNum in zoneIds:
spec = self.getEntitySpec(zoneNum)
print 'zone %s: %s' % (zoneNum, spec['name'])
if __dev__:
def setLevel(self, level):
self.level = level
def hasLevel(self):
return hasattr(self, 'level')
def setEntityTypeReg(self, entTypeReg):
self.entTypeReg = entTypeReg
for entId in self.getAllEntIds():
spec = self.getEntitySpec(entId)
type = self.getEntityType(entId)
typeDesc = self.entTypeReg.getTypeDesc(type)
attribDescDict = typeDesc.getAttribDescDict()
for attribName, desc in attribDescDict.iteritems():
if attribName not in spec:
spec[attribName] = desc.getDefaultValue()
self.checkSpecIntegrity()
def hasEntityTypeReg(self):
return hasattr(self, 'entTypeReg')
def setFilename(self, filename):
self.filename = filename
def doSetAttrib(self, entId, attrib, value):
specDict = self.entId2specDict[entId]
specDict[entId][attrib] = value
def setAttribChange(self, entId, attrib, value, username):
LevelSpec.notify.info('setAttribChange(%s): %s, %s = %s' % (username,
entId,
attrib,
repr(value)))
self.doSetAttrib(entId, attrib, value)
if self.hasLevel():
self.level.handleAttribChange(entId, attrib, value, username)
def insertEntity(self, entId, entType, parentEntId = 'unspecified'):
LevelSpec.notify.info('inserting entity %s (%s)' % (entId, entType))
globalEnts = self.privGetGlobalEntityDict()
self.entId2specDict[entId] = globalEnts
globalEnts[entId] = {}
spec = globalEnts[entId]
attribDescs = self.entTypeReg.getTypeDesc(entType).getAttribDescDict()
for name, desc in attribDescs.items():
spec[name] = desc.getDefaultValue()
spec['type'] = entType
if parentEntId != 'unspecified':
spec['parentEntId'] = parentEntId
if self.hasLevel():
self.level.handleEntityInsert(entId)
else:
LevelSpec.notify.warning('no level to be notified of insertion')
def removeEntity(self, entId):
LevelSpec.notify.info('removing entity %s' % entId)
if self.hasLevel():
self.level.handleEntityRemove(entId)
else:
LevelSpec.notify.warning('no level to be notified of removal')
dict = self.entId2specDict[entId]
del dict[entId]
del self.entId2specDict[entId]
def removeZoneReferences(self, removedZoneNums):
type2ids = self.getEntType2ids(self.getAllEntIdsFromAllScenarios())
for type in type2ids:
typeDesc = self.entTypeReg.getTypeDesc(type)
visZoneListAttribs = typeDesc.getAttribsOfType('visZoneList')
if len(visZoneListAttribs) > 0:
for entId in type2ids[type]:
spec = self.getEntitySpec(entId)
for attribName in visZoneListAttribs:
for zoneNum in removedZoneNums:
while zoneNum in spec[attribName]:
spec[attribName].remove(zoneNum)
def getSpecImportsModuleName(self):
return 'toontown.coghq.SpecImports'
def getFilename(self):
return self.filename
def privGetBackupFilename(self, filename):
return '%s.bak' % filename
def saveToDisk(self, filename = None, makeBackup = 1):
if filename is None:
filename = self.filename
if filename.endswith('.pyc'):
filename = filename.replace('.pyc', '.py')
if makeBackup and self.privFileExists(filename):
try:
backupFilename = self.privGetBackupFilename(filename)
self.privRemoveFile(backupFilename)
os.rename(filename, backupFilename)
except OSError, e:
LevelSpec.notify.warning('error during backup: %s' % str(e))
LevelSpec.notify.info("writing to '%s'" % filename)
self.privRemoveFile(filename)
self.privSaveToDisk(filename)
return
def privSaveToDisk(self, filename):
retval = 1
f = file(filename, 'wb')
try:
f.write(self.getPrettyString())
except IOError:
retval = 0
f.close()
return retval
def privFileExists(self, filename):
try:
os.stat(filename)
return 1
except OSError:
return 0
def privRemoveFile(self, filename):
try:
os.remove(filename)
return 1
except OSError:
return 0
def getPrettyString(self):
import pprint
tabWidth = 4
tab = ' ' * tabWidth
globalEntitiesName = 'GlobalEntities'
scenarioEntitiesName = 'Scenario%s'
topLevelName = 'levelSpec'
def getPrettyEntityDictStr(name, dict, tabs = 0):
def t(n):
return (tabs + n) * tab
def sortList(lst, firstElements = []):
elements = list(lst)
result = []
for el in firstElements:
if el in elements:
result.append(el)
elements.remove(el)
elements.sort()
result.extend(elements)
return result
firstTypes = ('levelMgr', 'editMgr', 'zone')
firstAttribs = ('type', 'name', 'comment', 'parentEntId', 'pos', 'x', 'y', 'z', 'hpr', 'h', 'p', 'r', 'scale', 'sx', 'sy', 'sz', 'color', 'model')
str = t(0) + '%s = {\n' % name
entIds = dict.keys()
entType2ids = self.getEntType2ids(entIds)
types = sortList(entType2ids.keys(), firstTypes)
for type in types:
str += t(1) + '# %s\n' % string.upper(type)
entIds = entType2ids[type]
entIds.sort()
for entId in entIds:
str += t(1) + '%s: {\n' % entId
spec = dict[entId]
attribs = sortList(spec.keys(), firstAttribs)
for attrib in attribs:
str += t(2) + "'%s': %s,\n" % (attrib, repr(spec[attrib]))
str += t(2) + '}, # end entity %s\n' % entId
str += t(1) + '}\n'
return str
def getPrettyTopLevelDictStr(tabs = 0):
def t(n):
return (tabs + n) * tab
str = t(0) + '%s = {\n' % topLevelName
str += t(1) + "'globalEntities': %s,\n" % globalEntitiesName
str += t(1) + "'scenarios': [\n"
for i in range(self.getNumScenarios()):
str += t(2) + '%s,\n' % (scenarioEntitiesName % i)
str += t(2) + '],\n'
str += t(1) + '}\n'
return str
str = 'from %s import *\n' % self.getSpecImportsModuleName()
str += '\n'
str += getPrettyEntityDictStr('GlobalEntities', self.privGetGlobalEntityDict())
str += '\n'
numScenarios = self.getNumScenarios()
for i in range(numScenarios):
str += getPrettyEntityDictStr('Scenario%s' % i, self.privGetScenarioEntityDict(i))
str += '\n'
str += getPrettyTopLevelDictStr()
self.testPrettyString(prettyString=str)
return str
def _recurKeyTest(self, dict1, dict2):
s = ''
errorCount = 0
if set(dict1.keys()) != set(dict2.keys()):
return 0
for key in dict1:
if type(dict1[key]) == type({}) and type(dict2[key]) == type({}):
if not self._recurKeyTest(dict1[key], dict2[key]):
return 0
else:
strd1 = repr(dict1[key])
strd2 = repr(dict2[key])
if strd1 != strd2:
s += '\nBAD VALUE(%s): %s != %s\n' % (key, strd1, strd2)
errorCount += 1
print s
if errorCount == 0:
return 1
else:
return 0
def testPrettyString(self, prettyString = None):
if prettyString is None:
prettyString = self.getPrettyString()
if not isClient():
print 'EXECWARNING LevelSpec exec 2: %s' % prettyString
printStack()
exec prettyString
if self._recurKeyTest(levelSpec, self.specDict):
return 1
return
def checkSpecIntegrity(self):
entIds = self.getGlobalEntIds()
entIds = list2dict(entIds)
for i in range(self.getNumScenarios()):
for id in self.getScenarioEntIds(i):
entIds[id] = None
if self.entTypeReg is not None:
allEntIds = entIds
for entId in allEntIds:
spec = self.getEntitySpec(entId)
entType = spec['type']
typeDesc = self.entTypeReg.getTypeDesc(entType)
attribNames = typeDesc.getAttribNames()
attribDescs = typeDesc.getAttribDescDict()
for attrib in spec.keys():
if attrib not in attribNames:
LevelSpec.notify.warning("entId %s (%s): unknown attrib '%s', omitting" % (entId, spec['type'], attrib))
del spec[attrib]
for attribName in attribNames:
if not spec.has_key(attribName):
LevelSpec.notify.warning("entId %s (%s): missing attrib '%s'" % (entId, spec['type'], attribName))
return
def stringHash(self):
h = PM.HashVal()
h.hashString(repr(self))
return h.asHex()
def __hash__(self):
return hash(repr(self))
def __str__(self):
return 'LevelSpec'
def __repr__(self):
return 'LevelSpec(%s, scenario=%s)' % (repeatableRepr(self.specDict), repeatableRepr(self.scenario))
| 13,957
| 1,452
| 23
|
8612134a84d546ccb13eba2fcebd4f7f0b9a3940
| 6,349
|
py
|
Python
|
grantTempAdmin.py
|
hoyle-ub/MakeMeAdminPy
|
9a674fb829b1a83e9f6e61436d36fd0b13897196
|
[
"BSD-3-Clause"
] | null | null | null |
grantTempAdmin.py
|
hoyle-ub/MakeMeAdminPy
|
9a674fb829b1a83e9f6e61436d36fd0b13897196
|
[
"BSD-3-Clause"
] | null | null | null |
grantTempAdmin.py
|
hoyle-ub/MakeMeAdminPy
|
9a674fb829b1a83e9f6e61436d36fd0b13897196
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2017 Jamf. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Jamf nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JAMF SOFTWARE, LLC "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JAMF SOFTWARE, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# This script was modified from Andrina Kelly's version presented at JNUC2013 for allowing
# a user to elevate their privileges to administrator once per day for 60 minutes. After
# the 60 minutes, any admin account the user created in the meantime will also have its
# admin rights revoked.
#
# To accomplish this the following will be performed:
# - A launch daemon will be put in place in order to remove admin rights
# - Log will be written to tempAdmin.log
# - This policy in Jamf will be set to only be allowed once per day
#
# REQUIREMENTS:
# - Jamf Pro
# - Policy for enabling tempAdmin via Self Service
# - Policy to remove tempAdmin via custom trigger
# - tempAdmin.sh & removeTempAdmin.sh Scripts
#
#
# Written by: Joshua Roskos | Professional Services Engineer | Jamf
#
# Created On: June 20th, 2017
# Updated On: July 26th, 2017
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import os, plistlib, pwd, grp, subprocess, sys
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
from datetime import datetime
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# VARIABLES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
userName = (SCDynamicStoreCopyConsoleUser(None, None, None) or [None])[0] # get the logged in user's name
workingDir = '/usr/local/jamfps/' # working directory for script
launchdFile = 'com.jamfps.adminremove.plist' # launch daemon file name
launchdLabel = launchdFile.replace('.plist', '') # launch daemon label
plistFile = 'MakeMeAdmin.plist' # settings file name
tempAdminLog = 'tempAdmin.log' # script log file
adminTimer = 3600 # how long should they have admin rights for (in seconds)
policyCustomTrigger = 'adminremove' # custom trigger specified for removeTempAdmin.py policy
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LAUNCH DAEMON
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# place launchd plist to call JSS policy to remove admin rights.
print 'Creating LaunchDaemon...'
launchDaemon = { 'Label':launchdLabel,
'LaunchOnlyOnce':True,
'ProgramArguments':['/usr/local/jamf/bin/jamf', 'policy', '-trigger', policyCustomTrigger],
'StartInterval':adminTimer,
'UserName':'root',
}
plistlib.writePlist(launchDaemon, '/Library/LaunchDaemons/' + launchdFile)
# set the permission on the file just made.
userID = pwd.getpwnam("root").pw_uid
groupID = grp.getgrnam("wheel").gr_gid
os.chown('/Library/LaunchDaemons/' + launchdFile, userID, groupID)
os.chmod('/Library/LaunchDaemons/' + launchdFile, 0644)
# load the removal plist timer.
print 'Loading LaunchDaemon...'
subprocess.call(["launchctl", "load", "-w", '/Library/LaunchDaemons/' + launchdFile])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# APPLICATION
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# build log files
if not os.path.exists(workingDir):
os.makedirs(workingDir)
# record user that will need to have admin rights removed
# record current existing admins
print 'Retrieving List of Current Admins...'
currentAdmins = grp.getgrnam('admin').gr_mem
print 'Updating Plist...'
plist = { 'User2Remove':userName,
'CurrentAdminUsers':currentAdmins}
plistlib.writePlist(plist, workingDir + plistFile)
# give current logged user admin rights
subprocess.call(["dseditgroup", "-o", "edit", "-a", userName, "-t", "user", "admin"])
# add log entry
log = open(workingDir + tempAdminLog, "a+")
log.write("{} - MakeMeAdmin Granted Admin Rights for {}\r\n".format(datetime.now(), userName))
log.close()
print 'Granted Admin Right to ' + userName
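The removal policy triggered by the launch daemon above (removeTempAdmin) is not included here; a hedged sketch of what it might do with the MakeMeAdmin.plist written by this script — the details are assumptions, not the actual Jamf script:
import plistlib, grp, subprocess

workingDir = '/usr/local/jamfps/'
settings = plistlib.readPlist(workingDir + 'MakeMeAdmin.plist')  # matches the writePlist call above

# Remove rights from the user recorded at grant time, plus any admins that
# appeared since the snapshot was taken.
toRemove = set([settings['User2Remove']])
toRemove |= set(grp.getgrnam('admin').gr_mem) - set(settings['CurrentAdminUsers'])

for user in toRemove:
    subprocess.call(["dseditgroup", "-o", "edit", "-d", user, "-t", "user", "admin"])

# Unload the one-shot launch daemon so it does not fire again.
subprocess.call(["launchctl", "remove", "com.jamfps.adminremove"])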
| 49.217054
| 133
| 0.556466
|
#!/usr/bin/env python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2017 Jamf. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Jamf nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY JAMF SOFTWARE, LLC "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JAMF SOFTWARE, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# This script was modified from Andrina Kelly's version presented at JNUC2013 for allowing
# a user to elevate their privileges to administrator once per day for 60 minutes. After
# the 60 minutes, any admin account the user created in the meantime will also have its
# admin rights revoked.
#
# To accomplish this the following will be performed:
# - A launch daemon will be put in place in order to remove admin rights
# - Log will be written to tempAdmin.log
# - This policy in Jamf will be set to only be allowed once per day
#
# REQUIREMENTS:
# - Jamf Pro
# - Policy for enabling tempAdmin via Self Service
# - Policy to remove tempAdmin via custom trigger
# - tempAdmin.sh & removeTempAdmin.sh Scripts
#
#
# Written by: Joshua Roskos | Professional Services Engineer | Jamf
#
# Created On: June 20th, 2017
# Updated On: July 26th, 2017
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# IMPORTS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import os, plistlib, pwd, grp, subprocess, sys
from SystemConfiguration import SCDynamicStoreCopyConsoleUser
from datetime import datetime
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# VARIABLES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
userName = (SCDynamicStoreCopyConsoleUser(None, None, None) or [None])[0] # get the logged in user's name
workingDir = '/usr/local/jamfps/' # working directory for script
launchdFile = 'com.jamfps.adminremove.plist' # launch daemon file name
launchdLabel = launchdFile.replace('.plist', '') # launch daemon label
plistFile = 'MakeMeAdmin.plist' # settings file name
tempAdminLog = 'tempAdmin.log' # script log file
adminTimer = 3600 # how long should they have admin rights for (in seconds)
policyCustomTrigger = 'adminremove' # custom trigger specified for removeTempAdmin.py policy
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# LAUNCH DAEMON
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# place launchd plist to call JSS policy to remove admin rights.
print 'Creating LaunchDaemon...'
launchDaemon = { 'Label':launchdLabel,
'LaunchOnlyOnce':True,
'ProgramArguments':['/usr/local/jamf/bin/jamf', 'policy', '-trigger', policyCustomTrigger],
'StartInterval':adminTimer,
'UserName':'root',
}
plistlib.writePlist(launchDaemon, '/Library/LaunchDaemons/' + launchdFile)
# set the permission on the file just made.
userID = pwd.getpwnam("root").pw_uid
groupID = grp.getgrnam("wheel").gr_gid
os.chown('/Library/LaunchDaemons/' + launchdFile, userID, groupID)
os.chmod('/Library/LaunchDaemons/' + launchdFile, 0644)
# load the removal plist timer.
print 'Loading LaunchDaemon...'
subprocess.call(["launchctl", "load", "-w", '/Library/LaunchDaemons/' + launchdFile])
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# APPLICATION
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# build log files
if not os.path.exists(workingDir):
os.makedirs(workingDir)
# record user that will need to have admin rights removed
# record current existing admins
print 'Retrieving List of Current Admins...'
currentAdmins = grp.getgrnam('admin').gr_mem
print 'Updating Plist...'
plist = { 'User2Remove':userName,
'CurrentAdminUsers':currentAdmins}
plistlib.writePlist(plist, workingDir + plistFile)
# give current logged user admin rights
subprocess.call(["dseditgroup", "-o", "edit", "-a", userName, "-t", "user", "admin"])
# add log entry
log = open(workingDir + tempAdminLog, "a+")
log.write("{} - MakeMeAdmin Granted Admin Rights for {}\r\n".format(datetime.now(), userName))
log.close()
print 'Granted Admin Right to ' + userName
| 0
| 0
| 0
|
8d63061f1915c19b1edccebd546ba851e25c34a5
| 86
|
py
|
Python
|
boj/10930.py
|
JeongHoLim/practice
|
5f8914ba42b2ae01e0a00e92a7af9fcf63c8b7c2
|
[
"MIT"
] | 1
|
2022-01-16T19:57:28.000Z
|
2022-01-16T19:57:28.000Z
|
boj/10930.py
|
JeongHoLim/practice
|
5f8914ba42b2ae01e0a00e92a7af9fcf63c8b7c2
|
[
"MIT"
] | null | null | null |
boj/10930.py
|
JeongHoLim/practice
|
5f8914ba42b2ae01e0a00e92a7af9fcf63c8b7c2
|
[
"MIT"
] | null | null | null |
import hashlib
message = input()
print(hashlib.sha256(message.encode()).hexdigest())
| 17.2
| 51
| 0.755814
|
import hashlib
message = input()
print(hashlib.sha256(message.encode()).hexdigest())
| 0
| 0
| 0
|
29f85f52ee9b12989c3c1794431c329b04a4a16a
| 2,127
|
py
|
Python
|
my_app/migrations/0001_initial.py
|
B0und/kotanima_server
|
01b25531de219d16831d97a76c7e5f6326b6e99d
|
[
"MIT"
] | 1
|
2021-10-03T20:20:22.000Z
|
2021-10-03T20:20:22.000Z
|
my_app/migrations/0001_initial.py
|
Kotanima/kotanima_server
|
01b25531de219d16831d97a76c7e5f6326b6e99d
|
[
"MIT"
] | null | null | null |
my_app/migrations/0001_initial.py
|
Kotanima/kotanima_server
|
01b25531de219d16831d97a76c7e5f6326b6e99d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-04-17 11:19
import django.contrib.postgres.fields
from django.db import migrations, models
| 42.54
| 147
| 0.586272
|
# Generated by Django 3.1.6 on 2021-04-17 11:19
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='RedditPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sub_name', models.CharField(default=None, max_length=20)),
('post_id', models.CharField(max_length=6)),
('author', models.CharField(max_length=20)),
('title', models.CharField(max_length=300)),
('url', models.CharField(max_length=64)),
('created_utc', models.CharField(max_length=10)),
('phash', models.CharField(max_length=16, null=True)),
('dislike', models.BooleanField(default=None, null=True)),
('wrong_format', models.BooleanField(default=False)),
('selected', models.BooleanField(default=False)),
('source_link', models.TextField(default=None, null=True)),
('visible_tags', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=9999), blank=True, size=None)),
('invisible_tags', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=9999), blank=True, size=None)),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='VkPost',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scheduled_date', models.CharField(max_length=10, null=True)),
('phash', models.CharField(max_length=16)),
],
),
migrations.AddConstraint(
model_name='redditpost',
constraint=models.UniqueConstraint(fields=('sub_name', 'post_id'), name='unique_post_in_sub'),
),
]
| 0
| 1,975
| 23
|
37a24d22cb3598933be530ff93291dd60b3d68a5
| 5,591
|
py
|
Python
|
PyStationB/libraries/ABEX/tests/test_generic_parsing.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 6
|
2021-09-29T15:46:55.000Z
|
2021-12-14T18:39:51.000Z
|
PyStationB/libraries/ABEX/tests/test_generic_parsing.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | null | null | null |
PyStationB/libraries/ABEX/tests/test_generic_parsing.py
|
BrunoKM/station-b-libraries
|
ea3591837e4a33f0bef789d905467754c27913b3
|
[
"MIT"
] | 3
|
2021-09-27T10:35:20.000Z
|
2021-10-02T17:53:07.000Z
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import param
import pytest
from abex.common.generic_parsing import GenericConfig, IntTuple
def test_overridable_parameter() -> None:
"""
Test to check overridable parameters are correctly identified.
"""
param_dict = ParamClass.get_overridable_parameters()
assert "name" in param_dict
assert "flag" in param_dict
assert "seed" in param_dict
assert "number" in param_dict
assert "integers" in param_dict
assert "optional_int" in param_dict
assert "optional_float" in param_dict
assert "tuple1" in param_dict
assert "int_tuple" in param_dict
assert "enum" in param_dict
assert "readonly" not in param_dict
assert "_non_override" not in param_dict
assert "constant" not in param_dict
def test_create_parser() -> None:
"""
Check that parse_args works as expected, with both non default and default values.
"""
    def check(arg: List[str], expected_key: str, expected_value: Any) -> None:
        parsed = ParamClass.parse_args(arg)
        assert getattr(parsed, expected_key) == expected_value
    check(["--name=foo"], "name", "foo")
check(["--seed", "42"], "seed", 42)
check(["--seed", ""], "seed", 42)
check(["--number", "2.17"], "number", 2.17)
check(["--number", ""], "number", 3.14)
check(["--integers", "1,2,3"], "integers", [1, 2, 3])
check(["--optional_int", ""], "optional_int", None)
check(["--optional_int", "2"], "optional_int", 2)
check(["--optional_float", ""], "optional_float", None)
check(["--optional_float", "3.14"], "optional_float", 3.14)
check(["--tuple1", "1,2"], "tuple1", (1, 2.0))
check(["--int_tuple", "1,2,3"], "int_tuple", (1, 2, 3))
check(["--enum=2"], "enum", ParamEnum.EnumValue2)
check(["--floats=1,2,3.14"], "floats", [1.0, 2.0, 3.14])
check(["--integers=1,2,3"], "integers", [1, 2, 3])
check(["--flag"], "flag", True)
# Check that default values are created as expected, and that the non-overridable parameters
# are omitted.
defaults = vars(ParamClass.create_argparser().parse_args([]))
assert defaults["seed"] == 42
assert defaults["tuple1"] == (1, 2.3)
assert defaults["int_tuple"] == (1, 1, 1)
assert defaults["enum"] == ParamEnum.EnumValue1
assert "readonly" not in defaults
assert "constant" not in defaults
assert "_non_override" not in defaults
# We can't test if all invalid cases are handled because argparse call sys.exit
# upon errors.
def test_apply_overrides() -> None:
"""
    Test that overrides are applied correctly, and only to overridable parameters.
"""
m = ParamClass()
overrides = {"name": "newName", "int_tuple": (0, 1, 2)}
actual_overrides = m.apply_overrides(overrides)
assert actual_overrides == overrides
assert all([x == i and isinstance(x, int) for i, x in enumerate(m.int_tuple)])
assert m.name == "newName"
# Attempt to change seed and constant, but the latter should be ignored.
change_seed: Dict[str, Any] = {"seed": 123}
old_constant = m.constant
changes2 = m.apply_overrides({**change_seed, "constant": "Nothing"})
assert changes2 == change_seed
assert m.seed == 123
assert m.constant == old_constant
@pytest.mark.parametrize("value_idx_0", [1.0, 1])
@pytest.mark.parametrize("value_idx_1", [2.0, 2])
@pytest.mark.parametrize("value_idx_2", [3.0, 3])
def test_int_tuple_validation(value_idx_0: Any, value_idx_1: Any, value_idx_2: Any) -> None:
"""
Test integer tuple parameter is validated correctly.
"""
m = ParamClass()
val = (value_idx_0, value_idx_1, value_idx_2)
if not all([isinstance(x, int) for x in val]):
with pytest.raises(ValueError):
m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)
else:
m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)
| 43.341085
| 111
| 0.636738
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import param
import pytest
from abex.common.generic_parsing import GenericConfig, IntTuple
class ParamEnum(Enum):
EnumValue1 = ("1",)
EnumValue2 = "2"
class ParamClass(GenericConfig):
name: str = param.String(None, doc="Name") # type: ignore # auto
seed: int = param.Integer(42, doc="Seed") # type: ignore # auto
flag: int = param.Boolean(False, doc="Flag") # type: ignore # auto
number: float = param.Number(3.14) # type: ignore # auto
integers: List[int] = param.List(None, class_=int) # type: ignore # auto
optional_int: Optional[int] = param.Integer(None, doc="Optional int") # type: ignore # auto
optional_float: Optional[float] = param.Number(None, doc="Optional float") # type: ignore # auto
floats: List[float] = param.List(None, class_=float) # type: ignore # auto
tuple1: Tuple[int, float] = param.NumericTuple((1, 2.3), length=2, doc="Tuple") # type: ignore # auto
int_tuple: Tuple[int, int, int] = IntTuple((1, 1, 1), length=3, doc="Integer Tuple") # type: ignore # auto
enum: ParamEnum = param.ClassSelector(
default=ParamEnum.EnumValue1, class_=ParamEnum, instantiate=False # type: ignore # auto
)
readonly: str = param.String("Nope", readonly=True) # type: ignore # auto
_non_override: str = param.String("Nope") # type: ignore # auto
constant: str = param.String("Nope", constant=True) # type: ignore # auto
def test_overridable_parameter() -> None:
"""
Test to check overridable parameters are correctly identified.
"""
param_dict = ParamClass.get_overridable_parameters()
assert "name" in param_dict
assert "flag" in param_dict
assert "seed" in param_dict
assert "number" in param_dict
assert "integers" in param_dict
assert "optional_int" in param_dict
assert "optional_float" in param_dict
assert "tuple1" in param_dict
assert "int_tuple" in param_dict
assert "enum" in param_dict
assert "readonly" not in param_dict
assert "_non_override" not in param_dict
assert "constant" not in param_dict
def test_create_parser() -> None:
"""
Check that parse_args works as expected, with both non default and default values.
"""
def check(arg: List[str], expected_key: str, expected_value: Any) -> None:
parsed = ParamClass.parse_args(arg)
assert getattr(parsed, expected_key) == expected_value
check(["--name=foo"], "name", "foo")
check(["--seed", "42"], "seed", 42)
check(["--seed", ""], "seed", 42)
check(["--number", "2.17"], "number", 2.17)
check(["--number", ""], "number", 3.14)
check(["--integers", "1,2,3"], "integers", [1, 2, 3])
check(["--optional_int", ""], "optional_int", None)
check(["--optional_int", "2"], "optional_int", 2)
check(["--optional_float", ""], "optional_float", None)
check(["--optional_float", "3.14"], "optional_float", 3.14)
check(["--tuple1", "1,2"], "tuple1", (1, 2.0))
check(["--int_tuple", "1,2,3"], "int_tuple", (1, 2, 3))
check(["--enum=2"], "enum", ParamEnum.EnumValue2)
check(["--floats=1,2,3.14"], "floats", [1.0, 2.0, 3.14])
check(["--integers=1,2,3"], "integers", [1, 2, 3])
check(["--flag"], "flag", True)
# Check that default values are created as expected, and that the non-overridable parameters
# are omitted.
defaults = vars(ParamClass.create_argparser().parse_args([]))
assert defaults["seed"] == 42
assert defaults["tuple1"] == (1, 2.3)
assert defaults["int_tuple"] == (1, 1, 1)
assert defaults["enum"] == ParamEnum.EnumValue1
assert "readonly" not in defaults
assert "constant" not in defaults
assert "_non_override" not in defaults
# We can't test if all invalid cases are handled because argparse call sys.exit
# upon errors.
def test_apply_overrides() -> None:
"""
    Test that overrides are applied correctly, and only to overridable parameters.
"""
m = ParamClass()
overrides = {"name": "newName", "int_tuple": (0, 1, 2)}
actual_overrides = m.apply_overrides(overrides)
assert actual_overrides == overrides
assert all([x == i and isinstance(x, int) for i, x in enumerate(m.int_tuple)])
assert m.name == "newName"
# Attempt to change seed and constant, but the latter should be ignored.
change_seed: Dict[str, Any] = {"seed": 123}
old_constant = m.constant
changes2 = m.apply_overrides({**change_seed, "constant": "Nothing"})
assert changes2 == change_seed
assert m.seed == 123
assert m.constant == old_constant
@pytest.mark.parametrize("value_idx_0", [1.0, 1])
@pytest.mark.parametrize("value_idx_1", [2.0, 2])
@pytest.mark.parametrize("value_idx_2", [3.0, 3])
def test_int_tuple_validation(value_idx_0: Any, value_idx_1: Any, value_idx_2: Any) -> None:
"""
Test integer tuple parameter is validated correctly.
"""
m = ParamClass()
val = (value_idx_0, value_idx_1, value_idx_2)
if not all([isinstance(x, int) for x in val]):
with pytest.raises(ValueError):
m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)
else:
m.int_tuple = (value_idx_0, value_idx_1, value_idx_2)
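# A minimal usage sketch of the config class exercised above; it relies only on the
# ParamClass defined in this file, and the argument values are illustrative.
if __name__ == "__main__":
    cfg = ParamClass.parse_args(["--name=demo", "--seed", "7", "--int_tuple", "2,3,4"])
    assert cfg.seed == 7 and cfg.int_tuple == (2, 3, 4)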
| 160
| 1,279
| 73
|
1cfd6c15d5a97dcc496e4cf4e6fa21334efe7b93
| 1,873
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/virtual_network_configuration_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/virtual_network_configuration_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/virtual_network_configuration_py3.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualNetworkConfiguration(Model):
"""Configuration of a virtual network to which API Management service is
deployed.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar vnetid: The virtual network ID. This is typically a GUID. Expect a
null GUID by default.
:vartype vnetid: str
:ivar subnetname: The name of the subnet.
:vartype subnetname: str
:param subnet_resource_id: The full resource ID of a subnet in a virtual
network to deploy the API Management service in.
:type subnet_resource_id: str
"""
_validation = {
'vnetid': {'readonly': True},
'subnetname': {'readonly': True},
'subnet_resource_id': {'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.(ClassicNetwork|Network)/virtualNetworks/[^/]*/subnets/[^/]*$'},
}
_attribute_map = {
'vnetid': {'key': 'vnetid', 'type': 'str'},
'subnetname': {'key': 'subnetname', 'type': 'str'},
'subnet_resource_id': {'key': 'subnetResourceId', 'type': 'str'},
}
| 38.22449
| 171
| 0.623065
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VirtualNetworkConfiguration(Model):
"""Configuration of a virtual network to which API Management service is
deployed.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar vnetid: The virtual network ID. This is typically a GUID. Expect a
null GUID by default.
:vartype vnetid: str
:ivar subnetname: The name of the subnet.
:vartype subnetname: str
:param subnet_resource_id: The full resource ID of a subnet in a virtual
network to deploy the API Management service in.
:type subnet_resource_id: str
"""
_validation = {
'vnetid': {'readonly': True},
'subnetname': {'readonly': True},
'subnet_resource_id': {'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.(ClassicNetwork|Network)/virtualNetworks/[^/]*/subnets/[^/]*$'},
}
_attribute_map = {
'vnetid': {'key': 'vnetid', 'type': 'str'},
'subnetname': {'key': 'subnetname', 'type': 'str'},
'subnet_resource_id': {'key': 'subnetResourceId', 'type': 'str'},
}
def __init__(self, *, subnet_resource_id: str=None, **kwargs) -> None:
super(VirtualNetworkConfiguration, self).__init__(**kwargs)
self.vnetid = None
self.subnetname = None
self.subnet_resource_id = subnet_resource_id
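# A short usage sketch; the subnet resource ID below is a made-up placeholder that
# matches the validation pattern declared above.
example_vnet = VirtualNetworkConfiguration(
    subnet_resource_id=(
        "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg"
        "/providers/Microsoft.Network/virtualNetworks/vnet/subnets/default"
    )
)
# vnetid and subnetname stay None on the client side; they are populated by the service.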
| 228
| 0
| 27
|
7fd3683a28de92b0f6f5f6203676d99dc6051d1a
| 1,289
|
py
|
Python
|
django/app_website/views.py
|
a-rey/aaronmreyes_heroku
|
f397741ec33a35c318b6e4d51837b352183085f9
|
[
"MIT"
] | 1
|
2022-03-12T22:23:44.000Z
|
2022-03-12T22:23:44.000Z
|
django/app_website/views.py
|
a-rey/docker_website
|
f397741ec33a35c318b6e4d51837b352183085f9
|
[
"MIT"
] | 2
|
2020-04-07T22:09:50.000Z
|
2020-04-07T22:09:50.000Z
|
django/app_website/views.py
|
a-rey/docker_website
|
f397741ec33a35c318b6e4d51837b352183085f9
|
[
"MIT"
] | null | null | null |
import django.shortcuts
def main(request):
"""
request handler for '/'.
"""
return django.shortcuts.render(request, 'app_website/index.html', {})
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# global error handlers for app_website
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def error_400(request, exception):
"""
request handler for a 400 error.
"""
context = {
'err': '[400 Bad Request] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
def error_403(request, exception):
"""
request handler for a 403 error.
"""
context = {
'err': '[403 Permission Denied] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
def error_404(request, exception):
"""
request handler for a 404 error.
"""
context = {
'err': '[404 Page Not Found] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
def error_500(request):
"""
request handler for a 500 error.
"""
context = {
'err': '[500 Server Error] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
| 23.87037
| 79
| 0.582622
|
import django.shortcuts
def main(request):
"""
request handler for '/'.
"""
return django.shortcuts.render(request, 'app_website/index.html', {})
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# global error handlers for app_website
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def error_400(request, exception):
"""
request handler for a 400 error.
"""
context = {
'err': '[400 Bad Request] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
def error_403(request, exception):
"""
request handler for a 403 error.
"""
context = {
'err': '[403 Permission Denied] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
def error_404(request, exception):
"""
request handler for a 404 error.
"""
context = {
'err': '[404 Page Not Found] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
def error_500(request):
"""
request handler for a 500 error.
"""
context = {
'err': '[500 Server Error] Path: ' + request.path,
}
return django.shortcuts.render(request, 'app_website/error.html', context)
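# A hedged wiring sketch: in the project's root urls.py (module path assumed from this
# app's name) these handlers would typically be registered as
#
#   handler400 = "app_website.views.error_400"
#   handler403 = "app_website.views.error_403"
#   handler404 = "app_website.views.error_404"
#   handler500 = "app_website.views.error_500"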
| 0
| 0
| 0
|
f3f230d0ddb68e27c72ea274c9b6285a7b13450d
| 53
|
py
|
Python
|
backend/app/views/sculpt_a_sequence/__init__.py
|
Edinburgh-Genome-Foundry/CUBA
|
d57565951ead619ef9263e8b356b451001fb910f
|
[
"MIT"
] | 15
|
2018-02-12T13:12:13.000Z
|
2021-08-15T11:37:59.000Z
|
backend/app/views/sculpt_a_sequence/__init__.py
|
Edinburgh-Genome-Foundry/CUBA
|
d57565951ead619ef9263e8b356b451001fb910f
|
[
"MIT"
] | 9
|
2020-06-05T17:54:54.000Z
|
2022-02-12T12:03:19.000Z
|
backend/app/views/sculpt_a_sequence/__init__.py
|
Edinburgh-Genome-Foundry/CUBA
|
d57565951ead619ef9263e8b356b451001fb910f
|
[
"MIT"
] | 3
|
2018-10-18T13:08:50.000Z
|
2020-08-17T14:09:46.000Z
|
from .SculptASequenceView import SculptASequenceView
| 26.5
| 52
| 0.90566
|
from .SculptASequenceView import SculptASequenceView
| 0
| 0
| 0
|
28bef48c033a719579453f4084bb87df283e7408
| 13,037
|
py
|
Python
|
ansible/lib/ansible/module_utils/netcfg.py
|
kiv-box/kafka
|
debec1c4bc8c43776070ee447a53b55fef42bd52
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/module_utils/netcfg.py
|
kiv-box/kafka
|
debec1c4bc8c43776070ee447a53b55fef42bd52
|
[
"Apache-2.0"
] | null | null | null |
ansible/lib/ansible/module_utils/netcfg.py
|
kiv-box/kafka
|
debec1c4bc8c43776070ee447a53b55fef42bd52
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import itertools
import re
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import zip, zip_longest
DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/']
| 31.490338
| 92
| 0.554192
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import itertools
import re
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import zip, zip_longest
DEFAULT_COMMENT_TOKENS = ['#', '!', '/*', '*/']
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class Config(object):
def __init__(self, connection):
self.connection = connection
def __call__(self, commands, **kwargs):
lines = to_list(commands)
return self.connection.configure(lines, **kwargs)
def load_config(self, commands, **kwargs):
commands = to_list(commands)
return self.connection.load_config(commands, **kwargs)
def get_config(self, **kwargs):
return self.connection.get_config(**kwargs)
def save_config(self):
return self.connection.save_config()
class ConfigLine(object):
def __init__(self, text):
self.text = text
self.children = list()
self.parents = list()
self.raw = None
@property
def line(self):
line = [p.text for p in self.parents]
line.append(self.text)
return ' '.join(line)
def __str__(self):
return self.raw
def __eq__(self, other):
return self.line == other.line
def __ne__(self, other):
return not self.__eq__(other)
def ignore_line(text, tokens=None):
for item in (tokens or DEFAULT_COMMENT_TOKENS):
if text.startswith(item):
return True
def get_next(iterable):
item, next_item = itertools.tee(iterable, 2)
next_item = itertools.islice(next_item, 1, None)
return zip_longest(item, next_item)
def parse(lines, indent, comment_tokens=None):
toplevel = re.compile(r'\S')
childline = re.compile(r'^\s*(.+)$')
ancestors = list()
config = list()
for line in str(lines).split('\n'):
text = str(re.sub(r'([{};])', '', line)).strip()
cfg = ConfigLine(text)
cfg.raw = line
if not text or ignore_line(text, comment_tokens):
continue
# handle top level commands
if toplevel.match(line):
ancestors = [cfg]
# handle sub level commands
else:
match = childline.match(line)
line_indent = match.start(1)
level = int(line_indent / indent)
parent_level = level - 1
cfg.parents = ancestors[:level]
if level > len(ancestors):
config.append(cfg)
continue
for i in range(level, len(ancestors)):
ancestors.pop()
ancestors.append(cfg)
ancestors[parent_level].children.append(cfg)
config.append(cfg)
return config
def dumps(objects, output='block'):
if output == 'block':
items = [c.raw for c in objects]
elif output == 'commands':
items = [c.text for c in objects]
elif output == 'lines':
items = list()
for obj in objects:
line = list()
line.extend([p.text for p in obj.parents])
line.append(obj.text)
items.append(' '.join(line))
else:
raise TypeError('unknown value supplied for keyword output')
return '\n'.join(items)
class NetworkConfig(object):
def __init__(self, indent=None, contents=None, device_os=None):
self.indent = indent or 1
self._config = list()
self._device_os = device_os
self._syntax = 'block' # block, lines, junos
if self._device_os == 'junos':
self._syntax = 'junos'
if contents:
self.load(contents)
@property
def items(self):
return self._config
def __str__(self):
if self._device_os == 'junos':
return dumps(self.expand_line(self.items), 'lines')
return dumps(self.expand_line(self.items))
def load(self, contents):
# Going to start adding device profiles post 2.2
tokens = list(DEFAULT_COMMENT_TOKENS)
if self._device_os == 'sros':
tokens.append('echo')
self._config = parse(contents, indent=4, comment_tokens=tokens)
else:
self._config = parse(contents, indent=self.indent)
def load_from_file(self, filename):
self.load(open(filename).read())
def get(self, path):
if isinstance(path, string_types):
path = [path]
for item in self._config:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def search(self, regexp, path=None):
regex = re.compile(r'^%s' % regexp, re.M)
if path:
parent = self.get(path)
if not parent or not parent.children:
return
children = [c.text for c in parent.children]
data = '\n'.join(children)
else:
data = str(self)
match = regex.search(data)
if match:
if match.groups():
values = match.groupdict().values()
groups = list(set(match.groups()).difference(values))
return (groups, match.groupdict())
else:
return match.group()
def findall(self, regexp):
regexp = r'%s' % regexp
return re.findall(regexp, str(self))
def expand_line(self, objs):
visited = set()
expanded = list()
for o in objs:
for p in o.parents:
if p not in visited:
visited.add(p)
expanded.append(p)
expanded.append(o)
visited.add(o)
return expanded
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def expand_block(self, objects, visited=None):
items = list()
if not visited:
visited = set()
for o in objects:
items.append(o)
visited.add(o)
for child in o.children:
items.extend(self.expand_block([child], visited))
return items
def diff_line(self, other, path=None):
diff = list()
for item in self.items:
if item not in other:
diff.append(item)
return diff
def diff_strict(self, other, path=None):
diff = list()
for index, item in enumerate(self.items):
try:
if item != other[index]:
diff.append(item)
except IndexError:
diff.append(item)
return diff
def diff_exact(self, other, path=None):
diff = list()
if len(other) != len(self.items):
diff.extend(self.items)
else:
for ours, theirs in zip(self.items, other):
if ours != theirs:
diff.extend(self.items)
break
return diff
def difference(self, other, path=None, match='line', replace='line'):
try:
if path and match != 'line':
try:
other = other.get_section_objects(path)
except ValueError:
other = list()
else:
other = other.items
func = getattr(self, 'diff_%s' % match)
updates = func(other, path=path)
except AttributeError:
            raise TypeError('invalid value for match keyword')
if self._device_os == 'junos':
return updates
if replace == 'block':
parents = list()
for u in updates:
if u.parents is None:
if u not in parents:
parents.append(u)
else:
for p in u.parents:
if p not in parents:
parents.append(p)
return self.expand_block(parents)
return self.expand_line(updates)
def replace(self, patterns, repl, parents=None, add_if_missing=False,
ignore_whitespace=True):
match = None
parents = to_list(parents) or list()
patterns = [re.compile(r, re.I) for r in to_list(patterns)]
for item in self.items:
for regexp in patterns:
text = item.text
if not ignore_whitespace:
text = item.raw
if regexp.search(text):
if item.text != repl:
if parents == [p.text for p in item.parents]:
match = item
break
if match:
match.text = repl
indent = len(match.raw) - len(match.raw.lstrip())
match.raw = repl.rjust(len(repl) + indent)
elif add_if_missing:
self.add(repl, parents=parents)
    def add(self, lines, parents=None):
        """Adds one or more lines of configuration.
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
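# A minimal sketch of how the pieces above fit together; the configuration text is
# illustrative and only names defined in this module are used.
if __name__ == '__main__':
    _cfg = NetworkConfig(indent=1, contents="interface eth0\n description uplink")
    print(dumps(_cfg.items, output='lines'))  # prints the top-level line and its expanded child line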
| 8,508
| 2,341
| 319
|
315d972515790c8be08d568bf8607f2d1b58ff02
| 4,054
|
py
|
Python
|
tests/func/test_check_ignore.py
|
santos22/dvc
|
adce620621230da180c184d5b295ad3129560251
|
[
"Apache-2.0"
] | null | null | null |
tests/func/test_check_ignore.py
|
santos22/dvc
|
adce620621230da180c184d5b295ad3129560251
|
[
"Apache-2.0"
] | null | null | null |
tests/func/test_check_ignore.py
|
santos22/dvc
|
adce620621230da180c184d5b295ad3129560251
|
[
"Apache-2.0"
] | null | null | null |
import os
import pytest
from dvc.ignore import DvcIgnore
from dvc.main import main
@pytest.mark.parametrize(
"file,ret,output", [("ignored", 0, True), ("not_ignored", 1, False)]
)
@pytest.mark.parametrize(
"file,ret,output",
[
("file", 0, "{}:1:f*\tfile\n".format(DvcIgnore.DVCIGNORE_FILE)),
("foo", 0, "{}:2:!foo\tfoo\n".format(DvcIgnore.DVCIGNORE_FILE)),
(
os.path.join("dir", "foobar"),
0,
"{}:1:foobar\t{}\n".format(
os.path.join("dir", DvcIgnore.DVCIGNORE_FILE),
os.path.join("dir", "foobar"),
),
),
],
)
@pytest.mark.parametrize("non_matching", [True, False])
@pytest.mark.parametrize(
"args",
[
["-n", "file"],
["-a", "file"],
["-q", "-d", "file"],
["--stdin", "file"],
[],
],
)
@pytest.mark.parametrize("path,ret", [({"dir": {}}, 0), ({"dir": "files"}, 1)])
@pytest.mark.parametrize(
"file,ret,output", [("ignored", 0, True), ("not_ignored", 1, False)]
)
| 29.808824
| 79
| 0.602121
|
import os
import pytest
from dvc.ignore import DvcIgnore
from dvc.main import main
@pytest.mark.parametrize(
"file,ret,output", [("ignored", 0, True), ("not_ignored", 1, False)]
)
def test_check_ignore(tmp_dir, dvc, file, ret, output, caplog):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "ignored")
assert main(["check-ignore", file]) == ret
assert (file in caplog.text) is output
assert "Having any troubles?" not in caplog.text
@pytest.mark.parametrize(
"file,ret,output",
[
("file", 0, "{}:1:f*\tfile\n".format(DvcIgnore.DVCIGNORE_FILE)),
("foo", 0, "{}:2:!foo\tfoo\n".format(DvcIgnore.DVCIGNORE_FILE)),
(
os.path.join("dir", "foobar"),
0,
"{}:1:foobar\t{}\n".format(
os.path.join("dir", DvcIgnore.DVCIGNORE_FILE),
os.path.join("dir", "foobar"),
),
),
],
)
def test_check_ignore_details(tmp_dir, dvc, file, ret, output, caplog):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "f*\n!foo")
tmp_dir.gen({"dir": {DvcIgnore.DVCIGNORE_FILE: "foobar"}})
assert main(["check-ignore", "-d", file]) == ret
assert output in caplog.text
@pytest.mark.parametrize("non_matching", [True, False])
def test_check_ignore_non_matching(tmp_dir, dvc, non_matching, caplog):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "other")
if non_matching:
assert main(["check-ignore", "-d", "-n", "file"]) == 1
else:
assert main(["check-ignore", "-d", "file"]) == 1
assert ("::\tfile\n" in caplog.text) is non_matching
@pytest.mark.parametrize(
"args",
[
["-n", "file"],
["-a", "file"],
["-q", "-d", "file"],
["--stdin", "file"],
[],
],
)
def test_check_ignore_error_args_cases(tmp_dir, dvc, args, caplog):
assert main(["check-ignore"] + args) == 255
assert ("Having any troubles?" in caplog.text) == ("-q" not in args)
@pytest.mark.parametrize("path,ret", [({"dir": {}}, 0), ({"dir": "files"}, 1)])
def test_check_ignore_dir(tmp_dir, dvc, path, ret):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "dir/")
tmp_dir.gen(path)
assert main(["check-ignore", "-q", "dir"]) == ret
def test_check_ignore_default_dir(tmp_dir, dvc):
assert main(["check-ignore", "-q", ".dvc"]) == 1
def test_check_ignore_out_side_repo(tmp_dir, dvc):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "file")
assert main(["check-ignore", "-q", "../file"]) == 1
def test_check_ignore_sub_repo(tmp_dir, dvc):
tmp_dir.gen(
{DvcIgnore.DVCIGNORE_FILE: "other", "dir": {".dvc": {}, "foo": "bar"}}
)
assert main(["check-ignore", "-q", os.path.join("dir", "foo")]) == 1
def test_check_sub_dir_ignore_file(tmp_dir, dvc, caplog):
tmp_dir.gen(
{
DvcIgnore.DVCIGNORE_FILE: "other",
"dir": {DvcIgnore.DVCIGNORE_FILE: "bar\nfoo", "foo": "bar"},
}
)
assert main(["check-ignore", "-d", os.path.join("dir", "foo")]) == 0
assert (
"{}:2:foo\t{}".format(
os.path.join("dir", DvcIgnore.DVCIGNORE_FILE),
os.path.join("dir", "foo"),
)
in caplog.text
)
sub_dir = tmp_dir / "dir"
with sub_dir.chdir():
assert main(["check-ignore", "-d", "foo"]) == 0
assert ".dvcignore:2:foo\tfoo" in caplog.text
def test_check_ignore_details_all(tmp_dir, dvc, caplog):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "f*\n!foo")
assert main(["check-ignore", "-d", "-a", "foo"]) == 0
assert "{}:1:f*\tfoo\n".format(DvcIgnore.DVCIGNORE_FILE) in caplog.text
assert "{}:2:!foo\tfoo\n".format(DvcIgnore.DVCIGNORE_FILE) in caplog.text
@pytest.mark.parametrize(
"file,ret,output", [("ignored", 0, True), ("not_ignored", 1, False)]
)
def test_check_ignore_stdin_mode(
tmp_dir, dvc, file, ret, output, caplog, mocker
):
tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "ignored")
mocker.patch("builtins.input", side_effect=[file, ""])
assert main(["check-ignore", "--stdin"]) == ret
assert (file in caplog.text) is output
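# A condensed sketch mirroring the cases above (assumes the same tmp_dir/dvc fixtures):
# exit code 0 means the path matches an ignore pattern, 1 means it does not.
def test_check_ignore_sketch(tmp_dir, dvc):
    tmp_dir.gen(DvcIgnore.DVCIGNORE_FILE, "ignored")
    assert main(["check-ignore", "-q", "ignored"]) == 0
    assert main(["check-ignore", "-q", "not_ignored"]) == 1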
| 2,741
| 0
| 247
|
14401b26c4f2699577a5a8de47556e68420e2570
| 3,923
|
py
|
Python
|
streamdeck.py
|
the-gearheads/StreamDeck
|
616d06e28c0e79093507a656a6972a916ab17fc7
|
[
"MIT"
] | null | null | null |
streamdeck.py
|
the-gearheads/StreamDeck
|
616d06e28c0e79093507a656a6972a916ab17fc7
|
[
"MIT"
] | null | null | null |
streamdeck.py
|
the-gearheads/StreamDeck
|
616d06e28c0e79093507a656a6972a916ab17fc7
|
[
"MIT"
] | null | null | null |
import os
import threading
import time
from networktables import NetworkTables
from PIL import Image
from PIL.ImageColor import getcolor, getrgb
from PIL.ImageOps import grayscale
from StreamDeck.DeviceManager import DeviceManager
from StreamDeck.ImageHelpers import PILHelper
ASSETS_PATH = os.path.join(os.path.dirname(__file__), "assets")
ASSETS_PATH = os.path.join(os.path.dirname(__file__), "icons")
# As a client to connect to a robot
NetworkTables.initialize(server="10.11.89.2")
# NetworkTables.initialize(server="127.0.0.1")
time.sleep(3)
sd = NetworkTables.getTable("StreamDeck/0")
# a = [
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# ]
# sd.putStringArray("Icons", a)
buttons = []
for i in range(0, 15):
sd.putBoolean(f"Action/{i}", False)
sd.putBoolean(f"Status/{i}", False)
button = Button(i)
buttons.append(button)
deck = DeviceManager().enumerate()[0]
deck.open()
deck.reset()
print(
"Opened '{}' device (serial number: '{}')".format(
deck.deck_type(), deck.get_serial_number()
)
)
# Set initial screen brightness to 30%.
deck.set_brightness(30)
# Set initial key images.
# for key in range(deck.key_count()):
# update_key_image(deck, key, False)
# Register callback function for when a key state changes.
deck.set_key_callback(key_change_callback)
while True:
for button in buttons:
button.update(deck)
# Wait until all application threads have terminated (for this example,
# this is when all deck handles are closed).
for t in threading.enumerate():
if t is threading.currentThread():
continue
if t.is_alive():
t.join()
| 25.640523
| 81
| 0.624522
|
import os
import threading
import time
from networktables import NetworkTables
from PIL import Image
from PIL.ImageColor import getcolor, getrgb
from PIL.ImageOps import grayscale
from StreamDeck.DeviceManager import DeviceManager
from StreamDeck.ImageHelpers import PILHelper
ASSETS_PATH = os.path.join(os.path.dirname(__file__), "assets")
def image_tint(src, tint="#ffffff"): # From https://stackoverflow.com/a/12310820
if src.mode not in ["RGB", "RGBA"]:
raise TypeError("Unsupported source image mode: {}".format(src.mode))
src.load()
tr, tg, tb = getrgb(tint)
tl = getcolor(tint, "L")
if not tl:
tl = 1
tl = float(tl)
sr, sg, sb = map(lambda tv: tv / tl, (tr, tg, tb))
luts = (
tuple(map(lambda lr: int(lr * sr + 0.5), range(256)))
+ tuple(map(lambda lg: int(lg * sg + 0.5), range(256)))
+ tuple(map(lambda lb: int(lb * sb + 0.5), range(256)))
)
l = grayscale(src)
if Image.getmodebands(src.mode) < 4:
merge_args = (src.mode, (l, l, l))
    else:
        # RGBA source: carry over a copy of the alpha channel, mapped 1:1
        a = Image.new("L", src.size)
        a.putdata(src.getdata(3))
        merge_args = (src.mode, (l, l, l, a))  # was missing; without it RGBA input hits UnboundLocalError
        luts += tuple(range(256))
return Image.merge(*merge_args).point(luts)
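# Quick in-memory sanity check for image_tint above (a sketch; no asset files involved):
if __name__ == "__main__":
    _white = Image.new("RGB", (4, 4), "white")
    _tinted = image_tint(_white, tint="#ff0000")
    assert _tinted.mode == "RGB" and _tinted.size == _white.size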
ASSETS_PATH = os.path.join(os.path.dirname(__file__), "icons")
def render_key_image(deck, icon_filename):
image = PILHelper.create_image(deck)
icon = Image.open(ASSETS_PATH + "/" + icon_filename).convert("RGBA")
icon.thumbnail((image.width, image.height - 20), Image.LANCZOS)
icon_pos = ((image.width - icon.width) // 2, 0)
image.paste(icon, icon_pos, icon)
return image
def key_change_callback(deck, key, state):
if state:
button = buttons[key]
button.set(deck)
class Button:
def __init__(self, key):
self.key = key
self.active = False
def set(self, deck):
sd.putBoolean(f"Action/{self.key}", True)
self.update(deck)
def update(self, deck):
x = sd.getBoolean(f"Status/{self.key}", False)
y = sd.getBoolean(f"Action/{self.key}", False)
icon_array = sd.getStringArray("Icons", [])
name = icon_array[self.key]
image = None
if x:
image = render_key_image(deck, name + "/active.png")
else:
image = render_key_image(deck, name + "/inactive.png")
if y:
image = image_tint(image, tint="#882020")
image = PILHelper.to_native_format(deck, image)
deck.set_key_image(self.key, image)
# As a client to connect to a robot
NetworkTables.initialize(server="10.11.89.2")
# NetworkTables.initialize(server="127.0.0.1")
time.sleep(3)
sd = NetworkTables.getTable("StreamDeck/0")
# a = [
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# "default",
# ]
# sd.putStringArray("Icons", a)
buttons = []
for i in range(0, 15):
sd.putBoolean(f"Action/{i}", False)
sd.putBoolean(f"Status/{i}", False)
button = Button(i)
buttons.append(button)
deck = DeviceManager().enumerate()[0]
deck.open()
deck.reset()
print(
"Opened '{}' device (serial number: '{}')".format(
deck.deck_type(), deck.get_serial_number()
)
)
# Set initial screen brightness to 30%.
deck.set_brightness(30)
# Set initial key images.
# for key in range(deck.key_count()):
# update_key_image(deck, key, False)
# Register callback function for when a key state changes.
deck.set_key_callback(key_change_callback)
while True:
for button in buttons:
button.update(deck)
# Wait until all application threads have terminated (for this example,
# this is when all deck handles are closed).
for t in threading.enumerate():
if t is threading.currentThread():
continue
if t.is_alive():
t.join()
| 1,919
| -8
| 172
|
873966cecd914fb9e587a731142d35c5dac67b39
| 9,649
|
py
|
Python
|
_curry.py
|
GoNZooo/dragonfly-grammars
|
22d639d8f86f4f5a7c44caa73e75c4938c0ce199
|
[
"MIT"
] | 3
|
2020-09-06T10:40:19.000Z
|
2020-09-29T20:39:52.000Z
|
_curry.py
|
GoNZooo/dragonfly-grammars
|
22d639d8f86f4f5a7c44caa73e75c4938c0ce199
|
[
"MIT"
] | null | null | null |
_curry.py
|
GoNZooo/dragonfly-grammars
|
22d639d8f86f4f5a7c44caa73e75c4938c0ce199
|
[
"MIT"
] | null | null | null |
from dragonfly import (Grammar, CompoundRule, Text, MappingRule, Dictation, Function, Choice)
from macro_utilities import (replace_in_text, comment_choice, execute_with_dictation)
from vim.rules.letter import (camel_case, proper)
comparison_choice_map = {
"equal": "==",
"not equal": "/=",
"less or equal": "<=",
"greater or equal": ">=",
"less": "<",
"greater": ">",
}
stack_command_choice_map = {
"build fast": "build --fast",
"build": "build",
"shell": "repl",
"shall": "repl",
"test": "test",
"test fast": "test --fast",
"run": "run",
"install": "install",
}
# The main Curry grammar rules are activated here
curryBootstrap = Grammar("curry bootstrap")
curryBootstrap.add_rule(CurryEnabler())
curryBootstrap.load()
curryGrammar = Grammar("curry grammar")
curryGrammar.add_rule(CurryUtilities())
curryGrammar.add_rule(CurryDisabler())
curryGrammar.load()
curryGrammar.disable()
| 31.025723
| 96
| 0.635092
|
from dragonfly import (Grammar, CompoundRule, Text, MappingRule, Dictation, Function, Choice)
from macro_utilities import (replace_in_text, comment_choice, execute_with_dictation)
from vim.rules.letter import (camel_case, proper)
class CurryEnabler(CompoundRule):
spec = "enable Curry"
def _process_recognition(self, node, extras):
curryBootstrap.disable()
curryGrammar.enable()
print("Curry grammar enabled!")
class CurryDisabler(CompoundRule):
spec = "disable Curry"
def _process_recognition(self, node, extras):
curryGrammar.disable()
curryBootstrap.enable()
print("Curry grammar disabled")
def dictation_to_identifier(dictation):
return camel_case(str(dictation).lower())
def output_bind(name):
if name == "":
command = replace_in_text("$ <- _")
else:
name = dictation_to_identifier(name)
command = replace_in_text("%s <- $" % name)
command.execute()
def output_let(name):
if name == "":
command = replace_in_text("let $ = _")
else:
name = dictation_to_identifier(name)
command = replace_in_text("let %s = $" % name)
command.execute()
def output_case(name):
if name == "":
command = replace_in_text("case $ of")
else:
name = dictation_to_identifier(name)
command = Text("case %s of" % name)
command.execute()
comparison_choice_map = {
"equal": "==",
"not equal": "/=",
"less or equal": "<=",
"greater or equal": ">=",
"less": "<",
"greater": ">",
}
def comparison_choice(name="comparison"):
return Choice(name, comparison_choice_map)
def output_if_comparison(name, construct, comparison=None):
if comparison is not None:
execute_with_dictation(
name,
lambda n: replace_in_text(
"%s %s %s $ then _ else _" % (construct, dictation_to_identifier(n), comparison)
),
lambda n: replace_in_text("%s $ %s _ then _ else _" % (construct, comparison))
)
def output_if(name):
if name == "":
command = replace_in_text("if $ then _ else _")
else:
name = dictation_to_identifier(name)
command = replace_in_text("if %s then $ else _" % name)
command.execute()
def output_type_signature(name):
if name == "":
command = replace_in_text("$ :: _")
else:
name = dictation_to_identifier(name)
command = replace_in_text("%s :: $" % name)
command.execute()
def format_module_name(module_name):
module_name = str(module_name)
components = module_name.split(".")
formatted_components = []
for component in components:
formatted_components.append(proper(component))
return ".".join(formatted_components)
def output_import(import_name):
if import_name == "":
command = replace_in_text("import $ (_)")
else:
import_name = format_module_name(import_name)
command = replace_in_text("import %s ($)" % import_name)
command.execute()
def output_import_qualified(import_name):
if import_name == "":
command = replace_in_text("import qualified $ as _")
else:
import_name = format_module_name(import_name)
command = replace_in_text("import qualified %s as $" % import_name)
command.execute()
def output_import_as(import_name):
if import_name == "":
command = replace_in_text("import $ as _")
else:
import_name = format_module_name(import_name)
command = replace_in_text("import %s as $" % import_name)
command.execute()
def output_binding(name):
if name == "":
command = replace_in_text("$ = _")
else:
name = dictation_to_identifier(name)
command = Text("%s = " % name)
command.execute()
def output_check_equal(name):
output_check_comparison("==", name)
def output_check_not_equal(name):
output_check_comparison("/=", name)
def output_check_comparison(comparison, name):
if name == "":
command = replace_in_text("$ %s _" % comparison)
else:
name = dictation_to_identifier(name)
command = replace_in_text("%s %s $" % (name, comparison))
command.execute()
def output_data_type(type_name):
if type_name == "":
command = replace_in_text("data $ =")
else:
type_name = dictation_to_type_name(type_name)
command = Text("data %s = " % type_name)
command.execute()
def dictation_to_type_name(name):
return proper(str(name).replace("-", ""))
def output_new_type(type_name, new_type_base):
if type_name == "" and new_type_base == "":
command = replace_in_text("newtype $ = _")
elif type_name == "":
new_type_base = dictation_to_type_name(new_type_base)
command = replace_in_text("newtype $ = _ %s" % new_type_base)
elif new_type_base == "":
type_name = dictation_to_type_name(type_name)
command = Text("newtype %s = %s " % (type_name, type_name))
else:
type_name = dictation_to_type_name(type_name)
new_type_base = dictation_to_type_name(new_type_base)
command = Text("newtype %s = %s %s" % (type_name, type_name, new_type_base))
command.execute()
def output_wrapped_type(type_name, new_type_base):
if type_name == "" and new_type_base == "":
command = replace_in_text("type $ = Wrapped \"_\" _")
elif type_name == "":
new_type_base = dictation_to_type_name(new_type_base)
command = replace_in_text("type $ = Wrapped \"_\" %s" % new_type_base)
elif new_type_base == "":
type_name = dictation_to_type_name(type_name)
command = Text("type %s = Wrapped \"%s\" " % (type_name, type_name))
else:
type_name = dictation_to_type_name(type_name)
new_type_base = dictation_to_type_name(new_type_base)
command = Text("type %s = Wrapped \"%s\" %s" % (type_name, type_name, new_type_base))
command.execute()
def output_language_extension(language_extension):
Text("{-# LANGUAGE %s #-}" % proper(str(language_extension))).execute()
def output_comment(comment, comment_type=None):
if comment_type is None:
command = Text("-- %s" % comment)
else:
command = Text("-- %s %s" % (comment_type, comment))
command.execute()
stack_command_choice_map = {
"build fast": "build --fast",
"build": "build",
"shell": "repl",
"shall": "repl",
"test": "test",
"test fast": "test --fast",
"run": "run",
"install": "install",
}
def stack_command_choice(name="stack_command"):
return Choice(name, stack_command_choice_map)
def output_stack_command(stack_command=None):
command_text = "stack "
if stack_command is None:
command = Text(command_text)
else:
command = Text(command_text + str(stack_command))
command.execute()
class CurryUtilities(MappingRule):
mapping = {
"if [<name>] is <comparison>": Function(output_if_comparison, construct="if"),
"if [<name>]": Function(output_if),
"case [on <name>]": Function(output_case),
"let [<name>]": Function(output_let),
"anonymous function": replace_in_text("\\$ -> _"),
"signature [for <name>]": Function(output_type_signature),
"import [<import_name>]": Function(output_import),
"import qualified [<import_name>]": Function(output_import_qualified),
"import [<import_name>] as": Function(output_import_as),
"qualified pure script import": replace_in_text("import $ as _"),
"check [<name>] is equal": Function(output_check_equal),
"check [<name>] is not equal": Function(output_check_not_equal),
"map": Text(" <$> "),
"apply": Text(" <*> "),
"operator bind": Text(" >>= "),
"discard": Text(" >> "),
"[<name>] equals": Function(output_binding),
"bind [<name>]": Function(output_bind),
"backwards arrow": Text(" <- "),
"backwards fat arrow": Text(" <= "),
"arrow": Text(" -> "),
"fat arrow": Text(" => "),
"monad reader": Text("MonadReader env m"),
"monad IO": Text("MonadIO m"),
"monad unlift": Text("MonadUnliftIO m"),
"monad state": Text("MonadState s m"),
"data type [<type_name>]": Function(output_data_type),
"new type [<new_type_base>] is called [<type_name>]": Function(output_new_type),
"wrapped [<new_type_base>] is called [<type_name>]": Function(output_wrapped_type),
"language extension <language_extension>": Function(output_language_extension),
"[<comment_type>] comment [<comment>]": Function(output_comment),
# terminal commands
"stack [<stack_command>]": Function(output_stack_command),
}
extras = [
Dictation("name", default=""),
Dictation("comment", default=""),
Dictation("type_name", default=""),
Dictation("new_type_base", default=""),
Dictation("import_name", default=""),
Dictation("language_extension", default=""),
comment_choice("comment_type"),
stack_command_choice("stack_command"),
comparison_choice("comparison"),
]
# The main Curry grammar rules are activated here
curryBootstrap = Grammar("curry bootstrap")
curryBootstrap.add_rule(CurryEnabler())
curryBootstrap.load()
curryGrammar = Grammar("curry grammar")
curryGrammar.add_rule(CurryUtilities())
curryGrammar.add_rule(CurryDisabler())
curryGrammar.load()
curryGrammar.disable()
def unload():
global curryGrammar
if curryGrammar:
curryGrammar.unload()
curryGrammar = None
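# A tiny standalone illustration of the identifier conversion this grammar relies on.
# camel_case_demo is a local stand-in written for this sketch, not the camel_case
# imported from vim.rules.letter above.
def camel_case_demo(words):
    parts = words.lower().split()
    return parts[0] + "".join(p.title() for p in parts[1:])


assert camel_case_demo("read file contents") == "readFileContents"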
| 5,599
| 2,434
| 644
|
3bbe192c0bd0ecce276a3a5226d771eca7a1f92d
| 712
|
py
|
Python
|
updatorr/__init__.py
|
idlesign/deluge-updatorr
|
aa7c5b0a312fdef580c8dd263760d48f3ac2a56e
|
[
"BSD-3-Clause"
] | 14
|
2015-02-24T21:50:06.000Z
|
2022-01-17T07:51:35.000Z
|
updatorr/__init__.py
|
idlesign/deluge-updatorr
|
aa7c5b0a312fdef580c8dd263760d48f3ac2a56e
|
[
"BSD-3-Clause"
] | 1
|
2016-07-07T05:15:37.000Z
|
2016-07-07T05:50:56.000Z
|
updatorr/__init__.py
|
idlesign/deluge-updatorr
|
aa7c5b0a312fdef580c8dd263760d48f3ac2a56e
|
[
"BSD-3-Clause"
] | 4
|
2015-04-27T10:47:23.000Z
|
2019-01-08T02:53:18.000Z
|
from deluge.plugins.init import PluginInitBase
VERSION = (0, 1, 8)
| 25.428571
| 54
| 0.714888
|
from deluge.plugins.init import PluginInitBase
VERSION = (0, 1, 8)
class CorePlugin(PluginInitBase):
def __init__(self, plugin_name):
from core import Core as _plugin_cls
self._plugin_cls = _plugin_cls
super(CorePlugin, self).__init__(plugin_name)
class GtkUIPlugin(PluginInitBase):
def __init__(self, plugin_name):
from gtkui import GtkUI as _plugin_cls
self._plugin_cls = _plugin_cls
super(GtkUIPlugin, self).__init__(plugin_name)
class WebUIPlugin(PluginInitBase):
def __init__(self, plugin_name):
from webui import WebUI as _plugin_cls
self._plugin_cls = _plugin_cls
super(WebUIPlugin, self).__init__(plugin_name)
| 453
| 38
| 149
|
18263fc551610af8df9f1b2b30108a9e0d99788f
| 993
|
py
|
Python
|
applications/users/views/profile.py
|
arielcalzadadeveloper/django-base-project
|
6e6b8d4f25f45d76f34633f38eaec8d4880130ea
|
[
"MIT"
] | null | null | null |
applications/users/views/profile.py
|
arielcalzadadeveloper/django-base-project
|
6e6b8d4f25f45d76f34633f38eaec8d4880130ea
|
[
"MIT"
] | null | null | null |
applications/users/views/profile.py
|
arielcalzadadeveloper/django-base-project
|
6e6b8d4f25f45d76f34633f38eaec8d4880130ea
|
[
"MIT"
] | null | null | null |
from django.shortcuts import reverse
from django.views.generic import UpdateView
from applications.users.forms.profile import ProfileForm
from applications.users.layouts.profile import ProfileLayout
from applications.users.mixins.authenticated import AuthenticatedMixin
from applications.common.mixins.add_message import AddMessageMixin
from applications.common.mixins.add_request_to_form import AddRequestToFormMixin
Profile = ProfileCBV.as_view()
| 31.03125
| 89
| 0.769386
|
from django.shortcuts import reverse
from django.views.generic import UpdateView
from applications.users.forms.profile import ProfileForm
from applications.users.layouts.profile import ProfileLayout
from applications.users.mixins.authenticated import AuthenticatedMixin
from applications.common.mixins.add_message import AddMessageMixin
from applications.common.mixins.add_request_to_form import AddRequestToFormMixin
class ProfileCBV(AddRequestToFormMixin, AddMessageMixin, AuthenticatedMixin, UpdateView):
template_name = "users/profile.html"
form_class = ProfileForm
def get_object(self, queryset=None):
return self.request.user
def get_context_data(self, **kwargs):
context_data = super(ProfileCBV, self).get_context_data(**kwargs)
context_data.update({
"form_layout": ProfileLayout(),
})
return context_data
def get_success_url(self):
return reverse("users:profile")
Profile = ProfileCBV.as_view()
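# A hedged wiring sketch; the app's urls.py is assumed to look roughly like this
# (the "users" namespace matches the reverse("users:profile") call above):
#
#   from django.urls import path
#   from applications.users.views.profile import Profile
#
#   app_name = "users"
#   urlpatterns = [path("profile/", Profile, name="profile")]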
| 298
| 219
| 23
|
eadbb2d6d149a0b57c452f236114b2ae0e66a136
| 9,334
|
py
|
Python
|
app/main/video/video.py
|
BorrowHome/flasky-sandbox
|
70ef7aa087a0954f7ff4b4845f6599d8481ef0b1
|
[
"Apache-2.0"
] | 1
|
2021-03-15T02:59:13.000Z
|
2021-03-15T02:59:13.000Z
|
app/main/video/video.py
|
BorrowHome/flasky-sandbox
|
70ef7aa087a0954f7ff4b4845f6599d8481ef0b1
|
[
"Apache-2.0"
] | 6
|
2021-03-19T09:49:44.000Z
|
2022-03-12T00:10:14.000Z
|
app/main/video/video.py
|
BorrowHome/flasky-sandbox
|
70ef7aa087a0954f7ff4b4845f6599d8481ef0b1
|
[
"Apache-2.0"
] | 2
|
2020-01-11T13:39:22.000Z
|
2020-07-02T03:57:43.000Z
|
# -*- coding: utf-8 -*-
import csv
import os
import cv2
import numpy as np
from flask import render_template, request, redirect, url_for
from flask import jsonify
from app.main import main
from app.utils.frame.frame import base64_to_png
from app.utils.frame.site import Site
from app.utils.frame.sub import PictureSub
from config import Config
import json
@main.route('/')
@main.route('/picture/', methods=['GET', 'POST'])
# INFO 2019/12/25 15:18 liliangbin background image setup
@main.route('/background/', methods=['GET', 'POST'])
# TODO 2020/1/4 15:13 liliangbin the returned address should be the position of the drawn box (video name and time position), now set via the frontend
@main.route('/site/', methods=['GET', 'POST'])
# TODO 2020/6/12 15:50 liliangbin this code could use a round of optimization
@main.route('/change_datas/', methods=['GET', 'POST'])
# INFO 2020/6/12 15:51 liliangbin get the user
@main.route("/site_get/", methods=['GET', 'POST'])
@main.route('/video_location/', methods=['POST'])
| 35.9
| 117
| 0.607028
|
# -*- coding: utf-8 -*-
import csv
import os
import cv2
import numpy as np
from flask import render_template, request, redirect, url_for
from flask import jsonify
from app.main import main
from app.utils.frame.frame import base64_to_png
from app.utils.frame.site import Site
from app.utils.frame.sub import PictureSub
from config import Config
import json
@main.route('/')
def index():
    # The main entry point here is our function's dir; an absolute path is preferable,
    # a relative path is used here temporarily.
    # url_for is called with the function name (the route name and function name should match).
video_names = []
path_in = './app/static/video/'
path_out = 'http://localhost:8082/static/video/'
image_path = Config.UPLOAD_IMAGE_PATH
document_path = Config.SAVE_DOCUMENT_PATH
for dirpath, dirnames, filenames in os.walk(path_in):
for filename in filenames:
# dir_file_name = os.path.join(dirpath, filename)
dir_file_name = filename
if os.path.splitext(dir_file_name)[1] == '.mp4' or '.avi': # (('./app/static/movie', '.mp4'))
print(dir_file_name)
video_names.append(dir_file_name)
with open(document_path + "site_0.txt", "r+") as f:
a = f.readlines()
print(a)
frame_location = Site(int(a[0]), int(a[1]), int(a[2]), int(a[3]))
video_src = video_names[0]
tmp2 = frame_location.locate_y + frame_location.move_y
tmp1 = frame_location.locate_x + frame_location.move_x
site_left_top = [str(frame_location.locate_x), str(frame_location.locate_y)]
site_left_bottom = [str(frame_location.locate_x), str(tmp2)]
site_right_top = [str(tmp1), str(frame_location.locate_y)]
site_right_bottom = [str(tmp1), str(tmp2)]
return jsonify(
{
'video_names': video_names,
'site': {
'site_left_top': site_left_top,
'site_left_bottom': site_left_bottom,
'site_right_top': site_right_top,
'site_right_bottom': site_right_bottom,
},
'video_src': video_src
}
)
@main.route('/picture/', methods=['GET', 'POST'])
def picture():
    # TODO 2019/10/1 12:02 liliangbin decode the current frame, call the image processing function and return a string
    # The incoming base64-encoded string must follow base64 padding rules: "when the original data
    # length is not a multiple of 3, append one '=' if two input bytes remain at the end;
    # append two '=' if one input byte remains; append nothing if no bytes remain, so the data
    # can be restored correctly."
#
import time
start = time.clock()
    # the code block being timed goes in between
image_path = Config.UPLOAD_IMAGE_PATH
document_path = Config.SAVE_DOCUMENT_PATH
if request.method == 'POST':
data = json.loads(request.get_data(as_text=True))
str = data.get('current_frame')
video_name = data.get('video_name')
imageStart = time.clock()
img_np = base64_to_png(str)
cv2.imwrite(image_path + "current_" + video_name + ".png", img_np)
imageEnd = time.clock()
print('base64topng: %s Seconds' % (imageEnd - imageStart))
res = {}
sub = PictureSub()
        # background image
writeImgeStart = time.clock()
background = cv2.imread(image_path + "back_" + video_name + ".png")
print(background.shape)
writeImgeEnd = time.clock()
print('WriteImge: %s Seconds' % (writeImgeEnd - writeImgeStart))
currentFrame = img_np
print(currentFrame.shape)
q = sub.subtract_demo(background, currentFrame)
substractIMRun = time.clock()
print('substract : %s Seconds' % (substractIMRun - writeImgeEnd))
s = sub.inverse(q)
InvserseSTOP = time.clock()
print('inverse : %s Seconds' % (InvserseSTOP - substractIMRun))
# t = sub.iblack(s, 220)
imageRun = time.clock()
print('iblack: %s Seconds' % (imageRun - InvserseSTOP))
print('sub starct ALL : %s Seconds' % (imageRun - writeImgeEnd))
# cv2.imwrite(image_path + "iblack_" + id + ".png", t)
cv2.imwrite(image_path + "ipaint_" + video_name + ".png", s)
imageStop = time.clock()
print('write twoImge: %s Seconds' % (imageStop - imageRun))
with open(document_path + "site_" + video_name + ".txt", "r+") as f:
a = f.readlines()
print(a)
frame_location = Site(int(a[0]), int(a[1]), int(a[2]), int(a[3]))
res = sub.ipaint(s, 220, video_name, frame_location.locate_x, frame_location.move_x, frame_location.locate_y,
frame_location.move_y)
res['max'] = frame_location.locate_y + frame_location.move_y
        # the y-axis values that changed
list_y = np.array(res['list_y'])
data_total = res['max'] - list_y
max_index = max(data_total.tolist())
res['list_y'] = data_total.tolist()
res['max'] = max_index + 20
res['video_name'] = video_name
        # jsonify was used before ===> the frontend reads data["list_y"] ==> what is the difference?
end = time.clock()
print('Running time: %s Seconds' % (end - start))
return res
# INFO 2019/12/25 15:18 liliangbin background image setup
@main.route('/background/', methods=['GET', 'POST'])
def background():
image_path = Config.UPLOAD_IMAGE_PATH
document_path = Config.SAVE_DOCUMENT_PATH
if request.method == 'POST':
jsonData = json.loads(request.get_data(as_text=True))
frame = jsonData.get('current_frame')
video_name = jsonData.get('video_name')
img_np = base64_to_png(frame)
cv2.imwrite(image_path + "back_" + video_name + ".png", img_np)
return 'done'
# TODO 2020/1/4 15:13 liliangbin the returned address should be the position of the drawn box (video name and time position), now set via the frontend
@main.route('/site/', methods=['GET', 'POST'])
def site():
image_path = Config.UPLOAD_IMAGE_PATH
document_path = Config.SAVE_DOCUMENT_PATH
if request.method == 'POST':
print("post")
print(request.form)
        # When recognizing the data it is best to work with integers; int/float conversion is
        # problematic, right at calculation time. Headache-inducing.
        # Big pitfall.
locate_x = int(float(request.form['locate_x']))
locate_y = int(float(request.form['locate_y']))
move_x = int(float(request.form['move_x']))
move_y = int(float(request.form['move_y']))
id = request.form['id']
print(id, "fdsf")
with open(document_path + "site_" + id + ".txt", 'w') as f:
f.write(str(locate_x) + '\n')
f.write(str(locate_y) + '\n')
f.write(str(move_x) + '\n')
f.write(str(move_y) + '\n')
return "done"
# TODO 2020/6/12 15:50 liliangbin this code could use a round of optimization
@main.route('/change_datas/', methods=['GET', 'POST'])
def change_datas():
    # The incoming base64-encoded string must follow base64 padding rules: "when the original data
    # length is not a multiple of 3, append one '=' if two input bytes remain at the end;
    # append two '=' if one input byte remains; append nothing if no bytes remain, so the data
    # can be restored correctly."
s = []
image_path = Config.UPLOAD_IMAGE_PATH
document_path = Config.SAVE_DOCUMENT_PATH
if request.method == 'POST':
new = eval(request.form.getlist("current_frame")[0])
id = request.form['id']
print(type(new), new)
# new=[int(new[0]),int(new[1])]
with open(document_path + "site_" + id + ".txt", "r+") as f:
a = f.readlines()
print(a)
frame_location = Site(int(a[0]), int(a[1]), int(a[2]), int(a[3]))
        with open(document_path + "sand_" + id + ".csv", "r+", encoding="utf-8", newline="") as f:
reader = csv.reader(f)
# writer = csv.writer(f)
            print("modifying the csv file")
for i in reader:
s.append(i)
for i in s:
# print(i)
if str(new[0]) == i[0]:
s[s.index(i)][1] = str(frame_location.move_y + frame_location.locate_y - new[1])
break
        with open(document_path + "sand_" + id + ".csv", "w", newline="") as f:
writer = csv.writer(f)
for i in s:
writer.writerow(i)
        print("csv file modified successfully")
return "true"
# INFO 2020/6/12 15:51 liliangbin get the user
@main.route("/site_get/", methods=['GET', 'POST'])
def site_get():
document_path = Config.SAVE_DOCUMENT_PATH
res = {}
if request.method == 'POST':
id = request.form["id"]
try:
with open(document_path + "site_" + id + ".txt", "r+") as f:
a = f.readlines()
print(a)
frame_location = Site(int(a[0]), int(a[1]), int(a[2]), int(a[3]))
except Exception as e:
print(e.__cause__)
            print('this site has not been stored yet')
frame_location = Site(0, 0, 0, 0)
tmp2 = frame_location.locate_y + frame_location.move_y
tmp1 = frame_location.locate_x + frame_location.move_x
res['site_left_top'] = str(frame_location.locate_x) + ',' + str(frame_location.locate_y)
res['site_left_bottom'] = str(frame_location.locate_x) + ',' + str(tmp2)
res['site_right_top'] = str(tmp1) + ',' + str(frame_location.locate_y)
res['site_right_bottom'] = str(tmp1) + ',' + str(tmp2)
# return redirect(url_for('main.index'))
return res
@main.route('/video_location/', methods=['POST'])
def video_location():
document_path = Config.SAVE_DOCUMENT_PATH
video_save_location = request.form.get('video_location')
location = request.args.get('location')
print(video_save_location)
print(location)
with open(document_path + "video_save_location.txt", 'w') as f:
f.write(str(video_save_location))
if location == 'ipc':
return redirect(url_for('.ipc'))
elif location == 'multi_video':
return redirect(url_for('.multi_ipc_video'))
return redirect('.')
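# The base64_to_png helper imported above is not shown in this file; the sketch below is a
# common shape for such a helper (an assumption, not the project's implementation).
import base64


def base64_to_png_sketch(data_url):
    """Decode a base64/data-URL frame into an OpenCV BGR image array."""
    raw = base64.b64decode(data_url.split(",", 1)[-1])
    return cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_COLOR)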
| 9,068
| 0
| 154
|
ec9fd9597bdca1b68870394bf77473afe8484e61
| 233
|
py
|
Python
|
lfs/manage/discounts/forms.py
|
naro/django-lfs
|
312404370e497d00aa0f7221dc55a70a20490fb5
|
[
"BSD-3-Clause"
] | null | null | null |
lfs/manage/discounts/forms.py
|
naro/django-lfs
|
312404370e497d00aa0f7221dc55a70a20490fb5
|
[
"BSD-3-Clause"
] | null | null | null |
lfs/manage/discounts/forms.py
|
naro/django-lfs
|
312404370e497d00aa0f7221dc55a70a20490fb5
|
[
"BSD-3-Clause"
] | null | null | null |
# django imports
from django.forms import ModelForm
# lfs imports
from lfs.discounts.models import Discount
class DiscountForm(ModelForm):
"""
Form to manage discount data.
"""
| 16.642857
| 41
| 0.690987
|
# django imports
from django.forms import ModelForm
# lfs imports
from lfs.discounts.models import Discount
class DiscountForm(ModelForm):
"""
Form to manage discount data.
"""
class Meta:
model = Discount
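        # Note: recent Django versions also require an explicit field list on a ModelForm
        # Meta; "__all__" below is an assumption about the intended fields.
        # fields = "__all__"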
| 0
| 15
| 26
|
161fc216cd97b68346d08db33a110ef3a04fb9bd
| 5,738
|
py
|
Python
|
betka/utils.py
|
phracek/betka
|
12c920bd76d33f81bbf8cda6f27d673c8826cf9e
|
[
"MIT"
] | 1
|
2020-11-05T21:16:28.000Z
|
2020-11-05T21:16:28.000Z
|
betka/utils.py
|
phracek/betka
|
12c920bd76d33f81bbf8cda6f27d673c8826cf9e
|
[
"MIT"
] | 16
|
2020-03-20T11:23:27.000Z
|
2022-03-08T17:09:11.000Z
|
betka/utils.py
|
phracek/betka
|
12c920bd76d33f81bbf8cda6f27d673c8826cf9e
|
[
"MIT"
] | 1
|
2020-03-11T09:29:48.000Z
|
2020-03-11T09:29:48.000Z
|
# MIT License
#
# Copyright (c) 2020 SCL team at Red Hat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from contextlib import contextmanager
import logging
import shutil
import os
import json
import jinja2
import subprocess
from pathlib import Path
from betka.constants import HOME
logger = logging.getLogger(__name__)
def run_cmd(cmd, return_output=False, ignore_error=False, shell=False, **kwargs):
"""
Run provided command on host system using the same user as invoked this code.
Raises subprocess.CalledProcessError if it fails.
:param cmd: list or str
:param return_output: bool, return output of the command
:param ignore_error: bool, do not fail in case nonzero return code
:param shell: bool, run command in shell
:param kwargs: pass keyword arguments to subprocess.check_* functions; for more info,
please check `help(subprocess.Popen)`
:return: None or str
"""
logger.debug("command: %r", cmd)
try:
if return_output:
return subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
universal_newlines=True,
shell=shell,
**kwargs,
)
else:
return subprocess.check_call(cmd, shell=shell, **kwargs)
except subprocess.CalledProcessError as cpe:
if ignore_error:
if return_output:
return cpe.output
else:
return cpe.returncode
else:
logger.error(f"failed with code {cpe.returncode} and output:\n{cpe.output}")
raise cpe
def text_from_template(template_dir, template_filename, template_data):
"""
Create text based on template in path template_dir/template_filename
:param template_dir: string, directory containing templates
:param template_filename: template for text in jinja
:param template_data: dict, data for substitution in template
:return: string
"""
if not os.path.exists(os.path.join(template_dir, template_filename)):
raise FileNotFoundError("Path to template not found.")
template_loader = jinja2.FileSystemLoader(searchpath=template_dir)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_filename)
output_text = template.render(template_data=template_data)
logger.debug("Text from template created:")
logger.debug(output_text)
return output_text
def copy_upstream2downstream(src_parent: Path, dest_parent: Path):
"""Copies content from upstream repo to downstream repo
Copies all files/dirs/symlinks from upstream source to dist-git one by one,
while removing previous if exists.
:param src_parent: path to source directory
:param dest_parent: path to destination directory
"""
for f in src_parent.iterdir():
if f.name.startswith(".git"):
continue
dest = dest_parent / f.name
src = src_parent / f.name
logger.debug(f"Copying {str(src)} to {str(dest)}.")
# First remove the dest only if it is not symlink.
if dest.is_dir() and not dest.is_symlink():
logger.debug("rmtree %s", dest)
shutil.rmtree(dest)
else:
if dest.exists():
dest.unlink()
# Now copy the src to dest
if src.is_symlink() or not src.is_dir():
logger.debug("cp %s %s", src, dest)
shutil.copy2(src, dest, follow_symlinks=False)
else:
logger.debug("cp -r %s %s", src, dest)
shutil.copytree(src, dest, symlinks=True)
def clean_directory(path: Path):
"""
    Clean the contents of the given directory without removing the directory itself.
:param path: directory path which is cleaned
"""
for d in path.iterdir():
src = path / d
if src.is_dir():
logger.debug("rmtree %s", str(src))
shutil.rmtree(src)
else:
src.unlink()
def list_dir_content(dir_name: Path):
"""
Lists all content of dir_name
:param dir_name: Directory for showing files
"""
logger.info("Look for a content in '%s' directory", str(dir_name))
for f in dir_name.rglob("*"):
if str(f).startswith(".git"):
continue
logger.debug(f"{f.parent / f.name}")
@contextmanager
def cwd(path):
"""
    Temporarily switch the working directory to ``path`` and
    switch back once the enclosed block finishes.
:param path:
:return:
"""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
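# --- Usage sketch (editorial addition, not part of the original module) -------
# Demonstrates run_cmd(), cwd() and text_from_template(); assumes the module's
# own imports (betka.constants, jinja2) resolve and that a POSIX `echo` exists.
if __name__ == "__main__":
    import tempfile

    print(run_cmd(["echo", "hello"], return_output=True))
    with tempfile.TemporaryDirectory() as tmp_dir:
        # Render a throwaway Jinja template through text_from_template().
        (Path(tmp_dir) / "greeting.j2").write_text("Hello {{ template_data.name }}!")
        with cwd(tmp_dir):
            print(text_from_template(tmp_dir, "greeting.j2", {"name": "betka"}))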
| 32.788571
| 89
| 0.66626
|
# MIT License
#
# Copyright (c) 2020 SCL team at Red Hat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from contextlib import contextmanager
import logging
import shutil
import os
import json
import jinja2
import subprocess
from pathlib import Path
from betka.constants import HOME
logger = logging.getLogger(__name__)
def run_cmd(cmd, return_output=False, ignore_error=False, shell=False, **kwargs):
"""
    Run the provided command on the host system as the same user that invoked this code.
Raises subprocess.CalledProcessError if it fails.
:param cmd: list or str
:param return_output: bool, return output of the command
:param ignore_error: bool, do not fail in case nonzero return code
:param shell: bool, run command in shell
:param kwargs: pass keyword arguments to subprocess.check_* functions; for more info,
please check `help(subprocess.Popen)`
:return: None or str
"""
logger.debug("command: %r", cmd)
try:
if return_output:
return subprocess.check_output(
cmd,
stderr=subprocess.STDOUT,
universal_newlines=True,
shell=shell,
**kwargs,
)
else:
return subprocess.check_call(cmd, shell=shell, **kwargs)
except subprocess.CalledProcessError as cpe:
if ignore_error:
if return_output:
return cpe.output
else:
return cpe.returncode
else:
logger.error(f"failed with code {cpe.returncode} and output:\n{cpe.output}")
raise cpe
def text_from_template(template_dir, template_filename, template_data):
"""
Create text based on template in path template_dir/template_filename
:param template_dir: string, directory containing templates
:param template_filename: template for text in jinja
:param template_data: dict, data for substitution in template
:return: string
"""
if not os.path.exists(os.path.join(template_dir, template_filename)):
raise FileNotFoundError("Path to template not found.")
template_loader = jinja2.FileSystemLoader(searchpath=template_dir)
template_env = jinja2.Environment(loader=template_loader)
template = template_env.get_template(template_filename)
output_text = template.render(template_data=template_data)
logger.debug("Text from template created:")
logger.debug(output_text)
return output_text
def copy_upstream2downstream(src_parent: Path, dest_parent: Path):
"""Copies content from upstream repo to downstream repo
Copies all files/dirs/symlinks from upstream source to dist-git one by one,
while removing previous if exists.
:param src_parent: path to source directory
:param dest_parent: path to destination directory
"""
for f in src_parent.iterdir():
if f.name.startswith(".git"):
continue
dest = dest_parent / f.name
src = src_parent / f.name
logger.debug(f"Copying {str(src)} to {str(dest)}.")
# First remove the dest only if it is not symlink.
if dest.is_dir() and not dest.is_symlink():
logger.debug("rmtree %s", dest)
shutil.rmtree(dest)
else:
if dest.exists():
dest.unlink()
# Now copy the src to dest
if src.is_symlink() or not src.is_dir():
logger.debug("cp %s %s", src, dest)
shutil.copy2(src, dest, follow_symlinks=False)
else:
logger.debug("cp -r %s %s", src, dest)
shutil.copytree(src, dest, symlinks=True)
def clean_directory(path: Path):
"""
    Clean the contents of the given directory without removing the directory itself.
:param path: directory path which is cleaned
"""
for d in path.iterdir():
src = path / d
if src.is_dir():
logger.debug("rmtree %s", str(src))
shutil.rmtree(src)
else:
src.unlink()
def list_dir_content(dir_name: Path):
"""
Lists all content of dir_name
:param dir_name: Directory for showing files
"""
logger.info("Look for a content in '%s' directory", str(dir_name))
for f in dir_name.rglob("*"):
if str(f).startswith(".git"):
continue
logger.debug(f"{f.parent / f.name}")
def load_config_json():
with open(f"{HOME}/config.json") as config_file:
data = json.load(config_file)
return data
@contextmanager
def cwd(path):
"""
    Temporarily switch the working directory to ``path`` and
    switch back once the enclosed block finishes.
:param path:
:return:
"""
prev_cwd = Path.cwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
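# --- Usage sketch (editorial addition, not part of the original module) -------
# Exercises copy_upstream2downstream(), list_dir_content() and clean_directory()
# on throwaway temporary directories; assumes the module's own imports resolve.
if __name__ == "__main__":
    import tempfile

    src_dir = Path(tempfile.mkdtemp())
    dest_dir = Path(tempfile.mkdtemp())
    (src_dir / "Dockerfile").write_text("FROM fedora:latest\n")
    copy_upstream2downstream(src_dir, dest_dir)
    list_dir_content(dest_dir)
    clean_directory(dest_dir)
    shutil.rmtree(src_dir)
    shutil.rmtree(dest_dir)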
| 109
| 0
| 23
|
7f019039c93471670011b52c66c98041328e2ea4
| 2,848
|
py
|
Python
|
deepspeech/io/collator.py
|
iclementine/DeepSpeech
|
d0635c6592a2e787ca296e15241e7371a83ca55f
|
[
"Apache-2.0"
] | 1
|
2021-05-14T23:27:13.000Z
|
2021-05-14T23:27:13.000Z
|
deepspeech/io/collator.py
|
xihuanafeng/DeepSpeech
|
2bdf4c946af66cc173d638c072ba6435cd18a286
|
[
"Apache-2.0"
] | null | null | null |
deepspeech/io/collator.py
|
xihuanafeng/DeepSpeech
|
2bdf4c946af66cc173d638c072ba6435cd18a286
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from deepspeech.frontend.utility import IGNORE_ID
from deepspeech.io.utility import pad_sequence
from deepspeech.utils.log import Log
__all__ = ["SpeechCollator"]
logger = Log(__name__).getlog()
| 36.050633
| 86
| 0.61236
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from deepspeech.frontend.utility import IGNORE_ID
from deepspeech.io.utility import pad_sequence
from deepspeech.utils.log import Log
__all__ = ["SpeechCollator"]
logger = Log(__name__).getlog()
class SpeechCollator():
def __init__(self, keep_transcription_text=True):
"""
        Pad audio features with zeros so that they all have the same shape (or
        a user-defined shape) within one batch.
        If ``keep_transcription_text`` is False, text is token ids; otherwise it is the raw string.
"""
self._keep_transcription_text = keep_transcription_text
def __call__(self, batch):
"""batch examples
Args:
batch ([List]): batch is (audio, text)
audio (np.ndarray) shape (D, T)
text (List[int] or str): shape (U,)
Returns:
tuple(audio, text, audio_lens, text_lens): batched data.
audio : (B, Tmax, D)
audio_lens: (B)
text : (B, Umax)
text_lens: (B)
"""
audios = []
audio_lens = []
texts = []
text_lens = []
for audio, text in batch:
# audio
audios.append(audio.T) # [T, D]
audio_lens.append(audio.shape[1])
# text
# for training, text is token ids
# else text is string, convert to unicode ord
tokens = []
if self._keep_transcription_text:
assert isinstance(text, str), (type(text), text)
tokens = [ord(t) for t in text]
else:
tokens = text # token ids
tokens = tokens if isinstance(tokens, np.ndarray) else np.array(
tokens, dtype=np.int64)
texts.append(tokens)
text_lens.append(tokens.shape[0])
padded_audios = pad_sequence(
audios, padding_value=0.0).astype(np.float32) #[B, T, D]
audio_lens = np.array(audio_lens).astype(np.int64)
padded_texts = pad_sequence(
texts, padding_value=IGNORE_ID).astype(np.int64)
text_lens = np.array(text_lens).astype(np.int64)
return padded_audios, audio_lens, padded_texts, text_lens
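# --- Usage sketch (editorial addition, not part of the original file) ---------
# Illustrates the expected shapes; the random batch below is purely for
# demonstration and assumes the deepspeech package is importable.
if __name__ == "__main__":
    collator = SpeechCollator(keep_transcription_text=True)
    fake_batch = [
        (np.random.rand(80, 120).astype(np.float32), "hello"),  # (D, T) features + transcript
        (np.random.rand(80, 90).astype(np.float32), "hi"),
    ]
    audios, audio_lens, texts, text_lens = collator(fake_batch)
    # audios: (2, 120, 80), audio_lens: [120 90], texts: (2, 5), text_lens: [5 2]
    print(audios.shape, audio_lens, texts.shape, text_lens)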
| 0
| 1,997
| 23
|
c7c5f6d1f99a696768cc8b80c7193f77e06c25f7
| 28,956
|
py
|
Python
|
lib/vclib/svn/svn_ra.py
|
cmanley/viewvc
|
18ce398586ff99ee13ac64f85c205efdf9c23bad
|
[
"BSD-2-Clause"
] | null | null | null |
lib/vclib/svn/svn_ra.py
|
cmanley/viewvc
|
18ce398586ff99ee13ac64f85c205efdf9c23bad
|
[
"BSD-2-Clause"
] | null | null | null |
lib/vclib/svn/svn_ra.py
|
cmanley/viewvc
|
18ce398586ff99ee13ac64f85c205efdf9c23bad
|
[
"BSD-2-Clause"
] | null | null | null |
# -*-python-*-
#
# Copyright (C) 1999-2018 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
"Version Control lib driver for remotely accessible Subversion repositories."
import vclib
import sys
import os
import re
import tempfile
import time
import urllib
from svn_repos import Revision, SVNChangedPath, _datestr_to_date, \
_compare_paths, _path_parts, _cleanup_path, \
_rev2optrev, _fix_subversion_exception, \
_split_revprops, _canonicalize_path
from svn import core, delta, client, wc, ra
### Require Subversion 1.3.1 or better. (for svn_ra_get_locations support)
if (core.SVN_VER_MAJOR, core.SVN_VER_MINOR, core.SVN_VER_PATCH) < (1, 3, 1):
raise Exception, "Version requirement not met (needs 1.3.1 or better)"
### BEGIN COMPATIBILITY CODE ###
try:
SVN_INVALID_REVNUM = core.SVN_INVALID_REVNUM
except AttributeError: # The 1.4.x bindings are missing core.SVN_INVALID_REVNUM
SVN_INVALID_REVNUM = -1
### END COMPATIBILITY CODE ###
def cat_to_tempfile(svnrepos, path, rev):
"""Check out file revision to temporary file"""
temp = tempfile.mktemp()
stream = core.svn_stream_from_aprfile(temp)
url = svnrepos._geturl(path)
client.svn_client_cat(core.Stream(stream), url, _rev2optrev(rev),
svnrepos.ctx)
core.svn_stream_close(stream)
return temp
| 36.285714
| 79
| 0.645738
|
# -*-python-*-
#
# Copyright (C) 1999-2018 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
"Version Control lib driver for remotely accessible Subversion repositories."
import vclib
import sys
import os
import re
import tempfile
import time
import urllib
from svn_repos import Revision, SVNChangedPath, _datestr_to_date, \
_compare_paths, _path_parts, _cleanup_path, \
_rev2optrev, _fix_subversion_exception, \
_split_revprops, _canonicalize_path
from svn import core, delta, client, wc, ra
### Require Subversion 1.3.1 or better. (for svn_ra_get_locations support)
if (core.SVN_VER_MAJOR, core.SVN_VER_MINOR, core.SVN_VER_PATCH) < (1, 3, 1):
raise Exception, "Version requirement not met (needs 1.3.1 or better)"
### BEGIN COMPATIBILITY CODE ###
try:
SVN_INVALID_REVNUM = core.SVN_INVALID_REVNUM
except AttributeError: # The 1.4.x bindings are missing core.SVN_INVALID_REVNUM
SVN_INVALID_REVNUM = -1
def list_directory(url, peg_rev, rev, flag, ctx):
try:
dirents, locks = client.svn_client_ls3(url, peg_rev, rev, flag, ctx)
except TypeError: # 1.4.x bindings are goofed
dirents = client.svn_client_ls3(None, url, peg_rev, rev, flag, ctx)
locks = {}
return dirents, locks
def get_directory_props(ra_session, path, rev):
try:
dirents, fetched_rev, props = ra.svn_ra_get_dir(ra_session, path, rev)
except ValueError: # older bindings are goofed
props = ra.svn_ra_get_dir(ra_session, path, rev)
return props
def client_log(url, start_rev, end_rev, log_limit, include_changes,
cross_copies, cb_func, ctx):
include_changes = include_changes and 1 or 0
cross_copies = cross_copies and 1 or 0
try:
client.svn_client_log4([url], start_rev, start_rev, end_rev,
log_limit, include_changes, not cross_copies,
0, None, cb_func, ctx)
except AttributeError:
# Wrap old svn_log_message_receiver_t interface with a
# svn_log_entry_t one.
def cb_convert(paths, revision, author, date, message, pool):
class svn_log_entry_t:
pass
log_entry = svn_log_entry_t()
log_entry.changed_paths = paths
log_entry.revision = revision
log_entry.revprops = { core.SVN_PROP_REVISION_LOG : message,
core.SVN_PROP_REVISION_AUTHOR : author,
core.SVN_PROP_REVISION_DATE : date,
}
cb_func(log_entry, pool)
client.svn_client_log2([url], start_rev, end_rev, log_limit,
include_changes, not cross_copies, cb_convert, ctx)
def setup_client_ctx(config_dir):
# Ensure that the configuration directory exists.
core.svn_config_ensure(config_dir)
# Fetch the configuration (and 'config' bit thereof).
cfg = core.svn_config_get_config(config_dir)
config = cfg.get(core.SVN_CONFIG_CATEGORY_CONFIG)
# Here's the compat-sensitive part: try to use
# svn_cmdline_create_auth_baton(), and fall back to making our own
# if that fails.
try:
auth_baton = core.svn_cmdline_create_auth_baton(1, None, None, config_dir,
1, 1, config, None)
except AttributeError:
auth_baton = core.svn_auth_open([
client.svn_client_get_simple_provider(),
client.svn_client_get_username_provider(),
client.svn_client_get_ssl_server_trust_file_provider(),
client.svn_client_get_ssl_client_cert_file_provider(),
client.svn_client_get_ssl_client_cert_pw_file_provider(),
])
if config_dir is not None:
core.svn_auth_set_parameter(auth_baton,
core.SVN_AUTH_PARAM_CONFIG_DIR,
config_dir)
# Create, setup, and return the client context baton.
ctx = client.svn_client_create_context()
ctx.config = cfg
ctx.auth_baton = auth_baton
return ctx
### END COMPATIBILITY CODE ###
class LogCollector:
def __init__(self, path, show_all_logs, lockinfo, access_check_func):
# This class uses leading slashes for paths internally
if not path:
self.path = '/'
else:
self.path = path[0] == '/' and path or '/' + path
self.logs = []
self.show_all_logs = show_all_logs
self.lockinfo = lockinfo
self.access_check_func = access_check_func
self.done = False
def add_log(self, log_entry, pool):
if self.done:
return
paths = log_entry.changed_paths
revision = log_entry.revision
msg, author, date, revprops = _split_revprops(log_entry.revprops)
# Changed paths have leading slashes
changed_paths = paths.keys()
changed_paths.sort(lambda a, b: _compare_paths(a, b))
this_path = None
if self.path in changed_paths:
this_path = self.path
change = paths[self.path]
if change.copyfrom_path:
this_path = change.copyfrom_path
for changed_path in changed_paths:
if changed_path != self.path:
# If a parent of our path was copied, our "next previous"
# (huh?) path will exist elsewhere (under the copy source).
if (self.path.rfind(changed_path) == 0) and \
self.path[len(changed_path)] == '/':
change = paths[changed_path]
if change.copyfrom_path:
this_path = change.copyfrom_path + self.path[len(changed_path):]
if self.show_all_logs or this_path:
if self.access_check_func is None \
or self.access_check_func(self.path[1:], revision):
entry = Revision(revision, date, author, msg, None, self.lockinfo,
self.path[1:], None, None)
self.logs.append(entry)
else:
self.done = True
if this_path:
self.path = this_path
def cat_to_tempfile(svnrepos, path, rev):
"""Check out file revision to temporary file"""
temp = tempfile.mktemp()
stream = core.svn_stream_from_aprfile(temp)
url = svnrepos._geturl(path)
client.svn_client_cat(core.Stream(stream), url, _rev2optrev(rev),
svnrepos.ctx)
core.svn_stream_close(stream)
return temp
class SelfCleanFP:
def __init__(self, path):
self._fp = open(path, 'r')
self._path = path
self._eof = 0
def read(self, len=None):
if len:
chunk = self._fp.read(len)
else:
chunk = self._fp.read()
if chunk == '':
self._eof = 1
return chunk
def readline(self):
chunk = self._fp.readline()
if chunk == '':
self._eof = 1
return chunk
def readlines(self):
lines = self._fp.readlines()
self._eof = 1
return lines
def close(self):
self._fp.close()
os.remove(self._path)
def __del__(self):
self.close()
def eof(self):
return self._eof
class RemoteSubversionRepository(vclib.Repository):
def __init__(self, name, rootpath, authorizer, utilities, config_dir):
self.name = name
self.rootpath = rootpath
self.auth = authorizer
self.diff_cmd = utilities.diff or 'diff'
self.config_dir = config_dir or None
# See if this repository is even viewable, authz-wise.
if not vclib.check_root_access(self):
raise vclib.ReposNotFound(name)
def open(self):
# Setup the client context baton, complete with non-prompting authstuffs.
self.ctx = setup_client_ctx(self.config_dir)
ra_callbacks = ra.svn_ra_callbacks_t()
ra_callbacks.auth_baton = self.ctx.auth_baton
self.ra_session = ra.svn_ra_open(self.rootpath, ra_callbacks, None,
self.ctx.config)
self.youngest = ra.svn_ra_get_latest_revnum(self.ra_session)
self._dirent_cache = { }
self._revinfo_cache = { }
# See if a universal read access determination can be made.
if self.auth and self.auth.check_universal_access(self.name) == 1:
self.auth = None
def rootname(self):
return self.name
def rootpath(self):
return self.rootpath
def roottype(self):
return vclib.SVN
def authorizer(self):
return self.auth
def itemtype(self, path_parts, rev):
pathtype = None
if not len(path_parts):
pathtype = vclib.DIR
else:
path = self._getpath(path_parts)
rev = self._getrev(rev)
try:
kind = ra.svn_ra_check_path(self.ra_session, path, rev)
if kind == core.svn_node_file:
pathtype = vclib.FILE
elif kind == core.svn_node_dir:
pathtype = vclib.DIR
except:
pass
if pathtype is None:
raise vclib.ItemNotFound(path_parts)
if not vclib.check_path_access(self, path_parts, pathtype, rev):
raise vclib.ItemNotFound(path_parts)
return pathtype
def openfile(self, path_parts, rev, options):
path = self._getpath(path_parts)
if self.itemtype(path_parts, rev) != vclib.FILE: # does auth-check
raise vclib.Error("Path '%s' is not a file." % path)
rev = self._getrev(rev)
url = self._geturl(path)
### rev here should be the last history revision of the URL
fp = SelfCleanFP(cat_to_tempfile(self, path, rev))
lh_rev, c_rev = self._get_last_history_rev(path_parts, rev)
return fp, lh_rev
def listdir(self, path_parts, rev, options):
path = self._getpath(path_parts)
if self.itemtype(path_parts, rev) != vclib.DIR: # does auth-check
raise vclib.Error("Path '%s' is not a directory." % path)
rev = self._getrev(rev)
entries = []
dirents, locks = self._get_dirents(path, rev)
for name in dirents.keys():
entry = dirents[name]
if entry.kind == core.svn_node_dir:
kind = vclib.DIR
elif entry.kind == core.svn_node_file:
kind = vclib.FILE
else:
kind = None
entries.append(vclib.DirEntry(name, kind))
return entries
def dirlogs(self, path_parts, rev, entries, options):
path = self._getpath(path_parts)
if self.itemtype(path_parts, rev) != vclib.DIR: # does auth-check
raise vclib.Error("Path '%s' is not a directory." % path)
rev = self._getrev(rev)
dirents, locks = self._get_dirents(path, rev)
for entry in entries:
entry_path_parts = path_parts + [entry.name]
dirent = dirents.get(entry.name, None)
# dirents is authz-sanitized, so ensure the entry is found therein.
if dirent is None:
continue
# Get authz-sanitized revision metadata.
entry.date, entry.author, entry.log, revprops, changes = \
self._revinfo(dirent.created_rev)
entry.rev = str(dirent.created_rev)
entry.size = dirent.size
entry.lockinfo = None
if locks.has_key(entry.name):
entry.lockinfo = locks[entry.name].owner
def itemlog(self, path_parts, rev, sortby, first, limit, options):
assert sortby == vclib.SORTBY_DEFAULT or sortby == vclib.SORTBY_REV
path_type = self.itemtype(path_parts, rev) # does auth-check
path = self._getpath(path_parts)
rev = self._getrev(rev)
url = self._geturl(path)
# If this is a file, fetch the lock status and size (as of REV)
# for this item.
lockinfo = size_in_rev = None
if path_type == vclib.FILE:
basename = path_parts[-1]
list_url = self._geturl(self._getpath(path_parts[:-1]))
dirents, locks = list_directory(list_url, _rev2optrev(rev),
_rev2optrev(rev), 0, self.ctx)
if locks.has_key(basename):
lockinfo = locks[basename].owner
if dirents.has_key(basename):
size_in_rev = dirents[basename].size
# Special handling for the 'svn_latest_log' scenario.
### FIXME: Don't like this hack. We should just introduce
### something more direct in the vclib API.
if options.get('svn_latest_log', 0):
dir_lh_rev, dir_c_rev = self._get_last_history_rev(path_parts, rev)
date, author, log, revprops, changes = self._revinfo(dir_lh_rev)
return [vclib.Revision(dir_lh_rev, str(dir_lh_rev), date, author,
None, log, size_in_rev, lockinfo)]
def _access_checker(check_path, check_rev):
return vclib.check_path_access(self, _path_parts(check_path),
path_type, check_rev)
# It's okay if we're told to not show all logs on a file -- all
# the revisions should match correctly anyway.
lc = LogCollector(path, options.get('svn_show_all_dir_logs', 0),
lockinfo, _access_checker)
cross_copies = options.get('svn_cross_copies', 0)
log_limit = 0
if limit:
log_limit = first + limit
client_log(url, _rev2optrev(rev), _rev2optrev(1), log_limit, 1,
cross_copies, lc.add_log, self.ctx)
revs = lc.logs
revs.sort()
prev = None
for rev in revs:
# Swap out revision info with stuff from the cache (which is
# authz-sanitized).
rev.date, rev.author, rev.log, revprops, changes \
= self._revinfo(rev.number)
rev.prev = prev
prev = rev
revs.reverse()
if len(revs) < first:
return []
if limit:
return revs[first:first+limit]
return revs
def itemprops(self, path_parts, rev):
path = self._getpath(path_parts)
path_type = self.itemtype(path_parts, rev) # does auth-check
rev = self._getrev(rev)
url = self._geturl(path)
pairs = client.svn_client_proplist2(url, _rev2optrev(rev),
_rev2optrev(rev), 0, self.ctx)
return pairs and pairs[0][1] or {}
def annotate(self, path_parts, rev, include_text=False):
path = self._getpath(path_parts)
if self.itemtype(path_parts, rev) != vclib.FILE: # does auth-check
raise vclib.Error("Path '%s' is not a file." % path)
rev = self._getrev(rev)
url = self._geturl(path)
# Examine logs for the file to determine the oldest revision we are
# permitted to see.
log_options = {
'svn_cross_copies' : 1,
'svn_show_all_dir_logs' : 1,
}
revs = self.itemlog(path_parts, rev, vclib.SORTBY_REV, 0, 0, log_options)
oldest_rev = revs[-1].number
# Now calculate the annotation data. Note that we'll not
# inherently trust the provided author and date, because authz
# rules might necessitate that we strip that information out.
blame_data = []
def _blame_cb(line_no, revision, author, date,
line, pool, blame_data=blame_data):
prev_rev = None
if revision > 1:
prev_rev = revision - 1
# If we have an invalid revision, clear the date and author
# values. Otherwise, if we have authz filtering to do, use the
# revinfo cache to do so.
if revision < 0:
date = author = None
elif self.auth:
date, author, msg, revprops, changes = self._revinfo(revision)
# Strip text if the caller doesn't want it.
if not include_text:
line = None
blame_data.append(vclib.Annotation(line, line_no + 1, revision, prev_rev,
author, date))
client.blame2(url, _rev2optrev(rev), _rev2optrev(oldest_rev),
_rev2optrev(rev), _blame_cb, self.ctx)
return blame_data, rev
def revinfo(self, rev):
return self._revinfo(rev, 1)
def rawdiff(self, path_parts1, rev1, path_parts2, rev2, type, options={}):
p1 = self._getpath(path_parts1)
p2 = self._getpath(path_parts2)
r1 = self._getrev(rev1)
r2 = self._getrev(rev2)
if not vclib.check_path_access(self, path_parts1, vclib.FILE, rev1):
raise vclib.ItemNotFound(path_parts1)
if not vclib.check_path_access(self, path_parts2, vclib.FILE, rev2):
raise vclib.ItemNotFound(path_parts2)
args = vclib._diff_args(type, options)
def _date_from_rev(rev):
date, author, msg, revprops, changes = self._revinfo(rev)
return date
try:
temp1 = cat_to_tempfile(self, p1, r1)
temp2 = cat_to_tempfile(self, p2, r2)
info1 = p1, _date_from_rev(r1), r1
info2 = p2, _date_from_rev(r2), r2
return vclib._diff_fp(temp1, temp2, info1, info2, self.diff_cmd, args)
except core.SubversionException, e:
_fix_subversion_exception(e)
if e.apr_err == vclib.svn.core.SVN_ERR_FS_NOT_FOUND:
raise vclib.InvalidRevision
raise
def isexecutable(self, path_parts, rev):
props = self.itemprops(path_parts, rev) # does authz-check
return props.has_key(core.SVN_PROP_EXECUTABLE)
def filesize(self, path_parts, rev):
path = self._getpath(path_parts)
if self.itemtype(path_parts, rev) != vclib.FILE: # does auth-check
raise vclib.Error("Path '%s' is not a file." % path)
rev = self._getrev(rev)
dirents, locks = self._get_dirents(self._getpath(path_parts[:-1]), rev)
dirent = dirents.get(path_parts[-1], None)
return dirent.size
def _getpath(self, path_parts):
return '/'.join(path_parts)
def _getrev(self, rev):
if rev is None or rev == 'HEAD':
return self.youngest
try:
if type(rev) == type(''):
while rev[0] == 'r':
rev = rev[1:]
rev = int(rev)
except:
raise vclib.InvalidRevision(rev)
if (rev < 0) or (rev > self.youngest):
raise vclib.InvalidRevision(rev)
return rev
def _geturl(self, path=None):
if not path:
return self.rootpath
path = self.rootpath + '/' + urllib.quote(path)
return _canonicalize_path(path)
def _get_dirents(self, path, rev):
"""Return a 2-type of dirents and locks, possibly reading/writing
from a local cache of that information. This functions performs
authz checks, stripping out unreadable dirents."""
dir_url = self._geturl(path)
path_parts = _path_parts(path)
if path:
key = str(rev) + '/' + path
else:
key = str(rev)
# Ensure that the cache gets filled...
dirents_locks = self._dirent_cache.get(key)
if not dirents_locks:
tmp_dirents, locks = list_directory(dir_url, _rev2optrev(rev),
_rev2optrev(rev), 0, self.ctx)
dirents = {}
for name, dirent in tmp_dirents.items():
dirent_parts = path_parts + [name]
kind = dirent.kind
if (kind == core.svn_node_dir or kind == core.svn_node_file) \
and vclib.check_path_access(self, dirent_parts,
kind == core.svn_node_dir \
and vclib.DIR or vclib.FILE, rev):
lh_rev, c_rev = self._get_last_history_rev(dirent_parts, rev)
dirent.created_rev = lh_rev
dirents[name] = dirent
dirents_locks = [dirents, locks]
self._dirent_cache[key] = dirents_locks
# ...then return the goodies from the cache.
return dirents_locks[0], dirents_locks[1]
def _get_last_history_rev(self, path_parts, rev):
"""Return the a 2-tuple which contains:
- the last interesting revision equal to or older than REV in
the history of PATH_PARTS.
- the created_rev of of PATH_PARTS as of REV."""
path = self._getpath(path_parts)
url = self._geturl(self._getpath(path_parts))
optrev = _rev2optrev(rev)
# Get the last-changed-rev.
revisions = []
def _info_cb(path, info, pool, retval=revisions):
revisions.append(info.last_changed_rev)
client.svn_client_info(url, optrev, optrev, _info_cb, 0, self.ctx)
last_changed_rev = revisions[0]
# Now, this object might not have been directly edited since the
# last-changed-rev, but it might have been the child of a copy.
# To determine this, we'll run a potentially no-op log between
# LAST_CHANGED_REV and REV.
lc = LogCollector(path, 1, None, None)
client_log(url, optrev, _rev2optrev(last_changed_rev), 1, 1, 0,
lc.add_log, self.ctx)
revs = lc.logs
if revs:
revs.sort()
return revs[0].number, last_changed_rev
else:
return last_changed_rev, last_changed_rev
def _revinfo_fetch(self, rev, include_changed_paths=0):
need_changes = include_changed_paths or self.auth
revs = []
def _log_cb(log_entry, pool, retval=revs):
# If Subversion happens to call us more than once, we choose not
# to care.
if retval:
return
revision = log_entry.revision
msg, author, date, revprops = _split_revprops(log_entry.revprops)
action_map = { 'D' : vclib.DELETED,
'A' : vclib.ADDED,
'R' : vclib.REPLACED,
'M' : vclib.MODIFIED,
}
# Easy out: if we won't use the changed-path info, just return a
# changes-less tuple.
if not need_changes:
return revs.append([date, author, msg, revprops, None])
# Subversion 1.5 and earlier didn't offer the 'changed_paths2'
# hash, and in Subversion 1.6, it's offered but broken.
try:
changed_paths = log_entry.changed_paths2
paths = (changed_paths or {}).keys()
except:
changed_paths = log_entry.changed_paths
paths = (changed_paths or {}).keys()
paths.sort(lambda a, b: _compare_paths(a, b))
# If we get this far, our caller needs changed-paths, or we need
# them for authz-related sanitization.
changes = []
found_readable = found_unreadable = 0
for path in paths:
change = changed_paths[path]
# svn_log_changed_path_t (which we might get instead of the
# svn_log_changed_path2_t we'd prefer) doesn't have the
# 'node_kind' member.
pathtype = None
if hasattr(change, 'node_kind'):
if change.node_kind == core.svn_node_dir:
pathtype = vclib.DIR
elif change.node_kind == core.svn_node_file:
pathtype = vclib.FILE
# svn_log_changed_path2_t only has the 'text_modified' and
# 'props_modified' bits in Subversion 1.7 and beyond. And
# svn_log_changed_path_t is without.
text_modified = props_modified = 0
if hasattr(change, 'text_modified'):
if change.text_modified == core.svn_tristate_true:
text_modified = 1
if hasattr(change, 'props_modified'):
if change.props_modified == core.svn_tristate_true:
props_modified = 1
# Wrong, diddily wrong wrong wrong. Can you say,
# "Manufacturing data left and right because it hurts to
# figure out the right stuff?"
action = action_map.get(change.action, vclib.MODIFIED)
if change.copyfrom_path and change.copyfrom_rev:
is_copy = 1
base_path = change.copyfrom_path
base_rev = change.copyfrom_rev
elif action == vclib.ADDED or action == vclib.REPLACED:
is_copy = 0
base_path = base_rev = None
else:
is_copy = 0
base_path = path
base_rev = revision - 1
# Check authz rules (sadly, we have to lie about the path type)
parts = _path_parts(path)
if vclib.check_path_access(self, parts, vclib.FILE, revision):
if is_copy and base_path and (base_path != path):
parts = _path_parts(base_path)
if not vclib.check_path_access(self, parts, vclib.FILE, base_rev):
is_copy = 0
base_path = None
base_rev = None
found_unreadable = 1
changes.append(SVNChangedPath(path, revision, pathtype, base_path,
base_rev, action, is_copy,
text_modified, props_modified))
found_readable = 1
else:
found_unreadable = 1
# If our caller doesn't want changed-path stuff, and we have
# the info we need to make an authz determination already,
# quit this loop and get on with it.
if (not include_changed_paths) and found_unreadable and found_readable:
break
# Filter unreadable information.
if found_unreadable:
msg = None
if not found_readable:
author = None
date = None
# Drop unrequested changes.
if not include_changed_paths:
changes = None
# Add this revision information to the "return" array.
retval.append([date, author, msg, revprops, changes])
optrev = _rev2optrev(rev)
client_log(self.rootpath, optrev, optrev, 1, need_changes, 0,
_log_cb, self.ctx)
return tuple(revs[0])
def _revinfo(self, rev, include_changed_paths=0):
"""Internal-use, cache-friendly revision information harvester."""
# Consult the revinfo cache first. If we don't have cached info,
# or our caller wants changed paths and we don't have those for
# this revision, go do the real work.
rev = self._getrev(rev)
cached_info = self._revinfo_cache.get(rev)
if not cached_info \
or (include_changed_paths and cached_info[4] is None):
cached_info = self._revinfo_fetch(rev, include_changed_paths)
self._revinfo_cache[rev] = cached_info
return cached_info
##--- custom --##
def get_youngest_revision(self):
return self.youngest
def get_location(self, path, rev, old_rev):
try:
results = ra.get_locations(self.ra_session, path, rev, [old_rev])
except core.SubversionException, e:
_fix_subversion_exception(e)
if e.apr_err == core.SVN_ERR_FS_NOT_FOUND:
raise vclib.ItemNotFound(path)
raise
try:
old_path = results[old_rev]
except KeyError:
raise vclib.ItemNotFound(path)
old_path = _cleanup_path(old_path)
old_path_parts = _path_parts(old_path)
# Check access (lying about path types)
if not vclib.check_path_access(self, old_path_parts, vclib.FILE, old_rev):
raise vclib.ItemNotFound(path)
return old_path
def created_rev(self, path, rev):
lh_rev, c_rev = self._get_last_history_rev(_path_parts(path), rev)
return lh_rev
def last_rev(self, path, peg_revision, limit_revision=None):
"""Given PATH, known to exist in PEG_REVISION, find the youngest
revision older than, or equal to, LIMIT_REVISION in which path
exists. Return that revision, and the path at which PATH exists in
that revision."""
# Here's the plan, man. In the trivial case (where PEG_REVISION is
# the same as LIMIT_REVISION), this is a no-brainer. If
# LIMIT_REVISION is older than PEG_REVISION, we can use Subversion's
# history tracing code to find the right location. If, however,
# LIMIT_REVISION is younger than PEG_REVISION, we suffer from
# Subversion's lack of forward history searching. Our workaround,
# ugly as it may be, involves a binary search through the revisions
# between PEG_REVISION and LIMIT_REVISION to find our last live
# revision.
peg_revision = self._getrev(peg_revision)
limit_revision = self._getrev(limit_revision)
if peg_revision == limit_revision:
return peg_revision, path
elif peg_revision > limit_revision:
path = self.get_location(path, peg_revision, limit_revision)
return limit_revision, path
else:
direction = 1
while peg_revision != limit_revision:
mid = (peg_revision + 1 + limit_revision) / 2
try:
path = self.get_location(path, peg_revision, mid)
except vclib.ItemNotFound:
limit_revision = mid - 1
else:
peg_revision = mid
return peg_revision, path
def get_symlink_target(self, path_parts, rev):
"""Return the target of the symbolic link versioned at PATH_PARTS
in REV, or None if that object is not a symlink."""
path = self._getpath(path_parts)
path_type = self.itemtype(path_parts, rev) # does auth-check
rev = self._getrev(rev)
url = self._geturl(path)
# Symlinks must be files with the svn:special property set on them
# and with file contents which read "link SOME_PATH".
if path_type != vclib.FILE:
return None
pairs = client.svn_client_proplist2(url, _rev2optrev(rev),
_rev2optrev(rev), 0, self.ctx)
props = pairs and pairs[0][1] or {}
if not props.has_key(core.SVN_PROP_SPECIAL):
return None
pathspec = ''
### FIXME: We're being a touch sloppy here, first by grabbing the
### whole file and then by checking only the first line
### of it.
fp = SelfCleanFP(cat_to_tempfile(self, path, rev))
pathspec = fp.readline()
fp.close()
if pathspec[:5] != 'link ':
return None
return pathspec[5:]
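# --- Usage sketch (editorial addition; Python 2) -------------------------------
# Instantiating the driver requires the Subversion SWIG bindings, a reachable
# repository URL and a ViewVC `utilities` object exposing a `diff` attribute;
# the names and URL below are placeholders, not values from this file:
#
#   repos = RemoteSubversionRepository('example', 'http://svn.example.com/repos',
#                                      None, utilities, None)
#   repos.open()
#   print repos.get_youngest_revision()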
| 20,482
| 6,375
| 407
|
defb121c8e657f37a2d4f8f5f56bc62934ede14f
| 14,143
|
py
|
Python
|
pdfannots.py
|
pykong/pdfannots
|
4d95245d95dca6d1114b39674c2bd5d329d825e7
|
[
"MIT"
] | 1
|
2018-11-23T12:29:52.000Z
|
2018-11-23T12:29:52.000Z
|
pdfannots.py
|
pykong/pdfannots
|
4d95245d95dca6d1114b39674c2bd5d329d825e7
|
[
"MIT"
] | null | null | null |
pdfannots.py
|
pykong/pdfannots
|
4d95245d95dca6d1114b39674c2bd5d329d825e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
import pathlib
import sys
import tempfile
from multiprocessing import Pool, cpu_count
import PyPDF2 as PyPDF2
import click
import pdfminer.pdftypes as pdftypes
import pdfminer.settings
from fpdf import FPDF
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams, LTAnno, LTContainer, LTText, LTTextBox
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.psparser import PSLiteral, PSLiteralTable
from tqdm import tqdm
pdfminer.settings.STRICT = False
SUBSTITUTIONS = {
u'ff': 'ff',
u'fi': 'fi',
u'fl': 'fl',
u'’': "'",
}
ANNOT_SUBTYPES = set(['Text', 'Highlight', 'Squiggly', 'StrikeOut', 'Underline'])
DEBUG_BOXHIT = False
OUTDIR = ""
@click.command()
@click.option('--outdir', default="", help='Specify output directory')
@click.argument('files', nargs=-1)
if __name__ == "__main__":
main()
| 32.143182
| 114
| 0.581489
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import io
import pathlib
import sys
import tempfile
from multiprocessing import Pool, cpu_count
import PyPDF2 as PyPDF2
import click
import pdfminer.pdftypes as pdftypes
import pdfminer.settings
from fpdf import FPDF
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams, LTAnno, LTContainer, LTText, LTTextBox
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
from pdfminer.psparser import PSLiteral, PSLiteralTable
from tqdm import tqdm
pdfminer.settings.STRICT = False
SUBSTITUTIONS = {
u'ff': 'ff',
u'fi': 'fi',
u'fl': 'fl',
u'’': "'",
}
ANNOT_SUBTYPES = set(['Text', 'Highlight', 'Squiggly', 'StrikeOut', 'Underline'])
DEBUG_BOXHIT = False
OUTDIR = ""
def box_hit(item, box):
(x0, y0, x1, y1) = box
assert item.x0 <= item.x1 and item.y0 <= item.y1
assert x0 <= x1 and y0 <= y1
# does most of the item area overlap the box?
# http://math.stackexchange.com/questions/99565/simplest-way-to-calculate-the-intersect-area-of-two-rectangles
x_overlap = max(0, min(item.x1, x1) - max(item.x0, x0))
y_overlap = max(0, min(item.y1, y1) - max(item.y0, y0))
overlap_area = x_overlap * y_overlap
item_area = (item.x1 - item.x0) * (item.y1 - item.y0)
assert overlap_area <= item_area
if DEBUG_BOXHIT and overlap_area != 0:
print("'%s' %f-%f,%f-%f in %f-%f,%f-%f %2.0f%%" %
(item.get_text(), item.x0, item.x1, item.y0, item.y1, x0, x1, y0, y1,
100 * overlap_area / item_area))
if item_area == 0:
return False
else:
return overlap_area >= 0.5 * item_area
class RectExtractor(TextConverter):
def __init__(self, rsrcmgr, codec='utf-8', page_num=1):
dummy = io.StringIO()
TextConverter.__init__(self, rsrcmgr, outfp=dummy, codec=codec, pageno=page_num, laparams=LAParams())
self.annots = []
def set_coords(self, annots):
self.annots = [a for a in annots if a.boxes]
self._last_hit = []
def test_boxes(self, item):
self._last_hit = []
for a in self.annots:
if any([box_hit(item, b) for b in a.boxes]):
self._last_hit.append(a)
return self._last_hit
def receive_layout(self, lt_page):
def render(item):
if isinstance(item, LTContainer):
for child in item:
render(child)
elif isinstance(item, LTAnno):
# this catches whitespace
for a in self._last_hit:
a.capture(item.get_text())
elif isinstance(item, LTText):
for a in self.test_boxes(item):
a.capture(item.get_text())
if isinstance(item, LTTextBox):
for a in self.test_boxes(item):
a.capture('\n')
render(lt_page)
class Annotation:
def __init__(self, page_num, tag_name, coords=None, rect=None, contents=None):
self.page_num = page_num
self.tag_name = tag_name
if contents == '':
self.contents = None
else:
self.contents = contents
self.rect = rect
self.text = ''
if coords is None:
self.boxes = None
else:
assert (len(coords) % 8 == 0)
self.boxes = []
while coords:
(x0, y0, x1, y1, x2, y2, x3, y3) = coords[:8]
coords = coords[8:]
x_coords = [x0, x1, x2, x3]
y_coords = [y0, y1, y2, y3]
box = (min(x_coords), min(y_coords), max(x_coords), max(y_coords))
self.boxes.append(box)
def capture(self, text):
if text == '\n':
# kludge for latex: elide hyphens, join lines
if self.text.endswith('-'):
self.text = self.text[:-1]
else:
self.text += ' '
else:
self.text += text
def get_text(self):
if self.text:
# replace tex ligatures (and other common odd characters)
return ''.join([SUBSTITUTIONS.get(c, c) for c in self.text.strip()])
else:
return None
def get_start_pos(self):
if self.rect:
(x0, y0, x1, y1) = self.rect
elif self.boxes:
(x0, y0, x1, y1) = self.boxes[0]
else:
return None
return min(x0, x1), max(y0, y1) # assume left-to-right top-to-bottom text :)
def get_annots(pdf_annots, page_num):
annots = []
for pa in pdf_annots:
subtype = pa.get('Subtype')
if subtype is not None and subtype.name not in ANNOT_SUBTYPES:
continue
contents = pa.get('Contents')
if contents is not None:
contents = str(contents, 'utf-8') # 'utf-8' , iso8859-15
contents = contents.replace('\r\n', '\n').replace('\r', '\n')
a = Annotation(page_num, subtype.name.lower(), pa.get('QuadPoints'), pa.get('Rect'), contents)
annots.append(a)
return annots
def normalise_to_box(pos, box):
(x, y) = pos
(x0, y0, x1, y1) = box
if x < x0:
x = x0
elif x > x1:
x = x1
if y < y0:
y = y0
elif y > y1:
y = y1
return x, y
def nearest_outline(outlines, page_num, media_box, pos):
(x, y) = normalise_to_box(pos, media_box)
prev = None
for o in outlines:
if o.page_num < page_num:
prev = o
elif o.page_num > page_num:
return prev
else:
# XXX: assume two-column left-to-right top-to-bottom documents
(o_x, o_y) = normalise_to_box((o.x, o.y), media_box)
(x0, y0, x1, y1) = media_box
colwidth = (x1 - x0) / 2
outline_col = (o_x - x0) // colwidth
pos_col = (x - x0) // colwidth
if outline_col > pos_col or (outline_col == pos_col and o.y < y):
return prev
else:
prev = o
return prev
def prettify(text):
# re.sub(r"\s\s+", " ", text)
# return text
return " ".join(text.split())
def structure_extracts(annots, outlines, media_boxes):
def format_paging(annot):
apos = annot.get_start_pos()
if apos:
o = nearest_outline(outlines, annot.page_num, media_boxes[annot.page_num], apos)
if o:
return o.title, annot.page_num + 1
return "- Missing chapter name -", annot.page_num + 1
def format_text(annot):
if annot.boxes:
if annot.get_text():
return prettify(annot.get_text())
else:
return "(XXX: missing text!)"
else:
return ''
nits = [a for a in annots if a.tag_name in ['squiggly', 'strikeout', 'underline']]
comments = [a for a in annots if a.tag_name in ['highlight', 'text'] and a.contents]
highlights = [a for a in annots if a.tag_name == 'highlight' and a.contents is None]
annot_list = [(highlights, "Highlights"), (comments, "Comments"), (nits, "Nits"), ]
annot_dic = {}
for annot in annot_list:
annot_type = annot[0]
annot_name = annot[1]
for t in annot_type:
title, page = format_paging(t)
text = format_text(t)
try:
annot_dic[title][page][annot_name]
except KeyError:
annot_dic.setdefault(title, {})
annot_dic[title].setdefault(page, {})
annot_dic[title][page].setdefault(annot_name, [])
finally:
annot_dic[title][page][annot_name].append(text)
return annot_dic
def resolve_dest(doc, dest):
if isinstance(dest, bytes):
dest = pdftypes.resolve1(doc.get_dest(dest))
elif isinstance(dest, PSLiteral):
dest = pdftypes.resolve1(doc.get_dest(dest.name))
if isinstance(dest, dict):
dest = dest['D']
return dest
class Outline:
def __init__(self, title, dest, page_num, x, y):
self.title = title
self.dest = dest
self.page_num = page_num
self.x = x
self.y = y
def get_outlines(doc, pages_dict):
result = []
for (level, title, dest_name, action_ref, _) in doc.get_outlines():
if dest_name is None and action_ref:
action = action_ref.resolve()
if isinstance(action, dict):
subtype = action.get('S')
if subtype is PSLiteralTable.intern('GoTo'):
dest_name = action.get('D')
if dest_name is None:
continue
dest = resolve_dest(doc, dest_name)
page_num = pages_dict[dest[0].objid]
(_, _, target_x, target_y, _) = dest
result.append(Outline(title, dest_name, page_num, target_x, target_y))
return result
class PDF(FPDF):
def _page_setup(self):
self.set_margins(left=20, top=15, right=20)
# self.set_title(title)
# self.set_author('Jules Verne')
def _chapter_title(self, title):
self.ln()
self.set_font('Arial', '', 22)
self.cell(w=0, h=6, txt=str(title), border=0, ln=0, align='L', fill=0, link="")
self.ln(12) # Line break
def _chapter_page(self, page_num):
self.set_font('Arial', '', 19)
self.cell(w=0, h=6, txt="Page: " + str(page_num), border=0, ln=0, align='L', fill=0, link="")
self.ln(9)
def _chapter_body(self, annotations):
# Times 12
self.set_font('Arial', 'U', 17)
for key, annot in annotations.items():
self.cell(w=0, h=6, txt=str(key), border=0, ln=0, align='L', fill=0, link="")
# Line break
self.ln(7)
self.set_font('Times', '', 14)
for a in annot:
# Output justified text
self.multi_cell(0, 5, a)
# Line break
self.ln(2)
def print_chapter(self, title, page_num, page_content):
self.add_page() # TODO: transfer to wider scope
self._page_setup()
self._chapter_title(title)
self._chapter_page(page_num)
self._chapter_body(page_content)
def create_cover(cover_doner, body_donor, output_path):
with open(cover_doner, 'rb') as pdf1File, open(body_donor, 'rb') as pdf2File:
pdf1Reader = PyPDF2.PdfFileReader(pdf1File)
pdf2Reader = PyPDF2.PdfFileReader(pdf2File)
pdfWriter = PyPDF2.PdfFileWriter()
# get cover = 1st page from donor
pageObj = pdf1Reader.getPage(0)
pdfWriter.addPage(pageObj)
for pageNum in range(pdf2Reader.numPages):
pageObj = pdf2Reader.getPage(pageNum)
pdfWriter.addPage(pageObj)
with open(output_path, 'wb') as pdfOutputFile:
pdfWriter.write(pdfOutputFile)
def extract_annots(fh):
rsrcmgr = PDFResourceManager()
device = RectExtractor(rsrcmgr)
interpreter = PDFPageInterpreter(rsrcmgr, device)
with open(fh, 'rb') as pdf_file:
parser = PDFParser(pdf_file)
doc = PDFDocument(parser)
pages_dict = {}
media_boxes = {}
all_annots = []
tqdm_desc = fh.ljust(25)[:25] # make string exactly 25 chars long
for (page_num, page) in tqdm(enumerate(PDFPage.create_pages(doc)), desc=tqdm_desc):
pages_dict[page.pageid] = page_num
media_boxes[page_num] = page.mediabox
            if page.annots is None or page.annots == []:
continue
# emit progress indicator
sys.stderr.write((" " if page_num > 0 else "") + "%d" % (page_num + 1))
sys.stderr.flush()
pdf_annots = [ar.resolve() for ar in pdftypes.resolve1(page.annots)]
page_annots = get_annots(pdf_annots, page_num)
device.set_coords(page_annots)
interpreter.process_page(page)
all_annots.extend(page_annots)
outlines = []
try:
outlines = get_outlines(doc, pages_dict)
except PDFNoOutlines:
sys.stderr.write("Document doesn't include outlines (\"bookmarks\")\n")
except:
e = sys.exc_info()[0]
sys.stderr.write("Warning: failed to retrieve outlines: %s\n" % e)
device.close()
# pretty_print(all_annots, outlines, media_boxes)
extract_dic = structure_extracts(all_annots, outlines, media_boxes)
pdf = PDF()
for key_1, chapter in extract_dic.items():
for key_2, page_content in chapter.items():
pdf.print_chapter(title=key_1, page_num=key_2, page_content=page_content)
# constructing output file path
with tempfile.NamedTemporaryFile(suffix='.pdf') as tmp:
tmp_path = tmp.name
pdf.output(tmp_path, 'F')
# copy cover from source pdf in outputpath
p = pathlib.Path(fh)
out_fname = pathlib.Path(p.stem + ".sum" + p.suffix)
out_dir = pathlib.Path(OUTDIR)
output_path = pathlib.Path.joinpath(out_dir, out_fname)
create_cover(cover_doner=fh, body_donor=tmp_path, output_path=output_path)
@click.command()
@click.option('--outdir', default="", help='Specify output directory')
@click.argument('files', nargs=-1)
def main(outdir, files):
# ugly hack to work around maps arg limit
global OUTDIR
OUTDIR = outdir
if not files:
sys.stderr.write("Usage: FILE_1.PDF FILE_2.PDF ...")
sys.exit(1)
else:
for f in files:
if not f.lower().endswith(".pdf"):
sys.stderr.write("Wrong file extension: " + f)
sys.exit(1)
files = set(files) # make sure all files are unique
if outdir: # create target dir if not existing
pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
p = Pool(processes=cpu_count()) # does processes default to this value anyway?
p.map(extract_annots, files)
if __name__ == "__main__":
main()
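# --- Usage sketch (editorial addition) -----------------------------------------
# The tool is driven from the command line via click, e.g.
#
#   python pdfannots.py --outdir summaries/ notes1.pdf notes2.pdf
#
# which writes notes1.sum.pdf / notes2.sum.pdf into summaries/, prepending each
# source document's first page as a cover.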
| 12,337
| -2
| 718
|
c015a9dfa5a763bce4bb483cef8e2db5dd41369b
| 1,196
|
py
|
Python
|
scripts/loss.py
|
Spritaro/condinst_tensorrt
|
22063a75e015bba45b588cdb6ebf1ac663ff1924
|
[
"MIT"
] | 3
|
2021-11-14T14:11:10.000Z
|
2022-02-16T11:42:40.000Z
|
scripts/loss.py
|
datomi79/condinst_tensorrt
|
22063a75e015bba45b588cdb6ebf1ac663ff1924
|
[
"MIT"
] | null | null | null |
scripts/loss.py
|
datomi79/condinst_tensorrt
|
22063a75e015bba45b588cdb6ebf1ac663ff1924
|
[
"MIT"
] | 1
|
2022-02-14T21:47:55.000Z
|
2022-02-14T21:47:55.000Z
|
import torch
def heatmap_focal_loss(preds, gt_heatmap, alpha, gamma, eps=1e-3):
"""
Params:
preds: Tensor[num_classes, height, width]
gt_heatmap: Tensor[num_classes, height, width]
        alpha: focal-loss focusing exponent applied to the prediction term (CornerNet uses alpha=2)
        gamma: how much you want to reduce the penalty around the ground-truth locations (CornerNet uses gamma=4)
        eps: small constant added to prevent log(0) from producing inf
Returns:
loss: Tensor[]
"""
# See CornerNet paper for detail https://arxiv.org/abs/1808.01244
loss = -torch.where(
gt_heatmap == 1,
(1 - preds)**alpha * torch.log(preds + eps), # Loss for positive locations
(1 - gt_heatmap) ** gamma * (preds)**alpha * torch.log(1 - preds - eps) # loss for negative locations
).sum()
return loss
def dice_loss(inputs, targets, smooth=1.0):
"""
Params:
inputs: arbitrary size of Tensor
targets: arbitrary size of Tensor
smooth: smoothing factor
Returns:
loss: Tensor[]
"""
inputs = inputs.view(-1)
targets = targets.view(-1)
    # Squared-denominator version of Dice loss
dice = (2 * (inputs*targets).sum() + smooth) / ((inputs**2).sum() + (targets**2).sum() + smooth)
return 1 - dice
| 31.473684
| 109
| 0.612876
|
import torch
def heatmap_focal_loss(preds, gt_heatmap, alpha, gamma, eps=1e-3):
"""
Params:
preds: Tensor[num_classes, height, width]
gt_heatmap: Tensor[num_classes, height, width]
        alpha: focal-loss focusing exponent applied to the prediction term (CornerNet uses alpha=2)
        gamma: how much you want to reduce the penalty around the ground-truth locations (CornerNet uses gamma=4)
        eps: small constant added to prevent log(0) from producing inf
Returns:
loss: Tensor[]
"""
# See CornerNet paper for detail https://arxiv.org/abs/1808.01244
loss = -torch.where(
gt_heatmap == 1,
(1 - preds)**alpha * torch.log(preds + eps), # Loss for positive locations
(1 - gt_heatmap) ** gamma * (preds)**alpha * torch.log(1 - preds - eps) # loss for negative locations
).sum()
return loss
def dice_loss(inputs, targets, smooth=1.0):
"""
Params:
inputs: arbitrary size of Tensor
targets: arbitrary size of Tensor
smooth: smoothing factor
Returns:
loss: Tensor[]
"""
inputs = inputs.view(-1)
targets = targets.view(-1)
    # Squared-denominator version of Dice loss
dice = (2 * (inputs*targets).sum() + smooth) / ((inputs**2).sum() + (targets**2).sum() + smooth)
return 1 - dice
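# --- Usage sketch (editorial addition, not part of the original file) ----------
# Shapes and hyper-parameters below are illustrative (CornerNet-style alpha=2,
# gamma=4); predictions are kept strictly inside (0, 1) so the logs stay finite.
if __name__ == "__main__":
    preds = torch.rand(3, 64, 64) * 0.9 + 0.05        # values in [0.05, 0.95)
    gt_heatmap = torch.zeros(3, 64, 64)
    gt_heatmap[0, 32, 32] = 1.0                       # a single positive location
    print(heatmap_focal_loss(preds, gt_heatmap, alpha=2.0, gamma=4.0))

    inputs = torch.rand(2, 1, 32, 32)
    targets = (torch.rand(2, 1, 32, 32) > 0.5).float()
    print(dice_loss(inputs, targets))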
| 0
| 0
| 0
|
cfcd628d6daa7217e23c473f633d9ebe42bea5a0
| 3,352
|
py
|
Python
|
preprocessors/verilog_tokenize.py
|
JonathanPierce/Algae
|
8fc42981f8a97a3ee9d9f71cb051621422150a08
|
[
"MIT"
] | 18
|
2015-09-01T03:47:42.000Z
|
2021-01-28T21:49:09.000Z
|
preprocessors/verilog_tokenize.py
|
JonathanPierce/Algae
|
8fc42981f8a97a3ee9d9f71cb051621422150a08
|
[
"MIT"
] | 5
|
2015-11-24T21:51:39.000Z
|
2016-01-04T18:59:43.000Z
|
preprocessors/verilog_tokenize.py
|
JonathanPierce/Algae
|
8fc42981f8a97a3ee9d9f71cb051621422150a08
|
[
"MIT"
] | 4
|
2016-01-22T11:23:48.000Z
|
2018-02-22T23:41:27.000Z
|
import pygments.lexers.hdl as lexers
from multiprocessing import Process
import helpers.common as common
tokenizer = lexers.VerilogLexer()
| 37.662921
| 1,353
| 0.666766
|
import pygments.lexers.hdl as lexers
from multiprocessing import Process
import helpers.common as common
tokenizer = lexers.VerilogLexer()
def compress_value(value):
names = ['always', 'always_comb', 'always_ff', 'always_latch', 'and', 'assign', 'automatic', 'begin', 'break', 'buf', 'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cmos', 'const', 'continue', 'deassign', 'default', 'defparam', 'disable', 'do', 'edge', 'else', 'end', 'endcase', 'endfunction', 'endgenerate', 'endmodule', 'endpackage', 'endprimitive','endspecify', 'endtable', 'endtask', 'enum', 'event', 'final', 'for','force', 'forever', 'fork', 'function', 'generate', 'genvar', 'highz0','highz1', 'if', 'initial', 'inout', 'input', 'integer', 'join', 'large','localparam', 'macromodule', 'medium', 'module', 'nand', 'negedge','nmos', 'nor', 'not', 'notif0', 'notif1', 'or', 'output', 'packed','parameter', 'pmos', 'posedge', 'primitive', 'pull0', 'pull1','pulldown', 'pullup', 'rcmos', 'ref', 'release', 'repeat', 'return','rnmos', 'rpmos', 'rtran', 'rtranif0', 'rtranif1', 'scalared', 'signed','small', 'specify', 'specparam', 'strength', 'string', 'strong0','strong1', 'struct', 'table', 'task', 'tran', 'tranif0', 'tranif1','type', 'typedef', 'unsigned', 'var', 'vectored', 'void', 'wait','weak0', 'weak1', 'while', 'xnor', 'xor', 'byte', 'shortint', 'int', 'longint', 'integer', 'time','bit', 'logic', 'reg', 'supply0', 'supply1', 'tri', 'triand','trior', 'tri0', 'tri1', 'trireg', 'uwire', 'wire', 'wand', 'wo','shortreal', 'real', 'realtime']
value = value.lower()
if value in names:
# compress these down to a single letter
index = names.index(value)
return chr(48 + index)
# leave punctuation and operators alone
return value
def process_token(token):
kind = str(token[1])
value = token[2]
if "Token.Comment" in kind or "Token.Text" in kind:
# ignore comments
return ""
if "Token.Keyword" in kind or "Token.Punctuation" in kind or "Token.Operator" in kind:
# ignore common tokens to speed edit distance
return compress_value(value)
if "Token.Name" in kind:
return "@"
if "Token.Literal" in kind or "Token.String" in kind:
return "#"
# catch all for all others
return "?"
def tokenize(text):
iterable = tokenizer.get_tokens_unprocessed(text)
result = ""
for token in iterable:
result += process_token(token)
return result
def doAssignment(students, assign, helpers):
helpers.printf("processing '{}' in parellel...\n".format(assign.name))
# for each student
for student in students:
# for each entry
entries = assign.args["entries"]
for entry in entries:
sources = entry["sources"]
# try to read the text
text = helpers.readFromAssignment(student, assign.name, sources[0])
if text != None:
# tokenize the file
result = tokenize(text)
# write the result
safeFilename = common.makeFilenameSafe(sources[0]) + "vted.txt"
helpers.writeToPreprocessed(result, student, assign.name, safeFilename)
# all done
helpers.printf("Finished '{}'!\n".format(assign.name))
def run(students, assignments, args, helpers):
# threads to join later
threads = []
# for each assignment
for assign in assignments:
t = Process(target=doAssignment, args=(students, assign, helpers))
threads.append(t)
t.start()
# join the threads
for t in threads:
t.join()
# all done here
return True
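# Hedged usage sketch of the fingerprinting above (assumptions: pygments is installed; this demo
# helper is hypothetical, not part of the original preprocessor; the Verilog snippet is made up).
# Keywords are compressed to single characters, identifiers become '@', literals become '#',
# punctuation/operators are kept, and comments/whitespace are dropped.
def _demo_tokenize():
    sample = "module counter(input clk, output reg [3:0] q); always @(posedge clk) q = q + 1; endmodule"
    print(tokenize(sample))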
| 3,097
| 0
| 115
|
8674b3f9d5c0f0928f6536ab7399dbeda0ee16e1
| 20,839
|
py
|
Python
|
tests/test_event_handlers.py
|
star302b/dj-stripe
|
1d26394414515c4f3ada7132b0eae8f793a0badd
|
[
"MIT"
] | null | null | null |
tests/test_event_handlers.py
|
star302b/dj-stripe
|
1d26394414515c4f3ada7132b0eae8f793a0badd
|
[
"MIT"
] | null | null | null |
tests/test_event_handlers.py
|
star302b/dj-stripe
|
1d26394414515c4f3ada7132b0eae8f793a0badd
|
[
"MIT"
] | null | null | null |
"""
.. module:: dj-stripe.tests.test_event_handlers
:synopsis: dj-stripe Event Handler Tests.
.. moduleauthor:: Alex Kavanaugh (@kavdev)
.. moduleauthor:: Lee Skillen (@lskillen)
"""
from copy import deepcopy
import decimal
from django.contrib.auth import get_user_model
from django.test import TestCase
from mock import patch
from djstripe.models import Event, Charge, Transfer, Account, Plan, Customer, InvoiceItem, Invoice, Card, Subscription
from tests import (FAKE_CARD, FAKE_CHARGE, FAKE_CHARGE_II, FAKE_CUSTOMER, FAKE_CUSTOMER_II,
FAKE_EVENT_CHARGE_SUCCEEDED, FAKE_EVENT_CUSTOMER_CREATED,
FAKE_EVENT_CUSTOMER_DELETED, FAKE_EVENT_CUSTOMER_SOURCE_CREATED,
FAKE_EVENT_CUSTOMER_SOURCE_DELETED, FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE,
FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED, FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED,
FAKE_EVENT_INVOICE_CREATED, FAKE_EVENT_INVOICE_DELETED, FAKE_EVENT_INVOICEITEM_CREATED,
FAKE_EVENT_INVOICEITEM_DELETED, FAKE_EVENT_PLAN_CREATED, FAKE_EVENT_PLAN_DELETED,
FAKE_EVENT_TRANSFER_CREATED, FAKE_EVENT_TRANSFER_DELETED, FAKE_INVOICE, FAKE_INVOICE_II,
FAKE_INVOICEITEM, FAKE_PLAN, FAKE_SUBSCRIPTION, FAKE_SUBSCRIPTION_III, FAKE_TRANSFER)
| 46.619687
| 118
| 0.729018
|
"""
.. module:: dj-stripe.tests.test_event_handlers
:synopsis: dj-stripe Event Handler Tests.
.. moduleauthor:: Alex Kavanaugh (@kavdev)
.. moduleauthor:: Lee Skillen (@lskillen)
"""
from copy import deepcopy
import decimal
from django.contrib.auth import get_user_model
from django.test import TestCase
from mock import patch
from djstripe.models import Event, Charge, Transfer, Account, Plan, Customer, InvoiceItem, Invoice, Card, Subscription
from tests import (FAKE_CARD, FAKE_CHARGE, FAKE_CHARGE_II, FAKE_CUSTOMER, FAKE_CUSTOMER_II,
FAKE_EVENT_CHARGE_SUCCEEDED, FAKE_EVENT_CUSTOMER_CREATED,
FAKE_EVENT_CUSTOMER_DELETED, FAKE_EVENT_CUSTOMER_SOURCE_CREATED,
FAKE_EVENT_CUSTOMER_SOURCE_DELETED, FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE,
FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED, FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED,
FAKE_EVENT_INVOICE_CREATED, FAKE_EVENT_INVOICE_DELETED, FAKE_EVENT_INVOICEITEM_CREATED,
FAKE_EVENT_INVOICEITEM_DELETED, FAKE_EVENT_PLAN_CREATED, FAKE_EVENT_PLAN_DELETED,
FAKE_EVENT_TRANSFER_CREATED, FAKE_EVENT_TRANSFER_DELETED, FAKE_INVOICE, FAKE_INVOICE_II,
FAKE_INVOICEITEM, FAKE_PLAN, FAKE_SUBSCRIPTION, FAKE_SUBSCRIPTION_III, FAKE_TRANSFER)
class EventTestCase(TestCase):
#
# Helpers
#
@patch('stripe.Event.retrieve')
def _create_event(self, event_data, event_retrieve_mock, patch_data=None):
event_data = deepcopy(event_data)
if patch_data:
event_data.update(patch_data)
event_retrieve_mock.return_value = event_data
event = Event.sync_from_stripe_data(event_data)
event.validate()
return event
class TestChargeEvents(EventTestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com")
@patch("djstripe.models.Account.get_default_account")
@patch('stripe.Customer.retrieve', return_value=deepcopy(FAKE_CUSTOMER))
@patch('stripe.Charge.retrieve')
@patch("stripe.Event.retrieve")
def test_charge_created(self, event_retrieve_mock, charge_retrieve_mock, customer_retrieve_mock, account_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CHARGE_SUCCEEDED)
event_retrieve_mock.return_value = fake_stripe_event
charge_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
account_mock.return_value = Account.objects.create()
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
charge = Charge.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEquals(charge.amount, fake_stripe_event["data"]["object"]["amount"] / decimal.Decimal("100"))
self.assertEquals(charge.status, fake_stripe_event["data"]["object"]["status"])
class TestCustomerEvents(EventTestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com")
@patch("stripe.Customer.retrieve")
@patch("stripe.Event.retrieve")
def test_customer_created(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
customer_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
customer = Customer.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEquals(customer.account_balance, fake_stripe_event["data"]["object"]["account_balance"])
self.assertEquals(customer.currency, fake_stripe_event["data"]["object"]["currency"])
@patch("stripe.Customer.retrieve")
@patch("stripe.Event.retrieve")
def test_customer_created_no_customer_exists(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
customer_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
self.assertFalse(Customer.objects.filter(stripe_id=fake_stripe_event["data"]["object"]["id"]).exists())
@patch("stripe.Customer.retrieve", return_value=FAKE_CUSTOMER)
def test_customer_deleted(self, customer_retrieve_mock):
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = self._create_event(FAKE_EVENT_CUSTOMER_CREATED)
self.assertTrue(event.process())
event = self._create_event(FAKE_EVENT_CUSTOMER_DELETED)
self.assertTrue(event.process())
customer = Customer.objects.get(stripe_id=FAKE_CUSTOMER["id"])
self.assertIsNotNone(customer.date_purged)
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Event.retrieve")
def test_customer_card_created(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
customer = Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
card = Card.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertIn(card, customer.sources.all())
self.assertEqual(card.brand, fake_stripe_event["data"]["object"]["brand"])
self.assertEqual(card.last4, fake_stripe_event["data"]["object"]["last4"])
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Event.retrieve")
def test_customer_unknown_source_created(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
fake_stripe_event["data"]["object"]["object"] = "unknown"
event_retrieve_mock.return_value = fake_stripe_event
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
self.assertFalse(Card.objects.filter(stripe_id=fake_stripe_event["data"]["object"]["id"]).exists())
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
def test_customer_default_source_deleted(self, customer_retrieve_mock):
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
self.assertTrue(event.process())
card = Card.objects.get(stripe_id=FAKE_CARD["id"])
customer = Customer.objects.get(stripe_id=FAKE_CUSTOMER["id"])
customer.default_source = card
customer.save()
self.assertIsNotNone(customer.default_source)
self.assertTrue(customer.has_valid_source())
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_DELETED)
self.assertTrue(event.process())
customer = Customer.objects.get(stripe_id=FAKE_CUSTOMER["id"])
self.assertIsNone(customer.default_source)
self.assertFalse(customer.has_valid_source())
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
def test_customer_source_double_delete(self, customer_retrieve_mock):
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_CREATED)
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
self.assertTrue(event.process())
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_DELETED)
self.assertTrue(event.process())
event = self._create_event(FAKE_EVENT_CUSTOMER_SOURCE_DELETED_DUPE)
self.assertTrue(event.process())
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Event.retrieve")
def test_customer_subscription_created(self, event_retrieve_mock, customer_retrieve_mock,
subscription_retrieve_mock, plan_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
customer = Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
subscription = Subscription.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertIn(subscription, customer.subscriptions.all())
self.assertEqual(subscription.status, fake_stripe_event["data"]["object"]["status"])
self.assertEqual(subscription.quantity, fake_stripe_event["data"]["object"]["quantity"])
@patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN))
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
def test_customer_subscription_deleted(
self, customer_retrieve_mock, subscription_retrieve_mock, plan_retrieve_mock):
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = self._create_event(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_CREATED)
self.assertTrue(event.process())
Subscription.objects.get(stripe_id=FAKE_SUBSCRIPTION["id"])
event = self._create_event(FAKE_EVENT_CUSTOMER_SUBSCRIPTION_DELETED)
self.assertTrue(event.process())
with self.assertRaises(Subscription.DoesNotExist):
Subscription.objects.get(stripe_id=FAKE_SUBSCRIPTION["id"])
@patch("stripe.Customer.retrieve")
@patch("stripe.Event.retrieve")
def test_customer_bogus_event_type(self, event_retrieve_mock, customer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_CUSTOMER_CREATED)
fake_stripe_event["data"]["object"]["customer"] = fake_stripe_event["data"]["object"]["id"]
fake_stripe_event["type"] = "customer.praised"
event_retrieve_mock.return_value = fake_stripe_event
customer_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
Customer.objects.create(subscriber=self.user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
customer = Customer.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEqual(None, customer.account_balance)
class TestInvoiceEvents(EventTestCase):
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
@patch("stripe.Event.retrieve")
def test_invoice_created_no_existing_customer(self, event_retrieve_mock, invoice_retrieve_mock,
charge_retrieve_mock, customer_retrieve_mock,
subscription_retrieve_mock, default_account_mock):
default_account_mock.return_value = Account.objects.create()
fake_stripe_event = deepcopy(FAKE_EVENT_INVOICE_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
invoice_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
self.assertEquals(Customer.objects.count(), 1)
customer = Customer.objects.get()
self.assertEquals(customer.subscriber, None)
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
@patch("stripe.Invoice.retrieve")
@patch("stripe.Event.retrieve")
def test_invoice_created(self, event_retrieve_mock, invoice_retrieve_mock, charge_retrieve_mock,
customer_retrieve_mock, subscription_retrieve_mock, default_account_mock):
default_account_mock.return_value = Account.objects.create()
user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com")
Customer.objects.create(subscriber=user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
fake_stripe_event = deepcopy(FAKE_EVENT_INVOICE_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
invoice_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
invoice = Invoice.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEquals(
invoice.amount_due,
fake_stripe_event["data"]["object"]["amount_due"] / decimal.Decimal("100")
)
self.assertEquals(invoice.paid, fake_stripe_event["data"]["object"]["paid"])
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE))
def test_invoice_deleted(self, invoice_retrieve_mock, charge_retrieve_mock, customer_retrieve_mock,
subscription_retrieve_mock, default_account_mock):
default_account_mock.return_value = Account.objects.create()
user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com")
Customer.objects.create(subscriber=user, stripe_id=FAKE_CUSTOMER["id"], livemode=False)
event = self._create_event(FAKE_EVENT_INVOICE_CREATED)
self.assertTrue(event.process())
Invoice.objects.get(stripe_id=FAKE_INVOICE["id"])
event = self._create_event(FAKE_EVENT_INVOICE_DELETED)
self.assertTrue(event.process())
with self.assertRaises(Invoice.DoesNotExist):
Invoice.objects.get(stripe_id=FAKE_INVOICE["id"])
class TestInvoiceItemEvents(EventTestCase):
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION_III))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER_II))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE_II))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE_II))
@patch("stripe.InvoiceItem.retrieve")
@patch("stripe.Event.retrieve")
def test_invoiceitem_created(self, event_retrieve_mock, invoiceitem_retrieve_mock, invoice_retrieve_mock,
charge_retrieve_mock, customer_retrieve_mock, subscription_retrieve_mock,
default_account_mock):
default_account_mock.return_value = Account.objects.create()
user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com")
Customer.objects.create(subscriber=user, stripe_id=FAKE_CUSTOMER_II["id"], livemode=False)
fake_stripe_event = deepcopy(FAKE_EVENT_INVOICEITEM_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
invoiceitem_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
invoiceitem = InvoiceItem.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEquals(invoiceitem.amount, fake_stripe_event["data"]["object"]["amount"] / decimal.Decimal("100"))
@patch("djstripe.models.Account.get_default_account")
@patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION_III))
@patch("stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER_II))
@patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE_II))
@patch("stripe.Invoice.retrieve", return_value=deepcopy(FAKE_INVOICE_II))
@patch("stripe.InvoiceItem.retrieve", return_value=deepcopy(FAKE_INVOICEITEM))
def test_invoiceitem_deleted(
self, invoiceitem_retrieve_mock, invoice_retrieve_mock,
charge_retrieve_mock, customer_retrieve_mock,
subscription_retrieve_mock, default_account_mock):
default_account_mock.return_value = Account.objects.create()
user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com")
Customer.objects.create(subscriber=user, stripe_id=FAKE_CUSTOMER_II["id"], livemode=False)
event = self._create_event(FAKE_EVENT_INVOICEITEM_CREATED)
self.assertTrue(event.process())
InvoiceItem.objects.get(stripe_id=FAKE_INVOICEITEM["id"])
event = self._create_event(FAKE_EVENT_INVOICEITEM_DELETED)
self.assertTrue(event.process())
with self.assertRaises(InvoiceItem.DoesNotExist):
InvoiceItem.objects.get(stripe_id=FAKE_INVOICEITEM["id"])
class TestPlanEvents(EventTestCase):
@patch('stripe.Plan.retrieve')
@patch("stripe.Event.retrieve")
def test_plan_created(self, event_retrieve_mock, plan_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_PLAN_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
plan_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
plan = Plan.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEquals(plan.name, fake_stripe_event["data"]["object"]["name"])
@patch('stripe.Plan.retrieve', return_value=FAKE_PLAN)
def test_plan_deleted(self, plan_retrieve_mock):
event = self._create_event(FAKE_EVENT_PLAN_CREATED)
self.assertTrue(event.process())
Plan.objects.get(stripe_id=FAKE_PLAN["id"])
event = self._create_event(FAKE_EVENT_PLAN_DELETED)
self.assertTrue(event.process())
with self.assertRaises(Plan.DoesNotExist):
Plan.objects.get(stripe_id=FAKE_PLAN["id"])
class TestTransferEvents(EventTestCase):
@patch('stripe.Transfer.retrieve')
@patch("stripe.Event.retrieve")
def test_transfer_created(self, event_retrieve_mock, transfer_retrieve_mock):
fake_stripe_event = deepcopy(FAKE_EVENT_TRANSFER_CREATED)
event_retrieve_mock.return_value = fake_stripe_event
transfer_retrieve_mock.return_value = fake_stripe_event["data"]["object"]
event = Event.sync_from_stripe_data(fake_stripe_event)
event.validate()
event.process()
transfer = Transfer.objects.get(stripe_id=fake_stripe_event["data"]["object"]["id"])
self.assertEquals(transfer.amount, fake_stripe_event["data"]["object"]["amount"] / decimal.Decimal("100"))
self.assertEquals(transfer.status, fake_stripe_event["data"]["object"]["status"])
@patch('stripe.Transfer.retrieve', return_value=FAKE_TRANSFER)
def test_transfer_deleted(self, transfer_retrieve_mock):
event = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
self.assertTrue(event.process())
Transfer.objects.get(stripe_id=FAKE_TRANSFER["id"])
event = self._create_event(FAKE_EVENT_TRANSFER_DELETED)
self.assertTrue(event.process())
with self.assertRaises(Transfer.DoesNotExist):
Transfer.objects.get(stripe_id=FAKE_TRANSFER["id"])
| 14,830
| 4,514
| 161
|
9ff7a9dd754ac5e203b75c39934d21c4dbb3bd05
| 4,489
|
py
|
Python
|
kms_utils.py
|
unai-ttxu/marathon-lb-sec
|
a893cc7c550af02b15d307f941023c7d26a9f1ba
|
[
"Apache-2.0"
] | 23
|
2017-04-17T14:15:10.000Z
|
2021-03-27T12:14:58.000Z
|
kms_utils.py
|
unai-ttxu/marathon-lb-sec
|
a893cc7c550af02b15d307f941023c7d26a9f1ba
|
[
"Apache-2.0"
] | 13
|
2017-11-10T11:31:04.000Z
|
2019-07-02T08:13:21.000Z
|
kms_utils.py
|
unai-ttxu/marathon-lb-sec
|
a893cc7c550af02b15d307f941023c7d26a9f1ba
|
[
"Apache-2.0"
] | 17
|
2017-03-17T08:48:59.000Z
|
2020-04-13T15:06:48.000Z
|
import common
import json
import logging
import os
import subprocess
import time
from dateutil import parser
head_vault_hosts = 'OLD_IFS=${IFS};IFS=\',\' read -r -a VAULT_HOSTS <<< \"$STRING_VAULT_HOST\";IFS=${OLD_IFS};'
source_kms_utils = '. /usr/sbin/kms_utils.sh;'
global vault_token
global vault_accessor
global MIN_PERCENTAGE_EXPIRATION
vault_token = os.getenv('VAULT_TOKEN', '')
vault_accessor = os.getenv('ACCESSOR_TOKEN','')
MIN_PERCENTAGE_EXPIRATION = 0.2
logger = None
| 34.267176
| 183
| 0.704611
|
import common
import json
import logging
import os
import subprocess
import time
from dateutil import parser
head_vault_hosts = 'OLD_IFS=${IFS};IFS=\',\' read -r -a VAULT_HOSTS <<< \"$STRING_VAULT_HOST\";IFS=${OLD_IFS};'
source_kms_utils = '. /usr/sbin/kms_utils.sh;'
global vault_token
global vault_accessor
global MIN_PERCENTAGE_EXPIRATION
vault_token = os.getenv('VAULT_TOKEN', '')
vault_accessor = os.getenv('ACCESSOR_TOKEN','')
MIN_PERCENTAGE_EXPIRATION = 0.2
logger = None
def init_log():
global logger
logger = common.marathon_lb_logger.getChild('kms_utils.py')
def login():
global vault_token
global vault_accessor
resp,_ = exec_with_kms_utils('', 'login', 'echo "{\\\"vaulttoken\\\": \\\"$VAULT_TOKEN\\\",\\\"accessor\\\": \\\"$ACCESSOR_TOKEN\\\"}"')
jsonVal = json.loads(resp.decode("utf-8"))
vault_accessor = (jsonVal['accessor'])
vault_token = (jsonVal['vaulttoken'])
def get_cert(cluster, instance, fqdn, o_format, store_path):
variables = ''.join(['export VAULT_TOKEN=', vault_token, ';'])
command = ' '.join(['getCert', cluster, instance, fqdn, o_format, store_path])
resp,returncode = exec_with_kms_utils(variables, command , '')
logger.debug('get_cert for ' + instance + ' returned ' + str(returncode) + ' and ' + resp.decode("utf-8"))
return returncode == 0
def get_token_info():
variables = ''.join(['export VAULT_TOKEN=', vault_token, ';', 'export ACCESSOR_TOKEN=', vault_accessor, ';'])
command = 'token_info'
resp,_ = exec_with_kms_utils(variables, command, '')
respArr = resp.decode("utf-8").split(',')
jsonValue = json.loads(','.join(respArr[1:]))
logger.debug('status ' + respArr[0])
logger.debug(jsonValue)
return jsonValue
def check_token_needs_renewal(force):
jsonInfo = get_token_info()
creationTime = jsonInfo['data']['creation_time']
#Convert time as given from Vault to epoch time
expire_time_vault = jsonInfo['data']['expire_time']
expire_time = int(parser.parse(expire_time_vault).timestamp())
ttl = jsonInfo['data']['ttl']
lastRenewalTime = 0
try:
lastRenewalTime = jsonInfo['data']['last_renewal_time']
except KeyError: pass
if (lastRenewalTime > 0):
percentage = ttl / (expire_time - lastRenewalTime)
else:
percentage = ttl / (expire_time - creationTime)
logger.debug('Checked token expiration: percentage -> ' + str(percentage))
if (percentage <= MIN_PERCENTAGE_EXPIRATION and percentage > 0):
logger.info('Token about to expire... needs renewal')
jsonInfo = renewal_token()
lease_duration_vault = jsonInfo['auth']['lease_duration']
expire_time = int(time.time()) + int(lease_duration_vault)
elif (percentage <= 0):
logger.info('Token expired!!')
return False
elif force:
logger.info('Forced renewal')
jsonInfo = renewal_token()
lease_duration_vault = jsonInfo['auth']['lease_duration']
expire_time = int(time.time()) + int(lease_duration_vault)
#Write expire_time to file
with open('/marathon-lb/token-status', 'w') as fd:
fd.write(str(int(expire_time)))
return True
def renewal_token():
variables = ''.join(['export VAULT_TOKEN=', vault_token, ';'])
command = 'token_renewal'
resp,_ = exec_with_kms_utils(variables, command, '')
respArr = resp.decode("utf-8").split(',')
# Due to kms_utils.sh issue, response could contain a spurious status_code as follows
#
# 000{request_response}
#
# This 000 spurious status code is caused by an empty parameter set by kms_utils.sh
# which results in an additional curl to an empty URL.
#
# As fixing kms_utils.sh could generate strong side effects, we need to strip this
# spurious response code from the request response here
spurious_status_code = '000'
if respArr[1].startswith(spurious_status_code):
respArr[1] = respArr[1][len(spurious_status_code):]
jsonValue = json.loads(','.join(respArr[1:]))
logger.debug('status ' + respArr[0])
logger.debug(jsonValue)
return jsonValue
def exec_with_kms_utils(variables, command, extra_command):
logger.debug('>>> exec_with_kms_utils: [COMM:'+command+', VARS:'+variables+', EXTRA_COMM:'+extra_command+']')
proc = subprocess.Popen(['bash', '-c', head_vault_hosts + variables + source_kms_utils + command + ';' + extra_command], shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
resp,_ = proc.communicate(timeout=10)
except subprocess.TimeoutExpired as e:
proc.kill()
raise e
return resp, proc.returncode
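# Hedged illustration of the renewal threshold used in check_token_needs_renewal above; the demo
# helper and all numbers are hypothetical, not taken from Vault.
def _demo_renewal_threshold():
    creation_time = 1600000000                           # hypothetical creation time (epoch seconds)
    expire_time = creation_time + 3600                   # hypothetical one-hour token lifetime
    ttl = 600                                            # hypothetical: ten minutes of validity left
    percentage = ttl / (expire_time - creation_time)     # 600 / 3600 = 0.1666...
    print(percentage <= MIN_PERCENTAGE_EXPIRATION)       # True (0.166... <= 0.2) -> token would be renewed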
| 3,844
| 0
| 160
|
7d51bd641910bdf93e36062ca99dbd8e06962b76
| 154
|
py
|
Python
|
chapter_2/intro-sys-mod.py
|
bimri/programming_python
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
[
"MIT"
] | null | null | null |
chapter_2/intro-sys-mod.py
|
bimri/programming_python
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
[
"MIT"
] | null | null | null |
chapter_2/intro-sys-mod.py
|
bimri/programming_python
|
ba52ccd18b9b4e6c5387bf4032f381ae816b5e77
|
[
"MIT"
] | null | null | null |
"Introducing the sys Module"
import sys
print(sys.platform)
print(sys.maxsize)
print(sys.version)
if sys.platform[:3] == 'win': print('hello windows')
| 17.111111
| 52
| 0.727273
|
"Introducing the sys Module"
import sys
print(sys.platform)
print(sys.maxsize)
print(sys.version)
if sys.platform[:3] == 'win': print('hello windows')
| 0
| 0
| 0
|
e62a25962c26e93b18a28ed5fccdc965fc0e07e8
| 31
|
py
|
Python
|
hyperion/importers/__init__.py
|
astrofrog/hyperion
|
e90d7af1df4f064a960594d812c07ff27d87fcc7
|
[
"BSD-2-Clause"
] | 2
|
2015-05-14T17:26:16.000Z
|
2019-03-13T17:33:18.000Z
|
hyperion/importers/__init__.py
|
astrofrog/hyperion
|
e90d7af1df4f064a960594d812c07ff27d87fcc7
|
[
"BSD-2-Clause"
] | null | null | null |
hyperion/importers/__init__.py
|
astrofrog/hyperion
|
e90d7af1df4f064a960594d812c07ff27d87fcc7
|
[
"BSD-2-Clause"
] | null | null | null |
from .orion import parse_orion
| 15.5
| 30
| 0.83871
|
from .orion import parse_orion
| 0
| 0
| 0
|
842d510acc09dda24940ad15781378acbc93a47e
| 1,009
|
py
|
Python
|
aoc/day09.py
|
ryanolsonx/aocpy
|
051f965c443c3d4798e15f1fe86327b0d755d27f
|
[
"MIT"
] | null | null | null |
aoc/day09.py
|
ryanolsonx/aocpy
|
051f965c443c3d4798e15f1fe86327b0d755d27f
|
[
"MIT"
] | null | null | null |
aoc/day09.py
|
ryanolsonx/aocpy
|
051f965c443c3d4798e15f1fe86327b0d755d27f
|
[
"MIT"
] | null | null | null |
with open("./day09.input") as file:
data = [int(line.strip()) for line in file.readlines()]
p1 = get_first_not_matching(25)
print(p1)
p2 = get_contiguous_ns_that_add_to(p1)
print(p2)
| 15.287879
| 56
| 0.581764
|
with open("./day09.input") as file:
data = [int(line.strip()) for line in file.readlines()]
def get_first_not_matching(preamble_size):
i = preamble_size
while i < len(data):
n = data[i]
is_match = False
ds = data[i - preamble_size:i]
for j in ds:
for k in ds:
if j == k:
continue
if j + k == n:
is_match = True
break
if is_match:
break
if not is_match:
return n
i += 1
p1 = get_first_not_matching(25)
print(p1)
def get_min_max(ns):
min = ns[0]
max = ns[0]
for n in ns:
if n < min:
min = n
if n > max:
max = n
return (min, max)
def get_contiguous_ns_that_add_to(value):
i = 0
while i < len(data):
n1 = data[i]
acc = n1
group = [n1]
j = i + 1
while j < len(data):
n2 = data[j]
if acc + n2 > value:
# start over
break
group.append(n2)
if acc + n2 == value:
min, max = get_min_max(group)
return min + max
else:
acc += n2
j += 1
i += 1
p2 = get_contiguous_ns_that_add_to(p1)
print(p2)
| 753
| 0
| 69
|
5f957dac4e3aaa4e9760d7ae025a52ebc53a0390
| 1,533
|
py
|
Python
|
utils/ppmi_data.py
|
Devin-Taylor/multi-head-co-attention
|
466bf1cfe39bc271ff5a1947e15756b69cf22967
|
[
"MIT"
] | 2
|
2019-11-04T14:26:21.000Z
|
2019-11-09T21:13:41.000Z
|
utils/ppmi_data.py
|
Devin-Taylor/multi-head-co-attention
|
466bf1cfe39bc271ff5a1947e15756b69cf22967
|
[
"MIT"
] | null | null | null |
utils/ppmi_data.py
|
Devin-Taylor/multi-head-co-attention
|
466bf1cfe39bc271ff5a1947e15756b69cf22967
|
[
"MIT"
] | null | null | null |
import json
import os
import nibabel as nib
import numpy as np
import pandas as pd
ROOT = "./"
DATA = os.path.join(ROOT, "data/")
| 38.325
| 127
| 0.714938
|
import json
import os
import nibabel as nib
import numpy as np
import pandas as pd
ROOT = "./"
DATA = os.path.join(ROOT, "data/")
def load_ppmi(dataset, filter_feats=True, normalise=True):
patient_summary = pd.read_csv(os.path.join(DATA, "patient_summary.csv"))
metadata = patient_summary[patient_summary.DIAGNOSIS == dataset]
methylation_data_path = os.path.join(DATA, "ppmi_only_caucasian_Zhou_filtered.pkl")
meth_data = pd.read_pickle(methylation_data_path)
metadata = pd.merge(metadata, meth_data, how="left", left_on="meth_file", right_index=True).dropna().reset_index(drop=True)
spect_root = "CONTROLS" if dataset == "CONTROL" else "PD"
spect_data = np.array([np.asarray(nib.load(os.path.join(DATA, spect_root, x)).dataobj) for x in metadata.spect_file])
spect_data = np.expand_dims(spect_data, axis=1) # insert channel
meth_data = metadata.iloc[:, 5:] # NOTE hard indexing so keep this in mind
metadata = metadata.iloc[:, :5]
with open(os.path.join(DATA, "meth_classification_filtered_xgb_tuned_441.json")) as fd:
meth_features = json.load(fd)
if filter_feats:
meth_data = meth_data.loc[:, meth_data.columns.isin(meth_features)]
if normalise:
# NOTE these values are the max and min across all control and PD images in the database
global_min = -1.0
global_max = 5.6598325
spect_data = (spect_data - global_min)/(global_max - global_min)
age = metadata.Age.values
return meth_data, spect_data, age, metadata
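# Hedged illustration of the min-max normalisation step in load_ppmi above; the demo helper and the
# voxel values are hypothetical, while the global extremes are the ones hard-coded in the function.
def _demo_minmax_scaling():
    global_min, global_max = -1.0, 5.6598325
    voxels = np.array([-1.0, 0.0, 5.6598325])                    # hypothetical voxel intensities
    scaled = (voxels - global_min) / (global_max - global_min)
    print(scaled)                                                # [0.0, ~0.15, 1.0] -- all values land in [0, 1]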
| 1,378
| 0
| 23
|
1a9eb26b2e45a592e8105ef8cc9bc6ff155afcb4
| 3,981
|
py
|
Python
|
Programming_for_GIA_Core_Skills/Assessment_2/final_model.py
|
jord9762/jordy9762.github.io
|
28bcc21d140371e08cd074895f48fe646e2e7c79
|
[
"Apache-2.0"
] | null | null | null |
Programming_for_GIA_Core_Skills/Assessment_2/final_model.py
|
jord9762/jordy9762.github.io
|
28bcc21d140371e08cd074895f48fe646e2e7c79
|
[
"Apache-2.0"
] | null | null | null |
Programming_for_GIA_Core_Skills/Assessment_2/final_model.py
|
jord9762/jordy9762.github.io
|
28bcc21d140371e08cd074895f48fe646e2e7c79
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib
import random
import operator
import csv
import drunkframework
import matplotlib.animation
import matplotlib.pyplot
"""WARNING!!!!!"""
"""This code was tested using Spyder 5.0.4, should any problems be encountered using older
models please try """
#creates a new empty list for what will be the csv environment data, see https://docs.python.org/3/library/csv.html for more
environment = []
#drunks adapted from agents from GUI's practical replacing "agents"
drunks = []
#density is an empty list which will track agent movement independent of the movement process
density= []
#specifies number of drunks/agents
num_of_drunks = 25
#outlines the number of iterations the line 64-78 code will undergo
num_of_iterations = 100
#sets the dimensions for the matplotlib plots
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
f = open('drunk.txt', newline='')
#Note that the correct directory must be navigated to in the terminal else the full file path will be needed
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
#Used for testing purposes to ascertain the lay of the environment
#matplotlib.pyplot.xlim(0, 300)
#matplotlib.pyplot.ylim(0, 300)
#matplotlib.pyplot.imshow(environment)
for row in reader:
rowlist =[]
for value in row:
rowlist.append(value)
environment.append(rowlist)
f.close()
#print (rowlist) Used this to check list structure
#Code on lines 46-50 appends the density list output to a 300x300 grid, this code is needed
#to prevent the error "IndexError: list index out of range"
for i in range(300):
rowlist = []
for j in range(300):
rowlist.append(0)
density.append(rowlist)
#matplotlib.pyplot.imshow(environment) run this in isolation to check the environment is
#correct
## Make drunks and assign them with an identification number.
for i in range(num_of_drunks):
identification = ((1+i)*10)
# print(identification) #this should print 10-250 giving each of the drunks an identification number, later to be matched up with houses
drunks.append(drunkframework.Drunk(environment, drunks, identification))
#This is supposed to work whereby, if the co-ordinates of stilldrunk match their identification number, they are home.
#In the prototype, the density of the environment changed throughout the iterations, so the drunks would
#often stop in areas which were not their home. The workaround was separating the track and move processes
#through the creation of the density list. Track is left in but commented out.
for i in range (num_of_drunks):
stilldrunk = drunks[i]
for j in range(num_of_iterations):
while environment [stilldrunk._y][stilldrunk._x] != stilldrunk.identification:
density[drunks[i]._y][drunks[i]._x]+=1
drunks[i].move()
#drunks[i].track() omitted from the final iteration of the application
#saves density list (see lines 68 to 73)
with open('density.txt', 'w', newline='') as f:
csvwriter = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
for row in density:
csvwriter.writerow(row)
#lines 79 to 90 serve the purpose of displaying the density and the drunks in relation
#to their finishing position within the environment
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.imshow(density)
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.show(drunks)
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.imshow(environment)
#Code below just prints we're home for each of the 25 agents following a resolution of
#the code
for i in range(num_of_drunks):
matplotlib.pyplot.scatter(drunks[i]._x, drunks[i]._y)
print("we're home!")
| 36.190909
| 140
| 0.707109
|
import matplotlib
import random
import operator
import csv
import drunkframework
import matplotlib.animation
import matplotlib.pyplot
"""WARNING!!!!!"""
"""This code was tested using Spyder 5.0.4, should any problems be encountered using older
models please try """
#creates a new empty list for what will be the csv environment data, see https://docs.python.org/3/library/csv.html for more
environment = []
#drunks adapted from agents from GUI's practical replacing "agents"
drunks = []
#density is an empty list which will track agent movement independent of the movement process
density= []
#specifies number of drunks/agents
num_of_drunks = 25
#outlines the number of iterations the line 64-78 code will undergo
num_of_iterations = 100
#sets the dimensions for the matplotlib plots
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
f = open('drunk.txt', newline='')
#Note that the correct directory must be navigated to in the terminal else the full file path will be needed
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
#Used for testing purposes to ascertain the lay of the environment
#matplotlib.pyplot.xlim(0, 300)
#matplotlib.pyplot.ylim(0, 300)
#matplotlib.pyplot.imshow(environment)
for row in reader:
rowlist =[]
for value in row:
rowlist.append(value)
environment.append(rowlist)
f.close()
#print (rowlist) Used this to check list structure
#Code on lines 46-50 appends the density list output to a 300x300 grid, this code is needed
#to prevent the error "IndexError: list index out of range"
for i in range(300):
rowlist = []
for j in range(300):
rowlist.append(0)
density.append(rowlist)
#matplotlib.pyplot.imshow(environment) run this in isolation to check the environment is
#correct
## Make drunks and assign them with an identification number.
for i in range(num_of_drunks):
identification = ((1+i)*10)
# print(identification) #this should print 10-250 giving each of the drunks an identification number, later to be matched up with houses
drunks.append(drunkframework.Drunk(environment, drunks, identification))
#This is supposed to work whereby, if the co-ordinates of stilldrunk match their identification number, they are home.
#In the prototype, the density of the environment changed throughout the iterations, so the drunks would
#often stop in areas which were not their home. The workaround was separating the track and move processes
#through the creation of the density list. Track is left in but commented out.
for i in range (num_of_drunks):
stilldrunk = drunks[i]
for j in range(num_of_iterations):
while environment [stilldrunk._y][stilldrunk._x] != stilldrunk.identification:
density[drunks[i]._y][drunks[i]._x]+=1
drunks[i].move()
#drunks[i].track() omitted from the final iteration of the application
#saves density list (see lines 68 to 73)
with open('density.txt', 'w', newline='') as f:
csvwriter = csv.writer(f, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)
for row in density:
csvwriter.writerow(row)
#lines 79 to 90 serve the purpose of displaying the density and the drunks in relation
#to their finishing position within the environment
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.imshow(density)
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.show(drunks)
matplotlib.pyplot.xlim(0, 300)
matplotlib.pyplot.ylim(0, 300)
matplotlib.pyplot.imshow(environment)
#Code below just prints we're home for each of the 25 agents following a resolution of
#the code
for i in range(num_of_drunks):
matplotlib.pyplot.scatter(drunks[i]._x, drunks[i]._y)
print("we're home!")
| 0
| 0
| 0
|
7fb3bf3ccd9b91d3bdbcbdf7e10aa8941cf222d8
| 1,841
|
py
|
Python
|
script/test_caffemodel.py
|
duguyue100/transcaffe
|
aee7f34e98630ca4b1d717f65bc3b83edd00acd6
|
[
"MIT"
] | 1
|
2017-10-30T01:34:14.000Z
|
2017-10-30T01:34:14.000Z
|
script/test_caffemodel.py
|
duguyue100/transcaffe
|
aee7f34e98630ca4b1d717f65bc3b83edd00acd6
|
[
"MIT"
] | null | null | null |
script/test_caffemodel.py
|
duguyue100/transcaffe
|
aee7f34e98630ca4b1d717f65bc3b83edd00acd6
|
[
"MIT"
] | 1
|
2021-07-11T04:30:58.000Z
|
2021-07-11T04:30:58.000Z
|
"""Loading a .caffemodel and figure out the encoding.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import absolute_import
from __future__ import print_function
import os
# from keras.utils.visualize_util import plot
from keras.datasets import mnist as dataset
from keras.utils import np_utils
import transcaffe as tc
batch_size = 128
nb_classes = 10
nb_epoch = 40
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# color channels
chnls = 1
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = dataset.load_data()
X_train = X_train.reshape(X_train.shape[0], chnls, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], chnls, img_rows, img_cols)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# define model for testing
data_path = os.environ["TRANSCAFFE_DATA"]
# model_str = os.path.join(data_path,
# "VGG_ILSVRC_16_layers_deploy.prototxt.txt")
model_str = os.path.join(data_path, "lenet.prototxt.txt")
model_bin = os.path.join(data_path, "lenet_iter_10000.caffemodel")
model = tc.load(model_str, model_bin, target_lib="keras")
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| 27.893939
| 70
| 0.751222
|
"""Loading a .caffemodel and figure out the encoding.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import absolute_import
from __future__ import print_function
import os
# from keras.utils.visualize_util import plot
from keras.datasets import mnist as dataset
from keras.utils import np_utils
import transcaffe as tc
batch_size = 128
nb_classes = 10
nb_epoch = 40
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# color channels
chnls = 1
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = dataset.load_data()
X_train = X_train.reshape(X_train.shape[0], chnls, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], chnls, img_rows, img_cols)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# define model for testing
data_path = os.environ["TRANSCAFFE_DATA"]
# model_str = os.path.join(data_path,
# "VGG_ILSVRC_16_layers_deploy.prototxt.txt")
model_str = os.path.join(data_path, "lenet.prototxt.txt")
model_bin = os.path.join(data_path, "lenet_iter_10000.caffemodel")
model = tc.load(model_str, model_bin, target_lib="keras")
model.compile(loss='categorical_crossentropy', optimizer='adadelta',
metrics=['accuracy'])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| 0
| 0
| 0
|
bdafb5b9a495fe79186ed42ceb1d64616f684e58
| 2,044
|
py
|
Python
|
tlaunch/lp_k8s/config.py
|
TARTRL/TLaunch
|
198dada129f2143b6f626a50b82d45575f4c1115
|
[
"Apache-2.0"
] | 18
|
2021-12-19T09:43:17.000Z
|
2021-12-30T06:09:03.000Z
|
tlaunch/lp_k8s/config.py
|
TARTRL/TLaunch
|
198dada129f2143b6f626a50b82d45575f4c1115
|
[
"Apache-2.0"
] | null | null | null |
tlaunch/lp_k8s/config.py
|
TARTRL/TLaunch
|
198dada129f2143b6f626a50b82d45575f4c1115
|
[
"Apache-2.0"
] | 1
|
2022-01-14T06:20:20.000Z
|
2022-01-14T06:20:20.000Z
|
from typing import Dict, List, Optional
from kubernetes import client
from tlaunch.lp_k8s.resource import Resource
from tlaunch.lp_k8s.util import map_opt
DEFAULT_PORT = 8001
DEFAULT_NAME = 'launchpad'
REVERB_IMAGE = 'reg.real-ai.cn/launchpad/reverb'
DEFAULT_COMMAND = ['python3', '-u', '-mlaunchpad_kubernetes.process_entry']
| 30.969697
| 79
| 0.627202
|
from typing import Dict, List, Optional
from kubernetes import client
from tlaunch.lp_k8s.resource import Resource
from tlaunch.lp_k8s.util import map_opt
DEFAULT_PORT = 8001
DEFAULT_NAME = 'launchpad'
REVERB_IMAGE = 'reg.real-ai.cn/launchpad/reverb'
DEFAULT_COMMAND = ['python3', '-u', '-mlaunchpad_kubernetes.process_entry']
class Container:
def __init__(self,
image: Optional[str] = None,
command: Optional[List[str]] = None,
flags: List[str] = [],
resources: Optional[Resource] = None,
env: Optional[Dict[str, str]] = None):
self.job_name = DEFAULT_NAME
self.image = image or REVERB_IMAGE
self.command = command or DEFAULT_COMMAND + flags
self.resources = resources
self.env = env
def build(self) -> client.V1Container:
return client.V1Container(
name=self.job_name,
image=self.image,
command=self.command,
ports=[
client.V1ContainerPort(name='launchpad',
container_port=DEFAULT_PORT)
],
resources=map_opt(
self.resources,
lambda x: client.V1ResourceRequirements(limits=x.limits,
requests=x.requests)),
env=map_opt(
self.env,
lambda e: [client.V1EnvVar(name=k, value=v)
for k, v in e.items()]))
def set_job_name(self, job_name: str) -> 'Container':
self.job_name = job_name
return self
class Config:
def __init__(self, container: Optional[Container] = None, **kwargs):
self.container = container or Container()
self.kwargs = kwargs
def build(self) -> client.V1PodSpec:
return client.V1PodSpec(**self.kwargs, containers=[self.container.build()])
def set_job_name(self, job_name: str) -> 'Config':
self.container.set_job_name(job_name)
return self
class DefaultReverbConfig(Config):
def __init__(self) -> None:
self.container = Container(image=REVERB_IMAGE)
| 1,470
| 0
| 241
|
2f89d30a8c0491da0365da93766c0592009a4f5a
| 2,694
|
py
|
Python
|
retry_download_errors.py
|
calstateteach/canvas_artifacts_download
|
1fbff01c80214d1e05616097aecd13cbb7faa3ca
|
[
"MIT"
] | null | null | null |
retry_download_errors.py
|
calstateteach/canvas_artifacts_download
|
1fbff01c80214d1e05616097aecd13cbb7faa3ca
|
[
"MIT"
] | null | null | null |
retry_download_errors.py
|
calstateteach/canvas_artifacts_download
|
1fbff01c80214d1e05616097aecd13cbb7faa3ca
|
[
"MIT"
] | null | null | null |
"""Retry downloading files that caused errors in http_downloader.
We can find files to try downloading again by parsing the err.txt file for error messages.
Error log lines we are interested in look like:
09-04-2017 12:45:17..Error_http_downloader 'exports/CalStateTEACH Term 1/grios/Schedule/Mentor Info.docx', 'https://ourdomain.instructure.com/files/8080/download?download_frd=1&verifier=zVZdnkpTmmJIGYAg2U0PaDqESrJBFLi0Xsm73Eldu'
A regex string that captures the file name & URL looks like:
[0-9][0-9]-[0-9][0-9]-[0-9][0-9][0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.\.Error_http_downloader '(.*)', '(.*)'$
09.04.2017 tps Created.
09.17.2018 tps Change bad global Null reference to None.
"""
import script_logging
import http_downloader
import os
import re
import shutil
######### Constants #########
# Regex pattern for extracting file download details from error log.
REGEX_PATTERN = "[0-9][0-9]-[0-9][0-9]-[0-9][0-9][0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.\.Error_http_downloader '(.*)', '(.*)'$"
def make_cp_file_name():
"""Create a unique file that looks like "retry000.txt", "retry001.txt",
"retry002.txt", etc.
"""
cp_file_name = None # Function return variable
n = 0
while True:
cp_file_name = 'retry%03d.txt' % n
if (not os.path.exists(cp_file_name)):
break
else:
n = n + 1
continue
return cp_file_name
######### Stand-Alone Execution #########
if __name__ == "__main__":
load_errors()
| 30.613636
| 228
| 0.639198
|
"""Retry downloading files that caused errors in http_downloader.
We can find files to try downloading again by parsing the err.txt file for error messages.
Error log lines we are interested in look like:
09-04-2017 12:45:17..Error_http_downloader 'exports/CalStateTEACH Term 1/grios/Schedule/Mentor Info.docx', 'https://ourdomain.instructure.com/files/8080/download?download_frd=1&verifier=zVZdnkpTmmJIGYAg2U0PaDqESrJBFLi0Xsm73Eldu'
A regex string that captures the file name & URL looks like:
[0-9][0-9]-[0-9][0-9]-[0-9][0-9][0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.\.Error_http_downloader '(.*)', '(.*)'$
09.04.2017 tps Created.
09.17.2018 tps Change bad global Null reference to None.
"""
import script_logging
import http_downloader
import os
import re
import shutil
######### Constants #########
# Regex pattern for extracting file download details from error log.
REGEX_PATTERN = "[0-9][0-9]-[0-9][0-9]-[0-9][0-9][0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\.\.Error_http_downloader '(.*)', '(.*)'$"
def make_cp_file_name():
"""Create a unique file that looks like "retry000.txt", "retry001.txt",
"retry002.txt", etc.
"""
cp_file_name = None # Function return variable
n = 0
while True:
cp_file_name = 'retry%03d.txt' % n
if (not os.path.exists(cp_file_name)):
break
else:
n = n + 1
continue
return cp_file_name
def cp_err_log():
err_log_file = script_logging.ERR_FILE_NAME
err_log_cp = make_cp_file_name()
script_logging.log_status('Copying error log %s to %s' % (err_log_file, err_log_cp))
shutil.copy(err_log_file, err_log_cp)
def load_errors():
pattern = re.compile(REGEX_PATTERN)
err_list = [] # Accumulate the download errors
file_name = script_logging.ERR_FILE_NAME
if (os.path.isfile(file_name)):
with open(file_name, 'r') as f:
for line in f:
match = pattern.match(line)
if match:
err_list.append((match.group(1), match.group(2)))
# Let's see what download errors we found
if len(err_list) > 0:
# This process might still generate errors, so save the current
# error before starting a new one.
cp_err_log()
script_logging.clear_error_log()
for err in err_list:
# Retry the download
download_file = err[0]
download_url = err[1]
http_downloader.download(download_url, download_file)
else:
script_logging.log_status('No download errors to retry.')
######### Stand-Alone Execution #########
if __name__ == "__main__":
load_errors()
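# Hedged sketch of the error-log parsing described in the module docstring; the demo helper and the
# log line below are hypothetical and shortened, while the pattern is the REGEX_PATTERN defined above.
def _demo_regex_parse():
    sample = "09-04-2017 12:45:17..Error_http_downloader 'exports/demo/file.docx', 'https://example.com/files/1/download'"
    match = re.compile(REGEX_PATTERN).match(sample)
    print(match.group(1))   # exports/demo/file.docx  (download destination)
    print(match.group(2))   # https://example.com/files/1/download  (download URL)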
| 1,131
| 0
| 46
|
fea9894192b3f338d5d4d6a5b7224a49e2e10fb5
| 4,769
|
py
|
Python
|
beatcrunch/utils/model_common.py
|
iero/BeatCrunch
|
baa88595670b0dc1fb2a0e95fe33b176a0c010d6
|
[
"Apache-2.0"
] | null | null | null |
beatcrunch/utils/model_common.py
|
iero/BeatCrunch
|
baa88595670b0dc1fb2a0e95fe33b176a0c010d6
|
[
"Apache-2.0"
] | null | null | null |
beatcrunch/utils/model_common.py
|
iero/BeatCrunch
|
baa88595670b0dc1fb2a0e95fe33b176a0c010d6
|
[
"Apache-2.0"
] | null | null | null |
from nltk import RegexpTokenizer
# Common stopwords in french and english
# Clean text or sentence, removing stopwords
# return list
| 108.386364
| 2,076
| 0.626756
|
from nltk import RegexpTokenizer
# Common stopwords in french and english
def get_stopwords(lang) :
stopset = []
stopwords_ponctuation = [',','"',';',':','.','?','!','*','—']
for w in stopwords_ponctuation: stopset.append(w)
if lang == "fr" or lang == "all":
stopwords_base = ['aussi','au','aux','avec','ça','ce','ces','dans','de','des','du','elle','en','et','eux','il','je','là','la','le','leur','leurs','lui','ma','mais','me','même','mes','moi','mon','ne','nos','notre','nous','on','ou','où','par','pas','pour','qu','que','qui','si','sa','se','ses','son','sur','ta','te','tes','toi','ton','tu','un','une','vos','votre','vous','ceci','cela','celà','cet','cette','ici','ils','les','leurs','quel','quels','quelle','quelles','sans','soi','tout','toutes','toute','tous']
stopwords_lettres_seules = ['c','d','j','l','à','m','n','s','t','y',"c’","d’","j’","l’","m’","n’","s’","t’","qu’"]
stopwords_verbeterne_etre = ['être','été','étée','étées','étés','étant','suis','es','est','sommes','êtes','sont','serai','seras','sera','serons','serez','seront','serais','serait','serions','seriez','seraient','étais','était','étions','étiez','étaient','fus','fut','fûmes','fûtes','furent','sois','soit','soyons','soyez','soient','fusse','fusses','fût','fussions','fussiez','fussent']
stopwords_verbeterne_avoir = ['a','avoir','ayant','eu','eue','eues','eus','ai','as','avons','avez','ont','aurai','auras','aura','aurons','aurez','auront','aurais','aurait','aurions','auriez','auraient','avais','avait','avions','aviez','avaient','eut','eûmes','eûtes','eurent','aie','aies','ait','ayons','ayez','aient','eusse','eusses','eût','eussions','eussiez','eussent']
for w in stopwords_base: stopset.append(w)
for w in stopwords_lettres_seules: stopset.append(w)
for w in stopwords_verbeterne_avoir: stopset.append(w)
for w in stopwords_verbeterne_etre: stopset.append(w)
if lang == "en" or lang == "all":
stopwords_base = ['a','about','above','above','across','after','afterwards','again','against','all','almost','alone','along','already','also','although','always','among','amongst','amoungst','amount','and','another','any','anyhow','anyone','anything','anyway','anywhere','are','around','as','at','back','because','before','beforehand','behind','below','beside','besides','between','beyond','bill','both','bottom','but','by','co','con','de','describe','detail','do','done','down','due','during','each','eg','eight','either','else','elsewhere','empty','enough','etc','even','ever','every','everyone','everything','everywhere','except','few','fire','for','former','formerly','from','front','full','further','he','hence','her','here','hereafter','hereby','herein','hereupon','hers','herself','him','himself','his','how','however','hundred','i','if','in','inc','indeed','interest','into','it','its','itself','last','latter','latterly','least','less','ltd','many','me','meanwhile','mill','mine','more','moreover','most','mostly','much','must','my','myself','name','namely','neither','never','nevertheless','next','no','nobody','none','nor','not','nothing','now','nowhere','of','off','often','on','once','only','onto','or','other','others','otherwise','our','ours','ourselves','out','over','own','part','per','perhaps','please','rather','re','same','serious','several','she','side','since','sincere','so','some','somehow','someone','something','sometime','sometimes','somewhere','still','such','than','that','the','their','them','themselves','then','there','thereafter','thereby','therefore','therein','thereupon','these','they','thin','this','those','though','through','throughout','thru','thus','to','together','too','toward','towards','under','until','upon','us','very','via','we','well','were','what','whatever','when','whence','whenever','where','whereafter','whereas','whereby','wherein','whereupon','wherever','whether','which','while','whither','who','whoever','whole','whom','whose','why','with','within','without','yet','you','your','yours','yourself','yourselves','the']
stopwords_verbs = ['am','be','became','become','becomes','becoming','been','being','call','can','cannot','cant','could','couldnt','cry','fill','find','found','get','give','go','had','has','hasnt','have','is','keep','made','may','might','move','say','says','see','seem','seemed','seeming','seems','should','show','take','put','was','will','would']
for w in stopwords_base: stopset.append(w)
for w in stopwords_verbs: stopset.append(w)
return stopset
# Clean text or sentence, removing stopwords
# return list
def nlp_clean(data,stopwords):
new_str = data.lower()
tokenizer = RegexpTokenizer(r'\w+')
dlist = tokenizer.tokenize(new_str)
for a in dlist :
if len(a) < 2 :
dlist.remove(a)
cleanList = [word for word in dlist if word not in stopwords]
return cleanList
| 4,638
| 0
| 44
|
b87d6c5cb06be96998619a9cb340e11359c13595
| 15,498
|
py
|
Python
|
faster_rcnn/rpn_util.py
|
Kelicious/faster_rcnn
|
fde1a2f342855b8a3b6c1a54878e59d29102a26d
|
[
"MIT"
] | 18
|
2018-05-13T14:50:03.000Z
|
2022-02-23T14:27:17.000Z
|
faster_rcnn/rpn_util.py
|
Kelicious/faster_rcnn
|
fde1a2f342855b8a3b6c1a54878e59d29102a26d
|
[
"MIT"
] | 3
|
2018-05-15T08:46:10.000Z
|
2020-03-17T12:46:31.000Z
|
faster_rcnn/rpn_util.py
|
Kelicious/faster_rcnn
|
fde1a2f342855b8a3b6c1a54878e59d29102a26d
|
[
"MIT"
] | 15
|
2018-05-13T14:50:24.000Z
|
2022-02-24T09:50:07.000Z
|
import random
from enum import Enum
import numpy as np
from custom_decorators import profile
from shapes import Box
from shared_constants import BBREG_MULTIPLIERS, DEFAULT_ANCHORS
from util import calc_iou, cross_ious, get_reg_params, get_bbox_coords
POS_OVERLAP = 0.7
NEG_OVERLAP = 0.3
SAMPLE_SIZE = 256
MAX_POS_SAMPLES = 128
class RpnTrainingManager:
"""
Encapsulates the details of generating training inputs for a region proposal network for a given image.
"""
def __init__(self, calc_conv_dims, stride, preprocess_func, anchor_dims=DEFAULT_ANCHORS):
"""
:param calc_conv_dims: function that accepts a tuple of the image's height and width in pixels and returns the
height and width of the convolutional layer prior to the rpn layers.
:param stride: positive integer, the cumulative stride at the convolutional layer prior to the rpn layers.
:param preprocess_func: function that applies the same transformation to the image's pixels as used for Imagenet
training. Otherwise the Imagenet pre-trained weights will be mismatched.
:param anchor_dims: list of lists of positive integers, one height and width pair for each anchor.
"""
self._cache = {}
self.calc_conv_dims = calc_conv_dims
self.stride = stride
self.preprocess_func = preprocess_func
self.anchor_dims = anchor_dims
@profile
def batched_image(self, image):
"""
Returns the image data to be fed into the network.
:param image: shapes.Image object.
        :return: 4-d numpy array with a single batch of the image, which can be used as a Keras model input.
"""
return np.expand_dims(self.preprocess_func(image.data), axis=0)
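    # Shape note (hedged): assuming image.data is an H x W x 3 array and preprocess_func
    # preserves its shape, the returned batch has shape (1, H, W, 3).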
@profile
def rpn_y_true(self, image):
"""
Takes an image and returns the Keras model inputs to train with.
:param image: shapes.Image object to generate training inputs for.
:return: tuple where the first element is a numpy array of the ground truth network output for whether each
anchor overlaps with an object, and the second element is a numpy array of the ground truth network output for the
bounding box transformation parameters to transform each anchor into an object's bounding box.
"""
'''
Consider removing caching - added when self.process was taking 0.4s to run. Since then, optimized it down to
0.02s locally, 0.003s on aws so the cache isn't too useful anymore.
'''
if image.cache_key not in self._cache:
self._process(image)
results = self._cache[image.cache_key]
# TODO: why is the cached result being deleted? Investigate whether restoring it improves training time.
del self._cache[image.cache_key]
can_use = _apply_sampling(results['is_pos'], results['can_use'])
conv_rows, conv_cols = self.calc_conv_dims(image.height, image.width)
is_pos = np.reshape(results['is_pos'], (conv_rows, conv_cols, len(self.anchor_dims)))
can_use = np.reshape(can_use, (conv_rows, conv_cols, len(self.anchor_dims)))
selected_is_pos = np.logical_and(is_pos, can_use)
# combine arrays with whether or not to use for the loss function
y_class = np.concatenate([can_use, is_pos], axis=2)
bbreg_can_use = np.repeat(selected_is_pos, 4, axis = 2)
bbreg_targets = np.reshape(results['bbreg_targets'], (conv_rows, conv_cols, 4 * len(self.anchor_dims)))
y_bbreg = np.concatenate([bbreg_can_use, bbreg_targets], axis = 2)
y_class = np.expand_dims(y_class, axis=0)
y_bbreg = np.expand_dims(y_bbreg, axis=0)
return y_class, y_bbreg
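# Hedged shape sketch for rpn_y_true, assuming 9 anchor shapes (a common Faster R-CNN
# choice; the actual count is len(DEFAULT_ANCHORS)): with an R x C conv feature map,
# is_pos and can_use reshape to (R, C, 9), so y_class has shape (1, R, C, 18) (use-mask
# channels followed by objectness channels) and y_bbreg has shape (1, R, C, 72)
# (a 36-channel use mask followed by 36 regression-target channels).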
def _idx_to_conv(idx, conv_width, anchors_per_loc):
"""
Converts an anchor box index in a 1-d numpy array to its corresponding 3-d index representing its convolution
position and anchor index.
:param idx: non-negative integer, the position in a 1-d numpy array of anchors.
:param conv_width: the number of possible horizontal positions the convolutional layer's filters can occupy, i.e.
close to the width in pixels divided by the cumulative stride at that layer.
:param anchors_per_loc: positive integer, the number of anchors at each convolutional filter position.
:return: tuple of the row, column, and anchor index of the convolutional filter position for this index.
"""
divisor = conv_width * anchors_per_loc
y, remainder = idx // divisor, idx % divisor
x, anchor_idx = remainder // anchors_per_loc, remainder % anchors_per_loc
return y, x, anchor_idx
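# Worked example (hypothetical numbers) of the index math above: with conv_width=10 and
# anchors_per_loc=9, flat index 100 gives divisor 90, y = 100 // 90 = 1, remainder = 10,
# x = 10 // 9 = 1 and anchor_idx = 10 % 9 = 1.
assert _idx_to_conv(100, 10, 9) == (1, 1, 1)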
def _get_conv_center(conv_x, conv_y, stride):
"""
Finds the center of this convolution position in the image's original coordinate space.
:param conv_x: non-negative integer, x coordinate of the convolution position.
:param conv_y: non-negative integer, y coordinate of the convolution position.
:param stride: positive integer, the cumulative stride in pixels at this layer of the network.
:return: tuple of positive integers, the x and y coordinates of the center of the convolution position.
"""
x_center = stride * (conv_x + 0.5)
y_center = stride * (conv_y + 0.5)
return int(x_center), int(y_center)
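# Worked example: with a cumulative stride of 16 pixels, conv position x=3, y=2 maps to
# the pixel-space center (16 * 3.5, 16 * 2.5) = (56, 40).
assert _get_conv_center(3, 2, 16) == (56, 40)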
@profile
def _get_all_anchor_coords(conv_rows, conv_cols, anchor_dims, stride):
"""
Given the shape of a convolutional layer and the anchors to generate for each position, return all anchors.
:param conv_rows: positive integer, height of this convolutional layer.
:param conv_cols: positive integer, width of this convolutional layer.
:param anchor_dims: list of lists of positive integers, one height and width pair for each anchor.
:param stride: positive integer, cumulative stride of this anchor position in pixels.
:return: 2-d numpy array with one row for each anchor box containing its [x1, y1, x2, y2] coordinates.
"""
num_boxes = conv_rows * conv_cols * len(anchor_dims)
y, x, anchor_idxs = _num_boxes_to_conv_np(num_boxes, conv_cols, len(anchor_dims))
x_center, y_center = _get_conv_center_np(x, y, stride)
anchor_coords = np.zeros((num_boxes, 4), dtype=np.float32)
anchor_height = anchor_dims[anchor_idxs, 0]
anchor_width = anchor_dims[anchor_idxs, 1]
anchor_coords[:, 0] = x_center - anchor_width // 2
anchor_coords[:, 1] = y_center - anchor_height // 2
anchor_coords[:, 2] = anchor_coords[:, 0] + anchor_width
anchor_coords[:, 3] = anchor_coords[:, 1] + anchor_height
return anchor_coords
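# Shape sketch (hypothetical anchor sizes): for a 2 x 3 feature map with two anchor
# shapes, e.g. anchor_dims = np.array([[128, 128], [256, 256]]) and stride 16, the
# result holds 2 * 3 * 2 = 12 rows of [x1, y1, x2, y2] coordinates, i.e. shape (12, 4).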
@profile
def _apply_sampling(is_pos, can_use):
"""
Applies the sampling logic described in the Faster R-CNN paper to determine which anchors should be evaluated in the
loss function.
:param is_pos: 1-d numpy array of booleans for whether each anchor is a true positive for some object.
:param can_use: 1-d numpy array of booleans for whether each anchor can be used at all in the loss function.
:return: 1-d numpy array of booleans of which anchors were chosen to be used in the loss function.
"""
# extract [0] due to np.where returning a tuple
pos_locs = np.where(np.logical_and(is_pos == 1, can_use == 1))[0]
neg_locs = np.where(np.logical_and(is_pos == 0, can_use == 1))[0]
num_pos = len(pos_locs)
num_neg = len(neg_locs)
# cap the number of positive samples per batch to no more than half the batch size
if num_pos > MAX_POS_SAMPLES:
locs_off = random.sample(range(num_pos), num_pos - MAX_POS_SAMPLES)
can_use[pos_locs[locs_off]] = 0
num_pos = MAX_POS_SAMPLES
# fill remaining portion of the batch size with negative samples
if num_neg + num_pos > SAMPLE_SIZE:
locs_off = random.sample(range(num_neg), num_neg + num_pos - SAMPLE_SIZE)
can_use[neg_locs[locs_off]] = 0
return can_use
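# Hedged usage sketch with synthetic labels: 200 usable positives and 5000 usable
# negatives get capped to MAX_POS_SAMPLES positives plus enough negatives to fill
# SAMPLE_SIZE anchors in total.
if __name__ == "__main__":
    _demo_is_pos = np.zeros(10000, dtype=bool)
    _demo_can_use = np.zeros(10000, dtype=bool)
    _demo_is_pos[:200] = True
    _demo_can_use[:5200] = True
    _demo_chosen = _apply_sampling(_demo_is_pos, _demo_can_use)
    assert _demo_chosen.sum() == SAMPLE_SIZE  # 128 positives + 128 negatives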
| 44.153846
| 154
| 0.703123
|
import random
from enum import Enum
import numpy as np
from custom_decorators import profile
from shapes import Box
from shared_constants import BBREG_MULTIPLIERS, DEFAULT_ANCHORS
from util import calc_iou, cross_ious, get_reg_params, get_bbox_coords
POS_OVERLAP = 0.7
NEG_OVERLAP = 0.3
SAMPLE_SIZE = 256
MAX_POS_SAMPLES = 128
class RpnClass(Enum):
NEG = 0
POS = 1
NEUTRAL = 2
class RpnTrainingManager:
"""
Encapsulates the details of generating training inputs for a region proposal network for a given image.
"""
def __init__(self, calc_conv_dims, stride, preprocess_func, anchor_dims=DEFAULT_ANCHORS):
"""
:param calc_conv_dims: function that accepts a tuple of the image's height and width in pixels and returns the
height and width of the convolutional layer prior to the rpn layers.
:param stride: positive integer, the cumulative stride at the convolutional layer prior to the rpn layers.
:param preprocess_func: function that applies the same transformation to the image's pixels as used for Imagenet
training. Otherwise the Imagenet pre-trained weights will be mismatched.
:param anchor_dims: list of lists of positive integers, one height and width pair for each anchor.
"""
self._cache = {}
self.calc_conv_dims = calc_conv_dims
self.stride = stride
self.preprocess_func = preprocess_func
self.anchor_dims = anchor_dims
@profile
def batched_image(self, image):
"""
Returns the image data to be fed into the network.
:param image: shapes.Image object.
        :return: 4-d numpy array with a single batch of the image, which can be used as a Keras model input.
"""
return np.expand_dims(self.preprocess_func(image.data), axis=0)
@profile
def _process(self, image):
# internal method, performs the expensive calculations needed to produce training inputs.
conv_rows, conv_cols = self.calc_conv_dims(image.height, image.width)
num_anchors = conv_rows * conv_cols * len(self.anchor_dims)
bbreg_targets = np.zeros((num_anchors, 4), dtype=np.float32)
        can_use = np.zeros(num_anchors, dtype=bool)
        is_pos = np.zeros(num_anchors, dtype=bool)
gt_box_coords = get_bbox_coords(image.gt_boxes)
anchor_coords = _get_all_anchor_coords(conv_rows, conv_cols, self.anchor_dims, self.stride)
out_of_bounds_idxs = _get_out_of_bounds_idxs(anchor_coords, image.width, image.height)
all_ious = cross_ious(anchor_coords, gt_box_coords)
# all_ious, out_of_bounds_idxs = get_all_ious_faster(gt_box_coords, conv_rows, conv_cols, ANCHORS_PER_LOC, image.width, image.height, self.stride)
max_iou_by_anchor = np.amax(all_ious, axis=1)
max_idx_by_anchor = np.argmax(all_ious, axis=1)
max_iou_by_gt_box = np.amax(all_ious, axis=0)
max_idx_by_gt_box = np.argmax(all_ious, axis=0)
# anchors with more than 0.7 IOU with a gt box are positives
pos_box_idxs = np.where(max_iou_by_anchor > POS_OVERLAP)[0]
# for each gt box, the highest non-zero IOU anchor is a positive
eligible_idxs = np.where(max_iou_by_gt_box > 0.0)
more_pos_box_idxs = max_idx_by_gt_box[eligible_idxs]
total_pos_idxs = np.unique(np.concatenate((pos_box_idxs, more_pos_box_idxs)))
can_use[total_pos_idxs] = 1
is_pos[total_pos_idxs] = 1
# don't bother optimizing, profiling showed this loop's runtime is negligible
for box_idx in total_pos_idxs:
y, x, anchor_idx = _idx_to_conv(box_idx, conv_cols, len(self.anchor_dims))
x_center, y_center = _get_conv_center(x, y, self.stride)
anchor_height, anchor_width = self.anchor_dims[anchor_idx]
anchor_box = Box.from_center_dims_int(x_center, y_center, anchor_width, anchor_height)
gt_box_idx = max_idx_by_anchor[box_idx]
reg_params = get_reg_params(anchor_box.corners, gt_box_coords[gt_box_idx])
bbreg_targets[box_idx, :] = BBREG_MULTIPLIERS * reg_params
neg_box_idxs = np.where(np.logical_and(is_pos == 0, max_iou_by_anchor < NEG_OVERLAP))[0]
can_use[neg_box_idxs] = 1
can_use[out_of_bounds_idxs] = 0
self._cache[image.cache_key] = {
'can_use': can_use,
'is_pos': is_pos,
'bbreg_targets': bbreg_targets
}
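        # Worked illustration of the thresholds above: an anchor whose best IoU with any
        # ground-truth box is 0.75 (> POS_OVERLAP) becomes a usable positive; one at 0.5 is
        # left unused unless it is the best anchor for some ground-truth box; one at 0.2
        # (< NEG_OVERLAP) becomes a usable negative; anchors extending outside the image
        # are always excluded via can_use = 0.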
@profile
def rpn_y_true(self, image):
"""
Takes an image and returns the Keras model inputs to train with.
:param image: shapes.Image object to generate training inputs for.
:return: tuple where the first element is a numpy array of the ground truth network output for whether each
anchor overlaps with an object, and the second element is a numpy array of the ground truth network output for the
bounding box transformation parameters to transform each anchor into an object's bounding box.
"""
'''
Consider removing caching - added when self.process was taking 0.4s to run. Since then, optimized it down to
0.02s locally, 0.003s on aws so the cache isn't too useful anymore.
'''
if image.cache_key not in self._cache:
self._process(image)
results = self._cache[image.cache_key]
# TODO: why is the cached result being deleted? Investigate whether restoring it improves training time.
del self._cache[image.cache_key]
can_use = _apply_sampling(results['is_pos'], results['can_use'])
conv_rows, conv_cols = self.calc_conv_dims(image.height, image.width)
is_pos = np.reshape(results['is_pos'], (conv_rows, conv_cols, len(self.anchor_dims)))
can_use = np.reshape(can_use, (conv_rows, conv_cols, len(self.anchor_dims)))
selected_is_pos = np.logical_and(is_pos, can_use)
# combine arrays with whether or not to use for the loss function
y_class = np.concatenate([can_use, is_pos], axis=2)
bbreg_can_use = np.repeat(selected_is_pos, 4, axis = 2)
bbreg_targets = np.reshape(results['bbreg_targets'], (conv_rows, conv_cols, 4 * len(self.anchor_dims)))
y_bbreg = np.concatenate([bbreg_can_use, bbreg_targets], axis = 2)
y_class = np.expand_dims(y_class, axis=0)
y_bbreg = np.expand_dims(y_bbreg, axis=0)
return y_class, y_bbreg
def _idx_to_conv(idx, conv_width, anchors_per_loc):
"""
Converts an anchor box index in a 1-d numpy array to its corresponding 3-d index representing its convolution
position and anchor index.
:param idx: non-negative integer, the position in a 1-d numpy array of anchors.
:param conv_width: the number of possible horizontal positions the convolutional layer's filters can occupy, i.e.
close to the width in pixels divided by the cumulative stride at that layer.
:param anchors_per_loc: positive integer, the number of anchors at each convolutional filter position.
:return: tuple of the row, column, and anchor index of the convolutional filter position for this index.
"""
divisor = conv_width * anchors_per_loc
y, remainder = idx // divisor, idx % divisor
x, anchor_idx = remainder // anchors_per_loc, remainder % anchors_per_loc
return y, x, anchor_idx
@profile
def _num_boxes_to_conv_np(num_boxes, conv_width, anchors_per_loc):
# similar to _idx_to_conv but for multiple boxes at once, uses vectorized operations to optimize the performance
idxs = np.arange(num_boxes)
divisor = conv_width * anchors_per_loc
y, remainder = idxs // divisor, idxs % divisor
x, anchor_idx = remainder // anchors_per_loc, remainder % anchors_per_loc
return y, x, anchor_idx
def _get_conv_center(conv_x, conv_y, stride):
"""
Finds the center of this convolution position in the image's original coordinate space.
:param conv_x: non-negative integer, x coordinate of the convolution position.
:param conv_y: non-negative integer, y coordinate of the convolution position.
:param stride: positive integer, the cumulative stride in pixels at this layer of the network.
:return: tuple of positive integers, the x and y coordinates of the center of the convolution position.
"""
x_center = stride * (conv_x + 0.5)
y_center = stride * (conv_y + 0.5)
return int(x_center), int(y_center)
@profile
def _get_conv_center_np(conv_x, conv_y, stride):
# like _get_conv_center but optimized for multiple boxes.
x_center = stride * (conv_x + 0.5)
y_center = stride * (conv_y + 0.5)
return x_center.astype('int32'), y_center.astype('int32')
@profile
def _get_all_ious(bbox_coords, conv_rows, conv_cols, anchor_dims, img_width, img_height, stride):
# not used anymore, might be useful to keep around as a reference
num_boxes = conv_rows * conv_cols * len(anchor_dims)
num_gt_boxes = len(bbox_coords)
result = np.zeros((num_boxes, num_gt_boxes))
out_of_bounds_idxs = []
num_boxes = conv_rows * conv_cols * len(anchor_dims)
for i in range(num_boxes):
y, x, anchor_idx = _idx_to_conv(i, conv_cols, len(anchor_dims))
x_center, y_center = _get_conv_center(x, y, stride)
anchor_height, anchor_width = anchor_dims[anchor_idx]
anchor_box = Box.from_center_dims_int(x_center, y_center, anchor_width, anchor_height)
if _out_of_bounds(anchor_box, img_width, img_height):
out_of_bounds_idxs.append(i)
continue
for bbox_idx in range(num_gt_boxes):
iou = calc_iou(bbox_coords[bbox_idx], anchor_box.corners)
result[i, bbox_idx] = iou
return result, out_of_bounds_idxs
@profile
def _get_all_ious_fast(bbox_coords, conv_rows, conv_cols, anchor_dims, img_width, img_height, stride):
# optimization of _get_all_ious using vectorized operations, also not used anymore
num_boxes = conv_rows * conv_cols * len(anchor_dims)
num_gt_boxes = len(bbox_coords)
result = np.zeros((num_boxes, num_gt_boxes))
out_of_bounds_idxs = []
num_boxes = conv_rows * conv_cols * len(anchor_dims)
coords = np.zeros((4))
for i in range(num_boxes):
y, x, anchor_idx = _idx_to_conv(i, conv_cols, len(anchor_dims))
x_center, y_center = _get_conv_center(x, y, stride)
anchor_height, anchor_width = anchor_dims[anchor_idx]
coords[0] = x_center - anchor_width // 2
coords[2] = coords[0] + anchor_width
coords[1] = y_center - anchor_height // 2
coords[3] = coords[1] + anchor_height
if _out_of_bounds_coords(coords, img_width, img_height):
out_of_bounds_idxs.append(i)
continue
for bbox_idx in range(num_gt_boxes):
iou = calc_iou(bbox_coords[bbox_idx], coords)
result[i, bbox_idx] = iou
return result, out_of_bounds_idxs
@profile
# this function was a huge bottleneck so threw away box abstractions to optimize performance
def _get_all_ious_faster(bbox_coords, conv_rows, conv_cols, anchor_dims, img_width, img_height, stride):
# even more optimized version of _get_all_ious_fast, also not used anymore
num_boxes = conv_rows * conv_cols * len(anchor_dims)
y, x, anchor_idxs = _num_boxes_to_conv_np(num_boxes, conv_cols, len(anchor_dims))
x_center, y_center = _get_conv_center_np(x, y, stride)
anchor_coords = np.zeros((num_boxes, 4))
anchor_height = anchor_dims[anchor_idxs, 0]
anchor_width = anchor_dims[anchor_idxs, 1]
anchor_coords[:, 0] = x_center - anchor_width // 2
anchor_coords[:, 1] = y_center - anchor_height // 2
anchor_coords[:, 2] = anchor_coords[:, 0] + anchor_width
anchor_coords[:, 3] = anchor_coords[:, 1] + anchor_height
result = cross_ious(anchor_coords, bbox_coords)
out_of_bounds_idxs = np.where(np.logical_or.reduce((
anchor_coords[:,0] < 0,
anchor_coords[:,1] < 0,
anchor_coords[:,2] >= img_width,
anchor_coords[:,3] >= img_height)))[0]
return result, out_of_bounds_idxs
@profile
def _get_all_anchor_coords(conv_rows, conv_cols, anchor_dims, stride):
"""
Given the shape of a convolutional layer and the anchors to generate for each position, return all anchors.
:param conv_rows: positive integer, height of this convolutional layer.
:param conv_cols: positive integer, width of this convolutional layer.
:param anchor_dims: list of lists of positive integers, one height and width pair for each anchor.
:param stride: positive integer, cumulative stride of this anchor position in pixels.
:return: 2-d numpy array with one row for each anchor box containing its [x1, y1, x2, y2] coordinates.
"""
num_boxes = conv_rows * conv_cols * len(anchor_dims)
y, x, anchor_idxs = _num_boxes_to_conv_np(num_boxes, conv_cols, len(anchor_dims))
x_center, y_center = _get_conv_center_np(x, y, stride)
anchor_coords = np.zeros((num_boxes, 4), dtype=np.float32)
anchor_height = anchor_dims[anchor_idxs, 0]
anchor_width = anchor_dims[anchor_idxs, 1]
anchor_coords[:, 0] = x_center - anchor_width // 2
anchor_coords[:, 1] = y_center - anchor_height // 2
anchor_coords[:, 2] = anchor_coords[:, 0] + anchor_width
anchor_coords[:, 3] = anchor_coords[:, 1] + anchor_height
return anchor_coords
@profile
def _get_out_of_bounds_idxs(anchor_coords, img_width, img_height):
# internal function for figuring out which anchors are out of bounds
out_of_bounds_idxs = np.where(np.logical_or.reduce((
anchor_coords[:,0] < 0,
anchor_coords[:,1] < 0,
anchor_coords[:,2] >= img_width,
anchor_coords[:,3] >= img_height)))[0]
return out_of_bounds_idxs
def _out_of_bounds(box, width, height):
# internal function for checking whether a box is out of bounds, not used anymore
return box.x1 < 0 or box.x2 >= width or box.y1 < 0 or box.y2 >= height
def _out_of_bounds_coords(coords, width, height):
# similar to _out_of_bounds but takes its argument as a numpy array instead of a shapes.Box instance
return coords[0] < 0 or coords[2] >= width or coords[1] < 0 or coords[3] >= height
@profile
def _apply_sampling(is_pos, can_use):
"""
Applies the sampling logic described in the Faster R-CNN paper to determine which anchors should be evaluated in the
loss function.
:param is_pos: 1-d numpy array of booleans for whether each anchor is a true positive for some object.
:param can_use: 1-d numpy array of booleans for whether each anchor can be used at all in the loss function.
:return: 1-d numpy array of booleans of which anchors were chosen to be used in the loss function.
"""
# extract [0] due to np.where returning a tuple
pos_locs = np.where(np.logical_and(is_pos == 1, can_use == 1))[0]
neg_locs = np.where(np.logical_and(is_pos == 0, can_use == 1))[0]
num_pos = len(pos_locs)
num_neg = len(neg_locs)
# cap the number of positive samples per batch to no more than half the batch size
if num_pos > MAX_POS_SAMPLES:
locs_off = random.sample(range(num_pos), num_pos - MAX_POS_SAMPLES)
can_use[pos_locs[locs_off]] = 0
num_pos = MAX_POS_SAMPLES
# fill remaining portion of the batch size with negative samples
if num_neg + num_pos > SAMPLE_SIZE:
locs_off = random.sample(range(num_neg), num_neg + num_pos - SAMPLE_SIZE)
can_use[neg_locs[locs_off]] = 0
return can_use
| 7,177
| 40
| 227
|
f7ded778c29259e2a89bf5a9a9ac772ebb515972
| 25,965
|
py
|
Python
|
samples/client/petstore/python-experimental/petstore_api/models/xml_item.py
|
malymato/openapi-generator
|
47e2c0d027d867de67633bbc9c0a5d7e1054a778
|
[
"Apache-2.0"
] | 2
|
2019-12-08T12:00:11.000Z
|
2022-01-02T13:47:52.000Z
|
samples/client/petstore/python-experimental/petstore_api/models/xml_item.py
|
malymato/openapi-generator
|
47e2c0d027d867de67633bbc9c0a5d7e1054a778
|
[
"Apache-2.0"
] | 8
|
2021-03-01T21:18:19.000Z
|
2022-02-27T07:56:15.000Z
|
samples/client/petstore/python-experimental/petstore_api/models/xml_item.py
|
malymato/openapi-generator
|
47e2c0d027d867de67633bbc9c0a5d7e1054a778
|
[
"Apache-2.0"
] | 1
|
2020-03-08T12:31:09.000Z
|
2020-03-08T12:31:09.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint # noqa: F401
import re # noqa: F401
import six # noqa: F401
from petstore_api.exceptions import ( # noqa: F401
ApiKeyError,
ApiTypeError,
ApiValueError,
)
from petstore_api.model_utils import ( # noqa: F401
ModelNormal,
ModelSimple,
check_allowed_values,
check_validations,
date,
datetime,
file_type,
get_simple_class,
int,
model_to_dict,
none_type,
str,
type_error_message,
validate_and_convert_types
)
class XmlItem(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
        and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
openapi_types (dict): The key is attribute name
and the value is attribute type.
validations (dict): The key is the tuple path to the attribute
        and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
attribute_map = {
'attribute_string': 'attribute_string', # noqa: E501
'attribute_number': 'attribute_number', # noqa: E501
'attribute_integer': 'attribute_integer', # noqa: E501
'attribute_boolean': 'attribute_boolean', # noqa: E501
'wrapped_array': 'wrapped_array', # noqa: E501
'name_string': 'name_string', # noqa: E501
'name_number': 'name_number', # noqa: E501
'name_integer': 'name_integer', # noqa: E501
'name_boolean': 'name_boolean', # noqa: E501
'name_array': 'name_array', # noqa: E501
'name_wrapped_array': 'name_wrapped_array', # noqa: E501
'prefix_string': 'prefix_string', # noqa: E501
'prefix_number': 'prefix_number', # noqa: E501
'prefix_integer': 'prefix_integer', # noqa: E501
'prefix_boolean': 'prefix_boolean', # noqa: E501
'prefix_array': 'prefix_array', # noqa: E501
'prefix_wrapped_array': 'prefix_wrapped_array', # noqa: E501
'namespace_string': 'namespace_string', # noqa: E501
'namespace_number': 'namespace_number', # noqa: E501
'namespace_integer': 'namespace_integer', # noqa: E501
'namespace_boolean': 'namespace_boolean', # noqa: E501
'namespace_array': 'namespace_array', # noqa: E501
'namespace_wrapped_array': 'namespace_wrapped_array', # noqa: E501
'prefix_ns_string': 'prefix_ns_string', # noqa: E501
'prefix_ns_number': 'prefix_ns_number', # noqa: E501
'prefix_ns_integer': 'prefix_ns_integer', # noqa: E501
'prefix_ns_boolean': 'prefix_ns_boolean', # noqa: E501
'prefix_ns_array': 'prefix_ns_array', # noqa: E501
'prefix_ns_wrapped_array': 'prefix_ns_wrapped_array' # noqa: E501
}
openapi_types = {
'attribute_string': (str,), # noqa: E501
'attribute_number': (float,), # noqa: E501
'attribute_integer': (int,), # noqa: E501
'attribute_boolean': (bool,), # noqa: E501
'wrapped_array': ([int],), # noqa: E501
'name_string': (str,), # noqa: E501
'name_number': (float,), # noqa: E501
'name_integer': (int,), # noqa: E501
'name_boolean': (bool,), # noqa: E501
'name_array': ([int],), # noqa: E501
'name_wrapped_array': ([int],), # noqa: E501
'prefix_string': (str,), # noqa: E501
'prefix_number': (float,), # noqa: E501
'prefix_integer': (int,), # noqa: E501
'prefix_boolean': (bool,), # noqa: E501
'prefix_array': ([int],), # noqa: E501
'prefix_wrapped_array': ([int],), # noqa: E501
'namespace_string': (str,), # noqa: E501
'namespace_number': (float,), # noqa: E501
'namespace_integer': (int,), # noqa: E501
'namespace_boolean': (bool,), # noqa: E501
'namespace_array': ([int],), # noqa: E501
'namespace_wrapped_array': ([int],), # noqa: E501
'prefix_ns_string': (str,), # noqa: E501
'prefix_ns_number': (float,), # noqa: E501
'prefix_ns_integer': (int,), # noqa: E501
'prefix_ns_boolean': (bool,), # noqa: E501
'prefix_ns_array': ([int],), # noqa: E501
'prefix_ns_wrapped_array': ([int],), # noqa: E501
}
validations = {
}
additional_properties_type = None
discriminator = None
def __init__(self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs): # noqa: E501
"""XmlItem - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
attribute_string (str): [optional] # noqa: E501
attribute_number (float): [optional] # noqa: E501
attribute_integer (int): [optional] # noqa: E501
attribute_boolean (bool): [optional] # noqa: E501
wrapped_array ([int]): [optional] # noqa: E501
name_string (str): [optional] # noqa: E501
name_number (float): [optional] # noqa: E501
name_integer (int): [optional] # noqa: E501
name_boolean (bool): [optional] # noqa: E501
name_array ([int]): [optional] # noqa: E501
name_wrapped_array ([int]): [optional] # noqa: E501
prefix_string (str): [optional] # noqa: E501
prefix_number (float): [optional] # noqa: E501
prefix_integer (int): [optional] # noqa: E501
prefix_boolean (bool): [optional] # noqa: E501
prefix_array ([int]): [optional] # noqa: E501
prefix_wrapped_array ([int]): [optional] # noqa: E501
namespace_string (str): [optional] # noqa: E501
namespace_number (float): [optional] # noqa: E501
namespace_integer (int): [optional] # noqa: E501
namespace_boolean (bool): [optional] # noqa: E501
namespace_array ([int]): [optional] # noqa: E501
namespace_wrapped_array ([int]): [optional] # noqa: E501
prefix_ns_string (str): [optional] # noqa: E501
prefix_ns_number (float): [optional] # noqa: E501
prefix_ns_integer (int): [optional] # noqa: E501
prefix_ns_boolean (bool): [optional] # noqa: E501
prefix_ns_array ([int]): [optional] # noqa: E501
prefix_ns_wrapped_array ([int]): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
self.__set_item(var_name, var_value)
def __setitem__(self, name, value):
"""this allows us to set values with instance[field_name] = val"""
self.__set_item(name, value)
def __getitem__(self, name):
"""this allows us to get a value with val = instance[field_name]"""
return self.__get_item(name)
@property
def attribute_string(self):
"""Gets the attribute_string of this XmlItem. # noqa: E501
Returns:
(str): The attribute_string of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_string')
@attribute_string.setter
def attribute_string(self, value):
"""Sets the attribute_string of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_string', value)
@property
def attribute_number(self):
"""Gets the attribute_number of this XmlItem. # noqa: E501
Returns:
(float): The attribute_number of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_number')
@attribute_number.setter
def attribute_number(self, value):
"""Sets the attribute_number of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_number', value)
@property
def attribute_integer(self):
"""Gets the attribute_integer of this XmlItem. # noqa: E501
Returns:
(int): The attribute_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_integer')
@attribute_integer.setter
def attribute_integer(self, value):
"""Sets the attribute_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_integer', value)
@property
def attribute_boolean(self):
"""Gets the attribute_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The attribute_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_boolean')
@attribute_boolean.setter
def attribute_boolean(self, value):
"""Sets the attribute_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_boolean', value)
@property
def wrapped_array(self):
"""Gets the wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('wrapped_array')
@wrapped_array.setter
def wrapped_array(self, value):
"""Sets the wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('wrapped_array', value)
@property
def name_string(self):
"""Gets the name_string of this XmlItem. # noqa: E501
Returns:
(str): The name_string of this XmlItem. # noqa: E501
"""
return self.__get_item('name_string')
@name_string.setter
def name_string(self, value):
"""Sets the name_string of this XmlItem. # noqa: E501
"""
return self.__set_item('name_string', value)
@property
def name_number(self):
"""Gets the name_number of this XmlItem. # noqa: E501
Returns:
(float): The name_number of this XmlItem. # noqa: E501
"""
return self.__get_item('name_number')
@name_number.setter
def name_number(self, value):
"""Sets the name_number of this XmlItem. # noqa: E501
"""
return self.__set_item('name_number', value)
@property
def name_integer(self):
"""Gets the name_integer of this XmlItem. # noqa: E501
Returns:
(int): The name_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('name_integer')
@name_integer.setter
def name_integer(self, value):
"""Sets the name_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('name_integer', value)
@property
def name_boolean(self):
"""Gets the name_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The name_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('name_boolean')
@name_boolean.setter
def name_boolean(self, value):
"""Sets the name_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('name_boolean', value)
@property
def name_array(self):
"""Gets the name_array of this XmlItem. # noqa: E501
Returns:
([int]): The name_array of this XmlItem. # noqa: E501
"""
return self.__get_item('name_array')
@name_array.setter
def name_array(self, value):
"""Sets the name_array of this XmlItem. # noqa: E501
"""
return self.__set_item('name_array', value)
@property
def name_wrapped_array(self):
"""Gets the name_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The name_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('name_wrapped_array')
@name_wrapped_array.setter
def name_wrapped_array(self, value):
"""Sets the name_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('name_wrapped_array', value)
@property
def prefix_string(self):
"""Gets the prefix_string of this XmlItem. # noqa: E501
Returns:
(str): The prefix_string of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_string')
@prefix_string.setter
def prefix_string(self, value):
"""Sets the prefix_string of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_string', value)
@property
def prefix_number(self):
"""Gets the prefix_number of this XmlItem. # noqa: E501
Returns:
(float): The prefix_number of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_number')
@prefix_number.setter
def prefix_number(self, value):
"""Sets the prefix_number of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_number', value)
@property
def prefix_integer(self):
"""Gets the prefix_integer of this XmlItem. # noqa: E501
Returns:
(int): The prefix_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_integer')
@prefix_integer.setter
def prefix_integer(self, value):
"""Sets the prefix_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_integer', value)
@property
def prefix_boolean(self):
"""Gets the prefix_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The prefix_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_boolean')
@prefix_boolean.setter
def prefix_boolean(self, value):
"""Sets the prefix_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_boolean', value)
@property
def prefix_array(self):
"""Gets the prefix_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_array')
@prefix_array.setter
def prefix_array(self, value):
"""Sets the prefix_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_array', value)
@property
def prefix_wrapped_array(self):
"""Gets the prefix_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_wrapped_array')
@prefix_wrapped_array.setter
def prefix_wrapped_array(self, value):
"""Sets the prefix_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_wrapped_array', value)
@property
def namespace_string(self):
"""Gets the namespace_string of this XmlItem. # noqa: E501
Returns:
(str): The namespace_string of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_string')
@namespace_string.setter
def namespace_string(self, value):
"""Sets the namespace_string of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_string', value)
@property
def namespace_number(self):
"""Gets the namespace_number of this XmlItem. # noqa: E501
Returns:
(float): The namespace_number of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_number')
@namespace_number.setter
def namespace_number(self, value):
"""Sets the namespace_number of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_number', value)
@property
def namespace_integer(self):
"""Gets the namespace_integer of this XmlItem. # noqa: E501
Returns:
(int): The namespace_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_integer')
@namespace_integer.setter
def namespace_integer(self, value):
"""Sets the namespace_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_integer', value)
@property
def namespace_boolean(self):
"""Gets the namespace_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The namespace_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_boolean')
@namespace_boolean.setter
def namespace_boolean(self, value):
"""Sets the namespace_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_boolean', value)
@property
def namespace_array(self):
"""Gets the namespace_array of this XmlItem. # noqa: E501
Returns:
([int]): The namespace_array of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_array')
@namespace_array.setter
def namespace_array(self, value):
"""Sets the namespace_array of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_array', value)
@property
def namespace_wrapped_array(self):
"""Gets the namespace_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The namespace_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_wrapped_array')
@namespace_wrapped_array.setter
def namespace_wrapped_array(self, value):
"""Sets the namespace_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_wrapped_array', value)
@property
def prefix_ns_string(self):
"""Gets the prefix_ns_string of this XmlItem. # noqa: E501
Returns:
(str): The prefix_ns_string of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_string')
@prefix_ns_string.setter
def prefix_ns_string(self, value):
"""Sets the prefix_ns_string of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_string', value)
@property
def prefix_ns_number(self):
"""Gets the prefix_ns_number of this XmlItem. # noqa: E501
Returns:
(float): The prefix_ns_number of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_number')
@prefix_ns_number.setter
def prefix_ns_number(self, value):
"""Sets the prefix_ns_number of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_number', value)
@property
def prefix_ns_integer(self):
"""Gets the prefix_ns_integer of this XmlItem. # noqa: E501
Returns:
(int): The prefix_ns_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_integer')
@prefix_ns_integer.setter
def prefix_ns_integer(self, value):
"""Sets the prefix_ns_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_integer', value)
@property
def prefix_ns_boolean(self):
"""Gets the prefix_ns_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The prefix_ns_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_boolean')
@prefix_ns_boolean.setter
def prefix_ns_boolean(self, value):
"""Sets the prefix_ns_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_boolean', value)
@property
def prefix_ns_array(self):
"""Gets the prefix_ns_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_ns_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_array')
@prefix_ns_array.setter
def prefix_ns_array(self, value):
"""Sets the prefix_ns_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_array', value)
@property
def prefix_ns_wrapped_array(self):
"""Gets the prefix_ns_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_ns_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_wrapped_array')
@prefix_ns_wrapped_array.setter
def prefix_ns_wrapped_array(self, value):
"""Sets the prefix_ns_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_wrapped_array', value)
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XmlItem):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in six.iteritems(self._data_store):
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if (not six.PY3 and
len(types) == 2 and unicode in types): # noqa: F821
vals_equal = (
this_val.encode('utf-8') == that_val.encode('utf-8')
)
if not vals_equal:
return False
return True
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
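# Hedged usage sketch (hypothetical values); assumes the complete generated class,
# including the private __set_item/__get_item helpers shown in the unfiltered source below.
if __name__ == "__main__":
    item = XmlItem(attribute_string="string", attribute_integer=1)
    assert item.attribute_string == "string"
    assert item["attribute_integer"] == 1
    print(item.to_dict())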
| 35.135318
| 174
| 0.605507
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint # noqa: F401
import re # noqa: F401
import six # noqa: F401
from petstore_api.exceptions import ( # noqa: F401
ApiKeyError,
ApiTypeError,
ApiValueError,
)
from petstore_api.model_utils import ( # noqa: F401
ModelNormal,
ModelSimple,
check_allowed_values,
check_validations,
date,
datetime,
file_type,
get_simple_class,
int,
model_to_dict,
none_type,
str,
type_error_message,
validate_and_convert_types
)
class XmlItem(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
        and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
openapi_types (dict): The key is attribute name
and the value is attribute type.
validations (dict): The key is the tuple path to the attribute
        and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
attribute_map = {
'attribute_string': 'attribute_string', # noqa: E501
'attribute_number': 'attribute_number', # noqa: E501
'attribute_integer': 'attribute_integer', # noqa: E501
'attribute_boolean': 'attribute_boolean', # noqa: E501
'wrapped_array': 'wrapped_array', # noqa: E501
'name_string': 'name_string', # noqa: E501
'name_number': 'name_number', # noqa: E501
'name_integer': 'name_integer', # noqa: E501
'name_boolean': 'name_boolean', # noqa: E501
'name_array': 'name_array', # noqa: E501
'name_wrapped_array': 'name_wrapped_array', # noqa: E501
'prefix_string': 'prefix_string', # noqa: E501
'prefix_number': 'prefix_number', # noqa: E501
'prefix_integer': 'prefix_integer', # noqa: E501
'prefix_boolean': 'prefix_boolean', # noqa: E501
'prefix_array': 'prefix_array', # noqa: E501
'prefix_wrapped_array': 'prefix_wrapped_array', # noqa: E501
'namespace_string': 'namespace_string', # noqa: E501
'namespace_number': 'namespace_number', # noqa: E501
'namespace_integer': 'namespace_integer', # noqa: E501
'namespace_boolean': 'namespace_boolean', # noqa: E501
'namespace_array': 'namespace_array', # noqa: E501
'namespace_wrapped_array': 'namespace_wrapped_array', # noqa: E501
'prefix_ns_string': 'prefix_ns_string', # noqa: E501
'prefix_ns_number': 'prefix_ns_number', # noqa: E501
'prefix_ns_integer': 'prefix_ns_integer', # noqa: E501
'prefix_ns_boolean': 'prefix_ns_boolean', # noqa: E501
'prefix_ns_array': 'prefix_ns_array', # noqa: E501
'prefix_ns_wrapped_array': 'prefix_ns_wrapped_array' # noqa: E501
}
openapi_types = {
'attribute_string': (str,), # noqa: E501
'attribute_number': (float,), # noqa: E501
'attribute_integer': (int,), # noqa: E501
'attribute_boolean': (bool,), # noqa: E501
'wrapped_array': ([int],), # noqa: E501
'name_string': (str,), # noqa: E501
'name_number': (float,), # noqa: E501
'name_integer': (int,), # noqa: E501
'name_boolean': (bool,), # noqa: E501
'name_array': ([int],), # noqa: E501
'name_wrapped_array': ([int],), # noqa: E501
'prefix_string': (str,), # noqa: E501
'prefix_number': (float,), # noqa: E501
'prefix_integer': (int,), # noqa: E501
'prefix_boolean': (bool,), # noqa: E501
'prefix_array': ([int],), # noqa: E501
'prefix_wrapped_array': ([int],), # noqa: E501
'namespace_string': (str,), # noqa: E501
'namespace_number': (float,), # noqa: E501
'namespace_integer': (int,), # noqa: E501
'namespace_boolean': (bool,), # noqa: E501
'namespace_array': ([int],), # noqa: E501
'namespace_wrapped_array': ([int],), # noqa: E501
'prefix_ns_string': (str,), # noqa: E501
'prefix_ns_number': (float,), # noqa: E501
'prefix_ns_integer': (int,), # noqa: E501
'prefix_ns_boolean': (bool,), # noqa: E501
'prefix_ns_array': ([int],), # noqa: E501
'prefix_ns_wrapped_array': ([int],), # noqa: E501
}
validations = {
}
additional_properties_type = None
discriminator = None
def __init__(self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs): # noqa: E501
"""XmlItem - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
attribute_string (str): [optional] # noqa: E501
attribute_number (float): [optional] # noqa: E501
attribute_integer (int): [optional] # noqa: E501
attribute_boolean (bool): [optional] # noqa: E501
wrapped_array ([int]): [optional] # noqa: E501
name_string (str): [optional] # noqa: E501
name_number (float): [optional] # noqa: E501
name_integer (int): [optional] # noqa: E501
name_boolean (bool): [optional] # noqa: E501
name_array ([int]): [optional] # noqa: E501
name_wrapped_array ([int]): [optional] # noqa: E501
prefix_string (str): [optional] # noqa: E501
prefix_number (float): [optional] # noqa: E501
prefix_integer (int): [optional] # noqa: E501
prefix_boolean (bool): [optional] # noqa: E501
prefix_array ([int]): [optional] # noqa: E501
prefix_wrapped_array ([int]): [optional] # noqa: E501
namespace_string (str): [optional] # noqa: E501
namespace_number (float): [optional] # noqa: E501
namespace_integer (int): [optional] # noqa: E501
namespace_boolean (bool): [optional] # noqa: E501
namespace_array ([int]): [optional] # noqa: E501
namespace_wrapped_array ([int]): [optional] # noqa: E501
prefix_ns_string (str): [optional] # noqa: E501
prefix_ns_number (float): [optional] # noqa: E501
prefix_ns_integer (int): [optional] # noqa: E501
prefix_ns_boolean (bool): [optional] # noqa: E501
prefix_ns_array ([int]): [optional] # noqa: E501
prefix_ns_wrapped_array ([int]): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
self.__set_item(var_name, var_value)
def __set_item(self, name, value):
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
if name in self.openapi_types:
required_types_mixed = self.openapi_types[name]
elif self.additional_properties_type is None:
raise ApiKeyError(
"{0} has no key '{1}'".format(type(self).__name__, name),
path_to_item
)
elif self.additional_properties_type is not None:
required_types_mixed = self.additional_properties_type
if get_simple_class(name) != str:
error_msg = type_error_message(
var_name=name,
var_value=name,
valid_classes=(str,),
key_type=True
)
raise ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=(str,),
key_type=True
)
if self._check_type:
value = validate_and_convert_types(
value, required_types_mixed, path_to_item, self._from_server,
self._check_type, configuration=self._configuration)
if (name,) in self.allowed_values:
check_allowed_values(
self.allowed_values,
(name,),
value
)
if (name,) in self.validations:
check_validations(
self.validations,
(name,),
value
)
self._data_store[name] = value
def __get_item(self, name):
if name in self._data_store:
return self._data_store[name]
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
raise ApiKeyError(
"{0} has no key '{1}'".format(type(self).__name__, name),
[name]
)
def __setitem__(self, name, value):
"""this allows us to set values with instance[field_name] = val"""
self.__set_item(name, value)
def __getitem__(self, name):
"""this allows us to get a value with val = instance[field_name]"""
return self.__get_item(name)
@property
def attribute_string(self):
"""Gets the attribute_string of this XmlItem. # noqa: E501
Returns:
(str): The attribute_string of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_string')
@attribute_string.setter
def attribute_string(self, value):
"""Sets the attribute_string of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_string', value)
@property
def attribute_number(self):
"""Gets the attribute_number of this XmlItem. # noqa: E501
Returns:
(float): The attribute_number of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_number')
@attribute_number.setter
def attribute_number(self, value):
"""Sets the attribute_number of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_number', value)
@property
def attribute_integer(self):
"""Gets the attribute_integer of this XmlItem. # noqa: E501
Returns:
(int): The attribute_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_integer')
@attribute_integer.setter
def attribute_integer(self, value):
"""Sets the attribute_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_integer', value)
@property
def attribute_boolean(self):
"""Gets the attribute_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The attribute_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_boolean')
@attribute_boolean.setter
def attribute_boolean(self, value):
"""Sets the attribute_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_boolean', value)
@property
def wrapped_array(self):
"""Gets the wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('wrapped_array')
@wrapped_array.setter
def wrapped_array(self, value):
"""Sets the wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('wrapped_array', value)
@property
def name_string(self):
"""Gets the name_string of this XmlItem. # noqa: E501
Returns:
(str): The name_string of this XmlItem. # noqa: E501
"""
return self.__get_item('name_string')
@name_string.setter
def name_string(self, value):
"""Sets the name_string of this XmlItem. # noqa: E501
"""
return self.__set_item('name_string', value)
@property
def name_number(self):
"""Gets the name_number of this XmlItem. # noqa: E501
Returns:
(float): The name_number of this XmlItem. # noqa: E501
"""
return self.__get_item('name_number')
@name_number.setter
def name_number(self, value):
"""Sets the name_number of this XmlItem. # noqa: E501
"""
return self.__set_item('name_number', value)
@property
def name_integer(self):
"""Gets the name_integer of this XmlItem. # noqa: E501
Returns:
(int): The name_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('name_integer')
@name_integer.setter
def name_integer(self, value):
"""Sets the name_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('name_integer', value)
@property
def name_boolean(self):
"""Gets the name_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The name_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('name_boolean')
@name_boolean.setter
def name_boolean(self, value):
"""Sets the name_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('name_boolean', value)
@property
def name_array(self):
"""Gets the name_array of this XmlItem. # noqa: E501
Returns:
([int]): The name_array of this XmlItem. # noqa: E501
"""
return self.__get_item('name_array')
@name_array.setter
def name_array(self, value):
"""Sets the name_array of this XmlItem. # noqa: E501
"""
return self.__set_item('name_array', value)
@property
def name_wrapped_array(self):
"""Gets the name_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The name_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('name_wrapped_array')
@name_wrapped_array.setter
def name_wrapped_array(self, value):
"""Sets the name_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('name_wrapped_array', value)
@property
def prefix_string(self):
"""Gets the prefix_string of this XmlItem. # noqa: E501
Returns:
(str): The prefix_string of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_string')
@prefix_string.setter
def prefix_string(self, value):
"""Sets the prefix_string of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_string', value)
@property
def prefix_number(self):
"""Gets the prefix_number of this XmlItem. # noqa: E501
Returns:
(float): The prefix_number of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_number')
@prefix_number.setter
def prefix_number(self, value):
"""Sets the prefix_number of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_number', value)
@property
def prefix_integer(self):
"""Gets the prefix_integer of this XmlItem. # noqa: E501
Returns:
(int): The prefix_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_integer')
@prefix_integer.setter
def prefix_integer(self, value):
"""Sets the prefix_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_integer', value)
@property
def prefix_boolean(self):
"""Gets the prefix_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The prefix_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_boolean')
@prefix_boolean.setter
def prefix_boolean(self, value):
"""Sets the prefix_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_boolean', value)
@property
def prefix_array(self):
"""Gets the prefix_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_array')
@prefix_array.setter
def prefix_array(self, value):
"""Sets the prefix_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_array', value)
@property
def prefix_wrapped_array(self):
"""Gets the prefix_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_wrapped_array')
@prefix_wrapped_array.setter
def prefix_wrapped_array(self, value):
"""Sets the prefix_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_wrapped_array', value)
@property
def namespace_string(self):
"""Gets the namespace_string of this XmlItem. # noqa: E501
Returns:
(str): The namespace_string of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_string')
@namespace_string.setter
def namespace_string(self, value):
"""Sets the namespace_string of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_string', value)
@property
def namespace_number(self):
"""Gets the namespace_number of this XmlItem. # noqa: E501
Returns:
(float): The namespace_number of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_number')
@namespace_number.setter
def namespace_number(self, value):
"""Sets the namespace_number of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_number', value)
@property
def namespace_integer(self):
"""Gets the namespace_integer of this XmlItem. # noqa: E501
Returns:
(int): The namespace_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_integer')
@namespace_integer.setter
def namespace_integer(self, value):
"""Sets the namespace_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_integer', value)
@property
def namespace_boolean(self):
"""Gets the namespace_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The namespace_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_boolean')
@namespace_boolean.setter
def namespace_boolean(self, value):
"""Sets the namespace_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_boolean', value)
@property
def namespace_array(self):
"""Gets the namespace_array of this XmlItem. # noqa: E501
Returns:
([int]): The namespace_array of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_array')
@namespace_array.setter
def namespace_array(self, value):
"""Sets the namespace_array of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_array', value)
@property
def namespace_wrapped_array(self):
"""Gets the namespace_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The namespace_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_wrapped_array')
@namespace_wrapped_array.setter
def namespace_wrapped_array(self, value):
"""Sets the namespace_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_wrapped_array', value)
@property
def prefix_ns_string(self):
"""Gets the prefix_ns_string of this XmlItem. # noqa: E501
Returns:
(str): The prefix_ns_string of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_string')
@prefix_ns_string.setter
def prefix_ns_string(self, value):
"""Sets the prefix_ns_string of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_string', value)
@property
def prefix_ns_number(self):
"""Gets the prefix_ns_number of this XmlItem. # noqa: E501
Returns:
(float): The prefix_ns_number of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_number')
@prefix_ns_number.setter
def prefix_ns_number(self, value):
"""Sets the prefix_ns_number of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_number', value)
@property
def prefix_ns_integer(self):
"""Gets the prefix_ns_integer of this XmlItem. # noqa: E501
Returns:
(int): The prefix_ns_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_integer')
@prefix_ns_integer.setter
def prefix_ns_integer(self, value):
"""Sets the prefix_ns_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_integer', value)
@property
def prefix_ns_boolean(self):
"""Gets the prefix_ns_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The prefix_ns_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_boolean')
@prefix_ns_boolean.setter
def prefix_ns_boolean(self, value):
"""Sets the prefix_ns_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_boolean', value)
@property
def prefix_ns_array(self):
"""Gets the prefix_ns_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_ns_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_array')
@prefix_ns_array.setter
def prefix_ns_array(self, value):
"""Sets the prefix_ns_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_array', value)
@property
def prefix_ns_wrapped_array(self):
"""Gets the prefix_ns_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_ns_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_wrapped_array')
@prefix_ns_wrapped_array.setter
def prefix_ns_wrapped_array(self, value):
"""Sets the prefix_ns_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_wrapped_array', value)
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XmlItem):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in six.iteritems(self._data_store):
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if (not six.PY3 and
len(types) == 2 and unicode in types): # noqa: F821
vals_equal = (
this_val.encode('utf-8') == that_val.encode('utf-8')
)
if not vals_equal:
return False
return True
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
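# Illustrative helper (not part of the generated client): it mirrors the per-value
# comparison performed by __eq__ above, including the Python 2 str/unicode case.
# It assumes `six` is imported at the top of this module, as __eq__ already uses it.
def _xmlitem_values_equal(this_val, that_val):
    types = {this_val.__class__, that_val.__class__}
    if not six.PY3 and len(types) == 2 and unicode in types:  # noqa: F821
        return this_val.encode('utf-8') == that_val.encode('utf-8')
    return this_val == that_val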
| 1,959
| 0
| 54
|
9cc0213534d4746545b92434c9c6e16f37a99b1a
| 4,745
|
py
|
Python
|
Grundgeruest/models.py
|
wmles/olymp
|
97b1a256982c2a75c39ba3a855b63a147d4409c5
|
[
"MIT"
] | null | null | null |
Grundgeruest/models.py
|
wmles/olymp
|
97b1a256982c2a75c39ba3a855b63a147d4409c5
|
[
"MIT"
] | null | null | null |
Grundgeruest/models.py
|
wmles/olymp
|
97b1a256982c2a75c39ba3a855b63a147d4409c5
|
[
"MIT"
] | null | null | null |
"""
Project-wide models: users/profiles
"""
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.utils.translation import ugettext as _
from userena.models import UserenaBaseProfile
from django.core.validators import RegexValidator
import random, string
from django.template.defaultfilters import slugify
from django.urls import reverse
def knoepfe_kopf(user):
""" gibt Knöpfe für Kopfleiste als Liste von Tupeln zurück """
anmelden = (reverse('userena_signin'), 'Anmelden')
registrieren = (reverse('userena_signup'), 'Registrieren')
abmelden = (reverse('userena_signout'), 'Abmelden')
profil = lambda nutzer: (reverse('userena_profile_detail',
kwargs={'username': nutzer.username}), 'Profil')
spam = ('spam', 'spam')
admin = ('/admin/', 'admin')
if user.username == 'admin':
liste = [abmelden, profil(user), spam]
elif user.is_authenticated():
liste = [abmelden, profil(user)]
else:
liste = [anmelden, registrieren]
if user.is_staff and user.get_all_permissions():
liste.append(admin)
return liste
def knoepfe_menü(user):
""" gibt Knöpfe für Menüleiste als Liste von Tupeln zurück """
alle = {
'index': ('/', 'Startseite'),
'olymp': (reverse('Wettbewerbe:index'), 'Wettbewerbe'),
'ehemalige': (reverse('Ehemalige:index'), 'Ehemalige'),
'impressum': (reverse('impressum'), 'Impressum'),
'db': ('https://olymp.piokg.de/static/db.pdf', 'Datenbanklayout'), # quick and very dirty :)
'todo': ('/todo/', 'ToDo-Liste'),
}
if user.username == 'admin':
return [alle[name] for name in ('index', 'olymp', 'ehemalige', 'todo', 'db')]
else:
return [alle[name] for name in ('index', 'olymp', 'db', 'impressum')]
class Nutzer(AbstractUser):
""" Nutzer-Klasse """
def knoepfe_kopf(nutzer):
""" soll Liste von Paaren für Knöpfe der Kopfleiste ausgeben
Nutzt im Moment die module-fkt gleichen Namens, könnte später vll
die Gruppenzugehörigkeit heranziehen, etc, ist flexibel """
return knoepfe_kopf(nutzer)
def knoepfe_menü(self):
""" soll Liste von Paaren für Knöpfe der Menüleiste ausgeben
Nutzt im Moment die module-fkt gleichen Namens, könnte später vll
die Gruppenzugehörigkeit heranziehen, etc, ist flexibel """
return knoepfe_menü(self)
| 32.278912
| 100
| 0.616228
|
"""
Project-wide models: users/profiles
"""
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.utils.translation import ugettext as _
from userena.models import UserenaBaseProfile
from django.core.validators import RegexValidator
import random, string
from django.template.defaultfilters import slugify
from django.urls import reverse
class MinimalModel(models.Model):
zeit_erstellt = models.DateTimeField(
auto_now_add=True,
editable=False)
zeit_geaendert = models.DateTimeField(
auto_now=True,
editable=False)
class Meta:
abstract = True
ordering = ["-zeit_geaendert"]
def __str__(self):
        return self.__class__.__name__ + ' geändert ' + str(self.zeit_geaendert)
class Grundklasse(MinimalModel):
bezeichnung = models.CharField(max_length=30)
slug = models.SlugField(
max_length=30,
null=False,
blank=True)
def save(self, **kwargs):
if not self.slug:
self.slug = slugify(self.bezeichnung)
        super(Grundklasse, self).save(**kwargs)
class Meta:
abstract = True
ordering = ["bezeichnung"]
def __str__(self):
return str(self.bezeichnung)
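# Illustrative sketch (not part of the original file): "Beispiel" is a hypothetical
# concrete model that only demonstrates how Grundklasse fills the slug from
# `bezeichnung` on save when no slug is given.
# Expected behaviour, assuming a configured database:
#   b = Beispiel(bezeichnung='Erste Runde'); b.save(); b.slug == 'erste-runde'
class Beispiel(Grundklasse):
    """ minimal concrete subclass used only to demonstrate the slug handling """
    pass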
def knoepfe_kopf(user):
""" gibt Knöpfe für Kopfleiste als Liste von Tupeln zurück """
anmelden = (reverse('userena_signin'), 'Anmelden')
registrieren = (reverse('userena_signup'), 'Registrieren')
abmelden = (reverse('userena_signout'), 'Abmelden')
profil = lambda nutzer: (reverse('userena_profile_detail',
kwargs={'username': nutzer.username}), 'Profil')
spam = ('spam', 'spam')
admin = ('/admin/', 'admin')
if user.username == 'admin':
liste = [abmelden, profil(user), spam]
elif user.is_authenticated():
liste = [abmelden, profil(user)]
else:
liste = [anmelden, registrieren]
if user.is_staff and user.get_all_permissions():
liste.append(admin)
return liste
def knoepfe_menü(user):
""" gibt Knöpfe für Menüleiste als Liste von Tupeln zurück """
alle = {
'index': ('/', 'Startseite'),
'olymp': (reverse('Wettbewerbe:index'), 'Wettbewerbe'),
'ehemalige': (reverse('Ehemalige:index'), 'Ehemalige'),
'impressum': (reverse('impressum'), 'Impressum'),
'db': ('https://olymp.piokg.de/static/db.pdf', 'Datenbanklayout'), # quick and very dirty :)
'todo': ('/todo/', 'ToDo-Liste'),
}
if user.username == 'admin':
return [alle[name] for name in ('index', 'olymp', 'ehemalige', 'todo', 'db')]
else:
return [alle[name] for name in ('index', 'olymp', 'db', 'impressum')]
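# Illustrative sketch (not from the original project): a hypothetical context
# processor showing how the (url, label) tuples built above could be handed to
# templates that render the header and menu bars.
def navigation(request):
    """ hypothetical context processor exposing the button lists to templates """
    return {
        'kopf_knoepfe': knoepfe_kopf(request.user),
        'menue_knoepfe': knoepfe_menü(request.user),
    }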
class Nutzer(AbstractUser):
""" Nutzer-Klasse """
def knoepfe_kopf(nutzer):
""" soll Liste von Paaren für Knöpfe der Kopfleiste ausgeben
Nutzt im Moment die module-fkt gleichen Namens, könnte später vll
die Gruppenzugehörigkeit heranziehen, etc, ist flexibel """
return knoepfe_kopf(nutzer)
def knoepfe_menü(self):
""" soll Liste von Paaren für Knöpfe der Menüleiste ausgeben
Nutzt im Moment die module-fkt gleichen Namens, könnte später vll
die Gruppenzugehörigkeit heranziehen, etc, ist flexibel """
return knoepfe_menü(self)
def save(self, *args, **kwargs):
if not self.username:
self.username = ''.join(random.sample(string.ascii_lowercase, 20))
super(Nutzer, self).save(*args, **kwargs)
class Meta:
verbose_name_plural = 'Nutzer'
verbose_name = 'Nutzer'
def __str__(self):
return 'Nutzer %s (%s)' % (self.username, self.email)
class Nutzerprofil(UserenaBaseProfile):
user = models.OneToOneField(settings.AUTH_USER_MODEL,
unique=True,
verbose_name=_('Nutzer'),
related_name='my_profile')
geschlecht = models.CharField(
max_length=1,
choices=[('w', 'weiblich'), ('m', 'männlich'), (' ', 'sonstiges')],
default=' ')
tel = models.CharField(
max_length=20,
null=True, blank=True)
strasse = models.CharField(
max_length=30,
blank=True)
plz = models.CharField(
        max_length=5,
validators=[RegexValidator('^[0-9]+$')],
blank=True)
ort = models.CharField(
max_length=30,
blank=True)
anredename = models.CharField(
max_length=30,
null=True, blank=True)
    class Meta:
verbose_name = 'Nutzerprofil'
verbose_name_plural = 'Nutzerprofile'
| 461
| 1,595
| 158
|
b3e937dcb8ac9e14cc91bc39a1395544f1f257fb
| 8,595
|
py
|
Python
|
dataset/datasets.py
|
notplus/FaceLandmark_PFLD_UltraLight
|
89aa36d5369f7d8d6661eb67d8490c774ea4685a
|
[
"Apache-2.0"
] | 38
|
2021-05-10T01:22:44.000Z
|
2022-03-30T06:54:39.000Z
|
dataset/datasets.py
|
notplus/FaceLandmark_PFLD_UltraLight
|
89aa36d5369f7d8d6661eb67d8490c774ea4685a
|
[
"Apache-2.0"
] | 7
|
2021-06-01T06:39:47.000Z
|
2022-03-16T05:43:50.000Z
|
dataset/datasets.py
|
notplus/FaceLandmark_PFLD_UltraLight
|
89aa36d5369f7d8d6661eb67d8490c774ea4685a
|
[
"Apache-2.0"
] | 14
|
2021-05-10T01:22:46.000Z
|
2022-03-30T06:54:42.000Z
|
import numpy as np
import cv2
import sys
import torch
sys.path.append('..')
from torch.utils import data
from torch.utils.data import DataLoader
if __name__ == '__main__':
file_list = './data/test_data/list.txt'
wlfwdataset = WLFWDatasets(file_list)
dataloader = DataLoader(wlfwdataset, batch_size=256, shuffle=True, num_workers=0, drop_last=False)
    for img, landmark in dataloader:
        print("img shape", img.shape)
        print("landmark size", landmark.size())
| 33.705882
| 166
| 0.601745
|
import random  # used by random_noise() below
import numpy as np
import cv2
import sys
import torch
from skimage.transform import resize  # scale() below relies on skimage's resize signature
sys.path.append('..')
from torch.utils import data
from torch.utils.data import DataLoader
def flip(img, annotation):
# parse
img = np.fliplr(img).copy()
h, w = img.shape[:2]
x_min, y_min, x_max, y_max = annotation[0:4]
landmark_x = annotation[4::2]
landmark_y = annotation[4 + 1::2]
bbox = np.array([w - x_max, y_min, w - x_min, y_max])
for i in range(len(landmark_x)):
landmark_x[i] = w - landmark_x[i]
new_annotation = list()
    # use the mirrored bounding box so the bbox stays consistent with the flipped landmarks
    new_annotation.extend(bbox.tolist())
for i in range(len(landmark_x)):
new_annotation.append(landmark_x[i])
new_annotation.append(landmark_y[i])
return img, new_annotation
def channel_shuffle(img, annotation):
if (img.shape[2] == 3):
ch_arr = [0, 1, 2]
np.random.shuffle(ch_arr)
img = img[..., ch_arr]
return img, annotation
def random_noise(img, annotation, limit=[0, 0.2], p=0.5):
if random.random() < p:
H, W = img.shape[:2]
noise = np.random.uniform(limit[0], limit[1], size=(H, W)) * 255
img = img + noise[:, :, np.newaxis] * np.array([1, 1, 1])
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_brightness(img, annotation, brightness=0.3):
alpha = 1 + np.random.uniform(-brightness, brightness)
    img = alpha * img
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_contrast(img, annotation, contrast=0.3):
coef = np.array([[[0.114, 0.587, 0.299]]]) # rgb to gray (YCbCr)
alpha = 1.0 + np.random.uniform(-contrast, contrast)
gray = img * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
img = alpha * img + gray
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_saturation(img, annotation, saturation=0.5):
    coef = np.array([[[0.299, 0.587, 0.114]]])
alpha = np.random.uniform(-saturation, saturation)
gray = img * coef
gray = np.sum(gray, axis=2, keepdims=True)
img = alpha * img + (1.0 - alpha) * gray
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_hue(image, annotation, hue=0.5):
h = int(np.random.uniform(-hue, hue) * 180)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + h) % 180
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return image, annotation
def scale(img, annotation):
f_xy = np.random.uniform(-0.4, 0.8)
origin_h, origin_w = img.shape[:2]
bbox = annotation[0:4]
landmark_x = annotation[4::2]
landmark_y = annotation[4 + 1::2]
h, w = int(origin_h * f_xy), int(origin_w * f_xy)
image = resize(img, (h, w), preserve_range=True, anti_aliasing=True, mode='constant').astype(np.uint8)
new_annotation = list()
for i in range(len(bbox)):
bbox[i] = bbox[i] * f_xy
new_annotation.append(bbox[i])
for i in range(len(landmark_x)):
landmark_x[i] = landmark_x[i] * f_xy
landmark_y[i] = landmark_y[i] * f_xy
new_annotation.append(landmark_x[i])
new_annotation.append(landmark_y[i])
return image, new_annotation
def rotate(img, annotation, alpha=30):
bbox = annotation[0:4]
landmark_x = annotation[4::2]
landmark_y = annotation[4 + 1::2]
center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
rot_mat = cv2.getRotationMatrix2D(center, alpha, 1)
img_rotated_by_alpha = cv2.warpAffine(img, rot_mat, (img.shape[1], img.shape[0]))
point_x = [bbox[0], bbox[2], bbox[0], bbox[2]]
point_y = [bbox[1], bbox[3], bbox[3], bbox[1]]
new_point_x = list()
new_point_y = list()
for (x, y) in zip(landmark_x, landmark_y):
new_point_x.append(rot_mat[0][0] * x + rot_mat[0][1] * y + rot_mat[0][2])
new_point_y.append(rot_mat[1][0] * x + rot_mat[1][1] * y + rot_mat[1][2])
new_annotation = list()
new_annotation.append(min(new_point_x))
new_annotation.append(min(new_point_y))
new_annotation.append(max(new_point_x))
new_annotation.append(max(new_point_y))
for (x, y) in zip(landmark_x, landmark_y):
new_annotation.append(rot_mat[0][0] * x + rot_mat[0][1] * y + rot_mat[0][2])
new_annotation.append(rot_mat[1][0] * x + rot_mat[1][1] * y + rot_mat[1][2])
return img_rotated_by_alpha, new_annotation
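# Illustrative sketch (not part of the original file): chaining a few of the
# augmentations above on one sample. `annotation` is laid out as
# [x_min, y_min, x_max, y_max, x1, y1, x2, y2, ...] in pixels; the image path
# used here is hypothetical.
def augment_example(image_path='data/test_data/sample.png'):
    img = cv2.imread(image_path)
    h, w = img.shape[:2]
    annotation = [0, 0, w - 1, h - 1, w / 2.0, h / 2.0]  # full-image bbox, one dummy landmark
    img, annotation = flip(img, annotation)
    img, annotation = random_brightness(img, annotation)
    img, annotation = rotate(img, annotation, alpha=15)
    return img, annotation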
def generate_FT(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
f = np.fft.fft2(image)
fshift = np.fft.fftshift(f)
fimg = np.log(np.abs(fshift) + 1)
maxx = -1
minn = 100000
for i in range(len(fimg)):
if maxx < max(fimg[i]):
maxx = max(fimg[i])
if minn > min(fimg[i]):
minn = min(fimg[i])
fimg = (fimg - minn + 1) / (maxx - minn + 1)
return fimg
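# Illustrative sketch (not part of the original file): generate_FT returns the
# log-magnitude spectrum rescaled into (0, 1], so it can be written out for
# inspection after scaling to 8-bit. The paths used here are hypothetical.
def save_ft_example(image_path='data/test_data/sample.png', out_path='sample_ft.jpg'):
    ft = generate_FT(cv2.imread(image_path))
    cv2.imwrite(out_path, (ft * 255).astype(np.uint8))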
def draw_labelmap(img, pt, sigma=1, type='Gaussian'):
# Draw a 2D gaussian
# Adopted from https://github.com/anewell/pose-hg-train/blob/master/src/pypose/draw.py
# Check that any part of the gaussian is in-bounds
ul = [int(int(pt[0]) - 3 * sigma), int(int(pt[1]) - 3 * sigma)]
br = [int(int(pt[0]) + 3 * sigma + 1), int(int(pt[1]) + 3 * sigma + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
# If not, just return the image as is
        return img  # to_torch is not defined in this file; return the array unchanged
# Generate gaussian
size = 6 * sigma + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
if type == 'Gaussian':
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
elif type == 'Cauchy':
g = sigma / (((x - x0) ** 2 + (y - y0) ** 2 + sigma ** 2) ** 1.5)
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return img
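# Illustrative sketch (not part of the original file): drawing a single Gaussian
# peak onto an empty 96x96 heatmap at pixel location (48, 48).
def heatmap_example():
    hm = np.zeros((96, 96), dtype=np.float32)
    hm = draw_labelmap(hm, (48, 48), sigma=1, type='Gaussian')
    assert hm.max() == 1.0  # the Gaussian is unnormalised, so its centre equals 1
    return hm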
class WLFWDatasets(data.Dataset):
def __init__(self, file_list, transforms=None):
self.line = None
self.lm_number = 98
self.img_size = 96
self.ft_size = self.img_size // 2
self.hm_size = self.img_size // 2
self.transforms = transforms
with open(file_list, 'r') as f:
self.lines = f.readlines()
def __getitem__(self, index):
self.line = self.lines[index].strip()
jpg_idx = self.line.find('png')
line_data = [self.line[:jpg_idx + 3]]
line_data.extend(self.line[jpg_idx + 4:].split())
self.line = line_data
self.img = cv2.imread(self.line[0])
# generate ft
# self.ft_img = generate_FT(self.img)
# self.ft_img = cv2.resize(self.ft_img, (self.ft_size, self.ft_size))
# self.ft_img = torch.from_numpy(self.ft_img).float()
# self.ft_img = torch.unsqueeze(self.ft_img, 0)
self.landmark = np.asarray(self.line[1:197], dtype=np.float32)
# generate heatmap
# self.heatmaps = np.zeros((self.lm_number, self.img_size, self.img_size))
# for idx in range(self.lm_number):
# self.heatmaps[idx, :, :] = draw_labelmap(self.heatmaps[idx, :, :], (self.landmark[idx * 2] * self.img_size, self.landmark[idx * 2 + 1] * self.img_size))
# self.heatmap = cv2.resize(self.heatmap, (self.hm_size, self.hm_size))
# self.heatmap = (self.heatmap * 255).astype(np.uint8)
# with open("heatmap.txt", "w") as f:
# for i in range(self.hm_size):
# str_ = ','.join(str(s) for s in self.heatmap[i, :])
# f.write(str_ + '\n')
# cv2.imwrite('heatmap.jpg', self.heatmap)
if self.transforms:
self.img = self.transforms(self.img)
return self.img, self.landmark
def __len__(self):
return len(self.lines)
if __name__ == '__main__':
file_list = './data/test_data/list.txt'
wlfwdataset = WLFWDatasets(file_list)
dataloader = DataLoader(wlfwdataset, batch_size=256, shuffle=True, num_workers=0, drop_last=False)
    for img, landmark in dataloader:
        print("img shape", img.shape)
        print("landmark size", landmark.size())
| 7,612
| 12
| 356
|
966bc627671a534dd1312e08c0b1d98dc88fba85
| 1,671
|
py
|
Python
|
tests/test_masks.py
|
nexpy/nexusformat
|
8c7eccaae2e60d7643e473093c5087b653270f7b
|
[
"BSD-3-Clause-Clear"
] | 9
|
2015-01-12T22:26:35.000Z
|
2020-06-02T08:17:24.000Z
|
tests/test_masks.py
|
rayosborn/nexusformat
|
8c7eccaae2e60d7643e473093c5087b653270f7b
|
[
"BSD-3-Clause-Clear"
] | 23
|
2015-12-15T11:57:20.000Z
|
2021-11-17T18:03:18.000Z
|
tests/test_masks.py
|
rayosborn/nexusformat
|
8c7eccaae2e60d7643e473093c5087b653270f7b
|
[
"BSD-3-Clause-Clear"
] | 12
|
2015-05-22T18:27:43.000Z
|
2022-03-31T12:35:19.000Z
|
import os
import numpy as np
import pytest
from nexusformat.nexus.tree import NXfield, NXgroup, NXroot, nxload
@pytest.mark.parametrize("save", [False, True])
| 33.42
| 76
| 0.652902
|
import os
import numpy as np
import pytest
from nexusformat.nexus.tree import NXfield, NXgroup, NXroot, nxload
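# Hedged sketch (not part of the original test module): the `arr1D` fixture used by
# the tests below lives elsewhere in the test suite (e.g. a conftest.py). A minimal
# stand-in consistent with the indices these tests use could look like this.
@pytest.fixture
def arr1D():
    return np.arange(100, dtype=np.float64)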
def test_field_masks(arr1D):
field = NXfield(arr1D)
field[10:20] = np.ma.masked
assert isinstance(field.nxvalue, np.ma.masked_array)
assert np.all(field[8:12].mask == np.array([False, False, True, True]))
assert np.all(field.mask[8:12] == np.array([False, False, True, True]))
assert np.ma.is_masked(field[8:12].nxvalue)
assert np.ma.is_masked(field.nxvalue[10])
assert np.ma.is_masked(field[10].nxvalue)
assert field[10].mask
field.mask[10] = np.ma.nomask
assert np.all(field.mask[8:12] == np.array([False, False, False, True]))
assert not field[10].mask
@pytest.mark.parametrize("save", [False, True])
def test_group_masks(tmpdir, arr1D, save):
group = NXgroup(NXfield(arr1D, name='field'))
group['field'][10:20] = np.ma.masked
if save:
root = NXroot(group)
filename = os.path.join(tmpdir, "file1.nxs")
root.save(filename, mode="w")
root = nxload(filename, "rw")
group = root['group']
assert isinstance(group['field'].nxvalue, np.ma.masked_array)
assert np.all(group['field'].mask[9:11] == np.array([False, True]))
assert 'mask' in group['field'].attrs
assert group['field'].attrs['mask'] == 'field_mask'
assert 'field_mask' in group
assert group['field_mask'].dtype == bool
assert group['field'].mask == group['field_mask']
group['field'].mask[10] = np.ma.nomask
assert np.all(group['field'].mask[10:12] == np.array([False, True]))
assert np.all(group['field_mask'][10:12] == np.array([False, True]))
| 1,459
| 0
| 45
|