| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
tests/routing/test_handle.py
|
antonrh/django-apirouter
| 7
|
12781051
|
import pytest
from django.http import HttpResponse
from apirouter import APIRouter
from apirouter.exceptions import APIException
pytestmark = [pytest.mark.urls(__name__)]
def exception_handler(request, exc):
return HttpResponse(str(exc), status=400)
router = APIRouter(exception_handler=exception_handler)
@router.route("/string")
def handle_string(request):
return "OK"
@router.route("/dict")
def handle_dict(request):
return {"success": True}
@router.route("/list")
def handle_list(request):
return [1, 2, 3, 4, 5]
@router.route("/error")
def handle_error(request):
raise APIException(status_code=400, detail="Error")
urlpatterns = router.urls
def test_handle_string(client):
response = client.get("/string")
assert response.status_code == 200
assert response.json() == "OK"
def test_handle_dict(client):
response = client.get("/dict")
assert response.status_code == 200
assert response.json() == {"success": True}
def test_handle_list(client):
response = client.get("/list")
assert response.status_code == 200
assert response.json() == [1, 2, 3, 4, 5]
def test_handle_error(client):
response = client.get("/error")
assert response.status_code == 400
assert response.content == b"Error"
| 2.34375
| 2
|
v2ray_stats/config.py
|
Ricky-Hao/V2Ray.Stats
| 13
|
12781052
|
<gh_stars>10-100
import json
from typing import Any
class Config(object):
_config = {
'mail_host': None,
'mail_port': None,
'mail_user': None,
'mail_pass': None,
'mail_subject': 'V2Ray Traffic Report',
'database': 'v2ray_stats.sqlite3',
'server': '127.0.0.1:2335',
'interval': 5,
'debug': False
}
@classmethod
def load_config(cls, config_path: str):
"""
Load config
:param config_path: Config path.
"""
with open(config_path, 'r') as f:
cls._config.update(json.load(f))
@classmethod
def get(cls, item):
return cls._config[item]
@classmethod
def set(cls, key: str, value: Any, ignore_check: bool = False):
"""
Set config.
:param key: Config key
:param value: Config value
:param ignore_check: Set value even if value is None
:return:
"""
if ignore_check:
cls._config[key] = value
elif value is not None and key in cls._config:
cls._config[key] = value
def __getitem__(self, item):
return self._config[item]
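# Usage sketch (the file name is illustrative), assuming the JSON file holds
# an object whose keys match _config:
#
#   Config.load_config('config.json')
#   Config.set('interval', 10)
#   assert Config.get('interval') == 10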
| 2.734375
| 3
|
python/lvmpwi/pwi/__init__.py
|
sdss/lvmpwi
| 0
|
12781053
|
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-06-15
# @Filename: __init__.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from .pwi4_client import PWI4
| 1.09375
| 1
|
disasm/disasmz80.py
|
wd5gnr/8080
| 26
|
12781054
|
<gh_stars>10-100
#! /usr/bin/env python3
#
# Disassembler for Zilog Z80 microprocessor.
# Copyright (c) 2013 by <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Possible enhancements:
# - Option for octal output
# - Read Intel HEX file format
import sys
import fileinput
import argparse
import signal
# Avoids an error when output piped, e.g. to "less"
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# Lookup table - given opcode byte as index, return mnemonic of instruction and length of instruction.
lookupTable = [
[ "nop", 1 ], # 00
[ "ld bc,", 3 ], # 01
[ "ld (bc),a", 1 ], # 02
[ "inc bc", 1 ], # 03
[ "inc b", 1 ], # 04
[ "dec b", 1 ], # 05
[ "ld b,", 2 ], # 06
[ "rlca", 1 ], # 07
[ "ex af,af", 1 ], # 08
[ "add hl,bc", 1 ], # 09
[ "ld a,(bc)", 1 ], # 0A
[ "dec bc", 1 ], # 0B
[ "inc c", 1 ], # 0C
[ "dec c", 1 ], # 0D
[ "ld c,", 2 ], # 0E
[ "rrca", 1 ], # 0F
[ "djnz ", 1 ], # 10
[ "ld de,", 3 ], # 11
[ "ld (de),a", 1 ], # 12
[ "inc de", 1 ], # 13
[ "inc d", 1 ], # 14
[ "dec d", 1 ], # 15
[ "ld d,", 2 ], # 16
[ "rla", 1 ], # 17
[ "jr ", 1 ], # 18
[ "add hl,de", 1 ], # 19
[ "ld a,(de)", 1 ], # 1A
[ "dec de", 1 ], # 1B
[ "inc e", 1 ], # 1C
[ "dec e", 1 ], # 1D
[ "ld e,", 2 ], # 1E
[ "rra", 1 ], # 1F
[ "jr nz,", 1 ], # 20
[ "ld hl,", 3 ], # 21
[ "ld (", 3 ], # 22 then append "),hl"
[ "inc hl", 1 ], # 23
[ "inc h", 1 ], # 24
[ "dec h", 1 ], # 25
[ "ld h,", 2 ], # 26
[ "daa", 1 ], # 27
[ "jr z,", 1 ], # 28
[ "add hl,jl", 1 ], # 29
[ "ld hl,(", 3 ], # 2A then append ")"
[ "dec hl", 1 ], # 2B
[ "inc l", 1 ], # 2C
[ "dec l", 1 ], # 2D
[ "ld l,", 2 ], # 2E
[ "cpl", 1 ], # 2F
[ "jr nc,", 2 ], # 30
[ "ld sp,", 3 ], # 31
[ "ld (", 3 ], # 32 then append "),a"
[ "inc sp", 1 ], # 33
[ "inc (hl)", 1 ], # 34
[ "dec (hl)", 1 ], # 35
[ "ld (hl),", 2 ], # 36
[ "scf", 1 ], # 37
[ "jr c,", 1 ], # 38
[ "add hl,sp", 1 ], # 39
[ "ld a,(", 3 ], # 3A then append ")"
[ "dec sp", 1 ], # 3B
[ "inc a", 1 ], # 3C
[ "dec a", 1 ], # 3D
[ "ld a,", 2 ], # 3E
[ "ccf", 1 ], # 3F
[ "ld b,b", 1 ], # 40
[ "ld b,c", 1 ], # 41
[ "ld b,d", 1 ], # 42
[ "ld b,e", 1 ], # 43
[ "ld b,h", 1 ], # 44
[ "ld b,l", 1 ], # 45
[ "ld b,(hl)", 1 ], # 46
[ "ld b,a", 1 ], # 47
[ "ld c,b", 1 ], # 48
[ "ld c,c", 1 ], # 49
[ "ld c,d", 1 ], # 4A
[ "ld c,e", 1 ], # 4B
[ "ld c,h", 1 ], # 4C
[ "ld c,l", 1 ], # 4D
[ "ld c,(hl)", 1 ], # 4E
[ "ld c,a", 1 ], # 4F
[ "ld d,b", 1 ], # 50
[ "ld d,c", 1 ], # 51
[ "ld d,d", 1 ], # 52
[ "ld d,e", 1 ], # 53
[ "ld d,h", 1 ], # 54
[ "ld d,l", 1 ], # 55
[ "ld d,(hl)", 1 ], # 56
[ "ld d,a", 1 ], # 57
[ "ld e,b", 1 ], # 58
[ "ld e,c", 1 ], # 59
[ "ld e,d", 1 ], # 5A
[ "ld e,e", 1 ], # 5B
[ "ld e,h", 1 ], # 5C
[ "ld e,l", 1 ], # 5D
[ "ld e,(hl)", 1 ], # 5E
[ "ld e,a", 1 ], # 5F
[ "ld h,b", 1 ], # 60
[ "ld h,c", 1 ], # 61
[ "ld h,d", 1 ], # 62
[ "ld h,e", 1 ], # 63
[ "ld h,h", 1 ], # 64
[ "ld h,l", 1 ], # 65
[ "ld h,(hl)", 1 ], # 66
[ "ld h,a", 1 ], # 67
[ "ld l,b", 1 ], # 68
[ "ld l,c", 1 ], # 69
[ "ld l,d", 1 ], # 6A
[ "ld l,e", 1 ], # 6B
[ "ld l,h", 1 ], # 6C
[ "ld l,l", 1 ], # 6D
[ "ld l,(hl)", 1 ], # 6E
[ "ld l,a", 1 ], # 6F
[ "ld (hl),b", 1 ], # 70
[ "ld (hl),c", 1 ], # 71
[ "ld (hl),d", 1 ], # 72
[ "ld (hl),e", 1 ], # 73
[ "ld (hl),h", 1 ], # 74
[ "ld (hl),l", 1 ], # 75
[ "halt", 1 ], # 76
[ "ld (hl),a", 1 ], # 77
[ "ld a,b", 1 ], # 78
[ "ld a,c", 1 ], # 79
[ "ld a,d", 1 ], # 7A
[ "ld a,e", 1 ], # 7B
[ "ld a,h", 1 ], # 7C
[ "ld a,l", 1 ], # 7D
[ "ld a,(hl)", 1 ], # 7E
[ "ld a,a", 1 ], # 7F
[ "add a,b", 1 ], # 80
[ "add a,c", 1 ], # 81
[ "add a,d", 1 ], # 82
[ "add a,ee", 1 ], # 83
[ "add a,h", 1 ], # 84
[ "add a,l", 1 ], # 85
[ "add a,(hl)", 1 ], # 86
[ "add a,a", 1 ], # 87
[ "adc a,b", 1 ], # 88
[ "adc a,c", 1 ], # 89
[ "adc a,d", 1 ], # 8A
[ "adc a,e", 1 ], # 8B
[ "adc a,h", 1 ], # 8C
[ "adc a,l", 1 ], # 8D
[ "adc a,(hl)", 1 ], # 8E
[ "adc a,a", 1 ], # 8F
[ "sub b", 1 ], # 90
[ "sub c", 1 ], # 91
[ "sub d", 1 ], # 92
[ "sub e", 1 ], # 93
[ "sub h", 1 ], # 94
[ "sub l", 1 ], # 95
[ "sub (hl)", 1 ], # 96
[ "sub a", 1 ], # 97
[ "sbc a,b", 1 ], # 98
[ "sbc a,c", 1 ], # 99
[ "sbc a,d", 1 ], # 9A
[ "sbc a,e", 1 ], # 9B
[ "sbc a,h", 1 ], # 9C
[ "sbc a,l", 1 ], # 9D
[ "sbc a,(hl)", 1 ], # 9E
[ "sbc a,a", 1 ], # 9F
[ "and b", 1 ], # A0
[ "and c", 1 ], # A1
[ "and d", 1 ], # A2
[ "and e", 1 ], # A3
[ "and h", 1 ], # A4
[ "and l", 1 ], # A5
[ "and (hl)", 1 ], # A6
[ "and a", 1 ], # A7
[ "xor b", 1 ], # A8
[ "xor c", 1 ], # A9
[ "xor d", 1 ], # AA
[ "xor e", 1 ], # AB
[ "xor h", 1 ], # AC
[ "xor l", 1 ], # AD
[ "xor (hl)", 1 ], # AE
[ "xor a", 1 ], # AF
[ "or b", 1 ], # B0
[ "or c", 1 ], # B1
[ "or d", 1 ], # B2
[ "or e", 1 ], # B3
[ "or h", 1 ], # B4
[ "or l", 1 ], # B5
[ "or (hl)", 1 ], # B6
[ "or a", 1 ], # B7
[ "cp b", 1 ], # B8
[ "cp c", 1 ], # B9
[ "cp d", 1 ], # BA
[ "cp e", 1 ], # BB
[ "cp h", 1 ], # BC
[ "cp l", 1 ], # BD
[ "cp (hl)", 1 ], # BE
[ "cp a", 1 ], # BF
[ "ret nz", 1 ], # C0
[ "pop bc", 1 ], # C1
[ "jp nz,", 3 ], # C2
[ "jp ", 3 ], # C3
[ "call nz,", 3 ], # C4
[ "push bc", 1 ], # C5
[ "ada a,", 2 ], # C6
[ "rst 00", 1 ], # C7
[ "ret z", 1 ], # C8
[ "ret", 1 ], # C9
[ "jp z,", 3 ], # CA
[ "prefix", 2 ], # CB
[ "call z,", 3 ], # CC
[ "call ", 3 ], # CD
[ "adc a,", 2 ], # CE
[ "rst 08", 1 ], # CF
[ "ret nc", 1 ], # D0
[ "pop de", 1 ], # D1
[ "jp nc,", 3 ], # D2
[ "out (", 2 ], # D3 then append "),a"
[ "call nc,", 3 ], # D4
[ "push de", 1 ], # D5
[ "sub ", 2 ], # D6
[ "rst 10", 1 ], # D7
[ "ret c", 1 ], # D8
[ "exx", 1 ], # D9
[ "jp c,", 3 ], # DA
[ "in a,(", 2 ], # DB then append ")"
[ "call c,", 3 ], # DC
[ "prefix", 2 ], # DD
[ "sbc a,", 2 ], # DE
[ "rst 18", 1 ], # DF
[ "ret po", 1 ], # E0
[ "pop hl", 1 ], # E1
[ "jp po,", 3 ], # E2
[ "ex (sp),hl", 1 ],# E3
[ "call po,", 3 ], # E4
[ "push hl", 1 ], # E5
[ "and ", 2 ], # E6
[ "rst 20", 1 ], # E7
[ "ret pe", 1 ], # E8
[ "jp (hl)", 1 ], # E9
[ "jp pe,", 3 ], # EA
[ "ex de,hl", 1 ], # EB
[ "call pe,", 3 ], # EC
[ "prefix", 2 ], # ED
[ "xor ", 2 ], # EE
[ "rst 28", 1 ], # EF
[ "ret p", 1 ], # F0
[ "pop af", 1 ], # F1
[ "jp p,", 3 ], # F2
[ "di", 1 ], # F3
[ "call p,", 3 ], # F4
[ "push af", 1 ], # F5
[ "or ", 2 ], # F6
[ "rst 30", 1 ], # F7
[ "ret m", 1 ], # F8
[ "ld sp,phl", 1 ], # F9
[ "jp m,", 3 ], # FA
[ "ei", 1 ], # FB
[ "call m,", 3 ], # FC
[ "prefix", 2 ], # FD
[ "cp ", 2 ], # FE
[ "rst 38", 1 ], # FF
]
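# Example of how the table drives decoding: index 0x3E holds ["ld a,", 2], so
# the disassembler reads one operand byte and emits e.g. "ld a,$42".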
# Lookup table for multibyte instructions starting with 0xCB
lookupTableCB = [
[ "rlc b", 2 ], # 00
[ "rlc c", 2 ], # 01
[ "rlc d", 2 ], # 02
[ "rlc e", 2 ], # 03
[ "rlc h", 2 ], # 04
[ "rlc l", 2 ], # 05
[ "rlc (hl)", 2 ], # 06
[ "rlc a", 2 ], # 07
[ "rrc b", 2 ], # 08
[ "rrc c", 2 ], # 09
[ "rrc d", 2 ], # 0A
[ "rrc e", 2 ], # 0B
[ "rrc h", 2 ], # 0C
[ "rrc l", 2 ], # 0D
[ "rrc (hl)", 2 ], # 0E
[ "rrc a", 2 ], # 0F
[ "rl b", 2 ], # 10
[ "rl c", 2 ], # 11
[ "rl d", 2 ], # 12
[ "rl e", 2 ], # 13
[ "rl h", 2 ], # 14
[ "rl l", 2 ], # 15
[ "rl (hl)", 2 ], # 16
[ "rl a", 2 ], # 17
[ "rr b", 2 ], # 18
[ "rr c", 2 ], # 19
[ "rr d", 2 ], # 1A
[ "rr e", 2 ], # 1B
[ "rr h", 2 ], # 1C
[ "rr l", 2 ], # 1D
[ "rr (hl)", 2 ], # 1E
[ "rr e", 2 ], # 1F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # 2F
[ "", 2 ], # 20
[ "", 2 ], # 21
[ "", 2 ], # 22
[ "", 2 ], # 23
[ "", 2 ], # 24
[ "", 2 ], # 25
[ "", 2 ], # 26
[ "", 2 ], # 27
[ "", 2 ], # 28
[ "", 2 ], # 29
[ "", 2 ], # 2A
[ "", 2 ], # 2B
[ "", 2 ], # 2C
[ "", 2 ], # 2D
[ "", 2 ], # 2E
[ "", 2 ], # FF
]
# Lookup table for multibyte instructions starting with 0xED
# Note that first index is 0x40, not 0x00
lookupTableED = [
[ "in b,(c)", 2 ], # 40
]
# Lookup table for multibyte instructions starting with 0xDD
lookupTableDD = [
]
# Lookup table for multibyte instructions starting with 0xFD
lookupTableFD = [
]
upperOption = False
def isprint(c):
"Return if character is printable ASCII"
if c >= '@' and c <= '~':
return True
else:
return False
def case(s):
"Return string or uppercase version of string if option is set."
global upperOption
if upperOption:
return s.upper()
else:
return s
# Parse command line options
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="Binary file to disassemble")
parser.add_argument("-n", "--nolist", help="Don't list instruction bytes (make output suitable for assembler)", action="store_true")
parser.add_argument("-u", "--uppercase", help="Use uppercase for mnemonics", action="store_true")
parser.add_argument("-a", "--address", help="Specify decimal starting address (defaults to 0)", default=0, type=int)
parser.add_argument("-f", "--format", help="Use number format: 1 = $1234 2 = 1234h 3 = 1234 (default 1)", default=1, type=int, choices=range(1, 4))
args = parser.parse_args()
# Get filename from command line arguments.
filename = args.filename
# Current instruction address. Silently force it to be in valid range.
address = args.address & 0xffff
# Uppercase output option
upperOption = args.uppercase
# Contains a line of output
line = ""
# Open input file.
# Display error and exit if filename does not exist.
try:
f = open(filename, "rb")
except FileNotFoundError:
print("error: input file '" + filename + "' not found.", file=sys.stderr)
sys.exit(1)
# Print initial origin address
if args.nolist == False:
if args.format == 1:
print("%04X %s $%04X" % (address, case("org"), address))
elif args.format == 2:
print("%04X %s %04Xh" % (address, case("org"), address))
else:
print("%04X %s %04X" % (address, case("org"), address))
while True:
try:
b = f.read(1) # Get binary byte from file
if len(b) == 0:
if args.nolist == False:
print("%04X %s" % (address, case("end"))) # Exit if end of file reached.
break
if args.nolist == False:
line = "%04X " % address # Print current address
op = ord(b) # Get opcode byte
n = lookupTable[op][1] # Look up number of instruction bytes
mnem = case(lookupTable[op][0]) # Get mnemonic
# Handle getting length of multi-byte opcodes (listed in table as "prefix").
if (mnem == "prefix"):
assert(op in [0xcb, 0xed, 0xdd, 0xfd])
if (op == 0xcb):
n = 2 # All CB-prefixed instructions are two bytes long.
# elif (op == 0xed):
# n = lookupTableED[op][1]
# elif (op == 0xdd):
# n = lookupTableDD[op][1]
# elif (op == 0xfd):
# n = lookupTableFD[op][1]
# Print instruction bytes
if (n == 1):
if args.nolist == False:
line += "%02X " % op
elif (n == 2):
try: # Possible to get exception here if EOF reached.
op1 = ord(f.read(1))
except TypeError:
op1 = 0 # Fake it to recover from EOF
if args.nolist == False:
line += "%02X %02X " % (op, op1)
elif (n == 3):
try: # Possible to get exception here if EOF reached.
op1 = ord(f.read(1))
op2 = ord(f.read(1))
except TypeError:
op1 = 0 # Fake it to recover from EOF
op2 = 0
if args.nolist == False:
line += "%02X %02X %02X " % (op, op1, op2)
if args.nolist == True:
line + " "
# Handle mnemonic of multi-byte opcodes (listed in table as "prefix").
if mnem == "prefix":
if (op == 0xcb):
mnem = case(lookupTableCB[op1][0]) # Get mnemonic
line += mnem
# Print any operands
if (n == 2):
if isprint(chr(op1)):
line += "'%c'" % op1
else:
if args.format == 1:
line += "$%02X" % op1
elif args.format == 2:
line += "%02Xh" % op1
else:
line += "%02X" % op1
elif (n == 3):
if args.format == 1:
line += "$%02X%02X" % (op2, op1)
elif args.format == 2:
line += "%02X%02Xh" % (op2, op1)
else:
line += "%02X%02X" % (op2, op1)
# Handle opcodes that are special cases that need additional characters appended at the end
if (op == 0x22):
line += "),hl"
elif (op in [0x2a, 0x3a, 0xdb]):
line += ")"
elif (op in [0x32, 0xd3]):
line += "),a"
# Update address
address = address + n
# Check for address exceeding 0xFFFF, if so wrap around.
if (address > 0xffff):
address = address & 0xffff
# Finished a line of disassembly
print(line)
line = ""
except KeyboardInterrupt:
print("Interrupted by Control-C", file=sys.stderr)
print("%04X %s" % (address, case("end"))) # Exit if end of file reached.
break
| 2.453125
| 2
|
app/api/posts/migrations/0001_initial.py
|
Suhas-G/Melton-App-Server
| 5
|
12781055
|
# Generated by Django 3.0.3 on 2020-07-12 15:21
from django.db import migrations, models
import markdownx.models
import posts.models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Tag",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"tag",
posts.models.CaseInsensitiveCharField(max_length=200, unique=True),
),
],
),
migrations.CreateModel(
name="Post",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=200)),
(
"content",
markdownx.models.MarkdownxField(
help_text="Write content of post in markdown"
),
),
("created", models.DateTimeField(auto_now_add=True)),
("updated", models.DateTimeField(auto_now=True)),
("tags", models.ManyToManyField(related_name="posts", to="posts.Tag")),
],
),
]
| 2.109375
| 2
|
torch/utils/data/datapipes/iter/httpreader.py
|
xiaohanhuang/pytorch
| 60,067
|
12781056
|
<reponame>xiaohanhuang/pytorch<filename>torch/utils/data/datapipes/iter/httpreader.py
from io import IOBase
from typing import Sized, Tuple
from torch.utils.data import IterDataPipe
from torch.utils.data.datapipes.utils.common import deprecation_warning_torchdata
class HTTPReaderIterDataPipe(IterDataPipe[Tuple[str, IOBase]]):
r""" :class:`HTTPReaderIterDataPipe`
Iterable DataPipe to load file url(s) (http url(s) pointing to file(s)),
yield file url and IO stream in a tuple
Args:
datapipe: Iterable DataPipe providing urls
timeout: Timeout for http request
"""
def __init__(self, datapipe, timeout=None):
self.datapipe = datapipe
self.timeout = timeout
deprecation_warning_torchdata(type(self).__name__)
def __iter__(self):
from requests import HTTPError, RequestException, Session
for url in self.datapipe:
try:
with Session() as session:
if self.timeout is None:
r = session.get(url, stream=True)
else:
r = session.get(url, timeout=self.timeout, stream=True)
yield url, r.raw
except HTTPError as e:
raise Exception(f"Could not get the file. [HTTP Error] {e.response}.")
except RequestException as e:
raise Exception(f"Could not get the file at {url}. [RequestException] {e.response}.")
except Exception:
raise
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError("{} instance doesn't have valid length".format(type(self).__name__))
| 2.890625
| 3
|
FretboardGenerator.py
|
mdales/FretboardGenerator360
| 0
|
12781057
|
<reponame>mdales/FretboardGenerator360
# Author-Digital Flapjack Ltd
# Description-Test
import math
import adsk.core, adsk.fusion, adsk.cam, traceback
# Calculates the fret positions given a scale length and the number of frets. Includes the zero fret.
# @param {number} scaleLength - The scale length in whatever units.
# @param {number} frets - How many frets to calculate for.
# @return {[number]} A list of positions in the same units as the scale length was provided.
def generateFretPositions(scaleLength, frets):
positions = []
for i in range(frets + 1):
positions.append(scaleLength - (scaleLength / math.pow(2, i / 12)))
return positions
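# Sanity check (illustrative numbers): equal temperament puts the 12th fret at
# exactly half the scale length, so generateFretPositions(25.5, 12)[12] -> 12.75.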
# Draws a single inlay marker of the style provided
# @param {Object} sketch - The sketch being built for the neck.
# @param {String} style - Either "dots" or "crosshairs".
# @param {Number} x_pos - The center x position on the inlay.
# @param {Number} y_pos - The center y position on the inlay.
# @param {Number} radius - The radius on the inlay.
def drawInlay(sketch, style, x_pos, y_pos, radius):
if style == "Dots":
circles = sketch.sketchCurves.sketchCircles
circles.addByCenterRadius(adsk.core.Point3D.create(x_pos, y_pos, 0), radius)
elif style == "Crosshairs":
lines = sketch.sketchCurves.sketchLines
lines.addByTwoPoints(adsk.core.Point3D.create(x_pos, y_pos - radius, 0.0), adsk.core.Point3D.create(x_pos, y_pos + radius, 0.0))
lines.addByTwoPoints(adsk.core.Point3D.create(x_pos - radius, y_pos, 0.0), adsk.core.Point3D.create(x_pos + radius, y_pos, 0.0))
# Draws a fretboard into the given sketch.
# @param {Object} params - A dict with all the form params.
# @param {Object} sketch - The sketch to draw into.
def generateFretboard(params, sketch):
height = 7.5
x_offset = 0.0
y_offset = 0.0
# slotWidth = 0.5
positions = generateFretPositions(params['scaleLength'], params['frets'])
lines = sketch.sketchCurves.sketchLines
# draw the nut far side
if params['slotStyle'] == "line":
lines.addByTwoPoints(adsk.core.Point3D.create(x_offset - params['nutWidth'], y_offset, 0.0),
adsk.core.Point3D.create(x_offset - params['nutWidth'], y_offset + height, 0.0))
# else:
# var r = new makerjs.models.Rectangle(slotWidth, height)
# r.origin = [(x_offset - params.nutWidth) - (slotWidth / 2.0), y_offset]
# models.append(r)
# draw the frets
for i in range(len(positions)):
pos = x_offset + positions[i]
# The fret itself
if params['slotStyle'] == "line":
lines.addByTwoPoints(adsk.core.Point3D.create(pos, y_offset, 0.0), adsk.core.Point3D.create(pos, y_offset + height, 0.0))
# else:
# r = new makerjs.models.Rectangle(slotWidth, height)
# r.origin = [pos - (slotWidth / 2.0), y_offset]
# models.append(r)
# Do the inlay markers next in a traditional style
if i == 0:
continue
fretNumber = i % 12
if fretNumber in {3, 5, 7, 9}:
x_pos = pos - ((positions[i] - positions[i - 1]) / 2.0)
y_pos = y_offset + (height / 2.0)
radius = params['inlayWidth'] / 2.0
drawInlay(sketch, params['inlayStyle'], x_pos, y_pos, radius)
elif fretNumber == 0:
x_pos = pos - ((positions[i] - positions[i - 1]) / 2.0)
upper_y = y_offset + (height * 3.0 / 4.0)
lower_y = y_offset + (height / 4.0)
radius = params['inlayWidth'] / 2.0
drawInlay(sketch, params['inlayStyle'], x_pos, upper_y, radius)
drawInlay(sketch, params['inlayStyle'], x_pos, lower_y, radius)
# Global list to keep all event handlers in scope.
# This is only needed with Python.
handlers = []
def run(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
# Get the CommandDefinitions collection.
cmdDefs = ui.commandDefinitions
# Create a button command definition.
buttonSample = cmdDefs.addButtonDefinition('FretboardGeneratorButtonId',
'Fretboard Generator',
'Generate a sketch containing a fretboard')
# Connect to the command created event.
sampleCommandCreated = SampleCommandCreatedEventHandler()
buttonSample.commandCreated.add(sampleCommandCreated)
handlers.append(sampleCommandCreated)
# Execute the command.
buttonSample.execute()
# Keep the script running.
adsk.autoTerminate(False)
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
# Event handler for the commandCreated event.
class SampleCommandCreatedEventHandler(adsk.core.CommandCreatedEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
eventArgs = adsk.core.CommandCreatedEventArgs.cast(args)
cmd = eventArgs.command
inputs = cmd.commandInputs
_ = inputs.addIntegerSliderCommandInput('frets', 'Fret Count', 21, 25, False)
_ = inputs.addValueInput('scale', 'Scale Length', '', adsk.core.ValueInput.createByReal(25.5))
inlayStyle = inputs.addDropDownCommandInput('inlayStyle', 'Inlay Style', adsk.core.DropDownStyles.TextListDropDownStyle)
inlayStyle.listItems.add('Dots', True)
inlayStyle.listItems.add('Crosshairs', False)
_ = inputs.addValueInput('nutWidth', 'Nut width', '', adsk.core.ValueInput.createByReal(3.0))
# Connect to the execute event.
onExecute = SampleCommandExecuteHandler()
cmd.execute.add(onExecute)
handlers.append(onExecute)
# Event handler for the execute event.
class SampleCommandExecuteHandler(adsk.core.CommandEventHandler):
def __init__(self):
super().__init__()
def notify(self, args):
eventArgs = adsk.core.CommandEventArgs.cast(args)
# # Get the values from the command inputs.
inputs = eventArgs.command.commandInputs
# Code to react to the event.
app = adsk.core.Application.get()
des = adsk.fusion.Design.cast(app.activeProduct)
if des:
rootComp = des.rootComponent
sketches = rootComp.sketches
xyPlane = rootComp.xYConstructionPlane
sketch = sketches.add(xyPlane)
params = {
'nutWidth': inputs.itemById('nutWidth').value / 10.0,
'slotStyle': 'line',
'scaleLength': inputs.itemById('scale').value * 2.54,
'frets': inputs.itemById('frets').valueOne,
'inlayStyle': inputs.itemById('inlayStyle').selectedItem.name,
'inlayWidth': 0.5,
}
generateFretboard(params, sketch)
# Force the termination of the command.
adsk.terminate()
def stop(context):
ui = None
try:
app = adsk.core.Application.get()
ui = app.userInterface
# Delete the command definition.
cmdDef = ui.commandDefinitions.itemById('FretboardGeneratorButtonId')
if cmdDef:
cmdDef.deleteMe()
except:
if ui:
ui.messageBox('Failed:\n{}'.format(traceback.format_exc()))
| 2.359375
| 2
|
src/platformer.py
|
megamarc/TilenginePythonPlatformer
| 16
|
12781058
|
<gh_stars>10-100
""" Tilengine python platformer demo """
from tilengine import Engine, Window
from raster_effect import raster_effect
from world import World
from player import Player
from UI import UI
from sound import Sound
import game
# init tilengine
game.engine = Engine.create(game.WIDTH, game.HEIGHT, game.MAX_LAYERS, game.MAX_SPRITES, 0)
game.engine.set_load_path(game.ASSETS_PATH)
# set raster callback
game.engine.set_raster_callback(raster_effect)
# init global game entities
game.actors = list()
game.world = World()
game.player = Player()
game.ui = UI()
# load sound effects
game.sounds = Sound(4, game.ASSETS_PATH)
game.sounds.load("jump", "jump.wav")
game.sounds.load("crush", "crunch.wav")
game.sounds.load("pickup", "pickup.wav")
game.sounds.load("hurt", "hurt.wav")
game.sounds.load("eagle", "vulture.wav")
# create window & start main loop
game.window = Window.create()
game.world.start()
while game.window.process():
for actor in list(game.actors): # iterate over a copy so removal is safe
if not actor.update():
game.actors.remove(actor)
| 2.421875
| 2
|
custom/icds_reports/migrations/0067_ccsrecordmonthly_dob.py
|
tobiasmcnulty/commcare-hq
| 1
|
12781059
|
<reponame>tobiasmcnulty/commcare-hq
# Generated by Django 1.10.7 on 2018-09-25 13:50
from corehq.sql_db.operations import RawSQLMigration
from django.db import migrations
from custom.icds_reports.const import SQL_TEMPLATES_ROOT
migrator = RawSQLMigration((SQL_TEMPLATES_ROOT,))
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0066_aggregatesqlprofile_last_included_doc_time'),
]
operations = [
migrator.get_migration('update_tables28.sql')
]
| 1.65625
| 2
|
tools/testing/tests/solo.py
|
trustcrypto/solo
| 1
|
12781060
|
from solo.client import SoloClient
from fido2.ctap1 import ApduError
from fido2.ctap import CtapError
from .util import shannon_entropy
from .tester import Tester, Test
class SoloTests(Tester):
def __init__(self, tester=None):
super().__init__(tester)
def run(self,):
self.test_solo()
def test_solo(self,):
"""
Solo specific tests
"""
# RNG command
sc = SoloClient()
sc.find_device(self.dev)
sc.use_u2f()
memmap = (0x08005000, 0x08005000 + 198 * 1024 - 8)
total = 1024 * 16
with Test("Gathering %d random bytes..." % total):
entropy = b""
while len(entropy) < total:
entropy += sc.get_rng()
with Test("Test entropy is close to perfect"):
s = shannon_entropy(entropy)
assert s > 7.98
print("Entropy is %.5f bits per byte." % s)
with Test("Test Solo version command"):
assert len(sc.solo_version()) == 3
with Test("Test bootloader is not active"):
try:
sc.write_flash(memmap[0], b"1234")
except ApduError:
pass
sc.exchange = sc.exchange_fido2
with Test("Test Solo version and random commands with fido2 layer"):
assert len(sc.solo_version()) == 3
sc.get_rng()
def test_bootloader(self,):
sc = SoloClient()
sc.find_device(self.dev)
sc.use_u2f()
memmap = (0x08005000, 0x08005000 + 198 * 1024 - 8)
data = b"A" * 64
with Test("Test version command"):
assert len(sc.bootloader_version()) == 3
with Test("Test write command"):
sc.write_flash(memmap[0], data)
for addr in (memmap[0] - 8, memmap[0] - 4, memmap[1], memmap[1] - 8):
with Test("Test out of bounds write command at 0x%04x" % addr):
try:
sc.write_flash(addr, data)
except CtapError as e:
assert e.code == CtapError.ERR.NOT_ALLOWED
| 2.25
| 2
|
scipy/stats/tests/test_crosstab.py
|
Ennosigaeon/scipy
| 9,095
|
12781061
|
<filename>scipy/stats/tests/test_crosstab.py
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from scipy.stats.contingency import crosstab
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_basic(sparse):
a = [0, 0, 9, 9, 0, 0, 9]
b = [2, 1, 3, 1, 2, 3, 3]
expected_avals = [0, 9]
expected_bvals = [1, 2, 3]
expected_count = np.array([[1, 2, 1],
[1, 0, 2]])
(avals, bvals), count = crosstab(a, b, sparse=sparse)
assert_array_equal(avals, expected_avals)
assert_array_equal(bvals, expected_bvals)
if sparse:
assert_array_equal(count.A, expected_count)
else:
assert_array_equal(count, expected_count)
def test_crosstab_basic_1d():
# Verify that a single input sequence works as expected.
x = [1, 2, 3, 1, 2, 3, 3]
expected_xvals = [1, 2, 3]
expected_count = np.array([2, 2, 3])
(xvals,), count = crosstab(x)
assert_array_equal(xvals, expected_xvals)
assert_array_equal(count, expected_count)
def test_crosstab_basic_3d():
# Verify the function for three input sequences.
a = 'a'
b = 'b'
x = [0, 0, 9, 9, 0, 0, 9, 9]
y = [a, a, a, a, b, b, b, a]
z = [1, 2, 3, 1, 2, 3, 3, 1]
expected_xvals = [0, 9]
expected_yvals = [a, b]
expected_zvals = [1, 2, 3]
expected_count = np.array([[[1, 1, 0],
[0, 1, 1]],
[[2, 0, 1],
[0, 0, 1]]])
(xvals, yvals, zvals), count = crosstab(x, y, z)
assert_array_equal(xvals, expected_xvals)
assert_array_equal(yvals, expected_yvals)
assert_array_equal(zvals, expected_zvals)
assert_array_equal(count, expected_count)
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_levels(sparse):
a = [0, 0, 9, 9, 0, 0, 9]
b = [1, 2, 3, 1, 2, 3, 3]
expected_avals = [0, 9]
expected_bvals = [0, 1, 2, 3]
expected_count = np.array([[0, 1, 2, 1],
[0, 1, 0, 2]])
(avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]],
sparse=sparse)
assert_array_equal(avals, expected_avals)
assert_array_equal(bvals, expected_bvals)
if sparse:
assert_array_equal(count.A, expected_count)
else:
assert_array_equal(count, expected_count)
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_extra_levels(sparse):
# The pair of values (-1, 3) will be ignored, because we explicitly
# request the counted `a` values to be [0, 9].
a = [0, 0, 9, 9, 0, 0, 9, -1]
b = [1, 2, 3, 1, 2, 3, 3, 3]
expected_avals = [0, 9]
expected_bvals = [0, 1, 2, 3]
expected_count = np.array([[0, 1, 2, 1],
[0, 1, 0, 2]])
(avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]],
sparse=sparse)
assert_array_equal(avals, expected_avals)
assert_array_equal(bvals, expected_bvals)
if sparse:
assert_array_equal(count.A, expected_count)
else:
assert_array_equal(count, expected_count)
def test_validation_at_least_one():
with pytest.raises(TypeError, match='At least one'):
crosstab()
def test_validation_same_lengths():
with pytest.raises(ValueError, match='must have the same length'):
crosstab([1, 2], [1, 2, 3, 4])
def test_validation_sparse_only_two_args():
with pytest.raises(ValueError, match='only two input sequences'):
crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True)
def test_validation_len_levels_matches_args():
with pytest.raises(ValueError, match='number of input sequences'):
crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],))
| 2.515625
| 3
|
modules/rvorbitfitter.py
|
timberhill/radiant
| 0
|
12781062
|
<gh_stars>0
import numpy as np
import warnings
from modules.contracts import OrbitFitter
from scipy.optimize import fsolve
from scipy.optimize import curve_fit
from modules.settings import Settings
class RVOrbitFitter(OrbitFitter):
def _validatePositiveRange(self, r, name, alt=[0.0, float('inf')]):
if len(r) == 2:
if r[0] < r[1] and r[0] >= 0:
return r
warnings.warn('RVOrbitFitter: invalid <' + name + '> provided. Value should be positive and left boundary strictly less than right. Ignoring...')
return alt
"""
m/s, days, M_earth
starmass in solar masses
"""
def __init__(self, tdata, rvdata, errs=None, starmass=1.0, massrange=[], periodrange=[]):
if len(tdata) != len(rvdata):
raise ValueError('RVOrbitFitter: <tdata> and <rvdata> should have same dimensions.')
if errs is not None and len(errs) != len(tdata):
raise ValueError('RVOrbitFitter: <tdata> and <rvdata> should have same dimensions as <errs>.')
self._G = 8.88677e-10 # AU^3 Me^-1 day^-2
self._SunInEarthMasses = 332978.9
self._ts = tdata
self._rvs = rvdata
self._errs = errs
self._starmass = starmass
self._massrange = self._validatePositiveRange(massrange, 'massrange', alt=[0, 0.1*starmass*self._SunInEarthMasses])
self._periodrange = self._validatePositiveRange(periodrange, 'periodrange')
self.settings = Settings()
def _curve(self, t, Mp, Ms, P, e, w, v0=0): # assume sin(i) = 1 or Mp is actually Mp*sin(i)
AUmeters = 1.496e11 # m
DAYseconds = 86400 # sec
Ms *= self._SunInEarthMasses
Mtot = Mp + Ms
a = np.power( self._G*Mtot*P*P / (4*np.pi*np.pi) , 1.0/3.0)
# P = np.power(4.0*np.power(np.pi, 2)*np.power(a, 3) / (self._G*Mtot), 0.5)
t1 = np.power(2*np.pi*self._G / P, 1.0/3.0)
t2 = Mp / np.power(Mtot, 2.0/3.0)
t3 = 1.0 / np.power(1 - np.power(e, 2), 0.5)
K = t1*t2*t3
keplerEq = lambda E : E - e*np.sin(E) - 2.0*np.pi*t/P
# compute eccentric anomaly from Kepler equation
E_initial_guess = 2.0*np.pi*t/P # as for circular orbit
E = E_initial_guess
if not e == 0.0:
E = fsolve(keplerEq, E_initial_guess)
# compute true anomaly
nu = 2.0 * np.arctan(np.power((1 + e) / (1 - e), 0.5) * np.tan(E/2.0))
return K*(np.sin(w + nu) + e*np.sin(w)) * (AUmeters/DAYseconds) + v0
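# The expression above is the standard single-planet radial-velocity model:
# K is the RV semi-amplitude, E the eccentric anomaly solved numerically from
# Kepler's equation E - e*sin(E) = 2*pi*t/P, nu the true anomaly, and the
# result K*(sin(w + nu) + e*sin(w)) + v0 is converted from AU/day to m/s.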
def getParameters(self, initial_guess=None):
# 0: Mplanet [Earth masses]
# 1: period [days]
# 2: e (eccentricity)
# 3: w (argument of periastron) [rad]
# 4: v0, barycentric velocity [m/s]
if initial_guess is None:
initial_guess = [100, (self._periodrange[0] + self._periodrange[1])/2, 0.0, 0.0, 0.0]
fun = lambda t, _Mp, _p, _e, _w, _v0: self._curve(t, _Mp, self._starmass, _p, _e, _w, v0=_v0)
bounds = ( [self._massrange[0], self._periodrange[0], 0.0, 0.0, -1e8], \
[self._massrange[1], self._periodrange[1], self.settings.max_eccentricity, 2.0*np.pi, 1e8])
popt, pcov = curve_fit(fun, self._ts, self._rvs, p0=initial_guess, bounds=bounds, sigma=self._errs)
errors = []
for i in range(len(popt)):
try:
errors.append(np.absolute(pcov[i][i])**0.5)
except Exception:
errors.append(0.0)
return (popt[0], errors[0]), \
(popt[1], errors[1]), \
(popt[2], errors[2]), \
(popt[3], errors[3]), \
(popt[4], errors[4])
| 2.328125
| 2
|
codes/quicksort.py
|
pedh/CLRS-Solutions
| 3
|
12781063
|
"""
Quicksort.
"""
import random
def partition(array, left, right):
"""Quicksort partition."""
pivot = array[right]
i = left - 1
for j in range(left, right):
if array[j] < pivot:
i += 1
array[i], array[j] = array[j], array[i]
array[right], array[i + 1] = array[i + 1], array[right]
return i + 1
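# Lomuto partition invariant: array[left..i] holds values < pivot and
# array[i+1..j-1] holds values >= pivot; the final swap places the pivot at
# index i + 1, its sorted position.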
def quicksort_r(array, left, right):
"""Quicksort recursion."""
if right > left:
pivot_i = partition(array, left, right)
quicksort_r(array, left, pivot_i - 1)
quicksort_r(array, pivot_i + 1, right)
def quicksort(array):
"""Quicksort."""
quicksort_r(array, 0, len(array) - 1)
def main():
"""The main function."""
array = list(range(20))
random.shuffle(array)
print(array)
quicksort(array)
print(array)
if __name__ == "__main__":
main()
| 4.21875
| 4
|
models/intphys/model/tvqa.py
|
hucvl/craft
| 9
|
12781064
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from intphys.extra.tvqa.tvqa_abc import ABC
from intphys.extra.tvqa_plus.stage import STAGE
from intphys.data import SimulationInput
from intphys.submodule import *
class TVQA(nn.Module):
SIMULATION_INPUT = SimulationInput.VIDEO
def __init__(self, config):
super().__init__()
config["tvqa"]["vocab_size"] = config["input_size"]
config["tvqa"]["dropout"] = config["dropout"]
config["tvqa"]["output_size"] = config["output_size"]
config["frame_encoder"]["depth_size"] = config["depth_size"]
config["frame_encoder"]["input_width"] = config["input_width"]
config["frame_encoder"]["input_height"] = config["input_height"]
self.config = config
self.frame_encoder = self.create_submodule("frame_encoder")
self.adaptive_pool = nn.AdaptiveAvgPool2d(config["pool_size"])
self.flatten = nn.Flatten()
config["tvqa"]["vid_feat_size"] = config["pool_size"]**2 * self.frame_encoder.out_channels
self.tvqa = ABC(config["tvqa"])
def create_submodule(self, submodule):
config = self.config[submodule]
submodule = eval(config["architecture"])(config)
return submodule
def process_simulation(self, simulations, **kwargs):
B, C, T, X1, X2 = simulations.shape
y = simulations.permute(0, 2, 1, 3, 4)
y = y.reshape(B*T, C, X1, X2)
y = self.frame_encoder(y)
y = self.adaptive_pool(y)
y = self.flatten(y)
y = y.reshape(B, T, -1)
return y
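# process_simulation folds the (B, C, T, H, W) video into B*T frames, encodes
# each frame with the CNN, pools to a fixed spatial size, and returns
# per-frame feature vectors of shape (B, T, F) for the downstream QA module.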
def forward(self, simulations, questions, lengths, **kwargs):
visual = self.process_simulation(simulations, **kwargs)
B, T = visual.shape[:2]
visual_lengths = torch.tensor([T for i in range(B)])
return self.tvqa(questions, torch.tensor(lengths), visual, visual_lengths)
class TVQAPlus(nn.Module):
SIMULATION_INPUT = SimulationInput.VIDEO
def __init__(self, config):
super().__init__()
config["stage"]["embed_size"] = config["question_encoder"]["hidden_size"]
config["stage"]["dropout"] = config["dropout"]
config["stage"]["output_size"] = config["output_size"]
config["question_encoder"]["vocab_size"] = config["input_size"]
config["frame_encoder"]["depth_size"] = config["depth_size"]
config["frame_encoder"]["input_width"] = config["input_width"]
config["frame_encoder"]["input_height"] = config["input_height"]
self.config = config
self.frame_encoder = self.create_submodule("frame_encoder")
self.question_encoder = self.create_submodule("question_encoder")
self.adaptive_pool = nn.AdaptiveAvgPool2d(config["pool_size"])
self.flatten = nn.Flatten()
config["stage"]["vfeat_size"] = self.frame_encoder.out_channels
self.stage = STAGE(config["stage"])
def create_submodule(self, submodule):
config = self.config[submodule]
submodule = eval(config["architecture"])(config)
return submodule
def process_simulation(self, simulations, **kwargs):
B, C, T, X1, X2 = simulations.shape
y = simulations.permute(0, 2, 1, 3, 4)
y = y.reshape(B*T, C, X1, X2)
y = self.frame_encoder(y)
y = self.adaptive_pool(y)
K, X1, X2 = y.shape[-3:]
y = y.view(B, T, K, X1 * X2)
y = y.permute(0, 1, 3, 2)
return y
def process_question(self, questions, lengths, **kwargs):
output, (hiddens, _) = self.question_encoder(questions, lengths)
return nn.utils.rnn.pad_packed_sequence(output, batch_first=True)[0]
def forward(self, simulations, questions, lengths, **kwargs):
visual = self.process_simulation(simulations, **kwargs)
textual = self.process_question(questions, lengths, **kwargs)
B, T, HW = visual.shape[:3]
device = visual.device
visual_lengths = torch.empty(B, T, HW, dtype=visual.dtype).fill_(1)
textual_lengths = torch.zeros(B, 1, max(lengths), dtype=visual.dtype)
for (i, length) in enumerate(lengths):
textual_lengths[i, 0, :length] = 1.0
batch = {
"qas_bert": textual,
"qas_mask": textual_lengths.to(device),
"vid": visual,
"vid_mask": visual_lengths.to(device),
}
return self.stage(batch)
| 2.21875
| 2
|
predict.py
|
nikhil133/ASAP-TextAbstractorSummary
| 1
|
12781065
|
import tensorflow as tf
from attention import AttentionLayer
from tensorflow.keras.models import load_model
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from text_cleaner import text_cleaner,rareword_coverage
import pickle
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def decode_sequence(input_seq,encoder_model,decoder_model,target_word_index,reverse_target_word_index,max_summary_len):
e_out,e_h,e_c=encoder_model.predict(input_seq)
target_seq=np.zeros((1,1))
target_seq[0,0]=target_word_index['sostok']
stop_condition=False
decoded_sentence=''
while not stop_condition:
output_tokens,h,c=decoder_model.predict([target_seq]+[e_out,e_h,e_c])
sampled_token_index=np.argmax(output_tokens[0,-1,:])
sampled_token=reverse_target_word_index[sampled_token_index]
if(sampled_token!='eostok'):
decoded_sentence+=' '+sampled_token
if (sampled_token=='eostok') or len(decoded_sentence.split())>=(max_summary_len-1):
stop_condition=True
target_seq=np.zeros((1,1))
target_seq[0,0]=sampled_token_index
e_h,e_c=h,c
return decoded_sentence
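# decode_sequence performs greedy decoding: starting from the 'sostok' start
# token, it repeatedly feeds the most probable word back into the decoder
# until 'eostok' appears or max_summary_len - 1 words have been emitted.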
def predict(test_value):
max_text_len=30
max_summary_len=8
#test_value="Gave me such a caffeine overdose I had the shakes, a racing heart and an anxiety attack. Plus it tastes unbelievably bad. I'll stick with coffee, tea and soda, thanks."
cleaned_text=[]
cleaned_text.append(text_cleaner(test_value,0))
cleaned_text=np.array(cleaned_text)
short_text=[]
for i in range(len(cleaned_text)):
if len(cleaned_text[i].split())<=max_text_len:
short_text.append(cleaned_text[i])
x_tr_test=short_text
file=open('X_training_value.pkl','rb')
x_trained_text=pickle.load(file)
file.close()
#x_trained_text=np.append(x_trained_text,x_tr_test)
x_tokenizer=Tokenizer()
x_tokenizer.fit_on_texts(x_trained_text)
cnt,tot_cnt,freq,tot_freq=rareword_coverage(4,x_tokenizer)
x_tokenizer=Tokenizer(num_words=tot_cnt-cnt)
x_tokenizer.fit_on_texts(list(x_trained_text))
x_tr_seq=x_tokenizer.texts_to_sequences(x_tr_test)
x_tr=pad_sequences(x_tr_seq,maxlen=max_text_len,padding='post')
# reverse_target_word_index and target_word_index are loaded from pickle below
file=open('reverse_target_word_index.pkl','rb')
reverse_target_word_index=pickle.load(file)
file.close()
file=open('reverse_source_word_index.pkl','rb')
reverse_source_word_index=pickle.load(file)
file.close()
file=open('target_word_index.pkl','rb')
target_word_index=pickle.load(file)
file.close()
max_summary_len=8
#target_word_index=y_tokenizer.word_index
encoder_model=load_model('encoder_model.h5',custom_objects={'AttentionLayer' : AttentionLayer})
decoder_model=load_model('decoder_model.h5',custom_objects={'AttentionLayer' : AttentionLayer})
return decode_sequence(x_tr.reshape(1,max_text_len),encoder_model,decoder_model,target_word_index,reverse_target_word_index,max_summary_len)
#print(predict("Gave me such a caffeine overdose I had the shakes, a racing heart and an anxiety attack. Plus it tastes unbelievably bad. I'll stick with coffee, tea and soda, thanks."))
| 2.40625
| 2
|
test/util/test_map.py
|
azoimide/cyk
| 0
|
12781066
|
<gh_stars>0
from util import map_string, map_to_string, rand_string
def main():
t_tab = {'e': 5, '+': 2, '-': 3, '.': 4, '1': 1, '0': 0}
s = rand_string(len(t_tab))
assert s == map_string(map_to_string(s, t_tab), t_tab)
if __name__ == "__main__":
main()
| 2.875
| 3
|
ML_WebApp/app.py
|
sergiosolmonte/ML_WebApp_easy
| 0
|
12781067
|
## Tutorial by <NAME>
from flask import Flask, request, render_template
import pandas as pd
import joblib
# Declare a Flask app
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def main():
# If a form is submitted
if request.method == "POST":
# Unpickle classifier
clf = joblib.load("clf.pkl")
# Get values through input bars
height = float(request.form.get("height"))
weight = float(request.form.get("weight"))
# Put inputs to dataframe
X = pd.DataFrame([[height, weight]], columns = ["Height", "Weight"])
# Get prediction
prediction = clf.predict(X)[0]
else:
prediction = ""
return render_template("website.html", output = prediction)
# Running the app
if __name__ == '__main__':
app.run(debug = True)
| 3
| 3
|
osmcal/forms.py
|
OSMChina/openstreetmap-calendar
| 26
|
12781068
|
import json
from typing import Dict, Iterable
import babel.dates
import pytz
from django import forms
from django.contrib.postgres.forms import SimpleArrayField
from django.forms import ValidationError
from django.forms.widgets import DateTimeInput, TextInput
from . import models
from .serializers import JSONEncoder
from .widgets import LeafletWidget, TimezoneWidget
class TimezoneField(forms.Field):
def to_python(self, value):
if not value:
# Babel will default to UTC if no string is specified.
return None
try:
return pytz.timezone(
babel.dates.get_timezone_name(value, return_zone=True)
)
except pytz.exceptions.Error:
return None
def validate(self, value):
if not value:
raise ValidationError('no value', code='required')
class QuestionForm(forms.ModelForm):
choices = SimpleArrayField(forms.CharField())
def clean_choices(self):
return [x for x in self.cleaned_data['choices'][0].splitlines() if x]
class Meta:
model = models.ParticipationQuestion
fields = ('question_text', 'answer_type', 'mandatory')
class QuestionnaireForm(forms.Form):
def __init__(self, questions: Iterable[models.ParticipationQuestion], **kwargs) -> None:
self.fields: Dict[str, forms.Field] = {}
super().__init__(**kwargs)
for question in questions:
if question.answer_type == 'TEXT':
f = forms.CharField(label=question.question_text, required=question.mandatory)
elif question.answer_type == 'BOOL':
f = forms.BooleanField(label=question.question_text, required=question.mandatory)
elif question.answer_type == 'CHOI':
f = forms.ChoiceField(label=question.question_text, required=question.mandatory, choices=[(x.id, x.text) for x in question.choices.all()])
else:
raise ValueError("invalid answer_type: %s" % (question.answer_type))
self.fields[str(question.id)] = f
def clean(self, *args, **kwargs):
for k in list(self.cleaned_data): # copy the keys; the dict is mutated below
self.cleaned_data[int(k)] = self.cleaned_data.pop(k)
return super().clean()
class EventForm(forms.ModelForm):
timezone = TimezoneField(required=True, widget=TimezoneWidget())
class Meta:
model = models.Event
fields = ('name', 'whole_day', 'start', 'end', 'link', 'kind', 'location_name', 'location', 'timezone', 'description')
widgets = {
'location': LeafletWidget(),
'start': DateTimeInput(attrs={'class': 'datepicker-flat'}),
'end': DateTimeInput(attrs={'class': 'datepicker-flat', 'placeholder': 'optional'}),
'link': TextInput(attrs={'placeholder': 'https://'}),
'location_name': TextInput(attrs={'placeholder': 'e.g. Café International'}),
}
unlogged_fields = ('timezone', )
def clean(self, *args, **kwargs):
super().clean(*args, **kwargs)
if self.errors:
return self.cleaned_data
tz = self.cleaned_data.get('timezone', None)
"""
Django automatically assumes that datetimes are in the default time zone (UTC),
but in fact they're in the local time zone, so we're stripping the tzinfo from
the field and setting it to the given time zone.
This does not change the value of the time itself, only the time zone placement.
"""
self.cleaned_data['start'] = tz.localize(self.cleaned_data['start'].replace(tzinfo=None))
if self.cleaned_data['end']:
self.cleaned_data['end'] = tz.localize(self.cleaned_data['end'].replace(tzinfo=None))
if self.cleaned_data['end'] <= self.cleaned_data['start']:
self.add_error('end', 'Event end has to be after its start.')
def to_json(self):
d = {}
for field in self.fields:
if field in self.Meta.unlogged_fields:
continue
d[field] = self.cleaned_data[field]
return json.loads(json.dumps(d, cls=JSONEncoder)) # This is bad and I should feel bad.
| 2.515625
| 3
|
scripts/import_old_files.py
|
maxwenger/repast.github.io
| 6
|
12781069
|
#! /usr/bin/env python
import sys
def main(f_orig, f_new):
print("Porting {} as {}".format(f_orig, f_new))
with open(f_new, 'w') as f_out:
start = False
f_out.write("---\nlayout: site\n---\n")
with open(f_orig, 'r') as f_in:
for line in f_in:
line = line.strip()
if line.startswith("<body style=\"background: #f1f1f1;\">"):
start = True
elif line.startswith("<div id=\"footerlinks\">"):
start = False
elif start:
line = line.replace(".php", ".html")
f_out.write(line)
f_out.write("\n")
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
| 2.828125
| 3
|
tests/models/test_hovernetplus.py
|
adamshephard/tiatoolbox
| 0
|
12781070
|
<gh_stars>0
"""Unit test package for HoVerNet+."""
import torch
from tiatoolbox.models.architecture import fetch_pretrained_weights
from tiatoolbox.models.architecture.hovernetplus import HoVerNetPlus
from tiatoolbox.utils.misc import imread
from tiatoolbox.utils.transforms import imresize
def test_functionality(remote_sample, tmp_path):
"""Functionality test."""
tmp_path = str(tmp_path)
sample_patch = str(remote_sample("stainnorm-source"))
patch_pre = imread(sample_patch)
patch_pre = imresize(patch_pre, scale_factor=0.5)
patch = patch_pre[0:256, 0:256]
batch = torch.from_numpy(patch)[None]
# Test functionality with both nuclei and layer segmentation
model = HoVerNetPlus(num_types=3, num_layers=5)
# Test decoder as expected
assert len(model.decoder["np"]) > 0, "Decoder must contain np branch."
assert len(model.decoder["hv"]) > 0, "Decoder must contain hv branch."
assert len(model.decoder["tp"]) > 0, "Decoder must contain tp branch."
assert len(model.decoder["ls"]) > 0, "Decoder must contain ls branch."
fetch_pretrained_weights("hovernetplus-oed", f"{tmp_path}/weights.pth")
pretrained = torch.load(f"{tmp_path}/weights.pth")
model.load_state_dict(pretrained)
output = model.infer_batch(model, batch, on_gpu=False)
assert len(output) == 4, "Must contain predictions for: np, hv, tp and ls branches."
output = [v[0] for v in output]
output = model.postproc(output)
assert len(output[1]) > 0 and len(output[3]) > 0, "Must have some nuclei/layers."
| 2.09375
| 2
|
scrapy-package/scrapy_twrh/spiders/rental591/util.py
|
eala/tw-rental-house-data
| 0
|
12781071
|
from collections import namedtuple
SITE_URL = 'https://rent.591.com.tw'
LIST_ENDPOINT = '{}/home/search/rsList?is_new_list=1&type=1&kind=0&searchtype=1'.format(SITE_URL)
SESSION_ENDPOINT = '{}/?kind=0&region=6'.format(SITE_URL)
ListRequestMeta = namedtuple('ListRequestMeta', ['id', 'name', 'page'])
DetailRequestMeta = namedtuple('DetailRequestMeta', ['id', 'gps'])
| 2.484375
| 2
|
isbn-verifier/isbn_verifier.py
|
rdlu/exercism-python
| 0
|
12781072
|
<gh_stars>0
def is_valid(isbn: str) -> bool:
# keep only the digits, converted to ints
digits = [int(x) for x in filter(str.isnumeric, isbn)]
# handle the edge case: a trailing 'X' check character counts as 10
if isbn and isbn[-1] == 'X': digits.append(10)
# check the length and the position-weighted digit sum
return len(digits) == 10 and sum(digits[10 - i] * i for i in range(1, 11)) % 11 == 0
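# Worked example: "3-598-21508-8" has digits [3,5,9,8,2,1,5,0,8,8] and a
# weighted sum of 3*10 + 5*9 + 9*8 + 8*7 + 2*6 + 1*5 + 5*4 + 0*3 + 8*2 + 8*1
# = 264 = 24 * 11, so is_valid returns True.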
| 3.359375
| 3
|
epicteller/bot/main.py
|
KawashiroNitori/epicteller
| 0
|
12781073
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import nonebot
from nonebot.adapters.cqhttp import Bot as CQHTTPBot
from epicteller.bot import bus_init
from epicteller.core import redis
def main():
nonebot.init(command_start={'/', '!', '！'})
driver = nonebot.get_driver()
driver.register_adapter("cqhttp", CQHTTPBot)
nonebot.load_plugin('nonebot_plugin_sentry')
nonebot.load_plugins('epicteller/bot/plugin')
nonebot.get_driver().on_startup(redis.pool.init)
nonebot.get_driver().on_startup(bus_init)
nonebot.run(host='0.0.0.0', port=10090)
if __name__ == '__main__':
main()
| 1.820313
| 2
|
Python/AllRelaisOn.py
|
derdoktor667/FT232Raspberry2USB
| 0
|
12781074
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import serial
import time
ser = serial.Serial('/dev/ttyUSB0',9600)
ser.write("255\r")
print ("ALL RELAIS: ON!")
time.sleep(10)
ser.write("0\r")
print ("ALL RELAIS: OFF!")
ser.close()
print ("ALL GOOD, EXIT!")
# ...quick and dirty by "<EMAIL>"
| 2.3125
| 2
|
Testing/RawHID/send_test.py
|
SystemsCyber/SSS3
| 0
|
12781075
|
<reponame>SystemsCyber/SSS3
import pywinusb.hid as hid
import time
def sample_handler(data):
print("Raw data: {0}".format(data))
device_filter = hid.HidDeviceFilter(vendor_id = 0x16c0, product_id = 0x0486)
hid_device = device_filter.get_devices()
print("hid_device:")
print(hid_device)
device = hid_device[0]
print("device:")
print(device)
device.open()
out_report = device.find_output_reports()
print("out_report")
print(out_report)
target_usage = hid.get_full_usage_id(0x00, 0x3f)
device.set_raw_data_handler(sample_handler)
print("target_usage:")
print(target_usage)
#device.close()
data = [0xFF]*64
for i in range(256):
#device.open()
#out_report = device.find_output_reports()
#print(i)
#try:
data[0] = i
buf = [0] + data
out_report[0].set_raw_data(buf)
out_report[0].send()
print("Success with {}".format(i))
time.sleep(.001)
device.close()
#except:
# pass
| 2.515625
| 3
|
app.py
|
coolexplorer/slackbot-buffy
| 3
|
12781076
|
import logging.config
import os
import re
from slack import RTMClient
from slack.errors import SlackApiError
from config.cofiguration import Configuration
from exception.invalid_command import InvalidCommand
from model.message import Message
from parser.command_parser import CommandParser
from service.service_accounts import ServiceAccounts
# log
logging.config.fileConfig(fname='log.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
# slack
slack_bot_token = os.environ.get('SLACK_TOKEN', '')
rtm_client = RTMClient(token=slack_bot_token)
# buffy configuration
config = Configuration()
config.read_env()
service_accounts = ServiceAccounts(config)
@RTMClient.run_on(event='message')
def message_event_handler(**payload):
logger.debug(f"payload : {payload}")
data = payload['data']
sub_type = data.get('subtype', None)
if sub_type is not None:
return
web_client = payload['web_client']
rtm_client = payload['rtm_client']
if 'text' in data:
message = Message(data)
is_mention, message.bot_id = check_mention(message.text)
commands = message.parse_message(is_mention)
logger.debug(f"message {commands}")
_parse_command(web_client, message, commands)
def check_mention(text):
pattern = re.compile('<@([a-zA-Z0-9]*)>')
match = pattern.match(text)
if match is not None:
return [True, match.group(1)]
else:
return [False, None]
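# e.g. check_mention('<@U012AB3CD> deploy') returns [True, 'U012AB3CD'] for a
# (hypothetical) bot user ID, while plain text returns [False, None].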
def _parse_command(web_client, message, commands):
try:
parser = CommandParser(service_accounts, commands)
blocks = parser.parse_command()
logger.debug(f"response message: {blocks}")
web_client.chat_postMessage(channel=message.channel, blocks=blocks, thread_ts=message.ts)
except InvalidCommand as e:
logger.error(e)
web_client.chat_postMessage(channel=message.channel, text=str(e))
except SlackApiError as e:
logger.error(f"Got an error: {e.response['error']}")
if __name__ == '__main__':
logger.info(f'Jira Configuration {config.jira}')
logger.info(f'Kubernetes Configuration {config.kubernetes}')
logger.info('RTM Client started...')
rtm_client.start()
| 2.125
| 2
|
src/turkey_bowl/__init__.py
|
loganthomas/turkey-bowl
| 0
|
12781077
|
<gh_stars>0
# Local libraries
from turkey_bowl import aggregate # noqa: F401
from turkey_bowl import draft # noqa: F401
from turkey_bowl import leader_board # noqa: F401
from turkey_bowl import scrape # noqa: F401
from turkey_bowl import turkey_bowl_runner # noqa: F401
from turkey_bowl import utils # noqa: F401
__version__ = "2020.2"
| 1.171875
| 1
|
Chapter06/mypandas/operations/pandas1.py
|
MichaelRW/Python-for-Geeks
| 31
|
12781078
|
# pandas1.py
import pandas as pd
weekly_data = {'day':['Monday','Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday'],
'temp':[40, 33, 42, 31, 41, 40, 30],
'condition':['Sunny','Cloudy','Sunny','Rain','Sunny',
'Cloudy','Rain']
}
df = pd.DataFrame(weekly_data)
print(df)
df1 = df.set_index('day')  # promote the 'day' column to the row index
print(df1)
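# A small follow-up sketch (not in the original listing): once 'day' is the
# index, rows can be looked up by label instead of position; df1.loc['Monday']
# returns the temp and condition recorded for Monday.
print(df1.loc['Monday'])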
| 3.703125
| 4
|
examples/libtest/imports/allsimple.py
|
takipsizad/pyjs
| 739
|
12781079
|
"""
Helper module for import * without __all__
"""
all_import2 = 3
all_import3 = 3
all_override = True
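# Hedged sketch (not part of the original helper): without __all__, a star
# import picks up every public (non-underscore) module-level name, e.g.
#
#   from allsimple import *
#   assert all_import2 == 3 and all_import3 == 3 and all_override is True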
| 1.296875
| 1
|
crashreports/rest_api_heartbeats.py
|
FairphoneMirrors/hiccup-server
| 0
|
12781080
|
<reponame>FairphoneMirrors/hiccup-server
"""REST API for accessing heartbeats."""
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import generics, status
from rest_framework.exceptions import NotFound, ValidationError
from crashreports.models import HeartBeat
from crashreports.permissions import (
HasRightsOrIsDeviceOwnerDeviceCreation,
SWAGGER_SECURITY_REQUIREMENTS_ALL,
)
from crashreports.response_descriptions import default_desc
from crashreports.serializers import HeartBeatSerializer
from crashreports.utils import get_object_by_lookup_fields
@method_decorator(
name="get",
decorator=swagger_auto_schema(
operation_description="List heartbeats",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
),
)
@method_decorator(
name="post",
decorator=swagger_auto_schema(
operation_description="Create a heartbeat",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
request_body=HeartBeatSerializer,
responses=dict(
[
default_desc(ValidationError),
(
status.HTTP_404_NOT_FOUND,
openapi.Response(
"No device with the given uuid could be found."
),
),
]
),
),
)
class ListCreateView(generics.ListCreateAPIView):
"""Endpoint for listing heartbeats and creating new heartbeats."""
queryset = HeartBeat.objects.all()
paginate_by = 20
permission_classes = (HasRightsOrIsDeviceOwnerDeviceCreation,)
serializer_class = HeartBeatSerializer
filter_fields = ("device", "build_fingerprint", "radio_version")
def get(self, request, *args, **kwargs):
"""Override device__uuid parameter with uuid."""
if "uuid" in kwargs:
self.queryset = HeartBeat.objects.filter(
device__uuid=kwargs["uuid"]
)
return generics.ListCreateAPIView.get(self, request, *args, **kwargs)
@method_decorator(
name="get",
decorator=swagger_auto_schema(
operation_description="Get a heartbeat",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound)]),
),
)
@method_decorator(
name="put",
decorator=swagger_auto_schema(
operation_description="Update a heartbeat",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound), default_desc(ValidationError)]),
),
)
@method_decorator(
name="patch",
decorator=swagger_auto_schema(
operation_description="Partially update a heartbeat",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound), default_desc(ValidationError)]),
),
)
@method_decorator(
name="delete",
decorator=swagger_auto_schema(
operation_description="Delete a heartbeat",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound)]),
),
)
class RetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
"""Endpoint for retrieving, updating and deleting heartbeats."""
# pylint: disable=too-many-ancestors
queryset = HeartBeat.objects.all()
permission_classes = (HasRightsOrIsDeviceOwnerDeviceCreation,)
serializer_class = HeartBeatSerializer
multiple_lookup_fields = {"id", "device__uuid", "device_local_id"}
def get_object(self):
"""Retrieve a heartbeat."""
return get_object_by_lookup_fields(self, self.multiple_lookup_fields)
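# Hedged usage sketch (not part of the original module): with DRF's filter
# backend, the fields declared in ``filter_fields`` become query parameters on
# the list endpoint. Assuming the view is routed at a hypothetical
# ``/hiccup/api/v1/heartbeats/``, a filtered request in a test could look like:
#
#   from rest_framework.test import APIClient
#   client = APIClient()
#   response = client.get("/hiccup/api/v1/heartbeats/", {"radio_version": "1.0"})
#
# Both the URL and the radio_version value are illustrative assumptions.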
| 2.140625
| 2
|
torchrecipes/text/doc_classification/tests/test_doc_classification_train_app.py
|
hudeven/recipes
| 0
|
12781081
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# pyre-strict
import os.path
from unittest.mock import patch
import torchrecipes.text.doc_classification.conf # noqa
from torchrecipes.core.base_train_app import BaseTrainApp
from torchrecipes.core.test_utils.test_base import BaseTrainAppTestCase
from torchrecipes.text.doc_classification.tests.common.assets import (
copy_partial_sst2_dataset,
get_asset_path,
copy_asset,
)
from torchrecipes.utils.test import tempdir
class TestDocClassificationTrainApp(BaseTrainAppTestCase):
def setUp(self) -> None:
super().setUp()
# patch the _hash_check() fn output to make it work with the dummy dataset
self.patcher = patch(
"torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
)
self.patcher.start()
def tearDown(self) -> None:
self.patcher.stop()
super().tearDown()
def get_train_app(self, root_dir: str) -> BaseTrainApp:
# copy the asset files into their expected download locations
        # note: we need to do this anywhere we use hydra overrides;
        # otherwise we get a `LexerNoViableAltException`
vocab_path = os.path.join(root_dir, "vocab_example.pt")
spm_model_path = os.path.join(root_dir, "spm_example.model")
copy_asset(get_asset_path("vocab_example.pt"), vocab_path)
copy_asset(get_asset_path("spm_example.model"), spm_model_path)
copy_partial_sst2_dataset(root_dir)
app = self.create_app_from_hydra(
config_module="torchrecipes.text.doc_classification.conf",
config_name="train_app",
overrides=[
"module.model.checkpoint=null",
"module.model.freeze_encoder=True",
f"datamodule.dataset.root={root_dir}",
f"trainer.default_root_dir={root_dir}",
"trainer.logger=False",
"trainer.checkpoint_callback=False",
f"transform.transform.vocab_path={vocab_path}",
f"transform.transform.spm_model_path={spm_model_path}",
"transform.num_labels=2",
],
)
self.mock_trainer_params(app)
return app
@tempdir
def test_doc_classification_task_train(self, root_dir: str) -> None:
train_app = self.get_train_app(root_dir=root_dir)
output = train_app.train()
self.assert_train_output(output)
@tempdir
def test_doc_classification_task_test(self, root_dir: str) -> None:
train_app = self.get_train_app(root_dir=root_dir)
train_app.train()
output = train_app.test()
self.assertIsNotNone(output)
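# Hedged usage note (not part of the original file): as standard unittest test
# cases, these tests can be collected with the usual runners, e.g.
#
#   python -m pytest torchrecipes/text/doc_classification/tests/test_doc_classification_train_app.py
#
# (pytest discovers unittest.TestCase subclasses out of the box.)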
| 2.015625
| 2
|
relational/Spatial_Temporal_Model/main_module.py
|
monthie/cogmods
| 0
|
12781082
|
<reponame>monthie/cogmods
'''
Main Module for the spatial and temporal Models, contains the high-level functions,
all examples (deduction problem-sets) and some unit-tests.
Created on 16.07.2018
@author: <NAME> <<EMAIL>>, <NAME><<EMAIL>>
'''
from copy import deepcopy
import unittest
from parser_spatial_temporal import Parser
import model_construction as construct
import low_level_functions as helper
import backtrack_premises as back
import modify_model as modify
import verification_answer as ver_ans
# GLOBAL VARIABLES
# Two global variables enabling function-prints. Mainly used for debugging purposes.
PRINT_PARSING = False # global variable for whether to print parsing process or not
PRINT_MODEL = False # global variable for whether to print model construction process or not.
class MainModule:
"""
Main module of the Temporal and Spatial Model. This Class contains the high-level functions
like "process_problem_spatial/temporal" which the user can call on a specific problem set.
Moreover, it contains the different "interpret" and "decide" - functions for each the Spatial
and the Temporal Model, which are called depending on whether
"process_problem spatial / temporal" was called.
Short Spatial and Temporal Model Documentation:
This version of the Spatial Model is semantical (almost) equal to the space6 lisp program
from 1989. In this version the program can be called with interpret_spatial(problem) directly
or by calling process_problem / process_all_problems. The Temporal Model can be called likewise
to the Spatial Model.
The Temporal model is as well semantically equal to the first version of the python
translation, besides the fact that it uses dictionaries and is now able to also process
spatial deduction problems.
Quick description of the program:
First it goes through all given PREMISES, parses them and adds them to
the current model or creates a new one. Premises can also combine two exising
models when both items of a premise are in different models.
If The two items from a premise are in the same model, the program will
check if this premise holds in the model. If yes, tries to falsify and
if no, tries to verify the premise an the given model. In order to do this,
the program takes the last added premise(which triggered verify) and tries
to make it hold in the model. If there is a model that satisfies the premise,
it will be added to a list of PREMISES that always have to hold. Now it will
iteratively check if there are conflicts in the current model(that is changed
every time when a premise is made true f.i.) and then try to make them true
in the model. If there are no more conficting PREMISES, the program will
return the new model. If there is a premise that cannot be satisfied in the
model, the program will terminate with the opposite result(e.g. if a certain
premise cannot be made false in the model, it is verified. If it can be made
false, the previous model might be wrong/false.
There is only one order for verifying the conflicting PREMISES that occur,
so there are different models with a different outcome probably left out.
"""
# Global variable capacity illustrating the working memory (how many different models can be
# kept in the working memory). Is usually set to 4.
capacity = 4
def process_all_problems(self, problems, problem_type, spatial_parser=None):
"""
The function takes a set of problems, a problem_type and optional a desired
parser as input. The problem_type can be either the string "spatial" or "temporal",
depending on that the interpret-function of either the temporal or spatial model is called.
The boolean spatial_parser is set to None if not explicitly written in the function-call,
which enables the default parser for each of the models (temporal or spatial parser).
If temporal deduction problems should be processed by the Spatial Model, then one has to
type "spatial" for the problem-type and "False" for spatial_parser. For spatial
deduction problems computed in the temporal model, the problem_type needs to be "temporal"
and spatial_parser needs to be set to "True".
All problems in the problem set are executed by calling the appropriate
interpret-function for each problem. Before each new problem,
the function inserts two dotted lines to separate the problems clearly.
"""
if not problems:
print("No problem given to process!")
for problem in problems:
print("-----------------------------------------------------------------------")
print("-----------------------------------------------------------------------")
print("Process-all-problems: Interpret next problem!")
if problem_type == "spatial":
if spatial_parser is None:
spatial_parser = True
self.interpret_spatial(problem, spatial_parser)
elif problem_type == "temporal":
if spatial_parser is None:
spatial_parser = False
self.interpret_temporal(problem, spatial_parser)
else:
print("No such problem type exists. Try >spatial< or >temporal<.")
def process_problem(self, number, problems, problem_type, spatial_parser=None):
"""
The function takes a number, a set of problems, a problem_type and optional a desired
parser as input. The problem_type can be either the string "spatial" or "temporal",
depending on that the interpret-function of either the temporal or spatial model is called.
The boolean spatial_parser is set to None if not explicitly written in the function-call,
which enables the default parser for each of the models (temporal or spatial parser).
If a temporal deduction problem should be processed by the Spatial Model, then one has to
type "spatial" for the problem-type and "False" for spatial_parser. For a spatial
deduction problem computed in the temporal model, the problem_type needs to be "temporal"
and spatial_parser needs to be set to "True".
The function Function processes the problem number n of the given problem-set.
Every premise of this particular problem is interpreted with the appropriate
interpret-function.
"""
if not problems:
print("No problem given to process!")
return
if number > len(problems):
print("There is no problem with that number in this problem-set. Try a smaller number!")
return
if number < 1:
print("Try a positive number")
return
print("Problem to execute is number", number, "\n",
problems[number-1])
if problem_type == "spatial":
if spatial_parser is None:
spatial_parser = True
self.interpret_spatial(problems[number-1], spatial_parser)
elif problem_type == "temporal":
if spatial_parser is None:
spatial_parser = False
self.interpret_temporal(problems[number-1], spatial_parser)
else:
print("No such problem type exists. Try >spatial< or >temporal<.")
# ---------------------------- SPATIAL MODEL FUNCTIONS --------------------------------------------
def interpret_spatial(self, prem, spatial_parser):
"""
interpret_spatial iterates over the given premise and parses it.
calls the parse function on the PREMISES, then calls the decide_spatial function
on the results.returns the resulting models and the deduction that was
made after parsing every premise.
The argument "spatial_parser" defines which parser shall be used for processing the
premises. For a temporal problem, spatial_parser should be set to False, and for a
spatial problem to True.
"""
mods = [] # list of models
all_mods = [] # to save each construction step
# Check whether the problem is a question problem, which is the case when a Temporal
# problem is executed with the Spatial Model. This is however not possible at the moment.
is_question = ver_ans.is_question(prem)
if is_question:
print("The Spatial Model cannot solve problems with questions.",
"Try a problem without a question.")
return mods
# create the desired parser from the Parser Module
if spatial_parser:
pars = Parser(True)
else:
pars = Parser(False)
# iterate over the list of PREMISES, return models when done
for pre_ in prem:
if PRINT_MODEL:
print(pre_, "premise")
premise = pars.parse(pre_) # the currently parsed premise
            if PRINT_MODEL:
                print("parsed premise: ", premise)
mods = self.decide_spatial(premise, mods, prem, spatial_parser)
mods[0] = helper.normalize_coords(mods[0])
mods[0] = modify.shrink_dict(mods[0])
# list for all models
all_mods.append(deepcopy(mods[0]))
if PRINT_MODEL:
print("current model after decide_spatial: ", mods)
# print out models in the list.
print("list of all resulting Models")
print(all_mods)
# print all models in the model list.
helper.print_models(all_mods)
return mods
def decide_spatial(self, proposition, models, premises, spatial_parser):
"""[2]
takes the parsed premise and the list of current models.
extracts the subject and object of the premise, then checks if they
can be found in any model.
deletes the models from the models list, if they contain the subj. or obj.
calls helper function choose_function_spatial to decide_spatial what should
be done depending on the
premise and the current models.(see documentation of ddci for more detail)
returns list of current models as a result of choose_function_spatial.
"""
relation = helper.get_relation(proposition)
subject = helper.get_subject(proposition)
object1 = helper.get_object(proposition)
s_co = None
o_co = None
subj_mod = None
obj_mod = None
if PRINT_MODEL:
print("call decide_spatial with rel-subj-obj:", relation, subject, object1)
# retrieve the subject and the object from the models
subj_co_mod = helper.find_first_item(subject, models)
if subj_co_mod is not None:
s_co = subj_co_mod[0]
subj_mod = subj_co_mod[1]
obj_co_mod = helper.find_first_item(object1, models)
if obj_co_mod is not None:
o_co = obj_co_mod[0]
obj_mod = obj_co_mod[1]
if subj_mod in models:
models.remove(subj_mod)
if obj_mod in models:
models.remove(obj_mod)
#print("s_co and o_co:", s_co, o_co)
if not models:
return [self.choose_function_spatial(proposition, s_co, o_co, relation, subject,
object1, subj_mod, obj_mod, premises,
spatial_parser)]
models.insert(0, self.choose_function_spatial(proposition, s_co, o_co, relation,
subject, object1, subj_mod, obj_mod,
premises, spatial_parser))
return models
@staticmethod
def choose_function_spatial(proposition, s_co, o_co, relation, subject, object1,
subj_mod, obj_mod, premises, spatial_parser):
"""
takes a premise(proposition), subject-and object coordinates, a subject and
an object and their models in which they are contained.
deletes the models from the models list, if they contain the subj. or obj.
creates a new model if the subj. and obj. both aren't in any model.
if one of them is in a model, add the new item to the corresponding model.
if they both are in the same model, verify the model. depending on the
result of that, calls make_true or make_false to find counterexamples.
"""
if s_co is not None:
if o_co is not None:
# whole premise already in model, check if everything holds
if subj_mod == obj_mod:
if PRINT_MODEL:
print("verify, whether subj. and obj. are in same model")
# verify returns the model in case the premise holds
if ver_ans.verify_spatial(proposition, subj_mod) is not None:
if PRINT_MODEL:
print("verify returns true, the premise holds")
                        # try to falsify the result
return ver_ans.make_false(proposition, subj_mod, premises, spatial_parser)
# try to make all PREMISES hold
return ver_ans.make_true(proposition, subj_mod, premises, spatial_parser)
# subj and obj both already exist, but in different models
                if PRINT_MODEL:
                    print("combine")
                return construct.combine(relation, s_co, o_co, subj_mod, obj_mod)
            if PRINT_MODEL:
                print("add object to the model")
                # the relation is converted because the object is the item being added
                print("relation before convert: ", relation, "after convert: ",
                      helper.convert(relation))
            return construct.add_item(s_co, helper.convert(relation), object1, subj_mod)
# object != Null but subject doesn't exist at this point
elif o_co is not None:
if PRINT_MODEL:
print("add subject to the model")
return construct.add_item(o_co, relation, subject, obj_mod)
else:
# sub and ob doesn't exist at the moment
if PRINT_MODEL:
print("startmod")
return construct.startmod(relation, subject, object1)
# ---------------------------- TEMPORAL MODEL FUNCTIONS -------------------------------------------
def interpret_temporal(self, premises, spatial_parser, unit_test=False, worked_back=False):
"""
High level function that is called for each problem consisting of several premises.
The argument "spatial_parser" defines which parser shall be used for processing the
premises. For a temporal problem, spatial_parser should be set to False, and for a
spatial problem to True.
The argument unit_test is usually set to False except in the test-functions of the
unit-test. This argument will then disable the 3D-plot of the models.
The factor capacity describes the working memory, specifically how many models
the program can remember. Is the capacity of models exceeded (len(mods) > capacity),
and if the last premise of the problem is a question, "work_back" is called
(function which extracts only the premises which are relevant to answer the question).
If the last premise wasn´t a question or if worked_back is set to True when calling
interpret_temporal, the capacity is just set to a high number, making it possible to
continue solving the problem.
While iterating over the premises of the problem, the function checks for each
premise whether capacity was exceeded and whether the current premise is a question.
If thats the case, "answer" is called, which will give a statement about the
relation between the two items in the questions. After checking for capacity and
question, function parses the current premise and calls decide_temporal on it afterwards
in order to use this premise to continue constructing the mental model(s).
The resulting models for the problem are returned either after answering a
question or at the end of the function.
Additionally, in case the processed problem is a temporal problem (spatial_parser is set
to False), interpret_temporal calls "format_model_print" for each premise in order to
print the current models in a formated and visually better way.
"""
capacity = self.capacity
is_question = ver_ans.is_question(premises) # boolean whether we have a question or not
mods = [] # List of models
mod_length = 0
if spatial_parser: # Set the desired parser.
parser = Parser(True)
else:
parser = Parser(False)
if not premises:
print("No PREMISES found! Return empty list of models")
return mods
for prem in premises:
# Print all current models.
if mods:
print("INTERPRET-LOOP: THE CURRENT NUMBER OF MODELS IS:", mod_length)
if not spatial_parser:
for mod in mods: # print all models as nested lists (Works only for temporal!)
print("-------------------------------------------------------------")
mod = helper.normalize_coords(mod)
helper.format_model_dictionary(mod)
# Uncomment the following line in order to print each construction step in 3D
# helper.print_models(mods)
if mod_length > capacity: # Capacity is exceeded, too many models!
if ((worked_back is False) and is_question):
print("interpret_temporal: CAPACITY exceeded - work back PREMISES!")
reversed_prems = back.work_back(premises)
return self.interpret_temporal(reversed_prems, spatial_parser, unit_test)
print("interpret_temporal: Memory capacity exceeded, increase it to 500!")
self.capacity = 500
return self.interpret_temporal(premises, spatial_parser, unit_test)
if prem[0] == "?": # Found a question at the end, answer it!
print("interpret_temporal: Answer question!")
ver_ans.answer([prem[1]], [prem[2]], mods, spatial_parser)
if not unit_test:
helper.print_models(mods) # print all models in a 3D - plot
return mods
print("Interpret: Premise is: ", prem)
parsed_prem = parser.parse(prem) # The current parsed premise
print("Interpret: Parsed Premise is: ", parsed_prem)
# Continue constructing models with new premise
mods = self.decide_temporal(parsed_prem, mods, spatial_parser)
            if mods is not None:  # calculate the number of models
                mod_length = len(mods)
            else:
                mod_length = 0
# Construction of models is done. Print the models and the amount of models and return mods.
        if mods is not None:
            print("THE FINAL NUMBER OF MODELS IS:", mod_length)
            if not spatial_parser:
                print("INTERPRET FINISHED; RESULTING MODELS ARE (FORMATTED):")
for mod in mods: # print all models as nested lists (Works only for temporal!)
print("-------------------------------------------------------------")
mod = helper.normalize_coords(mod)
helper.format_model_dictionary(mod)
if not unit_test:
helper.print_models(mods) # print all models in a 3D - plot
else:
print("No model for this premise set!")
return mods
@staticmethod
def decide_temporal(proposition, models, spatial_parser):
"""
Function takes a premise (parsed proposition) and first extracts the subject,
relation and object of the proposition.
Afterwards, the function calls "find_item_mods" on the subject and on the object
in order to find all models which already contain the subject or the object.
Depending on those two model-lists, decide_temporal how to handle the new proposition:
- if both the subject and object are already contained in the existing models:
- if those model lists contain the same models: Try to verify the new proposition.
- if the model lists contain different models: Try to combine existing models with
the new proposition.
- if either the subject or the object is already contained in the existing models
(but not both): call add_item_models and add the new item (either subject or object)
to the existing models.
- if neither subject nor object is already contained in the existing models:
Start a new model with the given subject, object and relation of the proposition.
"""
        relation = helper.get_relation(proposition)
        relation = (relation[0], relation[1], relation[2])  # ensure the relation is a tuple
subj = [helper.get_subject(proposition)]
obj = [helper.get_object(proposition)]
subj_mods = helper.find_item_mods(subj, models) # Find set of models in which subj occurs
obj_mods = helper.find_item_mods(obj, models) # Find set of models in which obj occurs
if PRINT_MODEL:
print("Function call - DECIDE with subj_mods =", subj_mods, " obj_mods =", obj_mods)
        if subj_mods is not None:
            if obj_mods is not None:
if subj_mods == obj_mods: # VERIFY
print("-----------Verify premise!")
models = ver_ans.verify_models(relation, subj, obj, models, spatial_parser)
return models
print("-----------Combine models!") # COMBINE
models = construct.combine_mods(relation, subj, obj, subj_mods, subj_mods, obj_mods)
return models
print("-----------Add Object to model!") # ADD
# The following is needed to make Spatial Problems work with the Temporal model.
if not spatial_parser and relation == (0, 1, 0): # do not alter relation "while"
models = construct.add_item_models(relation, obj, subj, models, spatial_parser)
else:
models = construct.add_item_models(helper.converse(relation), obj, subj,
models, spatial_parser)
return models
        elif obj_mods is not None:
print("-----------Add Subject to model!") # ADD
models = construct.add_item_models(relation, subj, obj, models, spatial_parser)
return models
else:
print("-----------Start new Model!") # START
models.append(construct.startmod(relation, subj, obj))
return models
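# Illustrative driver (hypothetical, not part of the original module): process a
# whole problem set (defined below) in one call; problem_type selects the model,
# and the parser defaults accordingly (see process_all_problems above).
#
#   if __name__ == "__main__":
#       MainModule().process_all_problems(DEDUCTIVE_PROBLEMS, "spatial")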
# ----------------------------SPATIAL REASONING PROBLEM SETS ------------------------------------
# SPATIAL COMBINATION PROBLEMS
COMBO_PROBLEMS = [
[["the", "square", "is", "behind", "the", "circle"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "square", "is", "on", "the", "left", "of", "the", "cross"]],
[["the", "circle", "is", "in", "front", "of", "the", "square"],
["the", "triangle", "is", "behind", "the", "cross"],
["the", "cross", "is", "on", "the", "right", "of", "the", "square"]],
[["the", "square", "is", "behind", "the", "circle"],
["the", "triangle", "is", "behind", "the", "cross"],
["the", "cross", "is", "on", "the", "left", "of", "the", "square"]],
[["the", "square", "is", "behind", "the", "circle"],
["the", "triangle", "is", "behind", "the", "cross"],
["the", "line", "is", "above", "the", "triangle"],
["the", "cross", "is", "on", "the", "left", "of", "the", "square"]]]
# SPATIAL DEDUCTION PROBLEMS
# (correct: 1, 2, 3, 4, 5, 6; for 5 and 6 only the important intermediate results were checked)
DEDUCTIVE_PROBLEMS = [
[["the", "circle", "is", "on", "the", "right", "of", "the", "square"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "circle"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "line", "is", "in", "front", "of", "the", "circle"],
["the", "cross", "is", "on", "the", "left", "of", "the", "line"]], # Premise follows validly
[["the", "cross", "is", "in", "front", "of", "the", "circle"],
["the", "circle", "is", "in", "front", "of", "the", "triangle"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"]], # Premise follows validly
[["the", "square", "is", "on", "the", "right", "of", "the", "circle"],
["the", "circle", "is", "on", "the", "right", "of", "the", "triangle"],
["the", "square", "is", "on", "the", "right", "of", "the", "triangle"]], # Premise fol. validly
[["the", "square", "is", "on", "the", "right", "of", "the", "circle"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "circle"],
["the", "square", "is", "on", "the", "right", "of", "the", "triangle"]], # Premise fol. validly
[["the", "square", "is", "on", "the", "right", "of", "the", "circle"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "square"],
["the", "square", "is", "behind", "the", "line"],
["the", "line", "is", "on", "the", "right", "of", "the", "cross"]], # Premise follows validly
[["the", "triangle", "is", "on", "the", "right", "of", "the", "square"],
["the", "circle", "is", "in", "front", "of", "the", "square"],
["the", "cross", "is", "on", "the", "left", "of", "the", "square"],
["the", "line", "is", "in", "front", "of", "the", "cross"],
["the", "line", "is", "on", "the", "right", "of", "the", "ell"],
["the", "star", "is", "in", "front", "of", "the", "ell"],
["the", "circle", "is", "on", "the", "left", "of", "the", "vee"],
["the", "ess", "is", "in", "front", "of", "the", "vee"],
["the", "star", "is", "on", "the", "left", "of", "the", "ess"]]] # Premise follows validly
# SPATIAL INDETERMINATE PROBLEMS --> all correct
INDETERMINATE_PROBLEMS = [
[["the", "circle", "is", "on", "the", "right", "of", "the", "square"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "circle"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "line", "is", "in", "front", "of", "the", "square"],
["the", "cross", "is", "on", "the", "left", "of", "the", "line"]], # previously possibly true
[["the", "triangle", "is", "on", "the", "right", "of", "the", "square"],
["the", "circle", "is", "in", "front", "of", "the", "square"],
["the", "cross", "is", "on", "the", "left", "of", "the", "triangle"],
["the", "line", "is", "in", "front", "of", "the", "cross"],
["the", "line", "is", "on", "the", "right", "of", "the", "ell"],
["the", "star", "is", "in", "front", "of", "the", "ell"],
["the", "circle", "is", "on", "the", "left", "of", "the", "vee"],
["the", "ess", "is", "in", "front", "of", "the", "vee"],
["the", "star", "is", "on", "the", "right", "of", "the", "ess"]], # previously possibly false
[["the", "square", "is", "on", "the", "right", "of", "the", "circle"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "square"],
["the", "triangle", "is", "on", "the", "right", "of", "the", "circle"]],# previously pos. false
[["the", "square", "is", "on", "the", "right", "of", "the", "circle"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "square"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "line", "is", "in", "front", "of", "the", "circle"],
["the", "cross", "is", "on", "the", "right", "of", "the", "line"]], # previously possibly false
[["the", "square", "is", "on", "the", "right", "of", "the", "circle"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "square"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "line", "is", "in", "front", "of", "the", "circle"],
["the", "triangle", "is", "on", "the", "right", "of", "the", "circle"]],# previously pos. false
[["the", "circle", "is", "on", "the", "right", "of", "the", "square"],
["the", "triangle", "is", "on", "the", "left", "of", "the", "circle"],
["the", "cross", "is", "in", "front", "of", "the", "triangle"],
["the", "line", "is", "in", "front", "of", "the", "square"],
["the", "cross", "is", "on", "the", "right", "of", "the", "line"]], # previously possibly false
[["the", "triangle", "is", "in", "front", "of", "the", "square"],
["the", "circle", "is", "on", "the", "right", "of", "the", "square"],
["the", "cross", "is", "behind", "the", "triangle"],
["the", "line", "is", "on", "the", "right", "of", "the", "cross"],
["the", "line", "is", "in", "front", "of", "the", "ell"],
["the", "star", "is", "on", "the", "right", "of", "the", "ell"],
["the", "circle", "is", "behind", "the", "vee"],
["the", "ess", "is", "on", "the", "right", "of", "the", "vee"],
["the", "star", "is", "in", "front", "of", "the", "ess"]], # previously possibly false
[["the", "triangle", "is", "on", "top", "of", "the", "square"],
["the", "circle", "is", "on", "the", "right", "of", "the", "square"],
["the", "cross", "is", "below", "the", "triangle"],
["the", "line", "is", "on", "the", "right", "of", "the", "cross"],
["the", "line", "is", "on", "top", "of", "the", "ell"],
["the", "star", "is", "on", "the", "right", "of", "the", "ell"],
["the", "circle", "is", "below", "the", "vee"],
["the", "ess", "is", "on", "the", "right", "of", "the", "vee"],
["the", "star", "is", "on", "top", "of", "the", "ess"]], # previously possibly false
[["the", "square", "is", "on", "the", "right", "of", "the", "triangle"],
["the", "circle", "is", "on", "the", "left", "of", "the", "square"],
["the", "circle", "is", "behind", "the", "star"],
["the", "ell", "is", "in", "front", "of", "the", "circle"],
["the", "line", "is", "in", "front", "of", "the", "triangle"],
["the", "vee", "is", "in", "front", "of", "the", "triangle"],
["the", "star", "is", "on", "the", "right", "of", "the", "vee"]]] # previously possibly false
# SPATIAL PROBLEMS WITH INCONSISTENT PREMISES --> all correct
INCONSISTENT_PROBLEMS = [
[["the", "square", "is", "on", "the", "left", "of", "the", "circle"],
["the", "cross", "is", "in", "front", "of", "the", "square"],
["the", "triangle", "is", "on", "the", "right", "of", "the", "circle"],
["the", "triangle", "is", "behind", "the", "line"],
["the", "line", "is", "on", "the", "left", "of", "the", "cross"]], # premise is inconsistent
[["the", "square", "is", "in", "front", "of", "the", "circle"],
["the", "triangle", "is", "behind", "the", "circle"],
["the", "triangle", "is", "in", "front", "of", "the", "square"]], # premise is inconsistent
[["the", "triangle", "is", "on", "the", "right", "of", "the", "square"],
["the", "circle", "is", "in", "front", "of", "the", "square"],
["the", "cross", "is", "on", "the", "left", "of", "the", "square"],
["the", "line", "is", "in", "front", "of", "the", "cross"],
["the", "line", "is", "on", "the", "right", "of", "the", "ell"],
["the", "star", "is", "in", "front", "of", "the", "ell"],
["the", "circle", "is", "on", "the", "left", "of", "the", "vee"],
["the", "ess", "is", "in", "front", "of", "the", "vee"],
["the", "star", "is", "on", "the", "right", "of", "the", "ess"]]] # premise is inconsistent
# ----------------------------TEMPORAL REASONING PROBLEM SETS ------------------------------------
# PROBLEMS WITH QUESTIONS
TRANSITIVE_ONE_MODEL_PROBLEMS = [
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens after D
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens after D
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens after D
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]]] # E happens after D
NON_TRANSITIVE_ONE_MODEL_PROBLEMS = [
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens before E
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens after D
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens after D
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens after D
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]]] # E happens after D
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS = [
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens after E
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens after E
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens after E
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "D", "E"]], # D happens after E
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens before D
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens before D
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]], # E happens before D
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "C"],
["?", "E", "D"]]] # E happens before D
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS = [
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "D", "E"]], # No definite relation
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "D", "E"]], # No definite relation
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "D", "E"]], # No definite relation
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "D", "E"]], # No definite relation
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "E", "D"]], # No definite relation
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "E", "D"]], # No definite relation
[["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "before", "the", "B"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "E", "D"]], # No definite relation
[["the", "B", "happens", "after", "the", "A"],
["the", "B", "happens", "after", "the", "C"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "A"],
["?", "E", "D"]]] # No definite relation
WORKING_BACKWARDS_PROBLEMS = [
[["the", "X", "happens", "before", "the", "B"],
["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "before", "the", "C"],
["the", "C", "happens", "before", "the", "D"],
["the", "E", "happens", "before", "the", "D"],
["the", "F", "happens", "before", "the", "D"],
["?", "A", "D"]], # A happens before D
[["the", "A", "happens", "after", "the", "Z"],
["the", "B", "happens", "after", "the", "Z"],
["the", "C", "happens", "after", "the", "Z"],
["the", "D", "happens", "after", "the", "Z"],
["?", "A", "D"]], # No definite relation
[["the", "A", "happens", "before", "the", "B"],
["the", "B", "happens", "before", "the", "C"],
["the", "D", "happens", "before", "the", "C"],
["the", "E", "happens", "before", "the", "C"],
["the", "F", "happens", "before", "the", "C"],
["the", "G", "happens", "before", "the", "C"],
["?", "A", "C"]], # A happens before C
[["the", "A", "happens", "after", "the", "B"],
["the", "B", "happens", "after", "the", "C"],
["the", "C", "happens", "after", "the", "D"],
["the", "E", "happens", "after", "the", "D"],
["the", "F", "happens", "after", "the", "D"],
["the", "G", "happens", "after", "the", "D"],
["?", "A", "D"]]] # A happens after D
# PROBLEMS WITHOUT QUESTIONS
COMBINATION_PROBLEMS = [
[["the", "A", "happens", "while", "the", "B"],
["the", "C", "happens", "while", "the", "D"],
["the", "A", "happens", "before", "the", "C"]],
[["the", "B", "happens", "while", "the", "A"],
["the", "D", "happens", "while", "the", "C"],
["the", "C", "happens", "after", "the", "A"]],
[["the", "A", "happens", "while", "the", "B"],
["the", "D", "happens", "while", "the", "C"],
["the", "C", "happens", "before", "the", "A"]],
[["the", "A", "happens", "while", "the", "B"],
["the", "D", "happens", "while", "the", "C"],
["the", "E", "happens", "while", "the", "D"],
["the", "C", "happens", "before", "the", "A"]]]
DEDUCTION_PROBLEMS = [
[["the", "B", "happens", "after", "the", "A"],
["the", "D", "happens", "before", "the", "B"],
["the", "C", "happens", "while", "the", "D"],
["the", "E", "happens", "while", "the", "B"],
["the", "C", "happens", "before", "the", "E"]], # Premise follows from previous ones
[["the", "C", "happens", "while", "the", "B"],
["the", "B", "happens", "while", "the", "D"],
["the", "C", "happens", "while", "the", "D"]], # Premise follows from previous ones
[["the", "A", "happens", "after", "the", "B"],
["the", "B", "happens", "after", "the", "D"],
["the", "A", "happens", "after", "the", "D"]], # Premise follows from previous ones
[["the", "A", "happens", "after", "the", "B"],
["the", "D", "happens", "before", "the", "B"],
["the", "A", "happens", "after", "the", "D"]], # Premise follows from previous ones
[["the", "A", "happens", "after", "the", "B"],
["the", "C", "happens", "while", "the", "D"],
["the", "D", "happens", "before", "the", "A"],
["the", "A", "happens", "while", "the", "E"],
["the", "E", "happens", "after", "the", "C"]], # Premise follows from previous ones
[["the", "D", "happens", "after", "the", "A"],
["the", "B", "happens", "while", "the", "A"],
["the", "C", "happens", "before", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["the", "E", "happens", "after", "the", "F"],
["the", "G", "happens", "while", "the", "F"],
["the", "B", "happens", "before", "the", "H"],
["the", "J", "happens", "while", "the", "H"],
["the", "G", "happens", "before", "the", "J"]]] # Premise follows from previous ones
INDETERMINACIES_PROBLEMS = [
[["the", "B", "happens", "after", "the", "A"],
["the", "D", "happens", "before", "the", "B"],
["the", "C", "happens", "while", "the", "D"],
["the", "E", "happens", "while", "the", "A"],
["the", "C", "happens", "before", "the", "E"]], # Premise was hitherto possibly false
[["the", "D", "happens", "after", "the", "A"],
["the", "B", "happens", "while", "the", "A"],
["the", "C", "happens", "before", "the", "D"],
["the", "E", "happens", "while", "the", "C"],
["the", "E", "happens", "after", "the", "F"],
["the", "G", "happens", "while", "the", "F"],
["the", "B", "happens", "before", "the", "H"],
["the", "J", "happens", "while", "the", "H"],
["the", "G", "happens", "after", "the", "J"]], # Premise was hitherto possibly false
[["the", "A", "happens", "after", "the", "B"],
["the", "D", "happens", "before", "the", "A"],
["the", "D", "happens", "after", "the", "B"]], # Premise was hitherto possibly false
[["the", "A", "happens", "after", "the", "B"],
["the", "D", "happens", "before", "the", "A"],
["the", "C", "happens", "while", "the", "D"],
["the", "E", "happens", "while", "the", "B"],
["the", "C", "happens", "after", "the", "E"]], # Premise was hitherto possibly false
[["the", "A", "happens", "after", "the", "B"],
["the", "D", "happens", "before", "the", "A"],
["the", "C", "happens", "while", "the", "D"],
["the", "E", "happens", "while", "the", "B"],
["the", "D", "happens", "after", "the", "B"]], # Premise was hitherto possibly false
[["the", "B", "happens", "after", "the", "A"],
["the", "D", "happens", "before", "the", "B"],
["the", "C", "happens", "while", "the", "D"],
["the", "E", "happens", "while", "the", "A"],
["the", "C", "happens", "after", "the", "E"]], # Premise was hitherto possibly false
[["the", "D", "happens", "while", "the", "A"],
["the", "B", "happens", "after", "the", "A"],
["the", "C", "happens", "while", "the", "D"],
["the", "E", "happens", "after", "the", "C"],
["the", "E", "happens", "while", "the", "F"],
["the", "G", "happens", "after", "the", "F"],
["the", "B", "happens", "while", "the", "H"],
["the", "J", "happens", "after", "the", "H"],
["the", "G", "happens", "while", "the", "J"]], # Premise follows from the previous ones
[["the", "A", "happens", "after", "the", "D"],
["the", "B", "happens", "before", "the", "A"],
["the", "B", "happens", "while", "the", "G"],
["the", "F", "happens", "while", "the", "B"],
["the", "E", "happens", "while", "the", "D"],
["the", "H", "happens", "while", "the", "D"],
["the", "G", "happens", "after", "the", "H"]]] # Premise was hitherto possibly false
INCONSISTENT_PREMISES_PROBLEMS = [
[["the", "A", "happens", "before", "the", "B"],
["the", "C", "happens", "while", "the", "A"],
["the", "D", "happens", "after", "the", "B"],
["the", "D", "happens", "while", "the", "E"],
["the", "E", "happens", "before", "the", "C"]], # premise is inconsistent
[["the", "A", "happens", "while", "the", "B"],
["the", "D", "happens", "while", "the", "B"],
["the", "D", "happens", "while", "the", "A"]], # Premise follows from the previous ones
[["the", "B", "happens", "after", "the", "A"],
["the", "D", "happens", "while", "the", "A"],
["the", "C", "happens", "before", "the", "A"],
["the", "E", "happens", "while", "the", "C"],
["the", "E", "happens", "after", "the", "F"],
["the", "G", "happens", "while", "the", "F"],
["the", "B", "happens", "before", "the", "H"],
["the", "J", "happens", "while", "the", "H"],
["the", "G", "happens", "after", "the", "J"]]] # premise is inconsistent
# ---------------------------- UNIT TEST FOR TEMPORAL MODEL ------------------------------------
class TestInterpret(unittest.TestCase):
"""
Unittest-Class which tests each of the 9 different problem types
in a seperate test-function.
(Note: The Unit-Tests are almost the same as in program version v1.
The new thing is that there is a function "translate_all_dicts" which
evaluates the output of "interpret" before comparing it in the assertions.
The new function simply translates the new output dictionaries to nested lists,
like the models were stored in version v1.)
(Note 2: In the unit-tests, the models are not 3D-plotted, otherwise the unit-test
stops after each finished model)
"""
def test_transitive_one_model(self):
"""
Unittest consisting of eight problems of the problem-type transitive
one-model problems (These problem-types lead to only one model which does
require transitive relations in order to solve the problem-question).
Model should always answer with a definite relation like before/ after/ while
between the two events.
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[0], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[1], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[2], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[3], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[4], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[5], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[6], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
TRANSITIVE_ONE_MODEL_PROBLEMS[7], False, True)),
[[[['A'], ['D']], [['B'], [None]], [['C'], ['E']]]])
def test_non_transitive_one_model(self):
"""
Unittest consisting of eight problems of the problem-type non-transitive
one-model problems (These problem-types lead to only one model which doesn´t
require transitive relations in order to solve the problem-question).
Model should always answer with a definite relation like before/ after/ while
between the two events.
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[0], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[1], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[2], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[3], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[4], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[5], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[6], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
NON_TRANSITIVE_ONE_MODEL_PROBLEMS[7], False, True)),
[[[['A'], [None]], [['B'], ['D']], [['C'], ['E']]]])
def test_multiple_model_with_answer(self):
"""
Unittest consisting of eight problems of the problem-type multiple models with
valid answer (These problem-types lead to different models, which all lead to the
same conclusion for the problem-question).
Model should always answer with a definite relation like before/ after/ while
between the two events.
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[0], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[1], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[2], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[3], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[4], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[5], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[6], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_VALID_ANSWER_PROBLEMS[7], False, True)),
[[[['A'], [None]], [['C'], ['E']], [['B'], ['D']]],
[[['C'], ['E']], [['A'], [None]], [['B'], ['D']]],
[[['A'], ['C'], ['E']], [['B'], ['D'], [None]]]])
def test_multiple_model_with_no_ans(self):
"""
Unittest consisting of eight problems of the problem-type multiple models with
no valid answer (These problem-types lead to different models, which lead
to different conclusions for the problem-question, which is why there is
no definite answer).
Model should always answer with "There is no definite relation between the two events."
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[0], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[1], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[2], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[3], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[4], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[5], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[6], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
MULTIPLE_MODEL_WITH_NO_VALID_ANSWER_PROBLEMS[7], False, True)),
[[[['A'], ['E']], [['C'], ['D']], [['B'], [None]]],
[[['C'], ['D']], [['A'], ['E']], [['B'], [None]]],
[[['A'], ['C'], ['D'], ['E']], [['B'], [None], [None], [None]]]])
def test_working_backwards_problems(self):
"""
        Unittest consisting of four problems of a problem-type that requires
        re-working the premise set (while constructing models, the capacity (working
        memory) is exceeded, so interpret needs to search for only the relevant
        premises in the problem and restart the model-construction process with these).
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
WORKING_BACKWARDS_PROBLEMS[0], False, True)),
[[[['A']], [['B']], [['C']], [['D']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
WORKING_BACKWARDS_PROBLEMS[1], False, True)),
[[[['Z']], [['D']], [['A']]], [[['Z']], [['A']], [['D']]],
[[['Z'], [None]], [['A'], ['D']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
WORKING_BACKWARDS_PROBLEMS[2], False, True)),
[[[['A']], [['B']], [['C']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
WORKING_BACKWARDS_PROBLEMS[3], False, True)),
[[[['D']], [['C']], [['B']], [['A']]]])
def test_combination_problems(self):
"""
Unittest consisting of four problems of the problem-type combination problems
(combine different existing models to one big model).
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
COMBINATION_PROBLEMS[0], False, True)),
[[[['B'], ['A']], [['D'], ['C']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
COMBINATION_PROBLEMS[1], False, True)),
[[[['A'], ['B']], [['C'], ['D']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
COMBINATION_PROBLEMS[2], False, True)),
[[[[None], ['C'], ['D']], [['B'], ['A'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
COMBINATION_PROBLEMS[3], False, True)),
[[[[None], ['C'], ['D'], ['E']], [['B'], ['A'], [None], [None]]]])
def test_deduction_problems(self):
"""
Unittest consisting of six problems of the problem-type deduction problems.
        Since there is no question contained in the problem, the program simply
        tests whether the existing models all hold with the last premise in the problem
(verify --> last premise should follow from the previous ones if it is
consistent with the model(s)).
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
DEDUCTION_PROBLEMS[0], False, True)),
[[[['A'], [None]], [['D'], ['C']], [['B'], ['E']]],
[[['D'], ['C']], [['A'], [None]], [['B'], ['E']]],
[[['A'], ['D'], ['C']], [['B'], ['E'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
DEDUCTION_PROBLEMS[1], False, True)),
[[[['B'], ['C'], ['D']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
DEDUCTION_PROBLEMS[2], False, True)),
[[[['D']], [['B']], [['A']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
DEDUCTION_PROBLEMS[3], False, True)),
[[[['D']], [['B']], [['A']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
DEDUCTION_PROBLEMS[4], False, True)),
[[[['D'], ['C']], [['B'], [None]], [['A'], ['E']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
DEDUCTION_PROBLEMS[5], False, True)),
[[[[None], ['F'], ['G']], [['C'], ['E'], [None]],
[['A'], ['B'], [None]], [['D'], ['H'], ['J']]]])
def test_indeterminacies_problems(self):
"""
Unittest consisting of eight problems of the problem-type
problems with indeterminacies.
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[0], False, True)),
[[[['D'], ['C']], [['A'], ['E']], [['B'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[1], False, True)),
[[[['A'], ['B'], [None]], [[None], ['H'], ['J']], [[None], ['F'], ['G']],
[['C'], ['E'], [None]], [['D'], [None], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[2], False, True)),
[[[['B']], [['D']], [['A']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[3], False, True)),
[[[['B'], ['E']], [['D'], ['C']], [['A'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[4], False, True)),
[[[['B'], ['E']], [['D'], ['C']], [['A'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[5], False, True)),
[[[['A'], ['E']], [['D'], ['C']], [['B'], [None]]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[6], False, True)),
[[[['A'], ['D'], ['C'], [None]], [['B'], ['H'], ['E'], ['F']],
[[None], ['J'], [None], ['G']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INDETERMINACIES_PROBLEMS[7], False, True)),
[[[['D'], ['E'], ['H']], [['B'], ['G'], ['F']], [['A'], [None], [None]]]])
def test_inconsistent_premises(self):
"""
        Unittest consisting of three problems of the problem-type inconsistent premises.
        These are problems where the last premise is inconsistent with the
previous ones, leading to no solution for the problem.
"""
model = MainModule()
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INCONSISTENT_PREMISES_PROBLEMS[0], False, True)), None)
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INCONSISTENT_PREMISES_PROBLEMS[1], False, True)), [[[['B'], ['A'], ['D']]]])
self.assertEqual(helper.translate_all_dicts(model.interpret_temporal(
INCONSISTENT_PREMISES_PROBLEMS[2], False, True)), None)
# ---------------------------- MAIN FUNCTION ------------------------------------------------------
def main():
"""
Main-function.
Quickstart:
1) To run a Spatial Problem with the Spatial Model:
- process_problem(problem-number, name-of-spatial-problem-set, "spatial")
2) To run a Spatial Problem with the Temporal Model:
- process_problem(problem-number, name-of-spatial-problem-set, "temporal", True)
3) To run a Temporal Problem with the Temporal Model:
- process_problem(problem-number, name-of-temporal-problem-set, "temporal")
4) To run a Temporal Problem with the Spatial Model:
- process_problem(problem-number, name-of-temporal-problem-set, "spatial", False)
(See examples below)
    5) Instead of process_problem, process_all_problems can be called without a
       problem number, as explained above, to process all problems of a given problem-set.
"""
spatial_model = MainModule()
spatial_model.process_problem(1, INDETERMINATE_PROBLEMS, "spatial") # 1)
#spatial_model.process_all_problems(COMBINATION_PROBLEMS, "temporal") # 5)
#temporal_model = MainModule()
#temporal_model.process_problem(1, NON_TRANSITIVE_ONE_MODEL_PROBLEMS, "temporal") # 3)
#temporal_spatial_model = MainModule()
#temporal_spatial_model.process_problem(1, COMBO_PROBLEMS, "temporal", True) # 2)
#temporal_spatial_model.process_problem(3, DEDUCTION_PROBLEMS, "spatial", False) # 4)
if __name__ == "__main__":
main()
unittest.main()
| 2.703125
| 3
|
mealpy/utils/boundary.py
|
rishavpramanik/mealpy
| 0
|
12781083
|
<filename>mealpy/utils/boundary.py
#!/usr/bin/env python
# Created by "Thieu" at 23:42, 16/03/2022 ----------%
# Email: <EMAIL> %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import operator
def is_in_bound(value, bound):
    # Convention: a tuple denotes an open interval (strict comparison with <),
    # while a list denotes a closed interval (inclusive comparison with <=).
    ops = None
    if isinstance(bound, tuple):
        ops = operator.lt
    elif isinstance(bound, list):
        ops = operator.le
if bound[0] == float("-inf") and bound[1] == float("inf"):
return True
elif bound[0] == float("-inf") and ops(value, bound[1]):
return True
elif ops(bound[0], value) and bound[1] == float("inf"):
return True
elif ops(bound[0], value) and ops(value, bound[1]):
return True
return False
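# Illustrative behavior (a quick sketch of the convention above):
#   is_in_bound(0.5, (0, 1))  -> True   (0 < 0.5 < 1)
#   is_in_bound(1.0, (0, 1))  -> False  (endpoints excluded for tuples)
#   is_in_bound(1.0, [0, 1])  -> True   (endpoints included for lists)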
def is_str_in_list(value: str, my_list: list):
    if isinstance(value, str) and my_list is not None:
        return value in my_list
return False
| 2.96875
| 3
|
src/typer_make/__init__.py
|
zdog234/typer-make
| 0
|
12781084
|
<gh_stars>0
"""typer-make."""
| 1.03125
| 1
|
examples/docs_snippets/docs_snippets/concepts/ops_jobs_graphs/jobs.py
|
silentsokolov/dagster
| 0
|
12781085
|
# isort: skip_file
# pylint: disable=unused-argument,reimported
from dagster import DependencyDefinition, GraphDefinition, job, op
@op
def my_op():
pass
# start_pipeline_example_marker
@op
def return_one(context):
return 1
@op
def add_one(context, number: int):
return number + 1
@job
def one_plus_one():
add_one(return_one())
# end_pipeline_example_marker
# start_multiple_usage_pipeline
@job
def multiple_usage():
add_one(add_one(return_one()))
# end_multiple_usage_pipeline
# start_alias_pipeline
@job
def alias():
add_one.alias("second_addition")(add_one(return_one()))
# end_alias_pipeline
# start_tag_pipeline
@job
def tagged_add_one():
add_one.tag({"my_tag": "my_value"})(add_one(return_one()))
# end_tag_pipeline
# start_pipeline_definition_marker
one_plus_one_from_constructor = GraphDefinition(
name="one_plus_one",
node_defs=[return_one, add_one],
dependencies={"add_one": {"number": DependencyDefinition("return_one")}},
).to_job()
# end_pipeline_definition_marker
# start_tags_pipeline
@job(tags={"my_tag": "my_value"})
def my_tags_job():
my_op()
# end_tags_pipeline
def do_something(x):
return x
# start_top_level_input_graph
from dagster import graph, op
@op
def op_with_input(x):
return do_something(x)
@graph
def wires_input(x):
op_with_input(x)
# end_top_level_input_graph
# start_top_level_input_job
the_job = wires_input.to_job(input_values={"x": 5})
# end_top_level_input_job
# start_execute_in_process_input
graph_result = wires_input.execute_in_process(input_values={"x": 5})
job_result = the_job.execute_in_process(
input_values={"x": 6}
) # Overrides existing input value
# end_execute_in_process_input
| 2.125
| 2
|
ConnectingToTheInternet/Client-ServerFileTransfer/Client.py
|
ChocolaKuma/KumasCookbook
| 3
|
12781086
|
import socket as socket
import time as t
import random as r
# creates socket object
def receve_MSG(IP="",port=1337,MSG_Size=1024,TEXT_Decode='ascii',ExpectedFileType="TXT"):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
if(IP==""):
IP = socket.gethostname()
s.connect((IP, port))
tm = s.recv(MSG_Size) # msg can only be x bytes long
    if(ExpectedFileType == "TXT"):
        RecevedText = tm.decode(TEXT_Decode)
    elif(ExpectedFileType == "IMG"):
        RecevedText = tm
    else:
        RecevedText = tm  # fall back to raw bytes for unknown file types
s.close()
return RecevedText
def WriteToFile(toBewritten,FileName="out",FileType=".txt",WriteType="w+"):
FileName = FileName+FileType
text_file = open(FileName, WriteType)
text_file.write(toBewritten)
text_file.close()
x=0
while True:
    print("Message received")
    # Can only receive up to ~9 MB per message (see MSG_Size below)
WriteToFile(receve_MSG("",1337,9999999,'ascii',"IMG"),"Output",".jpg","wb+")
t.sleep(1)
x=x+1
print(x)
| 2.921875
| 3
|
jasonParser.py
|
ibrahimsh/final-project-software-programming
| 0
|
12781087
|
<filename>jasonParser.py<gh_stars>0
__author__ = 'MatrixRev'
import json
import codecs
import glob
import os
import sys
#path = u"C://Users//MatrixRev//Desktop//library_5//"
path="C://Users//MatrixRev//Desktop//mainLib//" # input file
outFile='C:/Users/MatrixRev/Desktop/books/output/mainsubjects.txt' #output file
pathNew = u"C://Users//MatrixRev//Desktop//newOut"
counter=0
subjects_list=[]
for root, dirs, files in os.walk(path):
for file in files:
counter=counter+1
        print(counter, root, dirs, file)
if len(file)>0 :
if file.endswith(".json"):
with codecs.open(os.path.join(root, file), "rb",encoding="UTF-8") as fd:
json_data = json.load(fd)
select_num=json_data['isbn']
select_title=json_data['title']
select_data =json_data['subjects']
select_subTitle=json_data['subtitle']
select_Authors=json_data['authors']
select_Comments=json_data['comments']
n = len(json_data['subjects'])
print(n)
newFileName=file.replace('.json','.txt')
newdir=os.path.join(pathNew)
os.chdir(newdir)
with codecs.open(os.path.join(newdir,newFileName),'w',encoding='utf-8')as tf:
for l in list(select_data):
print(l,file=tf)
#for i in list(select_title):
print(select_title,file=tf)
for i in select_Comments:
print(i,file=tf)
for i in select_subTitle:
print(i,file=tf)
for i in range(0,len(select_Authors)):
print(select_Authors[i],file=tf)
# fd.write(n,"\n",select_title,"\n","subjects","\n")
#outfile.write(select_title)
# print("book Title : ",select_title,"\n")
# print("subjects is:")
                    for i in range(n):
print(select_data[i])
subjects_list.append(select_data[i]+" "+"***Title:"+" "+select_title+" "+"***link"+" "+root+"//"+file)
#fd.writelines(['%s\n'for sub in asubjects])
# for s in sub:
# fd.write("".join(s)+'\n')
f=len(subjects_list)
print(f)
#fd.close()
with codecs.open(outFile,'w',encoding='utf-8')as fh:
for sub in subjects_list:
if len(sub)>0:
#sub=sub.replace('-','')
sub=sub.lower()
sub=sub.strip('\n')
# print(sub,file='subject.txt')
fh.write(sub)
fh.write("\n")
| 2.71875
| 3
|
src/models/__init__.py
|
imhgchoi/Project-Template
| 2
|
12781088
|
from models.sample import SampleModel
def build_model(args):
if args.model == 'sample':
return SampleModel(args)
else :
raise NotImplementedError(f"check model name : {args.model}")
| 2.421875
| 2
|
test/test_schemas.py
|
SCM-NV/nano-qmflows
| 4
|
12781089
|
"""Check the schemas."""
from assertionlib import assertion
from nanoqm.workflows.input_validation import process_input
from .utilsTest import PATH_TEST
def test_input_validation():
"""Test the input validation schema."""
schemas = ("absorption_spectrum", "derivative_couplings")
paths = [PATH_TEST / x for x in
["input_test_absorption_spectrum.yml", "input_fast_test_derivative_couplings.yml"]]
for s, p in zip(schemas, paths):
d = process_input(p, s)
assertion.isinstance(d, dict)
| 2.3125
| 2
|
selenium_script.py
|
Rob0b0/Google-map-poi-extractor
| 2
|
12781090
|
"""
selenium script to scrape google map POI with rotating IP proxy
@author: <NAME>
@date: 09/02/2019
"""
import os,time
from helper import CoordBound
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.common.exceptions import TimeoutException, JavascriptException
# BIG_BOUND = [(38.896211, -77.032005), (38.902540, -77.018926)] # downtown test
BIG_BOUND = [(38.875, -77.072), (38.918, -77.002)]
PROXY = '192.168.3.11:13010'
# Subdivide the bounding box into smaller grids
coords = CoordBound(BIG_BOUND[0][0], BIG_BOUND[0][1], BIG_BOUND[1][0], BIG_BOUND[1][1])
grids = coords.dividify()
print("total number of grids: {}".format(len(grids)))
# print(grids)
# chrome webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--proxy-server=%s' % PROXY)
# start
driver = webdriver.Chrome('./chromedriver', chrome_options=chrome_options)
driver.get('localhost:5000')
time.sleep(2)
index = 0
# driver.get('http://whatismyipaddress.com')
# while (index < 10):
# driver.get('http://whatismyipaddress.com')
# # driver.execute_script("window.open('http://whatismyipaddress.com');")
# time.sleep(2)
# driver.quit()
# driver = webdriver.Chrome('./chromedriver', chrome_options=chrome_options)
# driver.switch_to_window(driver.window_handles[0])
# for grid in grids[162:]:
while index < len(grids):
grid = grids[index]
print("scarping index: {}\ngrid : {}".format(index, grid))
if index > 0 and index % 6 == 0:
# restart driver to change IP
driver.quit()
driver = webdriver.Chrome('./chromedriver', chrome_options=chrome_options)
driver.get('localhost:5000')
time.sleep(2)
# call it
try:
driver.execute_script('continueSearch({},{},{},{});'.format(
grid.sw_lat, grid.sw_lng, grid.ne_lat, grid.ne_lng
))
wait = WebDriverWait(driver, 180)
out = wait.until(ec.text_to_be_present_in_element((By.ID, 'soutput'), '{},{},{},{}: done'.format(
grid.sw_lat, grid.sw_lng, grid.ne_lat, grid.ne_lng
)))
print("done grid index {}".format(index))
index += 1
except TimeoutException:
continue
except JavascriptException:
# page not loaded properly
continue
| 2.828125
| 3
|
step30_multiple_stacks/Python/backend/lambda.py
|
fullstackwebdev/full-stack-serverless-cdk
| 192
|
12781091
|
<reponame>fullstackwebdev/full-stack-serverless-cdk<filename>step30_multiple_stacks/Python/backend/lambda.py
import json
import random
import string
def lambda_handler(event, context):
# print(event)
# print(context)
letters = string.ascii_lowercase
value = ''.join(random.choice(letters) for i in range(10))
return {
'statusCode': 200,
"headers": json.dumps({ 'Access-Control-Allow-Origin': '*' }),
"body": json.dumps(value)
}
| 1.9375
| 2
|
drive_dump.py
|
Qabel/ox-drive-dump
| 0
|
12781092
|
<filename>drive_dump.py<gh_stars>0
#!/usr/bin/env python3.7
import argparse
import os
import os.path
import shutil
import click
import mysql.connector
from anytree import Node, LevelOrderIter
def connect(host, user, password, port, database):
return mysql.connector.connect(
host=host, user=user, password=password, port=port, database=database)
def parse_args():
parser = argparse.ArgumentParser(description='Dump the ox drive')
parser.add_argument('--host', dest='host', default='localhost')
parser.add_argument('--user', dest='user', default='root')
parser.add_argument('--password', dest='password', default=None)
parser.add_argument('--port', dest='port', default='3306')
parser.add_argument('--db', dest='db')
parser.add_argument('--action', choices=['fake', 'hardlink', 'copy'], default='fake')
parser.add_argument('SOURCE', help='Source folder that contains the "hashed" folder')
parser.add_argument('TARGET', help='Target folder to copy the files to')
return parser.parse_args()
def query_folders(db):
cur = db.cursor()
cur.execute('SELECT fuid, parent, fname FROM oxfolder_tree ORDER BY parent')
yield from cur
cur.close()
def query_files(db):
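    # One row per infostore document: ROW_NUMBER() partitions by infostore_id
    # ordered by version_number, and keeping row_num = 1 selects the first
    # stored version that has a file_store_location.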
cur = db.cursor()
cur.execute("""
SELECT t.filename,
t.file_store_location,
folder_id
FROM (
SELECT d.filename,
d.file_store_location,
d.version_number,
d.infostore_id,
ROW_NUMBER() over (
PARTITION BY infostore_id ORDER BY version_number
) AS row_num
FROM infostore_document AS d
WHERE d.file_store_location IS NOT NULL
) AS t
JOIN infostore ON t.infostore_id = infostore.id
WHERE t.row_num = 1
""")
yield from cur
cur.close()
def build_file_nodes(rows):
return (Node(name=filename,
fname=filename,
location=file_store_location,
folder_id=folder_id)
for filename, file_store_location, folder_id in rows)
def build_nodes(rows):
return (Node(id=id, name=fname, fname=fname, parent_id=parent_id)
for id, parent_id, fname in rows)
def build_tree(folder_rows, file_rows):
root = Node(name='', fname='', id=0, parent_id=-1)
node_by_id = {node.id: node for node in build_nodes(folder_rows)}
node_by_id[0] = root
with click.progressbar(build_file_nodes(file_rows), label='Connecting files') as itr:
for file in itr:
folder_node = node_by_id[file.folder_id]
file.parent = folder_node
for node in node_by_id.values():
if node.parent_id is not None:
node.parent = node_by_id.get(node.parent_id)
return root
def fake_operation(source, target, _):
print(source, '->', target)
def copy_operation(source, target, target_folder):
os.makedirs(target_folder, exist_ok=True)
shutil.copy(source, target)
def hardlink_operation(source, target, target_folder):
os.makedirs(target_folder, exist_ok=True)
os.link(source, target)
def main():
args = parse_args()
db = connect(args.host, args.user, args.password, args.port, args.db)
root = build_tree(list(query_folders(db)), query_files(db))
paths = []
for node in LevelOrderIter(root):
if hasattr(node, 'location'):
paths.append((os.path.join(*(n.fname for n in node.path[:-1])), node.fname, node.location))
source_root = args.SOURCE
target_root = args.TARGET
operation = {
'fake': fake_operation,
'copy': copy_operation,
'hardlink': hardlink_operation
}[args.action]
label = {
'fake': 'Faking file copy',
'copy': 'Copying files',
'hardlink': 'Hard linking files'
}[args.action]
with click.progressbar(paths, label=label) as path_itr:
files = set()
duplicates = set()
for folder, filename, source in path_itr:
target_folder = os.path.join(target_root, folder)
target_file = os.path.join(target_folder, filename)
source_path = os.path.join(source_root, source)
while target_file in files:
target_file = target_file + ".dup"
duplicates.add(target_file)
files.add(target_file)
operation(source_path, target_file, target_folder)
if duplicates:
print("Found duplicate files:")
for dup in sorted(duplicates):
print(dup)
if __name__ == '__main__':
main()
| 2.53125
| 3
|
Exercício feitos pela primeira vez/ex001colorido.py
|
Claayton/pythonExerciciosLinux
| 1
|
12781093
|
#Ex001b
print('\033[0;35m''Olá Mundo!''\033[m')
print('xD')
| 1.960938
| 2
|
cse521/hw1/p1_data/p1.py
|
interesting-courses/UW_coursework
| 2
|
12781094
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 6 20:17:09 2018
@author: tyler
"""
import numpy as np
import sys
#%%
def karger(G,vertex_label,vertex_degree,size_V):
size_V = len(vertex_label)
#N = int(size_V*(1-1/np.sqrt(2)))
iteration_schedule = [size_V-2]
for N in iteration_schedule:
for n in range(N):
# if n%1000==0: print('iteration:',n)
# uniformly at random pick e = (v0,v1)
cs0 = np.cumsum(vertex_degree)
rand_idx0 = np.random.randint(cs0[-1])
e0 = np.searchsorted(cs0,rand_idx0,side='right')
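            # searchsorted over the cumulative degree sum draws vertex e0 with
            # probability proportional to its degree, which is equivalent to
            # sampling an edge endpoint uniformly over all edges.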
#cs1 = np.cumsum(np.append(G[e0,e0:],G[:e0,e0]))
cs1 = np.cumsum(G[e0])
rand_idx1 = np.random.randint(vertex_degree[e0])
e1 = np.searchsorted(cs1,rand_idx1,side='right')
if(G[e0,e1] == 0):
print('picked empty edge')
v0 = e0
v1 = e1
# bring edges from v1 into v0
# add new edges to v0
G[v0] += G[v1]
G[:,v0] += G[v1]
new_edge_count = vertex_degree[v1] - G[v0,v0] #- G[v1,v1]
# delete old edges from v1
G[v1] = 0
G[:,v1] = 0
# delete any created loops
G[v0,v0] = 0
np.putmask(vertex_label,vertex_label==v1,v0)
vertex_degree[v0] += new_edge_count
vertex_degree[v1] = 0
nz = np.nonzero(vertex_degree)[0]
if(len(nz) != 2):
print('did not find well defined cut')
SN0 = np.where(vertex_label == nz[0])[0]
SN1 = np.where(vertex_label == nz[1])[0]
if len(SN0) + len(SN1) != size_V:
print('lost nodes')
if len(SN0) < len(SN1):
cut = SN0
else:
cut = SN1
return cut,vertex_degree[nz[0]]
#%%
#python p1.py z N ID
z = sys.argv[1] # 0,1,2,3
N = int(sys.argv[2]) # integer number of runs
ID = sys.argv[3] # output file id
#%%
E_raw = np.loadtxt('b'+str(z)+'.in',dtype='int')
min_E = np.min(E_raw)
E = E_raw - min_E
size_V = np.max(E)+1
G = np.zeros((size_V,size_V),dtype='int64')
vertex_degree = np.zeros(size_V,dtype='int64')
for e0,e1 in E:
vertex_degree[e0] += 1;
vertex_degree[e1] += 1;
G[min(e0,e1),max(e0,e1)] += 1;
G[max(e0,e1),min(e0,e1)] += 1;
vertex_label = np.arange(size_V,dtype='int64') # gives index of supervertex containg vertex
#%%
f=open('b'+z+'/cuts_'+ID+'.dat','ab')
g=open('b'+z+'/cut_sizes_'+ID+'.dat','ab')
#
for n in range(N):
if n%500 == 0:
print(ID+'_trial :', n,' of ',N)
vl,cut_size = karger(np.copy(G),np.copy(vertex_label),np.copy(vertex_degree),size_V)
np.savetxt(f,[vl],fmt='%d',delimiter=',')
np.savetxt(g,[cut_size],fmt='%d',delimiter=',')
f.close()
g.close()
| 3.03125
| 3
|
des.py
|
ybbarng/des
| 0
|
12781095
|
from itertools import accumulate
# Initial Permutation
IP = [
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Final Permutation
FP = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Expansion Function: from 32 bit to 48 bit
E = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# Permutation
P = [
15, 6, 19, 20, 28, 11, 27, 16,
0, 14, 22, 25, 4, 17, 30, 9,
1, 7, 23, 13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10, 3, 24
]
PC1_LEFT = [
56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
]
PC1_RIGHT = [
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
PC2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
Rotations = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
# Substitution Boxes
SBox = [
# S1
[
14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13
],
# S2
[
15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9
],
# S3
[
10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12
],
# S4
[
7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14
],
# S5
[
2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3
],
# S6
[
12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13
],
# S7
[
4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12
],
# S8
[
13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11
]
]
def DES(decrypt, MD, keys):
sub_keys = generate_sub_keys(keys)
data = permutation(MD, IP)
left = data[:32]
right = data[32:]
if decrypt:
sub_keys = reversed(sub_keys)
for sub_key in sub_keys:
left, right = right, xor(left, F(right, sub_key))
data = permutation(right + left, FP)
return data
def F(c, key):
new_c = expansion(c)
mixed_data = key_mixing(new_c, key)
s_box_result = substitution(mixed_data)
return permutation(s_box_result)
def generate_sub_keys(keys):
left = permutation(keys, PC1_LEFT)
right = permutation(keys, PC1_RIGHT)
sub_keys = []
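    # accumulate() turns the per-round shift amounts into cumulative offsets,
    # so slicing each 28-bit half at i realizes that round's left rotation.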
for i in accumulate(Rotations):
sub_keys.append(permutation(left[i:] + left[:i] + right[i:] + right[:i], PC2))
return sub_keys
def expansion(c):
return permutation(c, E)
def permutation(data, table=P):
return [data[i] for i in table]
def key_mixing(data, key):
return xor(data, key)
def xor(data1, data2):
return [d1 ^ d2 for d1, d2 in zip(data1, data2)]
def substitution(data):
'''
data: 48 bit
'''
box_size = 6
boxes = [data[i:i + box_size] for i in range(0, 48, box_size)]
result = []
for box, s_box in zip(boxes, SBox):
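        # Bits 1 and 6 of the 6-bit block select the S-box row ("outer");
        # bits 2-5 select the column ("inner"); together they pick one of
        # 64 4-bit substitution values.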
outer = (box[0] << 1) + box[5]
inner = (box[1] << 3) + (box[2] << 2) + (box [3] << 1) + box[4]
value = s_box[(outer << 4) + inner]
for i in range(3, -1, -1):
result.append((value & 2**i) >> i)
return result
def string_to_bitlist(data):
result = []
for ch in data:
for i in range(7, -1, -1):
result.append(1 if ord(ch) & (1 << i) != 0 else 0)
return result
def hex_to_bitlist(data):
result = []
for ch in data:
int(ch, 16)
for i in range(3, -1, -1):
result.append(1 if int(ch, 16) & (1 << i) != 0 else 0)
return result
def bitlist_to_hex(data):
result = []
buf = 0
for i, value in enumerate(data):
buf = (buf << 1) + value
if i % 4 == 3:
result.append(hex(buf)[2:])
buf = 0
return ''.join(result)
def binary_to_bitlist(data):
result = []
for ch in data:
for i in range(7, -1, -1):
result.append(1 if ch & (1 << i) != 0 else 0)
return result
def bitlist_to_binary(data):
result = []
buf = 0
for i, value in enumerate(data):
buf = (buf << 1) + value
if i % 8 == 7:
result.append(buf)
buf = 0
return bytes(result)
def des_with_file(decrypt, in_file, out_file, key):
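    # NOTE: the IP table only reads the first 64 bits, so effectively just the
    # first 8 bytes of the file are transformed; longer inputs would need a
    # block mode (e.g. ECB or CBC) layered on top of this single-block DES.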
with open(in_file, 'rb') as f:
data = f.read()
result = DES(decrypt, binary_to_bitlist(data), string_to_bitlist(key))
with open(out_file, 'wb') as f:
f.write(bitlist_to_binary(result))
def encryption(in_file, out_file, key):
des_with_file(False, in_file, out_file, key)
def decryption(in_file, out_file, key):
des_with_file(True, in_file, out_file, key)
def test():
key = string_to_bitlist('TESTTEST')
# plain = string_to_bitlist('DESTESTT')
plain = hex_to_bitlist('4445535445535454') # DESTESTT
encrypt = hex_to_bitlist('01ecf0428c98db57')
data = DES(False, plain, key)
print(encrypt == data)
new_data = DES(True, data, key)
print(new_data == plain)
if __name__ == '__main__':
from sys import argv
modes = {
'e': encryption,
'd': decryption
}
if argv[1] not in modes:
print('mode must be \'e\' or \'d\'')
else:
modes[argv[1]](*argv[2:])
| 2.625
| 3
|
graphics/collidertest2.py
|
orrenravid1/brainn
| 0
|
12781096
|
<reponame>orrenravid1/brainn<gh_stars>0
import pygame
import math
import sys,os
p = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if not (p in sys.path):
sys.path.insert(0, p)
from AML.mathlib.math2d import*
from AML.graphics.collidergraphics import*
import random
import time
from pygame.locals import*
pygame.init()
pygame.font.init()
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
GREEN = ( 0, 255, 0)
BLUE = (0, 0, 255)
RED = ( 255, 0, 0)
YELLOW = (255,255,0)
PURPLE = (204, 0, 255)
SAND = (244, 164, 96)
ORANGE = (255,165,0)
GRAY = (139,139,139)
BROWN = (165,99,13)
colors = [RED,ORANGE,YELLOW,GREEN,BLUE,PURPLE,GRAY,BROWN]
size = (1080,720)
screen = pygame.display.set_mode(size)
done = False
clock = pygame.time.Clock()
prevtime = pygame.time.get_ticks()
currtime = pygame.time.get_ticks()
def deltaTime():
return (currtime-prevtime)/1000
circle = CircleColliderG(Vector2(150,30),15)
circlev = Vector2(0,0)
surf1 = SurfaceColliderG(Vector2(100,100),Vector2(300,100))
surf2 = SurfaceColliderG(Vector2(400,300),Vector2(500,100))
surf3 = SurfaceColliderG(Vector2(10,50),Vector2(10,250))
surfs = [surf1,surf2,surf3]
shouldprint = True
speed = 200
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
keys = pygame.key.get_pressed()
circlev = Vector2(0,0)
if keys[pygame.K_a]:
circlev.set_x(-speed*deltaTime())
elif keys[pygame.K_d]:
circlev.set_x(speed*deltaTime())
if keys[pygame.K_w]:
circlev.set_y(-speed*deltaTime())
elif keys[pygame.K_s]:
circlev.set_y(speed*deltaTime())
screen.fill(WHITE)
circle.pos += circlev
for i,surf in enumerate(surfs):
col = circle.get_collision(surf)
shouldprint = col[0]
if(shouldprint):
print(str(i) + ": " + str(surf.p1) + "," + str(surf.p2) + str(col))
coldepth = col[1].coldepth
circle.pos -= coldepth
surf.draw(screen)
circle.draw(screen)
pygame.display.flip()
clock.tick(20)
prevtime = currtime
currtime = pygame.time.get_ticks()
pygame.quit()
| 2.46875
| 2
|
myException.py
|
Abbion/Python-Labyrinth-Generator
| 0
|
12781097
|
<filename>myException.py
class WrongEntryException(Exception):
"""Klasa wyjątku -> zwracany gdy wartość wpisana do komórki jset błędna"""
def __init__(self, message, entry_id):
self.__message = message
self.__entry_id = entry_id
def __str__(self):
return "wrong entry error: {0}".format(self.__message)
def get_entry_id(self):
        # Returns the identifier of the offending entry field
return self.__entry_id
| 3.3125
| 3
|
app/models/__init__.py
|
cybrnode/zkt-sdk-rest-api
| 0
|
12781098
|
<gh_stars>0
from typing import Optional
from pydantic.main import BaseModel
class User(BaseModel):
CardNo: int
Pin: int
Password: str
Group: int
StartTime: str
EndTime: str
SuperAuthorize: bool
class ConnectionParams(BaseModel):
protocol: Optional[str] = "TCP"
ip_address: str = "192.168.10.201"
port: Optional[int] = 4370
timeout: Optional[int] = 4000
passwd: Optional[str] = ""
def __str__(self):
return f"protocol={self.protocol},ipaddress={self.ip_address},port={self.port},timeout={self.timeout},passwd={self.passwd}"
class SetDeviceDataParams(list):
class Config:
schema_extra = {
"example": {
"data": [
User(
**{
"CardNo": "15540203",
"Pin": "1",
"Password": "<PASSWORD>",
"Group": "0",
"StartTime": "0",
"EndTime": "0",
"SuperAuthorize": "1",
}
)
],
}
}
| 2.859375
| 3
|
Python/0760_find_anagram_mappings.py
|
codingyen/CodeAlone
| 2
|
12781099
|
"""
Given two lists A and B, where B is an anagram of A. B is an anagram of A means B is made by randomizing the order of the elements in A.
We want to find an index mapping P, from A to B. A mapping P[i] = j means the ith element in A appears in B at index j.
These lists A and B may contain duplicates. If there are multiple answers, output any of them.
For example, given
A = [12, 28, 46, 32, 50]
B = [50, 12, 32, 46, 28]
We should return
[1, 4, 3, 2, 0]
as P[0] = 1 because the 0th element of A appears at B[1], and P[1] = 4 because the 1st element of A appears at B[4], and so on.
Note:
A, B have equal lengths in range [1, 100].
A[i], B[i] are integers in range [0, 10^5].
Time: O(n)
Space: O(n)
"""
class Solution(object):
def anagramMappings(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: List[int]
"""
        mapping = {}
        res = []
        for i in range(len(B)):
            mapping[B[i]] = i
        for j in A:
            res.append(mapping[j])
return res
if __name__ == "__main__":
print("Start the test!")
testA = [12, 28, 46, 32, 50]
testB = [50, 12, 32, 46, 28]
s = Solution()
print(s.anagramMappings(testA, testB))
| 3.921875
| 4
|
PART01/21_series_to_number.py
|
arti1117/python-machine-learning-pandas-data-analytics
| 1
|
12781100
|
<filename>PART01/21_series_to_number.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 22:43:02 2020
@author: arti
"""
import pandas as pd
student1 = pd.Series({'Korean':100, 'English':80, 'Math':99})
print(student1); print('--')
percentage = student1 / 200
print(percentage); print('--')
print(type(percentage))
| 3.09375
| 3
|
skyblock/map/npc/gold.py
|
peter-hunt/skyblock
| 13
|
12781101
|
from ...object.object import *
from ..object import *
GOLD_NPCS = [
Npc('gold_forger',
init_dialog=[
'I love goooold!',
'Talk to me again to open the Gold Forger Shop!',
],
trades=[
(5.5, {'name': 'gold'}),
(10, {'name': 'golden_helmet'}),
(16, {'name': 'golden_chestplate'}),
(14, {'name': 'golden_leggings'}),
(9, {'name': 'golden_boots'}),
(80, {'name': 'fancy_sword'}),
]),
Npc('iron_forger',
init_dialog=[
"For my wares, you'll have to pay the iron price!",
'Seriously though, I accept Coins',
'Talk to me again to open the Iron Forger Shop!',
],
trades=[
(60, {'name': 'iron_pickaxe'}),
(5, {'name': 'iron'}),
(50, {'name': 'chainmail_helmet'}),
(100, {'name': 'chainmail_chestplate'}),
(75, {'name': 'chainmail_leggings'}),
(100, {'name': 'chainmail_boots'}),
]),
]
| 2.046875
| 2
|
mock/mock_patch/additional_property.py
|
aikrasnov/otus-examples
| 8
|
12781102
|
<reponame>aikrasnov/otus-examples
from unittest.mock import patch
def foo():
return input(), input(), input()
def test_function():
with patch('builtins.input', side_effect=("expected_one", "expected_two", "expected_three")) as mock:
# foo()
print('\n')
print(dir(mock))
print("mock.assert_called: ", mock.assert_called)
print("mock.assert_called_once: ", mock.assert_called_once)
print("mock.call_count: ", mock.call_count)
print("mock.called: ", mock.called)
print("mock.method_calls: ", mock.method_calls)
# mock.assert_called()
assert foo() == ("expected_one", "expected_two", "expected_three")
mock.assert_called()
| 3.34375
| 3
|
voxolab/xml_extract_speaker.py
|
voxolab/voxo-lib
| 1
|
12781103
|
<reponame>voxolab/voxo-lib<filename>voxolab/xml_extract_speaker.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from convert import xml_to_txt_speaker
import argparse
def make_xml_to_txt_speaker(xml_file, txt_file):
xml_to_txt_speaker(xml_file, txt_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create subtitle file from xml v2')
parser.add_argument("xml_file", help="the xml (v2) file corresponding to the demo_file.")
parser.add_argument("txt_file", help="the file you want to write too.")
args = parser.parse_args()
make_xml_to_txt_speaker(args.xml_file, args.txt_file)
| 3.25
| 3
|
pyramid_oereb/standard/xtf_import/office.py
|
openoereb/pyramid_oereb
| 4
|
12781104
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from pyramid_oereb.standard.xtf_import.util import parse_string, parse_multilingual_text
class Office(object):
TAG_NAME = 'Name'
TAG_OFFICE_AT_WEB = 'AmtImWeb'
TAG_UID = 'UID'
def __init__(self, session, model):
self._session = session
self._model = model
def parse(self, office): # pragma: no cover
instance = self._model(
id=office.attrib['TID'],
name=parse_multilingual_text(office, self.TAG_NAME),
office_at_web=parse_multilingual_text(office, self.TAG_OFFICE_AT_WEB),
uid=parse_string(office, self.TAG_UID)
)
self._session.add(instance)
| 2.3125
| 2
|
conan_package_dep_map/src/deprecated/package_analyser.py
|
cafesun/expython
| 0
|
12781105
|
#!/usr/bin/python
# -*- coding:utf8 -*-
import os, re
import collections
from pylogger import getLogger
from package_defines import PackageInfo
from deprecated.ast_pyclass_parser import ConanFileParserWarapper
class ConanPkgAnalyzer(object):
    '''Conan package analyzer'''
def __init__(self, scanpath, type):
self._scanpath = scanpath
self._channel = ""
self._pkgPattern = re.compile(r"(\w+)_(\w+)_(\w+)")
self._pkgInfoMap = collections.OrderedDict()
self._type = type
def analyse(self):
channelTxtPath = self._scanpath + "/channel.txt"
if (not os.path.exists(channelTxtPath)) :
getLogger().fatal("No channel.txt file found")
return False
with open(channelTxtPath, "r") as channelTxtHdr :
self._channel = channelTxtHdr.readline()
self.doScan(self._scanpath, self._channel, self._type)
def getPkgName(self, dirPath):
pos = len(self._scanpath)
subPath = dirPath[pos + 1:]
pkgName = subPath.split("\\")[0]
return pkgName
def parseType(self, pkgName, default):
if (default != "auto") :
return default
if (pkgName.find("_plat_") != -1) :
return "platform"
elif (pkgName.find("_msmp_") != -1):
return "msmp"
else:
return "unknown"
def doScan(self, scanPath, pkgChannel, type):
        'Main routine: collect the qualifying files under the directory and prepare them for comparison'
pkgMap = {}
for dir, subdirs, fileList in os.walk(scanPath):
# print "directory = %s | subdir = %s | filename = %s" %(dir, subdirs, fs)
if (dir == scanPath):
continue
pkgName = self.getPkgName(dir)
packgeUserType = self.parseType(pkgName, self._type)
if ("ZERO_CHECK.dir" == pkgName or "CMakeFiles" == pkgName):
continue
            # Handle recognition of header-only modules
if (None == pkgMap.get(pkgName)):
                pkgMap[pkgName] = False  # avoid repeatedly resetting it to False
for fname in fileList:
if ("conanfile.py" != fname) :
continue
fullFileName = dir + "/" + fname
parser = ConanFileParserWarapper(fullFileName)
parser.parse()
packageInfo = PackageInfo()
name = parser.getAttribute("name")
packageInfo.packageName = name
packageInfo.channel = self._channel
packageInfo.version = parser.getAttribute("version")
packageInfo.packageFullName = pkgName
packageInfo.user = packgeUserType
if (None == name) :
getLogger().error("%s parse version failed!" %fullFileName)
continue
self._pkgInfoMap[name] = packageInfo
def getResult(self):
return self._pkgInfoMap
| 2.703125
| 3
|
src/main_step.py
|
KTH-dESA/OSeMOSYS_step
| 0
|
12781106
|
<reponame>KTH-dESA/OSeMOSYS_step
# main script to run the step function of OSeMOSYS
#%% Import required packages
import data_split
import os
from otoole import ReadDatapackage
from otoole import WriteDatafile
from otoole import Context
import results_to_next_step as rtns
import step_to_final as stf
import subprocess as sp
#%% Convert datapackage to datafile
def dp_to_df(dp_path,df_path):
# otoole datapackage to datafile
dp_path = os.path.join(dp_path,'datapackage.json')
reader = ReadDatapackage()
writer = WriteDatafile()
converter = Context(read_strategy=reader, write_strategy=writer)
converter.convert(dp_path,df_path)
#%% Run model
def run_df(path,results_path):
try:
os.mkdir(results_path)
except OSError:
print("Creation of the directory %s failed" %results_path)
with open(os.path.join('..','model','osemosys.txt'), 'r') as file:
model = file.readlines()
rp = "param ResultsPath, symbolic default '"+results_path+"';\n"
model[55] = rp
with open(os.path.join('..','model','osemosys.txt'), 'w') as file:
file.writelines(model)
cd_run = 'glpsol -m '+os.path.join('..','model','osemosys.txt')+' -d %(data)s --log %(log)s' % {'data': path, 'log': results_path+'.log'}
sp.run([cd_run],shell=True,capture_output=True)
return results_path
#%% Main function to coordinate the execution of the script
def main(path_data,step_length):
if type(step_length)==int:
dic_yr_in_steps,full_steps = data_split.split_dp(path_data,step_length)
df_step0 = '../data/step0.txt'
dp_to_df('../data/datapackage0',df_step0)
res_path = '../steps/step'+str(0)
# Run step 0
run_df(df_step0,res_path)
stf.main('../steps/step0','../results/',0,dic_yr_in_steps[0].iloc[:step_length])
print('Step 0: done')
for s in range(full_steps):
step = s+1
df_path = '../data/step'+str(step)+'.txt'
dp_path = '../data/datapackage'+str(step)
dp_d_path = '../data/datapackage'+str(step)+'/data'
fr_path = '../results'
rtns.main(dp_d_path,fr_path)
#print('Step %s: ResCap in datapackage'%step)
dp_to_df(dp_path,df_path)
#print('Step %s: datafile created'%step)
res_path = '../steps/step'+str(step)
run_df(df_path,res_path)
#print('Step %s: model run completed'%step)
stf.main('../steps/step'+str(step),'../results/',step,dic_yr_in_steps[step].iloc[:step_length])
print('Step %s: done'%step)
else:
dic_yr_in_steps,full_steps = data_split.split_dp(path_data,step_length)
df_step0 = '../data/step0.txt'
dp_to_df('../data/datapackage0',df_step0)
# Run step 0
res_path = '../steps/step'+str(0)
run_df(df_step0,res_path)
stf.main('../steps/step0','../results/',0,dic_yr_in_steps[0].iloc[:step_length[0]])
print('Step 0: done')
for s in range(full_steps):
step = s+1
df_path = '../data/step'+str(step)+'.txt'
dp_path = '../data/datapackage'+str(step)
dp_d_path = '../data/datapackage'+str(step)+'/data'
fr_path = '../results'
rtns.main(dp_d_path,fr_path)
#print('Step %s: ResCap in datapackage'%step)
dp_to_df(dp_path,df_path)
#print('Step %s: datafile created'%step)
res_path = '../steps/step'+str(step)
run_df(df_path,res_path)
#print('Step %s: model run completed'%step)
stf.main('../steps/step'+str(step),'../results/',step,dic_yr_in_steps[step].iloc[:step_length[1]])
print('Step %s: done'%step)
#%% If run as script
if __name__ == '__main__':
path_data = '../data/utopia.txt'
step_length = [6,10]
#path_data = sys.argv[1]
#step_length = sys.argv[2]
main(path_data,step_length)
# %%
| 1.992188
| 2
|
tweet/__init__.py
|
leo0309/django-demo
| 0
|
12781107
|
#replace MySQLdb with pymysql
import pymysql
pymysql.install_as_MySQLdb()
| 1.28125
| 1
|
nbdt/data/cub.py
|
lisadunlap/explainable-nbdt
| 1
|
12781108
|
<reponame>lisadunlap/explainable-nbdt<gh_stars>1-10
import os
import pandas as pd
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from PIL import Image
__all__ = names = ('CUB2011', 'CUB2011Train', 'CUB2011Val')
class CUB2011(Dataset):
base_folder = 'CUB_200_2011/images'
url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'
filename = 'CUB_200_2011.tgz'
tgz_md5 = '97eceeb196236b17998738112f37df78'
def __init__(self, root, train=True, download=True, transform=None):
self.root = os.path.expanduser(root)
if train:
self.transform = self.transform_train()
else:
self.transform = self.transform_val()
self.train = train
if download:
self._download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
def _load_metadata(self):
images = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'images.txt'), sep=' ',
names=['img_id', 'filepath'])
image_class_labels = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),
sep=' ', names=['img_id', 'target'])
classes = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'classes.txt'),
sep=' ', names=['class', 'name'])
self.classes = [n.split('.')[1] for n in classes["name"]]
train_test_split = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),
sep=' ', names=['img_id', 'is_training_img'])
data = images.merge(image_class_labels, on='img_id')
self.data = data.merge(train_test_split, on='img_id')
if self.train:
self.data = self.data[self.data.is_training_img == 1]
else:
self.data = self.data[self.data.is_training_img == 0]
@staticmethod
def transform_train(input_size=224):
return transforms.Compose([
transforms.Resize(input_size + 32),
# transforms.RandomRotation(45),
transforms.RandomResizedCrop(input_size), # TODO: may need updating
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
@staticmethod
def transform_val(input_size=224):
return transforms.Compose([
transforms.Resize(input_size + 32),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
def _check_integrity(self):
try:
self._load_metadata()
except Exception:
return False
for index, row in self.data.iterrows():
filepath = os.path.join(self.root, self.base_folder, row.filepath)
if not os.path.isfile(filepath):
print(filepath)
return False
return True
def _download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
download_url(self.url, self.root, self.filename, self.tgz_md5)
with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
tar.extractall(path=self.root)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sample = self.data.iloc[idx]
path = os.path.join(self.root, self.base_folder, sample.filepath)
target = sample.target - 1 # Targets start at 1 by default, so shift to 0
img = Image.open(path)
        if self.transform is not None:
            try:
                img = self.transform(img)
            except Exception:
                # Likely a grayscale image; colorize to 3 channels and retry
                from PIL import ImageOps
                img = ImageOps.colorize(img, black="black", white="white")
                img = self.transform(img)
# print(path)
return img, target
class CUB2011Train(CUB2011):
def __init__(self, root='./data', *args, **kwargs):
super().__init__(root, train=True)
class CUB2011Val(CUB2011):
def __init__(self, root='./data', *args, **kwargs):
super().__init__(root, train=False)
| 3.0625
| 3
|
filer/tests/__init__.py
|
Wordbank/django-filer
| 0
|
12781109
|
<reponame>Wordbank/django-filer
#-*- coding: utf-8 -*-
from django.test import TestCase
class Mock():
pass
from filer.tests.admin import *
from filer.tests.models import *
from filer.tests.fields import *
from filer.tests.utils import *
from filer.tests.tools import *
from filer.tests.permissions import *
class FilerTests(TestCase):
def test_environment(self):
"""Just make sure everything is set up correctly."""
        self.assertTrue(True)
| 1.9375
| 2
|
aoomuki_comp/app/migrations/0011_auto_20210108_0945.py
|
Kamelgasmi/aoomuki_competences
| 0
|
12781110
|
# Generated by Django 2.1 on 2021-01-08 08:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0010_auto_20210106_1426'),
]
operations = [
migrations.AlterField(
model_name='collaborater',
name='Society',
field=models.CharField(max_length=200, verbose_name='société'),
),
]
| 1.523438
| 2
|
xl_data_tools.py
|
will-leone/xl_data_tools
| 0
|
12781111
|
import os
import csv
import itertools
import datetime
import string
import subprocess
from email.message import EmailMessage
import smtplib
import ssl
import zipfile
import xlwings as xw
import xlsxwriter
"""
This module provides convenient objects for pulling,
cleaning, and writing data between Excel and Python.
It includes functions for common tasks needed to
locate and timestamp Excel file names.
"""
def remove_files(path, exclude=None):
"""
:param path: Directory containing folders to be purged
:param exclude: Folders to be left unmodified
"""
with os.scandir(path) as iter_dir:
for subdir in iter_dir:
            if os.DirEntry.is_dir(subdir) and (not exclude or all(e not in subdir.name for e in exclude)):
with os.scandir(os.path.join(path, subdir)) as iter_subdir:
for item in iter_subdir:
os.remove(os.path.join(path, subdir, item))
return
def mod_date(foo):
"""
:param foo: path or path-like object representing a file
:return: file modification date
Requires Python version 3.6+ to accept path-like objects.
"""
if foo == os.path.split(foo)[1]:
t = os.path.getmtime(foo)
date = datetime.datetime.fromtimestamp(t)
else:
date = mod_date(os.path.split(foo)[1])
return date
def find_file(dir_path, keywords):
"""
Searches for the newest version of a given file.
:param dir_path: directory containing the desired file
    :param keywords: string or list of keywords expected in the desired file's name
:return: path of the desired file
"""
dir_list = os.listdir(dir_path)
if isinstance(keywords, str):
keywords = keywords.split()
matches = list()
initial_dir = os.getcwd()
os.chdir(dir_path)
for item in dir_list:
while "." in item:
loc = item.find(".")
if loc == len(item) - 1:
item = item[:-1]
else:
item = item[:loc] + item[loc + 1:]
if os.path.isfile(os.path.join(dir_path, item)):
item_list = item.split()
if all(component in item_list for component in keywords):
matches.append(item)
if not matches:
print(f"There is no file containing keywords '{keywords}' in"
f"{dir_path}.")
else:
matches.sort(key=mod_date)
os.chdir(initial_dir)
return os.path.join(dir_path, matches[-1])
def empty_check(lst):
"""
Determines whether the nested n-layer list contains only empty
and/or None-type items.
:param lst: any list, integer, float, or string
:return: True if the nested list is (a) a list and (b) contains
only empty lists, type objects, or None; otherwise, False
"""
try:
if not lst:
return True
if (isinstance(lst, str) or isinstance(lst, int) or
isinstance(lst, float)):
return False
else:
return all(map(empty_check, lst))
except TypeError:
# This indicates that lst contains None as an object
return True
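# For instance, empty_check([[], [None]]) returns True, while
# empty_check([[1]]) returns False.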
def terminate_excel():
"""
Terminates all running Excel processes in Windows OS
"""
while True:
try:
subprocess.check_call("TASKKILL /F /IM excel.exe")
except subprocess.CalledProcessError:
break
return
def hide_excel(boolean):
"""
Hides Excel from the user interface and suppresses alerts if the
input value is True. This script must be run again with False
input to enable viewing for output Excel files, after which all
Excel processes are exited.
:param boolean: True or False boolean constant
"""
for app in xw.apps:
app.display_alerts = not boolean
app.screen_updating = not boolean
if boolean is False:
terminate_excel()
return
def csv_extract(file, directory, header=None):
"""
Converts a given CSV file into a pandas dataframe.
:param file: Name of the CSV file
:param directory: Name of the directory containing the CSV file
:param header: Sequence containing all columns from the CSV to be
included in the output. If None, the CSV's first line will be used.
:return: pandas dataframe
"""
initial_dir = os.getcwd()
os.chdir(directory)
with open(file, newline='') as csvfile:
reader = csv.DictReader(csvfile, fieldnames=header)
for row in reader:
new_key = row[header[0]]
if new_key is not None and new_key != "":
csv_dict[new_key] = list()
for column in header[1:]:
csv_dict[new_key].append(row[header[column]])
os.chdir(initial_dir)
return csv_dict
def create_zip(directory, zip_name, files):
"""
Removes all existing .zip files in the chosen directory with the given
zip_name and creates a new .zip file with
this name that contains the chosen files.
:param directory: The directory where the zip file will be created
:param zip_name: The name of the new zip file
:param files: List of the files to be zipped (as filenames)
"""
    # Compile zip archive for reports if not comprised of a single file
initial_dir = os.getcwd()
os.chdir(directory)
if len(files) > 1:
with os.scandir(os.getcwd()) as scan:
for entry in scan:
if zip_name in str(entry):
os.remove(entry)
for foo in files:
with zipfile.ZipFile(zip_name, "a") as my_zip:
my_zip.write(foo)
os.chdir(initial_dir)
def send_email(sender, recipients, subject, html, html_dir, cc=None,
bcc=None, attachments=None, attachments_dir=None):
"""
Sends out an SMTP email using SSL, HTML content, and up to one
attachment (including .zip). Recipients' names must have the form
"required_first_name optional_middle_name optional_last_name". The
sender's email is assumed to be Gmail/Google Inbox.
:param sender: Sequence (a, b) where a is the sender's email and
b is their email account password
:param recipients: Sequence of pairs (a, b) where a is the
recipient's name and b is their email
:param cc: Sequence of pairs (a, b) where a is the cc
recipient's name and b is their email
:param bcc: Sequence of pairs (a, b) where a is the bcc
recipient's name and b is their email
:param subject: Subject title for the email
:param attachments: File name of the attachment (including
.zip) - no more than 1 per email
:param html: File name of the html script defining the email
body's content and signature
:param attachments_dir: Directory containing the attachments
:param html_dir: Directory containing the html script
"""
# Construct formatted strings of names/emails for Message module
recipient_names, cc_names, bcc_names = list(), list(), list()
recipient_emails, cc_emails, bcc_emails = list(), list(), list()
contact_lists = {'recipients': recipients, 'cc': cc, 'bcc': bcc}
contact_names = {'recipients': recipient_names, 'cc': cc_names,
'bcc': bcc_names}
contact_emails = {'recipients': recipient_emails, 'cc': cc_emails,
'bcc': bcc_emails}
    for group, contact_list in contact_lists.items():
        if not contact_list:  # cc/bcc may be None or empty
            contact_names[group] = ""
            contact_emails[group] = ""
            continue
        for contact in contact_list:
            contact_names[group].append(contact[0].split()[0])
            contact_emails[group].append(contact[1])
        contact_names[group] = ", ".join(contact_names[group])
        contact_emails[group] = "; ".join(contact_emails[group])
# Extract HTML content for email body
initial_dir = os.getcwd()
os.chdir(html_dir)
with open(html) as f:
email_body = f.read()
os.chdir(initial_dir)
# Construct email
msg = EmailMessage()
msg['Subject'] = subject
msg['From'] = sender[0]
msg['To'] = contact_emails['recipients']
    if cc:
        msg['Cc'] = contact_emails['cc']
    if bcc:
        msg['Bcc'] = contact_emails['bcc']
msg.set_content("""\
<html>
<head></head>
<body>
<body style="font-family:calibri; font-size: 16px" >
<p> Hi, {}, </p>
<p> {}
</p>
</body>
</html>
""".format(contact_names[recipients], email_body),
subtype='html')
if attachments is not None and attachments_dir is not None:
# Prepare the attachment(s) for delivery
initial_dir = os.getcwd()
os.chdir(attachments_dir)
if attachments[len(attachments) - 4:] == ".zip":
with open(attachments, 'rb') as myzip:
msg.add_attachment(myzip.read(), maintype="multipart",
subtype="mixed", filename=attachments)
else:
with open(attachments, 'rb') as fp:
msg.add_attachment(fp.read(), maintype="multipart",
subtype="mixed", filename=attachments)
os.chdir(initial_dir)
# Connect with the server and send the email with its attachment(s)
with smtplib.SMTP(host='smtp.gmail.com', port=587) as s:
context = ssl.create_default_context()
s.starttls(context=context)
s.login(sender[0], sender[1])
s.send_message(msg)
return
def range_converter(xl_col_length=3):
"""
Construct conversions between Excel array ranges and
Pythonic indices (up to column ZZ in Excel)
:param xl_col_length: Length of the longest desired
Excel column (e.g., 2 for "A" to "ZZ", 3 for "A" to "ZZZ")
"""
alpha_initial = string.ascii_uppercase
alpha_extended = list(string.ascii_uppercase)
    if xl_col_length > 1:
        # Expand the list with the same lexicographic ordering as
        # Excel (e.g. "Z" is followed by "AA", "AZ" by "BA"). Only the
        # sequences of length k - 1 are extended at each step; extending
        # the whole list would duplicate shorter sequences at wrong indices.
        prev_sequences = list(alpha_initial)
        for k in range(2, xl_col_length + 1):
            new_sequences = ["".join([seq, letter])
                             for seq in prev_sequences
                             for letter in alpha_initial]
            alpha_extended.extend(new_sequences)
            prev_sequences = new_sequences
convert = zip(range(1, len(alpha_extended) + 1), alpha_extended)
convert_to_alpha = {x: y for x, y in convert}
convert_to_num = {y: x for x, y in convert_to_alpha.items()}
return convert_to_alpha, convert_to_num
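# A quick demonstration of the mapping (default xl_col_length=3 assumed):
#
# to_alpha, to_num = range_converter()
# to_alpha[1] == "A"; to_alpha[27] == "AA"; to_num["ZZ"] == 702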
class XlArray:
"""
This class is meant for two-layer nested lists representing an
Excel array: e.g., [[row_1], [row_2],...]
"""
# Construct conversions between Excel array ranges and Pythonic indices
converter = range_converter()
convert_to_alpha = converter[0]
convert_to_num = converter[1]
def __init__(self, data, row, col):
"""
:param data: Nested (or mono-layer) list representing an
excel array (or row)
:param row: Row location of the upper-left cell in the array
(in Excel format, e.g., "2")
:param col: Column location of the upper-left cell in the array
(in Excel format - e.g., "B")
"""
# If data is a mono-layer list (representing a row), convert it
# into a nested list (representing an array)
if not all(itertools.starmap(isinstance, zip(data,
[list] * len(data)))):
data = [data]
self.data = data
self.col = col
self.row = row
self.len = len(data) # Indicates the number of rows
# Determine the finalized Excel array range
self.empty = empty_check(data)
if not self.empty:
self.header = self.data[0]
            excel_range = (col + str(row) + ":" +
                           XlArray.convert_to_alpha[len(self.header) +
                           XlArray.convert_to_num[col] - 1] +
                           str(row + self.len - 1))
self.col_num = XlArray.convert_to_num[self.col]
# XlArray.remove (below) may interfere with self.col_num
self.last_col_num = self.col_num + len(self.header) - 1
self.last_col = XlArray.convert_to_alpha[self.last_col_num]
self.range = excel_range
self.name = ""
    def row_empty(self, row_as_list):
        """True if the given row (a list from self.data) is empty. (A
        method named ``empty`` would be shadowed by the boolean
        ``self.empty`` attribute set in ``__init__``.)"""
        row_num = self.data.index(row_as_list)
        return empty_check(self.data[row_num])
def remove(self, columns):
"""
Removes the chosen columns in the instance's source array
from the instance's own array with columns understood
in Excel range terms.
For instance, if the source array is [[a, b], [c,d]]
with (row, col) = (2, "F"), the
Excel interpretation is that the upper-left cell of the
instance array is F2 while the range is F2:G3.
Meanwhile, the instance's array's range is understood as
[(i, j) for i, j in zip(range(2), range(2))].
In the above case, self.remove(["G"]) would reduce the source
array to [[a], [c]] as "b" and "d" represent cells
G2 and G3, respectively.
:param columns: Column (as string) or columns (as list of
strings) in the source array in Excel's range
interpretation - e.g., "A" for the 0th column
"""
# Note that this section assumes no two rows/lists in the
# data array are identical due to list.index()
for excluded_col in columns:
excluded_col_num = XlArray.convert_to_num[excluded_col] # e.g., column "B" becomes 2
if not self.empty and excluded_col_num == self.col_num: # if the first column is to be excluded
for record in self.data:
index = self.data.index(record)
self.data[index] = record[1:] # remove the first column in all rows
self.col = XlArray.convert_to_alpha[self.col_num + 1] # adjust the Excel representation attributes
self.col_num = XlArray.convert_to_num[self.col]
elif not self.empty and excluded_col_num == \
self.last_col_num: # if the last column is to be excluded
for record in self.data:
index = self.data.index(record)
self.data[index] = record[:-1]
elif not self.empty and self.col_num < excluded_col_num \
< self.last_col_num: # if another column is to be excluded
                for record in self.data:
                    index = self.data.index(record)
                    rel = excluded_col_num - self.col_num  # Pythonic index!
                    self.data[index] = record[:rel] + record[rel + 1:]
else: # if the column isn't in the instance array
pass
return
def filter(self, column, value, strict=True):
"""
:param column: The column that will be searched in
the array
:param value: The cell content that will be searched
for in the array
:param strict: If true, the filter requires exact
equivalence.
:return: Filtered copy of the array with only those
rows containing the desired entry in the desired column
"""
filtered_array = list()
filter_row = ""
for record in self.data: # Here, rows are represented by lists
if record[column] == value: # Strict equivalency required for a match
if not filter_row: # Determine upper-left range value for the filtered array
filter_row = (self.data.index(record)
+ self.row - 1)
filtered_array.append(record)
elif not strict:
if not filter_row: # Determine upper-left range value for the filtered array
filter_row = (self.data.index(record)
+ self.row - 1)
try:
# if record[column] and value are splittable,
# see if all components of the former are in the latter
entry = record[column].split()
if all(entry[i] in value.split() for
i in list(range(len(entry)))):
filtered_array.append(record)
except TypeError:
pass
return XlArray(filtered_array, filter_row, self.col)
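# Hedged usage sketch for XlArray (the data values are illustrative only):
#
# arr = XlArray([["Name", "Qty"], ["foo", 3], ["bar", 7]], row=1, col="A")
# arr.range              # -> "A1:B3"
# arr.filter(1, 7).data  # -> [["bar", 7]]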
class XlExtract:
"""
Class Dependency: XlArray (for XlEdit.extract())
Extract data from an existing Excel documents using
the xlwings module.
"""
def __init__(self, dir_path):
hide_excel(True)
self.path = dir_path
self.name = os.path.split(dir_path)[1]
self.date = mod_date(dir_path)
self.wb = xw.Book(self.path) # xw.books.open(self.path) returns error
self.sheets = self.wb.sheets
def open(self):
hide_excel(True)
return self.wb
def close(self):
try:
hide_excel(False)
self.wb.close()
finally:
return
def init_sht(self, sheet_name, prior_sheet=None):
"""
Create a new sheet in the workbook
:param sheet_name: Desired name for the new sheet
:param prior_sheet: Optional - the new sheet will
be inserted after this sheet in the workbook
"""
if prior_sheet is None:
self.wb.sheets.add(sheet_name)
        else:
            self.wb.sheets.add(sheet_name, after=prior_sheet)
def extract(self, exclude_sheets=None, exclude_cols=None,
max_row=50000, max_col=100):
"""
Imports all data in the workbook with each sheet represented
by a different XlArray object
:param exclude_sheets: List of the names of the sheets from
which data won't be collected
:param exclude_cols: List of pairs (a,b) where a is the sheet
name and b lists the columns to be excluded
:param max_row: Rows beyond this point will not be extracted
:param max_col: Columns beyond this point will not be extracted
:return: Pairs consisting of each sheet number and the array in
that sheet with all empty rows removed.
"""
wb_data = list()
if exclude_sheets:
sht_list = [sheet.name for sheet in self.sheets if sheet
not in exclude_sheets]
else:
sht_list = [sheet.name for sheet in self.sheets]
for sht_name in sht_list:
sht_xl = self.wb.sheets(sht_name)
# Determine endpoints of the range to be extracted
raw_data = sht_xl.range((1, 1), (max_row, max_col)).value
col_len, row_len = list(), -1
for row in raw_data:
if empty_check(row):
row_len += 1
break
else:
row_len += 1
                j = -1
                while j < len(row):
                    j += 1
                    if empty_check(row[j:]):
                        col_len.append(j)
                        break
col_len = max(col_len)
if col_len < max_col and row_len < max_row:
last_cell_location = (XlArray.convert_to_alpha[col_len]
+ str(row_len))
else:
last_cell_location = (XlArray.convert_to_alpha[max_col]
+ str(max_row))
sht_range = "A1:" + last_cell_location
sht_data = sht_xl.range(sht_range).value
sht_array = XlArray(sht_data, 1, "A")
            # Filter with a comprehension; removing items from a list
            # while iterating over it would skip rows.
            sht_array.data = [row for row in sht_array.data
                              if not empty_check(row)]
try:
for x_sheet, x_columns in exclude_cols:
if x_sheet == sht_name:
sht_array.remove(x_columns)
except TypeError: # raised if no columns excluded
pass
wb_data.append((sht_xl.index - 1, sht_array)) # sht.index is 1-based (as in Excel)
self.close()
return wb_data
# create a range method here that opens a chosen sheet and
# scans it for the first completely empty row & column
class XlCreate:
"""
Class Dependency: XlArray
Write XlArray objects to an Excel file with easily-customized
formatting. Instantiating immediately opens a new
Excel workbook, so consider instantiating within a "with" statement.
(Otherwise, use XlCreate.close()) No extension is to be included
in the filename.
"""
def __init__(self, filename, dir_path):
self.initial_dir = os.getcwd()
os.chdir(dir_path)
self.path = dir_path
self.name = os.path.split(dir_path)[1]
hide_excel(True)
self.wb = xlsxwriter.Workbook(filename + ".xlsx")
self.arrays = dict()
self.header_bold = self.wb.add_format({'bold': True,
'text_wrap': 1}) # Format object: Bold/wrap the header
self.wrap = self.wb.add_format({'text_wrap': 1, 'align': 'top'})
self.date_format = self.wb.add_format({'num_format': 'm/d/yy',
'align': 'top'}) # Format object
def close(self):
self.wb.close()
hide_excel(False)
os.chdir(self.initial_dir)
return
def write(self, sheet_name, sheet_data, row=1, column="A",
date_col=None, custom_width=None):
"""
Adds a mapping between the new sheet name and its data
to self.arrays. Writes the data to the new sheet.
:param sheet_name: Name to be used for the new sheet
:param sheet_data: Data to be mapped onto the new sheet
starting with cell A1. Include the header.
:param row: New sheet's row location of the upper-left
cell in the array (in Excel format, e.g., "2")
:param column: New sheet's column location of the
upper-left cell in the array (in Excel format, e.g., "B")
:param date_col: Columns (in Excel format) that are to be
written as dates
:param custom_width: Pairs (column, width) that determine
column-specific width
"""
# Construct conversions between Excel array ranges and Pythonic indices
converter = range_converter()
convert_to_alpha = converter[0]
convert_to_num = converter[1]
# Add mapping between new sheet name and its
# data (translated into a XlArray object)
self.arrays[sheet_name] = data = XlArray(sheet_data,
row, column)
# Add a sheet with the chosen name and set the table name
sht = self.wb.add_worksheet(sheet_name)
table_name = "_".join(sheet_name.split())
# Create list of table header formatting objects
header_formatting = [{'header': col, 'header_format':
self.header_bold} for col in data.header]
# Insert the table and its data
sht.add_table(data.range, {'columns': header_formatting,
'name': table_name})
for item in data.header:
sht.write(0, data.col_num + data.header.index(item)
- 1, item, self.header_bold)
all_columns_xl = list()
# represents the destination columns
all_columns_py = dict()
# represents the indexes of these columns in the source data.data
        for k in range(data.col_num, data.last_col_num + 1):  # inclusive of the last column
all_columns_xl.append(convert_to_alpha[k])
for col in all_columns_xl:
all_columns_py[col] = convert_to_num[col] -\
convert_to_num[all_columns_xl[0]]
for row_py in range(1, data.len):
for col in all_columns_xl:
col_py = all_columns_py[col]
if date_col and col in date_col:
if not isinstance(data.data[row_py][col_py],
datetime.datetime):
sht.write(row_py, col_py, "NO DATE",
self.date_format) # sht.write() uses 0-base indexes
else:
sht.write_datetime(row_py, col_py,
data.data[row_py][col_py],
self.date_format)
else:
sht.write(row_py, col_py,
data.data[row_py][col_py], self.wrap)
# Adjust the column widths
for col in all_columns_xl:
if not custom_width or col not in custom_width:
col_py = all_columns_py[col]
len_lst = [len(str(record[col_py])) for
record in data.data[1:]]
if not len_lst:
max_len = 16
elif max(len_lst) > 50:
max_len = 50
else:
max_len = max(max(len_lst), 16)
sht.set_column(col + ":" + col, max_len)
elif custom_width:
custom_dict = {x: y for x, y in custom_width}
sht.set_column(col + ":" + col, custom_dict[col])
else:
pass
return
| 2.984375
| 3
|
day_03.py
|
seeM/advent-of-code-2018
| 0
|
12781112
|
<reponame>seeM/advent-of-code-2018
"""
Idea:
-----
Build a grid in a single pass through the data:
    Grid is a dict, mapping (x, y) co-ordinates to the set of claim IDs
    covering that square inch
Count the number of grid cells claimed by more than one ID
"""
import re
from collections import defaultdict
from typing import Dict, List, NamedTuple, Set, Tuple
Point = Tuple[int, int]
class Claim(NamedTuple):
id: int
x: int
y: int
width: int
height: int
def points(self) -> List[Point]:
return [(x, y) for x in range(self.x, self.x + self.width)
for y in range(self.y, self.y + self.height)]
Grid = Dict[Point, Set[int]]
def build_grid(claims: List[Claim]) -> Grid:
grid: Grid = defaultdict(set)
for claim in claims:
for point in claim.points():
grid[point].add(claim.id)
return grid
def shared_inches(grid: Grid) -> int:
return len([point for point, claim_ids in grid.items()
if len(claim_ids) > 1])
def unique_claims(claims: List[Claim], grid: Grid) -> List[int]:
unique_claim_ids = {claim.id for claim in claims}
for _, claim_ids in grid.items():
if len(claim_ids) <= 1:
continue
for claim_id in claim_ids:
unique_claim_ids.discard(claim_id)
return list(unique_claim_ids)
pattern = re.compile('^#([0-9]+) @ ([0-9]+),([0-9]+): ([0-9]+)x([0-9]+)$')
with open('data/day_03.txt') as f:
claims = []
for line in f.readlines():
line = line.strip()
        id, x, y, width, height = map(int, re.match(pattern, line).groups())
        claims.append(Claim(id, x, y, width, height))
test_claims = [Claim(1, 1, 3, 4, 4),
Claim(2, 3, 1, 4, 4),
Claim(3, 5, 5, 2, 2)]
test_grid = build_grid(test_claims)
test_unique_claims = unique_claims(test_claims, test_grid)
assert shared_inches(test_grid) == 4
assert len(test_unique_claims) == 1
assert test_unique_claims[0] == 3
grid = build_grid(claims)
print(shared_inches(grid))
unique_claims_ = unique_claims(claims, grid)
assert len(unique_claims_) == 1
print(unique_claims_[0])
| 3.046875
| 3
|
eegpy/stats/tests/TestCluster.py
|
thorstenkranz/eegpy
| 10
|
12781113
|
<filename>eegpy/stats/tests/TestCluster.py
import numpy as np
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_equal, assert_raises, raises
from eegpy.stats.cluster import ClusterSearch1d
from eegpy.filter.smoothing import smooth
#Helper-methods
def make_data_without_cluster():
ar1 = np.random.random((100,15))
ar2 = np.random.random((100,15))
return [ar1,ar2]
def make_data_with_one_cluster():
ar1,ar2 = make_data_without_cluster()
ar1[40:60,:] += np.hanning(20).reshape(-1,1).repeat(15,axis=1)
return [ar1,ar2]
#Create Testdata
data_without_cluster = make_data_without_cluster()
data_with_one_cluster = make_data_with_one_cluster()
#Actual test fixture
class TestClusterSearch1d:
def setUp(self):
self.data_with_one_cluster = [ar.copy() for ar in data_with_one_cluster]
self.data_without_cluster = [ar.copy() for ar in data_without_cluster]
def tearDown(self):
pass
#def test_CreateBlankEventTable (self):
# assert_true( self.evt!=None )
def test_DataWithOneClusterFindOneCluster(self):
#arange
cl1d = ClusterSearch1d(self.data_with_one_cluster,num_surrogates=100)
#act
results = cl1d.search()
cluster = results[1][0]
#assert
assert len(results[1]) == 1
assert cluster[0]>40
assert cluster[1]<60
def test_DataWithNoClusterFindNoCluster(self):
#arange
cl1d = ClusterSearch1d(self.data_without_cluster,num_surrogates=100)
#act
results = cl1d.search()
#assert
assert len(results[1]) == 0
assert results[4].min()>0.01
@raises(ValueError)
def test_Cluster1dForArrayListNotAList(self):
data = self.data_with_one_cluster[0]
cl1d = ClusterSearch1d(data)
@raises(ValueError)
def test_Cluster1dFor1dArrays(self):
data = [d[:,0] for d in self.data_with_one_cluster]
cl1d = ClusterSearch1d(data)
@raises(ValueError)
def test_Cluster1dForZeroSurrogates(self):
        cl1d = ClusterSearch1d(self.data_with_one_cluster, num_surrogates=0)
@raises(ValueError)
def test_Cluster1dForNegativeNumberOfSurrogates(self):
        cl1d = ClusterSearch1d(self.data_with_one_cluster, num_surrogates=-10)
| 2.265625
| 2
|
optimum/onnxruntime/quantization.py
|
XinyuYe-Intel/optimum
| 0
|
12781114
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Callable, Optional, Union
import numpy
import torch
from datasets import Dataset, load_dataset
from torch.utils.data import DataLoader, RandomSampler
from transformers import AutoTokenizer, default_data_collator
from transformers.onnx import export
from transformers.onnx.features import FeaturesManager
import onnx
from onnxruntime.quantization import (
CalibrationDataReader,
CalibrationMethod,
QuantFormat,
QuantType,
quantize_dynamic,
quantize_static,
)
from optimum.onnxruntime.configuration import ORTConfig
from optimum.onnxruntime.utils import generate_identified_filename
logger = logging.getLogger(__name__)
class ORTQuantizationMode(Enum):
DYNAMIC = "dynamic"
STATIC = "static"
SUPPORTED_QUANT_MODE = set([approach.value for approach in ORTQuantizationMode])
CALIB_METHOD = {"minmax": "MinMax", "entropy": "Entropy"}
Q_FORMAT = {"operator": "QOperator", "qdq": "QDQ"}
Q_TYPE = {"int8": "QInt8", "uint8": "QUInt8"}
class ORTCalibrationDataReader(CalibrationDataReader):
def __init__(self, calib_dataloader: DataLoader):
self._iter = iter([{key: data[key].numpy() for key in data} for data in calib_dataloader])
def get_next(self):
return next(self._iter, None)
class ORTQuantizer:
def __init__(
self,
model_name_or_path: str,
ort_config: Union[str, ORTConfig],
feature: str = "default",
calib_dataset: Optional[Dataset] = None,
dataset_name: Optional[str] = None,
dataset_config_name: Optional[str] = None,
data_files: Optional[str] = None,
preprocess_function: Optional[Callable] = None,
**kwargs
):
"""
Args:
model_name_or_path (`str`):
Repository name in the Hugging Face Hub or path to a local directory hosting the model.
ort_config (`Union[ORTConfig, str]`):
Configuration file containing all the information related to the model quantization.
Can be either:
- an instance of the class :class:`ORTConfig`,
- a string valid as input to :func:`ORTConfig.from_pretrained`.
feature (`str`):
Feature used when exporting the model.
calib_dataset (`Dataset`, `optional`):
Dataset to use for the calibration step.
dataset_name (`str`, `optional`):
Dataset repository name on the Hugging Face Hub or path to a local directory containing data files to
load to use for the calibration step.
dataset_config_name (`str`, `optional`):
Name of the dataset configuration.
data_files (`str`, `optional`):
Path to source data files.
preprocess_function (`Callable`, `optional`):
Processing function to apply to each example after loading dataset.
cache_dir (`str`, `optional`):
Path to a directory in which a downloaded configuration should be cached if the standard cache should
not be used.
force_download (`bool`, `optional`, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if
they exist.
resume_download (`bool`, `optional`, defaults to `False`):
Whether or not to delete incompletely received file. Attempts to resume the download if such a file
exists.
revision(`str`, `optional`):
The specific version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
"""
config_kwargs_default = [
("cache_dir", None),
("force_download", False),
("resume_download", False),
("revision", None),
]
config_kwargs = {name: kwargs.get(name, default_value) for (name, default_value) in config_kwargs_default}
model_kwargs = copy.deepcopy(config_kwargs)
tokenizer_kwargs = copy.deepcopy(config_kwargs)
self.cache_dir = config_kwargs.get("cache_dir")
self.model_name_or_path = model_name_or_path
if not isinstance(ort_config, ORTConfig):
ort_config = ORTConfig.from_pretrained(ort_config, **config_kwargs)
self.ort_config = ort_config
self.quantization_approach = ORTQuantizationMode(ort_config.quantization_approach)
self.activation_type = QuantType[Q_TYPE.get(ort_config.activation_type)]
self.weight_type = QuantType[Q_TYPE.get(ort_config.weight_type)]
self.quant_format = QuantFormat[Q_FORMAT.get(ort_config.quant_format)]
self.calibrate_method = CalibrationMethod[CALIB_METHOD.get(ort_config.calibration_method)]
self.seed = ort_config.seed
self.calib_dataset = calib_dataset
self.dataset_name = dataset_name
self.dataset_config_name = dataset_config_name
self.data_files = data_files
self.preprocess_function = preprocess_function
self.onnx_config = None
self.feature = feature
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, **tokenizer_kwargs)
model_class = FeaturesManager.get_model_class_for_feature(self.feature)
self.model = model_class.from_pretrained(self.model_name_or_path, **model_kwargs)
def export(self, model_path: os.PathLike) -> None:
"""
Load and export a model to an ONNX Intermediate Representation (IR).
Args:
model_path (`os.PathLike`):
The path used to save the model exported to an ONNX Intermediate Representation (IR).
"""
model_type, model_onnx_config = FeaturesManager.check_supported_model_or_raise(
self.model, feature=self.feature
)
self.onnx_config = model_onnx_config(self.model.config)
opset = self.onnx_config.default_onnx_opset if self.ort_config.opset is None else self.ort_config.opset
_ = export(self.tokenizer, self.model, self.onnx_config, opset, model_path)
def fit(self, output_dir: Union[str, os.PathLike]) -> None:
"""
Load and export a model to an ONNX Intermediate Representation (IR) and apply the specified quantization
approach.
Args:
output_dir (`Union[str, os.PathLike]`):
The output directory where the quantized model will be saved.
"""
output_dir = output_dir if isinstance(output_dir, Path) else Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
model_path = output_dir.joinpath("model.onnx")
quant_model_path = generate_identified_filename(model_path, "-quantized")
self.export(model_path)
if self.quantization_approach == ORTQuantizationMode.DYNAMIC:
quantize_dynamic(
model_path,
quant_model_path,
per_channel=self.ort_config.per_channel,
reduce_range=self.ort_config.reduce_range,
activation_type=self.activation_type,
weight_type=self.weight_type,
optimize_model=self.ort_config.optimize_model,
use_external_data_format=self.ort_config.use_external_data_format,
)
elif self.quantization_approach == ORTQuantizationMode.STATIC:
calib_dataset = self.calib_dataset if self.calib_dataset is not None else self.get_calib_dataset()
calib_dataloader = self.get_calib_dataloader(calib_dataset)
calib_data_reader = self.get_data_reader(calib_dataloader)
quantize_static(
model_path,
quant_model_path,
calib_data_reader,
quant_format=self.quant_format,
per_channel=self.ort_config.per_channel,
reduce_range=self.ort_config.reduce_range,
activation_type=self.activation_type,
weight_type=self.weight_type,
optimize_model=self.ort_config.optimize_model,
use_external_data_format=self.ort_config.use_external_data_format,
calibrate_method=self.calibrate_method,
nodes_to_quantize=self.ort_config.nodes_to_quantize,
nodes_to_exclude=self.ort_config.nodes_to_exclude,
extra_options=self.ort_config.extra_options,
)
else:
raise ValueError(
f"Unknown quantization approach: `quantization_approach` was set to {self.quantization_approach}. "
f"Supported quantization approaches are " + ", ".join(SUPPORTED_QUANT_MODE)
)
def get_calib_dataset(self) -> Dataset:
"""
Returns the calibration :class:`~datasets.arrow_dataset.Dataset` to use for the post-training static
quantization calibration step.
"""
if self.dataset_name is None:
raise ValueError(
"ORTQuantizer: Static quantization calibration step requires a dataset_name if no calib_dataset is "
"provided."
)
if self.preprocess_function is None:
raise ValueError(
"ORTQuantizer: Processing function to apply after loading the dataset used for static quantization "
"calibration step was not provided."
)
calib_dataset = load_dataset(
self.dataset_name,
name=self.dataset_config_name,
data_files=self.data_files,
split=self.ort_config.split,
cache_dir=self.cache_dir,
)
calib_dataset = calib_dataset.map(self.preprocess_function, batched=True)
return calib_dataset
def get_calib_dataloader(self, calib_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the calibration :class:`~torch.utils.data.DataLoader`.
Args:
calib_dataset (`torch.utils.data.Dataset`, `optional`):
If provided, will override `self.calib_dataset`.
"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("ORTQuantizer: static quantization calibration step requires a calib_dataset.")
calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
if self.ort_config.max_samples is not None and len(calib_dataset) > self.ort_config.max_samples:
calib_dataset = calib_dataset.select(range(self.ort_config.max_samples))
ignored_columns = list(set(calib_dataset.column_names) - set(self.onnx_config.inputs.keys()))
calib_dataset = calib_dataset.remove_columns(ignored_columns)
generator = torch.Generator()
generator.manual_seed(self.seed)
sampler = RandomSampler(calib_dataset, generator=generator)
return DataLoader(
calib_dataset,
batch_size=self.ort_config.calib_batch_size,
sampler=sampler,
collate_fn=default_data_collator,
)
@staticmethod
def get_data_reader(calib_dataloader: DataLoader) -> ORTCalibrationDataReader:
"""
Returns the calibration :class:`~optimum.onnxruntime.quantization.ORTCalibrationDataReader`.
Args:
calib_dataloader (`torch.utils.data.DataLoader`):
Calibration dataloader to use for the post-training static quantization calibration step.
"""
return ORTCalibrationDataReader(calib_dataloader)
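# Hedged usage sketch (model name, config path and output directory are
# illustrative; see the ORTQuantizer docstring above for all arguments):
#
# quantizer = ORTQuantizer(
#     "distilbert-base-uncased-finetuned-sst-2-english",
#     ort_config="ort_config.json",
#     feature="sequence-classification",
# )
# quantizer.fit("quantized_model")  # exports to ONNX, then quantizes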
| 1.8125
| 2
|
fomms_integrate/newton_cotes.py
|
mquevill/fomms_integrate
| 0
|
12781115
|
"""
This file contains the implementation of the Newton-Cotes rules
"""
import numpy as np
def rectangle(x, f):
"""
Compute a 1D definite integral using the rectangle (midpoint) rule
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
ya = f((a+b)/2)
I = (b-a) * ya
return I
def trapz(x, f):
"""
Compute a 1D definite integral using the trapezoidal rule
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
ya = f(a)
yb = f(b)
I = (b-a) * (ya + yb) / 2
return I
def simpson(x, f):
"""
Compute a 1D definite integral using Simpson's rule.
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
ya = f(a)
yb = f((a+b)/2)
yc = f(b)
I = (b-a) * (ya + 4 * yb + yc) / 6
return I
def simpson3_8(x, f):
"""
Compute a 1D definite integral using the 3/8 Simpson's rule.
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
ya = f(a)
yb = f((2*a+ b)/3)
yc = f(( a+2*b)/3)
yd = f(b)
I = (b-a) * (ya + 3 * (yb + yc) + yd) / 8
return I
def boole(x, f):
"""
Compute a 1D definite integral using Boole's rule.
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
ya = f(a)
yb = f((3*a+ b)/4)
yc = f(( a+ b)/2)
yd = f(( a+3*b)/4)
ye = f(b)
    # Boole's rule weight is (b-a)/90, i.e. 2h/45 with h = (b-a)/4
    I = (b-a) * (7 * (ya + ye) + 32 * (yb + yd) + 12 * yc) / 90
return I
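# A sanity-check sketch (for illustration, not part of the module's API):
# Simpson's rule is exact for cubics and Boole's rule for quintics, so both
# should integrate f(x) = x**3 over [0, 2] to exactly 4.0.
if __name__ == "__main__":
    assert abs(simpson([0, 2], lambda x: x**3) - 4.0) < 1e-12
    assert abs(boole([0, 2], lambda x: x**3) - 4.0) < 1e-12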
| 3.796875
| 4
|
scripts/benchmark_ecef2geo.py
|
wrlssqi/pymap3d
| 116
|
12781116
|
<filename>scripts/benchmark_ecef2geo.py
#!/usr/bin/env python3
"""
benchmark ecef2geodetic
"""
import time
from pymap3d.ecef import ecef2geodetic
import numpy as np
import argparse
ll0 = (42.0, 82.0)
def bench(N: int) -> float:
x = np.random.random(N)
y = np.random.random(N)
z = np.random.random(N)
tic = time.monotonic()
lat, lon, alt = ecef2geodetic(x, y, z)
return time.monotonic() - tic
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("N", type=int)
p = p.parse_args()
N = p.N
print(f"ecef2geodetic: {bench(N):.3f} seconds")
| 2.46875
| 2
|
alarmer/log.py
|
long2ice/alert
| 16
|
12781117
|
import logging
from alarmer import Alarmer
class LoggingHandler(logging.Handler):
def __init__(self, level: int = logging.ERROR):
super().__init__()
self.level = level
def emit(self, record: logging.LogRecord) -> None:
if record.levelno >= self.level:
exc_info = record.exc_info
exc = None
if exc_info:
exc = exc_info[1]
Alarmer.send(record.getMessage(), exc)
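# Hedged usage sketch: forward every ERROR-or-worse record from the root
# logger to the configured alert providers (assumes Alarmer has been
# initialized elsewhere before any records are emitted):
#
# logging.getLogger().addHandler(LoggingHandler(level=logging.ERROR))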
| 2.6875
| 3
|
herethere/here/magic.py
|
b3b/herethere
| 0
|
12781118
|
<gh_stars>0
"""here.magic"""
import asyncio
from IPython.core import magic_arguments
from IPython.core.magic_arguments import parse_argstring
from IPython.core.magic import (
line_magic,
magics_class,
)
from herethere.everywhere.magic import MagicEverywhere
from herethere.here import ServerConfig, start_server
@magics_class
class MagicHere(MagicEverywhere):
"""Provides the %here magic."""
def __init__(self, shell):
super().__init__(shell)
self.server = None
@line_magic("here")
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"config",
nargs="?",
default="here.env",
help="Location of server config.",
)
def start_server(self, line: str):
"""Start a remote connections listener."""
args = parse_argstring(self.start_server, line)
config = ServerConfig.load(path=args.config, prefix="here")
self.server = asyncio.run(start_server(config))
print("Server started")
| 2.40625
| 2
|
phoobe/resources.py
|
berendt/heat-vagrant
| 0
|
12781119
|
# based on https://github.com/stackforge/flame/blob/master/flameclient/flame.py
template_skeleton = '''
heat_template_version: 2013-05-23
description: Generated template
parameters:
resources:
'''
class Resource(object):
"""Describes an OpenStack resource."""
def __init__(self, name, type, id=None, properties=None):
self.name = name
self.type = type
self.id = id
self.status = 'COMPLETE'
self.properties = properties or {}
self.parameters = {}
def add_parameter(self, name, description, parameter_type='string',
constraints=None, default=None):
data = {
'type': parameter_type,
'description': description,
}
# (arezmerita) disable cause heat bug #1314240
# if constraints:
# data['constraints'] = constraints
if default:
data['default'] = default
self.parameters[name] = data
@property
def template_resource(self):
return {
self.name: {
'type': self.type,
'properties': self.properties
}
}
@property
def template_parameter(self):
return self.parameters
@property
def stack_resource(self):
if self.id is None:
return {}
return {
self.name: {
'status': self.status,
'name': self.name,
'resource_data': {},
'resource_id': self.id,
'action': 'CREATE',
'type': self.type,
'metadata': {}
}
}
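# Illustrative usage (resource name, type and properties are placeholders):
#
# r = Resource("server_0", "OS::Nova::Server", id="abc123",
#              properties={"flavor": "m1.small"})
# r.template_resource
# # -> {'server_0': {'type': 'OS::Nova::Server',
# #                  'properties': {'flavor': 'm1.small'}}}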
| 2.0625
| 2
|
__init__.py
|
mynameismon/comp-project-grade-11
| 3
|
12781120
|
<gh_stars>1-10
from os import getcwd
import os
import mdmaketoc
def __init__():
problemnumber = int(input("Enter the problem number: \n >> "))
foldername = "Problem-No-" + str(problemnumber)
    path = os.path.join(getcwd(), foldername)
    # creates the directory if it does not exist
    try:
        os.mkdir(path)
    except OSError:
        pass
    i = 1
    while os.path.exists(os.path.join(foldername, "Approach-No-%s.py" % i)):
        i += 1
    last_file = os.path.join(path, "Approach-No-%s.py" % i)
file = open(last_file, 'w')
file.close()
if __name__ == "__main__":
__init__()
mdmaketoc.main()
| 2.8125
| 3
|
gza/gzacommon.py
|
ynadji/drop
| 6
|
12781121
|
import sys
import nfqueue
import socket
import signal
import time
import whitelist
from collections import defaultdict
class GZA(object):
def __init__(self, vmnum, opts):
self.gamestate = defaultdict(int)
self.vmnum = vmnum
self.iface = 'tap%d' % vmnum
self.opts = opts
self.mac = 'ca:fe:de:ad:be:ef'
signal.signal(signal.SIGUSR1, self.reset) # So we can reset gamestate
if self.opts.whitelist:
whitelist.makewhitelist(self.opts.whitelistpath)
self.whitelisted = whitelist.whitelisted
whitelist.makeipwhitelist(self.opts.ipwhitelistpath)
self.whitelistedip = whitelist.whitelistedip
# Set the game (only used in subclass games)
if self.opts.taken > 0:
self.game = 'taken'
elif self.opts.dropn > 0:
self.game = 'dropn'
elif self.opts.dropall:
self.game = 'dropall'
def reset(self, signum, frame):
sys.stderr.write('Cleared game state!\n')
self.gamestate.clear()
try:
self.q.try_run()
except KeyboardInterrupt:
print('Clean shutdown')
self.q.unbind(socket.AF_INET)
sys.exit(0)
def playgame(self, payload):
payload.set_verdict(nfqueue.NF_ACCEPT)
def startgame(self):
good = False
while not good:
try:
self.q = nfqueue.queue()
self.q.open()
self.q.set_callback(self.playgame)
self.q.fast_open(self.vmnum, socket.AF_INET)
good = True
except RuntimeError as e:
sys.stderr.write(str(e) + '\n')
sys.stderr.write('Retrying to connect to nfqueue #%d...\n'
% self.vmnum)
time.sleep(3)
try:
sys.stderr.write('Successfully bound to nfqueue #%d\n' % self.vmnum)
self.q.try_run()
except KeyboardInterrupt:
print('Clean shutdown')
self.q.unbind(socket.AF_INET)
sys.exit(0)
| 2.296875
| 2
|
math-and-algorithm/005.py
|
silphire/training-with-books
| 0
|
12781122
|
# https://atcoder.jp/contests/math-and-algorithm/tasks/math_and_algorithm_e
input()
print(sum(map(int, input().split())) % 100)
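# e.g. with input lines "3" and "30 50 70", this prints (30+50+70) % 100 = 50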
| 2.234375
| 2
|
booking/models.py
|
foad-heidari/django-booking
| 1
|
12781123
|
from django.db import models
from django.conf import settings
BOOKING_PERIOD = (
("5","5M"),
("10","10M"),
("15","15M"),
("20","20M"),
("25","25M"),
("30","30M"),
("35","35M"),
("40","40M"),
("45","45M"),
("60","1H"),
("75","1H 15M"),
("90","1H 30M"),
("105","1H 45M"),
("120","2H"),
("150","2H 30M"),
("180","3H"),
)
class Booking(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE, blank=True, null=True)
date = models.DateField()
time = models.TimeField()
user_name = models.CharField(max_length=250)
user_email = models.EmailField()
approved = models.BooleanField(default=False)
user_mobile = models.CharField(blank=True, null=True, max_length=10)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self) -> str:
return self.user_name or "(No Name)"
class BookingManager(models.Model):
# General
booking_enable = models.BooleanField(default=True)
confirmation_required = models.BooleanField(default=True)
# Date
disable_weekend = models.BooleanField(default=True)
    available_booking_months = models.IntegerField(default=1, help_text="If 2, users can only book appointments for the next two months.")
max_appointment_per_day = models.IntegerField(null=True, blank=True)
# Time
start_time = models.TimeField()
end_time = models.TimeField()
    period_of_each_booking = models.CharField(max_length=3, default="30", choices=BOOKING_PERIOD, help_text="How long each appointment takes.")
    max_appointment_per_time = models.IntegerField(default=1, help_text="How many appointments can be booked for each time slot.")
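# Illustrative use from a Django shell (field values are placeholders):
#
# Booking.objects.create(date="2022-01-15", time="10:30",
#                        user_name="Jane Doe", user_email="jane@example.com")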
| 2.046875
| 2
|
resources/site-packages/xbmctorrent/player.py
|
neno1978/xbmctorrent
| 0
|
12781124
|
import xbmc
import xbmcgui
import urllib
import os
import time
import urlparse
from xbmctorrent import plugin, torrent2http
from xbmctorrent.ga import track_event
from xbmctorrent.utils import url_get_json
from contextlib import contextmanager, closing, nested
TORRENT2HTTP_TIMEOUT = 20
TORRENT2HTTP_POLL = 200
WINDOW_FULLSCREEN_VIDEO = 12005
XBFONT_LEFT = 0x00000000
XBFONT_RIGHT = 0x00000001
XBFONT_CENTER_X = 0x00000002
XBFONT_CENTER_Y = 0x00000004
XBFONT_TRUNCATED = 0x00000008
XBFONT_JUSTIFY = 0x00000010
STATE_STRS = [
'Queued',
'Checking',
'Downloading metadata',
'Downloading',
'Finished',
'Seeding',
'Allocating',
'Allocating file & Checking resume'
]
VIEWPORT_WIDTH = 1920.0
VIEWPORT_HEIGHT = 1088.0
OVERLAY_WIDTH = int(VIEWPORT_WIDTH * 0.7) # 70% size
OVERLAY_HEIGHT = 150
ENCRYPTION_SETTINGS = {
"Forced": 0,
"Enabled": 1,
"Disabled": 2,
}
# Somehow if we close the dialog too fast, it makes XBMC go bonkers
class SafeDialogProgress(xbmcgui.DialogProgress):
def close(self):
xbmc.sleep(1000)
super(SafeDialogProgress, self).close()
class OverlayText(object):
def __init__(self, w, h, *args, **kwargs):
self.window = xbmcgui.Window(WINDOW_FULLSCREEN_VIDEO)
viewport_w, viewport_h = self._get_skin_resolution()
# Adjust size based on viewport, we are using 1080p coordinates
w = int(w * viewport_w / VIEWPORT_WIDTH)
h = int(h * viewport_h / VIEWPORT_HEIGHT)
x = (viewport_w - w) / 2
y = (viewport_h - h) / 2
self._shown = False
self._text = ""
self._label = xbmcgui.ControlLabel(x, y, w, h, self._text, *args, **kwargs)
self._background = xbmcgui.ControlImage(x, y, w, h, os.path.join(plugin.addon.getAddonInfo("path"), "resources", "images", "black.png"))
self._background.setColorDiffuse("0xD0000000")
def show(self):
if not self._shown:
self.window.addControls([self._background, self._label])
self._shown = True
def hide(self):
if self._shown:
self._shown = False
self.window.removeControls([self._background, self._label])
def close(self):
self.hide()
@property
def text(self):
return self._text
@text.setter
def text(self, text):
self._text = text
if self._shown:
self._label.setLabel(self._text)
# This is so hackish it hurts.
def _get_skin_resolution(self):
import xml.etree.ElementTree as ET
skin_path = xbmc.translatePath("special://skin/")
tree = ET.parse(os.path.join(skin_path, "addon.xml"))
res = tree.findall("./extension/res")[0]
return int(res.attrib["width"]), int(res.attrib["height"])
class TorrentPlayer(xbmc.Player):
def init(self, magnet_uri):
track_event("torrent_player", "start")
self.magnet_uri = magnet_uri
self.magnet_args = urlparse.parse_qs(self.magnet_uri.replace("magnet:?", "")) # I know about urlparse.urlsplit but this is faster
self.magnet_display_name = ""
if self.magnet_args["dn"]:
self.magnet_display_name = self.magnet_args["dn"][0]
self.torrent2http_options = {
"magnet": magnet_uri,
"dlpath": plugin.get_setting("dlpath") or ".",
"dlrate": plugin.get_setting("max_download_rate") or "0",
"ulrate": plugin.get_setting("max_upload_rate") or "0",
"ulrate": plugin.get_setting("max_upload_rate") or "0",
"encryption": plugin.get_setting("encryption"),
}
if plugin.get_setting("keep"):
self.torrent2http_options["keep"] = None
track_event("download", "start", magnet_uri)
self.on_playback_started = []
self.on_playback_resumed = []
self.on_playback_paused = []
self.on_playback_stopped = []
return self
def onPlayBackStarted(self):
for f in self.on_playback_started:
f()
track_event("video", "play", self.magnet_display_name)
def onPlayBackResumed(self):
for f in self.on_playback_resumed:
f()
self.onPlayBackStarted()
def onPlayBackPaused(self):
for f in self.on_playback_paused:
f()
track_event("video", "pause", self.magnet_display_name)
def onPlayBackStopped(self):
for f in self.on_playback_stopped:
f()
track_event("video", "stop", self.magnet_display_name)
def _get_status_lines(self, status):
return [
self.magnet_display_name,
"%.2f%% %s" % (status["progress"] * 100, STATE_STRS[status["state"]]),
"D:%(download_rate).2fkb/s U:%(upload_rate).2fkb/s S:%(num_seeds)d P:%(num_peers)d" % status
]
@contextmanager
def attach(self, callback, *events):
for event in events:
event.append(callback)
yield
for event in events:
event.remove(callback)
def _wait_t2h_startup(self, t2h):
start = time.time()
while (time.time() - start) < TORRENT2HTTP_TIMEOUT:
try:
t2h("status")
return True
except:
pass
xbmc.sleep(TORRENT2HTTP_POLL)
return False
def loop(self):
has_resolved = False
plugin.log.info("Starting torrent2http...")
with closing(torrent2http.start(**self.torrent2http_options)) as t2h_instance:
t2h = lambda cmd: url_get_json("http://%s/%s" % (t2h_instance.bind_address, cmd))
if not self._wait_t2h_startup(t2h):
return
plugin.log.info("Opening download dialog...")
with closing(SafeDialogProgress()) as dialog:
dialog.create(plugin.name)
plugin.log.info("Waiting for file resolution...")
while not has_resolved:
if xbmc.abortRequested or dialog.iscanceled():
return
status = t2h("status")
if status["state"] >= 0:
dialog.update(int(status["progress"] * 100), *self._get_status_lines(status))
if status["state"] >= 3 and not has_resolved: # Downloading?
files = t2h("ls")["files"]
biggest_file = sorted(files, key=lambda x: x["size"])[-1]
percent_complete = float(biggest_file["complete_pieces"]) / float(biggest_file["total_pieces"]) * 100.0
if percent_complete >= 0.5:
plugin.log.info("Resolving to http://%s/files/%s" % (t2h_instance.bind_address, biggest_file["name"]))
has_resolved = True
url_name = "/".join(map(urllib.quote, biggest_file["name"].split("/")))
plugin.set_resolved_url({
"label": self.magnet_display_name,
"path": "http://%s/files/%s" % (t2h_instance.bind_address, url_name),
"is_playable": True,
})
break
xbmc.sleep(TORRENT2HTTP_POLL)
# We are now playing
plugin.log.info("Now playing torrent...")
with closing(OverlayText(w=OVERLAY_WIDTH, h=OVERLAY_HEIGHT, alignment=XBFONT_CENTER_X | XBFONT_CENTER_Y)) as overlay:
with nested(self.attach(overlay.show, self.on_playback_paused),
self.attach(overlay.hide, self.on_playback_resumed, self.on_playback_stopped)):
while not xbmc.abortRequested and self.isPlaying():
overlay.text = "\n".join(self._get_status_lines(t2h("status")))
xbmc.sleep(TORRENT2HTTP_POLL)
plugin.log.info("Closing Torrent player.")
| 2.25
| 2
|
ambient-poly.py
|
bpaauwe/AmbientWeatherNS
| 0
|
12781125
|
#!/usr/bin/env python3
"""
Polyglot v2 node server for Ambient Weather data.
Copyright (c) 2018 <NAME>
"""
CLOUD = False
try:
import polyinterface
except ImportError:
import pgc_interface as polyinterface
CLOUD = True
import sys
import time
import requests
import json
LOGGER = polyinterface.LOGGER
class Controller(polyinterface.Controller):
id = 'Ambient'
def __init__(self, polyglot):
super(Controller, self).__init__(polyglot)
self.name = 'AmbientWeather'
self.address = 'ambient'
self.primary = self.address
self.api_key = ''
self.mac_address = ''
self.indoor = 'disabled'
self.myParams = {
'APIKey': '<your value here>',
'macAddress': '<your value here>',
'indoor': 'disabled',
}
self.url_str = 'http://api.ambientweather.net/v1/devices/'
self.default = '<your value here>'
self.configured = False
self.started = False
self.first_poll = True
self.poly.onConfig(self.process_config)
LOGGER.info('Finished controller init.')
    def process_config(self, config):
        """Called whenever there is a configuration change. Detect whether
        custom parameters changed and, if so, process those changes."""
        if not self.started:
LOGGER.debug('Ignore config, NS not yet started')
return
changed = False
if 'customParams' in config:
LOGGER.debug('pc: Incoming config = {}'.format(config['customParams']))
if 'APIKey' in config['customParams']:
if self.myParams['APIKey'] != config['customParams']['APIKey']:
self.myParams['APIKey'] = config['customParams']['APIKey']
self.api_key = config['customParams']['APIKey']
changed = True
if 'macAddress' in config['customParams']:
if self.myParams['macAddress'] != config['customParams']['macAddress']:
self.myParams['macAddress'] = config['customParams']['macAddress']
self.mac_address = config['customParams']['macAddress']
changed = True
if 'indoor' in config['customParams']:
if self.myParams['indoor'] != config['customParams']['indoor']:
self.myParams['indoor'] = config['customParams']['indoor']
self.indoor = config['customParams']['indoor']
changed = True
if changed:
LOGGER.debug('Configuration change detected.')
# Update notices. Make sure we restrict setting notices
# to only when something was updated. Otherwise we can
# end up with an infinite loop as setting a notice will
# trigger a configuration change.
self.removeNoticesAll()
notices = {}
self.configured = True
if self.mac_address == self.default:
notices['mac'] = 'Please set your station macAddress (1)'
                LOGGER.debug('mac address not set, set configured to false')
self.configured = False
if self.api_key == self.default:
notices['key'] = 'Please set APIKey to your Ambient API Key (1)'
                LOGGER.debug('api key not set, set configured to false')
self.configured = False
self.addNotice(notices)
def start(self):
LOGGER.info('Started Ambient Weather Node Server')
if self.check_params():
LOGGER.info('AmbientWeatherNS has been configured.')
self.configured = True
else:
LOGGER.info('APIKey and macAddress not set.')
self.configured = False
self.discover()
LOGGER.info('Ambient Weather Node Server initialization complete.')
self.started = True
def shortPoll(self):
pass
def longPoll(self):
"""
Here is where we want to query the server and update all
the drivers.
https://api.ambientweather.net/v1/devices/macAddress?apiKey=&applicationKey
States that data is updated every 5 or 30 minutes (so which is it?)
"""
if self.configured == False:
if self.first_poll:
LOGGER.info('Waiting to be configured.')
LOGGER.info(' key = ' + self.api_key)
LOGGER.info(' mac = ' + self.mac_address)
self.first_poll = False
return
LOGGER.info('Connecting to Ambient Weather server')
path_str = self.url_str + self.mac_address
path_str += '?apiKey=' + self.api_key
path_str += '&applicationKey=<KEY>'
path_str += '&limit=1'
LOGGER.info(path_str)
try:
c = requests.get(path_str)
except:
LOGGER.error('Request to Ambient servers failed.')
return
try:
awdata = c.json()
except:
LOGGER.error('Ambient sent no data in response to request.')
LOGGER.error(str(c))
return
# deserialize data into an object?
try:
LOGGER.info(awdata[0])
d = awdata[0]
except:
LOGGER.error('Failed to get data from server: ' + str(awdata))
return
# TODO: calculate additional data values
# pressure trend
# heat index
# windchill
# rain rate
for node in self.nodes:
if self.nodes[node].id == 'pressure':
self.set_driver(node, 'GV0', d, 'baromrelin')
if 'baromabsin' in d:
self.set_driver(node, 'ST', d, 'baromabsin')
trend = self.nodes[node].updateTrend(d['baromabsin'])
self.nodes[node].setDriver('GV1', trend, report = True, force = True)
elif self.nodes[node].id == 'temperature':
self.set_driver(node, 'ST', d, 'tempf')
self.set_driver(node, 'GV0', d, 'feelsLike')
self.set_driver(node, 'GV1', d, 'dewPoint')
#self.set_driver(node, 'GV2', d, 'heatIndex')
#self.set_driver(node, 'GV3', d, 'windchill')
elif self.nodes[node].id == 'humidity':
self.set_driver(node, 'ST', d, 'humidity')
elif self.nodes[node].id == 'wind':
self.set_driver(node, 'ST', d, 'windspeedmph')
self.set_driver(node, 'GV0', d, 'winddir')
self.set_driver(node, 'GV1', d, 'windgustmph')
#self.set_driver(node, 'GV2', d, 'windgustdir')
elif self.nodes[node].id == 'precipitation':
#self.set_driver(node, 'ST', d, 'rainrate')
self.set_driver(node, 'GV0', d, 'hourlyrainin')
self.set_driver(node, 'GV1', d, 'dailyrainin')
self.set_driver(node, 'GV2', d, 'weeklyrainin')
self.set_driver(node, 'GV3', d, 'monthlyrainin')
self.set_driver(node, 'GV4', d, 'yearlyrainin')
elif self.nodes[node].id == 'light':
self.set_driver(node, 'ST', d, 'uv')
self.set_driver(node, 'GV0', d, 'solarradiation')
elif self.nodes[node].id == 'indoor':
self.set_driver(node, 'ST', d, 'tempinf')
self.set_driver(node, 'GV0', d, 'humidityin')
self.first_poll = False
def set_driver(self, node, driver, data, index):
try:
self.nodes[node].setDriver(driver, data[index],
report = True, force = self.first_poll)
except (ValueError, KeyError, TypeError):
LOGGER.warning('Missing data: ' + index)
def query(self):
for node in self.nodes:
self.nodes[node].reportDrivers()
def discover(self, *args, **kwargs):
self.addNode(TemperatureNode(self, self.address, 'temperature', 'Temperatures'))
self.addNode(HumidityNode(self, self.address, 'humidity', 'Humidity'))
self.addNode(PressureNode(self, self.address, 'pressure', 'Barometric Pressure'))
self.addNode(WindNode(self, self.address, 'wind', 'Wind'))
self.addNode(PrecipitationNode(self, self.address, 'rain', 'Precipitation'))
self.addNode(LightNode(self, self.address, 'light', 'Illumination'))
if self.indoor.lower() == 'enabled':
self.addNode(IndoorNode(self, self.address, 'indoor', 'Indoor Sensor'))
def delete(self):
LOGGER.info('Deleting the Ambient Weather node server.')
def stop(self):
LOGGER.debug('NodeServer stopped.')
def check_param(self, name, myParams, default, notices, notice):
param = default
st = True
if name in self.polyConfig['customParams']:
if self.polyConfig['customParams'][name] != default:
                param = self.polyConfig['customParams'][name]
myParams[name] = param
else:
if notice != '':
notices[name] = notice
st = False
else:
LOGGER.error('check_params: %s not defined in customParams' % name)
if notice != '':
notices[name] = notice
st = False
return st, param
def check_params(self):
st = True
self.removeNoticesAll()
notices = {}
default = '<your value here>'
st1, self.mac_address = self.check_param('macAddress', self.myParams, default, notices, 'Missing station MAC address')
st2, self.api_key = self.check_param('APIKey', self.myParams, default, notices, 'Missing Ambient API key')
st3, self.indoor = self.check_param('indoor', self.myParams, 'disabled', notices, '')
if 'macAddress' in self.polyConfig['customParams']:
if self.polyConfig['customParams']['macAddress'] != default:
self.mac_address = self.polyConfig['customParams']['macAddress']
self.myParams['macAddress'] = self.mac_address
else:
notices['macaddress'] = 'Please set your station macAddress'
st = False
else:
st = False
self.mac_address = default
LOGGER.error('check_params: macAddress not defined in customParams, please add it.')
notices['macaddress'] = 'Please add a customParam with key "macAddress" and value set to your Ambient station MAC address'
if 'APIKey' in self.polyConfig['customParams']:
if self.polyConfig['customParams']['APIKey'] != default:
self.api_key = self.polyConfig['customParams']['APIKey']
self.myParams['APIKey'] = self.api_key
else:
notices['apikey'] = 'Please set APIKey to your Ambient API Key'
st = False
else:
st = False
self.api_key = default
LOGGER.error('check_params: APIKey not defined in customParams, please add it.')
notices['apikey'] = 'Please add a customParam with key "APIKey" and value set to your Ambient API Key'
if 'indoor' in self.polyConfig['customParams']:
if self.polyConfig['customParams']['indoor'] != 'disabled':
self.indoor = self.polyConfig['customParams']['indoor']
self.myParams['indoor'] = self.indoor
else:
self.indoor = 'disabled'
# Must be called with all parameters and all notices!
self.addCustomParam(self.myParams)
self.addNotice(notices)
return (st1 and st2)
def remove_notices_all(self,command):
LOGGER.info('remove_notices_all:')
# Remove all existing notices
self.removeNoticesAll()
def update_profile(self,command):
LOGGER.info('update_profile:')
st = self.poly.installprofile()
return st
commands = {
'DISCOVER': discover,
'UPDATE_PROFILE': update_profile,
'REMOVE_NOTICES_ALL': remove_notices_all
}
drivers = [
{'driver': 'ST', 'value': 1, 'uom': 2},
{'driver': 'BATLVL', 'value': 0, 'uom': 72} # battery level
]
class TemperatureNode(polyinterface.Node):
id = 'temperature'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 17},
{'driver': 'GV0', 'value': 0, 'uom': 17}, # feels like
{'driver': 'GV1', 'value': 0, 'uom': 17}, # dewpoint
{'driver': 'GV2', 'value': 0, 'uom': 17}, # heat index
{'driver': 'GV3', 'value': 0, 'uom': 17} # windchill
]
class HumidityNode(polyinterface.Node):
id = 'humidity'
drivers = [{'driver': 'ST', 'value': 0, 'uom': 22}]
class PressureNode(polyinterface.Node):
id = 'pressure'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 23}, # abs press
{'driver': 'GV0', 'value': 0, 'uom': 23}, # rel press
{'driver': 'GV1', 'value': 0, 'uom': 25} # trend
]
mytrend = []
def updateTrend(self, current):
t = 0
past = 0
if (len(self.mytrend) == 180):
past = self.mytrend.pop()
if self.mytrend != []:
past = self.mytrend[0]
        # calculate trend: -1 falling, 1 rising, 0 steady
        if (past - current) > 0.01:
            t = -1
        elif (past - current) < -0.01:
            t = 1
self.mytrend.insert(0, current)
return t
class WindNode(polyinterface.Node):
id = 'wind'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 48}, # speed
{'driver': 'GV0', 'value': 0, 'uom': 76}, # direction
{'driver': 'GV1', 'value': 0, 'uom': 48}, # gust
{'driver': 'GV2', 'value': 0, 'uom': 76} # gust direction
]
class PrecipitationNode(polyinterface.Node):
id = 'precipitation'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 24}, # rate
{'driver': 'GV0', 'value': 0, 'uom': 105}, # hourly
{'driver': 'GV1', 'value': 0, 'uom': 105}, # daily
{'driver': 'GV2', 'value': 0, 'uom': 105}, # weekly
        {'driver': 'GV3', 'value': 0, 'uom': 105}, # monthly
        {'driver': 'GV4', 'value': 0, 'uom': 105}  # yearly
]
class LightNode(polyinterface.Node):
id = 'light'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 71}, # UV
{'driver': 'GV0', 'value': 0, 'uom': 74}, # solar radiation
{'driver': 'GV1', 'value': 0, 'uom': 36}, # Lux
]
class IndoorNode(polyinterface.Node):
id = 'indoor'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 17}, # indoor temp
{'driver': 'GV0', 'value': 0, 'uom': 22}, # indoor humidity
]
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('AmbientWeather')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
| 2.515625
| 3
|
mmdeploy/codebase/mmdet3d/models/voxelnet.py
|
xizi/mmdeploy
| 746
|
12781126
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
'mmdet3d.models.detectors.voxelnet.VoxelNet.simple_test')
def voxelnet__simple_test(ctx,
self,
voxels,
num_points,
coors,
img_metas=None,
imgs=None,
rescale=False):
"""Test function without augmentaiton. Rewrite this func to remove model
post process.
Args:
voxels (torch.Tensor): Point features or raw points in shape (N, M, C).
num_points (torch.Tensor): Number of points in each pillar.
coors (torch.Tensor): Coordinates of each voxel.
        img_metas (list[dict]): Contains pcd meta info.
Returns:
List: Result of model.
"""
x = self.extract_feat(voxels, num_points, coors, img_metas)
bbox_preds, scores, dir_scores = self.bbox_head(x)
return bbox_preds, scores, dir_scores
@FUNCTION_REWRITER.register_rewriter(
'mmdet3d.models.detectors.voxelnet.VoxelNet.extract_feat')
def voxelnet__extract_feat(ctx,
self,
voxels,
num_points,
coors,
img_metas=None):
"""Extract features from points. Rewrite this func to remove voxelize op.
Args:
voxels (torch.Tensor): Point features or raw points in shape (N, M, C).
num_points (torch.Tensor): Number of points in each pillar.
coors (torch.Tensor): Coordinates of each voxel.
        img_metas (list[dict]): Contains pcd meta info.
Returns:
torch.Tensor: Features from points.
"""
voxel_features = self.voxel_encoder(voxels, num_points, coors)
batch_size = coors[-1, 0] + 1 # refactor
assert batch_size == 1
x = self.middle_encoder(voxel_features, coors, batch_size)
x = self.backbone(x)
if self.with_neck:
x = self.neck(x)
return x
| 2.375
| 2
|
autopy/core/Option.py
|
songofhawk/autopy
| 1
|
12781127
|
<gh_stars>1-10
class Option:
project: str = './conf/auto_dingding.yaml'
def __init__(self, project):
self.project = project if project is not None else self.project
| 2.234375
| 2
|
pyextmod_starter.py
|
WarrenWeckesser/pyextmod_starter
| 0
|
12781128
|
# Copyright © 2021 <NAME>
# Distributed under the MIT license.
"""
This module defines the function
def generate_extmod(module_name, module_doc, funcs,
c_filename=None, setup_filename="setup.py")
It generates C code for a Python extension module, with boilerplate code
for defining functions with in the extension module that have signatures
like those in the list of functions provided by the `funcs` parameter.
Only the function signatures of the functions in `funcs` are used; the
bodies of the functions are ignored.
`generate_extmod` generates the boilerplate code for the extension module,
but the code will not do anything useful. The intent is for a developer to
run this once, and then edit the C file to implement whatever the extension
module is supposed to do.
"""
import textwrap
import inspect
def quote_wrap(s):
return '"' + s + '"'
header = """
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <stddef.h>
// Only need stdio.h for the demo code that prints the arguments.
#include <stdio.h>
"""
numpy_header = """
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include <numpy/ndarrayobject.h>
"""
init_start = """
PyMODINIT_FUNC
PyInit_{module_name}(void)
{{
PyObject *module;
module = PyModule_Create(&{module_name}module);
if (module == NULL) {{
return NULL;
}}
"""
init_end = """
return module;
}
"""
func_start = """
static PyObject *
{func_name}(PyObject *self, PyObject *args, PyObject *kwargs)
{{
"""
func_end = """
// The demo code returns None; modify as needed.
Py_RETURN_NONE;
}
"""
methods_table_start = """
static PyMethodDef {module_name}_methods[] = {{
"""
methods_table_entry = """\
{{"{func_name}", (PyCFunction)(void(*)(void)) {func_name}, METH_VARARGS | METH_KEYWORDS,
{doc}}},
"""
methods_table_end = """\
{NULL, NULL, 0, NULL}
};
"""
module_definition_struct = """
static struct PyModuleDef {module_name}module = {{
PyModuleDef_HEAD_INIT,
"{module_name}",
{module_doc},
-1,
{module_name}_methods
}};
"""
numpy_setup = """
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.add_extension('{module_name}',
sources=['{c_filename}'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(name='{module_name}',
version='0.1',
configuration=configuration)
"""
def _generate_function(func, out):
sig = inspect.signature(func)
param_names = list(sig.parameters)
func_name = func.__name__
out.write(func_start.format(func_name=func_name))
# fmt is the format string that will be used in PyArg_ParseTupleAndKeywords
fmt = ''
kwlist = []
has_default = False
for name, param_type in sig.parameters.items():
if func.__kwdefaults__ and name in func.__kwdefaults__:
# Ignore the default value, and use Py_None as the default.
out.write(f' PyObject *{name} = Py_None;\n')
if not has_default:
has_default = True
fmt += '|$'
else:
out.write(f' PyObject *{name} = NULL;\n')
if param_type.kind != param_type.POSITIONAL_ONLY:
kwlist.append(name)
else:
kwlist.append('')
fmt += 'O'
fmt += f':{func_name}'
kwlist_str = ", ".join([quote_wrap(kw) for kw in kwlist])
param_refs = ", ".join(['&' + kw for kw in param_names])
out.write(f' static char *kwlist[] = {{{kwlist_str}, NULL}};\n')
fmt = quote_wrap(fmt)
out.write(f' if (!PyArg_ParseTupleAndKeywords(args, kwargs, {fmt}, kwlist,\n')
out.write(f' {param_refs})) {{\n')
out.write(' return NULL;\n')
out.write(' }\n')
out.write('\n')
out.write(' // This demo code just prints the arguments to stdout.\n')
for param_name in param_names:
out.write(f' printf("{param_name}:\\n");\n')
out.write(f' PyObject_Print({param_name}, stdout, 0);\n')
out.write(' printf("\\n");\n')
out.write(func_end)
def _docstring_literal(doc, name, out):
if doc is None:
return 'NULL'
doc = textwrap.dedent(doc).strip()
lines = doc.splitlines()
if len(lines) > 1:
macro_name = f'{name.upper()}_DOCSTRING'
out.write(f"\n#define {macro_name} \\\n")
for line in doc.splitlines():
out.write(f'"{line}\\n"\\\n')
out.write('""\n')
return macro_name
else:
return quote_wrap(doc)
def _generate_methods_table(module_name, funcs, out):
docstrings = []
for func in funcs:
docstrings.append(_docstring_literal(func.__doc__, func.__name__, out))
out.write(methods_table_start.format(module_name=module_name))
for func, doc in zip(funcs, docstrings):
func_name = func.__name__
out.write(methods_table_entry.format(func_name=func_name, doc=doc))
out.write(methods_table_end)
def _generate_module_definition_struct(module_name, module_doc, out):
doc = _docstring_literal(module_doc, module_name + "_MODULE", out)
out.write(module_definition_struct.format(module_name=module_name,
module_doc=doc))
def _create_setup_numpy(module_name, c_filename, setup_out):
setup_out.write(numpy_setup.format(module_name=module_name,
c_filename=c_filename))
def _create_setup_plain(module_name, c_filename, setup_out):
setup_out.write('# This file follows example shown at\n')
setup_out.write('# https://docs.python.org/3/extending/building.html#building-c-and-c-extensions-with-distutils\n')
setup_out.write('\n')
setup_out.write('from distutils.core import setup, Extension\n')
setup_out.write('\n')
setup_out.write(f"{module_name} = Extension('{module_name}',\n")
setup_out.write(f"{' '*len(module_name)} "
f"sources=['{c_filename}'])\n")
setup_out.write("\n")
setup_out.write(f"setup(name='{module_name}',\n")
setup_out.write(" version='0.1',\n")
setup_out.write(f" ext_modules=[{module_name}])\n")
def generate_extmod(module_name, module_doc, funcs, numpy=False,
c_filename=None, setup_filename="setup.py"):
"""
    Generate the boilerplate code for a Python extension module.
Parameters
----------
module_name : str
The extension module name.
module_doc : str or None
The docstring for the module.
funcs : list[callable]
For each function in ``funcs``, a function with the same name is
created in the extension module. The function will parse its arguments
as objects, and print them to stdout. (This is just so the module can
be compiled and tested; the intent is for the user to edit the file
        to do something useful.)
    numpy : bool, optional
        If True, include the NumPy C API headers and generate a
        numpy.distutils-based setup script. The default is False.
    c_filename : str, optional
        The name of the C file for the extension module. If not given, the
        name is generated as ``f"{module_name}module.c"``.
    setup_filename : str
        The name of the setup script. The default is ``"setup.py"``.
"""
if not module_name.isidentifier():
raise ValueError(f"invalid name {module_name!r}; name must be a "
"valid identifier.")
if c_filename is None:
c_filename = f'{module_name}module.c'
with open(c_filename, 'w') as out:
out.write(header)
if numpy:
out.write(numpy_header)
if callable(funcs):
funcs = [funcs]
for func in funcs:
_generate_function(func, out)
_generate_methods_table(module_name, funcs, out)
_generate_module_definition_struct(module_name, module_doc, out)
out.write(init_start.format(module_name=module_name))
if numpy:
out.write('\n')
out.write(' // Required to access the NumPy C API.\n')
out.write(' import_array();\n')
out.write(init_end)
with open(setup_filename, 'w') as setup_out:
if numpy:
_create_setup_numpy(module_name, c_filename, setup_out)
else:
_create_setup_plain(module_name, c_filename, setup_out)
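# Minimal usage sketch (not part of the original module). The placeholder
# function's body is ignored; only its signature and docstring feed the
# generated C code.
if __name__ == "__main__":
    def add(a, b, *, scale=None):
        """Add two objects, optionally scaled."""

    generate_extmod("demo", "A demo extension module.", [add])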
| 2.359375
| 2
|
bots/stocks/due_diligence/supplier.py
|
jbushago/GamestonkTerminal
| 1
|
12781129
|
import logging
import disnake
import requests
from bs4 import BeautifulSoup
from bots import imps
from gamestonk_terminal.decorators import log_start_end
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def supplier_command(ticker=""):
"""Displays suppliers of the company [CSIMarket]"""
# Debug user input
if imps.DEBUG:
logger.debug("dd supplier %s", ticker)
if not ticker:
raise Exception("A ticker is required")
url_supply_chain = (
f"https://csimarket.com/stocks/competitionNO3.php?supply&code={ticker.upper()}"
)
text_supplier_chain = BeautifulSoup(requests.get(url_supply_chain).text, "lxml")
l_suppliers = list()
for supplier in text_supplier_chain.findAll(
"td", {"class": "svjetlirub11 block al"}
):
l_suppliers.append(supplier.text.replace("\n", "").strip())
if not l_suppliers:
raise Exception("No suppliers found.\n")
# Debug user output
if imps.DEBUG:
logger.debug(l_suppliers)
suppliers, unique = [], []
i = 0
    for value in l_suppliers:
        if value not in unique:
            unique.append(value)
while i < len(unique):
warp = unique[i][0:28]
text = f"{warp:<30}" if (i % 2) == 0 else f"{warp}\n"
suppliers.append(text)
i += 1
title = f"Stocks: [CSIMarket] {ticker.upper()} Suppliers"
reports = []
embeds = []
choices = [
disnake.SelectOption(label="Home", value="0", emoji="🟢"),
]
if len(suppliers) < 30:
description = f"```{''.join(suppliers)}```"
embeds.append(
disnake.Embed(
title=title,
                description=description,
colour=imps.COLOR,
).set_author(
name=imps.AUTHOR_NAME,
icon_url=imps.AUTHOR_ICON_URL,
)
)
reports.append(f"{description}")
# Output data
output = {
"title": title,
"description": reports,
"embed": embeds,
}
else:
i, end = 0, 30
while i < len(suppliers):
description = f"```{''.join(suppliers[i:end])}```"
embeds.append(
disnake.Embed(
title=title,
description=description,
colour=imps.COLOR,
).set_author(
name=imps.AUTHOR_NAME,
icon_url=imps.AUTHOR_ICON_URL,
)
)
i += 30
end += 30
reports.append(f"{description}")
# Output data
output = {
"view": imps.Menu,
"title": title,
"description": reports,
"embed": embeds,
"choices": choices,
}
return output
| 2.84375
| 3
|
displayingImage.py
|
pratyushshivam/Tesla-Car-Detector
| 0
|
12781130
|
import cv2
#Our Image
img_file='CarImage.jpg'
#Our pre-trained car classifier
classifier_file = 'car_detector.xml'
#create opencv image
img = cv2.imread(img_file)
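# Detection sketch (assumption; the original script only displays the image,
# and car_detector.xml is presumed to be a Haar cascade):
#     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#     cars = cv2.CascadeClassifier(classifier_file).detectMultiScale(gray)
#     for (x, y, w, h) in cars:
#         cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)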
# Display the image (with car rectangles if the detection sketch above is applied)
cv2.imshow('Car_detector', img)  # pops a window with the image
# Don't autoclose
cv2.waitKey()  # waits for a key press before closing the window
| 3.65625
| 4
|
tests/test_grad.py
|
papamarkou/eeyore
| 6
|
12781131
|
# %% # Definition of function whose analytical and autograd gradient are compared
#
# $$
# \begin{align}
# f(x, y) & =
# x^3 y^4, \\
# \nabla (f(x, y)) & =
# \begin{pmatrix}
# 3 x^2 y^4 \\
# 4 x^3 y^3
# \end{pmatrix}.
# \end{align}
# $$
# %% Import packages
import torch
import unittest
# %% Define function f whose gradient is computed
def f(theta):
return (theta[0]**3)*(theta[1]**4)
# %% Define analytical gradient of f
def analytical_gradf(theta):
return torch.tensor([3*(theta[0]**2)*(theta[1]**4), 4*(theta[0]**3)*(theta[1]**3)], dtype=torch.float)
# %% Class for running tests
class TestDerivatives(unittest.TestCase):
def test_grad(self):
theta = torch.tensor([2., 3.], dtype=torch.float, requires_grad=True)
f_val = f(theta)
f_val.backward()
self.assertTrue(torch.equal(analytical_gradf(theta), theta.grad))
# %% Enable running the tests from the command line
if __name__ == '__main__':
unittest.main()
| 2.84375
| 3
|
falmer/studentgroups/migrations/0004_auto_20170703_1633.py
|
sussexstudent/services-api
| 2
|
12781132
|
<filename>falmer/studentgroups/migrations/0004_auto_20170703_1633.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-03 16:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('studentgroups', '0003_auto_20170703_1630'),
]
operations = [
migrations.AlterField(
model_name='mslstudentgroup',
name='link',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='mslstudentgroup',
name='logo_url',
field=models.URLField(default=''),
),
]
| 1.445313
| 1
|
config.py
|
ufo-project/minepool-data-server
| 3
|
12781133
|
#!/usr/bin/env python
# encoding: utf-8
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'SUPER-SECRET'
LOGFILE = "server.log"
class DevelopmentConfig(Config):
DEBUG = True
LOG_BACKTRACE = True
LOG_LEVEL = 'DEBUG'
DB_HOST = '192.168.1.122'
DB_PORT = 3306
DB_NAME = 'test'
DB_USER = 'root'
DB_PASSWORD = '<PASSWORD>'
DB_USE_UNICODE = True
DB_CHARSET = 'utf8'
LDB_PATH = "../ldb_path"
SHARES_SERVER_IP = "0.0.0.0"
SHARES_SERVER_PORT = 9999
WSGI_SERVER_IP = "0.0.0.0"
WSGI_SERVER_PORT = 8085
@staticmethod
def init_app(app):
pass
class ProductionConfig(Config):
LOG_BACKTRACE = False
LOG_LEVEL = 'INFO'
DB_HOST = '127.0.0.1'
DB_PORT = 3306
DB_NAME = 'ufodb'
DB_USER = ''
DB_PASSWORD = ''
DB_USE_UNICODE = True
DB_CHARSET = 'utf8'
LDB_PATH = "../ldb_path"
SHARES_SERVER_IP = "127.0.0.1"
SHARES_SERVER_PORT = 9999
WSGI_SERVER_IP = "127.0.0.1"
WSGI_SERVER_PORT = 8085
@staticmethod
def init_app(app):
pass
config = {
'development': DevelopmentConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
}
| 2.046875
| 2
|
test.py
|
Maciejfiedler/random-flask-app
| 1
|
12781134
|
<filename>test.py
def readfromtxt():
with open("requests.txt", "r") as f:
return f.read()
print(readfromtxt())
| 2.640625
| 3
|
gurun/gui/io.py
|
gabrielguarisa/gurun
| 1
|
12781135
|
from typing import Any, Callable, List
import random
from gurun.node import Node, WrapperNode
try:
import pyautogui
except ImportError:
raise ImportError(
"pyautogui is not installed. Please install it with `pip install pyautogui`."
)
class Typewrite(WrapperNode):
def __init__(self, **kwargs: Any) -> None:
super().__init__(pyautogui.typewrite, **kwargs)
class Scroll(Node):
def __init__(self, **kwargs: Any) -> None:
super().__init__(pyautogui.scroll, **kwargs)
class Click(WrapperNode):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(pyautogui.click, **kwargs)
class HotKey(WrapperNode):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(pyautogui.hotkey, **kwargs)
class MoveRel(Node):
def __init__(
self,
x: int = 0,
y: int = 0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._x = x
self._y = y
def run(self, *args: Any, **kwargs: Any) -> Any:
pyautogui.moveRel(self._x, self._y)
class MoveTo(WrapperNode):
def __init__(self, **kwargs: Any) -> None:
super().__init__(pyautogui.moveTo, **kwargs)
class DragRel(WrapperNode):
def __init__(self, **kwargs: Any) -> None:
super().__init__(pyautogui.dragRel, **kwargs)
class MultipleClicks(Click):
def run(self, positions: List[List[int]], *args: Any, **kwargs: Any):
for x, y in positions:
super().run(*args, x=x, y=y, **kwargs)
class NaturalClick(Click):
def __init__(
self,
easing_functions: List[Callable] = [
pyautogui.easeInQuad,
pyautogui.easeOutQuad,
pyautogui.easeInOutQuad,
],
        minimum_duration: float = 1.0,
        maximum_duration: float = 1.5,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._easing_functions = easing_functions
self._minimum_duration = minimum_duration
self._maximum_duration = maximum_duration
def run(self, *args: Any, **kwargs: Any):
return super().run(
*args,
tween=random.choice(self._easing_functions),
duration=random.uniform(self._minimum_duration, self._maximum_duration),
**kwargs,
)
class MultipleNaturalClicks(NaturalClick):
def run(self, positions: List[List[int]], *args: Any, **kwargs: Any):
for x, y in positions:
super().run(*args, x=x, y=y, **kwargs)
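# Minimal usage sketch (assumption: Node.run forwards its kwargs to the
# wrapped pyautogui call, as MultipleClicks and NaturalClick above rely on):
#     NaturalClick().run(x=100, y=200)            # tweened click at (100, 200)
#     MultipleClicks().run([[10, 10], [20, 20]])  # click each position in turn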
| 2.390625
| 2
|
images/foss-asic-tools/addons/sak/bin/delete_areaid.py
|
isabella232/foss-asic-tools
| 9
|
12781136
|
# Enter your Python code here
import pya
from time import sleep
print("Starting...")
app = pya.Application.instance()
win = app.main_window()
# Load technology file
#tech = pya.Technology()
#tech.load(tech_file)
#layoutOptions = tech.load_layout_options
# Load def/gds file in the main window
#cell_view = win.load_layout(input_layout, layoutOptions, 0)
#layout_view = cell_view.view()
#layout_view.max_hier()
# gets the corresponding layout object
#layout = cell_view.layout()
layout = pya.Layout()
# input_layout is expected to be injected by KLayout, e.g. `klayout -rd input_layout=chip.gds`
layout.read(input_layout)
#layout.clear_layer(81)
#layout.delete_layer(81)
# gets the cell to change is "INV2X"
# cell = layout.cell("Active_area")
#cell = cell_view.cell
# finds source layer
#areaid_layer = layout.layer(81, 14)
#areaid_layer.delete()
#layout.write(input_layout)
layout.write('junk.gds')
print("Successfully wrote junk.gds (read from", input_layout, ")")
app.exit(0)
| 2.40625
| 2
|
easy/single number/solution.py
|
ilya-sokolov/leetcode
| 4
|
12781137
|
from typing import List
class Solution:
def singleNumber(self, nums: List[int]) -> int:
        result = 0
        # XOR cancels paired values (a ^ a == 0, a ^ 0 == a), leaving the unpaired one
        for i in nums:
            result ^= i
return result
s = Solution()
print(s.singleNumber([2, 2, 1]))
print(s.singleNumber([4, 1, 2, 1, 2]))
print(s.singleNumber([1]))
| 3.421875
| 3
|
vector_comparison/xi_squared.py
|
jensv/relative_canonical_helicity_tools
| 0
|
12781138
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from vector_calculus import vector_calculus as vc
def calc_and_plot_dists(field1, field2,
field1_title=None,
field2_title=None,
units=None):
r"""
"""
field1_magnitudes, field1_orientations = sample_vector_field(field1)
field2_magnitudes, field2_orientations = sample_vector_field(field2)
field1_histograms = bin_samples(field1_magnitudes, field1_orientations)
field2_histograms = bin_samples(field2_magnitudes, field2_orientations)
xi_sq = xi_squared(field1_histograms['counts_2d'], field2_histograms['counts_2d'])
plot_histograms(field1_magnitudes, field1_orientations, units=units)
    plt.suptitle(field1_title)
    plot_histograms(field2_magnitudes, field2_orientations, units=units)
    plt.suptitle(field2_title)
    plt.show()
    print('Xi^2 = %10.2f' % xi_sq)
def sample_vector_field(vector_field):
r"""
"""
vector_field = np.asarray(vector_field)
orientations = np.ravel(vector_orientation(vector_field))
magnitudes = np.ravel(vc.magnitude(vector_field))
return magnitudes, orientations
def bin_samples(magnitudes,
orientations,
magnitude_bins=50,
orientation_bins=50,
joint_bins=50):
r"""
"""
mag_counts, mag_bins = np.histogram(magnitudes,
bins=magnitude_bins)
o_counts, o_bins = np.histogram(orientations,
bins=orientation_bins)
(counts_2d, mag_bins_2d,
o_bins_2d) = np.histogram2d(magnitudes,
orientations,
bins=(magnitude_bins,
orientation_bins))
histograms = {'mag_counts': mag_counts,
'mag_bins': mag_bins,
'o_counts': o_counts,
'o_bins': o_bins,
'counts_2d': counts_2d,
'mag_bins_2d': mag_bins_2d,
'o_bins_2d': o_bins_2d}
return histograms
def plot_histograms(magnitudes, orientations, bins=50,
color='red', cmap='Reds', units=None):
r"""
"""
joint_grid = sns.JointGrid(magnitudes,
orientations)
joint_grid.plot_joint(plt.hist2d, bins=bins,
cmap=cmap)
joint_grid.plot_marginals(sns.distplot,
kde=False, bins=bins,
color=color)
xlabel = 'magnitude' + ' [' + units + ']'
joint_grid.set_axis_labels(xlabel, 'orientation [rad]')
return joint_grid
def xi_squared(dist1, dist2, dof=2.):
r"""
"""
assert dist1.shape == dist2.shape, "Distributions do not have equal dimensions."
addends = (dist1 - dist2)**2 / (dist1 + dist2)
addends[np.isclose(dist1 + dist2, 0)] = 0
xi_sq = np.sum(addends)
if not dof:
dof = len(dist1.shape)
xi_sq = xi_sq/dof
return xi_sq
def vector_orientation(vector_field):
r"""
Return vector angle with the z-axis.
"""
mag = vc.magnitude(vector_field)
angle = np.arccos(vector_field[2, :, :, :]/mag)
angle[np.isclose(mag, 0)] = 0
reflex = np.where(np.logical_and(vector_field[0, :, :, :] < 0,
vector_field[1, :, :, :] >= 0))
angle[reflex] = 2.*np.pi - angle[reflex]
reflex = np.where(np.logical_and(vector_field[0, :, :, :] < 0,
vector_field[1, :, :, :] < 0))
angle[reflex] = 2.*np.pi - angle[reflex]
return angle
| 2.765625
| 3
|
riptide_controllers/action/gateManeuver.py
|
tsender/riptide_software
| 0
|
12781139
|
#! /usr/bin/env python
import rospy
import actionlib
import dynamic_reconfigure.client
from riptide_msgs.msg import AttitudeCommand, LinearCommand, Imu
from std_msgs.msg import Float32, Float64, Int32
import riptide_controllers.msg
import time
import math
import numpy as np
def angleDiff(a, b):
return ((a-b+180) % 360)-180
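# angleDiff returns the shortest signed difference in degrees,
# e.g. angleDiff(350, 10) == -20.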
class GateManeuver(object):
ROLL_P = 2
CRUISE_VELOCITY = 45
DRIVE_FORCE = 30
def __init__(self):
self.rollPub = rospy.Publisher(
"/command/roll", AttitudeCommand, queue_size=5)
self.yawPub = rospy.Publisher(
"/command/yaw", AttitudeCommand, queue_size=5)
self.XPub = rospy.Publisher(
"/command/x", LinearCommand, queue_size=5)
self.YPub = rospy.Publisher(
"/command/y", LinearCommand, queue_size=5)
self.ZPub = rospy.Publisher(
"/command/force_z", Float64, queue_size=5)
self._as = actionlib.SimpleActionServer(
"gate_maneuver", riptide_controllers.msg.GateManeuverAction, execute_cb=self.execute_cb, auto_start=False)
self._as.start()
def execute_cb(self, goal):
rospy.loginfo("Starting gate maneuver")
self.startAngle = rospy.wait_for_message("/state/imu", Imu).rpy_deg.z
self.angleTraveled = 0
self.pastHalf = False
self.yawPub.publish(self.CRUISE_VELOCITY, AttitudeCommand.VELOCITY)
self.rollPub.publish(self.CRUISE_VELOCITY, AttitudeCommand.VELOCITY)
self.imuSub = rospy.Subscriber("/state/imu", Imu, self.imuCb)
while self.angleTraveled < 330 and not rospy.is_shutdown():
rospy.sleep(0.05)
if self._as.is_preempt_requested():
rospy.loginfo('Preempted Gate Maneuver')
self.cleanup()
self._as.set_preempted()
return
rospy.loginfo("Leveling")
self.cleanup()
while abs(rospy.wait_for_message("/state/imu", Imu).rpy_deg.x) > 5 and not rospy.is_shutdown():
rospy.sleep(0.05)
rospy.loginfo("Done")
self._as.set_succeeded()
def cleanup(self):
self.yawPub.publish(0, AttitudeCommand.POSITION)
self.rollPub.publish(0, AttitudeCommand.POSITION)
self.imuSub.unregister()
self.XPub.publish(0, LinearCommand.FORCE)
self.YPub.publish(0, LinearCommand.FORCE)
self.ZPub.publish(0)
def imuCb(self, msg):
self.angleTraveled = angleDiff(msg.rpy_deg.z, self.startAngle)
roll = msg.rpy_deg.x
if self.angleTraveled < -90:
self.pastHalf = True
if self.pastHalf and self.angleTraveled < 0:
self.angleTraveled += 360
if roll < 0:
roll += 360
self.rollPub.publish(self.CRUISE_VELOCITY + self.ROLL_P * (self.angleTraveled - roll), AttitudeCommand.VELOCITY)
        # Rotate the fixed world-frame drive force into the rolled/yawed body
        # frame: F_body = (R_yaw * R_roll)^-1 * F_world
        sr = math.sin(roll * math.pi / 180)
        cr = math.cos(roll * math.pi / 180)
        sy = math.sin(self.angleTraveled * math.pi / 180)
        cy = math.cos(self.angleTraveled * math.pi / 180)
        rRotMat = np.matrix([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
        yRotMat = np.matrix([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
        outVector = np.dot(np.linalg.inv(np.dot(yRotMat, rRotMat)), np.matrix([[self.DRIVE_FORCE], [0], [0]]))
self.XPub.publish(outVector.item(0), LinearCommand.FORCE)
self.YPub.publish(outVector.item(1), LinearCommand.FORCE)
self.ZPub.publish(outVector.item(2))
if __name__ == '__main__':
rospy.init_node('gate_maneuver')
server = GateManeuver()
rospy.spin()
| 2.453125
| 2
|
tools/display_bemf_graph.py
|
tuw-cpsg/bldc-controller
| 0
|
12781140
|
<filename>tools/display_bemf_graph.py
import serial
import matplotlib.pyplot as plt
import numpy as np
class DataFromController:
s1 = None
s2 = None
s3 = None
def __init__(self, s1=None, s2=None, s3=None):
self.s1 = s1
self.s2 = s2
self.s3 = s3
def __str__(self):
return "DataFromController(s1={}, s2={}, s3={})".format(self.s1, self.s2, self.s3)
def read_data(port):
ser = serial.Serial()
ser.port = port
ser.baudrate = 2000000
ser.timeout = 10
ser.open()
if ser.is_open:
print("Serial port opened: ", ser)
else:
print("Error opening serial port")
exit(1)
ser.read_until(b'\r\n')
while True:
l = ser.read_until(b'\r\n')
try:
l = l[:-2]
# p = l.split(b';')
# s1 = int(p[0])
s1 = int(l)
# s2 = int(p[1])
# s3 = int(p[2])
# yield DataFromController(s1=s1, s2=s2, s3=s3)
yield DataFromController(s1=s1, s2=0, s3=0)
        except ValueError:
            print("error parsing line: " + str(l))
# use ggplot style for more sophisticated visuals
plt.style.use('ggplot')
def handle_close(evt):
exit(0)
size = 1000
x = np.linspace(0, 1, size + 1)[0:-1]
s1_data = np.zeros(size)
s2_data = np.zeros(size)
s3_data = np.zeros(size)
plt.ion()
fig = plt.figure(figsize=(13, 6))
fig.canvas.mpl_connect('close_event', handle_close)
a1 = fig.add_subplot(111)
line1, = a1.plot(x, s1_data)
line2, = a1.plot(x, s2_data)
line3, = a1.plot(x, s3_data)
# update plot label/title
plt.ylabel('Y Label')
plt.title('BEMF')
plt.show()
plt.ylim([0, 1000])
def update():
line1.set_ydata(s1_data)
line2.set_ydata(s2_data)
line3.set_ydata(s3_data)
plt.pause(0.0000001)
c = 0
for d in read_data('/dev/ttyUSB0'):
if d.s1 > 4000 or d.s2 > 4000 or d.s3 > 4000:
continue
s1_data[-1] = d.s1
s2_data[-1] = d.s2
s3_data[-1] = d.s3
print("avg: {}".format(np.average(s1_data)))
c += 1
if c == size / 10:
update()
c = 0
s1_data = np.append(s1_data[1:], 0.0)
s2_data = np.append(s2_data[1:], 0.0)
s3_data = np.append(s3_data[1:], 0.0)
| 2.875
| 3
|
scrape_mars.py
|
jleibfried/web-scraping-challenge
| 0
|
12781141
|
import requests
import pymongo
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
from flask import Flask, render_template
# Create an instance of our Flask app.
app = Flask(__name__)
# Create connection variable
conn = 'mongodb://localhost:27017'
# Pass connection to the pymongo instance.
client = pymongo.MongoClient(conn)
# Route that will trigger the scrape function
@app.route("/")
def scrape():
url = 'https://mars.nasa.gov/news/'
# Retrieve page with the requests module
response = requests.get(url)
# Create BeautifulSoup object; parse with 'lxml'
soup = BeautifulSoup(response.text, 'lxml')
# Retrieve the parent divs for all articles
firstTitle = soup.find('div', class_='content_title').text
firstGraf = soup.find('div', class_="rollover_description_inner").text
# Testing code
# firstTitle = "line of dialogue"
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url_jpl = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url_jpl)
try:
browser.click_link_by_partial_text('FULL IMAGE')
    except:
        print("'FULL IMAGE' link not found; continuing with the current page")
newHtml = browser.html
soup = BeautifulSoup(newHtml, 'html.parser')
images = soup.findAll('img')
# images.find(class_=)
extractImage = images[3]
extractImageSrc = extractImage['src']
featured_image_url = 'https://www.jpl.nasa.gov' + extractImageSrc
# Tweet place holder while I figure out the twitter scrape here ./testPython/twitter Test.ipynb
mars_weather = 'Sol 1801 (Aug 30, 2017), Sunny, high -21C/-5F, low -80C/-112F, pressure at 8.82 hPa, daylight 06:09-17:55'
url = 'https://space-facts.com/mars/'
# Retrieve page with the requests module
response = requests.get(url)
# Create BeautifulSoup object; parse with 'html'
soup = BeautifulSoup(response.text, 'html.parser')
# Code from here
# https://pythonprogramminglanguage.com/web-scraping-with-pandas-and-beautifulsoup/
# https://stackoverflow.com/questions/50633050/scrape-tables-into-dataframe-with-beautifulsoup
table = soup.find_all('table')[0]
table_rows = table.find_all('tr')
l = []
for tr in table_rows:
td = tr.find_all('td')
row = [tr.text for tr in td]
l.append(row)
factsDf = pd.DataFrame(l)
factsDf.columns = (['Mars Metrics','Measurements'])
    factsDf = factsDf.set_index('Mars Metrics')
htmlOutput = factsDf.to_html()
# # https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars
# # The website is failing to load in two browsers. Seems like a bad thing
# hemisphere_image_urls = [
# {"title": "Valles Marineris Hemisphere", "img_url": "..."},
# {"title": "Cerberus Hemisphere", "img_url": "..."},
# {"title": "Schiaparelli Hemisphere", "img_url": "..."},
# {"title": "Syrtis Major Hemisphere", "img_url": "..."},
# ]
# Scraping Wikipedia
url_jpl = 'https://en.wikipedia.org/wiki/Chrysler_Hemi_engine'
browser.visit(url_jpl)
newHtml = browser.html
soup = BeautifulSoup(newHtml, 'html.parser')
images = soup.findAll('img')
# creating a list of images
extImgList = []
    for image in images:
        extImgList.append(image['src'])
# selecting the ones I like
extractImageSrc0 = extImgList[15]
extractImageSrc1 = extImgList[3]
extractImageSrc2 = extImgList[16]
extractImageSrc3 = extImgList[6]
link0 = "https:" + extractImageSrc0
link1 = "https:" + extractImageSrc1
link2 = "https:" + extractImageSrc2
link3 = "https:" + extractImageSrc3
hemisphere_image_urls = [
{"title": "5 7 Hemi", "img_url": link0},
{"title": "Hemi in 300C", "img_url": link1},
{"title": "6 1 Hemi", "img_url": link2},
{"title": "FiredomeV8", "img_url": link3},
]
# Connect to a database. Will create one if not already available.
db = client.marsdb
# Drops collection if available to remove duplicates
db.marsdb.drop()
# Building DB
db.marsdb.insert_many(
[
{
"Title": firstTitle,
"Paragraph": firstGraf,
"Image": featured_image_url,
"Tweet":mars_weather,
"Table":htmlOutput
}
]
)
# sending info to index.html
marsInfoDb = list(db.marsdb.find())
print (marsInfoDb)
return render_template('index.html', marsInfoDb=marsInfoDb, hemisphere_image_urls=hemisphere_image_urls)
if __name__ == "__main__":
app.run(debug=True)
| 3.046875
| 3
|
music_tag/wave.py
|
gyozaaaa/music-tag
| 28
|
12781142
|
#!/usr/bin/env python
# coding: utf-8
try:
import mutagen.wave
from music_tag.id3 import Id3File
class WaveId3File(Id3File):
tag_format = "Wave[Id3]"
mutagen_kls = mutagen.wave.WAVE
def __init__(self, filename, **kwargs):
super(WaveId3File, self).__init__(filename, **kwargs)
# self.tag_map = self.tag_map.copy()
# self.tag_map.update({
# '#codec': TAG_MAP_ENTRY(getter=lambda afile, norm_key: 'mp3',
# type=str),
# '#bitspersample': TAG_MAP_ENTRY(getter=lambda afile, norm_key: None,
# type=int),
# })
except ImportError:
pass
| 2.03125
| 2
|
logo.py
|
KOLANICH/UniOpt
| 1
|
12781143
|
<filename>logo.py<gh_stars>1-10
import numpy as np
from funcs import logoFunc
import matplotlib
from matplotlib import pyplot as plt
import PIL.Image
import colorcet
def background(xscale=3):
return np.mean(makeLogo(xscale*10)[-1])
defaultRes=2048
def makeLogo(x=0, y=0, xscale=3, xres=defaultRes, yres=defaultRes):
xs=np.linspace(-xscale+x, x+xscale, xres)
ys=np.linspace(y-1, y+30, yres)
xys=np.array(np.meshgrid(xs, ys))
zs=logoFunc(xys)
zs=np.flip(zs, axis=0)
return (xs, ys, xys, zs)
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LightSource
logoBack=background()
defaultCmap=colorcet.m_rainbow_r
def getColors(z, cmap=None):
if cmap is None:
cmap=defaultCmap
mz=np.array(z)
mz[mz>logoBack]=logoBack
mz-=np.min(mz)
mz/=np.max(mz)
cols=cmap(mz)
#cols=np.zeros((z.shape[0], z.shape[1], 4))
#cols[:,:,3]=np.exp(-mz)
return cols
def plotLogo3D(res=defaultRes):
(x, y, xy, z)=makeLogo(xres=res, yres=res)
fig = plt.figure()
ax = Axes3D(fig)
ax.view_init(elev=90, azim=90)
ax.plot_surface(*xy, z, facecolors=getColors(z))
#ax.contour(x, y, z)
return fig
def plotLogo(res=defaultRes):
(x, y, xy, z)=makeLogo(xres=res, yres=res)
del(xy)
del(x)
del(y)
imgData=getColors(z)
del(z)
    im = PIL.Image.fromarray(np.array(imgData * 255, dtype=np.uint8), "RGBA")  # RGBA needs unsigned 8-bit
return im
from plumbum import cli
class LogoPlotterCLI(cli.Application):
res=cli.SwitchAttr(["r", "resolution"], int, default=defaultRes, help="A resolution of the image. Use the highest possible.")
threeD=cli.Flag(["3d"], help="Plot 3d")
def main(self):
if not self.threeD:
im=plotLogo(res=self.res)
im.save("logo.png", format="png")
else:
fig=plotLogo3D(res=self.res)
plt.show()
if __name__=="__main__":
LogoPlotterCLI.run()
| 2.40625
| 2
|
utils/settings.py
|
alex-ortega-07/hand-writing-recognition-model
| 2
|
12781144
|
<filename>utils/settings.py<gh_stars>1-10
import pygame
pygame.init()
# We declare some screen constants
WIDTH, HEIGHT = 300, 360
FPS = 240
# Here are some colors
BG_COLOR = 255, 255, 255
LINE_COLOR = 120, 120, 120
PIXEL_ACTIVE_COLOR = 0, 0, 0
BLACK = 0, 0, 0
WHITE = 255, 255, 255
# Below, we declare some data for the canvas
MARGIN_CANVAS_TOP = 40
MARGIN_CANVAS_BOTTOM = 40
MARGIN_CANVAS_SIDES = 10
SPACE_EACH_SQUARE = 10
CANVAS_GRID = True
CANVAS_ROWS = 28
CANVAS_COLS = 28
CANVAS_PAINT_SIZE = 4
| 2.75
| 3
|
app/extractor/__init__.py
|
ken-pan/wallpaper-downloader
| 2
|
12781145
|
"""
request(method, url, params=None, data=None, headers=None, cookies=None, files=None, auth=None, timeout=None, allow_redirects=True, proxies=None, hooks=None, stream=None, verify=None, cert=None, json=None)
参数:
method -- 请求方法
url -- 网址
params -- url中传递参数,字典类型{'key':'value'},http://www.baidu.com?key=value
data -- (optional) 字典、字节或文件对象的数据
json -- (optional) JSON格式的数据
headers -- (optional) 请求头
cookies -- (optional) 字典或者CookieJar
files -- (optional) 文件对象,上传文件
auth -- (optional) 身份验证方案
timeout (float or tuple) -- (optional) 超时参数
allow_redirects (bool) -- (optional) 是否允许跳转,默认为True
proxies -- (optional) 代理设置,字典类型
stream -- (optional) 大文件下载,把文件一点一点的下载,如果这个值为false,则全部写到内存中
verify -- (optional) 是否验证SSL,用于https
cert -- (optional) 证书参数,告诉request去这个地方去下载cert
"""
| 2.90625
| 3
|
src/ContactParticleFilter/python/twostepestimator.py
|
mpetersen94/spartan
| 0
|
12781146
|
<gh_stars>0
__author__ = 'manuelli'
# system imports
import numpy as np
from collections import namedtuple
# director imports
from director import transformUtils
import director.vtkAll as vtk
from director import lcmUtils
# CPF imports
from pythondrakemodel import PythonDrakeModel
import contactfilterutils as cfUtils
class TwoStepEstimator:
def __init__(self, robotStateModel, robotStateJointController, linkMeshData, config_filename):
"""
:param robotStateModel:
:param robotStateJointController:
:param linkMeshData: can be found in externalForce.linkMeshData
:param config: config filename, expected to be examples/ContactParticleFilter/config directory
"""
self.robotStateModel = robotStateModel
self.robotStateJointController = robotStateJointController
self.config = cfUtils.loadConfig(config_filename)
self.createDrakeModel()
self.initializeRobotPoseTranslator()
self.linkMeshData = linkMeshData
self.computeResidualThresholdForContact()
def createDrakeModel(self, filename=None):
self.drakeModel = PythonDrakeModel(self.config['robot']['floatingBaseType'], self.config['robot']['urdf'])
def initializeRobotPoseTranslator(self):
self.robotPoseTranslator = cfUtils.RobotPoseTranslator(self.robotStateModel.model, self.drakeModel.model)
def computeResidualThresholdForContact(self):
self.residualThresholdForContact = self.config['twoStepEstimator']['residualThresholdForContact']*np.ones(self.drakeModel.numJoints)
def getSquaredErrorFromResidual(self, residual):
"""
Computes the squared error given the residual
:param residual:
:return: squared error of the residual
"""
squaredError = np.dot(residual.transpose(), residual)
return squaredError
def findLinkNameWithExternalForceFromResidual(self, residual):
"""
Finds largest index in residual that is above threshold
:param residual:
:return: Largest index in residual that is above threshold. Returns None
if there is no index above threshold
"""
idx = np.where(np.abs(residual) > self.residualThresholdForContact)[0]
if len(idx) == 0:
return None
jointIdx = idx[-1]
jointName = self.drakeModel.getJointNameFromIdx(jointIdx)
linkName = str(self.drakeModel.model.findNameOfChildBodyOfJoint(jointName))
return linkName
# be careful here if director and this use different models
# for example if we are FIXED base and director has ROLLPITCHYAW
def getCurrentPose(self):
q_director = self.robotStateJointController.q
q = self.robotPoseTranslator.translateDirectorPoseToRobotPose(q_director)
return q
def computeTwoStepEstimate(self, residual, linkNamesWithContactForce=None):
"""
        Computes the two-step estimate from the Haddadin et al. paper
        :param residual:
        :param linkNamesWithContactForce: If this is None we will figure out
        the links with contact force on our own, assuming there is only one
:return: return estimate data if we actually did the estimation,
otherwise returns None
"""
# print "computing the two step estimate"
# figure out if this residual is above threshold where we should
# actually be computing the residual
squaredError = self.getSquaredErrorFromResidual(residual)
if squaredError < self.config['thresholds']['addContactPointSquaredError']:
return None
if linkNamesWithContactForce is None:
# print "two step estimate is computing it's own active link"
linkName = self.findLinkNameWithExternalForceFromResidual(residual)
if linkName is None:
return
else:
# the code below expects this to be a list
linkNamesWithContactForce = [linkName]
if len(linkNamesWithContactForce) == 0:
return None
# do kinematics on our internal model
q = self.getCurrentPose()
self.drakeModel.model.setJointPositions(q)
# stack the jacobians
jacobianTransposeList = []
for linkName in linkNamesWithContactForce:
linkId = self.drakeModel.model.findLinkID(linkName)
J = self.drakeModel.geometricJacobian(0,linkId,linkId, 0, False)
jacobianTransposeList.append(J.transpose())
stackedJacobian = np.hstack(jacobianTransposeList)
#do the pseudo inverse
pinvJacobian = np.linalg.pinv(stackedJacobian)
stackedWrenches = np.dot(pinvJacobian, residual)
returnData = dict()
# unpack the wrenches
for idx, linkName in enumerate(linkNamesWithContactForce):
startIdx = 6*idx
endIdx = startIdx + 6
wrench = stackedWrenches[startIdx:endIdx]
torque = wrench[0:3]
force = wrench[3:6]
contactLocationData = self.computeContactLocation(linkName, force, torque)
if contactLocationData is not None:
returnData[linkName] = contactLocationData
return returnData
def computeContactLocation(self, linkName, force, torque):
# want to find contactPoint such that force applied at contactPoint
# leads to given torque, i.e. we want to solve for contactPoint such that
# torque = contactPoint x force, where x denotes the cross product. This is
# the same as solving torque = -force x contactPoint = -forceCross * contactPoint
# everything here is in link frame
forceCross = transformUtils.crossProductMatrix(force)
forceCrossPseudoInverse = np.linalg.pinv(forceCross)
contactPoint_d = -np.dot(forceCrossPseudoInverse, torque)
forceNorm = np.linalg.norm(force)
if forceNorm < 0.5:
return None
forceNormalized = force/forceNorm
# now intersect line with linkMesh, choose the start and end of the ray
# so that we find a contact point where the force is pointing "into" the link
# mesh
rayOrigin = contactPoint_d - 0.5*forceNormalized
rayEnd = contactPoint_d + 0.5*forceNormalized
############# DEBUGGING
# print ""
# print "force", force
# print "torque", torque
# print "r_d", contactPoint_d
# impliedTorque = np.cross(contactPoint_d, force)
# print "implied torque", impliedTorque
linkToWorld = self.robotStateModel.getLinkFrame(linkName)
rayOriginInWorld = np.array(linkToWorld.TransformPoint(rayOrigin))
rayEndInWorld = np.array(linkToWorld.TransformPoint(rayEnd))
contactRayVisObjectName = linkName + " contact ray world frame"
pt = self.raycastAgainstLinkMesh(linkName, rayOrigin, rayEnd)
# data for later drawing and stuff
d = dict()
d['pt'] = pt # will be None if no intersection with link is found
d['contactRay'] = dict()
d['contactRay']['rayOriginInWorld'] = rayOriginInWorld
d['contactRay']['rayEndInWorld'] = rayEndInWorld
d['contactRay']['visObjectName'] = contactRayVisObjectName
d['contactRay']['color'] = [1,1,0]
d['force'] = force
d['forceInWorld'] = linkToWorld.TransformVector(force)
d['linkName'] = linkName
if pt is not None:
d['contactLocation'] = pt
d['contactLocationInWorld'] = linkToWorld.TransformPoint(pt)
return d
def raycastAgainstLinkMesh(self, linkName, rayOrigin, rayEnd):
meshToWorld = self.linkMeshData[linkName]['transform']
rayOriginInWorld = np.array(meshToWorld.TransformPoint(rayOrigin))
rayEndInWorld = np.array(meshToWorld.TransformPoint(rayEnd))
# ### DEBUGGING
# if self.showContactRay:
# d = DebugData()
# d.addLine(rayOriginInWorld, rayEndInWorld, radius=0.005)
# color=[1,0,0]
# obj = vis.updatePolyData(d.getPolyData(), "raycast ray in mesh frame", color=color)
tolerance = 0.0 # intersection tolerance
pt = [0.0, 0.0, 0.0] # data coordinate where intersection occurs
lineT = vtk.mutable(0.0) # parametric distance along line segment where intersection occurs
pcoords = [0.0, 0.0, 0.0] # parametric location within cell (triangle) where intersection occurs
subId = vtk.mutable(0) # sub id of cell intersection
result = self.linkMeshData[linkName]['locator'].IntersectWithLine(rayOriginInWorld, rayEndInWorld, tolerance, lineT, pt, pcoords, subId)
# this means we didn't find an intersection
if not result:
return None
# otherwise we need to transform it back to linkFrame
worldToMesh = meshToWorld.GetLinearInverse()
ptInLinkFrame = worldToMesh.TransformPoint(pt)
return ptInLinkFrame
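# Summary of the estimate above: stack the contact Jacobians J, recover the
# wrenches as w = pinv(J^T) * residual, solve torque = contactPoint x force
# for the contact point via the pseudo-inverse of the force cross-product
# matrix, then ray-cast along the force direction against the link mesh.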
| 2.203125
| 2
|
general/update_buildnum.py
|
shawnj/python
| 0
|
12781147
|
import os
import json
import sys
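# Usage sketch (from the argv handling below):
#     python update_buildnum.py <settings.json> <site> <build-number> <file-id>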
file = sys.argv[1]
buildsite = sys.argv[2]
buildnum = sys.argv[3]
fileid = sys.argv[4]
with open(file, "r") as jsonFile:
data = json.load(jsonFile)
if fileid is not "":
data["default_attributes"]["Sites"][buildsite.upper()]["BUILD"] = str(buildnum) + "." + str(fileid)
else:
data["default_attributes"]["Sites"][buildsite.upper()]["BUILD"] = str(buildnum)
with open(file, "w") as jsonFile:
json.dump(data, jsonFile)
| 2.84375
| 3
|
data_analysis/audiocommons_ffont/tempo_estimation/evaluation_metrics.py
|
aframires/freesound-loop-annotator
| 18
|
12781148
|
<reponame>aframires/freesound-loop-annotator
from ..ac_utils.general import vfkp
def accuracy1(data, method, sound_ids=None, tolerance=0.04, skip_zeroed_values=False):
"""
    Compares estimated bpm with annotated bpm of provided data. Considers as good matches values that do not differ
    by more than the tolerance parameter (default tolerance = 0.04, i.e., a 4% maximum deviation)
:param data: sound data with annotations and analysis
:param method: analysis algorithm name to compare with annotation
:param sound_ids: ids of the sounds to include in comparison (default=None, all found in data)
:param tolerance: max % deviation of bpm to consider a good match (default=0.04)
    :param skip_zeroed_values: whether to skip sounds whose estimated bpm is 0.0 (or missing)
:return: list with 1 for sounds that match and 0 for sounds that do not match
"""
return accuracy2(data, method, sound_ids, tolerance, skip_zeroed_values, allowed_multiples=[1.0])
def accuracy2(data, method, sound_ids=None, tolerance=0.04, skip_zeroed_values=False,
              allowed_multiples=[1.0/3, 0.5, 1.0, 2.0, 3.0]):
"""
Compares estimated bpm with annotated bpm of provided data. Considers as good matches values that are either the
same or multiples as listed in 'allowed_multiples' parameter.
    See: <NAME>., <NAME>., and <NAME>., <NAME>., & <NAME>. (2006). An Experimental Comparison
of Audio Tempo Induction Algorithms. IEEE Transactions on Audio, Speech and Language Processing, 14(5), 1832-1844.
:param data: sound data with annotations and analysis
:param method: analysis algorithm name to compare with annotation
:param sound_ids: ids of the sounds to include in comparison (default=None, all found in data)
:param tolerance: max % deviation of bpm to consider a good match (default=0.04)
    :param skip_zeroed_values: whether to skip sounds whose estimated bpm is 0.0 (or missing)
:param allowed_multiples: multiples of the ground truth bpm value which are to be accepted as good estimates
:return: list with 1 for sounds that match and 0 for sounds that do not match
"""
if sound_ids is None:
sound_ids = data.keys()
output = list()
for sound_id in sound_ids:
ground_truth_value = data[sound_id]['annotations']['bpm']
if ground_truth_value is None:
continue
try:
estimated_value = vfkp(data[sound_id]['analysis'], method + '.bpm', ignore_non_existing=False)
if skip_zeroed_values and estimated_value == 0.0:
continue
except KeyError:
# Method produced no estimation
if not skip_zeroed_values:
output.append(0)
continue
threshold = tolerance * float(ground_truth_value)
found_match = 0
for multiple in allowed_multiples:
delta = abs(float(ground_truth_value) * multiple - estimated_value)
if delta <= threshold:
found_match = 1
break
output.append(found_match)
return output
def accuracy1e(data, method, sound_ids=None, skip_zeroed_values=False):
"""
    Compares estimated bpm with annotated bpm of provided data. Considers as good matches values that, after rounding,
    are exactly equal to the ground truth.
:param data: sound data with annotations and analysis
:param method: analysis algorithm name to compare with annotation
:param sound_ids: ids of the sounds to include in comparison (default=None, all found in data)
    :param skip_zeroed_values: whether to skip sounds whose estimated bpm is 0.0 (or missing)
:return: list with 1 for sounds that match and 0 for sounds that do not match
"""
if sound_ids is None:
sound_ids = data.keys()
output = list()
for sound_id in sound_ids:
ground_truth_value = data[sound_id]['annotations']['bpm']
if ground_truth_value is None:
continue
try:
estimated_value = vfkp(data[sound_id]['analysis'], method + '.bpm', ignore_non_existing=False)
if skip_zeroed_values and estimated_value == 0.0:
continue
except KeyError:
# Method produced no estimation
if not skip_zeroed_values:
output.append(0)
continue
if int(round(estimated_value)) == int(round(ground_truth_value)):
output.append(1)
else:
output.append(0)
return output
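# Minimal usage sketch (assumptions: `data` follows the annotator's layout and
# vfkp resolves the dotted key path '<method>.bpm' inside each 'analysis' dict):
#     data = {'123': {'annotations': {'bpm': 120.0},
#                     'analysis': {'essentia': {'bpm': 119.2}}}}
#     matches = accuracy1(data, 'essentia', tolerance=0.04)
#     print(sum(matches) / float(len(matches)))  # fraction of good estimates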
| 2.75
| 3
|
templates/server.py
|
eatsleeplim/ansible-role-install-root-cert
| 7
|
12781149
|
# Python 2 only: BaseHTTPServer and SimpleHTTPServer were merged into http.server in Python 3
import BaseHTTPServer, SimpleHTTPServer
import ssl
httpd = BaseHTTPServer.HTTPServer(('0.0.0.0', 443), SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile='{{ certfile_pem }}', server_side=True)
httpd.serve_forever()
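# Python 3 sketch (assumption; not part of the role's template):
#     import http.server, ssl
#     httpd = http.server.HTTPServer(('0.0.0.0', 443), http.server.SimpleHTTPRequestHandler)
#     ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
#     ctx.load_cert_chain('{{ certfile_pem }}')
#     httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)
#     httpd.serve_forever()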
| 2.328125
| 2
|
release/stubs.min/System/Windows/Forms/__init___parts/TreeNodeMouseClickEventArgs.py
|
YKato521/ironpython-stubs
| 0
|
12781150
|
class TreeNodeMouseClickEventArgs(MouseEventArgs):
"""
Provides data for the System.Windows.Forms.TreeView.NodeMouseClick and System.Windows.Forms.TreeView.NodeMouseDoubleClick events.
TreeNodeMouseClickEventArgs(node: TreeNode,button: MouseButtons,clicks: int,x: int,y: int)
"""
@staticmethod
def __new__(self, node, button, clicks, x, y):
""" __new__(cls: type,node: TreeNode,button: MouseButtons,clicks: int,x: int,y: int) """
pass
Node = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the node that was clicked.
Get: Node(self: TreeNodeMouseClickEventArgs) -> TreeNode
"""
| 2.28125
| 2
|