| blob_id (stringlengths 40–40) | directory_id (stringlengths 40–40) | path (stringlengths 2–616) | content_id (stringlengths 40–40) | detected_licenses (listlengths 0–69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5–118) | snapshot_id (stringlengths 40–40) | revision_id (stringlengths 40–40) | branch_name (stringlengths 4–63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k–686M, nullable) | star_events_count (int64, 0–209k) | fork_events_count (int64, 0–110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2–10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2–10.3M) | authors (listlengths 1–1) | author_id (stringlengths 0–212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3e1b8c58e13404a3f8bcab537d108dc8fbba6cf2
|
824f831ce0921b3e364060710c9e531f53e52227
|
/Leetcode/Bit_Manipulation/LC-137. Single Number II.py
|
1d1cbe18b737d7bc46e48e4b93d6593d40a9734d
|
[] |
no_license
|
adityakverma/Interview_Prepration
|
e854ff92c10d05bc2c82566ea797d2ce088de00a
|
d08a7f728c53943e9a27c33f8e4249633a69d1a6
|
refs/heads/master
| 2020-04-19T19:36:06.527353
| 2019-06-15T23:02:30
| 2019-06-15T23:02:30
| 168,392,921
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# Given a non-empty array of integers, every element appears three times except for one, which appears exactly once. Find that single one.
#
# Note:
#
# Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
#
# Example 1:
#
# Input: [2,2,3,2]
# Output: 3
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
one, two = 0, 0
for x in nums:
one, two, three = one ^ x, two | (one & x), two & x
one, two = one & ~three, two & ~three
return one
# https://leetcode.com/problems/single-number-ii/discuss/43412/Python-Bit-Manipulation-(with-more-general-case)
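# How it works (annotation added for clarity): `one` holds bits whose count
# so far is 1 (mod 3), `two` holds bits at 2 (mod 3); `three` flags bits that
# just reached a third occurrence, and those are cleared from both counters,
# so only the bits of the element that appears once survive in `one`.
# Quick sanity check: Solution().singleNumber([2, 2, 3, 2]) == 3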
|
[
"noreply@github.com"
] |
adityakverma.noreply@github.com
|
cef4204c3131566960b2cffb00031012c9cfab5a
|
0da6b4de78c6142b3629a3fd80ce2b03bea734e6
|
/Code_Eval/Moderate/RemoveCharacters/RemoveCharacters.py3
|
adbfe74177c6a0f88895d21855ceea6c4b80980c
|
[
"MIT"
] |
permissive
|
marshallhumble/Coding_Challenges
|
4979e19315b6b05ae04805534e6576dbf6d9bfd9
|
90101d308a3342a0eab33c1f4580df549bfc8896
|
refs/heads/master
| 2021-01-18T02:30:37.740072
| 2016-10-09T16:25:41
| 2016-10-09T16:25:41
| 48,114,974
| 0
| 0
| null | 2016-06-03T14:30:38
| 2015-12-16T14:26:03
|
Python
|
UTF-8
|
Python
| false
| false
| 320
|
py3
|
#!/usr/bin/env python
from sys import argv
with open(argv[1], 'r') as f:
cases = f.read().strip().splitlines()
for item in cases:
text, letters = item.split(',')
text = text.split()
for c in letters:
# rebuild the word list; the original text.index(t) lookup could write to the
# wrong slot once two words became equal after a replace
text = [t.replace(c, '') for t in text]
print(' '.join(text))
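# Usage sketch (file name illustrative; the input format -- "some words, chars"
# per line -- is inferred from the split(',') above):
#   $ ./RemoveCharacters.py3 input.txt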
|
[
"humblejm@gmail.com"
] |
humblejm@gmail.com
|
f815c4084a42ba53c8fd0d521b59be910aa8d39a
|
fc30a728749d80f80b3ad0ceb36f781fa1c0539c
|
/venv/bin/chardetect
|
cf2c8de26a8327efdd326fd9f77595ddbb767524
|
[] |
no_license
|
naveenthammu/spark2.0
|
d3d2373321d48efcfccfe64d1f77281fed543aa5
|
648bda252f6dead334d2c56f70fc4de7448b4ca7
|
refs/heads/main
| 2023-01-21T10:44:40.972645
| 2020-12-09T10:28:51
| 2020-12-09T10:28:51
| 319,920,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
#!/Users/naveen/PycharmProjects/jarvisAI/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
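# (Annotation: this is the standard pip/setuptools console-script shim -- it
# trims a "-script.pyw"/".exe" suffix from argv[0] so usage text shows a clean
# program name, then delegates to chardet's packaged main().)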
|
[
"46121222+naveenthammu@users.noreply.github.com"
] |
46121222+naveenthammu@users.noreply.github.com
|
|
09b8fbbe63d9cdc1b305b139040ceae512662f1c
|
6beb400670e95229e6a286ec9caf5b951ac25acc
|
/users/migrations/0040_auto_20210324_1942.py
|
f971d8bf84a283a9545adadd88f0d3db92f928f2
|
[] |
no_license
|
AlphBeta/MiniProject
|
dc9d4bc3983787fb49a034b87262fa88577c2b1d
|
0bf4383cebd453c4404c6c7a6007ad9ba9831f1f
|
refs/heads/master
| 2023-04-09T17:42:44.005625
| 2021-04-16T16:35:20
| 2021-04-16T16:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,161
|
py
|
# Generated by Django 2.2 on 2021-03-24 14:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0039_auto_20210323_1226'),
]
operations = [
migrations.AddField(
model_name='medinfo',
name='whr_grade',
field=models.CharField(default=None, max_length=20, null=True),
),
migrations.AlterField(
model_name='medinfo',
name='eye_sight',
field=models.CharField(choices=[('Glasses', 'I wear Glasses'), ('Myopia', 'Difficult to see distant objects'), ('Hypermetropia', 'Difficult in reading'), ('Normal', 'I can see clearly'), ('Presbyopia', 'Difficulty in both')], default=False, max_length=20, verbose_name='How is your eye sight?'),
),
migrations.AlterField(
model_name='medinfo',
name='fever_cycle',
field=models.CharField(choices=[('NN', "Don't remember the last time I got"), ('N', 'Once in a while'), ('S', 'Very Often')], default=None, max_length=3, null=True, verbose_name='How often do you get cold or fever?'),
),
]
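# This migration would normally be applied with the standard Django workflow
# (command shown for illustration):
#   $ python manage.py migrate users 0040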
|
[
"antonybush95@gmail.com"
] |
antonybush95@gmail.com
|
66bd838d845ba508452f6e07db34617b1be75c50
|
4c424f63df041c523d0c799d3532f5ca422e8ddb
|
/setup.py
|
f5c381bb171834375a5184339edb128f17d852eb
|
[
"MIT"
] |
permissive
|
tomoeyukishiro/jiphy
|
cc1971e4530e93e68622b70daa54e9023d8d6591
|
0eee4d578373ca5c7fcdc5c1a13ff1b4d479b216
|
refs/heads/master
| 2021-01-17T14:36:33.980648
| 2015-05-01T04:24:46
| 2015-05-01T04:24:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,642
|
py
|
#!/usr/bin/env python
import subprocess
import sys
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
extra_kwargs = {'tests_require': ['pytest', 'mock']}
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.exit(pytest.main(self.test_args))
except ImportError:
from distutils.core import setup, Command
class PyTest(Command):
extra_kwargs = {}
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
raise SystemExit(subprocess.call([sys.executable, 'runtests.py']))
try:
import pypandoc
readme = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
readme = ''
setup(name='jiphy',
version='1.0.0',
description='A Python to JavaScript two way converter. Your client side done in a Jiphy.',
long_description=readme,
author='Timothy Crosley',
author_email='timothy.crosley@gmail.com',
url='https://github.com/timothycrosley/jiphy',
license="MIT",
entry_points={
'console_scripts': [
'jiphy = jiphy.main:main',
]
},
packages=['jiphy'],
requires=[],
install_requires=[],
cmdclass={'test': PyTest},
keywords='Refactor, Python, Python2, Python3, Refactoring, JavaScript, Converter, Transform, Convert',
classifiers=['Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'Natural Language :: English',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities'],
**PyTest.extra_kwargs)
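# With PyTest wired in through cmdclass above, the suite runs via the standard
# setuptools entry point (shown for illustration):
#   $ python setup.py test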
|
[
"timothy.crosley@gmail.com"
] |
timothy.crosley@gmail.com
|
353ae098259974c435e2b3d91c5469d6c2c01efd
|
f08639e673a8b30ba957bf9a034e397aa7e9b68c
|
/2021.04.12 - Prova 2º Bimestre/Felipe Souza Vieira/a/mainAtendente.py
|
cf8df2434cd6bcec1c8c77675c8111d3bc3a8e5d
|
[
"MIT"
] |
permissive
|
Feolips/ADS18A.08-Estrutura-de-Dados
|
558b1996b5671cf4d83237695f7a413a95cc5897
|
cc4471a9b562706b4782d6adb446a13e4c908c1f
|
refs/heads/main
| 2023-04-19T12:31:12.956562
| 2021-05-13T13:52:13
| 2021-05-13T13:52:13
| 340,669,166
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
# 2nd Data Structures Exam
# Felipe Souza Vieira
# Queue-ticket management in order of arrival
from classes import telaAtendente, todaFila
|
[
"feolips@outlook.com"
] |
feolips@outlook.com
|
de035fce7e28e300c7dca1fc24cc91f1940d09b5
|
78aa1e05e9d20d9d68bd3411f31068ac5cbe2d13
|
/car_key_fob_hardware_backdoor/reset_target.py
|
65b9aab5e9d173a28e7547580bbf14b9f9132820
|
[] |
no_license
|
cloakware-ctf/rhme3-writeups
|
7b5a53e6996aa3d02e73a082f5cf4a7ad5c9e3d8
|
6f4e01976621647a8cb2d4417476737e0ed7f9b1
|
refs/heads/master
| 2020-03-16T21:18:13.894855
| 2019-01-07T03:24:59
| 2019-01-07T03:27:16
| 132,993,579
| 47
| 6
| null | 2019-01-05T20:16:38
| 2018-05-11T05:29:59
|
Assembly
|
UTF-8
|
Python
| false
| false
| 2,810
|
py
|
#!/usr/bin/env python3
# deps can be satisfied on Linux with `sudo pip3 install pyftdi`
from pyftdi.gpio import GpioController, GpioException
from time import sleep
import sys
import serial
import bitstring
import hmac
import hashlib
bitstring.bytealigned = True # change the default behaviour
bitbang = GpioController()
bitbang.open_from_url('ftdi:///1')
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=None)
DELAY = 0.0000005 # the strict worst-case delay is 0.54 ms -- we can relax that because the many layers of software between us add plenty of delay of their own.
# on my machine this results in a minimum CLK pulse width of 0.58 ms
HARD_DELAY = 0.00054 # for cases where strict delay adherence is necessary (e.g. when beginning shift-out)
state = 0
def pin_output(line):
bitbang.set_direction(1 << line, 1 << line)
return
def pin_input(line):
bitbang.set_direction(1 << line, 0)
return
def pin_high(line):
global state
state = state | (1 << line)
bitbang.write_port(state)
return
def pin_low(line):
global state
state = state & ~(1 << line)
bitbang.write_port(state)
return
def set_pin(line, val):
if val:
pin_high(line)
else:
pin_low(line)
def get_pin(line):
state = bitbang.read_port()
return bool(state & (1 << line))
# SPI Name | MPSSE # | MPSSE Color | RHME3 Pin | Function Guess
MISO = 2 # GREEN | A5 | DO
MOSI = 1 # YELLOW | A4 | DI
CS = 3 # BROWN | A3 | LATCH
CLK = 0 # ORANGE | A2 | CLK
RESET = 4 # GREY | RESET | RESET
def shift_in_and_out_byte(tx):
building_byte = 0
for i in range(0, 8):
pin_low(CLK)
#assuming MSB first for now
set_pin(MOSI, bool(tx & (1 << (7 - i))))
sleep(DELAY)
pin_high(CLK)
sleep(DELAY)
building_byte = building_byte | (get_pin(MISO) << (7 - i))
pin_low(CLK)
return building_byte
pin_high(RESET)
pin_output(RESET)
pin_low(RESET)
pin_low(CLK)
pin_output(CLK)
pin_low(CS)
pin_output(CS)
pin_high(MOSI)
pin_output(MOSI)
pin_input(MISO)
def release_reset_and_wait():
global ser
print("Resetting Target...")
pin_low(RESET)
sleep(2 * HARD_DELAY)
pin_high(RESET)
while True:
line = ser.readline()
print(line)
if 'Test mode activated' in line.decode("utf-8"):
return
return
release_reset_and_wait()
def get_any_serial():
global ser
count = ser.in_waiting
if count > 0:
return ser.readline()
return ''
def print_any_serial():
line = get_any_serial()
if not line == '':
print(line)
sys.stdout.flush()
return
print_any_serial()
ser.close()
bitbang.close()
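# Illustrative transaction built from the helpers above (byte value is
# hypothetical):
#   pin_low(CS)                       # assert the latch
#   rx = shift_in_and_out_byte(0xA5)  # clock 8 bits out on MOSI, sample MISO
#   pin_high(CS)                      # release the latch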
|
[
"ben.l.gardiner@gmail.com"
] |
ben.l.gardiner@gmail.com
|
1f6464086f60ef7a26dd93d29b844cd1a8a11f48
|
ddac36e3bfdb8e5beb95ce483271d4b1a3e5a80b
|
/isolate_characteristics.py
|
df4f6891785be59bf0c29ad17e6ddbdc35af3526
|
[] |
no_license
|
arbitton/SLAC
|
2cd75b51773b4dcf1391c642924873d5a33a8708
|
f1f8dabd50efe5e2e01ea3b43057ff4629d34431
|
refs/heads/master
| 2021-01-19T20:18:49.823645
| 2010-08-09T19:27:26
| 2010-08-09T19:27:26
| 771,523
| 0
| 2
| null | 2017-10-29T03:20:06
| 2010-07-13T00:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 968
|
py
|
#!/usr/bin/env python
import sys
import re
from invenio.search_engine import get_fieldvalues
line_re = re.compile("(?P<click>[0-9]+)\s(?P<cite>[0-9]+)\s(?P<count>[0-9]+)\s\[(?P<paper_list>[,0-9\s]*)\]")
list_re = re.compile("[\b\]\[,\s]+")
def do_stuff(file):
file_contents = open(file, 'r')
for line in file_contents:
match = line_re.match(line)
if match:
click = int(match.group('click'))
cite = int(match.group('cite'))
count = int(match.group('count'))
if count != 0:
review_count = 0
paper_list = list_re.split(match.group('paper_list'))
for x in paper_list:
if 'Review' in get_fieldvalues(int(x), '980__%'):
review_count += 1
print click, cite, review_count
else:
print click, cite, "0"
def main(args):
for file in args:
do_stuff(file)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"abitton@pcudssw1508.cern.ch"
] |
abitton@pcudssw1508.cern.ch
|
c6ac579453969c92f1d9c44fabba1f781b0a2fec
|
370d66d174eedb5c2f9640b87f882a04b22e1f6d
|
/binary_search.py
|
29511959e2fc0cf0f27d1a6684245ae5a47cf55c
|
[] |
no_license
|
atuhe/Day4-andela_labs
|
510f5e118cd47891d05b1119da08a5550c99f85e
|
d49581ab92ae49deba05f396da7c41f7d61cc255
|
refs/heads/master
| 2021-01-19T22:43:53.915641
| 2017-04-20T12:28:35
| 2017-04-20T12:28:35
| 88,855,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 920
|
py
|
class BinarySearch(list):
def __init__(self, a, b):
self.array = [x for x in range(b, a * b, b)]
self.length = len(self.array)
def search(self, param):
count = 0
print(self.array)
array = self.array
first = 0
last = len(array)
while first < last:
try:
count += 1
mid = first + (last - first) // 2
value = self.array[mid]
if param == value:
return {"count": count, "index": mid}
elif param > value:
if first == mid:
break
first = mid + 1
else:
last = mid  # half-open range [first, last): use mid, not mid - 1, so low indexes stay reachable
except IndexError:  # a bare except would also swallow real bugs
break
return {"count": count, "index": -1}
# print(self.array)
mySearch = BinarySearch(120, 10)
print(mySearch.search(50))
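# With the half-open fix above, the array is [10, 20, ..., 1190] and 50 sits
# at index 4, so search(50) returns {"count": <probes made>, "index": 4}.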
|
[
"ratuhe60@gmail.com"
] |
ratuhe60@gmail.com
|
4905ecff70a426deb48f3e7b73ce340172ee9369
|
dc2d0aff11ac347b6079d339f04cbe0276968973
|
/Ejemplos A concise introduction to programming to python/tetris.py
|
9559a2670c03c7f46a10c61dc35179c743d021b0
|
[] |
no_license
|
CertifiedErickBaps/Python36
|
418f6b4815cb9fb329957d2fdb3a297ff2d1c3c1
|
447fdf6d1779ef73e4910ac94b5732553c0bb7dd
|
refs/heads/master
| 2021-09-24T00:00:09.207766
| 2017-05-20T23:25:19
| 2017-05-20T23:25:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,066
|
py
|
# plain stdlib imports (the original "from turtle import ..." cannot work; turtle does not re-export these modules)
import sys, random, time, select, os, termios
width = 10
height = 22
blocks = [ [ (0,0), (0,1), (0,-1), (1,0) ], # T
[ (0,0), (0,1), (0,2), (0,-1) ], # I
[ (0,0), (0,1), (1,1), (-1,0) ], # S
[ (0,0), (0,-1), (1,-1), (-1,0) ], # Z
[ (0,0), (0,1), (1,1), (1,0) ], # O
[ (0,0), (-1,1), (-1,0), (1,0) ], # L
[ (0,0), (1,1), (-1,0), (1,0) ], # J
]
inverted = '\033[7;1m'
blue = '\033[7;34m'
normal = '\033[0m'
clear_screen = '\033[2J' # clear the screen
home = '\033[H' # goto top left corner of the screen
# (the latter two were found using 'clear | od -c')
empty = ' '
black = inverted + ' ' + normal # two inverted spaces
blue = blue + ' ' + normal # two inverted spaces
floor = '=='
left = 'left'
right = 'right'
turn = 'turn'
down = 'down'
quit = 'quit'
shaft = None
def play_tetris():
initialize_shaft()
while True: # until game is lost
block = get_random_block()
coordinates = (width/2-1, 1) # in the middle at the top
if not place_block(block, coordinates, blue): # collision already?
return # game is lost!
next_fall_time = time.time() + fall_delay()
# ^^^ this is the time when the block will fall automatically
# one line down
while True: # until block is placed fixedly
print_shaft()
remove_block(block, coordinates)
x, y = coordinates
try:
try:
command = get_command(next_fall_time)
except Timeout: # no command given
raise Fall()
else: # no exception, so process command:
if command == left:
new_coordinates = (x-1, y)
new_block = block
elif command == right:
new_coordinates = (x+1, y)
new_block = block
elif command == turn:
new_coordinates = (x, y)
new_block = turn_block(block)
elif command == down:
raise Fall()
elif command == quit:
return
else:
raise Exception("internal error: %r" % command)
if place_block(new_block, new_coordinates,
blue): # command ok?
# execute the command:
block = new_block
coordinates = new_coordinates
else:
place_block(block, coordinates, blue)
# ignore the command which could not be executed
# maybe beep here or something ;->
except Fall:
# make the block fall automatically:
new_coordinates = (x, y+1)
next_fall_time = time.time() + fall_delay()
if place_block(block, new_coordinates, blue): # can be placed?
coordinates = new_coordinates
else:
place_block(block, coordinates,
black) # place block there again
break # and bail out
remove_full_lines()
class Timeout(Exception): pass
class Fall(Exception): pass
def remove_full_lines():
global shaft, width, height
def line_full(line):
global width
for x in range(width):
if line[x] == empty:
return False
return True
def remove_line(y):
global shaft, width
del shaft[y] # cut out line
shaft.insert(0, [ empty ] * width) # fill up with an empty line
for y in range(height):
if line_full(shaft[y]):
remove_line(y)
def fall_delay():
return 1.3 # cheap version; implement raising difficulty here
def turn_block(block):
"return a turned copy(!) of the given block"
result = []
for x, y in block:
result.append((y, -x))
return result
def get_command(next_fall_time):
"if a command is entered, return it; otherwise raise the exception Timeout"
while True: # until a timeout occurs or a command is found:
timeout = next_fall_time - time.time()
if timeout > 0.0:
(r, w, e) = select.select([ sys.stdin ], [], [], timeout)
else:
raise Timeout()
if sys.stdin not in r: # not input on stdin?
raise Timeout()
key = os.read(sys.stdin.fileno(), 1)
if key == 'j':
return left
elif key == 'l':
return right
elif key == 'k':
return turn
elif key == ' ':
return down
elif key == 'q':
return quit
else: # any other key: ignore
pass
def place_block(block, coordinates, color):
"if the given block can be placed in the shaft at the given coordinates"\
" then place it there and return True; return False otherwise and do not"\
" place anything"
global shaft, width, height
block_x, block_y = coordinates
for stone_x, stone_y in block:
x = block_x + stone_x
y = block_y + stone_y
if (x < 0 or x >= width or
y < 0 or y >= height or # border collision?
shaft[y][x] != empty): # block collision?
return False # cannot be placed there
# reached here? ==> can be placed there
# now really place it:
for stone_x, stone_y in block:
x = block_x + stone_x
y = block_y + stone_y
shaft[y][x] = color
return True
def remove_block(block, coordinates):
global shaft
block_x, block_y = coordinates
for stone_x, stone_y in block:
x = block_x + stone_x
y = block_y + stone_y
shaft[y][x] = empty
def get_random_block():
if random.randint(1, 10) == 1:
return perfect_block() or random.choice(blocks)
return random.choice(blocks)
def perfect_block():
result = []
for y in range(height):
if filter(lambda b: b != empty, shaft[y]): # found summit
random_order = range(width)
random.shuffle(random_order)
for x in random_order:
if shaft[y][x] == empty: # found space besides summit
for x_ in range(width-x): # fill to the right
if shaft[y][x+x_] != empty:
break
for y_ in range(height-y):
if shaft[y+y_][x+x_] == empty:
result.append((x_, y_))
else:
break
for x_ in range(-1, -x-1, -1): # fill to the left
if shaft[y][x+x_] != empty:
break
for y_ in range(height-y):
if shaft[y+y_][x+x_] == empty:
result.append((x_, y_))
else:
break
# shift block in x direction to center it:
xmin = min(map(lambda v: v[0], result))
xmax = max(map(lambda v: v[0], result))
return map(lambda v: (v[0]-(xmax+xmin)/2, v[1]), result)
return None
def initialize_shaft():
global width, height, shaft, empty
shaft = [ None ] * height
for y in range(height):
shaft[y] = [ empty ] * width
def print_shaft():
# cursor-goto top left corner:
sys.stdout.write(home)
for y in range(height):
if y > 3: # does this line have a border? (the topmost ones do not)
sys.stdout.write(']')
else:
sys.stdout.write(' ')
for x in range(width):
sys.stdout.write(shaft[y][x])
if y > 3: # does this line have a border? (the topmost ones do not)
sys.stdout.write('[\n')
else:
sys.stdout.write('\n')
# print bottom:
sys.stdout.write(']' + floor * width + '[\n')
def prepare_tty():
"set the terminal in char mode (return each keyboard press at once) and"\
" switch off echoing of this input; return the original settings"
stdin_fd = sys.stdin.fileno() # will most likely be 0 ;->
old_stdin_config = termios.tcgetattr(stdin_fd)
[ iflag, oflag, cflag, lflag, ispeed, ospeed, cc ] = \
termios.tcgetattr(stdin_fd)
cc[termios.VTIME] = 1
cc[termios.VMIN] = 1
iflag = iflag & ~(termios.IGNBRK |
termios.BRKINT |
termios.PARMRK |
termios.ISTRIP |
termios.INLCR |
termios.IGNCR |
#termios.ICRNL |
termios.IXON)
# oflag = oflag & ~termios.OPOST
cflag = cflag | termios.CS8
lflag = lflag & ~(termios.ECHO |
termios.ECHONL |
termios.ICANON |
# termios.ISIG |
termios.IEXTEN)
termios.tcsetattr(stdin_fd, termios.TCSANOW,
[ iflag, oflag, cflag, lflag, ispeed, ospeed, cc ])
return (stdin_fd, old_stdin_config)
def cleanup_tty(original_tty_settings):
"restore the original terminal settings"
stdin_fd, old_stdin_config = original_tty_settings
termios.tcsetattr(stdin_fd, termios.TCSADRAIN, old_stdin_config)
original_tty_settings = prepare_tty() # switch off line buffering etc.
sys.stdout.write(clear_screen)
try: # ensure that tty will be reset in the end
play_tetris()
finally:
cleanup_tty(original_tty_settings)
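# Controls, per get_command() above: j = left, l = right, k = rotate,
# space = drop, q = quit. This is Python 2 code and needs a real tty for
# prepare_tty()'s termios calls to succeed.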
|
[
"erickburn01@hotmail.com"
] |
erickburn01@hotmail.com
|
b3a60ac714fce6264dda9c805bc408569525ff9d
|
54f94b581268d301eb4b0bbf0f5c4c7373503252
|
/main/admin.py
|
0740f7f3cef5bece0662635a644852c32a3dc592
|
[] |
no_license
|
Mohammed-Aadil/Locate-restro
|
cfbe3f3b46e55f114f74f9109097ea8786b6b262
|
32af2e962e7046d9218cf5b87c35ceb387f0db5b
|
refs/heads/master
| 2021-01-01T19:39:17.255997
| 2015-08-02T03:46:52
| 2015-08-02T03:46:52
| 39,996,595
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 91
|
py
|
from django.contrib import admin
from main.models import restro
admin.site.register(restro)
|
[
"mailtoaadilhanif@gmail.com"
] |
mailtoaadilhanif@gmail.com
|
68579f64743e8acb46651befbce45f48bb5a6047
|
d1ad23e66d6a244066b74dcb371901f6dfcd4b4b
|
/game1.py
|
4e5dff1b169eaaa404c0e9e712b9de800ace6677
|
[] |
no_license
|
TobalJackson/qLearning
|
51939ca9fa526fb9eca4c297e7a44770d66afc80
|
c2aad641469acede3abebb02be16026d9a01e44d
|
refs/heads/master
| 2016-09-06T04:45:11.200944
| 2015-02-20T20:44:26
| 2015-02-20T20:44:26
| 31,048,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,093
|
py
|
#!/usr/bin/env python3
import os
choices = ['A', 'B', 'C']
state = True
global score
score = 0
global turns
turns = 2
os.system('clear')
print("Score: 0 \nTotal Score: 0")
def runGame():
global state
global turns
print("\nLives Left: {} ...\nChoose!:".format(turns))
print(choices)
var = input()
os.system('clear')
getPoints(var)
printScore()
if var in ('B', 'b'):
state = False
def getPoints(x):
global score
global choices
if x in ('A', 'a'):
score = score + 10
print("Score: +10")
if x in ('B', 'b'):
score = score + 30
print("Score: +30")
if x in ('C', 'c'):
score = score + 20
print("Score: +20")
choices.remove(x.upper())
def printScore():
print("Total Score: {}".format(score))
while(turns > 0):
while(state == True):
runGame()
turns = turns - 1
state = True
choices = ['A', 'B', 'C']
if turns != 0:
print("\nYou've died! Use what you've learned to achieve a higher score!")
else:
print("GAME OVER")
|
[
"tobaljackson@gmail.com"
] |
tobaljackson@gmail.com
|
4fd1eb98bcce9517b2c25ff5338f67ff3f22db0f
|
f706300ec7f39dfd12155899c92622d72c9ecdd9
|
/www/orm.py
|
94ddefb7ac0c19ef573de44ce01b6085358ffc74
|
[] |
no_license
|
yiluxiangbei000/awesome-website
|
b83cf064a6ba32f4e423d83ac196133150e23cfa
|
be1c1b1767a0eccb5273a19f5f2fd2b8a275c53c
|
refs/heads/master
| 2022-11-25T14:27:38.670238
| 2020-08-01T15:29:52
| 2020-08-01T15:29:52
| 282,934,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,009
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio, logging, aiomysql
def log(sql, args=()):
logging.info('SQL: %s' % sql)
async def create_pool(loop, **kw):
logging.info('create database connection pool...')
global __pool
__pool = await aiomysql.create_pool(
host=kw.get('host', 'localhost'),
port=kw.get('port', 3306),
user=kw['user'],
password=kw['password'],
db=kw['db'],
charset=kw.get('charset', 'utf8'),
autocommit=kw.get('autocommit', True),
maxsize=kw.get('maxsize', 10),
minsize=kw.get('minsize', 1),
loop=loop
)
async def select(sql, args, size=None):
log(sql, args)
global __pool
with (await __pool) as conn:
cur = await conn.cursor(aiomysql.DictCursor)
await cur.execute(sql.replace('?', '%s'), args or ())
if size:
rs = await cur.fetchmany(size)
else:
rs = await cur.fetchall()
await cur.close()
logging.info('rows returned: %s' % len(rs))
return rs
async def execute(sql, args):
log(sql)
global __pool
with (await __pool) as conn:
try:
cur = await conn.cursor()
await cur.execute(sql.replace('?', '%s'), args)
affected = cur.rowcount
await cur.close()
except BaseException as e:
raise
return affected
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
# Exclude the Model base class itself:
if name=='Model':
return type.__new__(cls, name, bases, attrs)
# Get the table name:
tableName = attrs.get('__table__', None) or name
logging.info('found model: %s (table: %s)' % (name, tableName))
# Collect all the Fields and the primary key name:
mappings = dict()
fields = []
primaryKey = None
for k, v in attrs.items():
if isinstance(v, Field):
logging.info(' found mapping: %s ==> %s' % (k, v))
mappings[k] = v
if v.primary_key:
# Found the primary key:
if primaryKey:
raise RuntimeError('Duplicate primary key for field: %s' % k)
primaryKey = k
else:
fields.append(k)
if not primaryKey:
raise RuntimeError('Primary key not found.')
for k in mappings.keys():
attrs.pop(k)
escaped_fields = list(map(lambda f: '`%s`' % f, fields))
attrs['__mappings__'] = mappings # keep the attribute-to-column mapping
attrs['__table__'] = tableName
attrs['__primary_key__'] = primaryKey # attribute name of the primary key
attrs['__fields__'] = fields # attribute names other than the primary key
# Build the default SELECT, INSERT, UPDATE and DELETE statements:
attrs['__select__'] = 'select `%s`, %s from `%s`' % (primaryKey, ', '.join(escaped_fields), tableName)
attrs['__insert__'] = 'insert into `%s` (%s, `%s`) values (%s)' % (tableName, ', '.join(escaped_fields), primaryKey, create_args_string(len(escaped_fields) + 1))
attrs['__update__'] = 'update `%s` set %s where `%s`=?' % (tableName, ', '.join(map(lambda f: '`%s`=?' % (mappings.get(f).name or f), fields)), primaryKey)
attrs['__delete__'] = 'delete from `%s` where `%s`=?' % (tableName, primaryKey)
return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass=ModelMetaclass):
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
def getValue(self, key):
return getattr(self, key, None)
def getValueOrDefault(self, key):
value = getattr(self, key, None)
if value is None:
field = self.__mappings__[key]
if field.default is not None:
value = field.default() if callable(field.default) else field.default
logging.debug('using default value for %s: %s' % (key, str(value)))
setattr(self, key, value)
return value
@classmethod
async def findAll(cls, where=None, args=None, **kw):
## find objects by where clause
sql = [cls.__select__]
if where:
sql.append('where')
sql.append(where)
if args is None:
args = []
orderBy = kw.get('orderBy', None)
if orderBy:
sql.append('order by')
sql.append(orderBy)
limit = kw.get('limit', None)
if limit is not None:
sql.append('limit')
if isinstance(limit, int):
sql.append('?')
args.append(limit)
elif isinstance(limit, tuple) and len(limit) == 2:
sql.append('?, ?')
args.extend(limit)
else:
raise ValueError('Invalid limit value: %s' % str(limit))
rs = await select(' '.join(sql), args)
return [cls(**r) for r in rs]
@classmethod
async def findNumber(cls, selectField, where=None, args=None):
## find number by select and where
logging.info('find number by select and where.....')
sql = ['select %s _num_ from `%s`' % (selectField, cls.__table__)]
if where:
sql.append('where')
sql.append(where)
rs = await select(' '.join(sql), args, 1)
if len(rs) == 0:
return None
return rs[0]['_num_']
@classmethod
async def find(cls, pk):
## find object by primary key
rs = await select('%s where `%s`=?' % (cls.__select__, cls.__primary_key__), [pk], 1)
if len(rs) == 0:
return None
return cls(**rs[0])
async def save(self):
args = list(map(self.getValueOrDefault, self.__fields__))
args.append(self.getValueOrDefault(self.__primary_key__))
rows = await execute(self.__insert__, args)
if rows != 1:
logging.warning('failed to insert record: affected rows: %s' % rows)
async def update(self):
args = list(map(self.getValue, self.__fields__))
args.append(self.getValue(self.__primary_key__))
rows = await execute(self.__update__, args)
if rows != 1:
logging.warning('failed to update by primary key: affected rows: %s' % rows)
async def remove(self):
args = [self.getValue(self.__primary_key__)]
rows = await execute(self.__delete__, args)
if rows != 1:
logging.warning('failed to remove by primary key: affected rows: %s' % rows)
class Field(object):
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)
class StringField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(100)'):
super().__init__(name, ddl, primary_key, default)
class BooleanField(Field):
def __init__(self, name=None, default=False):
super().__init__(name, 'boolean', False, default)
class IntegerField(Field):
def __init__(self, name=None, primary_key=False, default=0):
super().__init__(name, 'bigint', primary_key, default)
class FloatField(Field):
def __init__(self, name=None, primary_key=False, default=0.0):
super().__init__(name, 'real', primary_key, default)
class TextField(Field):
def __init__(self, name=None, default=None):
super().__init__(name, 'text', False, default)
def create_args_string(num):
L = []
for n in range(num):
L.append('?')
return ', '.join(L)
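# Minimal usage sketch (names are illustrative, not part of the original file):
#
#   class User(Model):
#       __table__ = 'users'
#       id = IntegerField('id', primary_key=True)
#       name = StringField('name')
#
#   async def demo(loop):
#       await create_pool(loop, user='root', password='secret', db='test')
#       await User(id=1, name='Alice').save()
#       print(await User.findAll(orderBy='id', limit=10))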
|
[
"913311670@qq.com"
] |
913311670@qq.com
|
e4eb420afc6fcbe357610f2a37e1565a7e0eefaa
|
29b1fc7add9f6a4d8a99def6691285df83d74385
|
/switchTest.python/python/surf/xilinx/_AxiSysMonUltraScale.py
|
902ede898babfea476dbfd1a9fa9f49f4b54a4f4
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
hmbui/switchtest
|
3cfbe1288816b82c7fad09ccd36b8f1daebf5526
|
3478afa822547b4ef7fe61c10ad9a83f61062872
|
refs/heads/master
| 2021-07-08T21:01:57.009949
| 2018-08-16T21:37:03
| 2018-08-16T21:37:03
| 143,228,716
| 0
| 2
| null | 2020-07-22T03:09:34
| 2018-08-02T01:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 16,869
|
py
|
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : PyRogue AXI-Lite System Management for Xilinx Ultra Scale (Refer to PG185 and UG580)
#-----------------------------------------------------------------------------
# File : AxiSysMonUltraScale.py
# Created : 2017-04-12
#-----------------------------------------------------------------------------
# Description:
# PyRogue AXI-Lite System Management for Xilinx Ultra Scale (Refer to PG185 and UG580)
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr
class AxiSysMonUltraScale(pr.Device):
def __init__( self,
name = "AxiSysMonUltraScale",
description = "AXI-Lite System Management for Xilinx Ultra Scale (Refer to PG185 and UG580)",
**kwargs):
super().__init__(name=name, description=description, **kwargs)
def addPair(name,offset,bitSize,units,bitOffset,description,function,pollInterval = 0,):
self.add(pr.RemoteVariable(
name = (name+"Raw"),
offset = offset,
bitSize = bitSize,
bitOffset = bitOffset,
base = pr.UInt,
mode = 'RO',
description = description,
pollInterval = pollInterval,
hidden = True,
))
self.add(pr.LinkVariable(
name = name,
mode = 'RO',
units = units,
linkedGet = function,
disp = '{:1.3f}',
dependencies = [self.variables[name+"Raw"]],
))
##############################
# Variables
##############################
self.add(pr.RemoteVariable(
name = "SR",
description = "Status Register",
offset = 0x04,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "AOSR",
description = "Alarm Output Status Register",
offset = 0x08,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "CONVSTR",
description = "CONVST Register",
offset = 0x0C,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "WO",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "SYSMONRR",
description = "SYSMON Hard Macro Reset Register",
offset = 0x10,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "WO",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "GIER",
description = "Global Interrupt Enable Register",
offset = 0x5C,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "IPISR",
description = "IP Interrupt Status Register",
offset = 0x60,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "IPIER",
description = "IP Interrupt Enable Register",
offset = 0x68,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
hidden = True,
))
###############################################
addPair(
name = 'Temperature',
offset = 0x400,
bitSize = 12,
bitOffset = 4,
units = "degC",
function = self.convTemp,
pollInterval = 5,
description = "Temperature's ADC value",
)
addPair(
name = 'VccInt',
offset = 0x404,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "VCCINT's ADC value",
)
addPair(
name = 'VccAux',
offset = 0x408,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "VCCAUX's ADC value",
)
addPair(
name = 'VpVn',
offset = 0x40C,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "VP/VN's ADC value",
)
addPair(
name = 'Vrefp',
offset = 0x410,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "VREFP's ADC value",
)
addPair(
name = 'Vrefn',
offset = 0x414,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "VREFN's ADC value",
)
addPair(
name = 'VccBram',
offset = 0x418,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "VBRAM's ADC value",
)
addPair(
name = 'SupplyOffset',
offset = 0x420,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "Supply Offset",
)
addPair(
name = 'AdcOffset',
offset = 0x424,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "ADC Offset",
)
addPair(
name = 'GainError',
offset = 0x428,
bitSize = 12,
bitOffset = 4,
units = "",
function = self.convCoreVoltage,
pollInterval = 5,
description = "Gain Offset",
)
addPair(
name = 'VauxpVauxn',
offset = 0x440,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convAuxVoltage,
pollInterval = 5,
description = "VAUXP_VAUXN's ADC values",
)
addPair(
name = 'MaxTemperature',
offset = 0x480,
bitSize = 12,
bitOffset = 4,
units = "degC",
function = self.convTemp,
pollInterval = 5,
description = "maximum temperature measurement",
)
addPair(
name = 'MaxVccInt',
offset = 0x484,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "maximum VCCINT measurement",
)
addPair(
name = 'MaxVccAux',
offset = 0x488,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "maximum VCCAUX measurement",
)
addPair(
name = 'MaxVccBram',
offset = 0x48C,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "maximum VBRAM measurement",
)
addPair(
name = 'MinTemperature',
offset = 0x490,
bitSize = 12,
bitOffset = 4,
units = "degC",
function = self.convTemp,
pollInterval = 5,
description = "minimum temperature measurement",
)
addPair(
name = 'MinVccInt',
offset = 0x494,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "minimum VCCINT measurement",
)
addPair(
name = 'MinVccAux',
offset = 0x498,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "minimum VCCAUX measurement",
)
addPair(
name = 'MinVccBram',
offset = 0x49C,
bitSize = 12,
bitOffset = 4,
units = "V",
function = self.convCoreVoltage,
pollInterval = 5,
description = "minimum VBRAM measurement",
)
self.add(pr.RemoteVariable(
name = "I2cAddress",
description = "I2C Address",
offset = 0x4E0,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "FlagRegister",
description = "Flag Register",
offset = 0x4FC,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
hidden = True,
))
self.addRemoteVariables(
name = "Configuration",
description = "Configuration Registers",
offset = 0x500,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
number = 4,
stride = 4,
hidden = True,
)
self.add(pr.RemoteVariable(
name = "SequenceReg8",
description = "Sequence Register 8",
offset = 0x518,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
hidden = True,
))
self.add(pr.RemoteVariable(
name = "SequenceReg9",
description = "Sequence Register 9",
offset = 0x51C,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
hidden = True,
))
self.addRemoteVariables(
name = "SequenceReg_7_0",
description = "Sequence Register [7:0]",
offset = 0x520,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
number = 8,
stride = 4,
hidden = True,
)
self.addRemoteVariables(
name = "AlarmThresholdReg_8_0",
description = "Alarm Threshold Register [8:0]",
offset = 0x540,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
number = 9,
stride = 4,
hidden = True,
)
self.add(pr.RemoteVariable(
name = "AlarmThresholdReg12",
description = "Alarm Threshold Register 12",
offset = 0x570,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
hidden = True,
))
self.addRemoteVariables(
name = "AlarmThresholdReg_25_16",
description = "Alarm Threshold Register [25:16]",
offset = 0x580,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RW",
number = 8,
stride = 4,
hidden = True,
)
self.addRemoteVariables(
name = "Vuser",
description = "VUSER[4:0] supply monitor measurement",
offset = 0x600,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
number = 4,
stride = 4,
hidden = True,
)
self.addRemoteVariables(
name = "MaxVuser",
description = "Maximum VUSER[4:0] supply monitor measurement",
offset = 0x680,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
number = 4,
stride = 4,
hidden = True,
)
self.addRemoteVariables(
name = "MinVuser",
description = "Minimum VUSER[4:0] supply monitor measurement",
offset = 0x6A0,
bitSize = 32,
bitOffset = 0x00,
base = pr.UInt,
mode = "RO",
number = 4,
stride = 4,
hidden = True,
)
# Default to simple view
self.simpleView()
@staticmethod
def convTemp(dev, var):
value = var.dependencies[0].get(read=False)
fpValue = value*(501.3743/4096.0)
fpValue -= 273.6777
return (fpValue)
@staticmethod
def convCoreVoltage(var):
value = var.dependencies[0].value()
fpValue = value*(732.0E-6)
return fpValue
@staticmethod
def convAuxVoltage(var):
return var.dependencies[0].value() * 244e-6
def simpleView(self):
# Hide all the variables
self.hideVariables(hidden=True)
# Then unhide the most interesting ones
vars = ["Temperature", "VccInt", "VccAux", "VccBram"]
self.hideVariables(hidden=False, variables=vars)
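# Worked example of the conversions above (raw codes assumed for
# illustration): a 12-bit Temperature code of 2563 gives
# 2563 * (501.3743 / 4096.0) - 273.6777 ~= 40.0 degC, and a VccInt code of
# 1366 gives 1366 * 732.0E-6 ~= 1.0 V.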
|
[
"hbui@slac.stanford.edu"
] |
hbui@slac.stanford.edu
|
459aea7ba8c6dfd791970997b1899a1c826d042c
|
36299cb484c42352ab10534f86e94d93fdc4982b
|
/GetDiskFreeSpace/sendmail_aliyun_demo.py
|
6af37a9d59aa89971945384b969554e221228ec6
|
[] |
no_license
|
alienwaredream/Python_SendMail
|
88102baf558a02011bbcf20f15bb2e1afbbd831c
|
e4c7b666e33e76bb057ab58618d2efcab6818963
|
refs/heads/master
| 2020-03-19T13:01:38.897071
| 2018-06-08T02:54:03
| 2018-06-08T02:54:03
| 136,556,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,953
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import smtplib
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email.mime.application import MIMEApplication
from email.header import Header
# Sender address, as created in the console
username = 'qhyy@mail.zhucx.top'
# Sender password, as created in the console
password = 'rF635akmhxDFX5neYUbd'
# Custom reply-to address
replyto = '56240949@qq.com'
# Recipient address or address list; multiple recipients supported, at most 30
#rcptto = ['***', '***']
rcptto = '56240949@qq.com'
# Build the multipart/alternative structure
msg = MIMEMultipart('alternative')
msg['Subject'] = Header('Custom mail subject')
msg['From'] = '%s <%s>' % (Header('Custom sender nickname'), username)
msg['To'] = rcptto
msg['Reply-to'] = replyto
msg['Message-id'] = email.utils.make_msgid()
msg['Date'] = email.utils.formatdate()
# Build the text/plain part of the alternative
textplain = MIMEText('Custom plain-text part', _subtype='plain', _charset='UTF-8')
msg.attach(textplain)
# Build the text/html part of the alternative
texthtml = MIMEText('Custom HTML part', _subtype='html', _charset='UTF-8')
msg.attach(texthtml)
# Send the mail
try:
client = smtplib.SMTP()
# on Python 2.7+, if you need SSL, create the client like this:
#client = smtplib.SMTP_SSL()
# plain SMTP uses port 25 or 80
client.connect('smtpdm.aliyun.com', 25)
# toggle DEBUG output (0 = off)
client.set_debuglevel(0)
client.login(username, password)
# the sender and the authenticated address must be identical
# note: to capture the DATA command's return value, see how smtplib's sendmail
# is composed from the SMTP.mail/SMTP.rcpt/SMTP.data methods
client.sendmail(username, rcptto, msg.as_string())
client.quit()
print('Mail sent successfully!')
except Exception:
print("Mail sending failed")
|
[
"zhuzhu262526@hotmail.com"
] |
zhuzhu262526@hotmail.com
|
f01500f48190bf27570e9b3e872617ca36403ad9
|
c1bd12405d244c5924a4b069286cd9baf2c63895
|
/azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/connection_monitor_result_py3.py
|
a63c14cdb35d058b59d00b0b3ecc676da158ceb9
|
[
"MIT"
] |
permissive
|
lmazuel/azure-sdk-for-python
|
972708ad5902778004680b142874582a284a8a7c
|
b40e0e36cc00a82b7f8ca2fa599b1928240c98b5
|
refs/heads/master
| 2022-08-16T02:32:14.070707
| 2018-03-29T17:16:15
| 2018-03-29T17:16:15
| 21,287,134
| 1
| 3
|
MIT
| 2019-10-25T15:56:00
| 2014-06-27T19:40:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,303
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectionMonitorResult(Model):
"""Information about the connection monitor.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar name: Name of the connection monitor.
:vartype name: str
:ivar id: ID of the connection monitor.
:vartype id: str
:param etag: Default value: "A unique read-only string that changes
whenever the resource is updated." .
:type etag: str
:ivar type: Connection monitor type.
:vartype type: str
:param location: Connection monitor location.
:type location: str
:param tags: Connection monitor tags.
:type tags: dict[str, str]
:param source: Required.
:type source:
~azure.mgmt.network.v2018_01_01.models.ConnectionMonitorSource
:param destination: Required.
:type destination:
~azure.mgmt.network.v2018_01_01.models.ConnectionMonitorDestination
:param auto_start: Determines if the connection monitor will start
automatically once created. Default value: True .
:type auto_start: bool
:param monitoring_interval_in_seconds: Monitoring interval in seconds.
Default value: 60 .
:type monitoring_interval_in_seconds: int
:param provisioning_state: The provisioning state of the connection
monitor. Possible values include: 'Succeeded', 'Updating', 'Deleting',
'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2018_01_01.models.ProvisioningState
:param start_time: The date and time when the connection monitor was
started.
:type start_time: datetime
:param monitoring_status: The monitoring status of the connection monitor.
:type monitoring_status: str
"""
_validation = {
'name': {'readonly': True},
'id': {'readonly': True},
'type': {'readonly': True},
'source': {'required': True},
'destination': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'source': {'key': 'properties.source', 'type': 'ConnectionMonitorSource'},
'destination': {'key': 'properties.destination', 'type': 'ConnectionMonitorDestination'},
'auto_start': {'key': 'properties.autoStart', 'type': 'bool'},
'monitoring_interval_in_seconds': {'key': 'properties.monitoringIntervalInSeconds', 'type': 'int'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},
}
def __init__(self, *, source, destination, etag: str="A unique read-only string that changes whenever the resource is updated.", location: str=None, tags=None, auto_start: bool=True, monitoring_interval_in_seconds: int=60, provisioning_state=None, start_time=None, monitoring_status: str=None, **kwargs) -> None:
super(ConnectionMonitorResult, self).__init__(**kwargs)
self.name = None
self.id = None
self.etag = etag
self.type = None
self.location = location
self.tags = tags
self.source = source
self.destination = destination
self.auto_start = auto_start
self.monitoring_interval_in_seconds = monitoring_interval_in_seconds
self.provisioning_state = provisioning_state
self.start_time = start_time
self.monitoring_status = monitoring_status
|
[
"noreply@github.com"
] |
lmazuel.noreply@github.com
|
8bed2576e84efdad959c537448c0618eafad6dca
|
d67a7f1fc0721e1ff9880b824635082238bf7fb5
|
/quickstart-external.py
|
85815f235c2d2ffc8f94e977075015cdc2287974
|
[
"MIT"
] |
permissive
|
isabella232/QuickStart-SendSMS-Python
|
544deac45d873f31951d23a4cf9cb318c4b157fd
|
4afb9d627aa579bef1f1e7aa77d8aa55f0f6da85
|
refs/heads/master
| 2022-03-07T11:14:15.849043
| 2017-08-07T13:23:12
| 2017-08-07T13:23:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
# quickstart.py
# tested in python 2.7.12
# implements send SMS example in
# https://sdcdocumentation.syniverse.com/index.php/sms/quick-start
#
# this example can be run using the free credits provided with your account.
# uses one non-standard python library, requests described in detail at
# http://docs.python-requests.org/en/latest/index.html
#
# dependencies are
# 1) SDC account at https://developer.syniverse.com/
# 2) correct service offerings enabled, as described in the quick start guide
# 3) application created as described in the quick start guide
# 4) whitelisted number, as described in the quick start guide, unless you have added a payment method
#
# changes required to the below script are
# 1) add your access token for the application
# 2) change the channel_id to your preferred one. see list at
# https://developer.syniverse.com/scg-web-gui/#/main/publicChannels
# 3) change my_number to your whitelisted number
# n.b. for some countries, such as the US, you need to use a pre-existing template
import requests
uk_channel_id = 'DJm-vHcnSBKbeK4b2FAOLQ' # change to your preferred country
my_number = '+44XXXXXXXXXX' # put your UK number here
url = 'https://api.syniverse.com/scg-external-api/api/v1/messaging/message_requests'
sms_text = 'Testing SMS via API!' # change this if you want
access_token = '[YOUR ACCESS TOKEN HERE]'
headers = {'Authorization': 'Bearer ' + access_token, 'Content-Type': 'application/json'}
payload = {'from': 'channel:' + uk_channel_id, 'to': [my_number], 'body': sms_text}
response = requests.post(url, json=payload, headers=headers)
print 'status code: ' + str(response.status_code)
print 'response: ' + response.text
|
[
"noreply@github.com"
] |
isabella232.noreply@github.com
|
744e25ea2ce4f23d24ec3ab78860ee223dc8ff82
|
5ea379292343f9c597e3b716880fcef5be9f6ce9
|
/old/Sensors2TypingTutorGateway_eye_face_wear_11.py
|
ed5569d5ac138422eacc54785155c1e8ffe216c6
|
[] |
no_license
|
markome/Sensor2TypingTutorGateway
|
7fd06480bb8c929d367f1575ebbf6b935f68306c
|
0f75e28639a82873da53dcbca7984d60dfb361c3
|
refs/heads/master
| 2021-01-11T21:00:15.246643
| 2017-01-17T13:43:58
| 2017-01-17T13:43:58
| 79,228,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,989
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 17:19:46 2016
@author: markome
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 10:02:19 2016
FaceReader, Tobii eye tracker and Android Wear to JS - Python app. Creates 2 server sockets for incoming dotNET app messages (from Tobii and Noldus) and forwards them over HTTP as .json objects.
All received events (from Tobii and Noldus) are stored in a buffer and sent as a vector upon HTTP request. When data is sent, the buffer is cleared.
For development purposes, until data is received from Noldus and Tobii, the app sends "placeholder" data repeatedly. Placeholder data is in the format of the actual data.
For ports, addresses and paths please see the configuration variables.
A call to http://localhost:8080/emptyData clears all data buffers.
!! Code requires the bottle package.
Install the Python package bottle by issuing the following command from a cmd window:
pip install bottle
Code snippets were stolen from various sources:
Working solution for CORS using bottle (allow GET requests from a server other than the one the JavaScript application originates from):
http://stackoverflow.com/questions/17262170/bottle-py-enabling-cors-for-jquery-ajax-requests
Creation of json:
http://stackoverflow.com/questions/23110383/how-to-dynamically-build-a-json-object-with-python
Threading:
http://stackoverflow.com/questions/2846653/python-multithreading-for-dummies
@author: markome, marko.meza@fe.uni-lj.si
"""
import threading
import socket
import sys
import bottle
from bottle import response
import json
import time
import datetime
import re
runTimestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H%M%S')
#configuration of dotNET application communication socket - data source
global tobiiDotNetAddress
tobiiDotNetAddress = 'localhost'
global tobiiDotNeAppPort
tobiiDotNeAppPort=10003
tobiiEyeTrackerServerHostRoute='/tobiiEyetracker'
global tobiiEyeTrackerReceivedEventCounter
tobiiEyeTrackerReceivedEventCounter = 0
global receivedTobiiMessage
# default data sent UNTIL none is received from FaceReader
receivedTobiiMessage=[]
#receivedTobiiMessage.append(json.loads('{"tobiiEyeTracker": {"leftPos": {"y": "6,54191137932216","x": "-6,95598391113099","z": "58,852241687782"},"timeStamp": "24.12.2015 11:06:59.6288","leftGaze": {"y": "21,7719111544532","x": "0,86490466749226","z": "6,23850804784943"},"rightPos": {"y": "7,04664176343695","x": "-0,80637315236163","z": "59,3518039600174"},"rightGaze": {"y": "22,6628011052412","x": "-5,56592479836322","z": "6,56276625429384"}}}'))
#receivedTobiiMessage.append(json.loads('{"tobiiEyeTracker": {"leftPos": {"y": "6,54191137932216","x": "-6,95598391113099","z": "58,852241687782"},"timeStamp": "24.12.2015 11:06:59.6288","leftGaze": {"y": "21,7719111544532","x": "0,86490466749226","z": "6,23850804784943"},"rightPos": {"y": "7,04664176343695","x": "-0,80637315236163","z": "59,3518039600174"},"rightGaze": {"y": "22,6628011052412","x": "-5,56592479836322","z": "6,56276625429384"}}}'))
receivedTobiiMessage.append(json.loads('{"tobiiEyeTracker":{"timeStamp":"30.12.2015 14:06:20.2412","leftPos":{"x":"-0,228793755914194","y":"11,5027813555582","z":"60,912982163767"},"rightPos":{"x":"5,89524352818696","y":"11,2245013358383","z":"61,0730322352786"},"leftGaze":{"x":"3,15812377150551","y":"17,3247499470179","z":"4,61986983600664"},"rightGaze":{"x":"-2,49937069615642","y":"17,3932511520527","z":"4,64480229580618"},"leftPupilDiameter":"2,645874","rightPupilDiameter":"2,622345"}}'))
receivedTobiiMessage.append(json.loads('{"tobiiEyeTracker":{"timeStamp":"30.12.2015 14:06:20.2442","leftPos":{"x":"-0,258863875351471","y":"11,5149518687205","z":"60,9095247803002"},"rightPos":{"x":"5,88168331298095","y":"11,2362714331765","z":"61,0613078775579"},"leftGaze":{"x":"2,38144559635971","y":"16,7283881083418","z":"4,40281135417063"},"rightGaze":{"x":"-3,55454772939922","y":"17,2529816540119","z":"4,59374825056375"},"leftPupilDiameter":"2,642151","rightPupilDiameter":"2,673187"}}'))
global tobiiReceivedFromSender # this is used for debugging. Until actual data is received, fake data is sent.
tobiiReceivedFromSender=False
global tobiiSocketRunning
tobiiSocketRunning = True
global tobiiDataLocalFileLogging
tobiiDataLocalFileLogging=True
global tobiiDataLocalFileLoggingFileName
tobiiDataLocalFileLoggingFileName='tobiiLog_'+runTimestamp+'.log'
global faceReaderDotNetAddress
faceReaderDotNetAddress='localhost'
global faceReaderDotNeAppPort
faceReaderDotNeAppPort=10001
#global noldusFaceReaderServerHostRoute
noldusFaceReaderServerHostRoute='/noldusFaceReader'
global noldusFaceReaderReceivedEventCounter
noldusFaceReaderReceivedEventCounter = 0
global receivedNoldusMessage
receivedNoldusMessage=[]
# default data sent UNTIL none is received from FaceReader
receivedNoldusMessage.append('DetailedLog 18.11.2015 14:41:35.299 Neutral : 0,5704 Happy : 0,6698 Sad : 0,0013 Angry : 0,0040 Surprised : 0,0129 Scared : 0,0007 Disgusted : 0,0048 Valence : 0,6650 Arousal : 0,2297 Gender : Male Age : 20 - 30 Beard : None Moustache : None Glasses : Yes Ethnicity : Caucasian Y - Head Orientation : -1,7628 X - Head Orientation : 2,5652 Z - Head Orientation : -3,0980 Landmarks : 375,4739 - 121,6879 - 383,2627 - 113,6502 - 390,8202 - 110,3507 - 396,1021 - 109,7039 - 404,9615 - 110,9594 - 443,2603 - 108,9765 - 451,9454 - 106,7192 - 457,1207 - 106,8835 - 464,1162 - 109,5496 - 470,9659 - 116,8992 - 387,4940 - 132,0171 - 406,4031 - 130,4482 - 441,6239 - 128,6356 - 460,6862 - 128,1997 - 419,0713 - 161,6479 - 425,3519 - 155,1223 - 431,9862 - 160,6411 - 406,9320 - 190,3831 - 411,4790 - 188,7656 - 423,1751 - 185,6583 - 428,5339 - 185,6882 - 433,7802 - 184,8167 - 445,6192 - 186,3515 - 450,8424 - 187,2787 - 406,0796 - 191,1880 - 411,9287 - 193,5352 - 417,9666 - 193,6567 - 424,0851 - 193,4941 - 428,6678 - 193,5652 - 433,2172 - 192,7540 - 439,3548 - 192,0136 - 445,4181 - 191,1532 - 451,6007 - 187,9486 - 404,5193 - 190,6352 - 412,8277 - 185,4609 - 421,1355 - 181,2883 - 428,3182 - 181,1826 - 435,2024 - 180,2258 - 443,9292 - 183,2533 - 453,1117 - 187,2288 - 405,9689 - 193,2750 - 410,0249 - 199,8118 - 416,0457 - 203,0374 - 423,4839 - 204,1818 - 429,9247 - 204,2175 - 436,3620 - 203,1305 - 443,4268 - 200,9355 - 448,9572 - 197,1335 - 452,0746 - 190,0314 Quality : 0,8137 Mouth : Closed Left Eye : Open Right Eye : Open Left Eyebrow : Lowered Right Eyebrow : Lowered Identity : NO IDENTIFICATION')
receivedNoldusMessage.append('DetailedLog 24.12.2015 13:01:54.282 ')
global noldusReceivedFromSender # this is used for debugging. Until actual data is received, fake data is sent.
noldusReceivedFromSender=False
global noldusSocketRunning
noldusSocketRunning = True
global noldusDataLocalFileLogging
noldusDataLocalFileLogging=True
global noldusDataLocalFileLoggingFileName
noldusDataLocalFileLoggingFileName='noldusLog_'+runTimestamp+'.log'
global wearSocketRunning
wearSocketRunning = True
global wearSocketAddress
# !!! If the correct IP is obtained automatically, leave as-is.
# Otherwise, use the commented IP hardcoding line below.
#wearSocketAddress="192.168.81.186"
wearSocketAddress=socket.gethostbyname(socket.gethostname())
global wearSocketPort
wearSocketPort=10005
global wearReceivedEventCounter
wearReceivedEventCounter=0
wearServerHostRoute='/wear'
global receivedWearMessage
receivedWearMessage=[]
#fake data for debugging
receivedWearMessage.append(json.loads('{"timestamp":"02.02.2016 09:36:55.0860","type":"accel","z":9.594614028930664,"y":0.1405097097158432,"x":0.07721300423145294}'))
receivedWearMessage.append(json.loads('{"timestamp":"02.02.2016 09:36:55.0930","type":"accel","z":9.592219352722168,"y":0.15487492084503174,"x":0.06763619929552078}'))
global wearReceivedFromSender # this is used for debugging. Until actual data is received, fake data is sent.
wearReceivedFromSender=False
global wearDataLocalFileLogging
wearDataLocalFileLogging=True
global wearDataLocalFileLoggingFileName
wearDataLocalFileLoggingFileName='wearLog_'+runTimestamp+'.log'
#configuration of http server serving json
#serverHostIP = '192.168.81.76'
#serverHostIP = 'localhost'
serverHostIP = '127.0.0.1'
serverHostPort=8080
#call to this url clears all data buffers.
emptyDataServerHostRoute='/emptyData'
#global tobiiEyeTrackerServerHostRoute
#tobiiEyeTrackerServerHostRoute='/facereader'
# parses a NOLDUS string into a compatible JSON object (by David Brvar)
def parseNoldus(x):
    # so that JavaScript can convert the strings to floats more easily (it needs a decimal point)
    zamenjajVseVejiceSPiko = re.compile('\,')
    # compiled here and not in the for loop - for performance!
    parsajVJSONObjekt = re.compile(ur'(?:(DetailedLog) ([^ ]+ [^ ]+)|(\b[A-Z][A-Za-z -]+?) : ((?:(?:-?[\d.]+)(?: - -?[\d.]+)*|(?:(?:[A-Z ]+\b|[A-Za-z]+)))))(?:$| )')
    data = []
    # process each individual element in the list
    for i in x:
        result = {}  # fresh dict per element (was created once outside the loop, so every list entry aliased the same dict)
        temp = zamenjajVseVejiceSPiko.sub(".", i)
        # collect all matches into an array
        temp = re.findall(parsajVJSONObjekt, temp)
        # process each match separately; matches are of type <type 'tuple'>
        for match in temp:
            # dispatch according to how the regex returns the matches
            if match[0]:
                result[match[0]] = match[1]
            else:
                result[match[2]] = match[3]
        data.append(result)
    return data
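# Example (a sketch, using a prefix of the fake message defined above):
#   parseNoldus(['DetailedLog 18.11.2015 14:41:35.299 Neutral : 0,5704 Happy : 0,6698'])
#   -> [{'DetailedLog': '18.11.2015 14:41:35.299', 'Neutral': '0.5704', 'Happy': '0.6698'}]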
print 'LUCAMI gateway Noldus module starting.'
# Establishes a server socket for incoming connections from the .NET application.
def listenNoldusFaceReaderSocketFromDotNET():
    # will run until this is True
    global noldusSocketRunning
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bind the socket to the port
    global faceReaderDotNetAddress
    global faceReaderDotNeAppPort
    server_address = (faceReaderDotNetAddress, faceReaderDotNeAppPort)
    print >>sys.stderr, 'Server socket for incoming data: starting up on %s port %s' % server_address
    sock.bind(server_address)
    global noldusDataLocalFileLogging
    #noldusDataLocalFileLogging=True
    global noldusDataLocalFileLoggingFileName
    #noldusDataLocalFileLoggingFileName='noldusLog_'+runTimestamp+'.log'
    # Listen for incoming connections
    sock.listen(1)
    while noldusSocketRunning:
        # Wait for a connection
        print >>sys.stderr, 'Server socket for incoming data: waiting for a connection'
        connection, client_address = sock.accept()
        try:
            print >>sys.stderr, 'Server socket for incoming data: connection from', client_address
            # Receive the data in small chunks and retransmit it
            while noldusSocketRunning:
                global noldusFaceReaderReceivedEventCounter
                noldusFaceReaderReceivedEventCounter = noldusFaceReaderReceivedEventCounter + 1
                global receivedNoldusMessage
                global noldusReceivedFromSender
                data = connection.recv(10000)  # kinda ugly hack. If the incoming message is longer, this will spill.
                #receivedNoldusMessage = data
                if (not noldusReceivedFromSender):  # clear the fake data
                    print 'Got first message from Noldus. Switching to real data mode.'
                    noldusReceivedFromSender = True
                    receivedNoldusMessage = []
                receivedNoldusMessage.append(data)
                if noldusDataLocalFileLogging:
                    f = open(noldusDataLocalFileLoggingFileName, 'a')
                    f.write(data)
                    f.close()
                #print >>sys.stderr, 'received "%s"' % data
                if data:
                    print >>sys.stderr, 'Server socket for incoming data: sending data back to the client'
                    connection.sendall(data)
                else:
                    print >>sys.stderr, 'Server socket for incoming data: no more data from', client_address
                    break
        finally:
            # Clean up the connection
            connection.shutdown(1)
            connection.close()
            print 'Closing incoming data socket connection.'
    print 'Finished server socket for incoming data thread'
# start listening socket in thread
# About threads: http://docs.python.org/2/library/threading.html#thread-objects
listeningNoldusSocketThread = threading.Thread(target=listenNoldusFaceReaderSocketFromDotNET, args=())
listeningNoldusSocketThread.start()
print 'LUCAMI gateway Tobii module starting.'
# Establishes a server socket for incoming connections from the .NET application.
def listenTobiiSocketFromDotNET():
    # will run until this is True
    global tobiiSocketRunning
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bind the socket to the port
    global tobiiDotNetAddress
    global tobiiDotNeAppPort
    server_address = (tobiiDotNetAddress, tobiiDotNeAppPort)
    print >>sys.stderr, 'Server socket for incoming Tobii data: starting up on %s port %s' % server_address
    sock.bind(server_address)
    global tobiiDataLocalFileLogging
    #tobiiDataLocalFileLogging=True
    global tobiiDataLocalFileLoggingFileName
    #tobiiDataLocalFileLoggingFileName='tobiiLog_'+runTimestamp+'.log'
    # Listen for incoming connections
    sock.listen(1)
    while tobiiSocketRunning:
        # Wait for a connection
        print >>sys.stderr, 'Server socket for incoming Tobii data: waiting for a connection'
        connection, client_address = sock.accept()
        try:
            print >>sys.stderr, 'Server socket for incoming Tobii data: connection from', client_address
            # Receive the data in small chunks and retransmit it
            while tobiiSocketRunning:
                global receivedTobiiMessage
                global tobiiEyeTrackerReceivedEventCounter
                tobiiEyeTrackerReceivedEventCounter = tobiiEyeTrackerReceivedEventCounter + 1
                data = connection.recv(10000)  # kinda ugly hack. If the incoming message is longer, this will spill.
                #receivedTobiiMessage = data
                global tobiiReceivedFromSender
                if (not tobiiReceivedFromSender):  # clear the fake data
                    print 'Got first message from Tobii. Switching to real data mode.'
                    tobiiReceivedFromSender = True
                    receivedTobiiMessage = []
                try:
                    if tobiiDataLocalFileLogging:
                        f = open(tobiiDataLocalFileLoggingFileName, 'a')
                        f.write(data)
                        f.close()
                    receivedTobiiMessage.append(json.loads(data))
                except:
                    print 'Exception while parsing received JSON message from Tobii'
                    if tobiiDataLocalFileLogging:
                        f = open(tobiiDataLocalFileLoggingFileName, 'a')
                        f.write('Exception while parsing received JSON message:')
                        f.write(data)
                        f.close()
                #receivedTobiiMessage.append(data)
                #print >>sys.stderr, 'received "%s"' % data
                if data:
                    print >>sys.stderr, 'Server socket for incoming Tobii data: sending data back to the client'
                    connection.sendall(data)
                else:
                    print >>sys.stderr, 'Server socket for incoming Tobii data: no more data from', client_address
                    break
        finally:
            # Clean up the connection
            connection.shutdown(1)
            connection.close()
            print 'Closing incoming Tobii data socket connection.'
    print 'Finished server socket for incoming Tobii data thread'
# start listening socket in thread
# About threads: http://docs.python.org/2/library/threading.html#thread-objects
listeningTobiiSocketThread = threading.Thread(target=listenTobiiSocketFromDotNET, args=())
#print 'SOCKET SERVER NOT STARTED!!!!! Uncomment to start.'
listeningTobiiSocketThread.start()
print 'LUCAMI gateway Wear module starting.'
def listenWearSocket():
    # will run until this is True
    global wearSocketRunning
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bind the socket to the port
    global wearSocketAddress
    global wearSocketPort
    server_address = (wearSocketAddress, wearSocketPort)
    print >>sys.stderr, 'Server socket for incoming data: starting up on %s port %s' % server_address
    print >>sys.stderr, 'If you see an error creating the socket, please hardcode the IP in the wearSocketAddress variable.'
    sock.bind(server_address)
    global wearDataLocalFileLogging
    #wearDataLocalFileLogging=True
    global wearDataLocalFileLoggingFileName
    #wearDataLocalFileLoggingFileName='wearLog.log'
    # Listen for incoming connections
    sock.listen(1)
    while wearSocketRunning:
        # Wait for a connection
        print >>sys.stderr, 'Server socket for incoming data: waiting for a connection'
        connection, client_address = sock.accept()
        try:
            print >>sys.stderr, 'Server socket for incoming data: connection from', client_address
            # Receive the data in small chunks and retransmit it
            while wearSocketRunning:
                global wearReceivedEventCounter
                wearReceivedEventCounter = wearReceivedEventCounter + 1
                global receivedWearMessage
                global wearReceivedFromSender
                wearReceivedFromSender = True
                data = connection.recv(10000)  # kinda ugly hack. If the incoming message is longer, this will spill.
                if wearDataLocalFileLogging:
                    f = open(wearDataLocalFileLoggingFileName, 'a')
                    f.write(data)
                    f.close()
                #fata ='{"timestamp":1454328042855,"z":1.1598410606384277,"y":1.7661726474761963,"x":-9.566631317138672}'
                #receivedWearMessage.append(json.loads(fata))
                try:
                    recv = json.loads(data)
                    receivedWearMessage.append(recv)
                except:
                    print 'Exception while parsing received JSON message from Wear.'
                    if wearDataLocalFileLogging:
                        f = open(wearDataLocalFileLoggingFileName, 'a')
                        f.write('Exception while parsing received JSON message:')
                        f.write(data)
                        f.close()
                print >>sys.stderr, 'received "%s"' % data
                if data:
                    #print >>sys.stderr, 'Server socket for incoming data: sending data back to the client'
                    #connection.sendall(data)
                    print >>sys.stderr, 'Since lazy Marko did not implement it yet, NOT sending received data back to Android.'
                else:
                    print >>sys.stderr, 'Server socket for incoming data: no more data from', client_address
                    break
        finally:
            # Clean up the connection
            connection.shutdown(1)
            connection.close()
            print 'Closing incoming data socket connection.'
    print 'Finished server socket for incoming data thread'
# start listening socket in thread
# About threads: http://docs.python.org/2/library/threading.html#thread-objects
listenWearSocketThread = threading.Thread(target=listenWearSocket, args=())
listenWearSocketThread.start()
# In order to enable CORS - .JS getting data from another web address
# sourced from: http://stackoverflow.com/questions/17262170/bottle-py-enabling-cors-for-jquery-ajax-requests
class EnableCors(object):
name = 'enable_cors'
api = 2
def apply(self, fn, context):
def _enable_cors(*args, **kwargs):
# set CORS headers
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
if bottle.request.method != 'OPTIONS':
# actual request; reply with the actual response
return fn(*args, **kwargs)
return _enable_cors
app = bottle.app()
#@app.route('/cors', method=['OPTIONS', 'GET'])
@app.route(tobiiEyeTrackerServerHostRoute, method=['OPTIONS', 'GET'])
def tobiiEyeTrackerResponder():
response.headers['Content-type'] = 'application/json'
#i=i+1
global receivedTobiiMessage
global tobiiEyeTrackerReceivedEventCounter
data = {}
#data['mykey']='myvalue'
data['receivedEventCounter']=tobiiEyeTrackerReceivedEventCounter
data['TobiiDetailedLog'] = receivedTobiiMessage
json_data = json.dumps(data)
#clear message buffer
if tobiiReceivedFromSender:
receivedTobiiMessage=[]
else:
        print 'Sending emulated response for Tobii, since no data has arrived yet from Tobii.'
#return '[1]'
return json_data
@app.route(noldusFaceReaderServerHostRoute, method=['OPTIONS', 'GET'])
def noldusFaceReaderDataResponder():
response.headers['Content-type'] = 'application/json'
#i=i+1
global noldusFaceReaderReceivedEventCounter
global receivedNoldusMessage
data = {}
#data['mykey']='myvalue'
data['receivedEventCounter']=noldusFaceReaderReceivedEventCounter
data['NoldusDetailedLog'] = parseNoldus(receivedNoldusMessage)
json_data = json.dumps(data)
#clear message buffer
if noldusReceivedFromSender:
receivedNoldusMessage=[]
else:
        print 'Sending emulated response for Noldus, since no data has arrived yet from Noldus.'
#return '[1]'
return json_data
@app.route(wearServerHostRoute, method=['OPTIONS', 'GET'])
def wearDataResponder():
response.headers['Content-type'] = 'application/json'
#i=i+1
global wearReceivedEventCounter
global receivedWearMessage
data = {}
#data['mykey']='myvalue'
data['receivedEventCounter']=wearReceivedEventCounter
data['wearLog'] = receivedWearMessage
json_data = json.dumps(data)
#clear message buffer
if wearReceivedFromSender:
receivedWearMessage=[]
else:
        print 'Sending emulated response for Wear, since no data has arrived yet from Wear.'
#return '[1]'
return json_data
# clears all data buffers
@app.route(emptyDataServerHostRoute, method=['OPTIONS', 'GET'])
def emptyDataDataResponder():
response.headers['Content-type'] = 'application/json'
#i=i+1
#global wearReceivedEventCounter
global receivedWearMessage
global receivedNoldusMessage
global receivedTobiiMessage
#data = {}
#data['mykey']='myvalue'
#data['receivedEventCounter']=wearReceivedEventCounter
#data['wearLog'] = receivedWearMessage
#json_data = json.dumps(data)
#clear message buffer
#if wearReceivedFromSender:
# receivedWearMessage=[]
#else:
# print 'Sending emulated response for Wear, since no data arrived jet from Noldus.'
print 'Clearing all data buffers.'
receivedWearMessage=[]
receivedNoldusMessage=[]
receivedTobiiMessage=[]
return '[1]'
#return json_data
app.install(EnableCors())
print 'Starting http server on http://',serverHostIP,':',serverHostPort,tobiiEyeTrackerServerHostRoute
app.run(host=serverHostIP, port=serverHostPort)
print 'Cleanup: http server stopped.'
print 'Cleanup: Stopping incoming Tobii data socket server.'
tobiiSocketRunning = False
# send one last message to the socket thread so it disconnects. Ugly hack; only works while a client is connected on the other side.
tobiiClientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tobiiServer_address = (tobiiDotNetAddress, tobiiDotNeAppPort)
tobiiClientSock.connect(tobiiServer_address)
tobiiClientSock.sendall('Die!')
tobiiClientSock.close()
print 'Cleanup: Stopping incoming Noldus data socket server.'
noldusSocketRunning = False
# send one last message to the socket thread so it disconnects. Ugly hack; only works while a client is connected on the other side.
noldusClientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
noldusServer_address = (faceReaderDotNetAddress, faceReaderDotNeAppPort)
noldusClientSock.connect(noldusServer_address)
noldusClientSock.sendall('Die!')
noldusClientSock.close()
print 'Cleanup: Stopping incoming Wear data socket server.'
wearSocketRunning = False
# send one last message to the socket thread so it disconnects. Ugly hack; only works while a client is connected on the other side.
wearClientSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
wearServer_address = (wearSocketAddress, wearSocketPort)
wearClientSock.connect(wearServer_address)
wearClientSock.sendall('Die!')
wearClientSock.close()
# this should finish the program. Currently it does not work, since the socket does not disconnect.
listeningTobiiSocketThread.join()
listeningNoldusSocketThread.join()
listenWearSocketThread.join()
|
[
"marko.meza@fe.uni-lj.si"
] |
marko.meza@fe.uni-lj.si
|
fe8a5ce65f8abb9d4813a5c15a05656484bc8427
|
8871b6aea062ccab28aa66ff16802c44bf9a951b
|
/spellcheckMain-Unigram(Multi Run).py
|
82d78ae28c77a04bc6a932f4f67cc8b3792df775
|
[] |
no_license
|
mhtarek/Auto-correction-of-English-to-Bengali-Transliteration-System
|
ba5442f9a7b839367c88dd3afde4f6c3c933b272
|
f89366813bac31ca3845aa653de628abfe37b453
|
refs/heads/master
| 2020-05-24T02:37:33.331877
| 2019-05-16T15:47:24
| 2019-05-16T15:47:24
| 187,057,183
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,219
|
py
|
from fuzzywuzzy import fuzz
import csv
from pyavrophonetic import avro
import WordDictionary.wordDictionaryCreatorAndFilteredWords as dictionary
import Unigram.uniGramBangla as unigram
from BanglaWordSort.bangla_sort import bangla
import re
from sklearn.metrics import accuracy_score
from paragraphLevel import multiSelection
#Globally Making a word dictionary
WordDictionary = dictionary.makeDictionary('./files/data.csv')
WordDictionary = sorted(WordDictionary, key=bangla)
#print(WordDictionary)
def correct(inputLine):
string_words = inputLine.split()
for i in range(len(string_words)):
if is_number(string_words[i]):
continue
suggestions = []
value =[]
for name in WordDictionary:
if string_words[i] == name:
value.append(fuzz.ratio(string_words[i], name))
suggestions.append(name)
elif fuzz.ratio(string_words[i], name) >= 65:
value.append(fuzz.ratio(string_words[i], name))
suggestions.append(name)
print(suggestions)
print(value)
if len(suggestions) > 0:
maxPer = 0
cor = ''
for name in suggestions:
percent = fuzz.ratio(string_words[i], name)
if percent > maxPer:
if checkSameSuggestion(value):
newList = wordWithMaxScore(value,suggestions)
myUniGram = unigram.createUnigram('./files/data.csv')
cor = unigram.findMax(newList,myUniGram)
else:
cor = name
maxPer = percent
print(cor,'---',maxPer,'\n')
string_words[i] = cor
return " ".join(string_words)
def wordWithMaxScore(value,string):
valueLi = []
newStr = []
for i in range(len(value)):
if max(value) == value[i]:
valueLi.append(i)
for i in range(len(valueLi)):
newStr.append(string[valueLi[i]])
return newStr
def is_number(s):
try:
float(s)
except ValueError:
try:
complex(s)
except ValueError:
return False
return True
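# Examples: is_number("3.5") -> True, is_number("2+3j") -> True, is_number("abc") -> False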
def checkSameSuggestion(wordList):
same = False
count = 0
for i in range(len(wordList)):
if max(wordList) == wordList[i]:
count+=1
if count>1:
same = True
if same == True:
return 1
else:
return 0
def createTestDataList(fileLocation):
with open (fileLocation, "r",encoding = 'utf-8') as myfile:
banglish_sentence=myfile.readlines()
testData = banglish_sentence[0].split(' ')
testData.pop(0)
return testData
def checkFloat(string):
string1 = string.split(" ")
    oldFloat = re.findall(r"[-+]?\d+।\d+", string)  # raw string; the old '[-|+]' class also matched a literal '|'
c = []
for i in range(len(oldFloat)):
for j in range(len(string1)):
if oldFloat[i] == string1[j]:
c.append(j)
for i in range(len(oldFloat)):
oldFloat[i] = oldFloat[i].replace('।','.')
for i in range(len(oldFloat)):
string1[c[i]] = oldFloat[i]
return " ".join(string1)
def main():
while(True):
command = input('y/n:')
if command != 'y':
break
fileName = input('Enter file name: ')
testFileLocation = "./True Value/test data/"+fileName+".txt"
with open (testFileLocation, "r",encoding = 'utf-8') as myfile:
banglish_sentence=myfile.readlines()
        # readlines() returns a list and the test files are single-line, so parse line 0
        string_to_be_checked = checkFloat(avro.parse(banglish_sentence[0]))
        predicted_sentence = correct(string_to_be_checked)
        print("\nConverted sentence: ", string_to_be_checked, '\n')
        print(predicted_sentence)
        trueFileLocation = "./True Value/true data/" + fileName + "True.txt"
        trueTestData = createTestDataList(trueFileLocation)
        predictedData = predicted_sentence.split(' ')
print(trueTestData)
print(predictedData)
print("Total Number Of Test Word: ",len(predictedData))
errorData = 0
for i in range(len(predictedData)):
if predictedData[i] == trueTestData[i]:
continue
else:
errorData += 1
print("Corrected Word: ", len(predictedData) - errorData)
print("Failed To Correct Word: ",errorData)
accuracy = accuracy_score(trueTestData, predictedData)
print("Accuracy: ",accuracy*100)
main()
|
[
"noreply@github.com"
] |
mhtarek.noreply@github.com
|
596de5813ab5b9e95e648acb240a8c48db3664af
|
91b388a256059c0b0a0b785e90d9ec01fa6e0546
|
/back/schemes/req.py
|
63d0bd8e02394a734e2380891bcc83542659051a
|
[] |
no_license
|
Leva-kleva/collateral-assessment
|
f058b18451098d5752fcbbacaef019fe72355f10
|
8ef1a2b12e2546ee60d5ed1dd556e183425aec0e
|
refs/heads/master
| 2023-08-14T14:03:13.769472
| 2021-09-22T23:29:37
| 2021-09-22T23:29:37
| 409,383,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
object = {
"type": "object",
"properties": {
"object": {"type": "array"}
},
"required": ["object"]
}
post = {'car': {
"type": "object",
"properties": {
"Engine": {"type": "array"},
"Mileage": {"type": "array"},
"Drive": {"type": "array"},
"Year": {"type": "array"},
"Box": {"type": "array"},
"Engine_volume": {"type": "array"},
"mark": {"type": "array"},
"Generation": {"type": "array"},
"Model": {"type": "array"},
"Torque": {"type": "array"},
},
"required": ["Engine", "Mileage", "Drive", "Year",
"Box", "Engine_volume", "mark",
"Generation", "Model", "Torque",]
},
'flat': {
"type": "object",
"properties": {
"wallsMaterial": {"type": "array"},
"floorNumber": {"type": "array"},
"floorsTotal": {"type": "array"},
"totalArea": {"type": "array"},
"kitchenArea": {"type": "array"},
"latitude": {"type": "array"},
"longitude": {"type": "array"},
},
"required": ["wallsMaterial", "floorNumber", "floorsTotal",
"totalArea", "kitchenArea", "latitude", "longitude"]
}}
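# A minimal validation sketch (assumes the external `jsonschema` package, which this
# module does not import; the instance values are hypothetical):
# from jsonschema import validate
# validate(instance={"object": []}, schema=object)        # the module-level `object` schema above
# validate(instance=car_request_body, schema=post['car']) # car_request_body is a hypothetical dict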
|
[
"lev_vladimirovich1999@mail.ru"
] |
lev_vladimirovich1999@mail.ru
|
2daff4ac8abe0107b92f180d34129ad8b3c1a994
|
6d4ffdc17372c2006f29fcc216e407c28ec69a1c
|
/binarytree.py
|
46bf60986bd9e63a1f25c05c4c9a4564ccb9b25a
|
[] |
no_license
|
fallaciousreasoning/study
|
869abbcb37f9a9046b95190bf22cd9fa40cdbfe0
|
0ff28f47bbf7a86cedb4c40da7e529af31c09dea
|
refs/heads/master
| 2020-03-18T03:55:12.696110
| 2018-05-28T10:55:12
| 2018-05-28T10:55:12
| 134,262,365
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
class Node:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
class BinaryTree:
def __init__(self):
self.root = None
    def insert(self, value):
        if not self.root:
            self.root = Node(value)
            return  # without this return, the first value was inserted twice (as root and again as a child)
        at = self.find_parent(self.root, value)
        if value < at.value:
            at.left = Node(value)
        else:
            at.right = Node(value)
def find_parent(self, parent, value):
if value < parent.value:
if not parent.left:
return parent
return self.find_parent(parent.left, value)
else:
if not parent.right:
return parent
return self.find_parent(parent.right, value)
def contains(self, value, base=''):
if base == '':
base = self.root
if base is None:
return None
if base.value == value:
return base
if value < base.value:
return self.contains(value, base.left)
if value > base.value:
return self.contains(value, base.right)
if __name__ == '__main__':
tree = BinaryTree()
items = [7,1,9,0,30,100,10]
for item in items:
tree.insert(item)
for item in items:
print(tree.contains(item) is not None)
not_in = [-1, 22, 77, 72, 4]
for item in not_in:
print(tree.contains(item) is not None)
|
[
"jay.harris@outlook.co.nz"
] |
jay.harris@outlook.co.nz
|
adce0ea723e9e20cd3f7fe094726d6685a5004fe
|
a1fe5eda46ac5842c0da900300206463103f7d3f
|
/tests/game/test_level.py
|
978935aa24c0c0fe1b12fb9df7f912c19632d300
|
[] |
no_license
|
LiveInInformatic/noobhack
|
c25801c134c398635f0589a16bbc49c58057a241
|
ffb4901202ba1c6493edb411856b70c3ec78a53a
|
refs/heads/master
| 2022-01-27T02:58:55.508133
| 2014-09-29T02:34:27
| 2014-09-29T02:34:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
import sys
from noobhack.game.mapping import Level
from tests.utils import level_chain
def test_changing_branches_changes_my_branch():
l = Level(1, "main")
l.change_branch_to("mines")
assert l.branch == "mines"
def test_changing_branches_changes_my_childrens_branch():
levels = level_chain(3, "main")
levels[0].change_branch_to("mines")
assert all(l.branch == "mines" for l in levels)
def test_changing_branches_changes_only_those_levels_that_are_below_the_branch_that_changed():
levels = level_chain(5, "main")
levels[1].change_branch_to("mines")
assert all(l.branch == "mines" for l in levels if l.dlvl > 1)
assert levels[0].branch == "main"
def test_level_with_branch_has_a_branch():
levels = level_chain(2, "main")
levels[1].change_branch_to("mines")
first = levels[0]
assert first.has_a_branch() == True
assert first.branches() == [levels[1]]
def test_level_with_no_children_doesnt_have_a_branch():
l = Level(1, "main")
assert l.has_a_branch() == False
def test_level_with_only_one_child_that_doesnt_have_a_branch_has_no_branch():
levels = level_chain(2, "main")
first = levels[0]
assert first.has_a_branch() == False
def test_changing_branch_to_sokoban_doesnt_change_children_branches():
levels = level_chain(5, "main")
sokoban = Level(3)
levels[4].add_stairs(sokoban, (3, 3))
sokoban.add_stairs(levels[4], (4, 4))
sokoban.change_branch_to("sokoban")
assert sokoban.branch == "sokoban"
assert levels[4].branch == "main"
def test_is_a_junction_when_there_are_two_children():
main = level_chain(2, "main")
mines = level_chain(2, "mines", start_at=2)
main[0].add_stairs(mines[0], (2, 2))
mines[0].add_stairs(main[0], (1, 1))
assert main[0].is_a_junction()
def test_is_not_a_junction_when_there_is_only_one_child():
main = level_chain(2, "main")
assert not main[0].is_a_junction()
def test_a_level_with_a_parent_and_a_child_is_not_a_junction():
main = level_chain(3, "main")
assert not main[1].is_a_junction()
def test_a_level_with_two_parents_is_a_junction():
main = level_chain(3, "main")
sokoban = level_chain(2, "sokoban")
main[2].add_stairs(sokoban[-1], (1, 1))
sokoban[-1].add_stairs(main[2], (2, 2))
assert main[2].is_a_junction()
|
[
"sam@ifdown.net"
] |
sam@ifdown.net
|
d96348b51fa49b45f200a931198785d61b3d84ea
|
3de1eea85c194ad21ea36d407aa904674bd6377c
|
/code/libs/dvbobjects/SQL/db_connect.py
|
07066177f4d514d53db8e0709badb2f29ea96183
|
[] |
no_license
|
okassov/dvbcastlib
|
bcd05056824521e528bf84da6d6447502cd97a5b
|
dc1c0508f2579fedfa8e4a4ebc6eabcc490b2be6
|
refs/heads/master
| 2022-03-25T18:35:11.688191
| 2019-12-10T11:15:59
| 2019-12-10T11:15:59
| 221,779,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
import psycopg2
def connect():
    '''Connects to the PostgreSQL DB and returns the connection (or None on failure).'''
    conn = None
    try:
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(
            host="192.168.93.128",
            database="DVBCAST",
            user="root",
            password="root")
        return conn
    except psycopg2.Error as error:
        print("Error:", error)
|
[
"34717508+Marik92@users.noreply.github.com"
] |
34717508+Marik92@users.noreply.github.com
|
5d25b4e3543485e5e6afdc4794062a2c728c6f08
|
d4b03bfa0ea60e68d79621057a456981f8d42020
|
/PBD24.py
|
6cc9655f7a2323ae79e1c084f3df4ce680636600
|
[] |
no_license
|
gitmocho/PBD-24
|
a983635180ab088f2dcbfbc33a0570c73b7505ab
|
e1e24815bdf906f8218ce4006c38ad56e5217c86
|
refs/heads/master
| 2021-01-10T10:47:24.359377
| 2016-04-02T05:21:32
| 2016-04-02T05:21:32
| 55,277,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 602
|
py
|
#description :PBD-24 Create a dictionary containing min 15 items/results.
#author :MC
#date :02/04/2014
#version :0.1
#usage :python pyscript.py
#notes :
#python_version :3.4.2
countriesOfTheWorld = {
'Afghanistan':'AFG',
'Aland Islands':'ALA',
'Albania':'ALB',
'Algeria':'DZA',
'American Samoa':'ASM',
'Andorra':'AND',
'Angola':'AGO',
'Anguilla':'AIA',
'Antarctica':'ATA',
'Antigua and Barbuda':'ATG',
'Argentina':'ARG',
'Armenia':'ARM',
'Aruba':'ABW',
'Australia':'AUS',
'Austria':'AUT',
}
# print(countriesOfTheWorld['American Samoa'])
|
[
"mocho@live.co.uk"
] |
mocho@live.co.uk
|
818bc283f0e50ed4abec2563b11b85feb754fa5d
|
f7eb63a39e3c32963dc69964fbf3d98381bef9df
|
/kiosk/kiosk-master/kiosk/kiosk/settings.py
|
955fc186210dcf854ab75f8ca52169e0d57ec1c2
|
[] |
no_license
|
aadwaysinha/Projects
|
0219687d1bef6bb9b24b0f756380f1c5df44fefa
|
cdf1de1943b325c50bf6ca5c1234d1f8896f9bc4
|
refs/heads/master
| 2021-05-13T17:07:16.917736
| 2019-04-20T06:02:43
| 2019-04-20T06:02:43
| 116,812,823
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,380
|
py
|
"""
Django settings for kiosk project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r6r!t5ses8urlivz67y49b5#n8fpnwz7i($fr6lc6g$4id-6zc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'voice',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kiosk.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kiosk.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [STATIC_DIR]  # Django reads STATICFILES_DIRS (plural); the singular name was silently ignored
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = MEDIA_DIR
|
[
"aadwaysinha@gmail.com"
] |
aadwaysinha@gmail.com
|
48d57c5d4b3f4e5a953cf1e4a48871fbce14de64
|
e466e3bb0e20754cf8a56dabdc3760f5b97e1e39
|
/api_interface_demo/swagger_server/models/asset_property_double.py
|
521f969b85b7538cba67d098a581aad321a408dd
|
[] |
no_license
|
JoaquinRives/AWS-Coolgreen-Demo1
|
fe84d4df7cc1ab7d9e21e001e8706fff2e879270
|
1c92b3a475d9470fe4fda2d72af802429433f578
|
refs/heads/main
| 2023-08-14T14:29:19.730542
| 2021-10-01T07:47:00
| 2021-10-01T07:47:00
| 385,243,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,318
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class AssetPropertyDouble(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, property_name: str=None, asset_id: str=None, integer_value: float=None): # noqa: E501
"""AssetPropertyDouble - a model defined in Swagger
:param property_name: The property_name of this AssetPropertyDouble. # noqa: E501
:type property_name: str
:param asset_id: The asset_id of this AssetPropertyDouble. # noqa: E501
:type asset_id: str
:param integer_value: The integer_value of this AssetPropertyDouble. # noqa: E501
:type integer_value: float
"""
self.swagger_types = {
'property_name': str,
'asset_id': str,
'integer_value': float
}
self.attribute_map = {
'property_name': 'propertyName',
'asset_id': 'assetId',
'integer_value': 'integerValue'
}
self._property_name = property_name
self._asset_id = asset_id
self._integer_value = integer_value
@classmethod
def from_dict(cls, dikt) -> 'AssetPropertyDouble':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The AssetPropertyDouble of this AssetPropertyDouble. # noqa: E501
:rtype: AssetPropertyDouble
"""
return util.deserialize_model(dikt, cls)
@property
def property_name(self) -> str:
"""Gets the property_name of this AssetPropertyDouble.
:return: The property_name of this AssetPropertyDouble.
:rtype: str
"""
return self._property_name
@property_name.setter
def property_name(self, property_name: str):
"""Sets the property_name of this AssetPropertyDouble.
:param property_name: The property_name of this AssetPropertyDouble.
:type property_name: str
"""
self._property_name = property_name
@property
def asset_id(self) -> str:
"""Gets the asset_id of this AssetPropertyDouble.
:return: The asset_id of this AssetPropertyDouble.
:rtype: str
"""
return self._asset_id
@asset_id.setter
def asset_id(self, asset_id: str):
"""Sets the asset_id of this AssetPropertyDouble.
:param asset_id: The asset_id of this AssetPropertyDouble.
:type asset_id: str
"""
self._asset_id = asset_id
@property
def integer_value(self) -> float:
"""Gets the integer_value of this AssetPropertyDouble.
:return: The integer_value of this AssetPropertyDouble.
:rtype: float
"""
return self._integer_value
@integer_value.setter
def integer_value(self, integer_value: float):
"""Sets the integer_value of this AssetPropertyDouble.
:param integer_value: The integer_value of this AssetPropertyDouble.
:type integer_value: float
"""
self._integer_value = integer_value
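# Round-trip sketch (hypothetical values; from_dict expects the wire names from attribute_map):
# prop = AssetPropertyDouble.from_dict(
#     {'propertyName': 'temperature', 'assetId': 'asset-1', 'integerValue': 21.5})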
|
[
"joaquin.rives01@gmail.com"
] |
joaquin.rives01@gmail.com
|
c110f5e17b898aa4be7be7dc53bd72bcac371767
|
5fefe6ae720287bd56c55c51077a746e4467d48a
|
/PyLab1/task16Lab1.py
|
23f7aefacaf0fe72221f1c786c457bf365303cd7
|
[] |
no_license
|
nana0calm/PythonLabs
|
d55fcf4b235cfc68979d86765b17a32e4f83ee22
|
c4b4905e417b912ca27cd8ad2748ba3c0e34558c
|
refs/heads/master
| 2022-09-08T04:43:07.185543
| 2020-05-27T21:19:56
| 2020-05-27T21:19:56
| 267,428,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
# Write a script that, based on a list of 16 football team names, randomly
# forms 4 groups of 4 teams and prints to the console the schedule of all
# games (games take place on Wednesdays, once every 2 weeks, starting on
# September 14 of the current year).
# Game dates must be printed in the format "14/09/2016, 22:45".
# Use the random and itertools modules.
from random import shuffle  # for shuffling the teams
import itertools
from datetime import timedelta, datetime

date_format = "%d/%m/%Y, %H:%M"  # renamed from `format` to avoid shadowing the builtin
start = datetime.strptime("14/09/2020, 22:45", date_format)
football_teams=['Наполи','Арсенал','Зенит','Гамбург',
'Барселона','Ювентус','Милан','Ливерпуль',
'Марсель','Динамо','Спартак','Шахтёр',
'Герта','Динамо','Рубин','Днепр']
shuffle(football_teams)
football_teams=[football_teams[i*4: i*4+4] for i in range(0,4)]
games=[]
for g in football_teams:
games.append([j for j in itertools.combinations(g,2)])
for i in range(0, 6):
    print("Game:", i + 1)
    print(start.strftime(date_format))
    print(games[0][i])
    print(games[1][i])
    print(games[2][i])
    print(games[3][i])
    print('\n')
    start = start + timedelta(days=14)
print("Чемпионат закончен!")
print(start.strftime(format))
|
[
"nastyazaybert.00@gmail.com"
] |
nastyazaybert.00@gmail.com
|
05304047d2f96132cbc17f0cf618c38a71c9507d
|
990e80b7209b90740730708561345c119fb53334
|
/multiclassLearning.py
|
a64122291bf07fbc0dbe3c2361f6a2cdddb8515f
|
[] |
no_license
|
Archith09/Sentiment-Analysis-of-Movie-Reviews
|
3ad2d51dd3ecd1393277d1503814d25794f34371
|
6a148802fee156e628c48ced27264e08f72ad183
|
refs/heads/master
| 2021-01-18T04:00:00.064499
| 2017-03-22T01:43:43
| 2017-03-22T01:43:43
| 85,772,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
from sklearn.grid_search import GridSearchCV
from pre_processing import PreProcess
from sklearn.multiclass import OneVsRestClassifier
# from sklearn.multiclass import OutputCodeClassifier
from sklearn.svm import LinearSVC
from sklearn import metrics
from sklearn.cross_validation import cross_val_score
preprocess = PreProcess("data/train", "data/test")
preprocess.read_train_test_data()
preprocess.getTfIdf()
lr_clf = OneVsRestClassifier(LinearSVC(random_state=0))
# lr_clf = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0)
# scores = cross_val_score(lr_clf, preprocess.traintfIdf, preprocess.train_target, cv=3)
# print("the cross validated accuracy on training is " + str(scores))
# print("the cross validated accuracy(standard deviation) on training is: %0.4f (+/- %0.4f)" % (
# scores.mean(), scores.std() * 2))
lr_clf.fit(preprocess.traintfIdf, preprocess.train_target)
# finding the training and test predictions
train_pred_knn = lr_clf.predict(preprocess.traintfIdf)
test_pred_knn = lr_clf.predict(preprocess.testtfIdf)
lr_train_accuracy = metrics.accuracy_score(preprocess.train_target, train_pred_knn)
lr_test_accuracy = metrics.accuracy_score(preprocess.test_target, test_pred_knn)
lr_train_prec = metrics.precision_score(preprocess.train_target, train_pred_knn, average="macro")
lr_test_prec = metrics.precision_score(preprocess.test_target, test_pred_knn, average="macro")
lr_train_recall = metrics.recall_score(preprocess.train_target, train_pred_knn,average="macro")
lr_test_recall = metrics.recall_score(preprocess.test_target, test_pred_knn, average="macro")
print("Scores\t\t" + "Multiclass Learning")
print("Train Accuracy" + "\t" + str(lr_train_accuracy))
print("Test Accuracy" + "\t" + str(lr_test_accuracy))
print("Train Precision" + "\t" + str(lr_train_prec))
print("Test Precision" + "\t" + str(lr_test_prec))
print("Train Recall" + "\t" + str(lr_train_recall))
print("Test Recall" + "\t\t" + str(lr_test_recall))
|
[
"archith06@gmail.com"
] |
archith06@gmail.com
|
989517ef77bdde1d961aae1210789b7ea48f0091
|
950e7bb0213c3ad2bce96d1b66665534033ca256
|
/lab 3/Pattern lab 3/Pattern/online.py
|
db9a75a982937827cfa50a83d3335952f1588ef7
|
[
"Apache-2.0"
] |
permissive
|
nasim-aust/Pattern-Lab-Work
|
a7aa012605c5fb50e354b0d7ec1eec093369b098
|
5300f837c5e66b260f070d772d56c9646366adb6
|
refs/heads/master
| 2020-09-09T01:14:41.843530
| 2019-11-12T19:53:46
| 2019-11-12T19:53:46
| 221,299,343
| 9
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
list1 = [1, 2, 3, 4]
list2 = [5, 6, 7, 8]
# The original loop was broken (`for i in range[0,0] :len(list1)`); a minimal
# runnable guess at the intent, iterating over the list indices:
for i in range(0, len(list1)):
    print(list1[i], list2[i])
|
[
"nasim5153@gmail.com"
] |
nasim5153@gmail.com
|
62a5eb1aff48efc4a89b228708687605863779f8
|
567693120ae18bcc7bc3287c99fc9043f93854a4
|
/main.py
|
e029a3abed99dc1c02efcc82dd08697086080b91
|
[] |
no_license
|
GitinitSaurabh/BirthdayWishMailer
|
e6d3e69aff37b1b76beebd92c4a19fc35fdb521a
|
cc2860a3ff544ed7b15b575cd0151736b262356f
|
refs/heads/main
| 2023-03-01T13:12:18.568869
| 2021-02-10T00:44:55
| 2021-02-10T00:44:55
| 337,576,939
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
from datetime import datetime
import pandas as pd
import random
import smtplib
my_email = "your email here"
my_password = "your password here"
today = (datetime.now().month, datetime.now().day)
data = pd.read_csv("birthdays.csv")
birthday_dict = {(data_row["month"], data_row["day"]): data_row for (index, data_row) in data.iterrows()}
if today in birthday_dict:
birthday_person = birthday_dict[today]
file_path = f"letter_templates/letter_{random.randint(1,3)}.txt"
with open(file_path) as letter_file:
content = letter_file.read()
name = birthday_person["name"]
content = content.replace("[NAME]",name)
with smtplib.SMTP("smtp.gmail.com") as connection:
connection.starttls()
connection.login(my_email,my_password)
connection.sendmail(from_addr=my_email,
to_addrs=birthday_person["email"],
msg=f"Subject: Happy Birthday! \n \n {content}"
)
print("Email sent successfully! ")
|
[
"saurabh160196@gmail.com"
] |
saurabh160196@gmail.com
|
7a7b81b0c29bf297af774fa02c2b6c3bba8c4309
|
c58e8c29181c88257d28db4783463d55292220ef
|
/DBZbeall.py
|
1c204aa0e910d478548f0a799393bd4d0304ba42
|
[] |
no_license
|
HectorMontillo/DBZ---Beat-m-all
|
02b132a5084d0544f17f759cd205ea7b611e18c4
|
4f1ac878dabfdfd4b49cb516e7d888d461bf86e1
|
refs/heads/master
| 2020-03-16T17:26:09.707902
| 2018-05-10T03:01:56
| 2018-05-10T03:01:56
| 132,832,154
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
import pygame as pg
from main import main
import sys
if __name__=="__main__":
main()
pg.quit()
sys.exit()
|
[
"hector.montillo@utp.edu.co"
] |
hector.montillo@utp.edu.co
|
afe07b82e62a24bacb50ca14087ea9a8defc583b
|
d9d979e9d58403a99411f0f58631558fd37bafc9
|
/booking_details.py
|
5b90f10a357db94be3fe2b759d3ee52001ec8544
|
[] |
no_license
|
sole1907/echo_bot
|
d46604287eaa99bc10c6b7dc1020fe5386f5f05f
|
e8f2b1e5c4d40be0b9232a9d67b79fbc2d26db56
|
refs/heads/master
| 2021-03-01T23:47:43.608686
| 2020-03-13T07:24:10
| 2020-03-13T07:24:10
| 245,822,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class BookingDetails:
def __init__(
self,
destination: str = None,
origin: str = None,
travel_date: str = None,
unsupported_airports=None,
):
if unsupported_airports is None:
unsupported_airports = []
self.destination = destination
self.origin = origin
self.travel_date = travel_date
self.unsupported_airports = unsupported_airports
|
[
"ayansola_akanmu@mastercard.com"
] |
ayansola_akanmu@mastercard.com
|
142a23c9264567b5956634eb2867302514fcb259
|
dafaa64cf49c76ff00ef86d77f162f98279c0bc6
|
/chef/__init__.py
|
7cf348a3340f5d4020ef73ff36277b731ee84f00
|
[] |
no_license
|
SeanOC/pychef
|
93c08992d4a85b3002348aa588cf5e460e69402a
|
bc1b39586f567a5539b92570c4d38ceb02b23b6e
|
refs/heads/master
| 2021-01-21T01:16:02.079784
| 2011-01-30T16:57:37
| 2011-01-30T16:57:37
| 1,309,387
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# Copyright (c) 2010 Noah Kantrowitz <noah@coderanger.net>
from chef.api import ChefAPI, autoconfigure
from chef.data_bag import DataBag, DataBagItem
from chef.exceptions import ChefError
from chef.node import Node
from chef.role import Role
from chef.search import Search
|
[
"noah@coderanger.net"
] |
noah@coderanger.net
|
58b279232104ddc47e84ec90853929111169acc6
|
d59fda40f42165f6a872b533e6204a644ed1d002
|
/取猫图.py
|
cb5b2652875d01d7661384310ac9dbf22bfde9fb
|
[] |
no_license
|
starman1992/web_crawler
|
ffe4bbf73ec2e3120967dcafe9577cc966ee5c51
|
85157297e0040e55410032858dbfe44134e0a4c6
|
refs/heads/master
| 2020-07-05T04:14:56.319194
| 2019-09-03T16:52:24
| 2019-09-03T16:52:24
| 202,518,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# fetch cat pictures from placekitten.com
import urllib.request

def savepic(width, height):
    url = f'http://placekitten.com/g/{width*100}/{height*100}'
    response = urllib.request.urlopen(url)  # was `r.urlopen(url)` with `r` undefined
    cat_img = response.read()
    with open(f'cat_{width*100}_{height*100}.jpg', 'wb') as f:
        f.write(cat_img)

for n in range(1, 10):
    savepic(n, n)
|
[
"noreply@github.com"
] |
starman1992.noreply@github.com
|
81ecbc364110aa1f1b8170f906e8232f3493cfb3
|
ddafd6b71a745a7b3886afac0035985c6a7b9fa3
|
/Course_1_simplest_instruction_vedio/Learn_5-8/hello.py
|
af76a1ebca6798c19605e19f5d2c23fda7a1e5ad
|
[] |
no_license
|
MissDiPan/Learn_Python
|
d3acd84462a7ea49cdffae6255d3d1be75345a53
|
d326706afcf9c6752ad2426a9d11603fb549a99a
|
refs/heads/master
| 2022-01-08T15:51:05.275298
| 2021-12-28T16:09:50
| 2021-12-28T16:09:50
| 145,735,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
def print_hello():
print("hello")
|
[
"miss.dear.pan@gmail"
] |
miss.dear.pan@gmail
|
e213472c48d8787a28647887cd8710a30177cada
|
d956fdc8a7655884c27e28b060c548d4d654884e
|
/conftest.py
|
c58b25e85bd74b07de52f0c1a676041213084b20
|
[
"Apache-2.0"
] |
permissive
|
HeikKerimov/python_training
|
6bd9162d5f2a0783c31c2f54e2988dbac906c8e7
|
4ed62f16a9bd77ee13d1682be4f6f5ee7ab3fb53
|
refs/heads/master
| 2021-01-25T12:21:18.690338
| 2018-05-05T07:56:33
| 2018-05-05T07:56:33
| 123,468,495
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,737
|
py
|
import pytest
import json
import os
import importlib
import jsonpickle
from fixture.application import Application
from fixture.db import DbFixture
from fixture.orm import ORMFixture
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture
def app(request):
global fixture
browser = request.config.getoption("--browser")
web_config = load_config(request.config.getoption("--target"))["web"]
if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, base_url=web_config["baseUrl"])
fixture.session.ensure_login(username=web_config["username"], password=web_config["password"])
return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.close()
request.addfinalizer(fin)
return fixture
@pytest.fixture(scope="session")
def db(request):
db_config = load_config(request.config.getoption("--target"))["db"]
dbfixture = DbFixture(host=db_config["host"], name=db_config["name"], user=db_config["user"], password=db_config["password"])
def fin():
dbfixture.close()
request.addfinalizer(fin)
return dbfixture
@pytest.fixture(scope="session")
def orm(request):
db_config = load_config(request.config.getoption("--target"))["db"]
ormfixture = ORMFixture(host=db_config["host"], name=db_config["name"], user=db_config["user"], password=db_config["password"])
return ormfixture
@pytest.fixture
def check_ui(request):
return request.config.getoption("--check_ui")
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="chrome")
parser.addoption("--target", action="store", default="target.json")
parser.addoption("--check_ui", action="store_true")
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
test_data = load_from_module(fixture[5:])
metafunc.parametrize(fixture, test_data, ids=[str(x) for x in test_data])
elif fixture.startswith("json_"):
test_data = load_from_json(fixture[5:])
metafunc.parametrize(fixture, test_data, ids=[str(x) for x in test_data])
def load_from_module(module):
return importlib.import_module("data.%s" % module).test_data
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
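# Example invocations (a sketch, using the options registered in pytest_addoption):
#   pytest --browser=firefox --target=target.json
#   pytest --check_ui
# Test fixtures named data_<module> or json_<file> are parametrized from
# data/<module>.py and data/<file>.json respectively (see pytest_generate_tests).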
|
[
"kerimov225@mail.ru"
] |
kerimov225@mail.ru
|
bda3d25cdd66b3a5b3375aec754ecb7556a97716
|
dcfb3053e8fe225a694882d0d2f7906dd39e13ab
|
/aspectRatioDetector.py
|
52eae8cca966ec3ac1258eb9f8230eb19f2363ff
|
[] |
no_license
|
RosieCampbell/aspectratio
|
2e7bef5df4eaca01d7967156ab8f246ce5afdd9b
|
24d8c37d2315b8aa77270cbde6532ce5a43276c2
|
refs/heads/master
| 2016-09-13T23:41:17.743236
| 2016-05-19T12:41:18
| 2016-05-19T12:41:18
| 59,205,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
from scipy import fftpack
import numpy as np
import pylab as py
from PIL import Image
from numpy import array
img = Image.open("rose-wide.png").convert('L')
arr = array(img)
# Take the fourier transform of the image.
F1 = fftpack.fft2(arr)
# Now shift so that low spatial frequencies are in the center.
F2 = fftpack.fftshift( F1 )
# the 2D power spectrum is:
psd2D = np.abs( F2 )**2
# plot the power spectrum
py.figure(1)
py.clf()
py.imshow( psd2D )
py.show()
|
[
"rosiekcampbell@gmail.com"
] |
rosiekcampbell@gmail.com
|
5ee5203e5981eeb531ccf67b22a3da8ef49642b5
|
2417d9f6afe95ba19354c65bfb400556f2eb2e19
|
/pixiedust/utils/scalaBridge.py
|
40f0eb7f8cd87315ab8edee476df5275a48e2e2a
|
[
"Apache-2.0"
] |
permissive
|
rakeshnb/pixiedust
|
39f1249a867719919441488f085e1f60519dae58
|
fb5198c7564589c267147d7bdee1f798e7b361ef
|
refs/heads/master
| 2020-05-23T08:09:42.603871
| 2016-10-07T22:08:10
| 2016-10-07T22:08:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,296
|
py
|
# -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
import os
import subprocess
import re
from IPython.core.magic import (Magics, magics_class, cell_magic)
from pixiedust.utils.javaBridge import *
from pixiedust.utils.template import *
'''
Manages the variables defined interactively in the Notebook
'''
class InteractiveVariables(object):
def __init__(self, shell):
self.shell = shell
def getVar(self, varName):
return self.shell.user_ns.get(varName, None ) or self.shell.user_ns_hidden.get(varName, None)
def varTypeTransformer(self, varName, varValue):
pythonToScalaSimpleTypeMap = {"str":"String","int":"Int"}
scalaType = pythonToScalaSimpleTypeMap.get(varValue.__class__.__name__, None)
if scalaType == "String":
varValue = "\"" + varValue.replace('\n','\\n') + "\""
return {"value": varValue, "codeValue": varValue if scalaType is not None else None, "type": scalaType or "Any"}
def getVarsDict(self):
user_ns = self.shell.user_ns
user_ns_hidden = self.shell.user_ns_hidden
nonmatching = object() # This can never be in user_ns
#for i in user_ns:
# print(user_ns[i].__class__)
filtered = ["function"]
out = { key : self.varTypeTransformer(key, user_ns[key]) for key in user_ns \
if not key.startswith('_') \
and key!="sc" and key!="sqlContext" \
and (user_ns[key] is not user_ns_hidden.get(key, nonmatching)) \
and not inspect.isclass(user_ns[key])\
and not inspect.isfunction(user_ns[key])\
and not inspect.ismodule(user_ns[key])}
return out
def updateVarsDict(self, vars):
self.shell.user_ns.update(vars)
@magics_class
class PixiedustScalaMagics(Magics):
def __init__(self, shell):
super(PixiedustScalaMagics,self).__init__(shell)
self.interactiveVariables = InteractiveVariables(shell)
self.scala_home = os.environ.get("SCALA_HOME")
self.class_path = JavaWrapper("java.lang.System").getProperty("java.class.path")
self.env = PixiedustTemplateEnvironment()
def getLineOption(self, line, optionName):
m=re.search(r"\b" + optionName + r"=(\S+)",line)
return m.group(1) if m is not None else None
def hasLineOption(self, line, optionName):
return re.match(r"\b" + optionName + r"\b", line) is not None
def getReturnVars(self, code):
vars=set()
for m in re.finditer(r"\b__(\w+?)\b",code):
vars.add(m.group(0))
return vars
def fromJava(self, stuff):
if stuff.__class__.__name__ == "JavaObject":
if stuff.getClass().getName() == "org.apache.spark.sql.DataFrame":
return DataFrame(stuff, SQLContext(SparkContext.getOrCreate(), stuff.sqlContext()))
elif stuff.getClass().getName() == "org.apache.spark.sql.SQLContext":
return SQLContext(SparkContext.getOrCreate(),stuff)
return stuff
@cell_magic
def scala(self, line, cell):
if not self.scala_home:
print("Error Cannot run scala code: SCALA_HOME environment variable not set")
return
#generate the code
scalaCode = self.env.getTemplate("scalaCell.template").render(
cell=cell, variables=self.interactiveVariables.getVarsDict(), returnVars=self.getReturnVars(cell)
)
if self.hasLineOption(line, "debug"):
print(scalaCode)
return
#build the scala object
dir=os.path.expanduser('~') + "/pixiedust"
if not os.path.exists(dir):
os.makedirs(dir)
source="pixiedustRunner.scala"
with open(dir + "/" + source, "w") as f:
f.write(scalaCode)
#Compile the code
proc = subprocess.Popen([self.scala_home + "/bin/scalac","-classpath", self.class_path, source],stdout=subprocess.PIPE,stderr=subprocess.PIPE, cwd=dir)
code = proc.wait()
if code != 0:
while True:
line = proc.stderr.readline()
if not line:
break
print(line.rstrip())
return
#Load the class and initialize the variables
f = sc._jvm.java.io.File(dir)
url = f.toURL()
urls=sc._gateway.new_array(sc._jvm.java.net.URL,1)
urls[0]=url
cl = sc._jvm.java.net.URLClassLoader(urls)
cls = sc._jvm.java.lang.Class.forName("com.ibm.pixiedust.PixiedustScalaRun$", True, cl)
runnerObject = JavaWrapper(cls.getField("MODULE$").get(None), True,
self.getLineOption(line, "channel"), self.getLineOption(line, "receiver"))
runnerObject.callMethod("init", pd_getJavaSparkContext(), self.interactiveVariables.getVar("sqlContext")._ssql_ctx )
varMap = runnerObject.callMethod("runCell")
#capture the return vars and update the interactive shell
returnVars = {}
it = varMap.iterator()
while it.hasNext():
t = it.next()
returnVars[t._1()] = self.fromJava(t._2())
self.interactiveVariables.updateVarsDict(returnVars)
#discard the ClassLoader, we only use it within the context of a cell.
#TODO: change that when we support inline scala class/object definition
cl.close()
cl=None
cls=None
runnerObject=None
try:
get_ipython().register_magics(PixiedustScalaMagics)
except NameError:
#IPython not available we must be in a spark executor
pass
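# Notebook usage sketch (hypothetical cell):
#   %%scala
#   val __answer = 21 * 2
# Scala variables prefixed with __ are captured by getReturnVars() and pushed back
# into the Python shell after the cell runs; adding `debug` on the magic line
# prints the generated Scala source instead of compiling it.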
|
[
"david_taieb@us.ibm.com"
] |
david_taieb@us.ibm.com
|
e0008f6ef235559eeac6bb474c4f50dc7adbadf7
|
26e475d787fd8ac87ca0a5b2ebd6f0afccf88f7e
|
/scripts/readPcd.py
|
3993a900bc3c16b28e86f4b24bb71843278f7474
|
[] |
no_license
|
gusugusu1018/automotive
|
76f49dea3fa4220ddc245473757371d2d60f59b0
|
398b53f794b08f23d00bf2e677d6ec74a33cc248
|
refs/heads/master
| 2020-04-12T01:18:40.042800
| 2018-12-20T13:17:09
| 2018-12-20T13:17:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
from open3d import *
if __name__ == "__main__":
pcd = read_point_cloud("../pcd/1.pcd")
draw_geometries([pcd])
|
[
"sunahukinn1353@me.com"
] |
sunahukinn1353@me.com
|
7334c5326540dcca12d32d20eb5561972b13660e
|
644940a9c1e0693b257769df066c36fe9a4d47f4
|
/code/intervalDetect.py
|
f300edccff006c82f1638eccf14e48f579b656e4
|
[] |
no_license
|
pig6485/MIR-Final
|
d0d2a6980dfac4b397dbbedcecab84edb4366857
|
2a9b2e52c2eff34e81763a13bf7d1f21e553f2fa
|
refs/heads/master
| 2020-03-21T02:29:25.737732
| 2018-06-21T13:15:39
| 2018-06-21T13:15:39
| 138,001,163
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
import numpy as np
import librosa
# from easy to hard
# 3 5 (0.1)
# 2 4 6 8 (0.3)
# 3b 6b 7b (0.5)
# 2b 7 (0.8)
# 5b >8 (1)
interval_template = [0, 0.8, 0.3, 0.5, 0.1, 0.3, 1, 0.1, 0.5, 0.3, 0.5, 0.8, 0.3, 1]
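# interval_template[d] is the difficulty weight for an absolute interval of
# d semitones; intervalDetect() below clamps |interval| to 13, so index 13
# also covers everything larger than an octave.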
def intervalDetect(notes):
interval = np.diff(notes)
for i in range(len(interval)):
if np.abs(interval[i]) > 13: interval[i] = 13
print(interval)
score = 0.0
for i in interval:
score += interval_template[np.abs(i)]
l = len([x for x in interval if x != 0])
return np.round(100*score/l, decimals=1)
notes = [30, 35, 35, 35, 43, 41, 41, 42, 39, 38]
score = intervalDetect(notes=notes)
print(score)
|
[
"pig6485@gmail.com"
] |
pig6485@gmail.com
|
343b3b6be1b464c55ff2f2c1190c190e798abda5
|
062d281432770b9d86f4bad03178cb94178caae3
|
/mpPrimes1.4.py
|
4f878093c052158034246ff52715978576361d57
|
[] |
no_license
|
joshuaellis555/MultiprocessingPrimes
|
040fdc84e6ef5fb65b95bc86a0eae5c230812293
|
be77d11079177ba8f94837265e6f881a2e052d59
|
refs/heads/master
| 2020-03-30T14:43:02.623581
| 2018-10-02T23:02:47
| 2018-10-02T23:02:47
| 151,332,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,694
|
py
|
#import math as M
import multiprocessing as mp
import threading
import time
import math
'''
Primes=[2,3,5,7,11,13]
EndPoint=Primes[-1]
iMainPrime=len(Primes)-2
NextMPrimeSqrd=Primes[iMainPrime+1]**2
print('1.5')
while True:
for dummy in range(700):
StartPoint=EndPoint+2
EndPoint+=Primes[iMainPrime]-1
if EndPoint >= NextMPrimeSqrd:
EndPoint=NextMPrimeSqrd
iMainPrime+=1
NextMPrimeSqrd=Primes[iMainPrime+1]**2
CurrentList=list(range(StartPoint,EndPoint+1,2))
lcl=len(CurrentList)
for prime in Primes[1:iMainPrime+1]:
c=-(-StartPoint//prime)*prime
for i in range(((c if c%2 else c+prime)-StartPoint)//2,lcl,prime):
CurrentList[i]=0
Primes+=[x for x in CurrentList if x]
print('The '+str(len(Primes))+"'th prime is "+str(Primes[-1]))
'''
def doPrimes(START,END,PRIMES):
    #sieve the odd numbers in [START, END]: zero out every multiple of the
    #known PRIMES and return the partially-cleared list
    l=list(range(START,END+1,2))
    ln=len(l)
    for prime in PRIMES:
        s=-(-START//prime)*prime #smallest multiple of prime that is >= START
        for i in range(((s if s%2 else s+prime)-START)//2,ln,prime):
            l[i]=0
    return l
def ppl(l):
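    #pop and return the head of the list, then discard any leading zeros
    #(struck-out composites) left behind by the sieve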
r=l.pop(0)
ln=len(l)
while ln>0 and l[0]==0:
ln-=1
l.pop(0)
return r
def doWork(Jobs,Rq):
    #non-blocking worker step: grab one job if available, sieve its range
    #and queue the result
    try:
        n,i,c,s,e,p=Jobs.get(True,0)
        Rq.put([n,i,c,doPrimes(s,e,p)])
    except:
        pass
def threadWork(Jobs,Rq):
    try:
        #run one doWork step on a background thread
        t = threading.Thread(target = doWork, args=[Jobs,Rq])
        t.start()
    except:
        pass
def worker(Jobs,Rq,Rlenq,Rpq,Pq,CORES,NUM):
if NUM==0:
Primes=[3,5,7]
iCP=0#index Current Prime
Li=iCP#Lowes used index
Rlens={}#return lengths
update=time.time()
while True:
doW=True
if time.time()-update>1:
print("the " + str(len(Primes)+1) + "'th prime is " + str(Primes[-1]))
update+=1
if not Pq.empty():
print("P")
doW=False
Primes+=Pq.get()
if Jobs.empty() and iCP<len(Primes)-1:
print("A")
doW=False
START=Primes[iCP]**2+2
END=Primes[iCP+1]**2-2
numj=int((END-START+1)**.5)#number of jobs
s=START
for i in range(numj):
e=((END-s)//(numj-i))+s
e=e if e%2 else e+1
Jobs.put([numj,i,iCP,s,e,Primes[:iCP+1]])
s=e+2
iCP+=1
if doW:
#threadWork(Jobs,Rq)
pass
elif NUM==1:
while True:
if not Rpq.empty():
print("B")
primes=Rpq.get()
p=[]
while primes:
p+=[ppl(primes)]
print("1",p)
Pq.put(p)
else:
#threadWork(Jobs,Rq)
pass
elif NUM==2:
Li=0
Curc=0
Rl={}#Return list
Rl[0]=[]
while True:
if not Rq.empty():
print("C")
n,i,c,x=Rq.get()
if x[0]==0:
ppl(x)
if x:
if c not in Rl:
Rl[c]=[]
Rl[c]+=[[i,n,c]+x]
Rl[c].sort()
while c==Curc and i==Li:
Rpq.put(Rl[c].pop(0)[3:])
Li+=1
#print("asdf")
if i==n-1:
del Rl[c]
Curc+=1
Li=0
try:
i,n,c=Rl[c][0][:3]
except:
break
else:
#threadWork(Jobs,Rq)
pass
else:
while True:
doWork(Jobs,Rq)
if __name__ == "__main__":
import sys
print(sys.argv[0])
Jobs=mp.Queue()
CORES = max(3, mp.cpu_count())
    Rq = mp.Queue() #Return Queue
    p = {}
    Rlenq = mp.Queue() #Return length Queue
    Rpq = mp.Queue() #Return Primes Queue
    Pq = mp.Queue() #Primes Queue
for i in range(CORES):
p[i] = mp.Process(target = worker, args=[Jobs,Rq,Rlenq,Rpq,Pq,CORES,i])
p[i].start()
for i in range(CORES):
p[i].join()
|
[
"joshuaellis101@gmail.com"
] |
joshuaellis101@gmail.com
|
4ed0b8a8f8af97103450f34a9b829a12508d8b59
|
cc5e2e568304ced47ff2219770c4ba6255584fa7
|
/renameIMG.py
|
a1c4c906b1ef9587fa101139b5b0af8484b0d262
|
[] |
no_license
|
kenigma/python-renIMG
|
e234481f3310224edd8cb37bdb7e68cae4877882
|
06f9e5f9f6e00f837dc70c658e23a7d0d8d3ef5b
|
refs/heads/master
| 2021-01-19T17:20:30.122166
| 2017-02-19T09:27:26
| 2017-02-19T09:27:26
| 82,448,115
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,532
|
py
|
#!/usr/bin/python
import exifread
from datetime import datetime
import os
import sys
IMG_DATETIME_KEY = 'Image DateTime'
def validate(src_file):
if not os.path.isfile(src_file):
        raise Exception("%s does not exist." % src_file)
if os.path.islink(src_file):
raise Exception("%s is a symlink." % src_file)
return True
def getIMGFilename(src_file):
f = open(src_file, 'rb')
tags = exifread.process_file(f, details=False, stop_tag="Image DateTime")
img_filename = '';
if IMG_DATETIME_KEY in tags:
imgDateTimeStr = tags[IMG_DATETIME_KEY].values
imgDateTime = datetime.strptime(imgDateTimeStr, '%Y:%m:%d %H:%M:%S')
img_filename = imgDateTime.strftime('%Y%m%d_%H%M%S')
if img_filename == '':
raise Exception('Cannot read exif [%s] for file: %s' % (IMG_DATETIME_KEY, src_file))
filename, file_extension = os.path.splitext(src_file)
return img_filename, file_extension
def renameIMG(src_file):
validate(src_file)
new_filename, file_extension = getIMGFilename(src_file)
new_filepath_noext = os.path.join(os.path.dirname(src_file), new_filename)
new_filepath = new_filepath_noext + file_extension
i = 1
while os.path.isfile(new_filepath):
new_filepath = new_filepath_noext + "_%d" % i + file_extension
i += 1
os.rename(src_file, new_filepath)
return new_filename
def main():
src_file = sys.argv[1]
try:
dst_file = renameIMG(src_file)
return 0
except Exception as e:
print "Error: %s" % e.args
return 1
if __name__ == '__main__':
sys.exit(main())
|
[
"noreply@github.com"
] |
kenigma.noreply@github.com
|
97dbca4b2f4001c079940dd5d9b1a219b2d0b32c
|
0ec776725aaa61f9a51b8c322057b304dad4b0d0
|
/devel/lib/python2.7/dist-packages/robmovil_msgs/msg/_Trajectory.py
|
0415a86dc294abd6a6ee6d30f8700221cd77aa47
|
[] |
no_license
|
jrr1984/robot_diferencial_EKF
|
6824e79b6d8d64942f5cc7cc281bb293997fb04c
|
105fd9ca0db8390aa5b40ea0ae035eaf3955a804
|
refs/heads/master
| 2020-03-28T17:54:48.577061
| 2018-09-14T19:45:47
| 2018-09-14T19:45:47
| 148,834,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
/home/juan/catkin_ws_2/devel/.private/robmovil_msgs/lib/python2.7/dist-packages/robmovil_msgs/msg/_Trajectory.py
|
[
"juanreto@gmail.com"
] |
juanreto@gmail.com
|
2b6ac48ad0bd7660ab1a795018c68dce0e129311
|
bda459a42028eb9d53225bc4d636dd620f026b4b
|
/linkedlist/linkedlist.py
|
ca20b8df94eef1f353695b79d86449b71517c3cd
|
[] |
no_license
|
iamanx17/dslearn
|
3561ce78c160929fac084fecfaaa813cfc1dbb5d
|
ea64451c33b455838303c61d0ede0f53e8bf8e74
|
refs/heads/main
| 2023-06-16T03:33:41.871844
| 2021-07-16T08:34:14
| 2021-07-16T08:34:14
| 354,756,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 586
|
py
|
class Node:
def __init__(self, data):
self.data=data
self.next=None
def takeinput():
inputlist=[int(ele) for ele in input().split()]
head=None
tail=None
for currdata in inputlist:
if currdata==-1:
break
newnode=Node(currdata)
if head is None:
head=newnode
tail=newnode
else:
tail.next=newnode
tail=newnode
return head
def printdata(head):
while head is not None:
print(head.data, end='->')
head=head.next
print('None')
|
[
"pranj.17am@gmail.com"
] |
pranj.17am@gmail.com
|
02bb8f97744eeb371f7ece14abb8f13ff3c0ae7d
|
df97ef06f3e72df0d8d068435abc31e0e5b26b2a
|
/main/migrations/0006_auto_20200406_1512.py
|
092380ad7a4bbcb05f08b335d3f754eaf25c948b
|
[] |
no_license
|
fer0m/youtube_dowloader
|
ff41821e24d3bdca1f1435a717818cf25b617a59
|
4886772d6a9175459ec14880092d9ea61adfe194
|
refs/heads/master
| 2022-06-05T08:03:06.900183
| 2020-05-02T08:50:49
| 2020-05-02T08:50:49
| 260,642,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 432
|
py
|
# Generated by Django 3.0.5 on 2020-04-06 15:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0005_auto_20200406_1458'),
]
operations = [
migrations.AlterField(
model_name='post',
name='file_video',
field=models.FileField(default='', upload_to='documents', verbose_name='Видео'),
),
]
|
[
"antsin93@gmail.com"
] |
antsin93@gmail.com
|
60a9ff763ffdaa302a268fe02b4a72f9207aceac
|
601ac0c9f7138b3e506c0511d4a3e7f60a499305
|
/src/pykeen/datasets/wikidata5m.py
|
dd111388f66e68ebd96f0e31b583d57f9baa1897
|
[
"MIT"
] |
permissive
|
cdpierse/pykeen
|
9aa551adc05c9e609353d473db1d3da1b92f4ab0
|
e8225c066b56bcdd3180ba895ce3e153808e7e38
|
refs/heads/master
| 2023-09-02T06:30:25.849873
| 2021-11-09T17:32:15
| 2021-11-09T17:32:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
# -*- coding: utf-8 -*-
"""The Wikidata5m dataset from [wang2019]_.
Wikidata5m is a million-scale knowledge graph dataset with aligned corpus.
This dataset integrates the Wikidata knowledge graph and Wikipedia pages.
Each entity in Wikidata5m is described by a corresponding Wikipedia page,
which enables the evaluation of link prediction over unseen entities.
- Website: https://deepgraphlearning.github.io/project/wikidata5m
- Paper: https://arxiv.org/pdf/1911.06136.pdf
Get a summary with ``python -m pykeen.datasets.wikidata5m``.
"""
import pathlib
from docdata import parse_docdata
from .base import TarFileRemoteDataset
__all__ = [
"Wikidata5M",
]
TRANSDUCTIVE_URL = "https://zenodo.org/record/5546383/files/wikidata5m_transductive.tar.gz"
INDUCTIVE_URL = "https://zenodo.org/record/5546387/files/wikidata5m_inductive.tar.gz"
@parse_docdata
class Wikidata5M(TarFileRemoteDataset):
"""The Wikidata5M dataset from [wang2019]_.
---
name: Wikidata5M
statistics:
entities: 4594149
relations: 822
training: 20614279
testing: 4977
validation: 4983
triples: 20624239
citation:
author: Wang
year: 2019
arxiv: 1911.06136
link: https://arxiv.org/abs/1911.06136
"""
def __init__(self, create_inverse_triples: bool = False, **kwargs):
"""Initialize the Wikidata5M dataset.
:param create_inverse_triples: Should inverse triples be created? Defaults to false.
:param kwargs: keyword arguments passed to :class:`pykeen.datasets.base.TarFileRemoteDataset`.
"""
super().__init__(
url=TRANSDUCTIVE_URL,
relative_training_path=pathlib.PurePath("wikidata5m_transductive_train.txt"),
relative_testing_path=pathlib.PurePath("wikidata5m_transductive_test.txt"),
relative_validation_path=pathlib.PurePath("wikidata5m_transductive_valid.txt"),
create_inverse_triples=create_inverse_triples,
**kwargs,
)
if __name__ == "__main__":
Wikidata5M.cli()
|
[
"noreply@github.com"
] |
cdpierse.noreply@github.com
|
89a118a5de9eb5ca4bbf945e6591cac91039424b
|
9936c928667f8e5503997585bfcd4d88e7894480
|
/RaspberryPi_Pico/test/test.py
|
e79baaecff9e15117fe0f7eec892e050404c181b
|
[
"MIT"
] |
permissive
|
DelinLi/Phenotyping
|
3758eb6bdfa45ef35196c8b148874f666d6b1921
|
ad9af5007e03e10fba4f8db69219a7fe07fe1895
|
refs/heads/master
| 2023-04-08T09:03:15.052636
| 2023-04-03T07:42:54
| 2023-04-03T07:42:54
| 141,260,708
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
import alarm
import time
import board
from adafruit_onewire.bus import OneWireBus
from adafruit_ds18x20 import DS18X20
import busio
import adafruit_tsl2561
import adafruit_dht
import digitalio
import adafruit_ds3231
import bitbangio
from analogio import AnalogIn
led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT
led.value = True
time.sleep(1)
#### 2.2 light by TSL2561
i2c_light = busio.I2C(board.GP15, board.GP14)
light = adafruit_tsl2561.TSL2561(i2c_light)
light_lux=light.lux
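# .lux can return None when the raw reading cannot be converted to lux
# (e.g. sensor saturation in very bright light); treat that as 0 below.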
if light_lux is None:
light_lux=0
recorder=str(round(light_lux,0))+"\n"
print(recorder)
### 3. power up the pin and deep sleep 30 min
led.value = False
|
[
"delin.bio@gmail.com"
] |
delin.bio@gmail.com
|
0ed74f868dc846148625d61c313c8c1e7d63ef03
|
1b8444aa0413ad9202368af148be3e28490cef13
|
/setup.py
|
3523bd064169aca8bbe01fe9a9373d1197a89d5b
|
[
"MIT"
] |
permissive
|
entvia/whenareyou
|
371f6bbf408074d07eaaa39366f6223fa2ca5df5
|
46c0b9358b91ef6744fd75b50a21568a7c4cadb7
|
refs/heads/master
| 2020-03-17T06:34:51.946173
| 2018-05-14T13:56:50
| 2018-05-14T13:56:50
| 133,361,223
| 0
| 0
|
MIT
| 2018-05-14T13:56:51
| 2018-05-14T13:00:41
|
Python
|
UTF-8
|
Python
| false
| false
| 635
|
py
|
#!/usr/bin/env python3
from setuptools import find_packages, setup
with open('requirements.txt') as requirements:
required = requirements.read().splitlines()
with open('README.rst') as readme:
long_description = readme.read()
setup(
name='whenareyou',
version='0.3.2',
description='Gets the timezone of any location in the world',
long_description=long_description,
url='https://github.com/aerupt/whenareyou',
author='Lasse Schuirmann',
author_email='lasse.schuirmann@gmail.com',
license='MIT',
packages=find_packages(),
install_requires=required,
include_package_data=True,
)
|
[
"lasse.schuirmann@gmail.com"
] |
lasse.schuirmann@gmail.com
|
409dcd964d8baa18a299660ccebd1167402d65d9
|
af9e369879aaceea92a400605679dc97a949f519
|
/api_tests/nodes/views/test_view_only_query_parameter.py
|
0d7055f3e4f8553b7acfe384875776443f02d27d
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ubiquitypress/osf.io
|
7c3735ab70c7a2b56ce4ae7a266601440cebed74
|
c032511c27a8d6c145e8cc4603a59f301568499b
|
refs/heads/develop
| 2020-12-13T18:26:41.678556
| 2016-02-22T22:46:06
| 2016-02-22T23:13:47
| 48,805,847
| 0
| 1
| null | 2015-12-30T15:05:08
| 2015-12-30T15:05:07
| null |
UTF-8
|
Python
| false
| false
| 10,392
|
py
|
from nose.tools import * # flake8: noqa
from website.util import permissions
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import ProjectFactory
from tests.factories import AuthUserFactory
from tests.factories import PrivateLinkFactory
from website.models import Node
class ViewOnlyTestCase(ApiTestCase):
def setUp(self):
super(ViewOnlyTestCase, self).setUp()
self.creation_user = AuthUserFactory()
self.viewing_user = AuthUserFactory()
self.contributing_read_user = AuthUserFactory()
self.contributing_write_user = AuthUserFactory()
self.valid_contributors = [
self.creation_user._id,
self.contributing_read_user._id,
self.contributing_write_user._id,
]
self.private_node_one = ProjectFactory(is_public=False, creator=self.creation_user, title="Private One")
self.private_node_one.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
self.private_node_one.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
self.private_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
self.private_node_one_anonymous_link.nodes.append(self.private_node_one)
self.private_node_one_anonymous_link.save()
self.private_node_one_private_link = PrivateLinkFactory(anonymous=False)
self.private_node_one_private_link.nodes.append(self.private_node_one)
self.private_node_one_private_link.save()
self.private_node_one_url = '/{}nodes/{}/'.format(API_BASE, self.private_node_one._id)
self.private_node_two = ProjectFactory(is_public=False, creator=self.creation_user, title="Private Two")
self.private_node_two.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
self.private_node_two.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
self.private_node_two_url = '/{}nodes/{}/'.format(API_BASE, self.private_node_two._id)
self.public_node_one = ProjectFactory(is_public=True, creator=self.creation_user, title="Public One")
self.public_node_one.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
self.public_node_one.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
self.public_node_one_anonymous_link = PrivateLinkFactory(anonymous=True)
self.public_node_one_anonymous_link.nodes.append(self.public_node_one)
self.public_node_one_anonymous_link.save()
self.public_node_one_private_link = PrivateLinkFactory(anonymous=False)
self.public_node_one_private_link.nodes.append(self.public_node_one)
self.public_node_one_private_link.save()
self.public_node_one_url = '/{}nodes/{}/'.format(API_BASE, self.public_node_one._id)
self.public_node_two = ProjectFactory(is_public=True, creator=self.creation_user, title="Public Two")
self.public_node_two.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
self.public_node_two.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
self.public_node_two_url = '/{}nodes/{}/'.format(API_BASE, self.public_node_two._id)
def tearDown(self):
Node.remove()
class TestNodeDetailViewOnlyLinks(ViewOnlyTestCase):
def test_private_node_with_link_works_when_using_link(self):
res_normal = self.app.get(self.private_node_one_url, auth=self.contributing_read_user.auth)
assert_equal(res_normal.status_code, 200)
res_linked = self.app.get(self.private_node_one_url, {'view_only': self.private_node_one_private_link.key})
assert_equal(res_linked.status_code, 200)
assert_equal(res_linked.json, res_normal.json)
def test_private_node_with_link_unauthorized_when_not_using_link(self):
res = self.app.get(self.private_node_one_url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_private_node_with_link_anonymous_does_not_expose_contributor_id(self):
res = self.app.get(self.private_node_one_url, {
'view_only': self.private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert_equal(contributor['id'], '')
def test_private_node_with_link_non_anonymous_does_expose_contributor_id(self):
res = self.app.get(self.private_node_one_url, {
'view_only': self.private_node_one_private_link.key,
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert_in(contributor['id'], self.valid_contributors)
def test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id(self):
res = self.app.get(self.private_node_one_url, {
'view_only': self.private_node_one_private_link.key,
'embed': 'contributors',
}, auth=self.creation_user.auth)
assert_equal(res.status_code, 200)
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert_in(contributor['id'], self.valid_contributors)
def test_public_node_with_link_anonymous_does_not_expose_user_id(self):
res = self.app.get(self.public_node_one_url, {
'view_only': self.public_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert_equal(contributor['id'], '')
def test_public_node_with_link_non_anonymous_does_expose_contributor_id(self):
res = self.app.get(self.public_node_one_url, {
'view_only': self.public_node_one_private_link.key,
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert_in(contributor['id'], self.valid_contributors)
def test_public_node_with_link_unused_does_expose_contributor_id(self):
res = self.app.get(self.public_node_one_url, {
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
contributors = res.json['data']['embeds']['contributors']['data']
for contributor in contributors:
assert_in(contributor['id'], self.valid_contributors)
def test_view_only_link_does_not_grant_write_permission(self):
payload = {
'data': {
'attributes': {
'title': 'Cannot touch this' },
'id': self.private_node_one._id,
'type': 'nodes',
}
}
res = self.app.patch_json_api(self.private_node_one_url, payload, {
'view_only': self.private_node_one_private_link.key,
}, expect_errors=True)
assert_equal(res.status_code, 401)
    def test_view_only_link_from_another_project_does_not_grant_view_permission(self):
res = self.app.get(self.private_node_one_url, {
'view_only': self.public_node_one_private_link.key,
}, expect_errors=True)
assert_equal(res.status_code, 401)
def test_private_project_logs_with_anonymous_link_does_not_expose_user_id(self):
res = self.app.get(self.private_node_one_url+'logs/', {
'view_only': self.private_node_one_anonymous_link.key,
})
assert_equal(res.status_code, 200)
body = res.body
assert_not_in(self.contributing_write_user._id, body)
assert_not_in(self.contributing_read_user._id, body)
assert_not_in(self.creation_user._id, body)
def test_bad_view_only_link_does_not_modify_permissions(self):
res = self.app.get(self.private_node_one_url+'logs/', {
'view_only': 'thisisnotarealprivatekey',
}, expect_errors=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.private_node_one_url+'logs/', {
'view_only': 'thisisnotarealprivatekey',
}, auth=self.creation_user.auth)
assert_equal(res.status_code, 200)
class TestNodeListViewOnlyLinks(ViewOnlyTestCase):
def test_private_link_does_not_show_node_in_list(self):
res = self.app.get('/{}nodes/'.format(API_BASE), {
'view_only': self.private_node_one_private_link.key,
})
assert_equal(res.status_code, 200)
nodes = res.json['data']
node_ids = []
for node in nodes:
node_ids.append(node['id'])
assert_not_in(self.private_node_one._id, node_ids)
def test_anonymous_link_does_not_show_contributor_id_in_node_list(self):
res = self.app.get('/{}nodes/'.format(API_BASE), {
'view_only': self.private_node_one_anonymous_link.key,
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
nodes = res.json['data']
assertions = 0
for node in nodes:
contributors = node['embeds']['contributors']['data']
for contributor in contributors:
assertions += 1
assert_equal(contributor['id'], '')
assert_not_equal(assertions, 0)
def test_non_anonymous_link_does_show_contributor_id_in_node_list(self):
res = self.app.get('/{}nodes/'.format(API_BASE), {
'view_only': self.private_node_one_private_link.key,
'embed': 'contributors',
})
assert_equal(res.status_code, 200)
nodes = res.json['data']
assertions = 0
for node in nodes:
contributors = node['embeds']['contributors']['data']
for contributor in contributors:
assertions += 1
assert_in(contributor['id'], self.valid_contributors)
assert_not_equal(assertions, 0)
|
[
"bgeiger@pobox.com"
] |
bgeiger@pobox.com
|
6c70d30ae19fe565783bf2d6103378583e077d46
|
f87bcf6af65e0642d71507b8bf52f1983daa013c
|
/mindstorms.py
|
e077746dbbbdf912c9383884dcf7787309420e5c
|
[] |
no_license
|
Akshit94/PythonProjects
|
80d7b22bac9b39b5c798ece82827a23d04961c44
|
dc29881fe00fde6da185fc7487c0fcedd3381a6f
|
refs/heads/master
| 2021-05-01T17:15:39.889015
| 2017-01-21T06:27:06
| 2017-01-21T06:27:06
| 79,422,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,075
|
py
|
import turtle
def draw_art():
    # this line creates a new window for our turtle to work on.
window = turtle.Screen()
# sets the window's background color to red.
window.bgcolor("red")
    # here "turtle" is the actual thing that moves around and draws stuff
# on the computer for us.
# turtle.Turtle() is used to grab the turtle. "Turtle" is a class.
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("blue")
brad.speed(3)
# forward(distance) moves the turtle forward by the given distance.
# right(degrees) turns the turtle to right by the given degrees.
for i in range(1,37):
count = 4
while(count):
brad.forward(100)
brad.right(90)
count = count - 1
brad.right(10)
#angie = turtle.Turtle()
#angie.shape("arrow")
#angie.color("yellow")
# here circle(radius) is used to create a circle with the given radius.
#angie.circle(100)
    # this call ensures the window is closed only when we click on it.
window.exitonclick()
draw_art()
|
[
"akshit.jain94@gmail.com"
] |
akshit.jain94@gmail.com
|
b500eab6bba9162aabe1fe171d1a7803115f717c
|
b4f0d86b9ad95038f058cbadda2bca797f0a016c
|
/kakebo/kakebo/dataccess.py
|
a9c14f12aa58233651885b09ae0279cfa37a6700
|
[] |
no_license
|
MARALOGON/repaso_web_flask_js
|
762cb37606478caf5fa0d6664bbb6d13b0b461dd
|
a88c27d6e2717555c64a6491e0ef6760b78c3d59
|
refs/heads/main
| 2023-06-12T10:34:03.572807
| 2021-07-05T07:18:43
| 2021-07-05T07:18:43
| 375,792,744
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,481
|
py
|
import sqlite3
class DBmanager():
    def __toDict__(self, cur): # __toDict__ builds a result list from the cursor and returns a list of dictionaries, one per record
        #Get the data from the query
        claves = cur.description #claves holds every column of the table (fecha, concepto, categoria, etc.), provided by cur.description
        filas = cur.fetchall() #fetchall returns every row of the result set (all records)
        #Process the data to return a list of dictionaries (one dictionary per row)
        resultado = []
        for fila in filas:
            d = {}
            for tuplaclave, valor in zip(claves,fila):
                d[tuplaclave[0]] = valor
            resultado.append(d)
        return resultado
    def consultaMuchasSQL(self, query, parametros = []): #Gathers everything related to the index view's queries in one place; always returns a list of dictionaries, since many records are requested
        conexion = sqlite3.connect("movimientos.db") #Open the connection to the database
        cur = conexion.cursor() #Create a cursor instance on the connection
        #Run the query
        cur.execute(query, parametros) #This execute should give back a record or a list of records, hence the query parameter
        resultado = self.__toDict__(cur)
        conexion.close()
        return resultado
    def consultaUnaSQL(self, query, parametros = []): #Returns a list with a single record, since only one lookup is requested
        resultado = self.consultaMuchasSQL(query, parametros)
        if len(resultado) > 0: #If the query result is non-empty, i.e. the record exists
            return resultado[0] #Return the first element
    def modificaTablaSQL(self, query, parametros = []): #Gathers everything that modifies the table: UPDATE, DELETE, etc.
        conexion = sqlite3.connect("movimientos.db")
        cur = conexion.cursor()
        cur.execute(query, parametros)
        conexion.commit() #commit persists the change; it is mandatory for the change to be written to the database
        conexion.close()
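# Illustrative usage sketch; the "movimientos" table name is an assumption
# based on the database file name, not guaranteed by this module.
if __name__ == "__main__":
    db = DBmanager()
    filas = db.consultaMuchasSQL("SELECT * FROM movimientos")
    print(filas)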
|
[
"marcosalonso@MacBook-Pro-de-Marcos.local"
] |
marcosalonso@MacBook-Pro-de-Marcos.local
|
23626047fb1346a7b1f5d121d74e0209be82ae8d
|
a197bcd558447be9356e419eadf21df493762903
|
/Tutorial_2.py
|
15de5bef37393eb7d0e482173b9dfea653ba0b1e
|
[] |
no_license
|
marcy3ait/aero-bemt
|
adbea465f09db57ac0515492de4b00770f34e1bd
|
31095ff0307a5b82f13cfe8b11d8cf9377dd2ed0
|
refs/heads/master
| 2023-08-23T15:49:11.884584
| 2021-10-16T22:16:58
| 2021-10-16T22:16:58
| 342,839,490
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
#%% [markdown]
# ## Activity 06
#
# Plot the thrust coefficient versus figure of merit (FM) for the parameters below.
# Data:
#
#$C_{l\alpha}$ = 5.9
#
#$\sigma = 0.1$
#
# Cd0 = 0.01
#
# d1 = 0.025
#
# d2 = 0.65
#
#$ C_{treq} $ ranging from 0.001 to 0.01
#
# $\theta_{tw}$ = [0°, -5°, -10°, -15°, -20°, -25°]
#
#%%
import aerobemt as bem
import matplotlib.pyplot as plt
import numpy as np
ctreq = np.linspace(0.001, 0.01, 10)
twist = np.linspace(0, -25, 6)
for i in twist:
rotor1 = bem.Rotor(cla = 5.9, solidEqui= 0.1, ctreq = ctreq, numberBlades=4, twist=i )
#rotor1.setArrato(cd0 = 0.01, d1=0.025, d2 = 0.65 )
simulacao = bem.Bemt(rotor1, correcao=True)
__, __, __, __, K , __, Fmerit = simulacao.solver()
plt.plot(ctreq[K != 0], K[ K != 0], label = f'twist = {i} [°]')
plt.xlabel('$C_t$')
plt.ylabel('FM')
plt.grid()
plt.show()
# %%
|
[
"39223917+marcy3ait@users.noreply.github.com"
] |
39223917+marcy3ait@users.noreply.github.com
|
9ad35780007bb37dbfa5951a1df8e08fa152e1c3
|
f0da7ad429b9a8820359bd51eee5d31e51cc1d77
|
/env/lib/python3.6/encodings/hex_codec.py
|
42dc75ae25ec9a12570c48fbec25cb21d7127059
|
[] |
no_license
|
DavidNganga/bag
|
e190ae1a2d33439394859c03456a19676c85be36
|
eb67ba9b62a99dc41bcff8aae267541f46023800
|
refs/heads/master
| 2020-05-03T15:12:17.909935
| 2019-05-04T05:36:25
| 2019-05-04T05:36:25
| 178,699,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 70
|
py
|
/home/david/.pyenv/versions/3.6.5/lib/python3.6/encodings/hex_codec.py
|
[
"ngashiedavid@gmail.com"
] |
ngashiedavid@gmail.com
|
f6c11cf14d64b201675cb7a703529b8803adf02f
|
772593e07d763754b06999b94d183f9eb31ced8f
|
/pdfmerger.py
|
c55cc4c70a0d91586f2a92783af4ffc922f56da2
|
[] |
no_license
|
amannairDL/junk-cookbook
|
7e11796181d4385a8411f6e6b50f238148c85e22
|
35f73b17ae3d70655474f890db4f1ccd84a608ec
|
refs/heads/main
| 2023-02-22T04:19:17.706472
| 2021-01-14T13:50:32
| 2021-01-14T13:50:32
| 329,622,631
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 3 10:47:16 2020
@author: Aman.Sivaprasad
"""
from PyPDF2 import PdfFileMerger
pdfs = ['aman 10th and 12th.pdf', 'aman_srm marksheets (1).pdf']
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(pdf)
merger.write(r"C:\Users\Aman.Sivaprasad\OneDrive - EY\Desktop\learnml\collect\MARKSHEETS.pdf")
merger.close()
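# Note: PyPDF2 3.x renamed PdfFileMerger to PdfMerger; this script targets the
# older PyPDF2 1.x/2.x API where PdfFileMerger still exists.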
|
[
"noreply@github.com"
] |
amannairDL.noreply@github.com
|
c67a3e11e19eeaa5fd04cb863ccc3d6632d20b4b
|
e452aba0313c63d01f7aae34cb8783f254f6870b
|
/Scrapy/scrapy/monprojettest/monprojettest/spiders/leboncoin.py
|
08e45b88f9436f397e19f5458ac2e2c007b36252
|
[] |
no_license
|
Taysonho/OUAP-4314
|
207667097e23f0ed742cb8d8c08071916bdda4c4
|
6d9f211e10cc60c0e1d747325d90e6fa62c7e139
|
refs/heads/master
| 2021-04-06T15:38:49.132356
| 2018-05-05T06:40:23
| 2018-05-05T06:40:23
| 124,361,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
# -*- coding: utf-8 -*-
import scrapy
class LeboncoinSpider(scrapy.Spider):
name = 'leboncoin'
allowed_domains = ['leboncoin.fr']
start_urls = ['http://leboncoin.fr/']
def parse(self, response):
pass
|
[
"tay-son.ho@edu.esiee.fr"
] |
tay-son.ho@edu.esiee.fr
|
3e54d02cd57b6507ea7cff0fee53106806668e30
|
778d4d6e10583d51a6e0a3957ba5d88053556150
|
/sendEmailTo/send_email.py
|
6d1d8ba334589ace836a4559e2bf2b24703c43e8
|
[] |
no_license
|
cmoralesmani/sendEmailPython
|
e4c71ffe7cb1cf72fc4691df6e8ebab91709f90a
|
48d1695b97d6d66c7169d2c77dba8a071243fa30
|
refs/heads/master
| 2021-05-14T19:02:33.468126
| 2018-01-16T22:34:46
| 2018-01-16T22:34:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,634
|
py
|
# import necessary packages
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from read_contacts import get_contacts
from read_template import read_template
from setup_smtp import get_smtp
def main():
# Path directory
path_dir = os.path.dirname(__file__)
    print('Root directory: {}'.format(path_dir))
# read contacts
path_contacts = os.path.join(path_dir, 'contacts.txt')
names, emails = get_contacts(path_contacts)
path_message = os.path.join(path_dir, 'message.txt')
message_template = read_template(path_message)
path_config = os.path.join(path_dir, 'config.ini')
s, from_addr = get_smtp(path_config)
# For each contact, send the email
for name, email in zip(names, emails):
        print('Sending to: {} {}'.format(name, email))
msg = MIMEMultipart() # Create a message
# add in the actual person name to the message template
message = message_template.substitute(PERSON_NAME=name.title())
# setup the parameters of the message
msg['From'] = from_addr
msg['To'] = email
msg['Subject'] = 'This is test'
# add in the message body
msg.attach(MIMEText(message, 'plain'))
try:
# send the message via the server set up earlier.
s.send_message(msg, from_addr=from_addr, to_addrs=email)
            print('OK, the email has been sent')
except Exception:
print('Can\'t send the email')
del msg
# Terminate the SMTP session and close the connection
s.quit()
if __name__ == '__main__':
main()
|
[
"cmorales@thedataage.com"
] |
cmorales@thedataage.com
|
f0ab01ade1d77c24f20903412b740cf20293c999
|
c009a29b0f7d1c11e2cedff0ac97b8fa9e4900bb
|
/features/migrations/0005_auto_20190527_2326.py
|
5cb80bab269f4c5d77eb01531d37c2a7c6ccf128
|
[] |
no_license
|
Code-Institute-Submissions/Django-Issue-Tracker-3
|
3849effde7e82e8b191ccfd055edd366c487333f
|
7aed62aece71802106f1beecefe87ce4d9a9956b
|
refs/heads/master
| 2020-06-15T12:55:30.325943
| 2019-07-04T21:39:51
| 2019-07-04T21:39:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-27 22:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('features', '0004_auto_20190524_1150'),
]
operations = [
migrations.RenameField(
model_name='features',
old_name='paid_no',
new_name='total_votes',
),
]
|
[
"player.c64@gmail.com"
] |
player.c64@gmail.com
|
b0f97c33a7f36f5bef81575d0d4538488ee2213c
|
a5092d68261c6d8eba399f67af4fea1a27f69512
|
/django-react/bin/pyrsa-priv2pub
|
6abd23fb6f44e5bc616c0ea9addf72fb4659ac5f
|
[
"MIT"
] |
permissive
|
Sagar-svg/Research_Buddy
|
643d82db2de70d89e9a535112be5df722e6d6706
|
27520cc8859dd5a071327e0ae6bb402778e79ad0
|
refs/heads/main
| 2023-08-02T01:38:04.810853
| 2021-09-01T02:36:31
| 2021-09-01T02:36:31
| 388,659,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
#!/home/sagar30/djangoApi/DJ_REACT/django-react/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from rsa.util import private_to_public
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(private_to_public())
|
[
"thasalsagar30@gmail.com"
] |
thasalsagar30@gmail.com
|
|
3108cf979785c30082ded6081d96410ca92a4acc
|
9bb3cb5982a170af001d6e8b235f6d37f84a52a1
|
/day1/2.1.ImageClassificationWithPyTorch/scripts/separate_to_folders.py
|
b56b9321a0d62d9e9ff18c17d48c475ecb573ee1
|
[
"MIT"
] |
permissive
|
Azure/Azure-AI-Camp
|
dfe2922c778750bb6a578416801f2c8ed39b5bed
|
a0d9399448dab3307c74e6e69c3cee326f43847b
|
refs/heads/master
| 2023-08-15T05:26:18.313410
| 2023-02-03T17:14:29
| 2023-02-03T17:14:29
| 238,554,513
| 14
| 5
|
MIT
| 2023-07-23T04:44:58
| 2020-02-05T21:43:15
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,533
|
py
|
"""
Splits a single directory of images randomly into
training and validation folders, based on labels, for
training an image-classification model.
"""
import os
import glob
import shutil
import random
import argparse
os.makedirs('data/train/normal', exist_ok=True)
os.makedirs('data/val/normal', exist_ok=True)
os.makedirs('data/train/suspicious', exist_ok=True)
os.makedirs('data/val/suspicious', exist_ok=True)
imglookup = {}
def arg_parse():
"""
Parse arguments
"""
parser = argparse.ArgumentParser(description='Split the image data into train and val folders.')
parser.add_argument("--dir", dest='dir', help="The base directory of images")
return parser.parse_args()
args = arg_parse()
with open('cctvFrames_train_labels.csv') as f:
for line in f:
linespl = line.rstrip().split(',')
imglookup[linespl[0]] = linespl[1]
imgfiles = glob.glob(os.path.join(args.dir, '*.jpg'))
print(len(imgfiles))
for imgf in imgfiles:
randnum = random.choice(range(10))
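    # values 0 and 1 (roughly 20% of images) go to validation, the rest to training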
if imglookup[os.path.basename(imgf)] == '1':
if randnum not in [0,1]:
shutil.copyfile(imgf, 'data/train/suspicious/' + os.path.basename(imgf))
else:
shutil.copyfile(imgf, 'data/val/suspicious/' + os.path.basename(imgf))
else:
if randnum not in [0,1]:
shutil.copyfile(imgf, 'data/train/normal/' + os.path.basename(imgf))
else:
shutil.copyfile(imgf, 'data/val/normal/' + os.path.basename(imgf))
if __name__ == "__main__":
pass
|
[
"michhar@microsoft.com"
] |
michhar@microsoft.com
|
3dd85555daf1359530cdcbcdf8e7a4a1a078db01
|
abc82ef21a2997ef7e713c8b9322a7c359715d7e
|
/spotify_bot.py
|
dbaa0b5de35806dfb149801b181b790020507186
|
[] |
no_license
|
Raj6695/python_spotify_playlist_automation
|
330a5a84b70fd2d18c22bacaddcca6f7afb92441
|
a64a5161bff980ee28deefb6a31dc7da7c24de7e
|
refs/heads/main
| 2023-06-16T15:40:39.591442
| 2021-07-19T07:56:04
| 2021-07-19T07:56:04
| 387,383,800
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
from spotipy.oauth2 import SpotifyOAuth, SpotifyClientCredentials
from bs4 import BeautifulSoup
import requests
import spotipy
SPOTIPY_CLIENT_ID = "ID"
SPOTIPY_CLIENT_SECRET = "SECRET"
date = input("Which year do you want to travel to? Type the date in this format YYYY-MM-DD: ")
response = requests.get("https://www.billboard.com/charts/hot-100/" + date)
soup = BeautifulSoup(response.text, 'html.parser')
song_names_spans = soup.find_all("span", class_="chart-element__information__song")
song_names = [song.getText() for song in song_names_spans]
print(song_names)
sp = spotipy.Spotify(
auth_manager=SpotifyOAuth(
scope="playlist-modify-private",
redirect_uri="http://example.com",
client_id=SPOTIPY_CLIENT_ID,
client_secret=SPOTIPY_CLIENT_SECRET,
show_dialog=True,
cache_path="token.txt"
)
)
songs = []
user_id = sp.current_user()["id"]
year = date.split("-")[0]
print(user_id)
play = sp.user_playlist_create(user=user_id, name=f"{date} POPULAR-songs", public=False)
print(play["id"])
for items in song_names:
    get_song = sp.search(q=f"track:{items} year:{year}", type="track")
    results = get_song["tracks"]["items"]
    if not results:
        continue #skip songs Spotify cannot find instead of crashing on an empty result
    songs.append(results[0]["uri"])
playlist_synthesis = sp.playlist_add_items(playlist_id=play["id"], items=songs)
popular = sp.artist_top_tracks(artist_id="7dGJo4pcD2V6oG8kP0tJRR", country="US")
top_tracks = [track["uri"] for track in popular["tracks"]]
# add only the new top tracks; re-sending the full `songs` list would duplicate the Billboard entries added above
plays = sp.playlist_add_items(playlist_id=play["id"], items=top_tracks)
print(plays)
|
[
"noreply@github.com"
] |
Raj6695.noreply@github.com
|
9653439cba0bee5f524f34380d9cb8822f054396
|
b22b8680432bed6a962bafda690359ba7436644d
|
/manipulate.py
|
e619c3c726133be469e23519fc3ae008688be453
|
[] |
no_license
|
bradleyvlr/pythonscripts
|
b370d0f5ebcb0d68aa6ea27b5d61730c3a0ff8af
|
1da882cdc9de2df6e4e3b01a3e39d6a9851368f6
|
refs/heads/master
| 2021-01-23T22:23:56.566658
| 2018-07-31T23:02:53
| 2018-07-31T23:02:53
| 102,931,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,548
|
py
|
#! /usr/bin/python3
#This is a program to search through a file tree and copy or delete files with a certain file extension to a directory
#This will also allow the same thing to be done with command line argument in MB
####### !!!!!!!!!I want to try flags so the syn will be program.py -dc -R DIR <DIR> [arg]
#I think the flags may be causing an issue because I am dumb, so I am giving up on them.
#I will try flags in a later program
import os, sys, shutil, re
def main():
checker()
#First going to start a checker for syntax, and send err/help, then return var to select method
def checker():
recurs = False
dirArg = 2
destArg = 3
ArgArg = 4
if sys.argv[2].lower() == 'recursive':
recurs = True
dirArg += 1
destArg += 1
ArgArg += 1
target = sys.argv[dirArg]
if os.path.isdir(sys.argv[dirArg]) == False:
if os.path.isdir(os.path.join(os.getcwd(),sys.argv[dirArg])):
target = os.getcwd()+'/'+sys.argv[dirArg]
else:
errMsg(3)
if len(sys.argv) < 4 or len(sys.argv) > 7:
errMsg(0)
sys.exit(0)
    elif sys.argv[1] == 'copy':
        print('Copy Selected')
        if not os.path.isdir(sys.argv[destArg]):
            os.mkdir(sys.argv[destArg])
        if ArgArg + 1 >= len(sys.argv):
            errMsg(1)
        else:
            cpSel(recurs, target, sys.argv[destArg], sys.argv[ArgArg], ArgArg + 1)
    elif sys.argv[1] == 'delete':
        print('Delete Selected')
        if ArgArg >= len(sys.argv):
            errMsg(2)
        delSel(recurs, target, sys.argv[ArgArg - 1], ArgArg)
elif sys.argv[1] == 'help':
helper()
else:
errMsg(0)
#Error Msg. Should offer specific help
def errMsg(x):
if x == 0:
print('Poor Syntax: Argument number is incorrect See Help\n\n' + ('*'*50) + '\n\n')
elif x ==1:
print('Not Enough Arguments. To copy files in a directory, you must include the relevant flag, the target directory, the destination directory, the type (\'size\' or \'ext\'), and the argument\n\n')
elif x ==2:
print('Not Enough Arguments. To delete files, you must include the relevant flag, the target directory, the type of file (\'size\' or \'ext\'), and the argument\n\n')
elif x == 3:
print('Target Directory Does not exist!\n\n')
elif x == 4:
print('Invalid file selector: Valid types are \'all\', \'size\', or \'ext\'\n\n')
helper()
sys.exit(0)
#Help Function
def helper():
    print('The format should be \'program.py (delete|copy) [recursive] targetDIRECTORY <destinationDIRECTORY> type arg\'\n\n\tdelete\t-specifies files to be deleted\n\tcopy\t-specifies files to be copied\n\thelp\t-displays this help screen\n\trecursive\t-optional; directory will be walked recursively\n\ttargetDIRECTORY\t-the affected directory\n\tdestinationDIRECTORY\t-only used with copy; files will be copied here\n\ttype\t-use size or ext to filter files by size or extension\n\targ\t-if size is specified, give a number in MB (no suffix); if ext is specified, supply an extension (e.g. .pdf)')
def listFiles(rec,direct,cp=False):
lst = []
if rec == True:
for path,subdir,filenames in os.walk(direct):
for files in filenames:
if cp == False:
fullfile = (path+'/'+files)
lst.append(fullfile)
else:
for filenames in os.listdir(direct):
lst.append(direct+'/'+filenames)
print(lst)
for i in lst:
if os.path.isdir(i):
lst.remove(i)
return lst
#Method for copying
def cpSel(rec, direct, destin, argum, argarg):
    fileList = listFiles(rec, direct)
    destin = pathChecker(destin)
    if argum.lower() == 'all':
        for i in fileList:
            finale = destin + os.path.basename(i)
            shutil.copy(i, finale)
    elif argum.lower() == 'ext':
        for i in fileList:
            if i.endswith(sys.argv[argarg]):
                finale = destin + os.path.basename(i)
                shutil.copy(i, finale)
    elif argum.lower() == 'size':
        for i in fileList:
            #the size argument is given in MB
            if os.path.getsize(i) >= float(sys.argv[argarg]) * 1024 * 1024:
                finale = destin + os.path.basename(i)
                shutil.copy(i, finale)
    else:
        errMsg(4)
#Method for deleting
def delSel(rec, direct, argum, argarg):
    fileList = listFiles(rec, direct)
    chopBlock = []
    direct = pathChecker(direct)
    if argum.lower() == 'all':
        for i in fileList:
            print(i)
            chopBlock.append(i)
    elif argum.lower() == 'size':
        for i in fileList:
            #the size argument is given in MB
            if os.path.getsize(i) >= float(sys.argv[argarg]) * 1024 * 1024:
                print(i)
                chopBlock.append(i)
    elif argum.lower() == 'ext':
        for i in fileList:
            if i.endswith(sys.argv[argarg]):
                print(i)
                chopBlock.append(i)
    else:
        errMsg(4)
    print("\nThese files will be deleted... Is this okay (y/n)?\n>>",end="")
    confirm = input()
    if confirm.lower() == 'y':
        for i in chopBlock:
            os.unlink(i)
    else:
        print('*********************\nDELETION ABORTED\n\n\n')
#Ensure paths all have / at the end
def pathChecker(path):
if path.endswith('/') == False:
path += '/'
return path
#Pluggable method for searching
#
main()
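#Example invocations (illustrative paths and sizes):
#  python manipulate.py copy recursive ./docs ./backup ext .pdf
#  python manipulate.py delete ./tmp size 10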
|
[
"bradleyr@cybercon.net"
] |
bradleyr@cybercon.net
|
da3c1bb19cf8b55a882ea6b6988160a62adc5695
|
250805e6c5dcafe7ac1b230cc7cf091585e03378
|
/Previous_Code/Heroku Test/app.py
|
0e49878b0787308e4f1024b56b19cc76562513f6
|
[] |
no_license
|
zachalexander/capstone_cunysps
|
884ee81706ff2f66859b7c69e216e7561ee81159
|
947a8b927b98bc5e2e203e95e041b77c1ec78d9e
|
refs/heads/main
| 2023-04-22T05:08:01.213488
| 2021-05-19T02:49:39
| 2021-05-19T02:49:39
| 339,902,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,197
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
import math
from modsim import *
#From https://callmepower.com/faq/electricity-prices/electricity-prices-per-sq-ft
cost = pd.read_csv("https://raw.githubusercontent.com/mkollontai/DATA608/master/Final/Monthly_cost_per_sf_States.csv", sep = ',', header = 0)
cost.columns = ['State','kWh per Month','Price per kWh', 'Monthly Bill', 'House Sq Ft', 'Monthly Bill per Sq Ft','Region']
States = list(cost['State'])
markdown_text = '''
# Long-term financial projection of a solar panel array
Solar panel arrays are often marketed by the number of kW they can produce. That figure assumes the panels receive full sunlight for at least 8 hours a day; in reality, the energy they produce depends largely on the sunlight your rooftop is actually exposed to.
The calculator below takes the sunlight in your area into account and provides estimates for several panel system sizes (you are often limited by the size of your roof) and efficiencies.
* Current efficiencies (r) range from 17.5% to slightly above 20%.
* Installation costs (I) can range anywhere from $10,000 to $25,000 depending on various factors.
'''
app = dash.Dash(__name__)
server = app.server
app.layout = html.Div([
dcc.Markdown(children = markdown_text),
html.Div([dcc.Input(id='lat-input', value=29.42412, type='number')," : Latitude"]),
html.Br(),
html.Div([dcc.Input(id='long-input', value=-98.49363, type='number')," : Longitude"]),
html.Br(),
html.Button(id='get_solar', n_clicks=0, children='Update Solar Data'),
html.Br(),
html.Br(),
dcc.Dropdown(
id = 'St',
options=[{'label':St, 'value':St} for St in States],
value = 'Texas'
),
html.Br(),
html.Div(["Home sq. ft. : ",
dcc.Input(id='home_sf', value=2300, type='number')]),
html.Br(),
html.Div(["Roof sq. ft. : ",
dcc.Input(id='roof_sf', value=850, type='number')]),
html.Br(),
html.Button(id='submit_details', n_clicks=0, children='Recalculate'),
html.Br(),
html.Div([
dcc.Graph(id = 'US_prices'),
dcc.Graph(id = 'your-home')
], style={'columnCount':2}),
dcc.Graph(id = 'Projection'),
html.H3('Fun Facts:'),
#dcc.Markdown(id='simple_facts'),
dcc.Markdown(id='calc_facts'),
#Hidden Div for storing Solar API Data
html.Div(id='Solar-data', style={'display':'none'})
])
@app.callback(Output('Solar-data', 'children'),
Input('get_solar','n_clicks'),
dash.dependencies.State('lat-input', 'value'),
dash.dependencies.State('long-input', 'value'))
def collect_ghi(solar_clicks,la, lo):
api_key = 'BUnBQIpFlpJZcCcqO2VeYuUMXjX7zCSGiVBNIIdH'
attributes = 'ghi'
year = '2019'
lat, lon = la, lo
leap_year = 'false'
interval = '60'
utc = 'false'
name = 'Misha+Kollontai'
reason= 'school_project'
affiliation = 'CUNY+SPS'
email = 'mkollontai@gmail.com'
mailing_list = 'false'
#combine all of the relevant information into the API-specified URL
url = 'https://developer.nrel.gov/api/solar/nsrdb_psm3_download.csv?wkt=POINT({lon}%20{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(year=year, lat=lat, lon=lon, leap=leap_year, interval=interval, utc=utc, name=name, email=email, mailing_list=mailing_list, affiliation=affiliation, reason=reason, api=api_key, attr=attributes)
GHI_raw = pd.read_csv(url,skiprows = 2)
#Set the index to the proper timestamps
GHI_raw = GHI_raw.set_index(pd.date_range('1/1/{yr}'.format(yr=year), freq=interval+'Min', periods=525600/int(interval)))
temp = GHI_raw[['Month','Day','GHI']]
daily = temp.groupby(['Month','Day']).sum()
monthly_mean = daily.groupby(['Month']).mean()
monthly_sd = daily.groupby(['Month']).std()
monthly_ghi = pd.DataFrame(monthly_mean)
monthly_ghi['STD'] = monthly_sd['GHI']
return monthly_ghi.to_json()
@app.callback(
Output('US_prices','figure'),
Input('St', 'value')
)
def adjust_state_fig(st):
state_fig = px.scatter(cost, x="kWh per Month", y="Price per kWh", color= "Region",
size = np.where(cost['State'] == st, 5, 1), hover_data=['State'],
opacity = np.where(cost['State'] == st, 1, 0.5))
state_fig.update(layout_coloraxis_showscale=False)
state_fig.update_layout(
title={
'text':'Distribution of Energy Use and Prices in the US by State',
'x':0.5,
'xanchor':'center',
'yanchor':'top'
},
xaxis_title='Average Energy Use (kWh/Month)',
yaxis_range=[0,35],
yaxis_title='Average price (¢/kWh)',
)
return state_fig
@app.callback(
Output('your-home','figure'),
[Input('home_sf', 'value'),
Input('St', 'value')]
)
def home_graph_update(h_sf,st):
us_monthly_use = 887
avg_home_sf = int(cost.loc[cost['State'] == st, 'House Sq Ft'].iloc[0])
avg_monthly_use = int(cost.loc[cost['State'] == st, 'kWh per Month'].iloc[0])
our_monthly_use = int(float(h_sf) / avg_home_sf * avg_monthly_use)
d = {'Location':['US',st,'Your Home'],
'Monthly Use':[us_monthly_use,avg_monthly_use,our_monthly_use],
'Region':['USA',cost.loc[cost['State'] == st, 'Region'].iloc[0],'Home']}
df = pd.DataFrame(d)
home_fig = px.bar(df,
x='Location',
y = 'Monthly Use',
color = 'Region',
color_discrete_map={
"South": "#636EFA",
"Northeast": "#AB63FA",
"Pacific": "#EF553B",
"West": "#00CC96",
"Midwest": "#FFA15A",
"USA":"#FF6692",
"Home":"#19D3F3"},
opacity = 0.6)
home_fig.update_layout(
title={
'text':'Monthly Energy Use Comparison',
'x':0.5,
'xanchor':'center',
'yanchor':'top'
},
yaxis_range=[0,2000],
yaxis_title='Average Energy Use (kWh/Month)',
showlegend=False
)
return home_fig
@app.callback(
[Output('Projection','figure'),
Output('calc_facts','children')],
[Input('submit_details','n_clicks'),
Input('Solar-data', 'children')],
dash.dependencies.State('lat-input', 'value'),
dash.dependencies.State('long-input', 'value'),
dash.dependencies.State('home_sf', 'value'),
dash.dependencies.State('roof_sf', 'value'),
dash.dependencies.State('St', 'value')
)
def update_output_div(n_clicks,solar,la, lo, sf, rf, st):
monthly_ghi = pd.read_json(solar)
avg_bill = float(cost.loc[cost['State'] == st, 'Monthly Bill'].iloc[0])
avg_home_sf = int(cost.loc[cost['State'] == st, 'House Sq Ft'].iloc[0])
monthly_per_sf = avg_bill / avg_home_sf
prices = pd.read_csv("https://raw.githubusercontent.com/mkollontai/DATA608/master/Final/Average_US_Electricity_Price.csv", sep = '\t', header = None)
prices.columns = ['State','Avg_Rate_2019','Avg_Rate_2018','%_change','Monthly_cost']
##### Define a system describing our solar panels and location ############################
def define_system(A=80,r=0.175,PR=0.8,lat=29.42412,long=-98.49363,state='Texas',initial_cost=20000):
'''Create a system object defining our solar panel system
'''
start = State(P=0, N=0, PB=0, MP = -initial_cost, C = 0)
t0 = 0
'''15 years worth of operation'''
t_end = 15*12
return System(start=start, t0=t0, t_end=t_end, A=A, r=r, PR=PR, state = state, lat=lat, long=long)
#############################################################################################
#### We must calculate the amount of power generated on on a given day by the panels.
#### This number is influenced by the surface area of the panels, their efficiency,
#### performance ratio and amount of exposure to sun they receive on that day. In our
#### estimation of GHI on a given day, we will assume a normal distribution given the
#### mean and stDev from the table we pulled from the NSRDB. The formula used below to
#### calculate the actual yield is taken from
# (https://photovoltaic-software.com/principle-ressources/how-calculate-solar-energy-power-pv-systems)
#### with the 'Annual average' value replaced with the GHI per day value calculated from the NSRDB data.
#### Function to determine the daily yield of the panels ################################
### system - pre-defined system defining the panels
### month - the month (1-12) for which the GHI is to be estimated
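    #### Worked example (illustrative numbers only): with A = 80 m^2, r = 0.175,
    #### PR = 0.8 and a daily GHI of 5000 Wh/m^2, the formula below gives
    #### (80 * 0.175 * 5000 * 0.8) / 1000 = 56 kWh for that day.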
def days_yield(system,month):
ghi_day = np.random.normal(monthly_ghi.iloc[month-1]['GHI'],monthly_ghi.iloc[month-1]['STD'])
ghi_day = float(ghi_day)
if ghi_day < 0:
ghi_day = 0
return (system.A*system.r*ghi_day*system.PR)/1000
#############################################################################################
#### Function generating a value for the demand on our system in a month.
def month_demand_norm(per_sf = 0.06):
tot_monthly = per_sf * float(sf)
std_d = tot_monthly * 0.15
demand_month = np.random.normal(tot_monthly,std_d)
if demand_month < 0:
demand_month = 0
return demand_month
#############################################################################################
#### Function calculating the balance at the end of a month ##############################
def calc_month(system, month):
#2% yearly increase in electricity rates
yearly_increase = 1.02
year = math.floor(month % 12)
month_mod = (month % 12)+1
if month_mod in [1,3,5,7,8,10,12]:
days = 31
elif month_mod in [4,6,9,11]:
days = 30
elif month_mod == 2:
days = 28
else:
print("Not a valid month number")
return None
loss = month_demand_norm(monthly_per_sf * yearly_increase**year)
p = 0
n = 0
balance = 0
gain = 0
price = prices.loc[prices['State'] == system.state, 'Avg_Rate_2019'].iloc[0]
price = price/100 * yearly_increase**year
for day in range(1,days+1):
gain = gain + days_yield(system,month_mod)
balance = gain*price - loss
if balance >= 0:
p = 1
else:
n = 1
this_month = State(P=p, N=n, B=balance, C = loss)
return this_month
#############################################################################################
def update_fxn(state,system,month):
'''Update the pos/neg/balance model.
state: State with variables P, N, PB, FB, C
system: System with relevant info
'''
p, n, pb, fb, c = state
month_result = calc_month(system, month)
p += month_result.P
n += month_result.N
pb += month_result.B
fb += month_result.B
c += month_result.C
return State(P=int(p), N=int(n), PB=pb, FB = fb, C = c)
#### The function below generates three TimeSeries objects over the time interval specified
#### within the provided time interval. The TimeSeries track number of months with a positive
#### balance, number of months with a negative balance and the overall balance throughout
#### the interval
    def run_simulation(system,upd_fxn):
        """Take a system as input and update it based on the update function.
        system - system object defining the panels
        upd_fxn - function describing the change to the system
        returns - TimeSeries
        """
P = TimeSeries()
N = TimeSeries()
PB = TimeSeries()
FB = TimeSeries()
C = TimeSeries()
state = system.start
t0 = system.t0
P[t0], N[t0], PB[t0], FB[t0], C[t0] = state
for t in linrange(system.t0, system.t_end):
state = upd_fxn(state,system,t)
P[t+1], N[t+1], PB[t+1], FB[t+1], C[t+1] = state
#return P, N, PB, FB, -C
return FB, -C
roof_A = float(rf) * 0.092903
system = define_system(A=roof_A, lat=la, long=lo, state=st, initial_cost = 25000, r =.175)
FB, C = run_simulation(system,update_fxn)
system2 = define_system(A=roof_A, lat=la, long=lo, state=st, initial_cost = 25000, r =.2)
FB2, C2 = run_simulation(system2,update_fxn)
system3 = define_system(A=roof_A, lat=la, long=lo, state=st, initial_cost = 15000, r =.175)
FB3, C3 = run_simulation(system3,update_fxn)
system4 = define_system(A=roof_A, lat=la, long=lo, state=st, initial_cost = 15000, r =.2)
FB4, C4 = run_simulation(system4,update_fxn)
projection = pd.concat([FB,FB2,FB3,FB4,C], axis =1)
projection.columns = ['I=$25k, r=.175','I=$25k, r=.2','I=$15k, r=.175','I=$15k, r=.2','Regular Grid Service']
intersect = []
test1 = 1
test2 = 1
test3 = 1
test4 = 1
for i,r in projection.iterrows():
if r['I=$25k, r=.175'] > r['Regular Grid Service'] and test1:
intersect.append(i)
test1 = 0
if r['I=$25k, r=.2'] > r['Regular Grid Service'] and test2:
intersect.append(i)
test2 = 0
if r['I=$15k, r=.175'] > r['Regular Grid Service'] and test3:
intersect.append(i)
test3 = 0
if r['I=$15k, r=.2'] > r['Regular Grid Service'] and test4:
intersect.append(i)
test4 = 0
fig = px.line(projection,
color_discrete_map={
'I=$25k, r=.175': 'blue',
'I=$25k, r=.2':'green',
'I=$15k, r=.175':'aqua',
'I=$15k, r=.2':'purple',
'Regular Grid Service':'red'
})
fig.update_layout(
title={
'text':'Cost of regular grid power -vs- solar panel array',
'x':0.5,
'xanchor':'center',
'yanchor':'top'
},
xaxis_title='# of Months',
yaxis_title='Projected Cost ($)'
)
even_pt_lo = math.ceil(min(intersect)/12)
even_pt_hi = math.ceil(max(intersect)/12)
fig.add_vrect(
x0=min(intersect), x1=max(intersect),
fillcolor='rgb(179,226,205)', opacity=0.5,
layer="below", line_width=0,
)
max_earn = int(projection['I=$15k, r=.2'].max())
ceiling = max(max_earn,0) - 2500
fig.update(layout=dict(
annotations=[
go.layout.Annotation(x=(min(intersect)+max(intersect))/2,
y=ceiling,
text="Likely Break-Even Range",
showarrow=False
)
]
))
c_over_13 = -(projection.iloc[13*12]['Regular Grid Service'])
c_over_13 = int(round(c_over_13/100,0)*100)
yearly_over_13 = int(round(c_over_13/13/10,0)*10)
    fun_fcts = '''
    * Over 13 years (the median length of home ownership) in a home like yours, _**you**_ are estimated to pay around **${}** in utility bills, or about **${}** per year.
    * If you spend $15,000 on installation, a 20% efficiency system would start being profitable (relative to the regular utility bills you would have amassed) within approximately **{} years**.
    * For comparison, a $25,000/17.5% efficiency system would take around **{} years** to become profitable.
    '''.format(c_over_13, yearly_over_13,even_pt_lo,even_pt_hi)
return fig, fun_fcts
if __name__ == '__main__':
app.run_server(debug=True)
|
[
"alexander.d.zachary@gmail.com"
] |
alexander.d.zachary@gmail.com
|
54efaa4ffc4bd53f98954c764105f48d60696530
|
40d1b3584beace72127cc56623d220b9b5e058a6
|
/discourse/formsAccount.py
|
50e69e5815eb7661acf2ecf2c005661de149d0ea
|
[] |
no_license
|
riyan-eng/Praxis_Academy-Aplikasi_pengumpu_link_kultum_bulan_puasa
|
155600ddca4370e562406acba8d4ae31a19dd0d5
|
41bfd625f760c715ea60825f067a9dc1f48a7eee
|
refs/heads/master
| 2023-03-29T15:36:55.929633
| 2021-03-29T17:13:49
| 2021-03-29T17:13:49
| 352,723,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class createUser(UserCreationForm):
class Meta:
model = User
fields = ['username', 'email', 'first_name', 'password1', 'password2']
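
# Usage sketch (hypothetical view code, not part of this file):
# form = createUser(request.POST or None)
# if form.is_valid():
#     form.save()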
|
[
"fbriyant11@gmail.comgit config --global user.email fbriyant11@gmail.com"
] |
fbriyant11@gmail.com
|
a4d707c0297a7f959fd8c80ea3132c82ca170186
|
61a856d931688a49435b3caab4e9d674ca2a32aa
|
/tests/test_policies.py
|
bb111cdb3197196db38727d1aa8b5271f2960c84
|
[
"Apache-2.0"
] |
permissive
|
kvt0012/NeMo
|
3c9803be76c7a2ef8d5cab6995ff1ef058144ffe
|
6ad05b45c46edb5d44366bd0703915075f72b4fc
|
refs/heads/master
| 2020-08-14T16:59:18.702254
| 2019-10-14T22:46:48
| 2019-10-14T22:46:48
| 215,203,912
| 1
| 0
|
Apache-2.0
| 2019-10-15T04:05:37
| 2019-10-15T04:05:34
| null |
UTF-8
|
Python
| false
| false
| 974
|
py
|
import unittest
from nemo.utils.lr_policies import SquareAnnealing, CosineAnnealing, \
WarmupAnnealing
from .common_setup import NeMoUnitTest
class TestPolicies(NeMoUnitTest):
def test_square(self):
policy = SquareAnnealing(100)
lr1, lr2, lr3 = (policy(1e-3, x, 0) for x in (0, 10, 20))
self.assertTrue(lr1 >= lr2)
self.assertTrue(lr2 >= lr3)
self.assertTrue(lr1 - lr2 >= lr2 - lr3)
def test_working(self):
total_steps = 1000
lr_policy_cls = [SquareAnnealing, CosineAnnealing, WarmupAnnealing]
lr_policies = [p(total_steps=total_steps) for p in lr_policy_cls]
for step in range(1000):
for p in lr_policies:
assert p(1e-3, step, 0) > 0
def test_warmup(self):
policy = SquareAnnealing(100, warmup_ratio=0.5)
lr1, lr2, lr3 = (policy(1e-3, x, 0) for x in (0, 50, 100))
self.assertTrue(lr1 < lr2)
self.assertTrue(lr2 > lr3)
|
[
"okuchaiev@nvidia.com"
] |
okuchaiev@nvidia.com
|
fcc7bea13fa1ad3b89aeeaf83d8070d2dc34cda1
|
269e2ed3b6ba346ead6542e39f6243b1f44897de
|
/createRandomForest.py
|
631c62baaeec1d5b20f826bd8e4920b16a871e2b
|
[] |
no_license
|
tobincolby/CS6220-Programming3
|
cb40a3d5995cddef0950a146a0776ebcec7630be
|
60d3a49dee6eb36692383fd3d08149a3a667210e
|
refs/heads/master
| 2020-08-09T05:04:16.411873
| 2019-10-12T03:23:56
| 2019-10-12T03:23:56
| 214,003,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
import pickle
car_file = open("data/car_quality_train.csv")
car_data = np.loadtxt(car_file, delimiter=",")
data_X = car_data[:, :-1]
data_Y = car_data[:, -1]
parameters = {'max_depth': np.arange(2, 20), 'min_samples_leaf': np.arange(1, len(data_Y) - 2)}
clf = GridSearchCV(RandomForestClassifier(n_estimators=5, random_state=1),
parameters,
n_jobs=4,
verbose=1)
clf.fit(data_X, data_Y)
best_forest = clf.best_estimator_
data = clf.cv_results_
pickle.dump(best_forest, open("cv_outputs/random_forest.clf", "wb"))
pickle.dump(data, open("cv_outputs/random_forest_data.data", "wb"))
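
# Usage sketch (paths assumed from the dumps above): reload the fitted model
# later and predict on new rows.
# with open("cv_outputs/random_forest.clf", "rb") as f:
#     forest = pickle.load(f)
# print(forest.predict(data_X[:5]))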
|
[
""
] | |
db1b84b96f69914398f0ba1ca7b2172817741628
|
75dec6116f56ca2c915c91ae86f54b81f6ea15d8
|
/tests/user_config_test.py
|
2a469645599c5f4ab99dd9a0398e85c53d31b70f
|
[
"MIT"
] |
permissive
|
mthomp89/icecube
|
20f75793e16e2aaed5d57c7305d2295611749c01
|
4787588c06fd96df88740bd4f2e9f30e59645e60
|
refs/heads/main
| 2023-08-01T09:13:17.073198
| 2021-09-13T08:52:43
| 2021-09-13T08:52:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
import datetime
import os
from pathlib import Path
import pytest
# Local import
from icecube.bin.config import CubeConfig
JSON_CONFIG_DIR = os.path.join(Path(__file__).parent, "resources", "json_config")
def test_load_config():
cube_config = CubeConfig()
cube_config_dict = cube_config.load_config(
os.path.join(JSON_CONFIG_DIR, "config_use_case1.json")
)
print(cube_config_dict)
assert cube_config.temporal_resolution == 1
assert float(cube_config.min_incidence_angle) == 20
assert float(cube_config.max_incidence_angle) == 21
assert bool(cube_config.coregistered)
assert bool(cube_config.space_overlap)
assert bool(cube_config.temporal_overlap)
assert str(cube_config.start_date) == "20200402"
assert str(cube_config.end_date) == "20210420"
assert cube_config_dict["temporal_resolution"] == 1
assert cube_config_dict["min_incidence_angle"] == 20
assert cube_config_dict["max_incidence_angle"] == 21
assert cube_config_dict["coregistered"]
assert cube_config_dict["space_overlap"]
assert cube_config_dict["temporal_overlap"]
assert cube_config_dict["start_date"] == datetime.datetime(2020, 4, 2)
assert cube_config_dict["end_date"] == datetime.datetime(2021, 4, 20)
def test_load_config_use_case2():
cube_config = CubeConfig()
cube_config_dict = cube_config.load_config(
os.path.join(JSON_CONFIG_DIR, "config_use_case2.json")
)
assert cube_config_dict["start_date"] == datetime.datetime(2020, 4, 2)
assert cube_config_dict["end_date"] == datetime.datetime(2021, 4, 20)
def test_wrong_type_parameter():
with pytest.raises(Exception) as e_info:
cube_config = CubeConfig()
cube_config.load_config(os.path.join(JSON_CONFIG_DIR, "config_use_case3.json"))
assert "temporal_resolution" in str(e_info)
|
[
"arnaud.dupeyrat@iceye.fi"
] |
arnaud.dupeyrat@iceye.fi
|
d1d3246d975978a4660d760066f2eb9981edc608
|
17ba75d82f7b46a3723d168b060e1d541128c6c0
|
/log.py
|
a5ccbf5f4a592391d5801119d95d9d9f9b760008
|
[] |
no_license
|
NImy0718/novel-downloader
|
b3d101bd52e70a2d2530a5cb3455c9f572af1e5c
|
c33b4bc493e4fa05828e94e0dbc99725085c8d72
|
refs/heads/main
| 2023-07-10T06:07:04.530858
| 2021-08-17T00:14:22
| 2021-08-17T00:14:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 391
|
py
|
import logging
import sys
LEVEL = logging.INFO
def init() -> logging.Logger:
root = logging.getLogger()
root.setLevel(LEVEL)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(LEVEL)
formatter = logging.Formatter('[%(asctime)s](%(levelname)s): %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
return root
logger = init()
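
# Usage sketch (module name assumed from the path above):
# from log import logger
# logger.info('downloaded chapter %d', 3)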
|
[
"8900942+yjqiang@users.noreply.github.com"
] |
8900942+yjqiang@users.noreply.github.com
|
2aad07bdc47fff733d3242ebb0671c4f5cc1d9df
|
4bb7c48f27cbaf11684c1fa66531dac3841d1b7e
|
/aarhus/roots_tfidf.py
|
01ddfdadd8cd91ca82a52a657980784c78571b20
|
[
"Apache-2.0"
] |
permissive
|
mikedelong/aarhus
|
5a749506ddff8372da8ed5fe9ffd0786ce3e9b43
|
0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1
|
refs/heads/master
| 2021-01-12T10:13:29.187829
| 2017-08-07T19:52:21
| 2017-08-07T19:52:21
| 76,390,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,014
|
py
|
import collections
import json
import logging
import pickle
import sys
import time
import mpld3
import numpy
from matplotlib import pyplot as pyplot
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
import random
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
reload(sys)
sys.setdefaultencoding("utf8")
def get_character_sets(arg_message):
charsets = set({})
for character_set in arg_message.get_charsets():
if character_set is not None:
charsets.update([character_set])
return charsets
def handle_error(arg_logger, arg_error_message, arg_email_message, arg_character_set):
arg_logger.warn('message: %s character_set: %s character sets found: %s subject: %s sender: %s' %
(arg_error_message, arg_character_set, get_character_sets(arg_email_message),
arg_email_message['subject'], arg_email_message['from']))
# https://stackoverflow.com/questions/7166922/extracting-the-body-of-an-email-from-mbox-file-decoding-it-to-plain-text-regard
def get_email_body(arg_logger, arg_message):
body = None
# Walk through the parts of the email to find the text body.
if arg_message.is_multipart():
for part in arg_message.walk():
# If part is multipart, walk through the subparts.
if part.is_multipart():
for subpart in part.walk():
if subpart.get_content_type() == 'text/plain':
# Get the subpart payload (i.e the message body)
body = subpart.get_payload(decode=True)
# Part isn't multipart so get the email body
elif part.get_content_type() == 'text/plain':
body = part.get_payload(decode=True)
# If this isn't a multi-part message then get the payload (i.e the message body)
elif arg_message.get_content_type() == 'text/plain':
body = arg_message.get_payload(decode=True)
# No checking done to match the charset with the correct part.
for charset in get_character_sets(arg_message):
try:
body = body.decode(charset)
except UnicodeDecodeError:
handle_error(arg_logger, "UnicodeDecodeError: encountered.", arg_message, charset)
except AttributeError:
handle_error(arg_logger, "AttributeError: encountered", arg_message, charset)
except LookupError:
handle_error(arg_logger, "LookupError: encountered", arg_message, charset)
return body
def run():
start_time = time.time()
formatter = logging.Formatter('%(asctime)s : %(name)s :: %(levelname)s : %(message)s')
logger = logging.getLogger('main')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
file_handler = logging.FileHandler('./roots_tfidf.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('started.')
with open('roots-tfidf-settings.json') as data_file:
data = json.load(data_file)
logger.debug(json.dumps(data, sort_keys=True, indent=4, separators=(',', ': ')))
figure_output_file = data['figure_output_file']
input_pickle_file = data['input_pickle_file']
kmeans_verbose = bool(data['k_means_verbose'])
limit = int(data['document_count_limit'])
limit = sys.maxint if limit == -1 else limit
minibatch = bool(data['k_means_minibatch'])
max_df = float(data['max_df'])
min_df = float(data['min_df'])
n_components = int(data['n_components'])
n_features = int(data['n_features'])
ngram_range_min = int(data['ngram_range_min'])
ngram_range_max = int(data['ngram_range_max'])
random_state = int(data['random_state'])
stopword_file = data['stopword_file']
terms_to_print = int(data['terms_to_print'])
tfidf_vocabulary_file = data['tfidf_vocabulary_file']
true_k = int(data['k_means_cluster_count'])
use_idf = bool(data['tfidf_use_idf'])
write_tfidf_vocabulary = data['write_tfidf_vocabulary']
random.seed(random_state)
vectorizer_english = TfidfVectorizer(max_df=max_df, max_features=n_features, min_df=min_df,
ngram_range=(ngram_range_min, ngram_range_max), stop_words='english',
use_idf=use_idf)
tf_vectorizer = CountVectorizer(max_df=0.95, max_features=n_features, min_df=2, stop_words='english')
# tf = tf_vectorizer.fit_transform(data_samples)
with open(input_pickle_file, 'rb') as input_fp:
roots = pickle.load(input_fp)
# http://scikit-learn.org/stable/auto_examples/text/document_clustering.html
logger.debug('After pickle load we have %d messages.' % len(roots))
text_data = list()
documents_processed = list()
sample_keys = random.sample(roots.keys(), limit)
for key in sample_keys:
value = roots[key]
body = get_email_body(logger, value)
        if body is not None:
            try:
                decoded = body.decode('utf-8', 'ignore')
                text_data.append(decoded)
                documents_processed.append(key)
            except UnicodeDecodeError as unicodeDecodeError:
                logging.warn(unicodeDecodeError)
actual_size = len(text_data)
logger.debug('After ignoring documents with unicode decode errors we have %d messages.' % actual_size)
original_size = (min(limit, len(roots)))
loss_percent = (100 * (original_size - actual_size) / original_size)
logger.debug('We lost %d percent due to unicode errors: %d of %d' % (loss_percent, (original_size - actual_size),
original_size))
logger.debug('data extraction complete. Running TFIDF.')
tf_idf_initial = vectorizer_english.fit_transform(text_data)
estimated_k = tf_idf_initial.shape[0] * tf_idf_initial.shape[1] / tf_idf_initial.nnz
logger.debug('Initial k estimate before stopword removal: %d ' % estimated_k)
logger.debug('The vocabulary contains %d words.' % len(vectorizer_english.vocabulary_.keys()))
logger.debug('The model found %d stopwords.' % len(vectorizer_english.stop_words_))
stopwords = vectorizer_english.stop_words_
additional_stopwords = set()
with open(stopword_file, 'rb') as stopwords_fp:
for item in iter(stopwords_fp):
additional_stopwords.add(unicode(item.strip()))
logger.debug('Additional stopwords (%d): %s' % (len(additional_stopwords), sorted(list(additional_stopwords))))
stopwords.update(additional_stopwords)
# todo move these to a data file
basic_stopwords = sorted(
['will', 'your', 'our', 'as', 'or', 'if', 'by', 'my', 'can', 'all', 'not', 'but', 'me', 'would', 'about',
'us', 'he', 'she', 'an', 'please', 'so', 'do', 'was', 'has', 'thanks', 'well', 'his', 've', 'what', 'who',
'just', 'know', 'call', 'sent', 'her', 'am', 'out', 'new', 'time', 'they', 'more', 'up', 'here', 'there',
'get', 'best', 'one', 're', 'their', 'now', 'let', 'any', 'the', 'need', 'work', 'good', 'hope', 'should',
'thank', 'how', 'have', 'been', 'no', 'could', 'also', 'make', 'its', 'some', 'may', 'think', 'when',
'said', 'today', 'like', 'going', 'him', 'see', 'had', 'great', 'very', 'you', 'next', 'send', 'this',
'and', 'for', 'from', 'look', 'forward', 'to', 'seeing', 'want', 'which', 'first', 'go', 'because', 'were',
'did', 'ask', 'meet', 'are', 'lot', 'of', 'sure', 'after', 'help', 'receiving', 'via', 'big', 'over',
'last', 'back', 'don', 'doing', 'wanted', 'much', 'than', 'why', 'we', 'happy', 'end', 'less',
'in', 'use', 'asked', 'say', 'with', 'on', 'these'])
logger.debug('most common basic stopwords: %s' % collections.Counter(basic_stopwords).most_common(3))
logger.debug('we have %d stopwords of which %d are unique.' % (len(basic_stopwords), len(set(basic_stopwords))))
logger.debug('basic stopwords: %s' % basic_stopwords)
stopwords.update(basic_stopwords)
vectorizer_stopwords = TfidfVectorizer(max_df=max_df, max_features=n_features, min_df=min_df,
ngram_range=(ngram_range_min, ngram_range_max), stop_words=stopwords,
use_idf=use_idf)
logger.debug('got the extended stopword list; rerunning TFIDF with the expanded list')
tfidf_data = vectorizer_stopwords.fit_transform(text_data)
estimated_k = tfidf_data.shape[0] * tfidf_data.shape[1] / tfidf_data.nnz
logger.debug('From shape and nnz data we estimate true K to be %d' % estimated_k)
logger.debug('The vocabulary contains %d words.' % len(vectorizer_stopwords.vocabulary_.keys()))
logger.debug('The model found %d stopwords.' % len(vectorizer_stopwords.stop_words_))
logger.debug('TFIDF complete; shape = %d x %d' % tfidf_data.shape)
if n_components == -1:
n_components = tfidf_data.shape[0] - 1
logger.debug('Using size of tf-idf matrix for SVD dimensions %d' % n_components)
svd = TruncatedSVD(n_components, random_state=random_state)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
lsa_data = lsa.fit_transform(tfidf_data)
explained_variance = svd.explained_variance_ratio_.sum()
logger.debug('with %d documents, %d components, and %d features we have %.2f explained variance.' %
(len(lsa_data), n_components, n_features, explained_variance))
if true_k == 0 or true_k == -1:
logger.debug('Using empirical k for k-means %d' % estimated_k)
true_k = estimated_k
    else:
        logger.debug('Using k-means with setting k = %d rather than empirical k %d' % (true_k, estimated_k))
if minibatch:
km = MiniBatchKMeans(batch_size=1000, init='k-means++', init_size=1000, n_clusters=true_k, n_init=1,
random_state=random_state, verbose=kmeans_verbose)
else:
km = KMeans(init='k-means++', max_iter=100, n_clusters=true_k, n_init=1, random_state=random_state,
verbose=kmeans_verbose)
logger.debug('Clustering sparse data with %s' % km)
km.fit(lsa_data)
cluster_counts = collections.Counter(km.labels_)
logger.debug('Silhouette Coefficient: %0.3f' % metrics.silhouette_score(tfidf_data, km.labels_, sample_size=1000))
logger.debug('Top terms per cluster:')
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
terms = vectorizer_stopwords.get_feature_names()
cluster_topic_terms = list()
for jndex in range(true_k):
cluster_topic_terms.append(str([terms[index] for index in order_centroids[jndex, :terms_to_print]]))
logger.debug('Cluster %d: %d (%.2f) : %s' % (
jndex, cluster_counts[jndex], float(cluster_counts[jndex]) / float(actual_size), cluster_topic_terms[jndex]))
if write_tfidf_vocabulary:
logger.debug('Writing tf-idf vocabulary to %s' % tfidf_vocabulary_file)
with open(tfidf_vocabulary_file, 'wb') as output_fp:
for key, value in vectorizer_stopwords.vocabulary_.iteritems():
output_fp.write('%s,%d \n' % (key, value))
logger.debug('lengths of labels: %d, documents processed: %d' % (len(km.labels_), len(documents_processed)))
largest_cluster_number = cluster_counts.most_common(1)[0][0]
largest_cluster = sorted([int(item[1]) for item in zip(km.labels_, documents_processed) if
item[0] == largest_cluster_number])
logger.debug('largest cluster: %d (%d) : %s' % (largest_cluster_number, len(largest_cluster), largest_cluster))
homogeneity_score = metrics.homogeneity_score(documents_processed, km.labels_)
logger.debug('Homogeneity: %0.3f' % homogeneity_score)
# build the labels
tooltip_labels = list()
for pair in zip(documents_processed, km.labels_):
label = str(pair[0]) + ' : ' + cluster_topic_terms[pair[1]]
tooltip_labels.append(label)
# use t-SNE to visualize
model_tsne = TSNE(n_components=2, random_state=random_state)
points_tsne = model_tsne.fit_transform(lsa_data)
figsize = (20, 10)
fig = pyplot.figure(figsize=figsize)
color_map = 'Set1' # 'plasma'
scatter_plot = pyplot.scatter([each[0] for each in points_tsne], [each[1] for each in points_tsne],
c=km.labels_.astype(numpy.float), cmap=color_map, marker='x')
    pyplot.colorbar(ticks=range(0, true_k))
pyplot.tight_layout()
pyplot.savefig(figure_output_file)
finish_time = time.time()
elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
logger.info('Time: {:0>2}:{:0>2}:{:05.2f}'.format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
# pop up a D3 view of the data with message labels
tooltip = mpld3.plugins.PointLabelTooltip(scatter_plot, labels=tooltip_labels)
mpld3.plugins.connect(fig, tooltip)
mpld3.show()
if __name__ == '__main__':
run()
|
[
"mikedelong@outlook.com"
] |
mikedelong@outlook.com
|
3cb8691ea805ecd6dcac881ce7e5fdcfed3dd7eb
|
3e28ae35815f0cba65259c550f11ad5baa983904
|
/blog/migrations/0001_initial.py
|
10de255b159f85e35967f0d32b5633788899cf25
|
[] |
no_license
|
areksnk/my-first-blog
|
752974aaedea9b87440f3884f14856ff86ca84ba
|
64d60e11c98fb9e8788144f530715b919dd2a43b
|
refs/heads/master
| 2020-05-30T06:17:19.747917
| 2019-05-31T10:43:08
| 2019-05-31T10:43:08
| 189,576,577
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-05-10 13:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"arkadiusz.szyszka@pinaclsolutions.com"
] |
arkadiusz.szyszka@pinaclsolutions.com
|
a2c8b9dbb10e984be4bcd8d26a4b42f8e075e491
|
fc4551d0811fdfd21887193b0452781a141382f1
|
/lecture3/decorator_examples.py
|
5508506c778229abe2aff3140f9706d457f0d656
|
[] |
no_license
|
bojika/CSCentre_Python
|
72921651537f34bdff9818680da3a9c6e3297bd3
|
0f308a0c22d74329b6d8c342554b845e84104500
|
refs/heads/master
| 2022-01-21T19:20:50.514113
| 2019-03-26T17:12:22
| 2019-03-26T17:12:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,909
|
py
|
import functools
import time
import warnings
def timethis(func=None, *, n_iter=100):
if func is None:
return lambda f: timethis(f, n_iter=n_iter)
@functools.wraps(func)
def inner(*args, **kwargs):
print(func.__name__, end=" ... ")
acc = float("inf")
for i in range(n_iter):
tick = time.perf_counter()
result = func(*args, **kwargs)
acc = min(acc, time.perf_counter() - tick)
print(acc)
return result
return inner
# result = timethis(sum)(range(10))
def profiled(func):
@functools.wraps(func)
def inner(*args, **kwargs):
inner.ncalls += 1
return func(*args, **kwargs)
inner.ncalls = 0
return inner
@profiled
def identity(x):
return x
# identity(42); print(identity.ncalls)
def once(func):
@functools.wraps(func)
def inner(*args, **kwargs):
if not inner.called:
func(*args, **kwargs)
inner.called = True
inner.called = False
return inner
@once
def initialize_settings():
print("Setting initialized.")
# initialize_settings(); initialize_settings()
def memoized(func):
cache = {}
@functools.wraps(func)
def inner(*args, **kwargs):
# We can't use dict here for arguments. Dict is not hashable
# This won't work
# key = args, kwargs
key = args + tuple(sorted(kwargs.items()))
if key not in cache:
cache[key] = func(*args, **kwargs)
return cache[key]
return inner
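
# Example (a sketch in the style of the commented examples above):
# @memoized
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
# fib(30); fib(30)  # the second call is answered from the cache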
def deprecated(func):
    # The problem here is that the warning is emitted when the module is loaded,
    # not when the function is called.
    code = func.__code__
    warnings.warn_explicit(
        func.__name__ + " is deprecated.",
        category=DeprecationWarning,
        filename=code.co_filename,
        lineno=code.co_firstlineno + 1,
    )
    return func
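
# A call-time variant (sketch, not in the original file): emit the warning on
# each call instead of at import time, which avoids the problem noted above.
def deprecated_at_call(func):
    @functools.wraps(func)
    def inner(*args, **kwargs):
        warnings.warn(func.__name__ + " is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return inner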
|
[
"corwinat@gmail.com"
] |
corwinat@gmail.com
|
dde9ba5321586c1b720d7ab1dd8858169dc38675
|
8d45d514512f699145873a9c64a849b82e8a2656
|
/Stacks&Queues/queues_withDeque.py
|
7b8a01cbf78db1932c2f927ef3fa7fc2f6fe9cba
|
[] |
no_license
|
SatyaChipp/PythonImpls
|
f2621c52a72249e5c007666dec026c003890d524
|
159ff3c92890c0d1bdec21e72ac6a50461ae5f5e
|
refs/heads/master
| 2020-09-07T18:38:48.247479
| 2019-11-13T02:45:42
| 2019-11-13T02:45:42
| 136,539,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 17:56:38 2018
@author: Jen
"""
from collections import deque
if __name__ == '__main__':
queue = deque([1, 2, 4, 6])
    queue.append(9)  # enqueue
queue.append(12)
print(queue)
    queue.popleft()  # dequeue
queue.popleft()
print(queue)
|
[
"noreply@github.com"
] |
SatyaChipp.noreply@github.com
|
49a1c6e81d68d3fe379dd000dd670b98edecb3fe
|
276ce333da24dbbbfc9885b4397bb1a65e96dc11
|
/addons/source-python/plugins/wcs/core/database/thread.py
|
96e5e703070bbcb32e4746d64790d0f29cd46a14
|
[] |
no_license
|
herlak/WCS
|
8b2a558dae2bf68163442aea14df9dab5952f14f
|
6081299c40574992cb4cd97a59e3132995084077
|
refs/heads/master
| 2020-07-02T03:57:16.072881
| 2019-08-13T12:23:38
| 2019-08-13T12:23:38
| 201,409,111
| 1
| 0
| null | 2019-08-09T06:51:32
| 2019-08-09T06:51:31
| null |
UTF-8
|
Python
| false
| false
| 8,220
|
py
|
# ../wcs/core/database/thread.py
# ============================================================================
# >> IMPORTS
# ============================================================================
# Python Imports
# PyMySQL
from pymysql.err import InterfaceError
from pymysql.err import OperationalError
# Queue
from queue import PriorityQueue
from queue import Queue
# Sys
from sys import exc_info
# Threading
from threading import Event
from threading import Thread
# Time
from time import sleep
# Source.Python Imports
# Hooks
from hooks.exceptions import except_hooks
# Listeners
from listeners.tick import Repeat
from listeners.tick import RepeatStatus
# WCS Imports
# Constants
from ..constants import NodeType
# ============================================================================
# >> CLASSES
# ============================================================================
class _PriorityQueue(PriorityQueue):
_entry = 0
def put(self, node):
node._entry = self._entry
self._entry += 1
super().put(node)
if node._blocking:
assert _repeat.status == RepeatStatus.RUNNING
node.priority = 2
node._executed.wait()
exception = node._result.exception
if exception:
raise exception[1].with_traceback(exception[2])
return node._result
class _Result(object):
def __init__(self, query=None, data=None, args=None, exception=None):
self._query = query
self._data = data
self._args = args
self._exception = exception
self._curindex = 0
def __getitem__(self, name):
return self._args[name]
def fetchone(self):
if len(self._data) <= self._curindex:
return None
data = self._data[self._curindex]
self._curindex += 1
return data
def fetchall(self):
data = self._data[self._curindex:]
self._curindex = len(self._data)
return data
@property
def query(self):
return self._query
@property
def exception(self):
return self._exception
class _Node(object):
def __init__(self, type_, query=None, arguments=None, callback=None, keywords=None, priority=0, blocking=False):
self.type = type_
self.query = query
self.arguments = arguments
self.callback = callback
self.keywords = keywords
self.priority = priority
self._entry = None
self._blocking = blocking
if blocking:
self._executed = Event()
self._result = None
def __lt__(self, other):
if self.priority == other.priority:
return self._entry < other._entry
return self.priority > other.priority
class _Thread(Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._use_database = None
self._unloading = Event()
self._counter = 0
self._reconnect_delay = 10
def run(self):
self.con = None
self.cur = None
self.connector = _queue.get().query
while True:
node = _queue.get()
if node.type in (NodeType.QUERY, NodeType.QUERY_MANY):
if self.con is None or self.cur is None:
if not self.connect():
node.priority = 1
_queue.put(node)
sleep(self._reconnect_delay)
continue
result = _Result(args=node.keywords, query=node.query)
try:
if node.type is NodeType.QUERY:
self.cur.execute(node.query, node.arguments)
else:
self.cur.executemany(node.query, node.arguments)
except (InterfaceError, OperationalError) as e:
if node._blocking:
result._exception = exc_info()
node._result = result
node._executed.set()
else:
node.priority = 1
_queue.put(node)
if isinstance(e, InterfaceError):
self.close()
if not self.connect():
sleep(self._reconnect_delay)
except:
result._exception = exc_info()
if node._blocking:
node._result = result
node._executed.set()
else:
node.priority = 1
_queue.put(node)
_output.put((None, result))
else:
if node.callback is not None:
result._data = self.cur.fetchall()
_output.put((node.callback, result))
if node._blocking:
node._result = result
node._executed.set()
elif node._blocking:
result._data = self.cur.fetchall()
node._result = result
node._executed.set()
elif node.type is NodeType.CALLBACK:
_output.put((node.callback, _Result(args=node.keywords)))
elif node.type is NodeType.CONNECT:
if not self.connect():
sleep(self._reconnect_delay)
elif node.type is NodeType.USE:
self._use_database = node.query
if self.cur is not None:
self.cur.execute(self._use_database)
elif node.type is NodeType.CLOSE:
break
if _queue.empty():
self.close()
self.close()
_output.put((True, True))
def _tick(self):
for _ in range(16):
if _output.empty():
break
callback, result = _output.get_nowait()
if callback is True and result is True:
_repeat.stop()
else:
if callback is not None:
try:
callback(result)
except:
except_hooks.print_exception()
if result.exception:
except_hooks.print_exception(*result.exception)
def connect(self):
try:
self.con = self.connector()
self.cur = self.con.cursor()
if self._use_database is not None:
self.cur.execute(self._use_database)
except OperationalError:
if self.unloading:
if self._counter >= 4:
_queue.put(_Node(NodeType.CLOSE, priority=4))
return False
self._counter += 1
_queue.put(_Node(NodeType.CONNECT, priority=3))
return False
except:
_output.put((None, _Result(exception=exc_info())))
_output.put((True, True))
raise
else:
self._counter = 0
return True
def close(self):
if self.cur is not None:
self.cur.close()
self.cur = None
if self.con is not None:
try:
self.con.commit()
except InterfaceError:
pass
self.con.close()
self.con = None
@property
def unloading(self):
return self._unloading.is_set()
@unloading.setter
def unloading(self, value):
getattr(self._unloading, 'set' if value else 'clear')()
class Repeat2(Repeat):
def _unload_instance(self):
# I need to be sure the repeat does NOT stop at unload
pass
# ============================================================================
# >> GLOBAL VARIABLES
# ============================================================================
_queue = _PriorityQueue()
_output = Queue()
_thread = _Thread(name='wcs.database')
_repeat = Repeat2(_thread._tick)
|
[
"thapwned@hotmail.com"
] |
thapwned@hotmail.com
|
75ecce35b2cf17fb97a518738a4b99c3a3a3a551
|
0dc5321f53ba8d385fbe4cc68b2c3ff28db2d748
|
/src/DataScience_Project6.py
|
361ebde2efffa9231a0f383c74fa0592318ea2c6
|
[] |
no_license
|
sandyr97/DataScience_Project6
|
a05cf7f9bd23d0c71a620ff6c01c380eef7c5821
|
d207a72eb6ad95990158267577403dbeb13bd47c
|
refs/heads/master
| 2020-09-25T15:18:18.670018
| 2019-12-06T22:29:47
| 2019-12-06T22:29:47
| 226,032,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,880
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
df=pd.read_csv("../data/Twitter_volume_FB.csv")
df.head(10)
df.isnull().sum()
df['timestamp'] = pd.to_datetime(df['timestamp'])
print(df.dtypes)
df['day'] = df['timestamp'].dt.day
df['month'] = df['timestamp'].dt.month
df['year'] = df['timestamp'].dt.year
print(df.head(10))
dt=df['timestamp']
dt = pd.DatetimeIndex(dt).astype(np.int64) / 1000000
df['unixTime']=dt
print(df.head(10))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xticklabels(df['timestamp'], rotation=70)
ax.plot_date(x=df.timestamp, y=df.value, ls='-', marker='.')
import seaborn as sns
sns.distplot(df['value'])
plt.title("Distribution of Values")
sns.despine()
print("Skewness: %f" % df['value'].skew())
print("Kurtosis: %f" % df['value'].kurt())
isolation_forest = IsolationForest(n_estimators=100)
isolation_forest.fit(df['value'].values.reshape(-1, 1))
xx = np.linspace(df['value'].min(), df['value'].max(), len(df)).reshape(-1,1)
anomaly_score = isolation_forest.decision_function(xx)
outlier = isolation_forest.predict(xx)
plt.figure(figsize=(10,4))
plt.plot(xx, anomaly_score, label='anomaly score')
plt.fill_between(xx.T[0], np.min(anomaly_score), np.max(anomaly_score),
where=outlier==-1, color='r',
alpha=.4, label='outlier region')
plt.legend()
plt.ylabel('anomaly score')
plt.xlabel('Value')
plt.show();
X=np.array(df['value']).reshape(-1, 1)
rs=np.random.RandomState(0)
clf = IsolationForest(max_samples=100,random_state=rs, contamination=.1)
clf.fit(X)
if_scores = clf.decision_function(X)
if_anomalies=clf.predict(X)
if_anomalies=pd.Series(if_anomalies).replace([-1,1],[1,0])
if_anomalies=df[if_anomalies==1]
plt.figure(figsize=(12,8))
plt.hist(if_scores)
plt.title('Histogram of Avg Anomaly Scores: Lower => More Anomalous')
def LOF_plot(k):
import seaborn as sns
from sklearn.neighbors import LocalOutlierFactor
var1,var2=1,2
clf = LocalOutlierFactor(n_neighbors=k, contamination=.1)
y_pred = clf.fit_predict(X)
LOF_Scores = clf.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF), K={}".format(k))
plt.scatter(df.iloc[:, 5], df.iloc[:, 1], color='k', s=3., label='Data points')
radius = (LOF_Scores.max() - LOF_Scores) / (LOF_Scores.max() - LOF_Scores.min())
plt.scatter(df.iloc[:, 5],df.iloc[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier Score')
plt.axis('tight')
plt.ylabel("{}".format(df.columns[1]))
plt.xlabel("{}".format(df.columns[5]))
legend = plt.legend(loc='upper right')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.ylim(0, 400)
plt.show()
LOF_plot(5)
LOF_plot(30)
LOF_plot(70)
from sklearn.neighbors import LocalOutlierFactor
clf = LocalOutlierFactor(n_neighbors=30, contamination=.1)
y_pred = clf.fit_predict(X)
LOF_Scores = clf.negative_outlier_factor_
LOF_pred=pd.Series(y_pred).replace([-1,1],[1,0])
LOF_anomalies=df[LOF_pred==1]
cmap=np.array(['white','red'])
plt.scatter(df.iloc[:,5],df.iloc[:,1],c='white',s=20,edgecolor='k', label='Data points')
plt.scatter(LOF_anomalies.iloc[:,5],LOF_anomalies.iloc[:,1],c='red', label='Anomalies')
# , marker='x', s=100)
plt.title('Local Outlier Factor-Anomalies')
plt.xlabel('Time')
plt.ylabel('value')
plt.ylim(0, 400)
plt.xlim(1.425e+12, 1.42516e+12)
legend = plt.legend(loc='upper right')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
cmap=np.array(['white','red'])
plt.scatter(df.iloc[:,5], df.iloc[:,1],c='white',s=20,edgecolor='k', label='Data points')
plt.scatter(if_anomalies.iloc[:,5],if_anomalies.iloc[:,1],c='red', label='Anomalies')
legend = plt.legend(loc='upper right')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.xlabel('Time')
plt.ylabel('Value')
plt.title('Isolation Forests - Anomalies')
plt.ylim(0, 400)
plt.xlim(1.425e+12, 1.42516e+12)
|
[
"rechs97@gmail.com"
] |
rechs97@gmail.com
|
9543414e9d257ebe3377c2f2038e405d28417bc2
|
95122b52ae1f00a7d2930a275217bd45bd07c24c
|
/unittest_msp430x2xx.py
|
14b3a6a582c91d8176b63ce1510a56c838c44917
|
[] |
no_license
|
SaitoYutaka/msp430-Assembler
|
fbbb062bfb8dfd338d21e54761b59a802ec2c901
|
d49c870aa1c1f20ae05c8a132a37d6426b196526
|
refs/heads/master
| 2020-05-19T15:06:33.156625
| 2011-04-04T13:25:36
| 2011-04-04T13:25:36
| 1,297,091
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,112
|
py
|
import unittest
import msp430x2xx
class TestFunctions(unittest.TestCase):
def setUp(self):
pass
def test_getcode(self):
self.mnemonic = (
("rrc", (1, 0x1000 )) ,
("rrc.b", (1, 0x1040 )) ,
("swpb", (1, 0x1080 )) ,
("rra", (1, 0x1100 )) ,
("rra.b", (1, 0x1140 )) ,
("sxt", (1, 0x1180 )) ,
("push", (1, 0x1200 )) ,
("push.b",(1, 0x1240 )) ,
("call", (1, 0x1280 )) ,
("reti", (1, 0x1300 )) ,
("jne", (2, 0x2000 )) ,
("jeq", (2, 0x2400 )) ,
("jnc", (2, 0x2800 )) ,
("jc", (2, 0x2C00 )) ,
("jn", (2, 0x3000 )) ,
("jge", (2, 0x3400 )) ,
("jl", (2, 0x3800 )) ,
("jmp", (2, 0x3C00 )) ,
("mov", (0, 0x4000 )) ,
("add", (0, 0x5000 )) ,
("addc", (0, 0x6000 )) ,
("subc", (0, 0x7000 )) ,
("sub", (0, 0x8000 )) ,
("cmp", (0, 0x9000 )) ,
("dadd", (0, 0xA000 )) ,
("bit", (0, 0xB000 )) ,
("bic", (0, 0xC000 )) ,
("bis", (0, 0xD000 )) ,
("xor", (0, 0xE000 )) ,
("and", (0, 0xF000 )) ,
("xxx", (None,None)))
self.op = msp430x2xx.OPcode()
for x in self.mnemonic:
self.assertEqual(self.op.GetOPcode(x[0]), x[1])
for x in self.mnemonic:
self.assertEqual(self.op.GetOPcode(x[0].upper()), x[1])
def test_getRegNum(self):
self.dicReg = (
# (val, return)
("R0",0),
("PC",0),
("R1",1),
("SP",1),
("R2",2),
("GC1",2),
("SR",2),
("R3",3),
("GC2",3),
("R4",4),
("R5",5),
("R6",6),
("R7",7),
("R8",8),
("R9",9),
("R10",10),
("R11",11),
("R12",12),
("R13",13),
("R14",14),
("R15",15),
("XX", None),
)
self.reg = msp430x2xx.Register()
for x in self.dicReg:
self.assertEqual(self.reg.getRegNum(x[0]), x[1])
def test_GetAddressingMode(self):
testdata = (
# (val, return)
('R6',0), # Register mode
('0x1234(R6)',2), # Indexed mode
('0x1234',6), # Synbolic mode
('&0x1234',3), # Absolute mode
('@R6',5), # Indirect register mode
('@R6+',4), # Indirect autoincrement
('#0x1234',1), # Immediate mode
('xxxxxx',None)
)
self.admode = msp430x2xx.AddressingMode()
for x in testdata:
self.assertEqual(self.admode.GetAddressingMode(x[0]), x[1])
def test_GetSource(self):
testdata = (
# (val, return)
('R6',(6,0)), # Register mode
('0x1234(R6)',(6,1)), # Indexed mode
('0x1234',(0,1)), # Synbolic mode
('&0x1234',(2,1)), # Absolute mode
('@R6',(6,2)), # Indirect register mode
('@R6+',(6,3)), # Indirect autoincrement
('#0x1234',(0,3)), # Immediate mode
('xxxxxx',(None,None))
)
self.msp = msp430x2xx.MSP430x2xx()
for x in testdata:
self.assertEqual(self.msp._GetSource(x[0]),x[1])
def test_GetDestination(self):
testdata = (
# (val, return)
('R6',(6,0)), # Register mode
('0x1234(R6)',(6,1)), # Indexed mode
('0x1234',(0,1)), # Synbolic mode
('&0x1234',(2,1)), # Absolute mode
('@R6',(None,None)), # Indirect register mode
('@R6+',(None,None)), # Indirect autoincrement
('#0x1234',(None,None)), # Immediate mode
('xxxxxx',(None,None))
)
self.msp = msp430x2xx.MSP430x2xx()
for x in testdata:
self.assertEqual(self.msp._GetDestination(x[0]),x[1])
def test_GetNextWord(self):
testdata = (
# (val, return)
('R6',None), # Register mode
('0x1234(R6)',0x1234), # Indexed mode
('0x1234',0x1234), # Synbolic mode
('&0x1234',0x1234), # Absolute mode
('@R6',None), # Indirect register mode
('@R6+',None), # Indirect autoincrement
('#0x1234',0x1234), # Immediate mode
('xxxxxx',None)
)
self.msp = msp430x2xx.MSP430x2xx()
for x in testdata:
self.assertEqual(self.msp._GetNextWord(x[0]),x[1])
def test_IsConstangGenerator(self):
testdata = (
# (val, return)
('#4',True),
('#8',True),
('#0',True),
('#1',True),
('#2',True),
('#-1',True),
)
self.msp = msp430x2xx.ConstantGeneratorRegister()
for x in testdata:
self.assertEqual(self.msp.IsConstangGenerator(x[0]),x[1])
def test_GetCGval(self):
testdata = (
# (val, return)
('#4',(2,2)),
('#8',(2,3)),
('#0',(3,0)),
('#1',(3,1)),
('#2',(3,2)),
('#-1',(3,3)),
)
self.msp = msp430x2xx.ConstantGeneratorRegister()
for x in testdata:
self.assertEqual(self.msp.GetCGval(x[0]),x[1])
def test_GetIntValue(self):
testdata = (
# (val, return)
('0xf',0xf),
('0xff',0xff),
('0xfff',0xfff),
('0xffff',0xffff),
('#0xf',0xf),
('#0xff',0xff),
('#0xfff',0xfff),
('#0xffff',0xffff),
('#0xfffff',0xfffff)
)
for x in testdata:
self.assertEqual(msp430x2xx.GetIntValue(x[0]),x[1])
def test_asm(self):
testdata = [
# (val, return)
["MOV #0x280,SP ", [0x3140 ,0x8002 ]],
["MOV #0x5a80,&0x0120 ", [0xb240 ,0x805a ,0x2001 ]],
["MOV 0xf880(R15),0x200(R15)", [0x9f4f ,0x80f8 ,0x0002 ]],
["BIS.B #0x41,&0x0022 ", [0xf2d0 ,0x4100 ,0x2200 ]],
["BIS.B #0x41,&0x0021 ", [0xf2d0 ,0x4100 ,0x2100 ]],
["MOV #0x280,SP ", [0x3140 ,0x8002 ]],
["MOV #0x5a80,&0x0120 ", [0xb240 ,0x805a ,0x2001 ]],
["BIS.B #0x41,&0x0022 ", [0xf2d0 ,0x4100 ,0x2200 ]],
["BIS.B #0x41,&0x0021 ", [0xf2d0 ,0x4100 ,0x2100 ]],
["BIS.B #0x20,&0x0053 ", [0xf2d0 ,0x2000 ,0x5300 ]],
["MOV #0x110,&0x0160 ", [0xb240 ,0x1001 ,0x6001 ]],
["MOV #0x10,&0x0162 ", [0xb240 ,0x1000 ,0x6201 ]],
["MOV #0x2edf,&0x0172 ", [0xb240 ,0xdf2e ,0x7201 ]],
["MOV #0x8,R15 ", [0x3f42 ]],
["MOV R15,SR ", [0x024f ]],
["JMP 0xf874 ", [0x74fc ]],
["XOR.B #0x41,&0x0021 ", [0xf2e0 ,0x4100 ,0x2100 ]],
["RETI ", [0x0013 ]],
]
self.msp = msp430x2xx.MSP430x2xx()
for x in testdata:
self.assertEqual(self.msp.asm(x[0]),x[1])
def test_MakeErrorMsg(self):
testdata = (
# (val, return)
(['xxx'],'xxx \n^\nerror'),
(['xxx','xxx'],'xxx xxx \n^\nerror'),
(['xxx','xxx','xxx'],'xxx xxx xxx \n^\nerror'),
(['xxx','xxx','xxx','xxx'],'xxx xxx xxx xxx \n^\nerror'),
)
self.msp = msp430x2xx.MSP430x2xx()
for x in testdata:
self.assertEqual(self.msp._MakeErrorMsg(x[0], 0 ,'error'),x[1])
|
[
"melody.pea@gmail.com"
] |
melody.pea@gmail.com
|
1081525b20718c558ae7b855202fe7f403650bfd
|
4ae2cb9df3a5c82ffe4d311be999e3667245a24d
|
/askdjango/accounts/models.py
|
4c9fd309070889ca485a873b0280385942848718
|
[] |
no_license
|
lowelllll/AskDjango
|
e93ff0a6cfb8a8105589736c86f3987334e4be3a
|
c8aa0910e1e505e8da217c393279e275e2a0bfcc
|
refs/heads/master
| 2020-03-27T08:29:29.156731
| 2018-09-21T15:35:10
| 2018-09-21T15:35:10
| 146,260,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
# account/models.py
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
class Profile(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL)  # a good approach
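    # On Django >= 2.0 this field would also need an explicit on_delete, e.g.
    # models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)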
phone_number = models.CharField(max_length=20)
address = models.CharField(max_length=50)
|
[
"32219612+lowelllll@users.noreply.github.com"
] |
32219612+lowelllll@users.noreply.github.com
|
e66548766cb65f8016bcbe09fd86be83198c7a03
|
1845a3022ae11eccea0e2a177eca540c8b0b1874
|
/backend/backend/settings.py
|
0a6d26025ad38a75cdb1f2faf668606abff0004b
|
[] |
no_license
|
looogin/nuxt-django-graphql-example
|
f89310a20774f54f27c55bdfaa764e6a95b192c2
|
cd4f22ab2e3779a886c1a5b3220a854f5ce57225
|
refs/heads/master
| 2023-05-14T15:45:14.991632
| 2020-03-16T16:48:47
| 2020-03-16T16:48:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,553
|
py
|
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from corsheaders.defaults import default_headers
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@)2hbrz*5p#ee4&p(@(zt#r%*q(jt4&8c78%*+e@jeosw+#z(h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'backend.todo_list',
'corsheaders'
]
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = default_headers + ('cache-control', 'cookies')
CORS_ORIGIN_ALLOW_ALL = True
GRAPHENE = {
'SCHEMA': 'backend.backend.api.schema',
}
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'backend.backend.api.CustomCsrfMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"vlzvoice@ya.ru"
] |
vlzvoice@ya.ru
|
27acb2bfa3bcd96b23f7189d87bd2506602cbb26
|
3e06b430d27f349f6b8013a5389d2a8b908aa0ad
|
/test/test_resources.py
|
6ba68bc0d750aaa4a7062b2b9479011ede40ecf1
|
[] |
no_license
|
vivanmig/QGIS
|
8d5c38dcaa5886a442508d2e262e43c19eef91e6
|
ce515850b4781eced91c378074258d2b23506a3f
|
refs/heads/master
| 2021-08-08T02:27:55.459661
| 2017-11-09T10:23:29
| 2017-11-09T10:23:29
| 102,602,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
# coding=utf-8
"""Resources test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'huntboy320@gmail.com'
__date__ = '2017-06-06'
__copyright__ = 'Copyright 2017, Wei-an V. Hung'
import unittest
from PyQt4.QtGui import QIcon
class ExportToGEResourcesTest(unittest.TestCase):
    """Test that resources work."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_icon_png(self):
"""Test we can click OK."""
path = ':/plugins/ExportToGE/icon.png'
icon = QIcon(path)
self.assertFalse(icon.isNull())
if __name__ == "__main__":
suite = unittest.makeSuite(ExportToGEResourcesTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
[
"noreply@github.com"
] |
vivanmig.noreply@github.com
|
42ee0a4f5c27323df56050638cc350e97c1a135f
|
0db67bff1f2dcdadecf635ae535add91cb54c4f3
|
/PythonBasis/week05/task25.py
|
c10b2ed687ac7f1e55694b59631c2e09409e381d
|
[] |
no_license
|
pavelbrnv/Coursera
|
713fdb79dbf6fbde405fc991bd67db0cab30da00
|
cc568f79229147866ff1df8539cf8ea66dc9ccca
|
refs/heads/master
| 2023-03-07T23:21:09.685318
| 2021-02-22T15:08:27
| 2021-02-22T15:08:27
| 336,600,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 201
|
py
|
inputs = input().split()
inputsLen = len(inputs)
for i in range(0, inputsLen // 2):
temp = inputs[i]
inputs[i] = inputs[inputsLen - i - 1]
inputs[inputsLen - i - 1] = temp
print(*inputs)
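
# Note: an idiomatic in-place swap needs no temporary variable:
#     inputs[i], inputs[inputsLen - i - 1] = inputs[inputsLen - i - 1], inputs[i]
# and the whole task can also be written as: print(*reversed(input().split()))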
|
[
"pbaranov@stc-spb.ru"
] |
pbaranov@stc-spb.ru
|
1f23d7df94af40edea584189a763e1366e0aafc4
|
9d5ae2850f5d74fe9f47055a6e278b70d3a90ebf
|
/ScraperStrategies/HtmlRetrievers/HtmlRetriever.py
|
b0d77d4b7255f8016536d8b01f21e4c4be81efd4
|
[] |
no_license
|
bje43/PodiumInterview
|
4eba47fb3b27d00ec7dfa100b9ce8f6105232506
|
b4248cdf8bd7be07fb270152b1450ba6035e7297
|
refs/heads/master
| 2021-01-19T16:33:32.697870
| 2017-08-23T00:48:24
| 2017-08-23T00:48:24
| 100,838,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
py
|
import requests
from bs4 import BeautifulSoup
from ScraperStrategies.HtmlRetrievers.HtmlRetrieverBase import HtmlRetrieverBase
class HtmlRetriever(HtmlRetrieverBase):
def retrieve_page_content(self, web_url):
try:
page = requests.get(web_url)
page_content = BeautifulSoup(page.content, 'html.parser')
except requests.exceptions.RequestException:
page_content = None
return page_content
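
# Usage sketch (hypothetical URL): retrieve_page_content returns a parsed
# BeautifulSoup document, or None if the request failed.
# retriever = HtmlRetriever()
# soup = retriever.retrieve_page_content('https://example.com')
# if soup is not None:
#     print(soup.title)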
|
[
"bentzjedwards@yahoo.com"
] |
bentzjedwards@yahoo.com
|
f19ea99f2120f246069b26e0cc42f0c7eda2b2b8
|
63da0c2b37595f1cda5900405fa9fb8b14169909
|
/0x04-python-more_data_structures/dev/5-main.py
|
a63b9c221c48c0b9de472df2a00c5ebd53b75f2b
|
[
"MIT"
] |
permissive
|
johncoleman83/bootcampschool-higher_level_programming
|
113800b199ff493beb1c734f9fbf4660624f94b8
|
a83c3b7092cfe893c87e495f8d8eec9228c9b808
|
refs/heads/master
| 2022-11-19T13:37:50.305680
| 2020-07-24T04:19:39
| 2020-07-24T04:19:39
| 89,968,863
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
#!/usr/bin/python3
number_keys = __import__('5-number_keys').number_keys
my_dict = { 'language': "C", 'number': 13, 'track': "Low level" }
nb_keys = number_keys(my_dict)
print("Number of keys: {:d}".format(nb_keys))
|
[
"lcsw@davidjohncoleman.com"
] |
lcsw@davidjohncoleman.com
|
1365c5f14b6cbcb3daef01803cf7b5b140b87c64
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Opencv_pil/source36/numpy/lib/arraysetops.py
|
fd64ecbd64b130b6253b4ad370bccf39e5849256
|
[
"MIT",
"GPL-3.0-or-later",
"BSD-3-Clause",
"GCC-exception-3.1",
"Apache-2.0",
"BSD-3-Clause-Open-MPI",
"BSD-2-Clause",
"Python-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 24,049
|
py
|
"""
Set operations for arrays based on sorting.
:Contains:
unique,
isin,
ediff1d,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
from __future__ import division, absolute_import, print_function
import functools
import numpy as np
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d', 'isin'
]
def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
return (ary, to_end, to_begin)
@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : array_like
If necessary, will be flattened before the differences are taken.
to_end : array_like, optional
Number(s) to append at the end of the returned differences.
to_begin : array_like, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
See Also
--------
diff, gradient
Notes
-----
When applied to masked arrays, this function drops the mask information
if the `to_begin` and/or `to_end` parameters are used.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1, 2, 3, -7])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
array([-99, 1, 2, 3, -7, 88, 99])
The returned array is always 1D.
>>> y = [[1, 2, 4], [1, 6, 24]]
>>> np.ediff1d(y)
array([ 1, 2, -3, 5, 18])
"""
# force a 1d array
ary = np.asanyarray(ary).ravel()
# we have unit tests enforcing
# propagation of the dtype of input
# ary to returned result
dtype_req = ary.dtype
# fast track default case
if to_begin is None and to_end is None:
return ary[1:] - ary[:-1]
if to_begin is None:
l_begin = 0
else:
to_begin = np.asanyarray(to_begin)
if not np.can_cast(to_begin, dtype_req):
raise TypeError("dtype of to_begin must be compatible "
"with input ary")
to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
to_end = np.asanyarray(to_end)
if not np.can_cast(to_end, dtype_req):
raise TypeError("dtype of to_end must be compatible "
"with input ary")
to_end = to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
l_diff = max(len(ary) - 1, 0)
result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
result = ary.__array_wrap__(result)
if l_begin > 0:
result[:l_begin] = to_begin
if l_end > 0:
result[l_begin + l_diff:] = to_end
np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
return result
def _unpack_tuple(x):
""" Unpacks one-element tuples for use as return values """
if len(x) == 1:
return x[0]
else:
return x
def _unique_dispatcher(ar, return_index=None, return_inverse=None,
return_counts=None, axis=None):
return (ar,)
@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : array_like
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
.. versionadded:: 1.9.0
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. Object arrays or structured arrays
that contain objects are not supported if the `axis` kwarg is used. The
default is None.
.. versionadded:: 1.13.0
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. versionadded:: 1.9.0
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1, 2, 3])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1, 0, 0], [2, 3, 4]])
Return the indices of the original array that give the unique values:
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array(['a', 'b', 'c'],
dtype='|S1')
>>> indices
array([0, 1, 3])
>>> a[indices]
array(['a', 'b', 'c'],
dtype='|S1')
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> indices
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
array([1, 2, 6, 4, 2, 3, 2])
"""
ar = np.asanyarray(ar)
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts)
return _unpack_tuple(ret)
# axis was specified and not None
try:
ar = np.swapaxes(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim)
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
ar = ar.reshape(orig_shape[0], -1)
ar = np.ascontiguousarray(ar)
dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
try:
consolidated = ar.view(dtype)
except TypeError:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
raise TypeError(msg.format(dt=ar.dtype))
def reshape_uniq(uniq):
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(-1, *orig_shape[1:])
uniq = np.swapaxes(uniq, 0, axis)
return uniq
output = _unique1d(consolidated, return_index,
return_inverse, return_counts)
output = (reshape_uniq(output[0]),) + output[1:]
return _unpack_tuple(output)
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
"""
Find the unique elements of an array, ignoring shape.
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
mask = np.empty(aux.shape, dtype=np.bool_)
mask[:1] = True
mask[1:] = aux[1:] != aux[:-1]
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = np.cumsum(mask) - 1
inv_idx = np.empty(mask.shape, dtype=np.intp)
inv_idx[perm] = imask
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
ret += (np.diff(idx),)
return ret
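# Illustrative trace (not part of the original source): on a small sorted
# input, the boolean-mask deduplication above behaves as
#   aux = [1, 1, 2, 3, 3]  ->  mask = [T, F, T, T, F]  ->  aux[mask] = [1, 2, 3]
# `perm[mask]` then gives the first-occurrence indices, and `cumsum(mask) - 1`
# the labels used to build the inverse mapping.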
def _intersect1d_dispatcher(
ar1, ar2, assume_unique=None, return_indices=None):
return (ar1, ar2)
@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
Return the sorted, unique values that are in both of the input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
return_indices : bool
If True, the indices which correspond to the intersection of the two
arrays are returned. The first instance of a value is used if there are
multiple. Default is False.
.. versionadded:: 1.15.0
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
comm1 : ndarray
The indices of the first occurrences of the common values in `ar1`.
Only provided if `return_indices` is True.
comm2 : ndarray
The indices of the first occurrences of the common values in `ar2`.
Only provided if `return_indices` is True.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
array([1, 3])
To intersect more than two arrays, use functools.reduce:
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
To return the indices of the values common to the input arrays
along with the intersected values:
>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
>>> x_ind, y_ind
(array([0, 2, 4]), array([1, 0, 2]))
>>> xy, x[x_ind], y[y_ind]
(array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
"""
ar1 = np.asanyarray(ar1)
ar2 = np.asanyarray(ar2)
if not assume_unique:
if return_indices:
ar1, ind1 = unique(ar1, return_index=True)
ar2, ind2 = unique(ar2, return_index=True)
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
else:
ar1 = ar1.ravel()
ar2 = ar2.ravel()
aux = np.concatenate((ar1, ar2))
if return_indices:
aux_sort_indices = np.argsort(aux, kind='mergesort')
aux = aux[aux_sort_indices]
else:
aux.sort()
mask = aux[1:] == aux[:-1]
int1d = aux[:-1][mask]
if return_indices:
ar1_indices = aux_sort_indices[:-1][mask]
ar2_indices = aux_sort_indices[1:][mask] - ar1.size
if not assume_unique:
ar1_indices = ind1[ar1_indices]
ar2_indices = ind2[ar2_indices]
return int1d, ar1_indices, ar2_indices
else:
return int1d
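# Illustrative trace (not part of the original source): after deduplication, a
# value common to both inputs appears exactly twice in the sorted
# concatenation, so it is caught by the adjacent-equality mask, e.g.
#   unique(ar1) = [1, 3, 4], unique(ar2) = [1, 2, 3]
#   sorted concatenation = [1, 1, 2, 3, 3, 4]  ->  int1d = [1, 3]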
def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
return (ar1, ar2)
@array_function_dispatch(_setxor1d_dispatcher)
def setxor1d(ar1, ar2, assume_unique=False):
"""
Find the set exclusive-or of two arrays.
Return the sorted, unique values that are in only one (not both) of the
input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
setxor1d : ndarray
Sorted 1D array of unique values that are in only one of the input
arrays.
Examples
--------
>>> a = np.array([1, 2, 3, 2, 4])
>>> b = np.array([2, 3, 5, 7, 5])
>>> np.setxor1d(a,b)
array([1, 4, 5, 7])
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = np.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
return aux[flag[1:] & flag[:-1]]
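# Illustrative trace (not part of the original source): `flag` marks positions
# whose neighbour differs, and keeping elements where both the left and right
# flags are True selects exactly the values occurring once overall, e.g.
#   aux = [1, 2, 2, 3]  ->  flag = [T, T, F, T, T]
#   flag[1:] & flag[:-1] = [T, F, F, T]  ->  result = [1, 3]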
def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None):
return (ar1, ar2)
@array_function_dispatch(_in1d_dispatcher)
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of a 1-D array is also present in a second array.
Returns a boolean array the same length as `ar1` that is True
where an element of `ar1` is in `ar2` and False otherwise.
We recommend using :func:`isin` instead of `in1d` for new code.
Parameters
----------
ar1 : (M,) array_like
Input array.
ar2 : array_like
The values against which to test each value of `ar1`.
assume_unique : bool, optional
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned array are inverted (that is,
False where an element of `ar1` is in `ar2` and True otherwise).
Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
to (but is faster than) ``np.invert(in1d(a, b))``.
.. versionadded:: 1.8.0
Returns
-------
in1d : (M,) ndarray, bool
The values `ar1[in1d]` are in `ar2`.
See Also
--------
isin : Version of this function that preserves the
shape of ar1.
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Notes
-----
`in1d` can be considered as an element-wise function version of the
python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
equivalent to ``np.array([item in b for item in a])``.
However, this idea fails if `ar2` is a set, or similar (non-sequence)
container: As ``ar2`` is converted to an array, in those cases
``asarray(ar2)`` is an object array rather than the expected array of
contained values.
.. versionadded:: 1.4.0
Examples
--------
>>> test = np.array([0, 1, 2, 5, 0])
>>> states = [0, 2]
>>> mask = np.in1d(test, states)
>>> mask
array([ True, False, True, False, True])
>>> test[mask]
array([0, 2, 0])
>>> mask = np.in1d(test, states, invert=True)
>>> mask
array([False, True, False, True, False])
>>> test[mask]
array([1, 5])
"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# Check if one of the arrays may contain arbitrary objects
contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
# This code is run when
# a) the first condition is true, making the code significantly faster
# b) the second condition is true (i.e. `ar1` or `ar2` may contain
# arbitrary objects), since then sorting is not guaranteed to work
if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
if invert:
mask = np.ones(len(ar1), dtype=bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
ret = np.empty(ar.shape, dtype=bool)
ret[order] = flag
if assume_unique:
return ret[:len(ar1)]
else:
return ret[rev_idx]
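# Illustrative note (not part of the original source): because the mergesort
# is stable, each `ar1` value lands immediately before any equal `ar2` value,
# so `sar[1:] == sar[:-1]` is True exactly at `ar1` positions whose value also
# occurs in `ar2`; scattering `flag` back through `order` then restores the
# original ordering of `ar1`.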
def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None):
return (element, test_elements)
@array_function_dispatch(_isin_dispatcher)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
Returns a boolean array of the same shape as `element` that is True
where an element of `element` is in `test_elements` and False otherwise.
Parameters
----------
element : array_like
Input array.
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if it is an array or array_like.
See notes for behavior with non-array-like parameters.
assume_unique : bool, optional
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned array are inverted, as if
calculating `element not in test_elements`. Default is False.
``np.isin(a, b, invert=True)`` is equivalent to (but faster
than) ``np.invert(np.isin(a, b))``.
Returns
-------
isin : ndarray, bool
Has the same shape as `element`. The values `element[isin]`
are in `test_elements`.
See Also
--------
in1d : Flattened version of this function.
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Notes
-----
`isin` is an element-wise function version of the python keyword `in`.
``isin(a, b)`` is roughly equivalent to
``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
`element` and `test_elements` are converted to arrays if they are not
already. If `test_elements` is a set (or other non-sequence collection)
it will be converted to an object array with one element, rather than an
array of the values contained in `test_elements`. This is a consequence
of the `array` constructor's way of handling non-sequence collections.
Converting the set to a list usually gives the desired behavior.
.. versionadded:: 1.13.0
Examples
--------
>>> element = 2*np.arange(4).reshape((2, 2))
>>> element
array([[0, 2],
[4, 6]])
>>> test_elements = [1, 2, 4, 8]
>>> mask = np.isin(element, test_elements)
>>> mask
array([[ False, True],
[ True, False]])
>>> element[mask]
array([2, 4])
The indices of the matched values can be obtained with `nonzero`:
>>> np.nonzero(mask)
(array([0, 1]), array([1, 0]))
The test can also be inverted:
>>> mask = np.isin(element, test_elements, invert=True)
>>> mask
array([[ True, False],
[ False, True]])
>>> element[mask]
array([0, 6])
Because of how `array` handles sets, the following does not
work as expected:
>>> test_set = {1, 2, 4, 8}
>>> np.isin(element, test_set)
array([[ False, False],
[ False, False]])
Casting the set to a list gives the expected result:
>>> np.isin(element, list(test_set))
array([[ False, True],
[ True, False]])
"""
element = np.asarray(element)
return in1d(element, test_elements, assume_unique=assume_unique,
invert=invert).reshape(element.shape)
def _union1d_dispatcher(ar1, ar2):
return (ar1, ar2)
@array_function_dispatch(_union1d_dispatcher)
def union1d(ar1, ar2):
"""
Find the union of two arrays.
Return the unique, sorted array of values that are in either of the two
input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays. They are flattened if they are not already 1D.
Returns
-------
union1d : ndarray
Unique, sorted union of the input arrays.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.union1d([-1, 0, 1], [-2, 0, 2])
array([-2, -1, 0, 1, 2])
To find the union of more than two arrays, use functools.reduce:
>>> from functools import reduce
>>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([1, 2, 3, 4, 6])
"""
return unique(np.concatenate((ar1, ar2), axis=None))
def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
return (ar1, ar2)
@array_function_dispatch(_setdiff1d_dispatcher)
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Find the set difference of two arrays.
Return the unique values in `ar1` that are not in `ar2`.
Parameters
----------
ar1 : array_like
Input array.
ar2 : array_like
Input comparison array.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is False.
Returns
-------
setdiff1d : ndarray
1D array of values in `ar1` that are not in `ar2`. The result
is sorted when `assume_unique=False`, but otherwise only sorted
if the input is sorted.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> a = np.array([1, 2, 3, 2, 4, 1])
>>> b = np.array([3, 4, 5, 6])
>>> np.setdiff1d(a, b)
array([1, 2])
"""
if assume_unique:
ar1 = np.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
|
[
"ryfeus@gmail.com"
] |
ryfeus@gmail.com
|
8288c6b92f4dc334943901a405c4730207676272
|
0879b7f63a6299087e9f2498de8aa3151439520b
|
/tournament.py
|
5c9af9591d5a7812a06b42c73329f8189ae72190
|
[] |
no_license
|
tomrhoads/Swiss-Style-Tournament
|
00f344bc833271bce278f77131647c334b4718f5
|
1655ef28e1264c2bf6d47dd57cf984e70a7cad3a
|
refs/heads/master
| 2016-09-13T18:40:39.416191
| 2016-05-13T04:05:05
| 2016-05-13T04:05:05
| 58,670,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,013
|
py
|
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament")
def deleteMatches():
"""Remove all the match records from the database."""
db = connect()
c = db.cursor()
query = "UPDATE Players SET score = 0, matches = 0;"
c.execute(query)
db.commit()
db.close()
def deletePlayers():
"""Remove all the player records from the database."""
db = connect()
c = db.cursor()
query = "DELETE FROM Players;"
c.execute(query)
db.commit()
db.close()
def countPlayers():
"""Returns the number of players currently registered."""
db = connect()
c = db.cursor()
query = "SELECT count(*) FROM Players;"
c.execute(query)
count = c.fetchone()[0]
db.close()
return count
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
db = connect()
c = db.cursor()
query = "INSERT INTO Players (playername, score, matches) \
VALUES (%s,0,0);"
c.execute(query, (name,))
db.commit()
db.close()
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a
player tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
db = connect()
c = db.cursor()
playerwins = "SELECT Players.id, Players.playername, Players.score,\
Players.matches \
FROM Players \
ORDER BY Players.score DESC;"
c.execute(playerwins)
leaderboard = [(row[0], row[1], row[2], row[3])
for row in c.fetchall()]
db.close()
return leaderboard
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
db = connect()
c = db.cursor()
won = "UPDATE Players SET score = score + 1, matches = matches + 1 \
WHERE Players.id = %s"
lost = "UPDATE Players SET matches = matches + 1 \
WHERE Players.id = %s"
c.execute(won, (winner,))
c.execute(lost, (loser,))
db.commit()
db.close()
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name"""
playerstats = playerStandings()
pairings = []
i = 0
while (i < len(playerstats)):
player1id = playerstats[i][0]
player1name = playerstats[i][1]
player2id = playerstats[i+1][0]
player2name = playerstats[i+1][1]
pairings.append((player1id, player1name, player2id, player2name))
i += 2
return pairings
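# Illustrative sketch (not part of the original module): with an even number
# of players, adjacent standings are paired off, e.g. standings
# [(1, 'A', ..), (2, 'B', ..), (3, 'C', ..), (4, 'D', ..)] yield
# [(1, 'A', 2, 'B'), (3, 'C', 4, 'D')]; an odd player count would raise an
# IndexError, matching the even-count assumption stated in the docstring.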
|
[
"terhoads1@gmail.com"
] |
terhoads1@gmail.com
|
9adf946312bd44d558482bdd7ab94fa152e7bbdd
|
f4b263dc58c70c66c92613c0db3247d3f8d66605
|
/tfdqn.py
|
e3c3cc2c8d7eeb714831d31b4958ecdd90d2bb9c
|
[] |
no_license
|
AngelLittleChaochao/cartpole
|
93a4e4eb6c13d3fe6d88694dd4b357d6e1d91991
|
03b0abdde2b3ae58cd77ec393e072c3df3222feb
|
refs/heads/master
| 2020-03-25T17:30:42.076678
| 2018-08-28T06:58:06
| 2018-08-28T06:58:06
| 143,981,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,548
|
py
|
# Cartpole DQN
# Deep Q-Learning Network with TensorFlow and OpenAI Gym, based on Keon Kim's
# Keras code (https://github.com/keon/deep-q-learning/blob/master/dqn.py).
import random
import gym
import numpy as np
from collections import deque
import tensorflow as tf
import os
env = gym.make('CartPole-v0') # initialise environment
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
batch_size = 32
n_episodes = 1001 # n games we want agent to play (default 1001)
output_dir = 'model_output/cartpole/'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
#### Define agent
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
# double-ended queue; acts like list, but elements can be added/removed from either end
self.memory = deque(maxlen=2000)
# decay or discount rate: enables agent to take into account future actions in addition to the immediate ones, but discounted at this rate
self.gamma = 0.95
# exploration rate: how much to act randomly; more initially than later due to epsilon decay
self.epsilon = 1.0
# decrease number of random explorations as the agent's performance (hopefully) improves over time
self.epsilon_decay = 0.995
self.epsilon_min = 0.01 # minimum amount of random exploration permitted
self.learning_rate = 0.001 # rate at which NN adjusts models parameters via SGD to reduce cost
        self._build_model()  # private method; builds the TF graph and session
def _build_model(self):
self.sess = tf.Session()
# neural net to approximate Q-value function:
c_names = ['train_net', tf.GraphKeys.GLOBAL_VARIABLES]
self.s = tf.placeholder(
tf.float32, [None, self.state_size], name='s') # input
self.a = tf.placeholder(
tf.float32, [None, self.action_size], name='a') # input
w1 = tf.get_variable(
'w1', [self.state_size, 24],
initializer=tf.random_normal_initializer(0., 0.3),
collections=c_names)
b1 = tf.get_variable(
'b1', [1, 24],
initializer=tf.constant_initializer(0.1),
collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
w2 = tf.get_variable(
'w2', [24, 24],
initializer=tf.random_normal_initializer(0., 0.3),
collections=c_names)
b2 = tf.get_variable(
'b2', [1, 24],
initializer=tf.constant_initializer(0.1),
collections=c_names)
l2 = tf.nn.relu(tf.matmul(l1, w2) + b2)
        w3 = tf.get_variable(
            'w3', [24, self.action_size],
            initializer=tf.random_normal_initializer(0., 0.3),
            collections=c_names)
        b3 = tf.get_variable(
            'b3', [1, self.action_size],
            initializer=tf.constant_initializer(0.1),
            collections=c_names)
self.l3 = tf.matmul(l2, w3) + b3
self.loss = tf.reduce_mean(tf.squared_difference(self.l3, self.a))
self.optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(self.loss)
self.sess.run(tf.global_variables_initializer())
def remember(self, state, action, reward, next_state, done):
self.memory.append(
(state, action, reward, next_state,
done)) # list of previous experiences, enabling re-training later
def act(self, state):
if np.random.rand(
) <= self.epsilon: # if acting randomly, take random action
return random.randrange(self.action_size)
#act_values = self.model.predict(state)
act_values = self.sess.run(self.l3, feed_dict={self.s: state})
a = np.argmax(act_values[0])
# print('act_values:', act_values[0])
return a
# method that trains NN with experiences sampled from memory
def replay(self, batch_size):
minibatch = random.sample(self.memory,
batch_size) # sample a minibatch from memory
for state, action, reward, next_state, done in minibatch: # extract data for each minibatch sample
target = reward # if done (boolean whether game ended or not, i.e., whether final state or not), then target = reward
if not done: # if not done, then predict future discounted reward
# (target) = reward + (discount rate gamma) * max(next_state value)
                res = self.sess.run(self.l3, feed_dict={self.s: next_state})[0]
                target = reward + self.gamma * np.amax(res)
# target_f = self.model.predict(state)
target_f = self.sess.run(self.l3, feed_dict={self.s: state})
target_f[0][action] = target
# target_f[0][action] = target
self.sess.run(
[self.optimizer, self.loss],
feed_dict={
self.s: state,
self.a: target_f
})
# self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
    def load(self, name):
        saver = tf.train.Saver()
        saver.restore(self.sess, name)
def save(self, name):
saver = tf.train.Saver()
save_path = saver.save(self.sess, name)
print('save name:', save_path)
agent = DQNAgent(state_size, action_size) # initialise agent
done = False
for e in range(n_episodes): # iterate over new episodes of the game
state = env.reset() # reset state at start of each new episode of the game
state = np.reshape(state, [1, state_size])
for time in range(5000):
# env.render()
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
reward = reward if not done else -10
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
print("episode: {}/{}, score: {}, e: {:.2}".format(
e, n_episodes, time, agent.epsilon))
break
if len(agent.memory) > batch_size:
agent.replay(batch_size)
if e % 50 == 0:
agent.save(output_dir + "weights_" + '{:04d}'.format(e) + '.ckpt')
|
[
"chtang@microsoft.com"
] |
chtang@microsoft.com
|
04bec9d1f2c2b1b044facd42d45daf9ce836dce2
|
646ee98e184fd8aa38ba6c3f92455484ba159a24
|
/commitizen/out.py
|
268f02e29f84c4418923c7ca152d9a5d9f6887bd
|
[
"MIT"
] |
permissive
|
SHAQ522/commitizen
|
499e8fa5d6bd9e33c5d53fedd1ae992564c20d74
|
c51a521d8117eb7778b9bb64190ef144c600305e
|
refs/heads/master
| 2023-01-31T06:25:41.392565
| 2020-12-10T07:35:02
| 2020-12-10T07:35:02
| 321,911,238
| 0
| 0
|
MIT
| 2020-12-16T08:35:13
| 2020-12-16T08:03:11
| null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
import sys
from termcolor import colored
def write(value: str, *args):
"""Intended to be used when value is multiline."""
print(value, *args)
def line(value: str, *args, **kwargs):
"""Wrapper in case I want to do something different later."""
print(value, *args, **kwargs)
def error(value: str):
message = colored(value, "red")
line(message, file=sys.stderr)
def success(value: str):
message = colored(value, "green")
line(message)
def info(value: str):
message = colored(value, "blue")
line(message)
|
[
"santiwilly@gmail.com"
] |
santiwilly@gmail.com
|
e295eca96113a81568a2bb8075589226975c7511
|
206a5cf8f243016b6ad9cf5183622d1f97128003
|
/construction/project-master/project/urls.py
|
d9af7a88a31d5f0577beb9afcefc8fd3c10c9eba
|
[] |
no_license
|
krishabista/project
|
cd70da033c42f8fc925b0cbe13c70f0faf904245
|
845452ec146f9fac32ff729534dacf2c1570856a
|
refs/heads/main
| 2022-12-24T22:35:30.735418
| 2020-10-12T12:02:29
| 2020-10-12T12:02:29
| 303,381,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,219
|
py
|
from django.urls import include, path
from django.contrib import admin
from mains.views import HomeView, about_view, privacy_policy_view, ContactView, PurchaseHistoryView
from django.conf import settings
from django.conf.urls.static import static
admin.site.site_header = "FOUR SQUARE RENTAL"
admin.site.site_title = "FOUR SQUARE RENTAL Admin Portal"
admin.site.index_title = "Welcome to FOUR SQUARE RENTAL Portal"
urlpatterns = [
path("admin/", admin.site.urls),
path("contact/", ContactView.as_view(), name="contact"),
path("agents/", include("agents.urls", namespace="agents")),
path("", HomeView.as_view(), name="home"),
path("property/",include('properties.urls',namespace='properties')),
path("purchase-history/", PurchaseHistoryView.as_view(), name="purchase_history"),
path("accounts/", include('allauth.urls')),
path("about/", about_view,name="about"),
path("privacy_policy/", privacy_policy_view,name="privacy_policy"),
path("blog/", include('posts.urls', namespace="posts")),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"krishabista47@gmail.com"
] |
krishabista47@gmail.com
|
0a877cff2fd1d8bb9fc7016528c63e51d6f188fb
|
c46e923f4bde5d94c5f057193dfd63769d8dc83e
|
/utils.py
|
e9b0882eef04650e167bd09534c3781254ddccbf
|
[] |
no_license
|
tkolanka/ece285_mlip_projectA
|
8ec7cb354e3ac5c5b9f3527e5cee98d3cee0fe1c
|
3e6f89f4be8a0e4ddeaca7c6593c4d9eca6f66c5
|
refs/heads/master
| 2020-09-28T13:34:13.826420
| 2019-12-10T07:12:16
| 2019-12-10T07:12:16
| 226,788,649
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
'''
This file contains some generic functions/classes that are needed in the overall implementation. It contains definitions of the accuracy function needed during training and validation. It also has a function to compute the actual length of a caption.
'''
import torch
import torchvision as tv
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
# Transform to be applied on the image prior to processing - Used for the data visualization step
data_transforms = tv.transforms.Compose([
tv.transforms.Resize((224, 224)),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
class AverageMeter(object):
'''
Taken from https://github.com/pytorch/examples/blob/master/imagenet/main.py
This class can be used to maintain average statistics over multiple iterations of training and validation.
'''
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(preds, targets, k):
'''
Function to compute the accuracy of prediction.
Arguments:
preds: Predictions of the network
targets: Expected output of the network
k (int): Top k accuracy to be determined
Example: k=1 -> Top 1 accuracy
k=5 -> Top 5 accuracy
'''
batch_size = targets.size(0)
_, pred = preds.topk(k, 1, True, True)
correct = pred.eq(targets.view(-1, 1).expand_as(pred))
correct_total = correct.view(-1).float().sum()
return correct_total.item() * (100.0 / batch_size)
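# Illustrative check (not part of the original file): with
# preds = [[0.1, 0.9], [0.8, 0.2]] and targets = [1, 1], the top-1
# predictions are [1, 0], so accuracy(preds, targets, k=1) returns 50.0.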
def calculate_caption_lengths(word_dict, captions):
'''
Calculate the length of the caption excluding the start, end and padding tokens in the caption.
Arguments:
word_dict (dict): Dictionary of words (vocabulary)
captions (list): List of encoded captions where each entry in the encoded caption is corresponding index from word_dict
'''
lengths = 0
for caption_tokens in captions:
for token in caption_tokens:
if token in (word_dict['<start>'], word_dict['<eos>'], word_dict['<pad>']):
continue
else:
lengths += 1
return lengths
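# Illustrative check (not part of the original file): with
# word_dict = {'<start>': 0, '<eos>': 1, '<pad>': 2, 'a': 3, 'cat': 4} and
# captions = [[0, 3, 4, 1, 2]], only 'a' and 'cat' are counted, giving 2.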
def pil_loader(path):
'''
Load an image from the specified path and convert to RGB
Arguments:
path (str): Complete path of the image that is to be loaded
'''
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def myimshow(image, ax=plt):
'''
    Function to display an input image
Arguments:
image : Tensor or array of image pixel values
'''
image = image.to('cpu').numpy()
image = np.moveaxis(image, [0, 1, 2], [2, 0, 1])
image = (image + 1) / 2
image[image < 0] = 0
image[image > 1] = 1
h = ax.imshow(image)
ax.axis('off')
return h
def generate_caption(enc_caption, word_dict):
'''
Function to create the caption sentence from the encoded caption using the word dictionary
Arguments:
enc_caption (list): Encoded caption in terms of dictionary indices
word_dict (dict): Dictionary of words (vocabulary)
'''
# Using the dictionary, convert the encoded caption to normal words
token_dict = {idx: word for word, idx in word_dict.items()}
sentence_tokens = []
enc_caption = enc_caption.to('cpu').tolist()
for word_idx in enc_caption:
if word_idx == word_dict['<start>']:
continue
if word_idx == word_dict['<eos>']:
break
sentence_tokens.append(token_dict[word_idx])
# Creation of a sentence from the list of words
caption = ''
for word in sentence_tokens:
if word is sentence_tokens[len(sentence_tokens) - 1]:
caption = caption + word + '.'
else:
caption = caption + word + ' '
return caption.capitalize()
|
[
"noreply@github.com"
] |
tkolanka.noreply@github.com
|
55983a2582275b16b108623cdfb9dc73b1b62fc1
|
836cda1852a331c3b98e768a06f5cdfcb1f4c385
|
/python-service/wsgi.py
|
4e67044b0381ebb52a4800ea9bc6dc2a42d9ce06
|
[] |
no_license
|
guifonte/test-microservices-sockets
|
f4081c61e147b0238c8da9148b66ce1a7dd2429b
|
b8083647789103bcf2451a4b8d739a554eb2e9e3
|
refs/heads/master
| 2022-12-15T05:00:30.181165
| 2019-10-11T04:23:03
| 2019-10-11T04:23:03
| 212,253,828
| 1
| 0
| null | 2022-12-11T07:54:29
| 2019-10-02T04:16:44
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 67
|
py
|
from pyservice import app
if __name__ == "__main__":
app.run()
|
[
"guilhermenishifonte@gmail.com"
] |
guilhermenishifonte@gmail.com
|
b6a066f24958cc021788f917a16af6e9e2d2da34
|
483ba045a505b5ffd76f5b999da76ccfd99bb413
|
/queue_moddeling_kolmogorov.py
|
10a52ea8da214f50336afad32ebe7dcc8a79903c
|
[] |
no_license
|
MaximKrokhin/QueueSystemKolmogorov
|
75789f5b3dc7be2029183712399208bf4d463825
|
46c21ddd1a85c90c187d35782e177e63047d78ce
|
refs/heads/master
| 2022-07-04T10:39:19.210766
| 2020-05-17T12:43:09
| 2020-05-17T12:43:09
| 264,662,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,183
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import math
import time
start = time.time()
def rungeKutta(f, t0, p0, tEnd, tau):
    def increment(f, t, p, tau):  # approximate-solution step of the Runge-Kutta-Fehlberg method
k1 = tau * f(t, p)
k2 = tau * f(t + (1/4) * tau, p + (1/4) * k1)
k3 = tau *f(t + (3/8) * tau, p + (3/32) * k1 + (9/32) * k2)
k4 = tau * f(t + (12/13) * tau, p + (1932/2197) * k1 - (7200/2197) * k2 + (7296/2197) * k3)
k5 = tau*f(t + tau, p + (439/216) * k1 - 8 * k2 + (3680/513) * k3 - (845/4104) * k4)
k6 = tau*f(t + (1/2) * tau, p - (8/27) * k1 + 2 * k2 - (3544/2565) * k3 + (1859/4104) * k4 - (11/40) * k5)
return (16/135) * k1 + (6656/12825) * k3 + (28561/56430) * k4 - (9/50) * k5 + (2/55) * k6
    t = []  # prepare an empty list t
    p = []  # prepare an empty list p
    t.append(t0)  # store the initial value t0 in list t
    p.append(p0)  # store the initial value p0 in list p
    while t0 < tEnd:  # accumulate the computed results in the arrays t, p
        tau = min(tau, tEnd - t0)  # clamp the step tau at the end of the interval
        p0 = p0 + increment(f, t0, p0, tau)  # advance the Cauchy-problem solution from (t0, p0)
        t0 = t0 + tau  # advance time
        t.append(t0)  # fill the array t
        p.append(p0)  # fill the array p
return np.array(t),np.array(p)
def f(t, p):  # right-hand side of the Kolmogorov system of equations
func = np.zeros([n+m+1])
summ= 1-sum(p)+p[n+m]
func[0] = -1 * lam * p[0] + n * mu * p[1]
for k in range(1, n+m-1):
func[k] = lam * p[k-1] - (lam + n * mu) * p[k] + n * mu * p[k+1]
func[n + m - 1] = lam * p[n + m - 2] - (lam + n * mu) * p[n + m -1] + n * mu * summ
func[n + m] = lam * p[n + m - 1] - n * mu * summ
return func
n = 5  # initial parameters: number of service channels
m = 6  # number of places in the queue
t0 = 0.0  # initial time
tEnd = 50.0  # final time
p0 = np.zeros([n+m+1])  # initial probabilities
p0[0] = 1.0  # initially the "0 requests" state has probability 1
lam = 0.5  # arrival rate of requests
mu = 0.15  # service rate of requests
tau = 0.01  # step size
hi = lam/(n*mu)  # utilisation (traffic intensity) coefficient
A = lam*(1-hi**(n+m))/(1-hi**(n+m+1))
print("Absolute throughput: ",A)
q = (1-hi**(n+m))/(1-hi**(n+m+1))
print("Relative throughput: ", q)
p_rej = (1-hi)*(hi**(n+m))/(1 - hi**(m+n+1))
print("Probability of rejection: ", p_rej)
p_hold = 1 -((1-hi**m)*(hi**(n+1))/(1-hi**(n+m+1)))
print("Probability of no queue: ", p_hold)
l = (1-(n+m+1)*(hi**(n+m))+(n+m)*(hi**(n+m+1)))*hi/((1-hi)*(1-hi**(n+m+1)))
print("Average number of requests in the system: ", l)
t_serv_avg = ((1-(n+1)*(hi**n)+n*hi**(n+1))/(n*mu*(1-hi)*(1-hi**(n+m+1))))+(((hi**n)*(1-hi**m))/(mu*(1-hi**(n+m+1))))
print("Average service time of a request in the system: ", t_serv_avg)
r = (1-(m+1)*(hi**m)+m*(hi**(m+1)))*hi/((1-hi)*(1-hi**(n+m+1)))
print("Average number of requests in the queue: ", r)
t_queue_avg = (1 - (m+1)*(hi**m)+m*(hi**(m+1)))/(n*mu*(1-hi)*(1-hi**(n+m+1)))
print("Average time a request waits in the queue", t_queue_avg)
t_smo_avg = (1 - (n+m+1)*(hi**(n+m))+(n+m)*(hi**(n+m+1)))/(n*mu*(1-hi)*(1-hi**(n+m+1)))
print("Average time a request spends in the system", t_smo_avg)
t, p = rungeKutta(f, t0, p0, tEnd, tau)
# powers of the traffic intensity hi
pow_hi = np.array([hi**i for i in range(0,n+m+1)])
# vector of limiting (steady-state) probabilities
p_lim=list()
sum_hi = np.sum(pow_hi)
p_lim.append(1/sum_hi)
for i in range (1,n+m+1):
p_lim.append(pow_hi[i]/sum_hi)
print(p_lim, sum(p_lim))
stop = time.time()
print ("Время на модельную задачу: %f"%(stop-start))
fig, ax = plt.subplots(figsize=(10, 8))
plt.plot(t, p)  # probability curves
plt.xlabel('Time')
plt.ylabel('Probability')
ax.set_xlim(0, 50)
ax.set_ylim(0, 1)
ax.xaxis.set_major_locator(MultipleLocator(2))
ax.yaxis.set_major_locator(MultipleLocator(0.05))
ax.xaxis.set_minor_locator(AutoMinorLocator(1))
ax.yaxis.set_minor_locator(AutoMinorLocator(0.01))
ax.grid(which='major', color='#CCCCCC', linestyle='--')
ax.grid(which='minor', color='#CCCCCC', linestyle=':')
plt.grid(True)
plt.show()
|
[
"noreply@github.com"
] |
MaximKrokhin.noreply@github.com
|
6b529ccfff62d136ae9de177963cf1a90ba015e5
|
c42ba06697527e2e5606c40bc3093c9af87a0145
|
/good_logic/LoopForeachList.py
|
8d0537ade2123ff209eebada46773522098aba90
|
[
"Apache-2.0"
] |
permissive
|
YellowTulipShow/PythonScripts
|
2da657e6a0dcee511a4354d9b6072f22deb8eb8a
|
09bf756f08e6b66fe3b13206c3b972c3434e53ee
|
refs/heads/master
| 2020-04-17T17:12:12.297092
| 2020-03-04T08:03:21
| 2020-03-04T08:03:21
| 166,773,201
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 933
|
py
|
# coding: UTF-8
# Traverse every value of a list / nested lists without using recursion
mylist = [[1, 2], 3, 4, 5, 6, [7, 8, 9, [10, 11, 12, 13, 14, [15, 16, 17], 18], 19, 20], [[21, 22, 23], 24, 25], 26]
print(mylist)
resultList = []
while len(mylist) > 0:
    if isinstance(mylist[0], list):
while mylist[0]:
mylist.append(mylist[0].pop(0))
mylist.pop(0)
continue
    # the while loop must still make progress on each iteration
resultList.append(mylist.pop(0))
print("Raw Result Data:")
print(resultList)
# Insertion Sort Method:
def Insertion_Sort(intList):
for j in range(1, len(intList)):
key = intList[j]
i = j - 1
while i >= 0 and intList[i] > key:
intList[i + 1] = intList[i]
i = i - 1
intList[i + 1] = key
return intList
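# Illustrative trace (not part of the original script): Insertion_Sort shifts
# larger elements right until the key finds its slot, e.g.
# [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3].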
print("Sort After:")
resultList = Insertion_Sort(resultList)
print("{} | len: {}".format(resultList, len(resultList)))
|
[
"1426689530@qq.com"
] |
1426689530@qq.com
|
9bb4dae388eae2750af19fb98bb7d4aecd259a20
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_07_30_00_25_05_729510.py
|
ec39c5a645e6d8d5a2b84e50902a5bbc8cdb127d
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 114
|
py
|
import math
def calcula_gaussiana(x,u,si):
    y = (1/(si*math.sqrt(2*math.pi))) * math.exp((-0.5)*((x-u)/si)**2)
    return y
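# Illustrative check (not part of the original exercise): the standard normal
# density at x = 0 is 1/sqrt(2*pi) ~= 0.3989, so calcula_gaussiana(0, 0, 1)
# should return roughly 0.3989.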
|
[
"you@example.com"
] |
you@example.com
|
1c05cfe6f5778998e9c4460b7d8b6602fdd8c6ca
|
19f10d205c7ffa59fa0b6cef089ecf8b0b22431f
|
/app.py
|
05736221b9a60f9c2a075d34c0f335f3961325cc
|
[
"BSD-2-Clause"
] |
permissive
|
jzellman/keras
|
247c75a3fce29ddb8c2393de668919bf5cb11277
|
9a2b4e1bfcebb1c72d0fd0b509fafd263d09aa29
|
refs/heads/master
| 2021-03-13T00:01:39.391344
| 2014-01-01T21:21:12
| 2014-01-01T21:21:12
| 13,747,165
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,362
|
py
|
import csv
from collections import defaultdict
from itertools import groupby
from StringIO import StringIO
from datetime import datetime
import web
from http_basic import basic_auth
import utils
import gcal
import config
from config import db
def fmt_date(date, date2):
return "{} - {}".format(date.strftime('%a %b %d %I:%M %p'),
date2.strftime('%I:%M %p'))
def minutes(delta):
return delta.seconds / 60
def breadcrumbs():
yield ("Clients", "/clients/")
path = web.ctx.fullpath
if "entries" in path:
yield ("Time Entries", path.split("entries")[0] + "entries/")
if "invoices" in path:
yield ("Invoices", path.split("invoices")[0] + "invoices/")
render = web.template.render("templates/", base='layout',
globals={'minutes': minutes,
'breadcrumbs': breadcrumbs,
'markdown': web.safemarkdown,
'fmt_date': fmt_date})
urls = (
r'/clients/', 'ClientList',
r'/clients/add', 'ClientEdit',
r'/clients/(\d*)/edit', 'ClientEdit',
r'/clients/(\d*)', 'ClientView',
r'/clients/(\d*)/categories/', 'CategoryList',
r'/clients/(\d*)/categories/add', 'CategoryEdit',
r'/clients/(\d*)/categories/(\d*)/edit', 'CategoryEdit',
r'/clients/(\d*)/entries/', 'EntryList',
r'/clients/(\d*)/entries/add', 'EntryEdit',
r'/clients/(\d*)/entries/(\d*)/edit', 'EntryEdit',
r'/clients/(\d*)/invoices/generate', 'GenerateInvoice',
r'/clients/(\d*)/invoices/', 'InvoiceList',
r'/clients/(\d*)/invoices/(\d*)/edit', 'InvoiceEdit',
r'/clients/(\d*)/invoices/(\d*)/receipt', 'InvoiceReceipt',
r'/clients/(\d*)/invoices/(\d*).?(\w*)', 'InvoiceView',
r'/calfetch', 'CalendarFetch',
r'/', 'Index',
)
def get_or_404(res):
first = get_safe(res)
if first:
return first
raise web.notfound()
def get_safe(res):
try:
return res[0]
except IndexError:
return None
def get_one(table, record_id):
if not record_id:
return None
return get_or_404(db.select(table,
where="id=$record_id",
vars={'record_id': record_id}))
def invoice_entries(invoice_id):
return list(reversed(time_entries("invoice_id=$invoice_id",
invoice_id=invoice_id)))
def time_entries(where, **vars):
query = """
SELECT time_entries.*, categories.name as category
FROM time_entries left join categories ON
time_entries.category_id = categories.id
WHERE %s
ORDER BY time_entries.start_time DESC
""" % where
def map_minutes(e):
e.duration = minutes(e.end_time - e.start_time)
e.hours = round(e.duration / 60.0, 2)
return e
return [map_minutes(e) for e in db.query(query, vars=vars)]
def total_ranges():
return (
("prev_year", utils.prev_year_range()),
("this_year", utils.year_range()),
("prev_month", utils.prev_month_range()),
("this_month", utils.month_range()),
("prev_week", utils.prev_week_range()),
("this_week", utils.week_range()),
("this_day", utils.day_range()),
("prev_day", utils.prev_day_range()))
def add_client_totals(client, totals=None):
ranges = total_ranges()
client.total_invoiced = sum([i.total for i in invoices(client.id)])
if totals is not None:
totals['total_invoiced'] += client.total_invoiced
for name, drange in ranges:
start, end = drange
entries = time_entries(
""" start_time >= $start AND
start_time <= $end AND time_entries.client_id = $cid""",
start=start, end=end, cid=client.id)
total = sum([t.hours for t in entries])
setattr(client, name, total)
if totals is not None:
totals[name] += total
if totals is not None:
return client, totals
else:
return client
def get_clients():
totals = dict([(total_name, 0.0) for total_name, _ in total_ranges()])
totals['total_invoiced'] = 0.0
clients = []
for c in db.select("clients"):
client, totals = add_client_totals(c, totals)
clients.append(client)
return clients, web.storage(totals)
def month_entries(entries):
month_entries = []
grouper = lambda i: [i.start_time.date().year, i.start_time.date().month]
for year_month, entries in groupby(entries, grouper):
d = datetime(year_month[0], year_month[1], 1).strftime("%B %Y")
entries = list(entries)
total = sum([minutes(te.end_time-te.start_time)
for te in entries]) / 60.0
month_entries.append(web.Storage({'date': d,
'total': round(total, 2),
'entries': entries}))
return month_entries
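# Illustrative note (not part of the original app): month_entries groups a
# date-ordered entry list by (year, month) and attaches a rounded hour total,
# e.g. two 90-minute entries in March 2013 yield one group with total 3.0.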
def invoices(client_id):
for i in db.select("invoices", where="client_id=$client_id",
vars={'client_id': client_id}, order="month desc"):
i.total = sum([e.hours for e in invoice_entries(i.id)])
yield i
def get_or_create_category(client, name):
cat = get_safe(db.select("categories",
where="client_id=$client_id AND name=$name",
vars={'client_id': client.id, 'name': name}))
if cat:
return cat
db.insert("categories", client_id=client.id, name=name)
return get_or_create_category(client, name)
class CalendarFetch:
def GET(self):
title = "Fetch Events from Google Calendar '{}'".format(
config.g_calendar_name)
return render.form(web.form.Form(), name="Fetch", title=title)
def POST(self):
events = [web.storage(e) for e
in gcal.fetch_events(config.g_username,
config.g_password,
config.g_calendar_name)]
for event in events:
client = get_safe(db.select("clients",
where="lower(name)=lower($name)",
vars={'name': event.client_name}))
if not client:
event['status'] = 'Client could not be found.'
continue
category_id = None
if event.category_name:
category = get_or_create_category(client, event.category_name)
category_id = category.id
entry = get_safe(db.select("time_entries",
where="external_reference=$ref", vars={'ref': event.id}))
if not entry:
db.insert("time_entries",
client_id=client.id,
description=event.description,
start_time=event.start,
end_time=event.end,
category_id=category_id,
external_reference=event.id)
event.status = 'Creating new time entry'
elif entry.invoice_id:
event.status = 'Skipping, entry has already been invoiced.'
else:
db.update("time_entries",
where="id=$entry_id",
vars={'entry_id': entry.id},
description=event.description,
start_time=event.start,
end_time=event.end,
category_id=category_id)
event.status = 'Updated time entry'
return render.fetch_status(events=events)
class GenerateInvoice:
def POST(self, client_id):
start = datetime.now().date().replace(day=1)
start = datetime(start.year, start.month, start.day)
earliest = time_entries(
"time_entries.client_id=$client_id and invoice_id is null",
client_id=client_id)[0]
beg_month = earliest.start_time.date().replace(day=1)
invoice_id = db.insert("invoices",
month=beg_month,
client_id=client_id,
status='billed')
db.update('time_entries',
where="client_id=$client_id and invoice_id is null",
vars=locals(),
invoice_id=invoice_id)
raise web.seeother("/clients/%s/entries/" % client_id)
class InvoiceList:
def GET(self, client_id):
client = get_one("clients", client_id)
return render.invoice_list(client, invoices(client.id))
class InvoiceEdit:
def GET(self, client_id, invoice_id):
f = web.form.Form(
web.form.Dropdown("status", ["billed", "closed"]),
web.form.File("receipt"))()
return render.form(f)
def POST(self, client_id, invoice_id):
i = web.input(receipt={}, status="")
to_update = {'status': i.status}
if i.receipt.filename:
to_update = {'receipt_name': i.receipt.filename,
'receipt': buffer(i.receipt.file.read())}
db.update("invoices", where="id=$invoice_id and client_id=$client_id",
vars=locals(), **to_update)
raise web.seeother("/clients/%s/invoices/" % client_id)
class InvoiceReceipt:
def GET(self, client_id, invoice_id):
i = get_or_404(
db.select("invoices",
where="id=$invoice_id and client_id=$client_id",
limit=1,
vars=locals()))
return str(i.receipt)
class InvoiceView:
def generate_csv(self, entries, hourly_rate):
s = StringIO()
w = csv.writer(s)
categories = list(set([e.category for e in entries]))
w.writerow(["Start Time", "End Time",
"Hours", "Description"] + categories)
totals = defaultdict(lambda: 0.0)
for e in entries:
totals[e.category] += e.hours
row = [e.start_time, e.end_time, e.hours, e.description]
for c in categories:
if e.category == c:
row.append(e.hours)
else:
row.append("")
w.writerow(row)
total_row = ["Totals", "", "", ""] + [totals[c] for c in categories]
w.writerow(total_row)
w.writerow(())
w.writerow(())
total = sum(totals.values())
w.writerow(("Total Hours", total))
w.writerow(("Rate per Hour", '$%d' % hourly_rate))
w.writerow(("Total", "$%s" % (hourly_rate * total)))
return s.getvalue()
def GET(self, client_id, invoice_id, format):
client = get_one("clients", client_id)
invoice = get_one('invoices', invoice_id)
entries = invoice_entries(invoice.id)
_month_entries = month_entries(entries)
if format == "csv":
filename = 'Invoice for {} for {}.csv'.format(
client.name, invoice.month.strftime("%B %Y"))
web.header('Content-Type', 'text/csv')
web.header('Content-disposition', 'attachment; filename={}'.format(
filename))
return self.generate_csv(entries, client.hourly_rate)
else:
return render.entry_list(client, _month_entries)
class CategoryEdit:
form = web.form.Form(web.form.Textbox("name", web.form.notnull))
def GET(self, client_id, category_id=None):
category = get_one("categories", category_id)
form = self.form()
form.fill(category)
return render.form(form)
def POST(self, client_id, category_id=None):
client = get_one("clients", client_id)
category = get_one("categories", category_id)
form = self.form()
if not form.validates():
return render.form(form)
if category:
db.update("categories", where="id=$cat_id",
vars={'cat_id': category.id}, name=form.d.name)
else:
get_or_create_category(client, form.d.name)
raise web.seeother("/clients/%s/categories/" % client_id)
class CategoryList:
def GET(self, client_id):
categories = db.select("categories", where="client_id=$client_id",
vars=locals())
client = get_one("clients", client_id)
return render.category_list(client, categories)
class EntryList:
def GET(self, client_id, _open=None):
client = get_one("clients", client_id)
entries = time_entries(where="time_entries.client_id=$client_id",
client_id=client_id)
return render.entry_list(client, month_entries(entries))
class EntryEdit:
def form(self, client_id):
categories = db.select("categories", where='client_id=$client_id',
vars=locals())
cat_options = [""] + [(c.id, c.name) for c in categories]
return web.form.Form(
web.form.Textbox("start_date", web.form.notnull,
class_='datepicker',
description="date"),
web.form.Textbox("start_time", web.form.notnull,
class_='timepicker',
description='start time'),
web.form.Textbox("duration", web.form.notnull,
description="duration/end time"),
web.form.Textarea("description", rows="8"),
web.form.Dropdown("category_id", cat_options,
description="category"))()
def GET(self, client_id, entry_id=None):
form = self.form(client_id)
entry = get_one("time_entries", entry_id) or web.storage()
if entry:
start_time = entry.start_time
entry.duration = "%d minutes " % (
minutes(entry.end_time - start_time))
else:
start_time = datetime.now()
entry.start_date = start_time.strftime("%m/%d/%Y")
entry.start_time = start_time.strftime("%I:%M %p")
form.fill(entry)
return render.form(form)
def POST(self, client_id, entry_id=None):
entry = get_one("time_entries", entry_id)
f = self.form(client_id)
if f.validates():
date_str = "{} {}".format(f.d.start_date.strip(),
f.d.start_time.strip())
start_time = end_time = datetime.strptime(date_str,
"%m/%d/%Y %I:%M %p")
end_time = utils.compute_end_time(f.d.duration, start_time)
if entry:
db.update("time_entries",
where="id=$entry_id",
vars={'entry_id': entry_id},
description=f.d.description,
start_time=start_time,
end_time=end_time,
category_id=f.d.category_id)
else:
db.insert("time_entries",
client_id=client_id,
description=f.d.description,
start_time=start_time,
end_time=end_time,
category_id=f.d.category_id)
raise web.seeother("/clients/%s/entries/" % client_id)
else:
return render.form(f)
class ClientList:
def GET(self):
clients, totals = get_clients()
return render.client_list(clients, totals)
class ClientView:
def GET(self, client_id=None):
client = get_one("clients", client_id)
client = add_client_totals(client)
return render.client_view(client)
class ClientEdit:
require_number = web.form.regexp(r'^\d+$', 'must be a number')
form = web.form.Form(web.form.Textbox("name", web.form.notnull),
web.form.Textarea("notes", rows=15,
style="width:600px"),
web.form.Textbox("hourly_rate",
web.form.notnull,
require_number,
description="hourly rate"))
def GET(self, client_id=None):
client = get_one("clients", client_id)
f = self.form()
f.fill(client or web.storage(hourly_rate=100))
return render.form(f)
def POST(self, client_id=None):
f = self.form()
if f.validates():
if client_id:
db.update("clients", where="id=$client_id",
vars=locals(), **f.d)
else:
db.insert("clients", **f.d)
raise web.seeother("/clients/")
else:
return render.form(f)
class Index:
def GET(self):
raise web.seeother("/clients/")
app = web.application(urls, globals(), autoreload=True)
auth = basic_auth("Application", config.basic_auth_user,
config.basic_auth_pw)
if __name__ == "__main__":
app.run(auth)
else:
app_wsgi = app.wsgifunc(auth)
|
[
"jzellman@gmail.com"
] |
jzellman@gmail.com
|
b5b9744aad6e7b4981d56fd09b119bfe1cfcca3f
|
2198905527766aba5cd71f7854720c22eb2cf647
|
/1.py
|
29ea96d27f6b3027c9bad312863690b1a3fed13e
|
[] |
no_license
|
jiuwangshiyan/1807
|
5273e38bf688e4015fc9327695f76e9461914304
|
343b3fc4642a4a610607b8f7107fe39b5a5f011b
|
refs/heads/master
| 2020-03-22T11:14:28.055624
| 2018-07-09T06:50:49
| 2018-07-09T06:50:49
| 139,957,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17
|
py
|
zhkvkvcjhchgfgf
|
[
"1848919677@qq.com"
] |
1848919677@qq.com
|
8e6108e098811c7200bb1709be882039085a7e74
|
24fb3fa105318e739c00944faaf7e0ca8bcdcfc0
|
/backend/Endpoints/Users/teams/register.py
|
3e686044c3d6fb8737e511658cb8724fdb86dbfc
|
[] |
no_license
|
malbaugh/crowd-u
|
457dfb390e94beed00df9b8e62e34042d34202e7
|
9ead6b26ae1197511dfc131e8bc20ea933b74f61
|
refs/heads/main
| 2023-08-30T03:46:17.193086
| 2021-09-29T16:40:51
| 2021-09-29T16:40:51
| 411,744,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
# coding=utf-8
from os import path
from flask_restful import Resource
from flask import request
from Models.entity import DB_SESSION
from Models.Users.teams.team import TeamTable, TeamTableSchema
from Models.entity import SERIALIZER
from Models.Users.users.user import UserTable
from werkzeug.security import check_password_hash
from Statuses.statuses import NO_CONTENT, UNAUTHORIZED, NOT_FOUND, FORBIDDEN, CONFLICT
class Register(Resource):
def get(self):
pass
def post(self):
leader = request.get_json()['leader']
members = request.get_json()['members']
name = request.get_json()['name']
token = request.headers.get("Authorization")
if (token != None):
current_username = SERIALIZER.loads(token)['username']
current_password = SERIALIZER.loads(token)['password']
user_type = SERIALIZER.loads(token)['user_type']
else:
return UNAUTHORIZED
db_session = DB_SESSION()
user = db_session.query(UserTable).filter_by(username=leader).first()
if (user == None):
db_session.close()
return NOT_FOUND
elif (user.email_confirmed == False):
db_session.close()
return FORBIDDEN
elif ((current_username == leader) and check_password_hash(user.password, current_password) and (user_type == "participant")):
if (db_session.query(TeamTable).filter_by(name=name).first() == None):
corrected_data = {
'members': members,
'name': name,
'leader': leader
}
posted_team = TeamTableSchema(only=('members','name','leader'))\
.load(corrected_data)
            team = TeamTable(**posted_team.data, created_by="HTTP post request")
db_session.add(team)
db_session.commit()
db_session.close()
return NO_CONTENT
else:
db_session.close()
return CONFLICT
else:
db_session.close()
return UNAUTHORIZED
def put(self):
pass
def delete(self):
pass
|
[
"dane@predictivewear.com"
] |
dane@predictivewear.com
|
3d9308229bee020f17e4e90c2f21b27699be55ff
|
7cc5e5fee3dbf0cebe3930f12f43a7f7f42f38c8
|
/3D Detection/loadData.py
|
2c64c72d37e661f5859df3ac1d1474cde166a67d
|
[] |
no_license
|
MiRA-lab-dev/IHC_organelles_detection
|
580b28cc0377fcabc5af87715c2935a2e87a93f7
|
538ea22ec910f7d7d264959b27ebd2eac67346cc
|
refs/heads/main
| 2023-03-29T21:36:02.177104
| 2021-04-06T08:23:40
| 2021-04-06T08:23:40
| 355,045,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
from PIL import Image
import os, sys
import numpy
import cv2
from skimage.io import imsave
def load_tiff_data(dataFile, dtype='float32'):
"""
Loads data from a multilayer .tif file.
Returns result as a 3d numpy tensor.
"""
if not os.path.isfile(dataFile):
raise RuntimeError('could not find "%s"' % dataFile)
# load the data from multi-layer TIF files
dataImg = Image.open(dataFile)
X = []
for ii in range(sys.maxsize):
Xi = numpy.array(dataImg, dtype=dtype)
X.append(Xi)
try:
dataImg.seek(dataImg.tell() + 1)
except EOFError:
break
X = numpy.dstack(X).transpose((2, 0, 1))
X = X.reshape((X.shape[0], X.shape[1], X.shape[2], 1))
return X
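# Illustrative note (not part of the original file): PIL exposes multi-frame
# TIFFs through seek()/tell(); seeking past the last frame raises EOFError,
# which is what terminates the frame loop above. The result has shape
# (frames, height, width, 1).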
def loadDataFromDir(path):
files = os.listdir(path)
imgs = []
for filename in files:
I = Image.open(os.path.join(path,filename))
imgs.append(numpy.array(I))
imgs = numpy.dstack(imgs).transpose((2,0,1))
imgs = imgs.reshape(imgs.shape[0],imgs.shape[1],imgs.shape[2],1)
return imgs
if __name__ == '__main__':
data_file = "D:\keras\liuj\mitochondria\\training.tif"
x_train = load_tiff_data(data_file, dtype=numpy.uint8)
for i in range(x_train.shape[0]):
test = x_train[i, :, :, 0]
imsave('./images/'+str(i+1).zfill(3)+'.png',test)
# cv2.imwrite('./masks/neurons/'+str(i+1).zfill(2)+'.jpg',test,[cv2.IMWRITE_JPEG_QUALITY,90])
# img = cv2.imread('./masks/neurons/'+str(i+1).zfill(2)+'.png')
# Image.fromarray(test).save('./masks/neurons/'+str(i+1).zfill(2)+'.jpg')
|
[
"noreply@github.com"
] |
MiRA-lab-dev.noreply@github.com
|
a7e31f9f3caa20c74b7125b9c1990aa19243a6b6
|
204c2504e6e5459be8cf38208dafb8b6b4d518dc
|
/simple_page_application_project/asgi.py
|
08e3af27e57131531288a7520c5a9531b3139eb9
|
[] |
no_license
|
anoleose/simple-page-application
|
87ae3866a5f0746938f263cf355dc17ceb18b0c5
|
263c6b7073cc07b2648e1c9066cd76a5a7947988
|
refs/heads/master
| 2023-08-15T07:40:12.686440
| 2021-09-26T06:43:20
| 2021-09-26T06:43:20
| 410,188,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
"""
ASGI config for simple_page_application_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'simple_page_application_project.settings')
application = get_asgi_application()
|
[
"anoleose@bingo.bingo"
] |
anoleose@bingo.bingo
|
157df771aebc7068eb7fad7b54b0e0faf995c1b1
|
ef9c83ad39196f4a682a7ca61a96b730edd3d9f9
|
/Import Module/Tabel cosinus.py
|
64c0a80f312c7ccb710b0edd69d80691c6afc2a7
|
[] |
no_license
|
tennayarista/Python-XI-MIPA-1-Keisya-Tennaya-R
|
92eb4c3c18ef10c7705e9c0e889292fce07a4b8b
|
fc57268e1fbcc0dfd2bd17a4e80eb84608c02ea8
|
refs/heads/master
| 2023-06-08T15:58:31.351120
| 2021-06-20T04:23:08
| 2021-06-20T04:23:08
| 337,772,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
from math import*
print("menentukan nilai sin dan cos dari 0, 30, 60 - 360 derajat")
print("sudut","\t","sin","\t","cos","\t","tan")
for i in range (0,361,30):
a=radians (i)
b=sin (a)
b=format(b,".2f")
c=cos (a)
c=format(c,".2f")
d=tan (a)
d=format(d,".2f")
print (i,"\t",b,"\t",c,"\t",d)
input("selesai")
|
[
"tennayarista@gmail.com"
] |
tennayarista@gmail.com
|
c0adc1f01340bc0c5c2ca58dffdb2f91c4ae8c0b
|
f0a814c03a954c809bc56210207a69317c3b8ae7
|
/helper/RegressionExperiment.py
|
187fe23b00afbef988f7037ff2c9c93decc38a0d
|
[] |
no_license
|
ollixy/benchmark
|
5ec275a7b66594f29f3067a83fe25e069e9ccfb7
|
4f02219710a405a956e67f6d14b046ab028969c7
|
refs/heads/master
| 2021-01-21T01:26:08.648587
| 2013-09-19T16:07:06
| 2013-09-19T16:07:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,582
|
py
|
import os
import subprocess
import shutil
from os.path import isfile, join
import re
import csv
from collections import defaultdict
from Build import *
def columns_from_csv(filename):
    columns = defaultdict(list)
    with open(filename) as f:
        reader = csv.DictReader(f, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in reader:
            for (k, v) in row.items():
                # coerce numeric strings to int, then float; fall back to the raw value
                try:
                    value = int(v)
                except ValueError:
                    try:
                        value = float(v)
                    except ValueError:
                        value = v
                columns[k].append(value)
    return columns
class ResultManager(object):
def __init__(self):
self.results = []
def add_result(self, result):
self.results.append(result)
def write_csv(self):
if len(self.results) == 0:
return
        with open('result.csv', 'w', newline='') as csvfile:  # text mode with newline='' for the Python 3 csv module
writer = csv.writer(csvfile, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
# take first result as header
header = ["_exp"]
for key,value in self.results[0].items():
header.append(key)
header = sorted(header)
writer.writerow(header)
rownumber = 0
for result in self.results:
row = []
for key in header:
if key != "_exp":
if key in result:
row.append(result[key])
else:
row.append("None")
else:
row.append(rownumber)
rownumber = rownumber + 1
writer.writerow(row)
class RegressionExperiment(object):
def __init__(self, settingsfile, testname):
self.settingsfile = settingsfile
self.testname = testname
self.hyrise_dir = "./hyrise/"
self.bin_dir = "./builds/"+settingsfile+"/"
self.build = Build(settingsfile)
def execute(self):
        print(self.settingsfile + ": Executing " + self.testname)
        print("#########################")
cwd = os.getcwd()
os.chdir(self.hyrise_dir)
exp_env = os.environ.copy()
exp_env["HYRISE_DB_PATH"] = "." + self.bin_dir
exp_env["LD_LIBRARY_PATH"] = "." + self.bin_dir
        proc = subprocess.Popen(".%sperf_regression --gtest_filter=%s" % (self.bin_dir, self.testname), env=exp_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True)
        (out, err) = proc.communicate()
        print(out)
os.chdir(cwd)
return self.parse_result(out)
def parse_result(self, resultstring):
result = {"_testname":self.testname, "_settingsfile":self.settingsfile}
for line in resultstring.splitlines():
line = line.replace(" ", "")
m = re.search(r"\[(\w+)\]", line)
            if m is not None and m.group(1) == "MSG":
line = line[5:]
keyvalue = line.split(":")
if len(keyvalue) >=2:
result[keyvalue[0]] = float(keyvalue[1])
return result
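# Hedged usage sketch (editor's addition; the settings file and gtest filter are
# hypothetical, and Build plus the ./hyrise/ checkout must exist for it to run):
#
#     manager = ResultManager()
#     exp = RegressionExperiment("release.settings", "TPCCRegressionTest.*")
#     manager.add_result(exp.execute())
#     manager.write_csv()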
|
[
"schwalb.david@gmail.com"
] |
schwalb.david@gmail.com
|
ca1eda60265bccb2a5ec87e77c4b843c57907cd8
|
d0d150e83bbb2da9be7c8effd2d2af22a393fa62
|
/testscrape.py
|
8243fe8c8d472213f7f38268f1f4401ec9cd3c5f
|
[] |
no_license
|
wonathanjong/BTHO-TAMU-Registration
|
0e416ea484a55fff0922b5c8f621a61f44440bbc
|
cf7ec300e544801e577328ccd6ec6066cc1082eb
|
refs/heads/master
| 2020-04-18T19:51:27.139337
| 2019-01-29T19:39:01
| 2019-01-29T19:39:01
| 167,723,046
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,502
|
py
|
import scraperwiki
from bs4 import BeautifulSoup
import unicodedata
import time
import requests
headers = ["Name","Department","Total Ratings","Overall Quality","Easiness","Hot"]
#Dictionary of school ids (keys) that map to tuple of school name and number of pages
colleges = {"1003":("Texas A&M",4)}
for sid in colleges.keys():
college,pages = colleges[sid]
    print(college)
    for page in range(1, pages + 1):  # "page" avoids being shadowed by the enumerate() index below
        url = "http://www.ratemyprofessors.com/SelectTeacher.jsp?sid=%s&pageNo=%s" % (sid, page)
        response = scraperwiki.scrape(url)
        xxxx = requests.get(url)
        print(xxxx.text)
        print(url)
        time.sleep(5)
        soup = BeautifulSoup(response, "html.parser")
rows = soup.find_all("div",{"class":"entry odd vertical-center"})
rows.extend(soup.find_all("div",{"class":"entry even vertical-center"}))
for row in rows:
columns = row.find_all('div')
columns = columns[3:]
variables = {}
            for i, col in enumerate(columns):
                value = unicodedata.normalize('NFKD', col.text).encode('ascii', 'ignore').decode('ascii')
                variables[headers[i]] = value
variables["College"] = college
scraperwiki.sqlite.save(unique_keys=['Name',"Department"], data = variables)
print(variables)
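# Self-contained sketch of the row-parsing step above (editor's addition; the
# HTML snippet is invented and real RateMyProfessors markup may differ):
#
#     demo = BeautifulSoup(
#         '<div class="entry odd vertical-center"><div></div><div></div><div></div>'
#         '<div>Smith, J.</div><div>CSCE</div><div>12</div><div>4.5</div>'
#         '<div>3.9</div><div>1</div></div>', "html.parser")
#     row = demo.find_all("div", {"class": "entry odd vertical-center"})[0]
#     cols = row.find_all('div')[3:]  # first three divs are skipped, as above
#     print({headers[j]: c.text for j, c in enumerate(cols)})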
|
[
"bobbykim1013@yahoo.com"
] |
bobbykim1013@yahoo.com
|
d207fc4c0568cf2a78db1395fb9177a2aa9e438e
|
b3677e950abc06e93afb918b682bed1c4cf69e88
|
/blog/migrations/0001_initial.py
|
c6aa3b75e7747ee78a9dde9f67b5fffac5c210e2
|
[] |
no_license
|
MuhooziJr/my-first-blog
|
3465c900dfb4c6b47a4127a1357d9b15531d7e04
|
17d0daf2802836855bf1c8d65cfff02f55d3ea30
|
refs/heads/master
| 2022-12-11T15:02:59.707212
| 2020-08-26T13:47:28
| 2020-08-26T13:47:28
| 290,390,430
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 987
|
py
|
# Generated by Django 2.2.15 on 2020-08-25 08:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
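# The model this migration was generated from is not part of this record; a
# hedged sketch of what blog/models.py presumably contains (field definitions
# inferred from the operations above):
#
#     from django.conf import settings
#     from django.db import models
#     from django.utils import timezone
#
#     class Post(models.Model):
#         author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
#         title = models.CharField(max_length=200)
#         text = models.TextField()
#         created_date = models.DateTimeField(default=timezone.now)
#         published_date = models.DateTimeField(blank=True, null=True)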
|
[
"muhabuzi34muhoozi@gmail.com"
] |
muhabuzi34muhoozi@gmail.com
|
7e735b6c1df4f2decc9c1973bd0cda6434597efa
|
1b51bc84231eefb73a05fbde037373ef1a649269
|
/transport_controller/nodes/transport_state_machine_node.py
|
dacb6ef1df5ff196a19baa6af61a1e0d88300c34
|
[] |
no_license
|
matchRos/Collaborative_Transport
|
4faed31182a9220ffa2312c701a64f52269861ca
|
788e0a28d2c76c20613cad15e6365572746d4a11
|
refs/heads/master
| 2023-04-30T18:52:59.899111
| 2021-05-12T13:12:49
| 2021-05-12T13:12:49
| 346,361,156
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
#!/usr/bin/env python
import smach
import smach_ros
import rospy
from transport_controller.StateMachine import PrepareSystemStateMachine
from transport_controller.StateMachine import MoveStateMachine
from transport_controller.StateMachine import FormationControlStateMachine
if __name__ == "__main__":
rospy.init_node('transport_state_machine')
base_namespaces=rospy.get_param("~base_namespaces",["/mur/mir","/miranda/mir"])
arm_namespaces=rospy.get_param("~arm_namespaces",["/mur/ur","/miranda/panda"])
sm=smach.StateMachine(outcomes=["out"])
try:
with sm:
smach.StateMachine.add( "PrepareMovement",
PrepareSystemStateMachine(base_namespaces,arm_namespaces),
transitions={ "preparation_done":"MoveToFormation",
"preparation_error":'out'})
smach.StateMachine.add( "MoveToFormation",
MoveStateMachine(base_namespaces,arm_namespaces),
transitions={ "movement_done":"FormationControl",
"movement_error":'out'})
smach.StateMachine.add( "FormationControl",
FormationControlStateMachine(base_namespaces,arm_namespaces),
transitions={ 'formation_control_move':"MoveToFormation",
'formation_control_stop':'out',
"formation_control_error":'out'})
sis = smach_ros.IntrospectionServer('server_name', sm, '/SM_ROOT')
sis.start()
sm.execute()
# Wait for ctrl-c to stop the application
rospy.spin()
sis.stop()
except Exception as e:
        print(e)
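# Minimal sketch of the smach.State pattern the imported state machines are
# presumably built from (editor's addition; "Idle" is a hypothetical state, not
# part of the transport controller):
#
#     class Idle(smach.State):
#         def __init__(self):
#             smach.State.__init__(self, outcomes=["done"])
#         def execute(self, userdata):
#             rospy.loginfo("idling")
#             return "done"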
|
[
"Heinrich"
] |
Heinrich
|