Column schema (29 fields per row):

| field | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 2-616) |
| content_id | string (length 40) |
| detected_licenses | list (length 0-69) |
| license_type | string (2 classes) |
| repo_name | string (length 5-118) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (length 4-63) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k-686M, nullable) |
| star_events_count | int64 (0-209k) |
| fork_events_count | int64 (0-110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (213 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool |
| is_generated | bool |
| length_bytes | int64 (2-10.3M) |
| extension | string (246 classes) |
| content | string (length 2-10.3M) |
| authors | list (length 1) |
| author_id | string (length 0-212) |
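Each row below is one record with these fields. As a minimal reading sketch, assuming the dump comes from a Hugging Face-style dataset (the dataset ID here is hypothetical):

    from datasets import load_dataset

    # Stream rows so the multi-megabyte "content" strings are not all loaded at once.
    ds = load_dataset("example/python-stack-dump", split="train", streaming=True)
    row = next(iter(ds))  # a dict with the 29 fields listed above
    print(row["repo_name"], row["path"], row["length_bytes"])
    print(row["content"][:200])  # beginning of the stored source file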
d10c3fb59eb602e7a438fe8b8b7ccca52fcc45d2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_syphon.py
|
1ef3547d3d666728720ba4bfc26206b8a9d76bc4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# class header
class _SYPHON():
    def __init__(self):
        self.name = "SYPHON"
        self.definitions = [u'a siphon noun ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'

    def run(self, obj1=[], obj2=[]):
        return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
5916126d6b2b7816ef167915954b5ddf8cb45d8f
|
54b238d50baee4f483c0690d77d106ebc30a4c0a
|
/tests/test_space_time/test_type_helpers.py
|
cb9dab6c31fed82502d58d190cd6d40c0ba43739
|
[
"MIT"
] |
permissive
|
David-Durst/aetherling
|
4a5d663a98428769834e8ebbf7e9b63cb7788319
|
91bcf0579608ccbf7d42a7bddf90ccd4257d6571
|
refs/heads/master
| 2021-08-16T01:48:20.476097
| 2020-06-19T19:25:46
| 2020-06-19T19:25:46
| 114,405,958
| 10
| 1
|
MIT
| 2021-03-29T17:44:39
| 2017-12-15T19:46:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,731
|
py
|
from aetherling.space_time.space_time_types import *
from aetherling.space_time.type_helpers import *
def test_same_type():
    x = ST_TSeq(3, 0, ST_Int())
    y = ST_TSeq(3, 0, ST_Int())
    shared_diff = get_shared_and_diff_subtypes(x, y)
    assert shared_diff.diff_input == ST_Tombstone()
    assert shared_diff.diff_output == ST_Tombstone()
    assert shared_diff.shared_inner == x
    assert shared_diff.shared_outer == ST_Tombstone()

def test_same_type_nested():
    x = ST_TSeq(3, 0, ST_SSeq(4, ST_Int))
    y = ST_TSeq(3, 0, ST_SSeq(4, ST_Int))
    shared_diff = get_shared_and_diff_subtypes(x, y)
    assert shared_diff.diff_input == ST_Tombstone()
    assert shared_diff.diff_output == ST_Tombstone()
    assert shared_diff.shared_inner == x
    assert shared_diff.shared_outer == ST_Tombstone()

def test_diff_no_outer_same():
    x = ST_SSeq(6, ST_TSeq(3, 0, ST_SSeq(4, ST_Int)))
    y = ST_TSeq(3, 0, ST_SSeq(6, ST_SSeq(4, ST_Int)))
    shared_diff = get_shared_and_diff_subtypes(x, y)
    assert shared_diff.diff_input == ST_SSeq(6, ST_TSeq(3, 0, ST_Tombstone()))
    assert shared_diff.diff_output == ST_TSeq(3, 0, ST_SSeq(6, ST_Tombstone()))
    assert shared_diff.shared_inner == x.t.t
    assert shared_diff.shared_outer == ST_Tombstone()

def test_diff_with_outer_same():
    x = ST_TSeq(9, 2, ST_SSeq(6, ST_TSeq(3, 0, ST_SSeq(4, ST_Int))))
    y = ST_TSeq(9, 2, ST_TSeq(3, 0, ST_SSeq(6, ST_SSeq(4, ST_Int))))
    shared_diff = get_shared_and_diff_subtypes(x, y)
    assert shared_diff.diff_input == ST_SSeq(6, ST_TSeq(3, 0, ST_Tombstone()))
    assert shared_diff.diff_output == ST_TSeq(3, 0, ST_SSeq(6, ST_Tombstone()))
    assert shared_diff.shared_inner == x.t.t.t
    assert shared_diff.shared_outer == ST_TSeq(9, 2, ST_Tombstone())

def test_diff_with_partially_diff_inner():
    x = ST_TSeq(9, 2, ST_SSeq(6, ST_SSeq(7, ST_TSeq(3, 0, ST_SSeq(4, ST_Int)))))
    y = ST_TSeq(9, 2, ST_TSeq(3, 0, ST_SSeq(7, ST_SSeq(6, ST_SSeq(4, ST_Int)))))
    shared_diff = get_shared_and_diff_subtypes(x, y)
    assert shared_diff.diff_input == ST_SSeq(6, ST_SSeq(7, ST_TSeq(3, 0, ST_Tombstone())))
    assert shared_diff.diff_output == ST_TSeq(3, 0, ST_SSeq(7, ST_SSeq(6, ST_Tombstone())))
    assert shared_diff.shared_inner == x.t.t.t.t
    assert shared_diff.shared_outer == ST_TSeq(9, 2, ST_Tombstone())

def test_diff_depths():
    x = ST_TSeq(4, 12, ST_Int())
    y = ST_TSeq(2, 2, ST_TSeq(2, 2, ST_Int()))
    shared_diff = get_shared_and_diff_subtypes(x, y)
    assert shared_diff.diff_input == ST_TSeq(4, 12, ST_Tombstone())
    assert shared_diff.diff_output == ST_TSeq(2, 2, ST_TSeq(2, 2, ST_Tombstone()))
    assert shared_diff.shared_inner == ST_Int()
    assert shared_diff.shared_outer == ST_Tombstone()
|
[
"davidbdurst@gmail.com"
] |
davidbdurst@gmail.com
|
a95c8e9ee9b4167c2ef845c2453d3b7c424026ec
|
4df63456e42591b5858c29986089b84ecac01fea
|
/tracker-visual/read_cluster.py
|
3739ddc9ef84e61e5178f922a6728920b78a1a44
|
[
"MIT"
] |
permissive
|
will-fawcett/trackerSW
|
6f71a8ab9e2013e439e1e24326c1cc59f7be1e7f
|
fc097b97539d0b40a15e1d6e112f4048cb4122b4
|
refs/heads/master
| 2021-04-25T21:39:17.321302
| 2018-06-14T13:31:13
| 2018-06-14T13:31:13
| 109,404,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,055
|
py
|
"""
Quick script to dump FCCSW tracker validation data to plaintext spacepoints.
Requires podio, fcc-edm in pythonpath and ld-library-path.
"""
from EventStore import EventStore
import numpy as np
import sys
filename = sys.argv[1]
basefilename = filename.replace(".root", "")
events = EventStore([filename])
print 'number of events: ', len(events)
pos_b = []
ids_b = []
pos_e = []
ids_e = []
barrel_ids = []
nEventsMax = 100000
#nEventsMax = 1
for i, store in enumerate(events):
    # Only run over 100000 events
    if i > nEventsMax:
        break
    # <class 'ROOT.fcc.PositionedTrackHitCollection'>
    clusters = store.get('positionedHits')
    #print clusters
    layerIdmax = 0
    for c in clusters:
        # c is of type <class 'ROOT.fcc.PositionedTrackHit'>
        # <class 'ROOT.fcc.Point'>
        cor = c.position()
        # cellId packs the layer in bits 5-9: drop the low 5 bits, then mask 5 bits
        layerId = (c.cellId() / 32) % 32
        if layerId > 20:
            print c.cellId(), c.cellId() % 32, c.cellId() / 32, (c.cellId() / 32) % 32
        if layerId > layerIdmax:
            layerIdmax = layerId
        if (c.cellId() % 32) == 0:
            # Select only the triplet layers
            #if (c.cellId() / 32) % 32 == 20 or (c.cellId() / 32) % 32 == 21 or (c.cellId() / 32) % 32 == 22:
            if (c.cellId() / 32) % 32 == 1:  # or (c.cellId() / 32) % 32 == 21 or (c.cellId() / 32) % 32 == 22:
                pass
            pos_b.append([cor.x, cor.y, cor.z])
            ids_b.append([c.bits(), c.cellId()])
            #print c.cellId() % 32, np.sqrt(cor.x**2 + cor.y**2)
            #else:
            #    pos_e.append([cor.x, cor.y, cor.z])
            #    ids_e.append([c.bits(), c.cellId()])
pos_e = np.array(pos_e)
ids_e = np.array(ids_e)
pos_b = np.array(pos_b)
ids_b = np.array(ids_b)
print "number of endcap hits: ", len(pos_e)
print "number of barrel hits: ", len(pos_b)
np.savetxt(basefilename + 'hit_positions_e.dat', pos_e)
np.savetxt(basefilename + 'hit_ids_e.dat', ids_e, fmt="%i")
np.savetxt(basefilename + 'hit_positions_b.dat', pos_b)
np.savetxt(basefilename + 'hit_ids_b.dat', ids_b, fmt="%i")
|
[
"william.fawcett@cern.ch"
] |
william.fawcett@cern.ch
|
99f477ff8ee5eee19b30adddfcaa704802c97c42
|
9b9a02657812ea0cb47db0ae411196f0e81c5152
|
/repoData/Floobits-flootty/allPythonContent.py
|
233f4eda6ac3b66566c18b3214288161442dcb88
|
[] |
no_license
|
aCoffeeYin/pyreco
|
cb42db94a3a5fc134356c9a2a738a063d0898572
|
0ac6653219c2701c13c508c5c4fc9bc3437eea06
|
refs/heads/master
| 2020-12-14T14:10:05.763693
| 2016-06-27T05:15:15
| 2016-06-27T05:15:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 38,459
|
py
|
__FILENAME__ = api
import sys
import base64
import json
try:
    from urllib.request import Request, urlopen
    assert Request and urlopen
except ImportError:
    from urllib2 import Request, urlopen

try:
    from . import shared as G, utils
    assert G and utils
except (ImportError, ValueError):
    import shared as G
    import utils
def get_basic_auth():
    # TODO: use api_key if it exists
    basic_auth = ('%s:%s' % (G.USERNAME, G.SECRET)).encode('utf-8')
    basic_auth = base64.encodestring(basic_auth)
    return basic_auth.decode('ascii').replace('\n', '')
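
# e.g. with G.USERNAME = 'u' and G.SECRET = 's' (made-up credentials), this
# returns 'dTpz', the base64 of 'u:s', for use in a Basic Authorization header.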
def api_request(url, data=None):
    if data:
        data = json.dumps(data).encode('utf-8')
    r = Request(url, data=data)
    r.add_header('Authorization', 'Basic %s' % get_basic_auth())
    r.add_header('Accept', 'application/json')
    r.add_header('Content-type', 'application/json')
    r.add_header('User-Agent', 'Flootty py-%s.%s' % (sys.version_info[0], sys.version_info[1]))
    return urlopen(r, timeout=5)

def create_workspace(post_data):
    url = 'https://%s/api/workspace/' % G.DEFAULT_HOST
    return api_request(url, post_data)

def get_workspace_by_url(url):
    result = utils.parse_url(url)
    api_url = 'https://%s/api/workspace/%s/%s/' % (result['host'], result['owner'], result['workspace'])
    return api_request(api_url)

def get_workspace(owner, workspace):
    api_url = 'https://%s/api/workspace/%s/%s/' % (G.DEFAULT_HOST, owner, workspace)
    return api_request(api_url)

def get_workspaces():
    api_url = 'https://%s/api/workspace/can/view/' % (G.DEFAULT_HOST)
    return api_request(api_url)

def get_now_editing_workspaces():
    api_url = 'https://%s/api/workspaces/now_editing/' % (G.DEFAULT_HOST)
    return api_request(api_url)

def get_orgs():
    api_url = 'https://%s/api/orgs/' % (G.DEFAULT_HOST)
    return api_request(api_url)

def get_orgs_can_admin():
    api_url = 'https://%s/api/orgs/can/admin/' % (G.DEFAULT_HOST)
    return api_request(api_url)

def send_error(data):
    try:
        api_url = 'https://%s/api/error/' % (G.DEFAULT_HOST)
        return api_request(api_url, data)
    except Exception as e:
        print(e)
        return None
########NEW FILE########
__FILENAME__ = cert
CA_CERT = '''-----BEGIN CERTIFICATE-----
MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW
MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg
Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh
dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9
MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi
U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh
cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA
A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk
pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf
OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C
Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT
Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi
HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM
Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w
+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+
Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3
Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B
26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID
AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE
FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j
ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js
LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM
BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0
Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy
dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh
cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh
YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg
dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp
bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ
YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT
TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ
9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8
jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW
FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz
ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1
ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L
EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu
L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq
yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC
O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V
um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh
NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=
-----END CERTIFICATE-----'''
########NEW FILE########
__FILENAME__ = flootty
#!/usr/bin/env python
# coding: utf-8
try:
    unicode()
except NameError:
    unicode = str
# Heavily influenced by the work of Joshua D. Bartlett
# see: http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
# original copyright
# Copyright (c) 2011 Joshua D. Bartlett
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# boilerplate to allow running as script directly
if __name__ == "__main__" and __package__ is None:
    import sys
    import os
    # The following assumes the script is in the top level of the package
    # directory. We use dirname() to help get the parent directory to add to
    # sys.path, so that we can import the current package. This is necessary
    # since when invoked directly, the 'current' package is not automatically
    # imported.
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.insert(0, parent_dir)
    import flootty
    assert flootty
    __package__ = str("flootty")
    del sys, os
import atexit
import fcntl
import json
import optparse
import array
import os
import pty
import select
import socket
import ssl
import sys
import tempfile
import termios
import tty
import signal
import time
import base64
import collections
import errno
PY2 = sys.version_info < (3, 0)
try:
    import __builtin__
    input = getattr(__builtin__, 'raw_input')
except (ImportError, AttributeError):
    pass

try:
    from . import api, cert, shared as G, utils, version
    assert api and cert and G and utils
except (ImportError, ValueError):
    import api
    import cert
    import shared as G
    import utils
    import version
PROTO_VERSION = '0.11'
CLIENT = 'flootty %s' % version.FLOOTTY_VERSION
INITIAL_RECONNECT_DELAY = 1000
FD_READ_BYTES = 65536
# Seconds
SELECT_TIMEOUT = 0.1
NET_TIMEOUT = 10
MAX_BYTES_TO_BUFFER = 65536
DEFAULT_HOST = "floobits.com"
DEFAULT_PORT = 3448
def read_floorc():
    settings = {}
    p = os.path.expanduser('~/.floorc')
    try:
        fd = open(p, 'rb')
    except IOError as e:
        if e.errno == 2:
            return settings
        raise
    data = fd.read().decode('utf-8')
    fd.close()
    for line in data.split('\n'):
        position = line.find(' ')
        if position < 0:
            continue
        settings[line[:position]] = line[position + 1:]
    return settings
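
# A ~/.floorc file is plain text, one "key value" setting per line, e.g.
# (made-up values):
#   username alice
#   secret 0123456789abcdef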
def write(fd, b):
    if (not PY2) and isinstance(b, str):
        b = b.encode('utf-8')
    elif PY2 and isinstance(b, unicode):
        b = b.encode('utf-8')
    while len(b):
        try:
            n = os.write(fd, b)
            b = b[n:]
        except (IOError, OSError):
            pass

def read(fd):
    buf = b''
    while True:
        try:
            d = os.read(fd, FD_READ_BYTES)
            if not d or d == '':
                break
            buf += d
        except (IOError, OSError):
            break
    return buf

def out(*args):
    buf = "%s\r\n" % " ".join(args)
    write(pty.STDOUT_FILENO, buf)

def err(*args):
    buf = "%s\r\n" % " ".join(args)
    write(pty.STDERR_FILENO, buf)

def die(*args):
    err(*args)
    sys.exit(1)
usage = '''usage: %prog [options] [terminal_name]\n
For more help, see https://github.com/Floobits/flootty'''
def main():
    settings = read_floorc()
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-u", "--username",
                      dest="username",
                      default=settings.get('username'),
                      help="Your Floobits username")
    parser.add_option("-s", "--secret",
                      dest="secret",
                      default=settings.get('secret'),
                      help="Your Floobits secret (api key)")
    parser.add_option("-c", "--create",
                      dest="create",
                      default=False,
                      action="store_true",
                      help="The terminal name to create")
    parser.add_option("--host",
                      dest="host",
                      default=DEFAULT_HOST,
                      help="The host to connect to. Deprecated. Use --url instead.")
    parser.add_option("-p", "--port",
                      dest="port",
                      default=DEFAULT_PORT,
                      help="The port to connect to. Deprecated. Use --url instead.")
    parser.add_option("-w", "--workspace",
                      dest="workspace",
                      help="The workspace name. --owner is required with this option. Deprecated. Use --url instead.")
    parser.add_option("-o", "--owner",
                      dest="owner",
                      help="The workspace owner. --workspace is required with this option. Deprecated. Use --url instead.")
    parser.add_option("-l", "--list",
                      dest="list",
                      default=False,
                      action="store_true",
                      help="List all terminals in the workspace")
    parser.add_option("--unsafe",
                      dest="safe",
                      default=True,
                      action="store_false",
                      help="Less safe terminal. This allows other users to send enter in your terminal.")
    parser.add_option("--no-ssl",
                      dest="use_ssl",
                      default=True,
                      action="store_false",
                      help="Do not use this option unless you know what you are doing!")
    parser.add_option("--url",
                      dest="workspace_url",
                      default=None,
                      help="The URL of the workspace to connect to.")
    parser.add_option("--resize",
                      dest="resize",
                      default=False,
                      action="store_true",
                      help="Resize your terminal to the host terminal size.")
    parser.add_option("-P", "--preserve-ps1",
                      dest="set_prompt",
                      default=True,
                      action="store_false",
                      help="Don't change $PS1 (bash/zsh prompt)")
    parser.add_option("-v", "--version",
                      dest="version",
                      default=False,
                      action="store_true",
                      help="Print version")

    options, args = parser.parse_args()
    if options.version:
        print(CLIENT)
        return

    G.USERNAME = options.username
    G.SECRET = options.secret

    default_term_name = ""
    if options.create:
        default_term_name = "ftty"
    term_name = args and args[0] or default_term_name

    if options.workspace and options.owner and options.workspace_url:
        # TODO: confusing
        parser.error("You can either specify --workspace and --owner, or --url, but not both.")
    if bool(options.workspace) != bool(options.owner):
        parser.error("You must specify a workspace and owner or neither.")
    for opt in ['owner', 'workspace']:
        if getattr(options, opt):
            print('%s is deprecated. Please use --url instead.' % opt)

    if not options.workspace or not options.owner:
        floo = {}
        if options.workspace_url:
            floo = utils.parse_url(options.workspace_url)
        else:
            for floo_path in walk_up(os.path.realpath('.')):
                try:
                    floo = json.loads(open(os.path.join(floo_path, '.floo'), 'rb').read().decode('utf-8'))
                    floo = utils.parse_url(floo['url'])
                except Exception:
                    pass
                else:
                    break
        options.host = floo.get('host')
        options.workspace = floo.get('workspace')
        options.owner = floo.get('owner')
        options.use_ssl = floo.get('secure')
        if not options.port:
            options.port = floo.get('port')
        if not options.host:
            options.host = floo.get('host')

    if not options.workspace or not options.owner:
        now_editing = api.get_now_editing_workspaces()
        now_editing = json.loads(now_editing.read().decode('utf-8'))
        if len(now_editing) == 1:
            options.workspace = now_editing[0]['name']
            options.owner = now_editing[0]['owner']
        # TODO: list possible workspaces to join if > 1 is active

    if options.list:
        if len(term_name) != 0:
            die("I don't understand why you gave me a positional argument.")

    for opt in ['workspace', 'owner', 'username', 'secret']:
        if not getattr(options, opt):
            parser.error('%s not given' % opt)

    color_reset = '\033[0m'
    if options.safe:
        green = '\033[92m'
        print('%sTerminal is safe. Other users will not be able to send [enter]%s' % (green, color_reset))
    else:
        yellorange = '\033[93m'
        print('%sTerminal is unsafe. Other users will be able to send [enter]. Be wary!%s' % (yellorange, color_reset))

    f = Flootty(options, term_name)
    atexit.register(f.cleanup)
    f.connect_to_internet()
    f.select()
def walk_up(path):
    step_up = lambda x: os.path.realpath(os.path.join(x, '..'))
    parent = step_up(path)
    while parent != path:
        yield path
        path = parent
        parent = step_up(path)
    yield path
class FD(object):
    def __init__(self, fileno, reader=None, writer=None, errer=None, name=None):
        self.fileno = fileno
        self.reader = reader
        self.writer = writer
        self.errer = errer
        self.name = name

    def __getitem__(self, key):
        return getattr(self, key, None)

    def __str__(self):
        return str(self.name)
class Flootty(object):
    '''Mostly OK at sharing a shell'''

    def __init__(self, options, term_name):
        self.master_fd = None
        self.original_wincher = None
        self.fds = {}
        self.readers = set()
        self.writers = set()
        self.errers = set()
        self.empty_selects = 0
        self.reconnect_timeout = None
        self.buf_out = collections.deque()
        self.buf_in = b''
        self.host = options.host
        self.port = int(options.port)
        self.workspace = options.workspace
        self.owner = options.owner
        self.options = options
        self.term_name = term_name
        self.authed = False
        self.term_id = None
        self.orig_stdin_atts = None
        self.orig_stdout_atts = None
        self.last_stdin = 0
        self.reconnect_delay = INITIAL_RECONNECT_DELAY

    def add_fd(self, fileno, **kwargs):
        try:
            fileno = fileno.fileno()
        except:
            fileno = fileno
        fd = FD(fileno, **kwargs)
        self.fds[fileno] = fd
        if fd.reader:
            self.readers.add(fileno)
        if fd.writer:
            self.writers.add(fileno)
        if fd.errer:
            self.errers.add(fileno)

    def remove_fd(self, fileno):
        self.readers.discard(fileno)
        self.writers.discard(fileno)
        self.errers.discard(fileno)
        try:
            del self.fds[fileno]
        except KeyError:
            pass

    def transport(self, name, data):
        data['name'] = name
        self.buf_out.append(data)

    def select(self):
        attrs = ('errer', 'reader', 'writer')
        while True:
            utils.call_timeouts()
            if len(self.buf_out) == 0 and self.sock:
                self.writers.remove(self.sock.fileno())
            try:
                # NOTE: you will never have to write anything without reading first from a different one
                _in, _out, _except = select.select(self.readers, self.writers, self.errers, SELECT_TIMEOUT)
            except (IOError, OSError) as e:
                continue
            except (select.error, socket.error, Exception) as e:
                # Interrupted system call.
                if e[0] == 4:
                    continue
                self.reconnect()
                continue
            finally:
                if self.sock:
                    self.writers.add(self.sock.fileno())
            for position, fds in enumerate([_except, _in, _out]):
                attr = attrs[position]
                for fd in fds:
                    # the handler can remove itself from self.fds...
                    handler = self.fds.get(fd)
                    if handler is None:
                        continue
                    handler = handler[attr]
                    if handler:
                        handler(fd)
                    else:
                        raise Exception('no handler for fd: %s %s' % (fd, attr))

    def cloud_read(self, fd):
        buf = b''
        try:
            while True:
                d = self.sock.recv(FD_READ_BYTES)
                if not d:
                    break
                buf += d
        except (socket.error, TypeError):
            pass
        if buf:
            self.empty_selects = 0
            self.handle(buf)
        else:
            self.empty_selects += 1
            if (int(self.empty_selects * SELECT_TIMEOUT)) > NET_TIMEOUT:
                err('No data from sock.recv() {0} times.'.format(self.empty_selects))
                return self.reconnect()

    def cloud_write(self, fd):
        new_buf_out = collections.deque()
        try:
            while True:
                item = self.buf_out.popleft()
                data = json.dumps(item) + '\n'
                if self.authed or item['name'] == 'auth':
                    if not PY2:
                        data = data.encode('utf-8')
                    self.sock.sendall(data)
                else:
                    new_buf_out.append(item)
        except socket.error:
            self.buf_out.appendleft(item)
            self.reconnect()
        except IndexError:
            pass
        self.buf_out.extendleft(new_buf_out)

    def cloud_err(self, err):
        out('reconnecting because of %s' % err)
        self.reconnect()

    def handle(self, req):
        self.buf_in += req
        while True:
            before, sep, after = self.buf_in.partition(b'\n')
            if not sep:
                break
            data = json.loads(before.decode('utf-8'), encoding='utf-8')
            self.handle_event(data)
            self.buf_in = after

    def handle_event(self, data):
        name = data.get('name')
        if not name:
            return out('no name in data?!?')
        func = getattr(self, "on_%s" % (name), None)
        if not func:
            return
        func(data)

    def on_room_info(self, ri):
        self.authed = True
        self.ri = ri

        def list_terms(terms):
            term_name = ""
            for term_id, term in terms.items():
                owner = str(term['owner'])
                term_name = term['term_name']
                out('terminal %s created by %s' % (term['term_name'], ri['users'][owner]['username']))
            return term_name

        if self.options.create:
            buf = self._get_pty_size()
            term_name = self.term_name
            i = 0
            term_names = [term['term_name'] for term_id, term in ri['terms'].items()]
            while term_name in term_names:
                i += 1
                term_name = self.term_name + str(i)
            self.term_name = term_name
            return self.transport('create_term', {'term_name': self.term_name, 'size': [buf[1], buf[0]]})
        elif self.options.list:
            out('Terminals in %s::%s' % (self.owner, self.workspace))
            list_terms(ri['terms'])
            return die()
        elif not self.term_name:
            if len(ri['terms']) == 0:
                out('There is no active terminal in this workspace. Do you want to share your terminal? (y/n)')
                choice = input().lower()
                self.term_name = "_"
                if choice == 'y':
                    self.options.create = True
                    buf = self._get_pty_size()
                    return self.transport('create_term', {'term_name': self.term_name, 'size': [buf[1], buf[0]]})
                else:
                    die('If you ever change your mind, you can share your terminal using the --create [super_awesome_name] flag.')
            elif len(ri['terms']) == 1:
                term_id, term = list(ri['terms'].items())[0]
                self.term_id = int(term_id)
                self.term_name = term['term_name']
            else:
                out('More than one active term exists in this workspace.')
                example_name = list_terms(ri['terms'])
                die('Please pick a workspace like so: flootty %s' % example_name)
        else:
            for term_id, term in ri['terms'].items():
                if term['term_name'] == self.term_name:
                    self.term_id = int(term_id)
                    break
            if self.term_id is None:
                die('No terminal with name %s' % self.term_name)
        return self.join_term()

    def on_ping(self, data):
        self.transport('pong', {})

    def on_disconnect(self, data):
        reason = data.get('reason')
        out('Disconnected by server!')
        if reason:
            # TODO: don't kill terminal until current process is done or something
            die('Reason: %s' % reason)
        self.reconnect()

    def on_error(self, data):
        if self.term_id is None:
            die(data.get('msg'))
        else:
            out('Error from server: %s' % data.get('msg'))

    def on_create_term(self, data):
        if data.get('term_name') != self.term_name:
            return
        self.term_id = data.get('id')
        self.create_term()

    def on_delete_term(self, data):
        if data.get('id') != self.term_id:
            return
        die('User %s killed the terminal. Exiting.' % (data.get('username')))

    def on_update_term(self, data):
        if data.get('id') != self.term_id:
            return
        self._set_pty_size()

    def on_term_stdin(self, data):
        if data.get('id') != self.term_id:
            return
        if not self.options.create:
            return
        self.handle_stdio(base64.b64decode(data['data']), data.get('user_id'))

    def on_term_stdout(self, data):
        if data.get('id') != self.term_id:
            return
        self.handle_stdio(data['data'])

    def reconnect(self):
        if self.reconnect_timeout:
            return
        new_buf_out = collections.deque()
        total_len = 0
        while True:
            try:
                item = self.buf_out.popleft()
            except IndexError:
                break
            if item['name'] == 'term_stdout':
                total_len += len(item['data'])
                if total_len > MAX_BYTES_TO_BUFFER:
                    continue
            new_buf_out.appendleft(item)
        self.buf_out = new_buf_out
        if self.sock:
            self.remove_fd(self.sock.fileno())
            try:
                self.sock.shutdown(2)
            except Exception:
                pass
            try:
                self.sock.close()
            except Exception:
                pass
            self.sock = None
        self.authed = False
        self.reconnect_delay *= 1.5
        if self.reconnect_delay > 10000:
            self.reconnect_delay = 10000
        self.reconnect_timeout = utils.set_timeout(self.connect_to_internet, self.reconnect_delay)

    def send_auth(self):
        self.buf_out.appendleft({
            'name': 'auth',
            'username': self.options.username,
            'secret': self.options.secret,
            'room': self.workspace,
            'room_owner': self.owner,
            'client': CLIENT,
            'platform': sys.platform,
            'version': PROTO_VERSION
        })

    def connect_to_internet(self):
        self.empty_selects = 0
        self.reconnect_timeout = None
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.options.use_ssl:
            self.cert_fd = tempfile.NamedTemporaryFile()
            self.cert_fd.write(cert.CA_CERT.encode('utf-8'))
            self.cert_fd.flush()
            self.sock = ssl.wrap_socket(self.sock, ca_certs=self.cert_fd.name, cert_reqs=ssl.CERT_REQUIRED)
        elif self.port == 3448:
            self.port = 3148
        out('Connecting to %s' % self.workspace_url())
        try:
            self.sock.connect((self.host, self.port))
            if self.options.use_ssl:
                self.sock.do_handshake()
        except socket.error as e:
            out('Error connecting: %s.' % e)
            return self.reconnect()
        self.sock.setblocking(0)
        out('Connected!')
        self.send_auth()
        self.add_fd(self.sock, reader=self.cloud_read, writer=self.cloud_write, errer=self.cloud_err, name='net')
        self.reconnect_delay = INITIAL_RECONNECT_DELAY

    def workspace_url(self):
        proto = {True: "https", False: "http"}
        proto_str = proto[self.options.use_ssl]
        port_str = ''
        if self.options.use_ssl:
            if self.port != 3448:
                port_str = ':%s' % self.port
        else:
            if self.port != 3148:
                port_str = ':%s' % self.port
        return '%s://%s%s/%s/%s' % (proto_str, self.host, port_str, self.owner, self.workspace)

    def join_term(self):
        out('Successfully joined %s' % (self.workspace_url()))
        self.orig_stdout_atts = tty.tcgetattr(sys.stdout)
        stdout = sys.stdout.fileno()
        tty.setraw(stdout)
        fl = fcntl.fcntl(stdout, fcntl.F_GETFL)
        fcntl.fcntl(stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK)
        self.orig_stdin_atts = tty.tcgetattr(sys.stdin)
        stdin = sys.stdin.fileno()
        tty.setraw(stdin)
        fl = fcntl.fcntl(stdin, fcntl.F_GETFL)
        fcntl.fcntl(stdin, fcntl.F_SETFL, fl | os.O_NONBLOCK)

        def ship_stdin(fd):
            data = read(fd)
            if data:
                self.transport("term_stdin", {'data': base64.b64encode(data).decode('utf8'), 'id': self.term_id})

        if 'term_stdin' in self.ri['perms']:
            out('You have permission to write to this terminal. Remember: With great power comes great responsibility.')
            self.add_fd(stdin, reader=ship_stdin, name='join_term_stdin')
        else:
            out('You do not have permission to write to this terminal.')

        def stdout_write(buf):
            write(stdout, base64.b64decode(buf))

        self.handle_stdio = stdout_write
        self._set_pty_size(self.ri['terms'][str(self.term_id)]['size'])

    def create_term(self):
        '''
        Create a spawned process.
        Based on the code for pty.spawn().
        '''
        if self.master_fd:
            # reconnected. don't spawn a new shell
            out('Reconnected to %s' % (self.workspace_url()))
            return
        shell = os.environ['SHELL']
        out('Successfully joined %s' % (self.workspace_url()))
        self.child_pid, self.master_fd = pty.fork()
        if self.child_pid == pty.CHILD:
            os.execlpe(shell, shell, '--login', os.environ)
        self.orig_stdin_atts = tty.tcgetattr(sys.stdin.fileno())
        tty.setraw(pty.STDIN_FILENO)
        self.original_wincher = signal.signal(signal.SIGWINCH, self._signal_winch)
        self._set_pty_size()

        def slave_death(fd):
            die('Exiting flootty because child exited.')

        self.extra_data = b''

        def stdout_write(fd):
            '''
            Called when there is data to be sent from the child process back to the user.
            '''
            try:
                data = self.extra_data + os.read(fd, FD_READ_BYTES)
            except:
                data = None
            if not data:
                return die("Time to go!")
            self.transport("term_stdout", {'data': base64.b64encode(data).decode('utf8'), 'id': self.term_id})
            write(pty.STDOUT_FILENO, data)

        self.add_fd(self.master_fd, reader=stdout_write, errer=slave_death, name='create_term_stdout_write')

        def stdin_write(fd):
            data = os.read(fd, FD_READ_BYTES)
            if data:
                write(self.master_fd, data)
                now = time.time()
                # Only send stdin event if it's been > 2 seconds. This prevents people from figuring out password lengths
                if now - self.last_stdin > 2:
                    self.transport("term_stdin", {'data': ' ', 'id': self.term_id})
                    self.last_stdin = now

        self.add_fd(pty.STDIN_FILENO, reader=stdin_write, name='create_term_stdin_write')

        def net_stdin_write(buf, user_id=None):
            if self.options.safe:
                if buf.find('\n') != -1 or buf.find('\r') != -1:
                    to = user_id or []
                    self.transport('datamsg', {
                        'to': to,
                        'data': {
                            'name': 'safe_term',
                            'term_id': self.term_id,
                            'msg': 'Terminal %s is in safe mode. Other users are not allowed to press enter.' % self.term_name,
                        }})
                    self.transport('term_stdout', {
                        'id': self.term_id,
                        'data': base64.b64encode('\a').decode('utf8'),
                    })
                    buf = buf.replace('\n', '')
                    buf = buf.replace('\r', '')
            if not buf:
                return
            write(self.master_fd, buf)

        self.handle_stdio = net_stdin_write
        color_green = '\\[\\e[32m\\]'
        color_reset = '\\[\\033[0m\\]'
        color_yellorange = '\\[\\e[93m\\]'
        # TODO: other shells probably use weird color escapes
        if 'zsh' in shell:
            color_green = "%{%F{green}%}"
            color_reset = "%{%f%}"
            color_yellorange = "%{%F{yellow}%}"
        if self.options.set_prompt:
            term_color = color_yellorange
            if self.options.safe:
                term_color = color_green
            # Not confusing at all </sarcasm>
            cmd = 'PS1="%s%s::%s::%s%s%s%s $PS1"\n' % (color_green, self.owner, self.workspace, color_reset, term_color, self.term_name, color_reset)
            write(self.master_fd, cmd)

    def _signal_winch(self, signum, frame):
        '''
        Signal handler for SIGWINCH - window size has changed.
        '''
        self._set_pty_size()

    def _get_pty_size(self):
        buf = array.array('h', [0, 0, 0, 0])
        fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)
        return buf

    def _set_pty_size(self, size=None):
        '''
        Sets the window size of the child pty based on the window size of our own controlling terminal.
        '''
        # Get the terminal size of the real terminal, set it on the pseudoterminal.
        buf = self._get_pty_size()
        if size:
            buf[0] = size[1]
            buf[1] = size[0]
        if self.options.create:
            assert self.master_fd is not None
            fcntl.ioctl(self.master_fd, termios.TIOCSWINSZ, buf)
            if self.term_id:
                self.transport('update_term', {'id': self.term_id, 'size': [buf[1], buf[0]]})
        else:
            # XXXX: this resizes the window :/
            if self.options.resize:
                os.write(pty.STDOUT_FILENO, "\x1b[8;{rows};{cols}t".format(rows=buf[0], cols=buf[1]))
            fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCSWINSZ, buf)

    def cleanup(self):
        if self.orig_stdout_atts:
            self.orig_stdout_atts[3] = self.orig_stdout_atts[3] | termios.ECHO
            tty.tcsetattr(sys.stdout, tty.TCSAFLUSH, self.orig_stdout_atts)
        if self.orig_stdin_atts:
            self.orig_stdin_atts[3] = self.orig_stdin_atts[3] | termios.ECHO
            tty.tcsetattr(sys.stdin, tty.TCSAFLUSH, self.orig_stdin_atts)
        if self.original_wincher:
            signal.signal(signal.SIGWINCH, self.original_wincher)
        try:
            self.cert_fd.close()
        except Exception:
            pass
        print('ciao.')

if __name__ == '__main__':
    main()
########NEW FILE########
__FILENAME__ = shared
import os
__VERSION__ = ''
__PLUGIN_VERSION__ = ''
# Config settings
USERNAME = ''
SECRET = ''
API_KEY = ''
DEBUG = False
SOCK_DEBUG = False
ALERT_ON_MSG = True
LOG_TO_CONSOLE = False
BASE_DIR = os.path.expanduser(os.path.join('~', 'floobits'))
# Shared globals
DEFAULT_HOST = 'floobits.com'
DEFAULT_PORT = 3448
SECURE = True
SHARE_DIR = None
COLAB_DIR = ''
PROJECT_PATH = ''
JOINED_WORKSPACE = False
PERMS = []
STALKER_MODE = False
AUTO_GENERATED_ACCOUNT = False
PLUGIN_PATH = None
WORKSPACE_WINDOW = None
CHAT_VIEW = None
CHAT_VIEW_PATH = None
TICK_TIME = 100
AGENT = None
IGNORE_MODIFIED_EVENTS = False
VIEW_TO_HASH = {}
FLOORC_PATH = os.path.expanduser(os.path.join('~', '.floorc'))
########NEW FILE########
__FILENAME__ = utils
import re
import time
from collections import defaultdict
try:
    from urllib.parse import urlparse
    assert urlparse
except ImportError:
    from urlparse import urlparse

try:
    from . import shared as G
    assert G
except (ImportError, ValueError):
    import shared as G
top_timeout_id = 0
cancelled_timeouts = set()
timeout_ids = set()
timeouts = defaultdict(list)
def set_timeout(func, timeout, *args, **kwargs):
    global top_timeout_id
    timeout_id = top_timeout_id
    top_timeout_id += 1
    if top_timeout_id > 100000:
        top_timeout_id = 0

    def timeout_func():
        timeout_ids.discard(timeout_id)
        if timeout_id in cancelled_timeouts:
            cancelled_timeouts.remove(timeout_id)
            return
        func(*args, **kwargs)

    then = time.time() + (timeout / 1000.0)
    timeouts[then].append(timeout_func)
    timeout_ids.add(timeout_id)
    return timeout_id
def cancel_timeout(timeout_id):
    if timeout_id in timeout_ids:
        cancelled_timeouts.add(timeout_id)

def call_timeouts():
    now = time.time()
    to_remove = []
    for t, tos in timeouts.copy().items():
        if now >= t:
            for timeout in tos:
                timeout()
            to_remove.append(t)
    for k in to_remove:
        del timeouts[k]
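
# e.g. set_timeout(print, 1000, 'hi') schedules print('hi'); it fires on the
# first call_timeouts() that runs at least one second later.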
def parse_url(workspace_url):
    secure = G.SECURE
    owner = None
    workspace_name = None
    parsed_url = urlparse(workspace_url)
    port = parsed_url.port
    if not port:
        port = G.DEFAULT_PORT
    if parsed_url.scheme == 'http':
        if not port:
            port = 3148
        secure = False
    result = re.match('^/([-\@\+\.\w]+)/([-\@\+\.\w]+)/?$', parsed_url.path)
    if not result:
        result = re.match('^/r/([-\@\+\.\w]+)/([-\@\+\.\w]+)/?$', parsed_url.path)
    if result:
        (owner, workspace_name) = result.groups()
    else:
        raise ValueError('%s is not a valid Floobits URL' % workspace_url)
    return {
        'host': parsed_url.hostname,
        'owner': owner,
        'port': port,
        'workspace': workspace_name,
        'secure': secure,
    }
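
# e.g. parse_url('https://floobits.com/someowner/someworkspace') returns
# {'host': 'floobits.com', 'owner': 'someowner', 'port': 3448,
#  'workspace': 'someworkspace', 'secure': True}, given the defaults in shared.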
########NEW FILE########
__FILENAME__ = version
FLOOTTY_VERSION = '2.1.4'
########NEW FILE########
|
[
"dyangUCI@github.com"
] |
dyangUCI@github.com
|
dc5c45a0d9c44cc2b7f3bb35d23198d69dbd715b
|
e73430ff79c2d9325037bd07d0dbdf9cc7c93d84
|
/main.py
|
96f07d51325003fb5c336ddcad218beaa913f938
|
[] |
no_license
|
GianBkk/python_les3_syntra
|
8232b6773f6d82ff819c1a6a9823d21cd471a5b0
|
1abb1c57c862fbc29d1a1b7245ede151e24b15f2
|
refs/heads/master
| 2023-07-10T23:32:59.059183
| 2021-08-20T22:38:45
| 2021-08-20T22:38:45
| 398,345,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
# Hello World
print('Hello World7')
|
[
"gian200308@gmail.com"
] |
gian200308@gmail.com
|
ee27313bde085575df70e1d42550c376748fe931
|
08a9dc04e6defa9dc9378bfbfbe0b6185af6a86a
|
/manager/views.py
|
78b92fee93ead9c43d6d958d58f90642c7277c7f
|
[] |
no_license
|
Felicity-jt/50.008-Project-1
|
8ecc63d2302b2eaa4060f4c900d7fed2e958927c
|
960b5e57a39bfda1c31653798c23ddc051a2ff19
|
refs/heads/master
| 2021-08-24T00:40:27.886634
| 2017-12-07T09:14:12
| 2017-12-07T09:14:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,191
|
py
|
from json import loads
from django.http import Http404
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_POST
from common.db import sql, page
from common.utils import pagination
from common.messages import NOT_STAFF
from common.decorators import json_response
@require_POST
@json_response
def new(request):
    """Add item or entity into inventory."""
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    s = """INSERT INTO item (id, name)
           VALUES (DEFAULT, %s)"""
    try:
        rq = loads(request.body)
        # sanitize before inserting
        values = (rq['name'],)
    except (ValueError, KeyError):
        return None
    sql(s, *values)
    return {}

@json_response
def stock(request, item_id):
    """Get or update current stock."""
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    q = 'SELECT id, price, quantity FROM item WHERE id = %s'
    if request.method == 'POST':
        # update price and/or quantity from post data
        s = """UPDATE item SET
               quantity = %s
               WHERE id = %s"""
        try:
            rq = loads(request.body)
            # sanitize before inserting
            values = (int(rq['quantity']),)
        except (ValueError, KeyError):
            return None
        sql(s, *values, item_id)
    try:
        r = sql(q, item_id)[0]
    except IndexError:
        raise Http404
    return {
        'id': r[0],
        'price': r[1],
        'quantity': r[2],
    }

@json_response
def stats(request, entity, year, month):
    """Get stats for entity."""
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    if entity not in ('item', 'company', 'creator'):
        raise Http404
    q = """SELECT item_id, SUM(quantity) AS total FROM purchase_item
           INNER JOIN purchase p ON p.id = purchase_item.purchase_id
           WHERE YEAR(p.made_on) = %s AND MONTH(p.made_on) = %s
           GROUP BY item_id"""
    pg = pagination(request)
    pg['sort'].append('-total')
    return sql(q + page(**pg), year, month)
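
# e.g. a staff client POSTs {"name": "Widget"} to new() to insert an item,
# then calls stock() with that item's id to read or update its quantity.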
|
[
"kwokshungit@gmail.com"
] |
kwokshungit@gmail.com
|
3761f78794812b198b0c941d3b519e3150947df5
|
88dde3533d8283452b780fd120d8da94a2231876
|
/lab1/encoders.py
|
e670db5d8e2da9dfbcba5f7d55128dd8dacf95cf
|
[] |
no_license
|
jando16407/ControlofMobileRobots
|
035e654cd556138321eb8c442a8c8f535edbcfdb
|
10806892b812296bb5fc83124094a802596760b4
|
refs/heads/master
| 2020-03-28T17:45:50.229926
| 2018-09-16T01:49:10
| 2018-09-16T01:49:10
| 148,688,281
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,469
|
py
|
import time
import RPi.GPIO as GPIO
import signal
import sys
import tty
import termios
# Pins that the encoders are connected to
LENCODER = 17
RENCODER = 18
left = 0
right = 0
# declare tuple
#counts = ("Left count : ", str(left), ", RIght count : ", str(right));
# The det_ch method will determine which key has been pressed
def det_ch():
    aa = sys.stdin.fileno()
    settings = termios.tcgetattr(aa)
    try:
        tty.setraw(sys.stdin.fileno())
        key = sys.stdin.read(1)
    finally:
        termios.tcsetattr(aa, termios.TCSADRAIN, settings)
    return key

# This function is called when the left encoder detects a rising edge signal.
def onLeftEncode(pin):
    global left
    left += 1
    display_ticks()

# This function is called when the right encoder detects a rising edge signal.
def onRightEncode(pin):
    global right
    right += 1
    display_ticks()

# This function displays current number of left and right ticks
def display_ticks():
    sys.stdout.write('\r')
    sys.stdout.write("Left encoder ticked! ")
    sys.stdout.write(str(left))
    sys.stdout.write(" : Right encoder ticked! ")
    sys.stdout.write(str(right))
    sys.stdout.flush()

# This function is called when Ctrl+C is pressed.
# It's intended for properly exiting the program.
def ctrlC(signum, frame):
    print(str(left))
    print("\n", str(right))
    print("\nExiting")
    GPIO.cleanup()
    exit()

# This function resets the tick count
def resetCounts():
    print("RESETCOUNTS CALLED")
    global left
    global right
    left = 0
    right = 0

# This function returns the tuple of tick counts
def getCounts():
    print("GETCOUNTS CALLED\n")
    return (str(left), str(right))
# Attach the Ctrl+C signal interrupt
signal.signal(signal.SIGINT, ctrlC)
# Set the pin numbering scheme to the numbering shown on the robot itself.
GPIO.setmode(GPIO.BCM)
# Set encoder pins as input
# Also enable pull-up resistors on the encoder pins
# This ensures a clean 0V and 3.3V is always outputted from the encoders.
GPIO.setup(LENCODER, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(RENCODER, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Attach a rising edge interrupt to the encoder pins
GPIO.add_event_detect(LENCODER, GPIO.RISING, onLeftEncode)
GPIO.add_event_detect(RENCODER, GPIO.RISING, onRightEncode)
# Prevent the program from exiting by adding a looping delay.
while True:
    time.sleep(1)
    key_input = det_ch()
    if key_input == "g":
        print(getCounts())
    elif key_input == "r":
        resetCounts()
    elif key_input == "c":
        GPIO.cleanup()
        print("Exiting")
        exit()
|
[
"jando16407c@gmail.com"
] |
jando16407c@gmail.com
|
39ce07857213f8a281675528cad52ce7943c5bf1
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/zpt/trunk/site-packages/zpt/_pytz/zoneinfo/US/Indiana_minus_Starke.py
|
f06a4f85e29494d5c49f070ed6153788987fe72d
|
[
"MIT",
"ZPL-2.1"
] |
permissive
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683
| 2010-05-13T18:24:20
| 2010-05-13T18:24:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,743
|
py
|
'''tzinfo timezone information for US/Indiana_minus_Starke.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Indiana_minus_Starke(DstTzInfo):
    '''US/Indiana_minus_Starke timezone definition. See datetime.tzinfo for details'''

    zone = 'US/Indiana_minus_Starke'

    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1918,3,31,8,0,0),
        d(1918,10,27,7,0,0),
        d(1919,3,30,8,0,0),
        d(1919,10,26,7,0,0),
        d(1942,2,9,8,0,0),
        d(1945,8,14,23,0,0),
        d(1945,9,30,7,0,0),
        d(1947,4,27,8,0,0),
        d(1947,9,28,7,0,0),
        d(1948,4,25,8,0,0),
        d(1948,9,26,7,0,0),
        d(1949,4,24,8,0,0),
        d(1949,9,25,7,0,0),
        d(1950,4,30,8,0,0),
        d(1950,9,24,7,0,0),
        d(1951,4,29,8,0,0),
        d(1951,9,30,7,0,0),
        d(1952,4,27,8,0,0),
        d(1952,9,28,7,0,0),
        d(1953,4,26,8,0,0),
        d(1953,9,27,7,0,0),
        d(1954,4,25,8,0,0),
        d(1954,9,26,7,0,0),
        d(1955,4,24,8,0,0),
        d(1955,10,30,7,0,0),
        d(1956,4,29,8,0,0),
        d(1956,10,28,7,0,0),
        d(1957,4,28,8,0,0),
        d(1957,9,29,7,0,0),
        d(1958,4,27,8,0,0),
        d(1958,9,28,7,0,0),
        d(1959,4,26,8,0,0),
        d(1959,10,25,7,0,0),
        d(1960,4,24,8,0,0),
        d(1960,10,30,7,0,0),
        d(1961,4,30,8,0,0),
        d(1961,10,29,7,0,0),
        d(1962,4,29,8,0,0),
        d(1963,10,27,7,0,0),
        d(1967,4,30,8,0,0),
        d(1967,10,29,7,0,0),
        d(1968,4,28,8,0,0),
        d(1968,10,27,7,0,0),
        d(1969,4,27,8,0,0),
        d(1969,10,26,7,0,0),
        d(1970,4,26,8,0,0),
        d(1970,10,25,7,0,0),
        d(1971,4,25,8,0,0),
        d(1971,10,31,7,0,0),
        d(1972,4,30,8,0,0),
        d(1972,10,29,7,0,0),
        d(1973,4,29,8,0,0),
        d(1973,10,28,7,0,0),
        d(1974,1,6,8,0,0),
        d(1974,10,27,7,0,0),
        d(1975,2,23,8,0,0),
        d(1975,10,26,7,0,0),
        d(1976,4,25,8,0,0),
        d(1976,10,31,7,0,0),
        d(1977,4,24,8,0,0),
        d(1977,10,30,7,0,0),
        d(1978,4,30,8,0,0),
        d(1978,10,29,7,0,0),
        d(1979,4,29,8,0,0),
        d(1979,10,28,7,0,0),
        d(1980,4,27,8,0,0),
        d(1980,10,26,7,0,0),
        d(1981,4,26,8,0,0),
        d(1981,10,25,7,0,0),
        d(1982,4,25,8,0,0),
        d(1982,10,31,7,0,0),
        d(1983,4,24,8,0,0),
        d(1983,10,30,7,0,0),
        d(1984,4,29,8,0,0),
        d(1984,10,28,7,0,0),
        d(1985,4,28,8,0,0),
        d(1985,10,27,7,0,0),
        d(1986,4,27,8,0,0),
        d(1986,10,26,7,0,0),
        d(1987,4,5,8,0,0),
        d(1987,10,25,7,0,0),
        d(1988,4,3,8,0,0),
        d(1988,10,30,7,0,0),
        d(1989,4,2,8,0,0),
        d(1989,10,29,7,0,0),
        d(1990,4,1,8,0,0),
        d(1990,10,28,7,0,0),
        d(1991,4,7,8,0,0),
        d(1991,10,27,7,0,0),
        d(2006,4,2,7,0,0),
        d(2006,10,29,6,0,0),
        d(2007,3,11,7,0,0),
        d(2007,11,4,6,0,0),
        d(2008,3,9,7,0,0),
        d(2008,11,2,6,0,0),
        d(2009,3,8,7,0,0),
        d(2009,11,1,6,0,0),
        d(2010,3,14,7,0,0),
        d(2010,11,7,6,0,0),
        d(2011,3,13,7,0,0),
        d(2011,11,6,6,0,0),
        d(2012,3,11,7,0,0),
        d(2012,11,4,6,0,0),
        d(2013,3,10,7,0,0),
        d(2013,11,3,6,0,0),
        d(2014,3,9,7,0,0),
        d(2014,11,2,6,0,0),
        d(2015,3,8,7,0,0),
        d(2015,11,1,6,0,0),
        d(2016,3,13,7,0,0),
        d(2016,11,6,6,0,0),
        d(2017,3,12,7,0,0),
        d(2017,11,5,6,0,0),
        d(2018,3,11,7,0,0),
        d(2018,11,4,6,0,0),
        d(2019,3,10,7,0,0),
        d(2019,11,3,6,0,0),
        d(2020,3,8,7,0,0),
        d(2020,11,1,6,0,0),
        d(2021,3,14,7,0,0),
        d(2021,11,7,6,0,0),
        d(2022,3,13,7,0,0),
        d(2022,11,6,6,0,0),
        d(2023,3,12,7,0,0),
        d(2023,11,5,6,0,0),
        d(2024,3,10,7,0,0),
        d(2024,11,3,6,0,0),
        d(2025,3,9,7,0,0),
        d(2025,11,2,6,0,0),
        d(2026,3,8,7,0,0),
        d(2026,11,1,6,0,0),
        d(2027,3,14,7,0,0),
        d(2027,11,7,6,0,0),
        d(2028,3,12,7,0,0),
        d(2028,11,5,6,0,0),
        d(2029,3,11,7,0,0),
        d(2029,11,4,6,0,0),
        d(2030,3,10,7,0,0),
        d(2030,11,3,6,0,0),
        d(2031,3,9,7,0,0),
        d(2031,11,2,6,0,0),
        d(2032,3,14,7,0,0),
        d(2032,11,7,6,0,0),
        d(2033,3,13,7,0,0),
        d(2033,11,6,6,0,0),
        d(2034,3,12,7,0,0),
        d(2034,11,5,6,0,0),
        d(2035,3,11,7,0,0),
        d(2035,11,4,6,0,0),
        d(2036,3,9,7,0,0),
        d(2036,11,2,6,0,0),
        d(2037,3,8,7,0,0),
        d(2037,11,1,6,0,0),
    ]

    _transition_info = [
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CWT'),
        i(-18000,3600,'CPT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,0,'EST'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
    ]

Indiana_minus_Starke = Indiana_minus_Starke()
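
# Usage sketch, assuming the vendored _pytz mirrors the pytz API:
#   from datetime import datetime
#   Indiana_minus_Starke.localize(datetime(2010, 7, 1, 12, 0))  # EDT, UTC-4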
|
[
"chad@zetaweb.com"
] |
chad@zetaweb.com
|
79f4e2ff02c9db62970001cd7f0a7386496d11e2
|
e8ea8326756378702052f5a785fab02e92abb21f
|
/Bluebook/Data/preprocess.py
|
9f07a132942a8c356a14b0de37f030f0fc828ee7
|
[] |
no_license
|
zyx061212/Kaggle
|
a6111464b3acf9e276a98844f65cd27852619f44
|
6051051882d41ea1bcb6930a9d1a9d0525fc869a
|
refs/heads/master
| 2020-03-26T15:48:59.545030
| 2015-02-23T22:23:35
| 2015-02-23T22:23:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,365
|
py
|
#!/usr/bin/env python
import util
from collections import defaultdict
import numpy as np
import pandas as pd
import csv_io
import math
def get_date_dataframe(date_column):
    return pd.DataFrame({
        "SaleYear": [d.year for d in date_column],
        "SaleMonth": [d.month for d in date_column],
        "SaleDay": [d.day for d in date_column]
    }, index=date_column.index)
def preprocess():
    train, test = util.get_train_test_df()
    columns = set(train.columns)
    #columns.remove("SalesID")
    #columns.remove("SalePrice")
    #columns.remove("saledate")
    #train_fea = get_date_dataframe(train["saledate"])
    #test_fea = get_date_dataframe(test["saledate"])
    #parseColumns = ["UsageBand"]
    parseColumns = ["UsageBand","fiBaseModel","fiModelSeries","fiModelDescriptor","ProductSize","ProductGroup","Drive_System","Enclosure","Forks","Pad_Type","Ride_Control","Stick","Transmission","Turbocharged","Blade_Extension","Blade_Width","Enclosure_Type","Engine_Horsepower","Hydraulics","Pushblock","Ripper","Scarifier","Tip_ControlCoupler","Coupler_System","Grouser_Tracks","Hydraulics_Flow","Track_Type","Thumb","Pattern_Changer","Grouser_Type","Backhoe_Mounting","Blade_Type","Travel_Controls","Differential_Type","Steering_Controls"]
    # skipped: "auctioneerID", "state", "ProductGroupDesc", "fiSecondaryDesc"
    # "fiModelDesc" is redundant and has too many options...
    colDict = {}
    for col in parseColumns:
        colDict[col] = []
    colMap = {}
    notInTest = []
    for index, col in enumerate(train.columns):
        print "MAP:", col, index
        colMap[col] = index
        if col in parseColumns:
            # collect every value the column takes in train and test (NaN -> 0)
            s = set(x for x in train[col].fillna(0))  # 0 if x == "" or not isinstance(x, float) else x
            s.update(x for x in test[col].fillna(0))  # math.isnan(x)
            colDict[col] = s
            print s
        if col == "fiBaseModel":
            a = set(x for x in train[col].fillna(0))
            b = set(x for x in test[col].fillna(0))
            print "fiBaseModel"
            # found 11 values in test that are not in train
            print [x for x in b if x not in a]
            # found several hundred in train that are not in test; try dropping these...
            print [x for x in a if x not in b]
            notInTest = [x for x in a if x not in b]
    SaleIDArr = []
    trainSalePriceArr = []
    count = 0
    csv_io.delete_file("train1.csv")
    for row in train.iterrows():
        trainSalePrice = []
        rowVals = row[1].fillna(0)
        newSet = []
        newRow = []
        # drop train rows whose fiBaseModel never appears in test
        if rowVals["fiBaseModel"] in notInTest:
            continue
        trainSalePrice.append(rowVals["SalePrice"])
        trainSalePriceArr.append(trainSalePrice)
        SaleID = []
        SaleID.append(rowVals["SalesID"])
        SaleIDArr.append(SaleID)
        # one-hot encode: for each parsed column, 1 in the position of this row's value
        for col in colDict.keys():
            for val in colDict[col]:
                if val == rowVals[col]:
                    newRow.append(1)
                else:
                    newRow.append(0)
        #newRow.append(rowVals["YearMade"])  # need to calculate age, sale date minus year
        newRow.append(rowVals["MachineHoursCurrentMeter"])
        count += 1
        if count % 10000 == 0:
            print "Count", count
        newSet.append(newRow)
        csv_io.write_delimited_file("train1.csv", newSet, header=None, delimiter=",", filemode="a")
    csv_io.write_delimited_file("target.csv", trainSalePriceArr, header=None, delimiter=",")
    csv_io.write_delimited_file("train_salesID.csv", SaleIDArr, header=None, delimiter=",")

    # -------------------------------------------
    SaleIDArr = []
    count = 0
    csv_io.delete_file("test1.csv")
    for row in test.iterrows():
        rowVals = row[1].fillna(0)
        newSet = []
        newRow = []
        SaleID = []
        SaleID.append(rowVals["SalesID"])
        SaleIDArr.append(SaleID)
        for col in colDict.keys():
            for val in colDict[col]:
                if val == rowVals[col]:
                    newRow.append(1)
                else:
                    newRow.append(0)
        #newRow.append(rowVals["YearMade"])  # need to calculate age, sale date minus year
        newRow.append(rowVals["MachineHoursCurrentMeter"])
        count += 1
        if count % 10000 == 0:
            print "Count", count
        newSet.append(newRow)
        csv_io.write_delimited_file("test1.csv", newSet, header=None, delimiter=",", filemode="a")
    csv_io.write_delimited_file("test_salesID.csv", SaleIDArr, header=None, delimiter=",")

if __name__ == "__main__":
    preprocess()
|
[
"mb16@hood.edu"
] |
mb16@hood.edu
|
e20f6bd63d39fd455755dbb515cb9dc5635e3808
|
a552f40c687d7e11e4c90350339286bd636f14ec
|
/mpproj/mpproj/wsgi.py
|
8c796e81e3d6e6bfa02bc2d7a6fabf4f39a2df7f
|
[
"Apache-2.0"
] |
permissive
|
rankrh/MountainProject
|
26e851bf266ba488f98f798c8f6d0030f439cac1
|
0e89e64ea15b09cbd3845ad9579fe8434a1f02c0
|
refs/heads/master
| 2021-06-24T19:58:51.511835
| 2020-11-20T01:30:51
| 2020-11-20T01:30:51
| 162,328,722
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 389
|
py
|
"""
WSGI config for mpproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpproj.settings')
application = get_wsgi_application()
|
[
"rankrh@gmail.com"
] |
rankrh@gmail.com
|
d2989e04adcb61408d61d1acff049248324876bf
|
7892e5cad76cb0de81dd4d2962b865d749111f2d
|
/pyFTS/sfts.py
|
656514329a9843578d4d13744a872a2ee9252951
|
[] |
no_license
|
cseveriano/solarenergyforecasting
|
5c70932d5168c12242efe41c85d183a41be71abf
|
b61fe76a4f7fc5863448b35881e01e1ccdb2dedd
|
refs/heads/master
| 2021-01-11T20:31:45.533149
| 2017-02-02T13:13:06
| 2017-02-02T13:13:06
| 79,134,373
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,645
|
py
|
import numpy as np
from pyFTS.common import FuzzySet,FLR
import fts
class SeasonalFLRG(fts.FTS):
def __init__(self, seasonality):
self.LHS = seasonality
self.RHS = []
def append(self, c):
self.RHS.append(c)
def __str__(self):
tmp = str(self.LHS) + " -> "
tmp2 = ""
for c in sorted(self.RHS, key=lambda s: s.name):
if len(tmp2) > 0:
tmp2 = tmp2 + ","
tmp2 = tmp2 + c.name
return tmp + tmp2
class SeasonalFTS(fts.FTS):
def __init__(self, name):
super(SeasonalFTS, self).__init__(1, "SFTS")
self.name = "Seasonal FTS"
self.detail = "Chen"
self.seasonality = 1
self.hasSeasonality = True
def generateFLRG(self, flrs):
flrgs = []
season = 1
for flr in flrs:
if len(flrgs) < self.seasonality:
flrgs.append(SeasonalFLRG(season))
flrgs[season].append(flr.RHS)
season = (season + 1) % (self.seasonality + 1)
if season == 0: season = 1
return (flrgs)
def train(self, data, sets, seasonality):
self.sets = sets
self.seasonality = seasonality
tmpdata = FuzzySet.fuzzySeries(data, sets)
flrs = FLR.generateRecurrentFLRs(tmpdata)
self.flrgs = self.generateFLRG(flrs)
def forecast(self, data):
ndata = np.array(data)
l = len(ndata)
ret = []
for k in np.arange(1, l):
flrg = self.flrgs[data[k]]
mp = self.getMidpoints(flrg)
ret.append(sum(mp) / len(mp))
return ret
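# Usage sketch (hedged: data, sets and the seasonality value are placeholders;
# train/forecast signatures are the ones defined above):
#   model = SeasonalFTS("example")
#   model.train(data, sets, seasonality=12)  # e.g. monthly data
#   forecasts = model.forecast(data)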
|
[
"carlossjr@gmail.com"
] |
carlossjr@gmail.com
|
75470a97bd504c9d4dc72ff45e08c475eb574228
|
921e109a719351ac053f33fccacb55e9b54bbee1
|
/courses/admin.py
|
5002e52cdf17036a60919ea3e27033b02d16451d
|
[
"MIT"
] |
permissive
|
deboraazevedo/udemy-free-courses
|
a49db24b89ca7a10444fbd8551d2800347ee4c25
|
6365321de0ed1c8521e1db96a63d17a06e056e4b
|
refs/heads/master
| 2021-06-25T23:04:45.749555
| 2018-02-27T23:48:26
| 2018-02-27T23:48:26
| 123,203,654
| 1
| 2
|
MIT
| 2020-10-04T09:44:06
| 2018-02-27T23:58:27
|
Python
|
UTF-8
|
Python
| false
| false
| 90
|
py
|
from django.contrib import admin
from .models import Course
admin.site.register(Course)
|
[
"contato.hudsonbrendon@gmail.com"
] |
contato.hudsonbrendon@gmail.com
|
bfa4051b7daa99e35be4c69d94d185b37ba84f1b
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_035/ch37_2020_03_25_14_04_04_120072.py
|
a165e2f3c23563f7b30d6684819d8aca366bc2cd
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
Senha = True
resposta = input("Qual é a senha")
while Senha:
    if resposta == "desisto":
        Senha = False
    else:
        # keep prompting until the user types "desisto"
        resposta = input("Qual é a senha")
print("Você acertou a senha!")
|
[
"you@example.com"
] |
you@example.com
|
64facd39145200dcd135636db0dd618f2d8a637a
|
b62dfe6d3049ea7fc7488d5dddce5a7fc968cc73
|
/venv/bin/pip
|
768260327e139dbb09ab6fd61c27320a344ee4ce
|
[] |
no_license
|
jiwenyu0531/MyAI
|
9764299498abbe742e3b7112acdb6fc6d4802adf
|
15808c3bcce8e861690039e0a2bc7819c12c6743
|
refs/heads/master
| 2020-03-28T19:44:27.717510
| 2018-09-16T14:46:10
| 2018-09-16T14:46:10
| 149,005,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
#!/Users/jiwenyu/PycharmProjects/MyAI/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip')()
)
|
[
"jiwenyu0531@qq.com"
] |
jiwenyu0531@qq.com
|
|
9269612b5c1989fb509e058f4f2198d446452df2
|
63f783b298c90d8c71d49be208172bd201e4cbca
|
/manage.py
|
fb2c235c6d28e1a55533132eed856d3ae6bffdb3
|
[
"MIT"
] |
permissive
|
dbobbgit/recipebox
|
58c41b1d89befc60cf4d5b7834b59d02d2dcf227
|
8e4e5c6f609e2524726954c9382ca37e844721f9
|
refs/heads/main
| 2023-05-26T00:56:38.292455
| 2021-06-09T14:11:46
| 2021-06-09T14:11:46
| 375,378,400
| 0
| 0
|
MIT
| 2021-06-09T14:11:47
| 2021-06-09T14:10:06
|
Python
|
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'recipebox.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"danielle@oakandivymedia.com"
] |
danielle@oakandivymedia.com
|
d4efd4910a49742035a254ce235200d20ebfb4ca
|
e77027cb5ffa4754a5ac1cf8d1cd1e2035710cfe
|
/VectorComplexLibrary.py
|
75f3378e79e5fcb97984b6c1edbeea7dd22f7ffb
|
[] |
no_license
|
wilmer-rodriguez-r/LibraryComplexVectors
|
36e5fc8bb19219cee3db327ace7c2406c61aead3
|
9fa83a829aaeb1e869376e4000389cf9b2ca941f
|
refs/heads/master
| 2023-03-02T17:04:03.423077
| 2021-02-09T15:07:34
| 2021-02-09T15:07:34
| 336,027,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,704
|
py
|
# Library of functions for complex vectors and matrices
import numpy as np
def auxiliar(matriz_vector):
    # checks whether the input is a matrix or an array, returning its number of columns
try:
column = len(matriz_vector[0])
return column
except TypeError:
column = 1
return column
def auxiliar_1(matriz_vector):
    # checks whether the input is a matrix or an array, returning a boolean
try:
len(matriz_vector[0])
return True
except TypeError:
return False
def auxiliar_2(num_complex):
    # conjugates a complex number
real = num_complex.real
imaginaria = -1 * num_complex.imag
if imaginaria == 0:
imaginaria = 0
if imaginaria < 0:
num = str(real) + str(imaginaria) + 'j'
else:
num = str(real) + '+' + str(imaginaria) + 'j'
return complex(num)
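# Note: the "imaginaria == 0" reassignment above normalizes -0.0 to 0, so the
# sign check that follows formats the conjugate as "...+0.0j" instead of "...-0.0j".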
def vectorSumaComplex(vector_1, vector_2):
for i in range(len(vector_1)):
vector_1[i] = vector_1[i] + vector_2[i]
return vector_1[:]
def vectorInverComplex(vector):
for i in range(len(vector)):
vector[i] = -1 * vector[i]
return vector[:]
def vectorMultEsComplex(vector, escalar):
for i in range(len(vector)):
vector[i] = escalar * vector[i]
return vector[:]
def matrizSumaComplex(vector_1, vector_2):
rows = len(vector_1)
colums = len(vector_1[0])
for i in range(rows):
for j in range(colums):
vector_1[i][j] = vector_1[i][j] + vector_2[i][j]
return vector_1[:]
def matrizInverComplex(matriz):
matriz = [[-1 * matriz[j][k] for k in range(len(matriz[0]))] for j in range(len(matriz))]
return matriz[:]
def matrizMultEsComplex(matriz, escalar):
matriz = [[escalar * matriz[j][k] for k in range(len(matriz[0]))] for j in range(len(matriz))]
return matriz[:]
def trasMatrizVector (matriz_vector):
rows = len(matriz_vector)
column = auxiliar(matriz_vector)
if auxiliar_1(matriz_vector):
aux = [[matriz_vector[k][j] for k in range(rows)] for j in range(column)]
aux = (aux[0] if len(aux) == 1 else aux)
else:
aux = [[matriz_vector[j] for k in range(column)] for j in range(rows)]
return aux
def conjMatrizVector(matriz_vector):
rows = len(matriz_vector)
column = auxiliar(matriz_vector)
if auxiliar_1(matriz_vector):
for j in range(rows):
for k in range(column):
matriz_vector[j][k] = auxiliar_2(matriz_vector[j][k])
else:
for j in range(rows):
matriz_vector[j] = auxiliar_2(matriz_vector[j])
return matriz_vector[:]
def adjuntMatrizVector (matriz_vector):
return trasMatrizVector(conjMatrizVector(matriz_vector))
def multMatrices (matriz_a, matriz_b):
rows_a, rows_b, column_a, column_b = len(matriz_a), len(matriz_b), auxiliar(matriz_a), auxiliar(matriz_b)
if rows_b == column_a:
aux = [[0 for i in range(column_b)] for j in range(rows_a)]
for h in range(column_b):
for j in range(rows_a):
for k in range(column_a):
aux[j][h] += matriz_a[j][k] * matriz_b[k][h]
return aux
else:
return 'Las matrices no se pueden operar'
def accion(matriz, vector):
rows, columns, size = len(matriz), len(matriz[0]), len(vector)
aux = [0 for i in range(rows)]
for j in range(rows):
for k in range(columns):
aux[j] += matriz[j][k] * vector[k]
return aux
def dotProduct (vector_a, vector_b):
size = len(vector_a)
try:
for i in range(size):
int(vector_a[i])
suma = 0
for j in range(size):
suma += vector_a[j] * vector_b[j]
return suma
except TypeError:
vector_a = conjMatrizVector(vector_a[:])
suma = 0
for j in range(size):
suma += complex(vector_a[j]) * complex(vector_b[j])
return suma
def normVector(vector):
try:
for i in range(len(vector)):
int(vector[i])
return (dotProduct(vector[:], vector[:]))**(1/2)
except TypeError:
return (dotProduct(vector[:], vector[:]))**(1/2)
def disVectors(vector_a, vector_b):
vector = vectorSumaComplex(vectorInverComplex(vector_a), vector_b)
return normVector(vector)
def matrizHermitian(matriz):
matriz_a = conjMatrizVector(trasMatrizVector(matriz[:]))
if matriz_a == matriz:
return True
else:
return False
def matrizUnitary(matriz):
size = len(matriz)
identidad = [[(1 if j == k else 0) for k in range(size)]for j in range(size)]
matriz = multMatrices(matriz, conjMatrizVector(trasMatrizVector(matriz[:])))
if matriz == identidad:
return True
else:
return False
def tensorProduct(matriz_vector_0,matriz_vector_1):
rows_0, columns_0, valor = len(matriz_vector_0), auxiliar(matriz_vector_0), auxiliar_1(matriz_vector_1)
if columns_0 == 1 and valor:
for j in range(rows_0):
matriz_vector_0[j] = matrizMultEsComplex(matriz_vector_1[:], matriz_vector_0[j])
elif columns_0 == 1:
for j in range(rows_0):
matriz_vector_0[j] = vectorMultEsComplex(matriz_vector_1[:], matriz_vector_0[j])
elif columns_0 != 1 and valor:
for j in range(rows_0):
for k in range(columns_0):
matriz_vector_0[j][k] = matrizMultEsComplex(matriz_vector_1[:], matriz_vector_0[j][k])
else:
for j in range(rows_0):
for k in range(columns_0):
matriz_vector_0[j][k] = vectorMultEsComplex(matriz_vector_1[:], matriz_vector_0[j][k])
return matriz_vector_0[:]
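# Example usage of the vector helpers (illustrative values, not part of the
# original library):
if __name__ == '__main__':
    v1, v2 = [1 + 2j, 3 - 1j], [2 + 0j, 1 + 1j]
    print(vectorSumaComplex(v1[:], v2))                     # [(3+2j), (4+0j)]
    print(dotProduct([1 + 2j, 3 - 1j], [2 + 0j, 1 + 1j]))   # (4+0j); conjugates the first vector
    print(normVector([3 + 4j]))                             # (5+0j)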
|
[
"wilmer.rodriguez-r@mail.escuelaing.edu.co"
] |
wilmer.rodriguez-r@mail.escuelaing.edu.co
|
a37479b83e3e6ebdfa280a2339adbc632b8a3439
|
2611d6ab0963ba5da8cec69559a6ad4b7efb92d9
|
/emojisss.py
|
f95a3d4789d4e416f67fe803c0f0d48a4f73054b
|
[] |
no_license
|
myke-oliveira/curso-em-video-python3
|
f4108eebc8dc5fde1574dc2a8ab0f6eff7650e5d
|
97dc244ff50720190a134a1eb2fef9f8b43cdae3
|
refs/heads/master
| 2021-05-12T11:34:28.036631
| 2018-01-16T19:28:44
| 2018-01-16T19:28:44
| 117,392,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
import emoji
print(emoji.emojize(':thumbs_up_sign:'))
|
[
"mykeapo@gmail.com"
] |
mykeapo@gmail.com
|
511fe8b79650e5129209a33e6c7d768af423c6e6
|
2a1f4c4900693c093b2fcf4f84efa60650ef1424
|
/py/dome/backend/apps.py
|
fc8e9e1db58cfc9dbc955eb7df36461f862fe2b5
|
[
"BSD-3-Clause"
] |
permissive
|
bridder/factory
|
b925f494303728fa95017d1ba3ff40ac5cf6a2fd
|
a1b0fccd68987d8cd9c89710adc3c04b868347ec
|
refs/heads/master
| 2023-08-10T18:51:08.988858
| 2021-09-21T03:25:28
| 2021-09-21T03:25:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import unicode_literals
from django.apps import AppConfig
class BackendConfig(AppConfig):
name = 'backend'
|
[
"chrome-bot@chromium.org"
] |
chrome-bot@chromium.org
|
9d0d9428ad332411b938df93cd900f02fefc493a
|
f921b215086d6556885da0c50b68e4de861216ac
|
/src/helpers/operations.py
|
b533530fbdc6a9316593c2f4f7a1a166640e31d8
|
[] |
no_license
|
tonaflcastelan/prueba-ing-backend
|
afee173d31d78139306a38a55d4e98f219281068
|
7c114be5e4c59c0e2b0fd3775ba237ac94e6f4a4
|
refs/heads/master
| 2022-12-03T23:58:36.924155
| 2020-08-04T00:17:54
| 2020-08-04T00:17:54
| 284,596,408
| 0
| 0
| null | 2020-08-03T03:42:34
| 2020-08-03T03:42:33
| null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
def get_average(value1, value2) -> float:
"""
Get average
"""
return (value1 + value2) / 2
def get_percent(dividend, divider) -> float:
"""
Get percent
"""
    # round after scaling so two decimals of the percentage survive
    return round(float(dividend) / divider * 100, 2)
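# Example (illustrative values):
#   get_average(4, 6)    -> 5.0
#   get_percent(25, 50)  -> 50.0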
|
[
"tona.castelan16@gmail.com"
] |
tona.castelan16@gmail.com
|
cb5e4cf9e89b4161efe98de831afd47a6290f0db
|
1c8a05b18d1e895a99e92c9d9d48f30296342a76
|
/2. socket通信.py
|
98e28c13d8edce629a2e35f4eaa157199c53bdfa
|
[] |
no_license
|
codingtruman/socket-UDP
|
1190bbd1a5055bf6461d6cb2b499a81e234760f0
|
6b4e9839b14571f6472f6fc7b37514c31f353789
|
refs/heads/master
| 2023-01-04T18:38:37.356220
| 2020-11-04T01:20:21
| 2020-11-04T01:20:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
from socket import *
# Create a TCP socket
tcpSocket = socket(AF_INET, SOCK_STREAM)
print('TCP socket created')
# Set the address, the port and the data to send
sendAddr = ('192.168.0.19', 3306)
# Without the b prefix here, the send below would need sendData.encode('utf-8')
sendData = b'ok, tcp'
# Connect to the server and send
# tcpSocket.connect(sendAddr)
# tcpSocket.send(sendData)
# Close the socket
tcpSocket.close()
##############################################
# Create a UDP socket
udpSocket = socket(AF_INET, SOCK_DGRAM)
print('UDP socket created')
# Bind local address info; an unbound program gets a random port from the OS,
# but as the receiving side you generally need to bind
# bind() takes a tuple; the IP is usually left empty, meaning any local IP
# (one per network card)
udpSocket.bind(('', 3304))
# Set the address, the port and the data to send
sendAddr = ('192.168.0.19', 3306)
sendData = 'ok, 尼玛123'
# Make sure the encoding is done right here, ahead of time
sendData = sendData.encode('utf-8')
# Connect to the server and send
udpSocket.sendto(sendData, sendAddr)
# Wait for the peer to send data
# Receive at most 1024 bytes at a time
recvdata = udpSocket.recvfrom(1024)
# recvdata is a tuple
content, host_info = recvdata
# If the received message is Chinese text, decoding it with utf-8 will raise an error!
content = content.decode('gb2312')
print(content, host_info)
# Close the socket
udpSocket.close()
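# A defensive decoding variant (a sketch, not in the original script): try
# utf-8 first and fall back to gb2312 for legacy-encoded Chinese text.
# try:
#     content = content.decode('utf-8')
# except UnicodeDecodeError:
#     content = content.decode('gb2312')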
|
[
"maplewong04@gmail.com"
] |
maplewong04@gmail.com
|
cc43cf0ec8d3d75f01f17a65aa8ee501efb3738b
|
6d08b4d926519dbb47b45addc53cd92f5f75e569
|
/app.py
|
799df4c64b398e9bacee9edcd9e21d237bd2a496
|
[] |
no_license
|
elieu17/bellybuttonhmwk
|
f09daf2f381c9a23ce3b686e89d9d639d8a459ca
|
4a8439667aff6ec36a83e50af278c5546b797d77
|
refs/heads/master
| 2020-04-29T08:57:08.234124
| 2019-04-17T22:57:35
| 2019-04-17T22:57:35
| 176,005,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,886
|
py
|
import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
################################################
# Database Setup
#################################################
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db/bellybutton.sqlite"
db = SQLAlchemy(app)
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(db.engine, reflect=True)
# Save references to each table
Samples_Metadata = Base.classes.sample_metadata
Samples = Base.classes.samples
@app.route("/")
def index():
"""Return the homepage."""
return render_template("index.html")
@app.route("/names")
def names():
"""Return a list of sample names."""
# Use Pandas to perform the sql query
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Return a list of the column names (sample names)
return jsonify(list(df.columns)[2:])
@app.route("/metadata/<sample>")
def sample_metadata(sample):
"""Return the MetaData for a given sample."""
sel = [
Samples_Metadata.sample,
Samples_Metadata.ETHNICITY,
Samples_Metadata.GENDER,
Samples_Metadata.AGE,
Samples_Metadata.LOCATION,
Samples_Metadata.BBTYPE,
Samples_Metadata.WFREQ,
]
results = db.session.query(*sel).filter(Samples_Metadata.sample == sample).all()
# Create a dictionary entry for each row of metadata information
sample_metadata = {}
for result in results:
sample_metadata["sample"] = result[0]
sample_metadata["ETHNICITY"] = result[1]
sample_metadata["GENDER"] = result[2]
sample_metadata["AGE"] = result[3]
sample_metadata["LOCATION"] = result[4]
sample_metadata["BBTYPE"] = result[5]
sample_metadata["WFREQ"] = result[6]
print(sample_metadata)
return jsonify(sample_metadata)
@app.route("/samples/<sample>")
def samples(sample):
"""Return `otu_ids`, `otu_labels`,and `sample_values`."""
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
# Filter the data based on the sample number and
# only keep rows with values above 1
sample_data = df.loc[df[sample] > 1, ["otu_id", "otu_label", sample]]
sample_data.sort_values(by =[sample], ascending=False, inplace=True)
# Format the data to send as json
data = {
"otu_ids": sample_data.otu_id.values.tolist(),
"sample_values": sample_data[sample].values.tolist(),
"otu_labels": sample_data.otu_label.tolist(),
}
return jsonify(data)
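# Response shape for /samples/<sample> (illustrative):
#   {"otu_ids": [...], "otu_labels": [...], "sample_values": [...]}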
if __name__ == "__main__":
app.run()
|
[
"noreply@github.com"
] |
elieu17.noreply@github.com
|
b217f0ad3fe6dbfaa9f4171e6b8a876290441a13
|
b80ab06996845b63d78158e9713e4f1ad7229ee7
|
/main.py
|
e14d58e06f9e5dcfadc2df6bf0025c32e3c3c5c5
|
[] |
no_license
|
Pepega123/tracker
|
1bd96244a1d65f06292e20a3a81e07c86d70ba65
|
ad639d19cd1657d06e04afb32e91979603afd88f
|
refs/heads/master
| 2022-09-10T22:16:36.226686
| 2020-06-05T12:04:30
| 2020-06-05T12:04:30
| 269,622,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,171
|
py
|
import datetime
import hashlib
import logging
import os
import psutil
import psycopg2
import time
import atexit
from db import *
logger = logging.getLogger('tracker')
logger.setLevel(logging.DEBUG)
TOTAL_MINUTES = 0
NEW_DAY = False
START_TIME = 0
CURRENT_DATE = datetime.date.today()
CONN = psycopg2.connect(**config())
CUR = CONN.cursor()
def get_hash(filename):
# make a hash object
h = hashlib.sha1()
# open file for reading in binary mode
with open(filename,'rb') as file:
# loop till the end of the file
chunk = 0
while chunk != b'':
# read only 1024 bytes at a time
chunk = file.read(1024)
h.update(chunk)
# return the hex representation of digest
return h.hexdigest()
def find_proc_by_hash(hash="fdd2fe36c6e859779f8e5e65155acf55d625f7f1"):
#find by hash
for p in psutil.process_iter():
try:
#reduce computational intensity, remove for more thorough search
if("C:\\Program Files (x86)\\" in p.exe()):
if get_hash(p.exe()) == hash:
return True
except (psutil.AccessDenied, FileNotFoundError):
pass
return False
def calc_time_diff(start_time, end_time):
return int(time.mktime(end_time.timetuple()) - time.mktime(start_time.timetuple())) / 60
def execute_stmt(date, minutes, cur):
global NEW_DAY
#print(NEW_DAY)
#new day, create new entry:
if(NEW_DAY):
stmnt = "INSERT INTO times(date, minutes) VALUES (\'" + str(date) + "\', " + str(minutes) + ");"
NEW_DAY = False
#same day, update entry:
else:
stmnt = "UPDATE times SET minutes = " + str(minutes) + " WHERE date = \'" + str(date) + "\';"
logger.debug(stmnt)
cur.execute(stmnt)
def init():
fh = logging.FileHandler('log/spam.log')
fh.setLevel(logging.DEBUG)
info = logging.FileHandler('log/info.log')
info.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(info)
logger.debug("-------------------------------")
logger.debug("STARTING APPLICATION")
def main():
logger.info("Initializing tracker")
init()
stmnt = ""
global TOTAL_MINUTES
global NEW_DAY
global START_TIME
global CURRENT_DATE
UPDATE_INTERVAL = 10
INITIAL_INTERVAL = 10
CURRENT_DATE = datetime.date.today()
params = config()
conn = psycopg2.connect(**params)
cur = conn.cursor()
#proc_closed = True
#queried_after_close = False
#print(get_hash("C:\\Program Files (x86)\\World of Warcraft\\_classic_\\abc.exe"))
# get date from previous run
stmnt = "SELECT date FROM last_run;"
cur.execute(stmnt)
last_run_date = cur.fetchall()[0][0]
logger.debug(stmnt)
logger.debug(str(last_run_date))
if str(last_run_date) != str(CURRENT_DATE):
NEW_DAY = True
#same day, start counting from already counted hours
else:
stmnt = "SELECT minutes FROM times WHERE date = \'" + str(CURRENT_DATE) + "\';"
        cur.execute(stmnt)
TOTAL_MINUTES += cur.fetchall()[0][0]
logger.debug(stmnt)
logger.debug(str(TOTAL_MINUTES))
#TODO: maybe close connection to DB here and reopen at end?
#outer while, wait for application to be opened
while(True):
time.sleep(INITIAL_INTERVAL)
process_running = "WowClassic.exe" in (p.name() for p in psutil.process_iter()) or find_proc_by_hash()
#process found
if(process_running):
logger.info("Process started!")
START_TIME = datetime.datetime.now()
#inner while, count time app is open
while("WowClassic.exe" in (p.name() for p in psutil.process_iter()) or find_proc_by_hash()):
logger.info("WoW running still...") #TODO: ADD LOGGING STATEMENTS
time.sleep(UPDATE_INTERVAL) #wait 10 seconds between checks
end_time = datetime.datetime.now()
#calculate amount of time application was open
elapsed_minutes = calc_time_diff(START_TIME, end_time)
TOTAL_MINUTES = TOTAL_MINUTES + elapsed_minutes
#wait for process to start
else:
continue
logger.info("Total time active this session: " + str(elapsed_minutes))
logger.info("Total time active today: " + str(TOTAL_MINUTES))
#update last_run
cur.execute("UPDATE last_run SET date = \'" + str(CURRENT_DATE) + "\';")
#update/insert into times
execute_stmt(CURRENT_DATE, TOTAL_MINUTES, cur)
#update times
#new day, create new entry:
# if(NEW_DAY):
# stmnt = "INSERT INTO times(date, minutes) VALUES (\'" + str(CURRENT_DATE) + "\', " + str(TOTAL_MINUTES) + ");"
# logger.debug(stmnt)
# cur.execute(stmnt)
# #process has been closed, if it is opened again later it is (most likely) on the same day
# NEW_DAY = False
# #same day, update entry:
# else:
# stmnt = "UPDATE times SET minutes = " + str(TOTAL_MINUTES) + " WHERE date = \'" + str(CURRENT_DATE) + "\';"
# logger.debug(stmnt)
# cur.execute("UPDATE times SET minutes = " + str(TOTAL_MINUTES) + " WHERE date = \'" + str(CURRENT_DATE) + "\';")
# cur.execute(stmnt)
conn.commit()
#start counting fresh next time process is opened
#TOTAL_MINUTES = 0
#never reached, maybe close/open before each update instead?
#conn.close()
#cleanup, log times before exiting
atexit.register(execute_stmt, CURRENT_DATE, TOTAL_MINUTES, CUR)
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
Pepega123.noreply@github.com
|
091a0e98da2ac80625b44ff0b4fd86db82597c27
|
0e50b69d115d90eccec55b7049e5d8f5ee438ba3
|
/brabbl/accounts/migrations/0013_user_image.py
|
6b2bb515e6d0afc8c80631c7a3cc8641dcf7ed27
|
[] |
no_license
|
strader07/brabbl-backend-django
|
bd7a5d3e328e3ff975e450e9fd8de933b2a3992c
|
795113ee7e1c3d7ed3b093f8c9435777a29bfd10
|
refs/heads/master
| 2023-05-15T05:31:51.422840
| 2021-06-04T16:12:23
| 2021-06-04T16:12:23
| 373,679,407
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_auto_20160308_1500'),
]
operations = [
migrations.AddField(
model_name='user',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='images/profiles/', verbose_name='Profilbild'),
),
]
|
[
"v7sion@topdogtek.com"
] |
v7sion@topdogtek.com
|
9532029f8f9affb83b76eb28c4c4b4ce1d9c037f
|
bf3d802cf7b75ab84fc0bfae125d9b0cc1aed1e4
|
/API/manager/salt.py
|
c39acfedaf0ef0d82652fed0385c66ddd4be0276
|
[] |
no_license
|
AlesKas/BAK
|
cf93c6b2a7475139bbfc8833e811264efcf0d8ff
|
56f4ac00a523c4ac02da0b7e33590d0b3dd2b190
|
refs/heads/master
| 2023-04-21T10:46:44.359788
| 2021-05-12T14:17:40
| 2021-05-12T14:17:40
| 197,364,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
import json
from utils.logger import initLogging
from .base import GetRequest, BaseException
from utils.db.model import NtwSalt
from peewee import DoesNotExist
LOGGER = initLogging()
class Salt(GetRequest):
@classmethod
def handle_get(cls, **kwargs):
salt = NtwSalt.get().salt
response = {}
response['salt'] = salt
return response
|
[
"akaspare@redhat.com"
] |
akaspare@redhat.com
|
c55382d209d32058d1850a42aceb160d7068f6a1
|
9a65cdc78a860ecc176985860f2d02a841b72ef2
|
/editors/vinegar.py
|
bef3383bb94eb863957f44686bc9bb7a51fb3207
|
[] |
no_license
|
akanevs/dsp
|
4da0354c78b8c0b33bce94af471d3d65ef3e7c0c
|
7e97f2fbfdfd0a6647affcd74303ee8e4a30bde6
|
refs/heads/master
| 2021-01-22T18:15:30.340275
| 2017-09-18T02:26:01
| 2017-09-18T02:26:01
| 100,755,288
| 0
| 0
| null | 2017-08-18T22:42:07
| 2017-08-18T22:42:07
| null |
UTF-8
|
Python
| false
| false
| 123
|
py
|
print('This is not my first or even second file created using vim')
print('This was was created by Alex in Millburn, NJ!')
|
[
"noreply@github.com"
] |
akanevs.noreply@github.com
|
2baf8dc9a7b2bfb654cb56694bc8420624a2c939
|
7cb5a8a08d21f2e4ae2b5b0de3cd53b1074f772c
|
/aws_django/webproj/homepage/views.py
|
67a0fd3691812ad271e7c1d06af89b09fdcee0ac
|
[] |
no_license
|
KDT-Hot6/Coogle
|
29d8778f8510dd14cc10dd7de8f11aab9cfe1e46
|
9dd742a022f209215df650f016f06bb2cbb0d3b9
|
refs/heads/main
| 2023-05-04T23:51:09.554583
| 2021-05-30T07:15:03
| 2021-05-30T07:15:03
| 353,909,037
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from elasticsearch import Elasticsearch
from sentence_transformers import SentenceTransformer, util
import numpy as np
import json
import re
# Create your views here.
######################################## return html page ##########################################
# def main(request):
# return render(request, 'main.html', {})
# Page that displays the restaurant list
def getResListpage(request):
print(request.GET)
search = request.GET.get('q', '')
if 'search_key' in request.GET:
search = request.GET.get('search_key')
info = getInfo(search)
res_list = groupingBoard((jsonPaser(info)))
content = {'res_list': res_list,}
return render(request, 'res_list_page.html', content)
# Main search page
def getSearchPage(request):
return render(request, 'coogle_search.html')
#######################################################################
########################## elastic search #############################
#######################################################################
model_path = '/home/ubuntu/hot6/bh/KoSentenceBERT_SKTBERT/output/training_stsbenchmark_skt_kobert_model_-2021-03-28_05-25-43_best'
embedder = SentenceTransformer(model_path)
client = Elasticsearch()
res_size = 18 #120
def getInfo(search):
query = search
vectors = embedder.encode(query, convert_to_tensor=True)
query_vector = [vector.tolist() for vector in vectors]
script_query = {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, doc['comment_vector']) + 1.0",
"params": {"query_vector": query_vector}
}
}
}
response = client.search(
index='revd',
body={
"size": res_size,
"query": script_query
# "_source": {"includes": ["res_id", "res_name", "comment", "adress", "keywords"]}
}
)
# data_list = response['hits']
return response
################################## funcs to preprocess restaurant infomation #######################
###### Parse the JSON received from Elasticsearch so it can be displayed in HTML
def jsonPaser(info):
# res_info_key = info['hits']['hits'][0]['_source'].keys()
res_list = {}
number = 0
for i in range(len(info['hits']['hits'])):
# 'res_id', 'res_name', 'adress', 'comment', 'keywords'
number += 1
res_comments = []
res_number = number
res_id = info['hits']['hits'][i]['_source']['res_id']
res_name = info['hits']['hits'][i]['_source']['res_name']
res_addr = info['hits']['hits'][i]['_source']['adress']
res_comment = info['hits']['hits'][i]['_source']['comment']
res_keywords = info['hits']['hits'][i]['_source']['keywords']
        # Preprocess the restaurant address ('\n' -> '<br>')
res_addr = preprocessAddr(res_addr)
        # When there is only one review the field is a plain string, so wrap it
        # in a list (so lists can be merged if the same restaurant appears again)
        if not isinstance(res_comment, list):
res_comments.append(res_comment)
else:
res_comments = res_comment
        # Check, keyed on res_id, whether this restaurant has already appeared
if res_list.get(res_id) == None:
res_comments.sort(reverse=True)
res_info = {'res_name':res_name, 'res_addr':res_addr,
'res_comment':res_comments, 'res_keywords':res_keywords,
'res_number': res_number,
}
res_list[res_id] = res_info
else:
comments = res_list[res_id]['res_comment'] + res_comments
comments.sort(reverse=True)
res_list[res_id]['res_comment'] = comments
number -= 1
return res_list
###### Group restaurants into sets of six
def groupingBoard(info):
res_list = []
group = []
count = 0
for v in info.values():
if count == 0:
group = []
group.append(v)
count += 1
if count == 6:
res_list.append(group)
count = 0
return res_list
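# Note: only complete groups of six are kept; a trailing partial group is
# dropped (e.g. 8 restaurants -> one group of 6, the last 2 discarded).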
def groupingPage(info):
res_list = []
group = []
count = 0
for res_board in info:
if count == 0:
group = []
group.append(res_board)
count += 1
if count == 2:
res_list.append(group)
count = 0
return res_list
def preprocessAddr(addr):
addr2 = re.sub('\n', '<br> ', addr)
return addr2
|
[
"kbh122369@gmail.com"
] |
kbh122369@gmail.com
|
395c2604b50278ae902f02181ae78d04c84bb3d9
|
23acc2a4c8ad278d998cde78eb2340bc138844de
|
/alibaba_scrawl/listen.py
|
ec93c236eb4a877cbf9570a6e3b9a5f763bef6ed
|
[] |
no_license
|
jorliang/pthion_basic_learning
|
f915b137abd9116819cd0692d5d859285d68616c
|
425e01b322324947c93cfc9c345cfc2bafb492fe
|
refs/heads/master
| 2022-12-12T18:29:13.250193
| 2022-12-03T01:42:51
| 2022-12-03T01:42:51
| 175,117,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
import winsound
import win32com.client
import time
import speech
# speak_out = win32com.client.Dispatch('SAPI.SPVOICE')
# speak_out = win32com.client.Dispatch('Word.Application')
# def speak(str):
# print(str)
# speak_out.Speak(str)
# winsound.PlaySound(str,winsound.SND_ASYNC)
#
#
# ak='簡単'
# time.sleep(1)
# speak(ak)
speech.say('でわ')
|
[
"liangjiao@imagingdynamics.com"
] |
liangjiao@imagingdynamics.com
|
52b11a09076f3904dc2f45e1e998edf62a885d87
|
aae0432eede626a0ac39ff6d81234e82f8d678c2
|
/leetcode/algorithm/4.median-of-two-sorted-arrays.py
|
63670a63bf49ee10613895df33ff3b9ae3388fc8
|
[] |
no_license
|
KIDJourney/algorithm
|
81c00186a6dfdc278df513d25fad75c78eb1bf68
|
e1cf8e12050b9f1419a734ff93f9c626fc10bfe0
|
refs/heads/master
| 2022-11-24T09:30:16.692316
| 2022-11-06T09:33:51
| 2022-11-06T09:33:51
| 40,428,125
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
#
# @lc app=leetcode id=4 lang=python3
#
# [4] Median of Two Sorted Arrays
#
# @lc code=start
class Solution:
def findMedianSortedArrays(self, nums1, nums2) -> float:
return self.easy(nums1, nums2)
def easy(self, nums1, nums2):
result = []
idx1, idx2 = 0, 0
while True:
if idx1 == len(nums1) and idx2 == (len(nums2)):
break
if idx1 == len(nums1):
result.append(nums2[idx2])
idx2 += 1
continue
if idx2 == len(nums2):
result.append(nums1[idx1])
idx1 += 1
continue
if nums1[idx1] > nums2[idx2]:
result.append(nums2[idx2])
idx2 += 1
else:
result.append(nums1[idx1])
idx1 += 1
mid = len(result) // 2
if len(result) % 2 == 0:
return (result[mid] + result[mid-1]) / 2.0
else:
return (result[mid])
# @lc code=end
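# Quick sanity check (not part of the LeetCode submission):
if __name__ == "__main__":
    s = Solution()
    assert s.findMedianSortedArrays([1, 3], [2]) == 2
    assert s.findMedianSortedArrays([1, 2], [3, 4]) == 2.5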
|
[
"kingdeadfish@qq.com"
] |
kingdeadfish@qq.com
|
de68899e48dd15c7bcd79c0be0f42079daac4b5c
|
05b9797a76bf0ee17f3d6ef812e5dd2a186a17fa
|
/jogo/models/conta.py
|
ab32d99b26083355e15848f3edf07d53d93b7565
|
[] |
no_license
|
renatoaloi/desafio-jogo
|
e9b99d39ad46bfeca006297d492615782e008179
|
c2bb464f17132df2ece18e99d5bb35a767dfaa53
|
refs/heads/main
| 2023-03-06T20:57:29.912528
| 2021-02-22T15:34:53
| 2021-02-22T15:34:53
| 341,075,416
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from sqlalchemy import Column, Integer, String, Float, ForeignKey
from jogo.dao.database import Base
class Conta(Base):
__tablename__ = 'conta'
id = Column(Integer, primary_key=True)
jogo_id = Column(Integer, ForeignKey('jogo.id'), nullable=False)
jogador_id = Column(Integer, ForeignKey('jogador.id'), nullable=True)
saldo = Column(Float, nullable=False)
def __init__(self, jogo_id, jogador_id, saldo):
self.jogo_id = jogo_id
self.jogador_id = jogador_id
self.saldo = saldo
def __repr__(self):
return '<Conta %r>' % self.saldo
|
[
"renato.aloi@gmail.com"
] |
renato.aloi@gmail.com
|
e56f0bd33da3d74267fd6ab2971ead15aa9263b8
|
1c488f486d14c19e19af1a46474af224498be193
|
/experimental/serengeti/blankIBCC.py
|
649a35a733279dc7605d90eb8296b4e245101794
|
[
"Apache-2.0"
] |
permissive
|
JiaminXuan/aggregation
|
fc2117494372428adeed85a9a413e2ff47244664
|
9a7ecbc2d4b143a73e48b1826b3727b6976fa770
|
refs/heads/master
| 2020-12-11T01:49:42.977664
| 2015-05-22T16:21:15
| 2015-05-22T16:21:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,087
|
py
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
import matplotlib.cbook as cbook
sys.path.append("/home/greg/github/pyIBCC/python")
import ibcc
client = pymongo.MongoClient()
db = client['serengeti_2014-07-28']
collection = db["serengeti_classifications"]
collection2 = db["serengeti_subjects"]
subjects = []
users = []
classifications = []
class_count = {}
blank_count = {}
retiredBlanks = {}
with open("/home/greg/Databases/serengeti_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \"/home/greg/Databases/serengeti_ibcc.csv\"\n")
f.write("outputFile = \"/home/greg/Databases/serengeti_ibcc.out\"\n")
f.write("confMatFile = \"/home/greg/Databases/serengeti_ibcc.mat\"\n")
f.write("nu0 = np.array([30,70])\n")
f.write("alpha0 = np.array([[3, 1], [1,3]])\n")
with open("/home/greg/Databases/serengeti_ibcc.csv","wb") as f:
f.write("a,b,c\n")
import datetime
def update(individual_classifications):
#start by removing all temp files
try:
os.remove("/home/greg/Databases/serengeti_ibcc.out")
except OSError:
pass
try:
os.remove("/home/greg/Databases/serengeti_ibcc.mat")
except OSError:
pass
try:
os.remove("/home/greg/Databases/serengeti_ibcc.csv.dat")
except OSError:
pass
with open("/home/greg/Databases/serengeti_ibcc.csv","a") as f:
for u, s, b in individual_classifications:
f.write(str(u)+","+str(s)+","+str(b)+"\n")
print datetime.datetime.time(datetime.datetime.now())
ibcc.runIbcc("/home/greg/Databases/serengeti_ibcc.py")
print datetime.datetime.time(datetime.datetime.now())
def analyze():
with open("/home/greg/Databases/serengeti_ibcc.out","rb") as f:
reader = csv.reader(f,delimiter=" ")
for subject_index,p0,p1 in reader:
subject_index = int(float(subject_index))
subject_id = subjects[subject_index]
c = class_count[subject_id]
if (float(p1) >= 0.995) and (c>= 2):
if not(subject_id in retiredBlanks):
retiredBlanks[subject_id] = c
#print str(c) + " :: " + str(p1)
i = 0
unknownUsers = []
for r in collection.find({"tutorial": {"$ne": True}}):
try:
user_name = r["user_name"]
except KeyError:
unknownUsers.append(r["user_ip"])
continue
zooniverse_id = r["subjects"][0]["zooniverse_id"]
if zooniverse_id in retiredBlanks:
continue
if ((i%10000) == 0) and (i > 0):
print i
update(classifications)
classifications = []
analyze()
if not(user_name in users):
users.append(user_name)
if not(zooniverse_id in subjects):
subjects.append(zooniverse_id)
class_count[zooniverse_id] = 0
blank_count[zooniverse_id] = 0
i += 1
user_index = users.index(user_name)
subject_index = subjects.index(zooniverse_id)
class_count[zooniverse_id] += 1
a = r["annotations"]
if not("nothing" in a[-1]):
assert('species' in a[0])
blank = 0
else:
blank = 1
blank_count[zooniverse_id] += 1
classifications.append((user_index,subject_index,blank))
if i >= 300000:
break
#print len(unknownUsers)
#print len(list(set(unknownUsers)))
tBlank = 0
fBlank = 0
speciesList = ['blank','elephant','zebra','warthog','impala','buffalo','wildebeest','gazelleThomsons','dikDik','giraffe','gazelleGrants','lionFemale','baboon','hippopotamus','ostrich','human','otherBird','hartebeest','secretaryBird','hyenaSpotted','mongoose','reedbuck','topi','guineaFowl','eland','aardvark','lionMale','porcupine','koriBustard','bushbuck','hyenaStriped','jackal','cheetah','waterbuck','leopard','reptiles','serval','aardwolf','vervetMonkey','rodents','honeyBadger','batEaredFox','rhinoceros','civet','genet','zorilla','hare','caracal','wildcat']
errors = {s.lower():0 for s in speciesList}
for zooniverse_id in retiredBlanks:
r = collection2.find_one({"zooniverse_id" : zooniverse_id})
retire_reason = r["metadata"]["retire_reason"]
if retire_reason in ["blank", "blank_consensus"]:
tBlank += 1
else:
fBlank += 1
print zooniverse_id + " :: " + str(r["location"]["standard"][0])
f = max(r["metadata"]["counters"].items(), key = lambda x:x[1])
print f
try:
errors[f[0].lower()] += 1
print str(blank_count[zooniverse_id]) + "/" + str(class_count[zooniverse_id])
except KeyError:
print "---***"
#print str(r["metadata"]["counters"].values())
print "==---"
print tBlank
print fBlank
print np.mean(retiredBlanks.values())
print np.median(retiredBlanks.values())
print "===---"
for s in speciesList:
if errors[s.lower()] != 0:
print s + " - " + str(errors[s.lower()])
|
[
"greg@zooniverse.org"
] |
greg@zooniverse.org
|
719fe210dc977586a94742653b7a84094eb6aa0d
|
68ff38f2dd0188909737b395ac227ec0e205d024
|
/Project-2/src/UtilsModule.py
|
d2d03210a398be119b8953bc2db5d6cfbc88b347
|
[] |
no_license
|
kushagraThapar/Data-Mining
|
7c3992c5efe820185e5197c8098168ae92e68cf9
|
658f3fdc8c4ea91d717cb36d5504d5e1fb803960
|
refs/heads/master
| 2021-01-18T15:40:23.412283
| 2017-01-29T08:49:04
| 2017-01-29T08:49:04
| 68,418,623
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,160
|
py
|
import sys
def write_file(filename, text):
try:
f = open(filename, "w")
f.write(text)
f.close()
except IOError:
print("IOError occurred in file [" + filename + "]")
exit_program()
return
def read_file(filename):
try:
f = open(filename, "rU")
text = f.read()
f.close()
return text
except FileNotFoundError:
print("File not found with name [" + filename + "]")
except IOError:
print("IOError occurred in file [" + filename + "]")
exit_program()
def process_tweets(tweet_data):
    if tweet_data is None or tweet_data.strip() == "":
print("Tweet Data is Empty")
exit_program()
for single_row in tweet_data.split("\n"):
single_row = single_row.strip()
single_row_array = single_row.split(",", 3)
if len(single_row_array) >= 3:
last_index = single_row_array[3].rfind("\"")
tweet = single_row_array[3][0:last_index + 1]
tweet_class = single_row_array[3][last_index + 2:last_index + 3]
def exit_program():
print("Program will exit now... ")
sys.exit(1)
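# Example round-trip (hypothetical filename, not part of the original module):
if __name__ == "__main__":
    write_file("demo.txt", "hello")
    print(read_file("demo.txt"))  # -> hello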
|
[
"kthapa2@uic.edu"
] |
kthapa2@uic.edu
|
82f5cd9bfe71e1f9c34be4f1d8c1789d269f969b
|
61267e7bb146e67d7ce5b81ef8c6fb32cdb1088e
|
/apps/forums/migrations/0002_auto_20190408_2007.py
|
b928de461230382f7b0e27ca992d15780a476ae9
|
[] |
no_license
|
xiaoyaochen/ACshare
|
8f7e294724d90925f9fb80799c9fbd3680c01057
|
482985231e0e6d8632c8504a30f994ba246a060a
|
refs/heads/master
| 2020-05-07T11:57:43.663344
| 2019-04-20T14:55:55
| 2019-04-20T14:55:55
| 180,483,088
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
# Generated by Django 2.0 on 2019-04-08 20:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forums', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='forumscomment',
old_name='reviewer',
new_name='user',
),
]
|
[
"1595029296@qq.com"
] |
1595029296@qq.com
|
15e3cb84a94201fa536f06f31f13a17e5a8b6dfb
|
5f809898a9f153d8645205aa601b2b3083beafa1
|
/krrThomas/searchStat/plot_structs_near_best.py
|
e0263c56ba0312ee7915e88edc7e6a07acbb6a67
|
[] |
no_license
|
MaltheBisbo/learningForces
|
ea1b258e115327e1e0876a60345366f349afb1ac
|
7a726a5931454534585563dd607faf75c5d706c6
|
refs/heads/master
| 2021-01-23T22:31:02.654738
| 2018-12-25T21:48:54
| 2018-12-25T21:48:54
| 102,938,949
| 0
| 0
| null | 2018-12-25T21:48:55
| 2017-09-09T08:22:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,410
|
py
|
import numpy as np
from scipy.spatial.distance import euclidean
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
from gaussComparator import gaussComparator
from featureCalculators_multi.angular_fingerprintFeature_cy import Angular_Fingerprint
from delta_functions_multi.delta import delta as deltaFunc
from krr_errorForce import krr_class
def plot_near_best(traj, MLmodel):
Ndata = len(traj)
# Sort traj after energy
E = np.array([a.get_potential_energy() for a in traj])
index_best = np.argmin(E)
a_best = traj[index_best]
f_traj = MLmodel.featureCalculator.get_featureMat(traj)
f_best = MLmodel.featureCalculator.get_feature(a_best)
d = cdist(f_best.reshape((1,len(f_best))), f_traj, metric='euclidean')
index_closest = np.argsort(d[0])[:5]
print('d:\n', d[0][index_closest])
print('E:\n', E[index_closest])
traj_nearby = [traj[i] for i in index_closest]
return traj_nearby
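# Note: cdist on the single best-structure feature row yields d with shape
# (1, Ndata); argsort of d[0] picks the five training structures whose
# fingerprints are closest to the best one (the best itself included, d = 0).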
if __name__ == '__main__':
n = 2
i = 0
traj_init = read('/home/mkb/DFT/gpLEA/anatase/step/sanity_check/test_new_calc/runs{}/run{}/global{}_initTrain.traj'.format(n,i,i), index=':')
traj_sp = read('/home/mkb/DFT/gpLEA/anatase/step/sanity_check/test_new_calc/runs{}/run{}/global{}_spTrain.traj'.format(n,i,i), index=':')
traj = traj_init + traj_sp
#ref = read('/home/mkb/DFTB/TiO_2layer/ref/Ti13O26_GM_done.traj', index='0')
### Set up feature ###
# Template structure
a = traj[0]
# Radial part
Rc1 = 6
binwidth1 = 0.2
sigma1 = 0.2
# Angular part
Rc2 = 4
Nbins2 = 30
sigma2 = 0.2
gamma = 2
# Radial/angular weighting
eta = 20
use_angular = True
# Initialize feature
featureCalculator = Angular_Fingerprint(a, Rc1=Rc1, Rc2=Rc2, binwidth1=binwidth1, Nbins2=Nbins2, sigma1=sigma1, sigma2=sigma2, gamma=gamma, eta=eta, use_angular=use_angular)
### Set up KRR-model ###
comparator = gaussComparator(featureCalculator=featureCalculator, max_looks_like_dist=0.2)
delta_function = deltaFunc(atoms=a, rcut=6)
krr = krr_class(comparator=comparator,
featureCalculator=featureCalculator,
delta_function=delta_function,
bias_std_add=0)
traj_nearby = plot_near_best(traj, krr)
view(traj_nearby)
|
[
"mkb@s81n11.grendel.cscaa.dk"
] |
mkb@s81n11.grendel.cscaa.dk
|
7859135534e30b04d89d94a136f8d0e24d12d03c
|
f0ff71345c625a7e7c839eea570aca350bcb24b8
|
/plots.py
|
147bb5fad872e10fde6216a86f96baab54a5de62
|
[
"MIT"
] |
permissive
|
ivastar/wfc3_gyro
|
ef2663f73ba775bb62fe595a33e13a0b4ceffd8d
|
3a77050183ddc3f7cb904eb8d59d773d41a00037
|
refs/heads/master
| 2021-01-15T08:37:50.860268
| 2016-11-16T14:59:51
| 2016-11-16T14:59:51
| 44,345,619
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65,014
|
py
|
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import matplotlib.gridspec as gridspec
import pylab
import unicorn
import astropy
from astropy.table import Table as table
from astropy.io import fits
import numpy as np
import glob, os
def mag_radius():
from matplotlib.ticker import ScalarFormatter
import my_python.mk_region_file
gabe = table.read('../REF/cosmos-wide_v0.3_drz_bkg_sci.cat', format='ascii.sextractor')
orig = table.read('/3DHST/Photometry/Release/v4.0/COSMOS/Detection/cosmos_3dhst.v4.0.F160W_orig.cat', format='ascii.sextractor')
cat = table.read('test7_drz_sci.cat', format='ascii.sextractor')
top = 0.075
bottom = 0.1
left = 0.15
fig = unicorn.catalogs.plot_init(xs=10,aspect=0.5, left=left, right=0.1, bottom=bottom, top=top, NO_GUI=False)
fig.subplots_adjust(wspace=0.15)
fig.subplots_adjust(hspace=0.1)
fs = 10
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
gs1 = gridspec.GridSpec(1,2)
ax1 = fig.add_subplot(gs1[0,0], ylim=[0.8, 20], xlim=[14,26])
ax1.plot(orig['MAG_AUTO'],orig['FLUX_RADIUS']*0.06/0.1, '.', color='0.7',markersize=0.5)
ax1.plot(cat['MAG_AUTO'],cat['FLUX_RADIUS'], 'o', color='black', markersize=1, alpha=0.5)
ax1.set_xlabel('MAG_AUTO F160W')
ax1.set_ylabel('FLUX_RADIUS')
ax1.set_yscale('log')
cr = (cat['FLUX_RADIUS']*0.1/0.06 < 2.)
stars = (cat['MAG_AUTO'] > 15.) & (cat['MAG_AUTO'] < 22.) & (cat['FLUX_APER_5']/cat['FLUX_APER'] > 1.1) & (cat['FLUX_APER_5']/cat['FLUX_APER'] < 1.2)
#stars = (cat['MAG_AUTO'] > 15.) & (cat['MAG_AUTO'] < 23.) & (cat['FLUX_RADIUS']*0.1/0.06 < 5.15 - 0.115*cat['MAG_AUTO']) & (cat['FLUX_RADIUS']*0.1/0.06 > 2.1)
#ax1.plot(cat['MAG_AUTO'][cr],cat['FLUX_RADIUS'][cr], 'o', color='blue', markersize=2, alpha=1.0)
ax1.plot(cat['MAG_AUTO'][stars],cat['FLUX_RADIUS'][stars], 'o', color='red', markersize=2, alpha=1.0, markeredgecolor='red')
    print 'STARS: mean: {} / median: {}'.format(np.mean(cat['FWHM_IMAGE'][stars]), np.median(cat['FWHM_IMAGE'][stars]))
#yy_select = -0.115*np.array([14.,23.]) +5.15
#ax1.plot(np.array([14.,23.]), yy_select,'--',color='red')
#ax1.plot([23.,23.], np.array([2.1, yy_select[-1]]), '--',color='red')
#ax1.plot([14.,23.], [2.1,2.1],'--',color='red')
ax1.yaxis.set_major_formatter(ScalarFormatter())
ax2 = fig.add_subplot(gs1[0,1], ylim=[0.5,4], xlim=[14,26])
#ax2.plot(orig['MAG_AUTO'],orig['FLUX_RADIUS'], '.', color='0.7',markersize=0.5)
#ax2.plot(gabe['MAG_AUTO'],gabe['FLUX_RADIUS']*0.1/0.06, 'o', color='red', markersize=1., alpha=0.5)
ax2.plot(orig['MAG_AUTO'], orig['FLUX_APER_5']/orig['FLUX_APER'], '.', color='0.7',markersize=0.5)
ax2.plot(cat['MAG_AUTO'], cat['FLUX_APER_5']/cat['FLUX_APER'], 'o', color='black', markersize=1., alpha=0.5)
ax2.plot(cat['MAG_AUTO'][stars], cat['FLUX_APER_5'][stars]/cat['FLUX_APER'][stars], 'o', color='red', markersize=2., alpha=1.0, markeredgecolor='red')
ax2.plot(cat['MAG_AUTO'][cr], cat['FLUX_APER_5'][cr]/cat['FLUX_APER'][cr], 'o', color='blue', markersize=2., alpha=1.0)
ax2.set_xlabel('MAG_AUTO F160W')
ax2.set_ylabel('Flux (2.0\")/Flux (0.5\")')
#ax2.set_title('PIPELINE FLTS (GABE)')
ax2.yaxis.set_major_formatter(ScalarFormatter())
my_python.mk_region_file.mk_region_file_from_lists(cat['X_WORLD'][stars],cat['Y_WORLD'][stars],outfile = 'stars', printids='no', color='cyan')
my_python.mk_region_file.mk_region_file_from_lists(cat['X_WORLD'][cr],cat['Y_WORLD'][cr],outfile = 'cr', printids='no', color='yellow')
plt.show(block=False)
fig.savefig('mag_radius.pdf', dpi=200, transparent=False)
fig.savefig('mag_radius.png', dpi=200, transparent=False)
def noise_distribution(master_root='icxe15010'):
import threedhst
import stwcs
import scipy
import scipy.optimize
import unicorn
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import matplotlib.gridspec as gridspec
import drizzlepac
from drizzlepac import astrodrizzle
master_asn = threedhst.utils.ASNFile('{}_asn.fits'.format(master_root))
#print 'Read files...'
ref = fits.open('{}_drz_sci.fits'.format(master_root))
ref_wcs = stwcs.wcsutil.HSTWCS(ref, ext=0)
seg = fits.open('{}_drz_seg.fits'.format(master_root))
seg_data = np.cast[np.float32](seg[0].data)
yi, xi = np.indices((1014,1014))
# read candels
candels = table.read('candels_noise.txt', format='ascii')
# Make plot
fs=8.
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
fig = unicorn.catalogs.plot_init(xs=10., aspect=0.5, fontsize=fs, left=0.1, right=0.1, top=0.1, bottom=0.1)
gs = gridspec.GridSpec(2,4,top=0.95, bottom=0.1)
fig.subplots_adjust(wspace=0.05)
fig.subplots_adjust(hspace=0.1)
fp = open('sigma_table.txt','a')
#### Loop through the pointings in this orbit
for j, root in enumerate(master_asn.exposures):
asn = threedhst.utils.ASNFile('{}_asn.fits'.format(root))
ax = fig.add_subplot(gs[j])
if j == 0:
flt_color='blue'
else:
flt_color='red'
#### Loop through FLTs
for exp in asn.exposures:
NSAMP = len(asn.exposures)
flt = fits.open('{}_flt.fits'.format(exp))
flt_wcs = stwcs.wcsutil.HSTWCS(flt, ext=1)
if flt[1].header['BG_SUB'] == 'No':
flt[1].data -= flt[1].header['MDRIZSKY']
if exp == asn.exposures[0]:
print 'Segmentation image: {}_blot.fits'.format(exp)
blotted_seg = astrodrizzle.ablot.do_blot(seg_data, ref_wcs, flt_wcs, 1, coeffs=True, interp='nearest', sinscl=1.0, stepsize=10, wcsmap=None)
mask = (blotted_seg == 0) & (flt['DQ'].data == 0) & (xi > 10) & (yi > 10) & (xi < 1004) & (yi < 1004)
n, bin_edge, patch = ax.hist(flt[1].data[mask], bins=300, range=[-3,3], color='0.5', alpha=0.5, histtype='step', normed=True)
if exp == asn.exposures[0]:
centers = (bin_edge[:-1] + (bin_edge[1:] - bin_edge[:-1])/2)
flt_coeff, flt_var = scipy.optimize.curve_fit(gauss, centers, n, p0=[np.max(n),0.,1.])
drz = fits.open('{}_drz_sci.fits'.format(root))
drz_wcs = stwcs.wcsutil.HSTWCS(drz, ext=0)
drz_sh = np.shape(drz[0].data)
dyi, dxi = np.indices(drz_sh)
blotted_seg = astrodrizzle.ablot.do_blot(seg_data, ref_wcs, drz_wcs, 1, coeffs=False, interp='nearest', sinscl=1.0, stepsize=10, wcsmap=None)
mask = (blotted_seg == 0) & (dxi > 10) & (dyi > 10) & (dxi < drz_sh[1]-10) & (dyi < drz_sh[0]-10)
n, bin_edge, patch = ax.hist(drz[0].data[mask], bins=100, range=[-1.,1.], color='black', alpha=1.0, histtype='step', normed=True)
centers = (bin_edge[:-1] + (bin_edge[1:] - bin_edge[:-1])/2)
coeff, var = scipy.optimize.curve_fit(gauss, centers, n, p0=[np.max(n),0.,1.])
flt = fits.open('{}_flt.fits'.format(root))
flt_wcs = stwcs.wcsutil.HSTWCS(flt, ext=1)
blotted_seg = astrodrizzle.ablot.do_blot(seg_data, ref_wcs, flt_wcs, 1, coeffs=True, interp='nearest', sinscl=1.0, stepsize=10, wcsmap=None)
mask = (blotted_seg == 0) & (flt['DQ'].data == 0) & (xi > 10) & (yi > 10) & (xi < 1004) & (yi < 1004)
n, bin_edge, patch = ax.hist(flt[1].data[mask], bins=100, range=[-1.,1.], color=flt_color, alpha=1.0, histtype='step', normed=True)
centers = (bin_edge[:-1] + (bin_edge[1:] - bin_edge[:-1])/2)
flt_orig_coeff, flt_orig_var = scipy.optimize.curve_fit(gauss, centers, n, p0=[np.max(n),0.,1.])
if root == master_asn.exposures[0]:
flag = 0
else:
flag = 1
fp.write('{}\t{:7.4f}\t{:7.4f}\t{:7.2f}\t{:7.4f}\t{}\n'.format(root, np.abs(flt_orig_coeff[2]), np.abs(coeff[2]), flt[1].header['SAMPTIME'], flt[1].header['MDRIZSKY'],flag))
### plot candels
#ax.bar(candels['centers'], 4.5*candels['n']/np.max(candels['n']), width=0.001, align='center', edgecolor='orange', color='orange', alpha=0.3)
ax.set_ylim([0, 5.0])
ax.set_xlim([-3.0, 3.0])
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if j == 0 or j==4:
ax.set_ylabel('Normalized N', fontsize=fs)
else:
ax.set_yticklabels([])
if j > 3:
ax.set_xlabel('e$^-$/s', fontsize=fs)
else:
ax.set_xticklabels([])
ax.set_title(root, fontsize=fs)
ax.text(-2.5, 4.5,'$\sigma_{reads}$ = %4.3f' % (flt_coeff[2]), color='0.5', fontsize=fs)
ax.text(-2.5, 4.,'$\sigma_{FLT}$ = %4.3f' % (flt_orig_coeff[2]), color=flt_color, fontsize=fs)
ax.text(-2.5, 3.5,'$\sigma_{DRZ}$ = %4.3f' % (coeff[2]), color='black', fontsize=fs)
#ax.text(-2.5, 3.0,'$\sigma_{CAND}$ = %4.3f' % (4.44509564e-03), color='orange', fontsize=fs)
plt.show(block=False)
fig.savefig('{}_noise.png'.format(master_root), dpi=200, transparent=False)
def candels_mosaic_noise():
import scipy
mos = fits.open('/3DHST/Photometry/Release/v4.0/COSMOS/HST_Images/cosmos_3dhst.v4.0.F160W_orig_sci.fits')
wht = fits.open('/3DHST/Photometry/Release/v4.0/COSMOS/HST_Images/cosmos_3dhst.v4.0.F160W_orig_wht.fits')
seg = fits.open('/3DHST/Photometry/Release/v4.0/COSMOS/Detection/cosmos_3dhst.v4.0.F160W_seg.fits.gz')
mask = (seg[0].data == 0) & (wht[0].data != 0)
n, bin_edge = np.histogram(mos[0].data[mask], bins=100, range=[-0.05,0.05], normed=True)
centers = (bin_edge[:-1] + (bin_edge[1:] - bin_edge[:-1])/2)
coeff, var = scipy.optimize.curve_fit(gauss, centers, n, p0=[np.max(n),0.,0.01])
print 'Gaussian coeffs: {}'.format(coeff)
t = table([centers,n], names =('centers','n'))
t.write('candels_noise.txt', format='ascii')
def overall_offsets():
import glob
import matplotlib.colors as colors
import matplotlib.cm as cmx
files = glob.glob('shifts_icxe*010.txt')
fig = unicorn.catalogs.plot_init(xs=10.,aspect=0.24, left=0.12, right=0.05, bottom=0.15, top=0.15, NO_GUI=False)
gs = gridspec.GridSpec(1,4,top=0.9, bottom=0.15)
fig.subplots_adjust(wspace=0.17)
fig.subplots_adjust(hspace=0.1)
fs = 8
jet = cm = plt.get_cmap('jet_r')
cNorm = colors.Normalize(vmin=0, vmax=9)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
for j, file in enumerate(files):
print file, '{}'.format(file.split('_')[1].split('.')[0])
ax = fig.add_subplot(gs[j])
if file.startswith('shifts_icxe15'):
ax.set_xlim([-5.,70.])
ax.set_ylim([-5.,20.])
if file.startswith('shifts_icxe16'):
ax.set_xlim([-5.,50.])
ax.set_ylim([-10.,5.])
if file.startswith('shifts_icxe17'):
ax.set_xlim([-50.,5.])
ax.set_ylim([-10.,5.])
if file.startswith('shifts_icxe18'):
ax.set_xlim([-40.,5.])
ax.set_ylim([-5.,40.])
ax.set_xlabel('$\Delta$ x [pix]', fontsize=fs, labelpad=0.1)
if j == 0:
ax.set_ylabel('$\Delta$ y [pix]', fontsize=fs)
ax.set_title('{}'.format(file.split('_')[1].split('.')[0]), fontsize=fs)
cc = 0.
with open(file) as f:
for line in f:
if not line.startswith('#'):
data = line.split()
cc += 1.
color = scalarMap.to_rgba(cc)
#ax.arrow(0.,0., float(data[1]), float(data[2]), head_width=0., head_length=0., fc=color, ec = color)
ax.annotate("",xy=(float(data[1]), float(data[2])), xytext=(0,0),
arrowprops=dict(arrowstyle='->', color=color))
plt.show(block=False)
fig.savefig('overall_offsets.png', dpi=200, transparent=False)
def overall_offsets_vs_distance():
import glob
import matplotlib.colors as colors
import matplotlib.cm as cmx
files = glob.glob('shifts_icxe*010.txt')
fig = unicorn.catalogs.plot_init(xs=10.,aspect=0.47, left=0.2, right=0.05, bottom=0.15, top=0.15, NO_GUI=False)
gs = gridspec.GridSpec(1,2,top=0.9, bottom=0.15)
fig.subplots_adjust(wspace=0.2)
fig.subplots_adjust(hspace=0.2)
fs = 10
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
jet = cm = plt.get_cmap('jet_r')
cNorm = colors.Normalize(vmin=0, vmax=8)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
lines = ['-','--','-.',':']
labels = ['COSMOS-15', 'COSMOS-16', 'COSMOS-17','COSMOS-18']
for j, file in enumerate(files):
print file
root = '{}'.format(file.split('_')[1].split('.')[0])
ax1.set_xlim([-0.5,9.])
ax1.set_ylim([-0.005,0.15])
ax1.set_xlabel('Commanded Offset Relative to First Position\n[arcmin]', fontsize=fs, labelpad=0.1)
ax1.set_ylabel('Offset from Commanded Position\n[arcmin]', fontsize=fs)
root = '{}'.format(file.split('_')[1].split('.')[0])
data = table.read(file, format='ascii', names=('file','x','y','rot','scale','x_rms','y_rms'))
small_off = np.sqrt(data['x']**2 + data['y']**2)*0.12/60.
#colors = (scalarMap.to_rgba(cc) for cc in range(len(small_off)))
big_off = [0.0]
drift_rate = [0.0]
origin = fits.open(data['file'][0])
x_orig, y_orig = origin[1].header['CRVAL1'], origin[1].header['CRVAL2']
for k, file in enumerate(data['file'][1:]):
flt = fits.open(file)
x, y = flt[1].header['CRVAL1'], flt[1].header['CRVAL2']
big_off.append(60.*np.sqrt((x_orig-x)**2 + (y_orig-y)**2))
if k+1 < 4:
t_exp = 255
else:
t_exp = 277
drift = table.read(file.split('_')[0]+'_shifts.txt', format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
drift_rate.append(25.*np.sqrt((drift['x'][0]-drift['x'][-1])**2 + (drift['y'][0]-drift['y'][-1])**2)/t_exp)
print len(small_off), len(big_off)
ax1.plot(big_off, small_off,linestyle=lines[j], color='0.5', label=labels[j], zorder=0)
ax1.scatter(big_off, small_off, c=range(len(small_off)), cmap='jet', s=35., edgecolors='black', alpha=0.7)
ax1.legend(loc='upper left', frameon=False, labelspacing=0.8, fontsize=9)
ax2.set_xlim([-0.5, 9.])
ax2.set_ylim([-0.01,0.5])
ax2.set_xlabel('Commanded Offset Relative to First Position\n[arcmin]', fontsize=fs, labelpad=0.1)
ax2.set_ylabel('Drift Rate During Observations\n[pix per 25 seconds]', fontsize=fs)
ax2.plot(big_off, drift_rate,linestyle=lines[j], color='0.5', zorder=0)
ax2.scatter(big_off, drift_rate, c=range(len(small_off)), cmap='jet', s=35., edgecolors='black', alpha=0.7)
plt.show(block=False)
fig.savefig('overall_offsets_vs_distance.png', dpi=200, transparent=False)
def overall_offsets_vs_time():
import glob
import matplotlib.colors as colors
import matplotlib.cm as cmx
files = glob.glob('shifts_icxe*010.txt')
fig = unicorn.catalogs.plot_init(xs=10.,aspect=0.47, left=0.2, right=0.05, bottom=0.15, top=0.15, NO_GUI=False)
gs = gridspec.GridSpec(1,2,top=0.9, bottom=0.15)
fig.subplots_adjust(wspace=0.2)
fig.subplots_adjust(hspace=0.2)
fs = 10
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
jet = cm = plt.get_cmap('jet_r')
cNorm = colors.Normalize(vmin=0, vmax=8)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
time = np.array([0, 368, 713, 1060, 1405, 1777, 2147, 2519]) + 333.
lines = ['-','--','-.',':']
labels = ['COSMOS-15', 'COSMOS-16', 'COSMOS-17','COSMOS-18']
for j, file in enumerate(files):
root = '{}'.format(file.split('_')[1].split('.')[0])
ax1.set_xlim([-5., 3100.])
ax1.set_ylim([-0.005,0.15*60.])
ax1.set_xlabel('Time Since Beginning of Orbit\n[seconds]', fontsize=fs)
ax1.set_ylabel('Offset from Commanded Position\n[arcsec]', fontsize=fs)
root = '{}'.format(file.split('_')[1].split('.')[0])
data = table.read(file, format='ascii', names=('file','x','y','rot','scale','x_rms','y_rms'))
small_off = np.sqrt(data['x']**2 + data['y']**2)*0.12
#colors = (scalarMap.to_rgba(cc) for cc in range(len(small_off)))
drift_rate = [0.0]
for k, file in enumerate(data['file'][1:]):
if k+1 < 4:
t_exp = 255
else:
t_exp = 277
drift = table.read(file.split('_')[0]+'_shifts.txt', format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
drift_rate.append(25.*np.sqrt((drift['x'][0]-drift['x'][-1])**2 + (drift['y'][0]-drift['y'][-1])**2)/t_exp)
ax1.plot(time, small_off, linestyle=lines[j], color='0.5', label=labels[j], zorder=0)
ax1.scatter(time, small_off, c=range(len(small_off)), cmap='jet', s=35., edgecolors='black', alpha=0.7)
ax1.legend(loc='upper left', frameon=False, labelspacing=0.8, fontsize=9)
ax2.set_xlim([-5., 3100.])
ax2.set_ylim([-0.01,0.5])
ax2.set_xlabel('Time Since Beginning of Orbit\n[seconds]', fontsize=fs)
ax2.set_ylabel('Drift Rate During Pointing\n[pix per 25 seconds]', fontsize=fs)
ax2.plot(time, drift_rate,linestyle=lines[j], color='0.5', zorder=0)
ax2.scatter(time, drift_rate, c=range(len(small_off)), cmap='jet', s=35., edgecolors='black', alpha=0.7)
plt.show(block=False)
fig.savefig('overall_offsets_vs_time.png', dpi=200, transparent=False)
fig.savefig('overall_offsets_vs_time.pdf', dpi=200, transparent=False)
def gyro_drift():
import glob
import matplotlib.colors as colors
import matplotlib.cm as cmx
import threedhst
files = glob.glob('icxe*010_asn.fits')
fig = unicorn.catalogs.plot_init(xs=10.,aspect=0.24, left=0.12, right=0.05, bottom=0.15, top=0.15, NO_GUI=False)
gs = gridspec.GridSpec(1,4,top=0.9, bottom=0.15)
fig.subplots_adjust(wspace=0.17)
fig.subplots_adjust(hspace=0.1)
fs = 8
jet = cm = plt.get_cmap('jet_r')
cNorm = colors.Normalize(vmin=0, vmax=9)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
for j, file in enumerate(files):
print file
ax = fig.add_subplot(gs[j])
ax.set_xlabel('$\Delta$ x [pix]', fontsize=fs, labelpad=0.1)
if j == 0:
ax.set_ylabel('$\Delta$ y [pix]', fontsize=fs)
ax.set_title('{}'.format(file.split('_')[0]), fontsize=fs)
ax.set_xlim([-3., 3.])
ax.set_ylim([-3., 3.])
asn = threedhst.utils.ASNFile(file)
cc = 1.
for exp in asn.exposures[1:]:
data = table.read(exp+'_shifts.txt', format='ascii', names=('file','x','y','rot','scale','x_rms','y_rms'))
cc += 1.
color = scalarMap.to_rgba(cc)
ax.plot(data['x'], data['y'], '-', color=color, alpha=0.5, linewidth=1.5)
ax.plot(data['x'], data['y'], 'o', color=color, markersize=3., alpha=0.5, markeredgecolor=None)
plt.show(block=False)
fig.savefig('gyro_drift.png', dpi=200, transparent=False)
def footprints_plot(root='icxe15010'):
import unicorn.survey_paper as sup
import matplotlib.colors as colors
import matplotlib.cm as cmx
if root == 'icxe15010':
aspect = 1.75
xlim = [150.265, 150.157]
ylim = [2.45, 2.64]
xticklab = [r'$10^\mathrm{h}01^\mathrm{m}00^\mathrm{s}$', r'$10^\mathrm{h}00^\mathrm{m}45^\mathrm{s}$']
xtickv = [sup.degrees(10,01,00, hours=True),sup.degrees(10,00,45, hours=True)]
yticklab = [r'$+02^\circ30^\prime00^{\prime\prime}$',r'$+02^\circ35^\prime00^{\prime\prime}$']
ytickv = [sup.degrees(2, 30, 00, hours=False),sup.degrees(2, 35, 00, hours=False)]
label = 'COSMOS-15'
factor=10.
if root == 'icxe16010':
aspect=0.9
xlim = [150.265, 150.1]
ylim = [2.607, 2.74]
xticklab = [r'$10^\mathrm{h}01^\mathrm{m}00^\mathrm{s}$', r'$10^\mathrm{h}00^\mathrm{m}45^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}30^\mathrm{s}$']
xtickv = [sup.degrees(10,01,00, hours=True),sup.degrees(10,00,45, hours=True),sup.degrees(10,00,30, hours=True)]
yticklab = [r'$+02^\circ38^\prime00^{\prime\prime}$',r'$+02^\circ40^\prime00^{\prime\prime}$', r'$+02^\circ42^\prime00^{\prime\prime}$', r'$+02^\circ44^\prime00^{\prime\prime}$']
ytickv = [sup.degrees(2, 38, 00, hours=False),sup.degrees(2, 40, 00, hours=False),sup.degrees(2, 42, 00, hours=False),sup.degrees(2, 44, 00, hours=False)]
label='COSMOS-16'
factor=20.
if root == 'icxe17010':
aspect=1.4
xlim = [150.2, 150.06]
ylim = [2.52, 2.72]
xticklab = [r'$10^\mathrm{h}00^\mathrm{m}45^\mathrm{s}$', r'$10^\mathrm{h}00^\mathrm{m}30^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}15^\mathrm{s}$']
xtickv = [sup.degrees(10,00,45, hours=True),sup.degrees(10,00,30, hours=True),sup.degrees(10,00,15, hours=True)]
yticklab = [r'$+02^\circ35^\prime00^{\prime\prime}$',r'$+02^\circ40^\prime00^{\prime\prime}$']
ytickv = [sup.degrees(2, 35, 00, hours=False),sup.degrees(2, 40, 00, hours=False)]
label='COSMOS-17'
factor=240.
if root == 'icxe18010':
aspect=1.577
xlim = [150.14, 150.01]
ylim = [2.53, 2.735]
xticklab = [r'$10^\mathrm{h}00^\mathrm{m}30^\mathrm{s}$', r'$10^\mathrm{h}00^\mathrm{m}20^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}10^\mathrm{s}$']
xtickv = [sup.degrees(10,00,30, hours=True),sup.degrees(10,00,20, hours=True),sup.degrees(10,00,10, hours=True)]
yticklab = [r'$+02^\circ35^\prime00^{\prime\prime}$',r'$+02^\circ40^\prime00^{\prime\prime}$']
ytickv = [sup.degrees(2, 35, 00, hours=False),sup.degrees(2, 40, 00, hours=False)]
label='COSMOS-18'
factor=240.
fig = unicorn.catalogs.plot_init(square=True, xs=5., aspect=aspect,
fontsize=8, left=0.18, right=0.02, bottom=0.10, top=0.10)
ax = fig.add_subplot(111)
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=9)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
reg_file = root+'_asn.reg'
poly = []
with open(reg_file) as f:
for line in f:
if not line.startswith('fk5'):
region = line.split('#')[0]
poly.append(sup.polysplit(region=region, get_shapely=True))
shifts = table.read('shifts_{}.txt'.format(root), format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
cc = 0
xcen_all = []
ycen_all = []
for j,(pp, x_off, y_off, file) in enumerate(zip(poly, shifts['x'], shifts['y'], shifts['file'])):
cc += 1.
color = scalarMap.to_rgba(cc)
x, y = pp.exterior.xy
flt = fits.open(file)
xcen = flt[1].header['CRVAL1O']
ycen = flt[1].header['CRVAL2O']
x_off = (flt[1].header['CRVAL1B']-flt[1].header['CRVAL1O'])*20.
y_off = (flt[1].header['CRVAL2B']-flt[1].header['CRVAL2O'])*20.
print file, xcen, xcen+x_off, ycen, ycen+y_off
#xcen = (np.mean(x[:-1]))
#ycen = (np.mean(y[:-1]))
xcen_all.append(xcen)
ycen_all.append(ycen)
ax.plot(x,y,'-', color=color)
#ax.annotate("",xy=(xcen+(x_off*0.12)/factor, ycen+(y_off*0.12)/factor), xytext=(xcen, ycen),
# arrowprops=dict(arrowstyle='->', color=color))
#ax.plot([xcen, xcen+x_off], [ycen, ycen+y_off], '-')
ax.annotate("",xy=(xcen+x_off, ycen+y_off), xytext=(xcen, ycen),
arrowprops=dict(arrowstyle='->', color=color))
ax.plot(xcen_all, ycen_all, '+:', markersize=10., color='0.5', alpha=0.5)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklab)
xtick = ax.set_xticks(xtickv)
ax.set_yticklabels(yticklab)
ytick = ax.set_yticks(ytickv)
ax.set_title(label)
plt.show(block=False)
fig.savefig('footprint_{}.png'.format(label.lower()), dpi=200, transparent=False)
def footprints_all():
import unicorn.survey_paper as sup
import matplotlib.colors as colors
import matplotlib.cm as cmx
fig = unicorn.catalogs.plot_init(square=True, xs=10., aspect=1.1,
fontsize=8, left=0.18, right=0.02, bottom=0.10, top=0.10)
ax = fig.add_subplot(111)
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=9)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
xlim = [150.270, 149.99]
ylim = [2.45, 2.751]
xticklab = [r'$10^\mathrm{h}01^\mathrm{m}00^\mathrm{s}$', r'$10^\mathrm{h}00^\mathrm{m}45^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}30^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}15^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}00^\mathrm{s}$']
xtickv = [sup.degrees(10,01,00, hours=True),sup.degrees(10,00,45, hours=True),sup.degrees(10,00,30, hours=True),sup.degrees(10,00,15, hours=True),sup.degrees(10,00,00, hours=True)]
yticklab = [r'$+02^\circ30^\prime00^{\prime\prime}$',r'$+02^\circ35^\prime00^{\prime\prime}$', r'$+02^\circ40^\prime00^{\prime\prime}$',r'$+02^\circ45^\prime00^{\prime\prime}$']
ytickv = [sup.degrees(2, 30, 00, hours=False),sup.degrees(2, 35, 00, hours=False),sup.degrees(2, 40, 00, hours=False),sup.degrees(2, 45, 00, hours=False)]
factor=20.
roots = ['icxe15010', 'icxe16010', 'icxe17010','icxe18010']
labels = ['COSMOS-15', 'COSMOS-16','COSMOS-17','COSMOS-18']
lines = ['-','--','-.',':']
for root, label, linestyle in zip(roots, labels, lines):
reg_file = root+'_asn.reg'
poly = []
with open(reg_file) as f:
for line in f:
if not line.startswith('fk5'):
region = line.split('#')[0]
poly.append(sup.polysplit(region=region, get_shapely=True))
shifts = table.read('shifts_{}.txt'.format(root), format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
cc = 0
xcen_all = []
ycen_all = []
for j,(pp, file) in enumerate(zip(poly, shifts['file'])):
cc += 1.
color = scalarMap.to_rgba(cc)
x, y = pp.exterior.xy
flt = fits.open(file)
xcen = flt[1].header['CRVAL1O']
ycen = flt[1].header['CRVAL2O']
x_off = (flt[1].header['CRVAL1B']-flt[1].header['CRVAL1O'])*factor
y_off = (flt[1].header['CRVAL2B']-flt[1].header['CRVAL2O'])*factor
xcen_all.append(xcen)
ycen_all.append(ycen)
ax.plot(x,y,'-', color=color)
ax.annotate("",xy=(xcen+x_off, ycen+y_off), xytext=(xcen, ycen),
arrowprops=dict(arrowstyle='->', color=color))
ax.text(xcen, ycen+0.005, file.split('_')[0], fontsize=9, va='top', ha='center')
ax.plot(xcen_all, ycen_all, linestyle=linestyle, markersize=10.,
color='0.5', alpha=0.7, label=label)
ax.plot(xcen_all, ycen_all, '+', markersize=10., color='0.5', alpha=0.7)
ax.text(xcen_all[0], ycen_all[0]-0.005, label, fontsize=10, va='top', ha='center')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklab)
xtick = ax.set_xticks(xtickv)
ax.set_yticklabels(yticklab)
ytick = ax.set_yticks(ytickv)
ax.legend(loc='lower right', frameon=False, labelspacing=0.8, fontsize=9, handlelength=10, borderpad=5.)
#ax.set_title(label)
plt.show(block=False)
fig.savefig('footprints_all.png', dpi=200, transparent=False)
def make_hist_plot_bw():
import astropy.io.ascii as ascii
from matplotlib.ticker import MultipleLocator, FormatStrFormatter,AutoMinorLocator, ScalarFormatter
minorLocator = MultipleLocator(10)
matplotlib.rc('xtick',labelsize=10)
matplotlib.rc('ytick',labelsize=10)
data = ascii.read('/3DHST/Spectra/Work/HOPR/GYRO/gyro_shifts.txt')
fig = unicorn.catalogs.plot_init(square=True, xs=6., aspect=0.9, fontsize=10., left=0.15, right=0.05, top=0.1, bottom=0.15)
ax = fig.add_subplot(111)
ax.fill_between([0.,0.5], 0, 30, color='0.85', alpha=0.9)
nn, bins, patches = ax.hist(data['rate']*25., bins=60, range=[0.,6.], color='black', histtype='step', linestyle='dotted')
test_val = data['rate'][data['root'] == 'ib2u22prq']
ax.arrow(test_val*25., 17.1, 0., -1., head_length=0.5, head_width=0.1, fc='red', ec='red')
ax.text(0., 18.0, 'Fig. 1 Example', ha='left', va='center', fontsize=13, fontweight='heavy')
ax.arrow(0., 21., 0.5, 0., head_length=0.05, head_width=0.30, fc='black', ec='black', length_includes_head=True)
ax.arrow(0.5, 21., -0.5, 0., head_length=0.05, head_width=0.30, fc='black', ec='black', length_includes_head=True)
ax.text(0.0, 22.5, 'Expected Drift', ha='left', va='center', fontsize=15, fontweight='heavy')
ax.text(1.1, 5, 'gyro\nproblem', fontsize=12.5, fontweight=0.1, multialignment='center')
ax.text(4.3, 5, 'failed\nguide star\nacquisition', fontsize=12.5, fontweight=0.1, multialignment='center')
shift_files = glob.glob('shifts_icxe*010.txt')
drift_rate = []
for shift_file in shift_files:
data = table.read(shift_file, format='ascii', names=('file','x','y','rot','scale','x_rms','y_rms'))
for k, file in enumerate(data['file'][1:]):
if k+1 < 4:
t_exp = 255
else:
t_exp = 277
drift = table.read(file.split('_')[0]+'_shifts.txt', format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
drift_rate.append(25.*np.sqrt((drift['x'][0]-drift['x'][-1])**2 + (drift['y'][0]-drift['y'][-1])**2)/t_exp)
nn_sh, bins_sh, patches_sh = ax.hist(drift_rate, bins=5, range=[0.,0.5], color='red', histtype='step', lw=3)
print "Mean drift: {}\nMedian drift: {}".format(np.mean(drift_rate), np.median(drift_rate))
ax.set_xlim([-0.15, 2.7])
ax.set_ylim([0., 25])
ax.set_xlabel('Drift in WFC3/IR Pixels per 25 Seconds', fontsize=12)
ax.set_ylabel('Number of Images')
ax.tick_params(axis='both', which='major',labelsize=12)
minorLocator = AutoMinorLocator()
ax.xaxis.set_minor_locator(minorLocator)
minorLocator = AutoMinorLocator(5)
ax.yaxis.set_minor_locator(minorLocator)
ax.tick_params(which='minor', length=2)
plt.show(block=False)
plt.savefig('drift_hist_bw.png',dpi=100,transparent=False)
plt.savefig('drift_hist_bw.pdf',dpi=100,transparent=False)
def xy_shifts():
import scipy
# x gaussian: [ 3.41423523 1.29066909 6.73498474] or uniform over [-70, 70]
# y gaussian: [ 5.27724105 -0.78413847 -4.80223471]
tab = table.read('allshifts.txt', format='ascii')
nn, bins, patches = plt.hist(tab['col2'], bins=50, range=[-70,70], color='red', alpha=0.5)
centers = 0.5*(bins[1:]+bins[:-1])
coeff_g, var_g = scipy.optimize.curve_fit(gauss, centers , nn, p0=[np.max(nn),0.,100.])
fit_g = gauss(centers, *coeff_g)
plt.plot(centers, fit_g, color='red')
print coeff_g
nn1, bins1, patches = plt.hist(tab['col3'], bins=bins, range=[-70,70], color='blue', alpha=0.5)
centers1 = 0.5*(bins1[1:]+bins1[:-1])
coeff_g, var_g = scipy.optimize.curve_fit(gauss, centers1 , nn1, p0=[np.max(nn1),0.,100.])
fit_g = gauss(centers, *coeff_g)
plt.plot(centers1, fit_g, color='blue')
print coeff_g
plt.show(block=False)
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
def area_mag():
import unicorn
import unicorn.survey_paper as sup
from my_python.phot_paper import determine_outline as determine_outline
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter,AutoMinorLocator
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=1.0, fontsize=10, left=0.15, right=0.02, top=0.03, bottom=0.12)
ax = fig.add_subplot(111)
WFC3_IR_AREA = 4.6 # arcmin2
imaging = {'UDF/XDF':(236.1e3/3000, 1*WFC3_IR_AREA, 150, 0.3), 'HUDF09-1': (13, 1*WFC3_IR_AREA,350,0.25), 'HUDF09-2':(19, 1*WFC3_IR_AREA, 350,0.25), 'CANDELS-Deep': (12, 0.04*3600, 350,0.07), 'CANDELS-Wide': (2./3., 0.2*3600, 0, 0.3), 'CLASH': (2, 25*WFC3_IR_AREA, 0,0.3), 'FF clusters': (24, 6*WFC3_IR_AREA, 150, 0.3)}
for key in imaging.keys():
mag = 2.5*np.log10(np.sqrt(imaging[key][0]/0.125))+25.5
ax.plot(imaging[key][1],mag, 's', markersize=7, color='black')
ax.text(imaging[key][1]+imaging[key][2], mag+imaging[key][3], key, ha='center', va='top', fontsize=9)
print key, mag
if key == 'HUDF09-1':
ax.plot([13+50, 13+130], [mag, mag+0.17], color='black', lw=1.)
if key == 'HUDF09-2':
ax.plot([19+50, 19+130], [mag, mag+0.17], color='black', lw=1.)
if key == 'FF clusters':
ax.plot()
ax.plot([2000.], [25.0], 's', markersize=12, color='red')
ax.text(2000., 25.3, 'GO-14114', multialignment='center', color='red',fontsize=10, ha='center', va='top')
ax.text(1200, 28.5, 'HST/WFC3 F160W\n imaging', fontsize =12, multialignment='center')
ax.set_xlim([-100, 2490.])
ax.set_ylim([24.65, 29.5])
ax.set_xlabel('Area [arcmin$^2$]', fontsize=12)
ax.set_ylabel('5$\sigma$ Point Source Depth [AB mag]', fontsize=12)
minorLocator = AutoMinorLocator()
ax.xaxis.set_minor_locator(minorLocator)
minorLocator = AutoMinorLocator(5)
ax.yaxis.set_minor_locator(minorLocator)
yticklab = ['25.0', '26.0', '27.0', '28.0', '29.0']
ytickv = [25.0, 26.0, 27.0, 28.0, 29.0]
ax.set_yticklabels(yticklab)
ytick = ax.set_yticks(ytickv)
plt.show(block=False)
fig.savefig('area_mag.png', dpi=200, transparent=False)
fig.savefig('area_mag.pdf', dpi=200, transparent=False)
def mosaic_demo():
mosaic = fits.open('test7_drz_sci.fits')
### open COSMOS-15 mosaics
cos15 = fits.open('icxe15010_test_drz_sci.fits')
### open CANDELS mosaic
candels = fits.open('cosmos_3dhst_cutout.fits')
fig = unicorn.catalogs.plot_init(square=True, xs=10., aspect=0.835,
fontsize=8, left=0.02, right=0.02, bottom=0.02, top=0.02)
gs1 = gridspec.GridSpec(1,1)
gs2 = gridspec.GridSpec(2,1, left=0.65, right=0.98, top=0.8, bottom=0.4)
gs3 = gridspec.GridSpec(1,2, left=0.40, right=0.88, top = 0.3, bottom=0.05)
fig.subplots_adjust(wspace=0.05)
fig.subplots_adjust(hspace=0.05)
ax1 = fig.add_subplot(gs1[0,0], xlim=[0,12200], ylim=[0,10200])
im1 = ax1.imshow(mosaic[0].data, cmap = pylab.cm.Greys, vmin=-0.01,
vmax=0.5, interpolation='None')
ax1.plot([2150,3050,3050,2150,2150],[3250,3250,3850,3850,3250],'-',color='black')
ax1.plot([1900,2125,2125,1900,1900],[5250,5250,5400,5400,5250],'-',color='black')
ax1.tick_params(axis='both',which='both',top='off',bottom='off', right='off', left='off')
ax1.axis('off')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
### plot mosaic with boxes at the correct positions
### plot candels outline
### plot zooms of the unsmeared and smeared mosaics
ax2 = fig.add_subplot(gs2[0,0], xlim=[0,225], ylim=[0,150])
im2 = ax2.imshow(mosaic[0].data[5250:5400,1900:2125], cmap = pylab.cm.Greys, vmin=-0.01,
vmax=0.4, interpolation='bicubic')
ax2.tick_params(axis='both',which='both',top='off',bottom='off', right='off', left='off')
ax2.text(20,15,'COSMOS-DASH Mosaic', fontsize=10)
ax2.set_xticklabels([])
ax2.set_yticklabels([])
ax3 = fig.add_subplot(gs2[1,0], xlim=[0,225], ylim=[0,150])
im3 = ax3.imshow(cos15[0].data[5250:5400,1900:2125], cmap = pylab.cm.Greys, vmin=-0.01,
vmax=0.4, interpolation='bicubic')
ax3.text(20,15,'Uncorrected (smeared) image', fontsize=10)
ax3.tick_params(axis='both',which='both',top='off',bottom='off', right='off', left='off')
ax3.set_xticklabels([])
ax3.set_yticklabels([])
### plot zooms of our mosaic and CANDELS
ax4 = fig.add_subplot(gs3[0,0], xlim=[0,900], ylim=[0,600])
im4 = ax4.imshow(mosaic[0].data[3250:3850,2150:3050], cmap = pylab.cm.Greys, vmin=-0.01,
vmax=0.5, interpolation='bicubic')
ax4.text(50,50,'COSMOS-DASH Mosaic', fontsize=10)
ax4.tick_params(axis='both',which='both',top='off',bottom='off', right='off', left='off')
ax4.set_xticklabels([])
ax4.set_yticklabels([])
ax5 = fig.add_subplot(gs3[0,1], xlim=[0,1500], ylim=[0,1000])
im5 = ax5.imshow(candels[0].data[880:1880,610:2110], cmap = pylab.cm.Greys, vmin=-0.01,
vmax=0.1, interpolation='bicubic')
ax5.text(50,83,'CANDELS Mosaic', fontsize=10)
ax5.tick_params(axis='both',which='both',top='off',bottom='off', right='off', left='off')
ax5.set_xticklabels([])
ax5.set_yticklabels([])
plt.show(block=False)
fig.savefig('mosaic_demo.png', dpi=200, transparent=False)
fig.savefig('mosaic_demo.pdf', dpi=200, transparent=False)
def plot_galfit():
"""
Makes the plot comparing the GALFIT derived parameters from the new mosaics to CANDELS.
"""
import threedhst
from my_python.pyspherematch import spherematch
from matplotlib.ticker import ScalarFormatter, MultipleLocator, FormatStrFormatter
fs=9
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
mag_limit=21.
### read in CANDELS size measurements
old = table.read('F160W_galfit_v4.0.cat.FITS')
big_cat = table.read('/3DHST/Photometry/Release/v4.1/COSMOS/Catalog/cosmos_3dhst.v4.1.cat.FITS')
old = old[(old['mag'] <= mag_limit) & (old['f'] < 2) & (big_cat['use_phot'] == 1)]
### read in new data from Arjen: H_v4.cat
print 'Reading H_v4.cat'
new = table.read('H_v4.cat', format='ascii')
new = new[(new['mag'] <= mag_limit) & (new['f'] < 2)]
### cross-match coordinates
idx_n, idx_o, d = spherematch(new['RA'], new['DEC'], old['ra'], old['dec'],tol = 0.5/3600.)
print 'Total matches: {}'.format(len(idx_n))
### make plot
fig = unicorn.catalogs.plot_init(square=True, xs=8., aspect=1.0,
fontsize=8, left=0.15, right=0.1, bottom=0.15, top=0.05)
gs = gridspec.GridSpec(2,2)
fig.subplots_adjust(wspace=0.20)
fig.subplots_adjust(hspace=0.20)
ax1 = fig.add_subplot(gs[0,0], xlim=[16,22], ylim=[16,22])
ax1.plot([0,100],[0,100],':', color='gray', alpha=0.8)
ax1.plot(new['mag'][idx_n],old['mag'][idx_o],'o', color='0.5', alpha=0.5, markersize=4.)
ax1.set_xlabel('COSMOS-DASH F160W Magnitude', fontsize=fs)
ax1.set_ylabel('CANDELS F160W Magnitude', fontsize=fs)
diff = new['mag'][idx_n] - old['mag'][idx_o]
print 'Magnitude: \nMEDIAN: {}\nNMAD: {}\n\n'.format(np.median(diff), threedhst.utils.nmad(diff))
ax2 = fig.add_subplot(gs[0,1], xlim=[0.1,3.5], ylim=[0.1,3.5])
ax2.plot([0.01,5],[0.01,5],':', color='gray', alpha=0.8)
ax2.plot(new['re'][idx_n],old['re'][idx_o],'o', color='0.5', alpha=0.5, markersize=4.)
ax2.set_xlabel('COSMOS-DASH $R_{eff}$ [arcsec]', fontsize=fs)
ax2.set_ylabel('CANDELS $R_{eff}$ [arcsec]', fontsize=fs, labelpad=0)
ax2.set_xscale('log')
ax2.xaxis.set_major_formatter(ScalarFormatter())
ax2.xaxis.set_major_locator(MultipleLocator(1))
ax2.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax2.set_yscale('log')
ax2.yaxis.set_major_formatter(ScalarFormatter())
ax2.yaxis.set_major_locator(MultipleLocator(1))
ax2.yaxis.set_major_formatter(FormatStrFormatter('%d'))
diff = np.log10(new['re'][idx_n]) - np.log10(old['re'][idx_o])
print 'log(Reff): \nMEDIAN: {}\nNMAD: {}\n\n'.format(np.median(diff), threedhst.utils.nmad(diff))
print np.median(new['re'][idx_n]/old['re'][idx_o])
ax3 = fig.add_subplot(gs[1,0], xlim=[0.6, 8.9], ylim=[0.6,8.9])
ax3.plot([-1,100],[-1,100],':', color='gray', alpha=0.8)
ax3.plot(new['n'][idx_n],old['n'][idx_o],'o', color='0.5', alpha=0.5, markersize=4.)
ax3.set_xlabel("COSMOS-DASH Sersic Index", fontsize=fs)
ax3.set_ylabel("CANDELS Sersic Index", fontsize=fs, labelpad=0)
ax3.set_xscale('log')
ax3.xaxis.set_major_formatter(ScalarFormatter())
ax3.xaxis.set_major_locator(MultipleLocator(1))
ax3.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax3.set_yscale('log')
ax3.yaxis.set_major_formatter(ScalarFormatter())
ax3.yaxis.set_major_locator(MultipleLocator(1))
ax3.yaxis.set_major_formatter(FormatStrFormatter('%d'))
diff = (new['n'][idx_n]) - (old['n'][idx_o])
print 'Sersic n: \nMEDIAN: {}\nNMAD: {}\n\n'.format(np.median(diff), threedhst.utils.nmad(diff))
ax4 = fig.add_subplot(gs[1,1], xlim=[0,1.0], ylim=[0,1.0])
ax4.plot([0,100],[0,100],':', color='gray', alpha=0.8)
ax4.plot(new['q'][idx_n],old['q'][idx_o],'o', color='0.5', alpha=0.5, markersize=4.)
ax4.set_xlabel('COSMOS-DASH Axis Ratio', fontsize=fs)
ax4.set_ylabel('CANDELS Axis Ratio', fontsize=fs)
diff = new['q'][idx_n] - old['q'][idx_o]
print 'Axis ratio: \nMEDIAN: {}\nNMAD: {}\n\n'.format(np.median(diff), threedhst.utils.nmad(diff))
plt.show(block=False)
fig.savefig('galfit_gyro_comp.pdf', dpi=200)
fig.savefig('galfit_gyro_comp.png', dpi=200)
def mag_depth():
orig = table.read('/3DHST/Photometry/Release/v4.0/COSMOS/Detection/cosmos_3dhst.v4.0.F160W_orig.cat', format='ascii.sextractor')
cat = table.read('test7_drz_sci.cat', format='ascii.sextractor')
fig = unicorn.catalogs.plot_init(square=True, xs=6., aspect=0.9, fontsize=10., left=0.15, right=0.15, top=0.1, bottom=0.15)
ax = fig.add_subplot(111)
n_orig, bins, patches = ax.hist(orig['MAG_AUTO'], bins=25,
range=[16., 30.], color='0.5', alpha=0.5, lw=2, histtype='step')
n_cat, bins, patches = ax.hist(cat['MAG_AUTO'], bins=bins,
range=[16., 30.], color='red', alpha=0.5, histtype='step', lw=2)
ax.set_xlim([17.,28.])
ax.set_ylim([0, 8e3])
ax.set_xlabel('F160W Magnitude', fontsize=12)
ax.set_ylabel('N')
ax2 = ax.twinx()#fig.add_subplot(111, sharex=ax, frameon=False)
bin_c = bins[:-1] + (bins[1:] - bins[:-1])/2
width = np.mean(bins[1:] - bins[:-1])
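# 142.429 and 183.9 are presumably the effective areas (in arcmin^2) of the DASH and
# CANDELS mosaics, used to normalise the two histograms before taking the ratio
# (our reading of the code, not stated in the source).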
ax2.plot(bin_c+width/2, (n_cat/142.429)/(n_orig/183.9), color='black', lw=2, alpha=0.8)
ax2.plot([15.,31.], [1.0, 1.0], linestyle='dashed', color='0.8', alpha=0.5)
ax2.plot([15.,31.], [0.9, 0.9], linestyle='dashed', color='0.8', alpha=0.5)
ax2.plot([15.,31.], [0.75,0.75], linestyle='dashed', color='0.8', alpha=0.5)
ax2.plot([15.,31.], [0.5, 0.5], linestyle='dashed', color='0.8', alpha=0.5)
ax2.set_ylim([0.,1.3])
ax2.set_xlim([17.,28.])
ax2.set_ylabel('Fraction')
plt.show(block=False)
fig.savefig('mag_depth.pdf', dpi=200)
fig.savefig('mag_depth.png', dpi=200)
def psf_plot():
psf = fits.open('test7_psf_v2.fits')
wht = fits.open('test7_wht_v2.fits')
### make plot
fig = unicorn.catalogs.plot_init(square=True, xs=6., aspect=0.6,
fontsize=8, left=0.05, right=0.05, bottom=0.1, top=0.00)
gs = gridspec.GridSpec(1,2)
fig.subplots_adjust(wspace=0.10)
fig.subplots_adjust(hspace=0.10)
ax1 = fig.add_subplot(gs[0], xlim=[0,68], ylim=[0,68])
ax1.axis('off')
ax2 = fig.add_subplot(gs[1], xlim=[0,68], ylim=[0,68])
ax2.axis('off')
im1 = ax1.imshow(psf[0].data/np.max(psf[0].data), cmap = pylab.cm.Greys_r, vmin=0.0,
vmax=0.015, interpolation='None')
cbar_ax1 = fig.add_axes([0.06, 0.12, 0.40, 0.07])
cbar1 = plt.colorbar(im1, cax = cbar_ax1,orientation='horizontal', ticks = [0.0, 0.005, 0.010, 0.015])
cbar1.ax.set_xticklabels([0.0, 0.005, 0.010, 0.015])
im2 = ax2.imshow(wht[0].data/np.max(wht[0].data), cmap = pylab.cm.Greys_r, vmin=0.7,
vmax=1., interpolation='None')
cbar_ax2 = fig.add_axes([0.54, 0.12, 0.40, 0.07])
cbar2 = plt.colorbar(im2, cax = cbar_ax2,orientation='horizontal', ticks = [0.7,0.8,0.9,1.0])
cbar2.ax.set_xticklabels([0.7,0.8,0.9,1.0])
plt.show(block=False)
fig.savefig('psf_plot.pdf', dpi=200)
fig.savefig('psf_plot.png', dpi=200)
def rotation_check():
import unicorn.survey_paper as sup
from my_python.pyspherematch import spherematch as psh
import matplotlib.colors as colors
import matplotlib.cm as cmx
os.chdir('/3DHST/Spectra/Work/14114/PREPARE/')
REF_CAT = '../REF/IPAC_ACS.fits'
NEW_CAT = 'test7_drz_sci.cat'
ref_cat = table.read(REF_CAT)
new_cat = table.read(NEW_CAT, format='ascii.sextractor')
idx1, idx2, dd = psh(new_cat['X_WORLD'], new_cat['Y_WORLD'], ref_cat['ra'], ref_cat['dec'], tol=1./3600.)
fig = unicorn.catalogs.plot_init(square=True, xs=10., aspect=1.1,
fontsize=8, left=0.18, right=0.02, bottom=0.10, top=0.10)
ax = fig.add_subplot(111)
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=9)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
xlim = [150.270, 149.99]
ylim = [2.45, 2.751]
xticklab = [r'$10^\mathrm{h}01^\mathrm{m}00^\mathrm{s}$', r'$10^\mathrm{h}00^\mathrm{m}45^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}30^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}15^\mathrm{s}$',r'$10^\mathrm{h}00^\mathrm{m}00^\mathrm{s}$']
xtickv = [sup.degrees(10,01,00, hours=True),sup.degrees(10,00,45, hours=True),sup.degrees(10,00,30, hours=True),sup.degrees(10,00,15, hours=True),sup.degrees(10,00,00, hours=True)]
yticklab = [r'$+02^\circ30^\prime00^{\prime\prime}$',r'$+02^\circ35^\prime00^{\prime\prime}$', r'$+02^\circ40^\prime00^{\prime\prime}$',r'$+02^\circ45^\prime00^{\prime\prime}$']
ytickv = [sup.degrees(2, 30, 00, hours=False),sup.degrees(2, 35, 00, hours=False),sup.degrees(2, 40, 00, hours=False),sup.degrees(2, 45, 00, hours=False)]
factor=20.
ax.quiver(new_cat['X_WORLD'][idx1], new_cat['Y_WORLD'][idx1], (new_cat['X_WORLD'][idx1]-ref_cat['ra'][idx2])*3600.*2,
(new_cat['Y_WORLD'][idx1]-ref_cat['dec'][idx2])*3600.*2,
width=0.8, linewidth=0.3, units='dots', headwidth=4, minshaft=1, headlength=5, pivot='tail', scale=0.025)
print np.median(new_cat['X_WORLD'][idx1]-ref_cat['ra'][idx2])*3600., np.median(new_cat['Y_WORLD'][idx1]-ref_cat['dec'][idx2])*3600.
roots = ['icxe15010', 'icxe16010', 'icxe17010','icxe18010']
labels = ['COSMOS-15', 'COSMOS-16','COSMOS-17','COSMOS-18']
lines = ['-','--','-.',':']
for root, label, linestyle in zip(roots, labels, lines):
reg_file = root+'_asn.reg'
poly = []
with open(reg_file) as f:
for line in f:
if not line.startswith('fk5'):
region = line.split('#')[0]
poly.append(sup.polysplit(region=region, get_shapely=True))
shifts = table.read('shifts_{}.txt'.format(root), format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
cc = 0
xcen_all = []
ycen_all = []
for j,(pp, file) in enumerate(zip(poly, shifts['file'])):
cc += 1.
color = scalarMap.to_rgba(cc)
x, y = pp.exterior.xy
flt = fits.open(file)
xcen = flt[1].header['CRVAL1O']
ycen = flt[1].header['CRVAL2O']
x_off = (flt[1].header['CRVAL1B']-flt[1].header['CRVAL1O'])*factor
y_off = (flt[1].header['CRVAL2B']-flt[1].header['CRVAL2O'])*factor
xcen_all.append(xcen)
ycen_all.append(ycen)
ax.plot(x,y,'-', color=color)
#ax.annotate("",xy=(xcen+x_off, ycen+y_off), xytext=(xcen, ycen),
# arrowprops=dict(arrowstyle='->', color=color))
#ax.text(xcen, ycen+0.005, file.split('_')[0], fontsize=9, va='top', ha='center')
ax.plot(xcen_all, ycen_all, linestyle=linestyle, markersize=10.,
color='black', alpha=0.7, label=label)
ax.plot(xcen_all, ycen_all, '+', markersize=10., color='0.5', alpha=0.7)
#ax.text(xcen_all[0], ycen_all[0]-0.005, label, fontsize=10, va='top', ha='center')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticklabels(xticklab)
xtick = ax.set_xticks(xtickv)
ax.set_yticklabels(yticklab)
ytick = ax.set_yticks(ytickv)
ax.legend(loc='lower right', frameon=False, labelspacing=0.8, fontsize=9, handlelength=10, borderpad=5.)
plt.show(block=False)
fig.savefig('rotation_check.png', dpi=200, transparent=False)
def rotation_fits():
import glob
import matplotlib.colors as colors
import matplotlib.cm as cmx
files = glob.glob('PREP_ROT/shifts_icxe*010.txt')
fig = unicorn.catalogs.plot_init(xs=10.,aspect=0.45, left=0.2, right=0.05, bottom=0.15, top=0.15, NO_GUI=False)
gs = gridspec.GridSpec(1,2,top=0.9, bottom=0.15)
fig.subplots_adjust(wspace=0.2)
fig.subplots_adjust(hspace=0.2)
fs = 10
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
jet = cm = plt.get_cmap('jet_r')
cNorm = colors.Normalize(vmin=0, vmax=8)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
time = np.array([0, 368, 713, 1060, 1405, 1777, 2147, 2519]) + 333.
lines = ['-','--','-.',':']
labels = ['COSMOS-15', 'COSMOS-16', 'COSMOS-17','COSMOS-18']
for j, file in enumerate(files):
root = '{}'.format(file.split('_')[1].split('.')[0])
ax1.set_xlim([-0.5, 13])
ax1.set_ylim([-0.075,0.075])
ax1.set_xlabel('Read Number', fontsize=fs)
ax1.set_ylabel('Rotation [deg]', fontsize=fs)
data = table.read(file, format='ascii', names=('file','x','y','rot','scale','x_rms','y_rms'))
rot_rate = [0.0]
for k, file in enumerate(data['file'][1:]):
if k+1 < 4:
t_exp = 255
else:
t_exp = 277
drift = table.read('PREP_ROT/'+file.split('_')[0]+'_shifts.txt', format='ascii',
names=('file','x','y','rot','scale','x_rms','y_rms'))
rot = drift['rot']
tt = (drift['rot'] > 180.)
rot[tt] = drift['rot'][tt] - 360.
rot_rate.append(rot[0]-rot[-1])
ax1.plot(np.arange(len(drift['rot']))+1, drift['rot'], linestyle=lines[j],
color='0.5', label=labels[j], zorder=0)
ax1.scatter(np.arange(len(drift['rot']))+1, drift['rot'], c='0.5', s=10., edgecolors='black', alpha=0.7)
ax2.set_xlim([-5., 3100.])
ax2.set_ylim([-0.075,0.075])
ax2.set_xlabel('Time Since Beginning of Orbit\n[seconds]', fontsize=fs)
ax2.set_ylabel('Rotation During Pointing [deg]', fontsize=fs)
ax2.plot(time, np.array(rot_rate),linestyle=lines[j], color='0.5', zorder=0)
ax2.scatter(time, np.array(rot_rate), c=range(len(rot_rate)), cmap='jet', s=35., edgecolors='black', alpha=0.7)
print rot_rate
plt.show(block=False)
fig.savefig('rotation_fits.png', dpi=200, transparent=False)
#fig.savefig('overall_offsets_vs_time.pdf', dpi=200, transparent=False)
def mag_radius_test():
from matplotlib.ticker import ScalarFormatter
import my_python.mk_region_file
orig = table.read('test7_drz_sci_1.8.cat', format='ascii.sextractor')
cat = table.read('test7_drz_sci_0.8.cat', format='ascii.sextractor')
top = 0.075
bottom = 0.1
left = 0.15
fig = unicorn.catalogs.plot_init(xs=10,aspect=0.5, left=left, right=0.1, bottom=bottom, top=top, NO_GUI=False)
fig.subplots_adjust(wspace=0.15)
fig.subplots_adjust(hspace=0.1)
fs = 10
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
gs1 = gridspec.GridSpec(1,2)
ax1 = fig.add_subplot(gs1[0,0], ylim=[0.8, 20], xlim=[14,26])
ax1.plot(orig['MAG_AUTO'],orig['FLUX_RADIUS'], '.', color='0.4',markersize=0.9)
ax1.plot(cat['MAG_AUTO'],cat['FLUX_RADIUS'], 'o', color='black', markersize=1, alpha=0.5)
ax1.set_xlabel('MAG_AUTO F160W')
ax1.set_ylabel('FLUX_RADIUS')
ax1.set_yscale('log')
cr = (cat['FLUX_RADIUS']*0.1/0.06 < 2.)
stars = (cat['MAG_AUTO'] > 15.) & (cat['MAG_AUTO'] < 22.) & (cat['FLUX_APER_5']/cat['FLUX_APER'] > 1.1) & (cat['FLUX_APER_5']/cat['FLUX_APER'] < 1.2)
#ax1.plot(cat['MAG_AUTO'][stars],cat['FLUX_RADIUS'][stars], 'o', color='red', markersize=2, alpha=1.0, markeredgecolor='red')
print 'STARS: mean: {} / median{}'.format(np.mean(cat['FWHM_IMAGE'][stars]), np.median(cat['FWHM_IMAGE'][stars]))
ax1.yaxis.set_major_formatter(ScalarFormatter())
ax2 = fig.add_subplot(gs1[0,1], ylim=[0.5,4], xlim=[14,26])
ax2.plot(orig['MAG_AUTO'], orig['FLUX_APER_5']/orig['FLUX_APER'], '.', color='0.4',markersize=0.9)
ax2.plot(cat['MAG_AUTO'], cat['FLUX_APER_5']/cat['FLUX_APER'], 'o', color='black', markersize=1., alpha=0.5)
#ax2.plot(cat['MAG_AUTO'][stars], cat['FLUX_APER_5'][stars]/cat['FLUX_APER'][stars], 'o', color='red', markersize=2., alpha=1.0, markeredgecolor='red')
ax2.plot(cat['MAG_AUTO'][cr], cat['FLUX_APER_5'][cr]/cat['FLUX_APER'][cr], 'o', color='blue', markersize=2., alpha=1.0)
ax2.set_xlabel('MAG_AUTO F160W')
ax2.set_ylabel('Flux (2.0\")/Flux (0.5\")')
ax2.yaxis.set_major_formatter(ScalarFormatter())
my_python.mk_region_file.mk_region_file_from_lists(cat['X_WORLD'][stars],cat['Y_WORLD'][stars],outfile = 'stars', printids='no', color='cyan')
my_python.mk_region_file.mk_region_file_from_lists(cat['X_WORLD'][cr],cat['Y_WORLD'][cr],outfile = 'cr', printids='no', color='yellow')
plt.show(block=False)
#fig.savefig('mag_radius_test.pdf', dpi=200, transparent=False)
#fig.savefig('mag_radius_test.png', dpi=200, transparent=False)
def signal_to_noise_test():
import threedhst.utils
top = 0.05
bottom = 0.15
left = 0.15
fig = unicorn.catalogs.plot_init(xs=10,aspect=0.5, left=left, right=0.05, bottom=bottom, top=top, NO_GUI=False)
fig.subplots_adjust(wspace=0.01)
fig.subplots_adjust(hspace=0.01)
fs = 9
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
etc_mag = [18.,19.,20.,21.,22.,23.,24.,25.]
etc_sn = np.array([509.,318.,195.8,116.7,65.1,33.1,15.2,6.5])
etc_sn_high = np.array([493.,295.,166.9,85.9,39.97,17.2,7.09,2.87])
factor = (1.-(0.8/3/2.75)) # = 0.903
gs = gridspec.GridSpec(2,4)
asn_files = glob.glob('icxe1*010_asn.fits')
for file in asn_files:
asn = threedhst.utils.ASNFile(file)
for ii, exp in enumerate(asn.exposures):
cat = table.read('/3DHST/Spectra/Work/14114/TEST_SEXTR/{}_drz_sci.cat'.format(exp), format='ascii.sextractor')
ax = fig.add_subplot(gs[ii], ylim=[3., 200], xlim=[21,25.5])
ax.plot(cat['MAG_APER'], cat['FLUX_APER']/(cat['FLUXERR_APER']/factor), '.', color='0.6',markersize=0.9)
stars = (cat['MAG_AUTO'] > 15.) & (cat['MAG_AUTO'] < 22.) & (cat['FLUX_APER_6']/cat['FLUX_APER_1'] > 1.1) & (cat['FLUX_APER_6']/cat['FLUX_APER_1'] < 1.2)
ax.plot(cat['MAG_APER'][stars], cat['FLUX_APER'][stars]/(cat['FLUXERR_APER'][stars]/factor), '^', color='black', markersize=2, alpha=1.0, markeredgecolor='0.3')
if file == 'icxe18010_asn.fits':
ax.text(24,70,'Pointing {}'.format(ii+1), fontsize=9, va='top', ha='center')
#ax.plot(etc_mag,etc_sn_high, 'o', color='#b10026',markersize=2)
ax.plot(etc_mag,etc_sn_high, '--', color='#b10026',markersize=2)
ax.plot(etc_mag,etc_sn_high/1.07, '-', color='#b10026',markersize=2)
#ax.plot(etc_mag,etc_sn, 'o', color='black',markersize=2)
ax.plot(etc_mag,etc_sn, '--', color='black',markersize=2)
ax.plot(etc_mag,etc_sn/1.37, '-', color='black',markersize=2)
ax.plot([10,30],[5.,5.], ':', color='0.5')
#ax.plot([10,30],[4.,4.], ':', color='0.5')
ax.plot([25.,25.],[1.,3000.], ':', color='0.5')
ax.set_yscale('log')
if (ii == 0) or (ii == 4):
ax.set_ylabel('S/N [r = 0.2" aperture]', fontsize=fs)
else:
ax.set_yticklabels([])
if (ii < 3):
ax.set_xticklabels([])
else:
ax.set_xlabel('AB Magnitude\n[r = 0.2" aperture]', fontsize=fs)
ax.set_xticklabels([22,23,24,25], fontsize=fs)
xtick = ax.set_xticks([22,23,24,25])
plt.show(block=False)
fig.savefig('signal_to_noise_test.pdf', dpi=200, transparent=False)
fig.savefig('signal_to_noise_test.png', dpi=200, transparent=False)
def sources_of_noise():
top = 0.075
bottom = 0.1
left = 0.15
fig = unicorn.catalogs.plot_init(xs=10,aspect=0.5, left=left, right=0.1, bottom=bottom, top=top, NO_GUI=False)
fig.subplots_adjust(wspace=0.15)
fig.subplots_adjust(hspace=0.1)
fs = 8
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
gs1 = gridspec.GridSpec(1,2)
ax1 = fig.add_subplot(gs1[0,0], ylim=[0.0,0.27], xlim=[0.1,2.5])
exptime=275.
rdnoise = 15.
xx = np.linspace(0.2,2.4,10)
noise_15 = np.sqrt(rdnoise**2+xx*exptime + (xx*exptime*0.002)**2 + (0.17*exptime))/exptime
rd_15 = np.sqrt(rdnoise**2)/exptime
bg_15 = np.sqrt(xx*exptime)/exptime
rdnoise = np.sqrt(11)*15.
noise_21_11 = np.sqrt(rdnoise**2+xx*exptime + (xx*exptime*0.002)**2 + (0.17*exptime))/exptime
rd_21_11 = np.sqrt(rdnoise**2)/exptime
rdnoise = np.sqrt(2)*21.
noise_21_2 = np.sqrt(rdnoise**2+xx*exptime + (xx*exptime*0.002)**2 + (0.17*exptime))/exptime
rd_21_2 = np.sqrt(rdnoise**2)/exptime
rdnoise = np.sqrt(4)*21.
noise_21_4 = np.sqrt(rdnoise**2+xx*exptime + (xx*exptime*0.002)**2 + (0.17*exptime))/exptime
rd_21_4 = np.sqrt(rdnoise**2)/exptime
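# Sanity check of the noise model above (our own arithmetic, not from the source):
# per pixel, sigma = sqrt(RDN^2 + bg*t + (0.002*bg*t)^2 + 0.17*t)/t, where the 0.002
# term is presumably a flat-field/calibration error and 0.17 e-/s presumably the
# dark current. For RDN = 15 e-, t = 275 s and bg = 1.0 e-/s:
# sqrt(225 + 275 + 0.55**2 + 46.75)/275 = sqrt(547.05)/275 ~ 0.085 e-/s,
# which matches the 'RDN = 15' curve near the middle of the plotted background range.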
ax1.plot(xx, noise_21_11, '-',color='#b10026',lw=1, label=r'RDN = 15$\times\sqrt{11}$ e$^{-}$: (11$\times$25 sec exp (w/ ramp fits))')
ax1.plot([0.,3.], [rd_21_11,rd_21_11], ':',color='#b10026',lw=1)
ax1.plot(xx, noise_21_4, '-',color='#fc4e2a',lw=1, label=r'RDN = 21$\times\sqrt{4}$ e$^{-}$')
ax1.plot([0.,3.], [rd_21_4,rd_21_4], ':',color='#fc4e2a',lw=1)
ax1.plot(xx, noise_21_2, '-',color='#feb24c',lw=1, label=r'RDN = 21$\times\sqrt{2}$ e$^{-}$')
ax1.plot([0.,3.], [rd_21_2,rd_21_2], ':',color='#feb24c',lw=1)
ax1.plot(xx, noise_15, '-',color='black',lw=1, label=r'RDN = 15 e$^{-}$: one 275 sec exp (/w ramp fit)')
ax1.plot([0.,3.], [rd_15,rd_15], ':',color='black',lw=1)
ax1.plot(xx, bg_15, '--',color='black',lw=1)
ax1.text(2.0,0.105,'full noise model', rotation=11, va='center', ha='center', fontsize=fs, backgroundcolor='white', alpha=0.7)
ax1.text(2.0,0.085,'sky noise', rotation=12, va='center', ha='center', fontsize=fs, backgroundcolor='white', alpha=0.7)
ax1.text(2.0,0.053,'read noise',va='center', ha='center', fontsize=fs, backgroundcolor='white', alpha=0.7)
#ax1.fill_between([0.6,0.8], 0.,2.,color='0.5', alpha=0.5)
ax1.legend(loc='upper left', fontsize=fs, frameon=False)
ax1.set_ylabel('$\sigma [e^-/s]$', fontsize=fs)
ax1.set_xlabel('Background [e$^-$/s]', fontsize=fs)
ax2 = fig.add_subplot(gs1[0,1], ylim=[0.2,1.25], xlim=[0.1,2.5])
ax2.plot(xx, 2.5*np.log10(noise_21_11/noise_15), '-',color='#b10026',lw=1)
ax2.plot(xx, 2.5*np.log10(noise_21_4/noise_15), '-',color='#fc4e2a',lw=1)
ax2.plot(xx, 2.5*np.log10(noise_21_2/noise_15), '-',color='#feb24c',lw=1)
ax2.grid(True, linestyle=':',linewidth=0.5)
ax2.fill_between([0.6,0.8], 0.,2.,color='0.5', alpha=0.5)
ax2.fill_between([2.1,2.3], 0.,2.,color='0.5', alpha=0.5)
ax2.text(0.7, 1.1,'nominal\nbackground', rotation=90., va='center', ha='center', fontsize=8)
ax2.text(2.2, 0.9,'Oct. 2015\nbackground', rotation=90., va='center', ha='center', fontsize=8)
ax2.set_ylabel('2.5*log$_{10}$($\sigma/\sigma_{15}$) [mag]', fontsize=fs)
ax2.set_xlabel('Background [e$^-$/s]', fontsize=fs)
plt.show(block=False)
fig.savefig('sources_of_noise.pdf', dpi=200, transparent=False)
fig.savefig('sources_of_noise.png', dpi=200, transparent=False)
def plot_empty_apertures():
import glob
import matplotlib.colors as colors
import matplotlib.cm as cmx
import threedhst.utils
from scipy.interpolate import interp1d
import scipy
growth = table.read('COSMOS_growth_curve.dat', format='ascii')
fp = interp1d(growth['radius_arcsec'],growth['fraction'])
fig = unicorn.catalogs.plot_init(xs=10.,aspect=0.45, left=0.2, right=0.05, bottom=0.15, top=0.15, NO_GUI=False)
gs = gridspec.GridSpec(1,2,top=0.9, bottom=0.15)
fig.subplots_adjust(wspace=0.2)
fig.subplots_adjust(hspace=0.2)
fs = 10
matplotlib.rc('xtick',labelsize=fs)
matplotlib.rc('ytick',labelsize=fs)
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=8)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
ax1 = fig.add_subplot(gs[0], ylim=[0.1,1.], xlim=[0.1,0.5])
ax2 = fig.add_subplot(gs[1], ylim=[10.,15.], xlim=[0.1,2])
files = glob.glob('icxe*0_asn.fits')
for file in files:
asn = threedhst.utils.ASNFile('{}'.format(file))
for j, exp in enumerate(asn.exposures):
data = table.read('{}_drz_sci.apertures.F160W.v4.0.dat'.format(exp), format='ascii')
color = scalarMap.to_rgba(j+1)
ax1.plot(data['radius_pix']*0.12825, data['sigma'], 'o-', color=color, alpha=0.8, mfc=color, mec=color)
fp1 = interp1d(data['radius_pix']*0.12825, data['sigma'])
if (file == 'icxe15010_asn.fits') & ((j == 0) | (j == 7)):
coeff, var = scipy.optimize.curve_fit(sigma, data['radius_pix'], data['sigma'], p0=[0.1, 1])
ax1.plot(data['radius_pix']*0.12825, 0.175*coeff[0]*data['radius_pix']**2,'--',color='black')
ax1.plot(data['radius_pix']*0.12825, 0.175*coeff[0]*data['radius_pix'],'--',color='black')
growth_y = fp(data['radius_pix']*0.12825)
ax2.plot(data['radius_pix']*0.12825, -2.5*np.log10(5*(data['sigma']**2/fp(data['radius_pix']*0.12825))*15369.)+25. ,'o-', color=color, alpha=0.8, mfc=color, mec=color)
ax1.set_ylabel('$\sigma$ [e-/s]')
ax1.set_xlabel('Radius [arcsec]')
#ax2.plot(growth['radius_arcsec'],1/growth['fraction'],'-')
#ax2.plot(growth['radius_arcsec'], 1/fp(growth['radius_arcsec']),'o')
guided = glob.glob('../TEST_HOPR_COOPER/REDUCE/ibt355*_drz_sci.apertures.F160W.v4.0.dat')
dash = glob.glob('../TEST_HOPR_COOPER/REDUCE/ibt305*_drz_sci.apertures.F160W.v4.0.dat')
#ax1.set_yscale('log')
for file in guided:
data = table.read(file, format='ascii')
#ax1.plot(data['radius_pix']*0.128, data['sigma'], 'o-', color='black', alpha=0.8)
for file in dash:
data = table.read(file, format='ascii')
#ax1.plot(data['radius_pix']*0.128, data['sigma'], 'o-', color='gray', alpha=0.8)
plt.show(block=False)
def gauss(x, *p):
A, mu, sigma = p
return A*np.exp(-(x-mu)**2/(2.*sigma**2))
def sigma(x, *p):
alpha, beta = p
sigma1 = 0.175
return sigma1*alpha*x**beta
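# Note: sigma() above models the empty-aperture noise scaling sigma(N) = sigma_1*alpha*N**beta,
# with sigma_1 = 0.175 e-/s the per-pixel rms; it is the fitting function passed to
# scipy.optimize.curve_fit in plot_empty_apertures (our reading of the code).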
|
[
"ivelina.momcheva@yale.edu"
] |
ivelina.momcheva@yale.edu
|
dd86851e1b6b44d25f0a1e670ee136136ddee155
|
71f894d99a5e3118bc1d53953b22eb1f3097a679
|
/LkCRM/LkCRM/wsgi.py
|
31588500062e9ed1070e86b2092bad28ab2f7a5d
|
[] |
no_license
|
kevinliu007/CRM
|
081db5c942a97952b82598e7046747d820abe12f
|
84ce7d41dddac428cc41367f4888ada6bc972d55
|
refs/heads/master
| 2020-09-14T07:35:01.883726
| 2019-07-11T15:06:46
| 2019-07-11T15:06:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for LkCRM project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'LkCRM.settings')
application = get_wsgi_application()
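# A minimal way to serve this callable (our suggestion, assuming gunicorn is installed):
# gunicorn LkCRM.wsgi:application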
|
[
"caixiaobai@caixiaobai.com"
] |
caixiaobai@caixiaobai.com
|
36a0dfc565c95c26b1822a0d3b66a4c426abe740
|
f07a5976c336c738d00984945e62aec7054f6870
|
/kore4.py
|
e71b96a4a91ccfccf82c9a18a7a8ec259a4088b4
|
[] |
no_license
|
snorey/lexical-tools
|
8bca81e8b0231619ba9750c91f425b03ae652a01
|
3ab390885cb1b309a2727fdffe98efa3923e0db5
|
refs/heads/master
| 2021-01-24T00:44:20.052004
| 2018-02-24T22:22:20
| 2018-02-24T22:22:20
| 122,780,255
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,989
|
py
|
import datetime
import os
import re
import time
import urllib
import urllib2
dirr="C:\\Code\\Naver"
def naverextract(text):
title=text.split("<title>")[1].split(":")[0].strip()
if "'" in title:
title=title.split("'")[1].split("'")[0].strip()
pieces=text.split('<SPAN class="EQUIV">')[1:]
pieces=[x.split("</span>")[0] for x in pieces]
pieces=[re.sub("\<.*?\>","",x) for x in pieces]
pieces=[re.sub("[\<\(\,\[\r\n].*","",x) for x in pieces]
glosses=[x.strip() for x in pieces]
return title,glosses
def naverextract_ko(text):
pass
def naverloop(directory=dirr):
files=os.listdir(directory)
files=[os.path.join(directory,x) for x in files if "G" not in x] # skip Googley files
outlines=[]
for f in files:
stamp=f.split("\\")[-1].split(".")[0]
print stamp
text=open(f).read()
if not text: continue
title,glosses=naverextract(text)
outlines.append(stamp+"\t"+title+"\t"+", ".join(glosses))
print stamp,str(glosses)
return outlines
def googleextract(text):
catchstring1='<meta name="description" content="'
catchstring2="- Google"
if catchstring1 not in text:
return "",""
caught=text.split(catchstring1)[1].split(catchstring2)[0].strip()
if '"' in caught: caught=caught.split('"')[0].strip()
if ":" not in caught:
return "",""
title=caught.split(":")[0].strip()
glosses=caught.split(":")[1].split(";")
glosses=[x.strip() for x in glosses]
return title,glosses
def googloop(directory=dirr):
files=os.listdir(directory)
files=[os.path.join(directory,x) for x in files if "G" in x] # Googles only
outlines=[]
for f in files:
stamp=f.split("\\")[-1].split(".")[0]
print stamp
text=open(f).read()
if not text: continue
title,glosses=googleextract(text)
outlines.append(stamp+"\t"+title+"\t"+", ".join(glosses))
print stamp,str(glosses)
return outlines
def list2voc(path="C:\\Code\\koreanvocab2.txt"):
import pie
vocab=pie.Vocabulary(filter=False,language="Korean")
text=open(path).read()
text=text.decode("utf-8","ignore")
lines=text.split("\n")
lines=[tuple(x.split("\t")) for x in lines if "\t" in x]
for line in lines:
rank=line[0]
print rank.encode('utf-8','ignore')
if rank:
try:
tally=1000000/int(rank)
except:
tally=0
else:
tally=0
word=line[1]
newword=pie.Word(text=word)
newword.tally=tally
vocab.allwords.add(newword)
return vocab
def combine(file1,file2):# TSV of CSV glosses
dixie={}
dixie2={}
for line in file1.split("\n"): #files come in as text, not handles
parts=line.split("\t")
dixie[parts[1]]=[x.strip() for x in parts[2].split(",") if x.strip()]
for line in file2.split("\n"):
parts=line.split("\t")
if parts[1] in dixie.keys():
dixie[parts[1]].extend([x.strip() for x in parts[2].split(",") if x.strip()])
else:
dixie[parts[1]]=[x.strip() for x in parts[2].split(",") if x.strip()]
for d in dixie.keys():
newlist=[]
newlist2=[]
countlist=[]
for i in dixie[d]:
newlist.extend([x.strip() for x in re.split("[^a-zA-Z0-9\-\s]+",i) if x])
for n in newlist:
testers=["a","an","the","to"]
for t in testers:
if (n.startswith(t+" ") or n.startswith(t.title()+" ")) and len(n) > 1+len(t):
n=n[len(t):].strip()
break
newlist2.append(n)
countlist=list(set((newlist2.count(x),x) for x in newlist2))
countlist.sort()
countlist.reverse()
dixie[d]=newlist2
dixie2[d]=countlist
return dixie,dixie2
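# Example of the returned structures (our illustration with made-up glosses):
# dixie == {'WORD': ['apple', 'apology', 'apple']} # all cleaned glosses
# dixie2 == {'WORD': [(2, 'apple'), (1, 'apology')]} # glosses sorted by frequency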
def get_naver_en(word):
pass
def get_naver_ko(word):
import urllib, urllib2
url="http://krdic.naver.com/search.nhn?dic_where=krdic&query=%s&kind=keyword" % urllib.quote(word)
page=urllib2.urlopen(url,timeout=60).read()
matcher=re.escape('<a class="fnt15" href="') + '([^\"]*)' + re.escape('"><strong>') + '([^\<]*)' + re.escape('</strong><') # trailing "<" excludes partial headword matches
pieces=re.findall(matcher,page)
defs=[]
for p in pieces: # keep this simple for now; don't bother actually chasing to next page
if word not in p:
# print "No words!"
continue
else:
# print "Yay!"
pass
try:
chunk=page.split(p[0])[1].split("<p>")[1].split("<div")[0].split("<p")[0]
except Exception, e:
print "Caught",e
continue
chunk=re.sub("\<[^\>]*\>","",chunk)
chunk=chunk.replace("&lt;","<").replace("&gt;",">")
lines=[x.strip() for x in chunk.split("\n") if x.strip()]
defs.append(" / ".join(lines))
return defs
def naver_ko_loop(inpath,outpath="",directory="C:\\Code"):
if not outpath:
outpath=os.path.join(directory,"testdefs-"+datetime.date.today().isoformat()+".txt")
words=open(inpath).read().split("\n")
output=""
words=[x.strip() for x in words if x.strip()]
print len(words)
for w in words:
done=False
print words.index(w)
while not done:
try:
defs=get_naver_ko(w)
time.sleep(1)
except Exception, e:
print str(e)
time.sleep(5)
continue
done=True
if defs:
defstring=" // ".join(defs)
output+=w+"\t"+defstring+"\n"
else:
output+=w+"\t\n"
if outpath:
try:
open(outpath,"w").write(output)
print outpath
except Exception, e:
print str(e)
outdefs=dict([tuple(x.split("\t")) for x in output.split("\n") if x.strip()])
return outpath,outdefs
def get_examples_naver(word,kill_html=True): #UTF8-encoded hangul string
url="http://krdic.naver.com/search.nhn?kind=all&scBtn=true&query="+urllib.quote(word)
print url
output=[]
done=False
while not done:
try:
page=urllib2.urlopen(url).read()
done=True
except Exception, e:
print e
continue
try:
section=page.split('<span class="head_ex">')[1].split("</ul>")[0]
except IndexError, e:
print str(e)
return output
lines=section.split("<li>")[1:]
lines=[x.split("<p>")[1].split("<span class")[0].strip() for x in lines]
if kill_html:
lines=[re.sub("\<[^\<]*\>","",x) for x in lines]
return lines
|
[
"noreply@github.com"
] |
snorey.noreply@github.com
|
b68ab3cba51f473017a3cad5f4f2bf14b108ee1f
|
0aa0f63e54368583f0aa9d5df6def2a2abffc029
|
/13-Intro-to-Trees-master/pitchscrape/reviews/settings.py
|
f209def26a7ae16bd9761691042759fd13303b87
|
[] |
no_license
|
abendm/Pandas-Stuff
|
d9c13e3cd2ff5f0a210aed83fed3cc0531b590b9
|
f623d42100e53602ece47f079cb6b80288fbef55
|
refs/heads/master
| 2020-03-24T04:32:05.350498
| 2018-09-21T17:25:07
| 2018-09-21T17:25:07
| 142,456,361
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,083
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for reviews project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'reviews'
SPIDER_MODULES = ['reviews.spiders']
NEWSPIDER_MODULE = 'reviews.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'reviews (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'reviews.middlewares.ReviewsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'reviews.middlewares.ReviewsDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'reviews.pipelines.ReviewsPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
[
"abendm@gmail.com"
] |
abendm@gmail.com
|
8e86bc7463a15ee8ba540cebbdc6dbebe01e0474
|
461d7bf019b9c7a90d15b3de05891291539933c9
|
/bip_utils/bip39/bip39_entropy_generator.py
|
47c75cf8f3c76ff3b2cb1f678605ec4780e1d6e9
|
[
"MIT"
] |
permissive
|
renauddahou/bip_utils
|
5c21503c82644b57ddf56735841a21b6306a95fc
|
b04f9ef493a5b57983412c0ce460a9ca05ee1f50
|
refs/heads/master
| 2023-07-16T05:08:45.042084
| 2021-08-19T09:33:03
| 2021-08-19T09:33:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,419
|
py
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import os
from enum import IntEnum, unique
from typing import List, Union
@unique
class Bip39EntropyBitLen(IntEnum):
""" Enumerative for BIP-0039 entropy bit lengths. """
BIT_LEN_128 = 128,
BIT_LEN_160 = 160,
BIT_LEN_192 = 192,
BIT_LEN_224 = 224,
BIT_LEN_256 = 256,
class Bip39EntropyGeneratorConst:
""" Class container for BIP39 entropy generator constants. """
# Accepted entropy lengths in bit
ENTROPY_BIT_LEN: List[Bip39EntropyBitLen] = [
Bip39EntropyBitLen.BIT_LEN_128,
Bip39EntropyBitLen.BIT_LEN_160,
Bip39EntropyBitLen.BIT_LEN_192,
Bip39EntropyBitLen.BIT_LEN_224,
Bip39EntropyBitLen.BIT_LEN_256,
]
class Bip39EntropyGenerator:
""" Entropy generator class. It generates random entropy bytes with the specified length. """
def __init__(self,
bits_len: Union[int, Bip39EntropyBitLen]) -> None:
""" Construct class by specifying the bits length.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Raises:
ValueError: If the bit length is not valid
"""
if not self.IsValidEntropyBitLen(bits_len):
raise ValueError("Entropy bit length is not valid (%d)" % bits_len)
self.m_bits_len = bits_len
def Generate(self) -> bytes:
""" Generate random entropy bytes with the length specified during construction.
Returns:
bytes: Generated entropy bytes
"""
return os.urandom(self.m_bits_len // 8)
@staticmethod
def IsValidEntropyBitLen(bits_len: Union[int, Bip39EntropyBitLen]) -> bool:
""" Get if the specified entropy bit length is valid.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Returns:
bool: True if valid, false otherwise
"""
return bits_len in Bip39EntropyGeneratorConst.ENTROPY_BIT_LEN
@staticmethod
def IsValidEntropyByteLen(bytes_len: int) -> bool:
""" Get if the specified entropy byte length is valid.
Args:
bytes_len (int): Entropy length in bytes
Returns:
bool: True if valid, false otherwise
"""
return Bip39EntropyGenerator.IsValidEntropyBitLen(bytes_len * 8)
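# Usage sketch (our illustration, not part of the original module):
# generator = Bip39EntropyGenerator(Bip39EntropyBitLen.BIT_LEN_128)
# entropy = generator.Generate() # 16 random bytes (128 bits) from os.urandom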
|
[
"54482000+ebellocchia@users.noreply.github.com"
] |
54482000+ebellocchia@users.noreply.github.com
|
928fcb4241acb702ec2df763ded6e985a2e6fec8
|
8ff979eb571966fcd3a8a4ac359110945b21a471
|
/ud120/ex01/ClassifyNB.py
|
cc4faeceb6d24854ce4a127805292bcd20862bcb
|
[] |
no_license
|
ziriuz/udacity_learn
|
4a1ad2f4f6ed3670321b431a6a8d35027b1cede3
|
acdf479936368e0f5803fb0c1f004ee8a85fdbe1
|
refs/heads/master
| 2021-08-28T23:16:26.303069
| 2017-12-13T08:11:45
| 2017-12-13T08:11:45
| 113,209,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
# import the sklearn module for GaussianNB
from sklearn.naive_bayes import GaussianNB
def classify(features_train, labels_train):
# create classifier
gnb = GaussianNB()
# fit the classifier on the training features and labels
cls = gnb.fit(features_train, labels_train)
# return the fit classifier
return cls
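# Usage sketch (hypothetical toy data, not from the exercise):
# clf = classify([[-1., -1.], [1., 1.]], [0, 1])
# print clf.predict([[0.9, 0.9]]) # expected to print [1]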
|
[
"noreply@github.com"
] |
ziriuz.noreply@github.com
|
b3b85534011c46c43575a3576f1acb0d4bd933bd
|
dc939ac0e50b9a03ba1b24215415e628279fd17f
|
/mysite/congratulation/migrations/0003_auto_20150724_1413.py
|
5abe0836e4055ba37f9680a5f827444fc4d0b156
|
[] |
no_license
|
RamiliaR/django
|
d3b3e084bb3a860a0d67e1e10cb5a844472b533b
|
6fe2e0455578ea3c53365239d74c4274be0ee859
|
refs/heads/master
| 2021-01-10T18:50:07.143708
| 2015-08-20T18:00:58
| 2015-08-20T18:00:58
| 41,083,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('congratulation', '0002_auto_20150714_1310'),
]
operations = [
migrations.AddField(
model_name='customer',
name='email',
field=models.CharField(null=True, max_length=75),
),
migrations.AddField(
model_name='customer',
name='password',
field=models.CharField(null=True, max_length=30),
),
migrations.AddField(
model_name='customer',
name='username',
field=models.CharField(null=True, max_length=30),
),
]
|
[
"RamiliaNigmatullina@gmail.com"
] |
RamiliaNigmatullina@gmail.com
|
19c119d0ed1fe30a4bd6eede46042eb475aa7159
|
eb58d60b139300e99b4c5b0018e49a1b951d9b49
|
/hw1/temp.py
|
2c31d2677edfd1c2763e5a35b4ea2c2604b60b0f
|
[] |
no_license
|
Vamanan/inf553
|
114e6186f5349da996fc18c00773fc1ecb799edd
|
b2b826d3d0e2939eeba12b4b8df3f6bbd2b673da
|
refs/heads/master
| 2021-01-19T05:01:14.230363
| 2015-11-06T02:17:05
| 2015-11-06T02:17:05
| 42,913,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
import MapReduce
import sys
import re
"""
tf-df (term frequency / document frequency) example in the Simple Python MapReduce Framework
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# key: document identifier
# value: document contents
dname = record[0]
content = record[1].lower()
words = content.split()
for w in set(words):
if re.match(r'\w+$', w):
mr.emit_intermediate(w, (dname,words.count(w)))
def reducer(key, list_of_values):
# key: word
# value: df along with individual tf tuples
dftotal = len(list_of_values)
mr.emit((key, dftotal,list_of_values))
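# Worked example (illustrative record): for the input document
# ["doc1", "A cat and a cat"], the mapper emits one intermediate pair per
# distinct word: ("a", ("doc1", 2)), ("cat", ("doc1", 2)), ("and", ("doc1", 1));
# the reducer then emits e.g. ("cat", 1, [("doc1", 2)]) where 1 is the df.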
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
|
[
"passmaster10@bitbucket.org"
] |
passmaster10@bitbucket.org
|
cd2694476723a2181a5d941f4cd30cf0eec0d1b6
|
035df9fcfd3737dbb18c804c7c396b5f94bed435
|
/WatershedFermeture/watershed/Watershed - femeture.py
|
1ae445772cdcc995e3d744876151f5ea11d20d4b
|
[] |
no_license
|
ASTAugustin/IMA_P1_Projet
|
01d93759eaa8c180ec809de7e6359b9e3249061d
|
d48bc5b3d3f12acde0a1f2cee0ff6b1dcb4f197c
|
refs/heads/master
| 2020-07-30T09:11:55.913404
| 2019-11-05T10:08:38
| 2019-11-05T10:08:38
| 210,168,008
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,475
|
py
|
import numpy as np
from collections import deque
import cv2
# Implementation of:
# Pierre Soille, Luc M. Vincent, "Determining watersheds in digital pictures via
# flooding simulations", Proc. SPIE 1360, Visual Communications and Image Processing
# '90: Fifth in a Series, (1 September 1990); doi: 10.1117/12.24211;
# http://dx.doi.org/10.1117/12.24211
class Watershed(object):
MASK = -2
WSHD = 0
INIT = -1
INQE = -3
def __init__(self, levels=256):
self.levels = levels
# Neighbour (coordinates of) pixels, including the given pixel.
def _get_neighbors(self, height, width, pixel):
return np.mgrid[
max(0, pixel[0] - 1):min(height, pixel[0] + 2),
max(0, pixel[1] - 1):min(width, pixel[1] + 2)
].reshape(2, -1).T
    '''Usage example (the neighbours returned include the pixel itself):
>>> np.mgrid[0:3,0:3].reshape(2,-1).T
array([[0, 0],
[0, 1],
[0, 2],
[1, 0],
[1, 1],
[1, 2],
[2, 0],
[2, 1],
[2, 2]])
'''
def apply(self, image):
current_label = 0
flag = False
        fifo = deque()  # FIFO queue
height, width = image.shape
total = height * width
        labels = np.full((height, width), self.INIT, np.int32)  # matrix initialised to INIT (-1)
        reshaped_image = image.reshape(total)  # flattened grey-level values
# [y, x] pairs of pixel coordinates of the flattened image.
pixels = np.mgrid[0:height, 0:width].reshape(2, -1).T
# Coordinates of neighbour pixels for each pixel.
neighbours = np.array([self._get_neighbors(height, width, p) for p in pixels])
        # the neighbour lists are ragged when border pixels have fewer neighbours
if len(neighbours.shape) == 3:
# Case where all pixels have the same number of neighbours.
neighbours = neighbours.reshape(height, width, -1, 2)
else:
            # Case where pixels may have a different number of neighbours.
neighbours = neighbours.reshape(height, width)
        indices = np.argsort(reshaped_image)  # indices sorted by ascending grey value
        sorted_image = reshaped_image[indices]  # lowest grey value first
        sorted_pixels = pixels[indices]  # pixel coordinates in the same order
# self.levels evenly spaced steps from minimum to maximum.
levels = np.linspace(sorted_image[0], sorted_image[-1], self.levels) ## return an array from the min to the max containing levels numbers.
level_indices = []
current_level = 0
        # Get the indices that delimit pixels with different values.
for i in range(total):
if sorted_image[i] > levels[current_level]: ## higher than sea level
# Skip levels until the next highest one is reached.
while sorted_image[i] > levels[current_level]: current_level += 1
            level_indices.append(i)  # index where a new grey level starts
level_indices.append(total)
start_index = 0
for stop_index in level_indices:
# Mask all pixels at the current level.
for p in sorted_pixels[start_index:stop_index]:
labels[p[0], p[1]] = self.MASK
# Initialize queue with neighbours of existing basins at the current level.
for q in neighbours[p[0], p[1]]:
# p == q is ignored here because labels[p] < WSHD
if labels[q[0], q[1]] >= self.WSHD:
labels[p[0], p[1]] = self.INQE
fifo.append(p)
break
# Extend basins.
while fifo:
p = fifo.popleft()
# Label p by inspecting neighbours.
for q in neighbours[p[0], p[1]]:
# Don't set lab_p in the outer loop because it may change.
lab_p = labels[p[0], p[1]]
lab_q = labels[q[0], q[1]]
if lab_q > 0:
if lab_p == self.INQE or (lab_p == self.WSHD and flag):
labels[p[0], p[1]] = lab_q
elif lab_p > 0 and lab_p != lab_q:
labels[p[0], p[1]] = self.WSHD
flag = False
elif lab_q == self.WSHD:
if lab_p == self.INQE:
labels[p[0], p[1]] = self.WSHD
flag = True
elif lab_q == self.MASK:
labels[q[0], q[1]] = self.INQE
fifo.append(q)
# Detect and process new minima at the current level.
for p in sorted_pixels[start_index:stop_index]:
# p is inside a new minimum. Create a new label.
if labels[p[0], p[1]] == self.MASK:
current_label += 1
fifo.append(p)
labels[p[0], p[1]] = current_label
while fifo:
q = fifo.popleft()
for r in neighbours[q[0], q[1]]:
if labels[r[0], r[1]] == self.MASK:
fifo.append(r)
labels[r[0], r[1]] = current_label
start_index = stop_index
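        # Interpretation of the returned labels (see the class constants):
        # WSHD (0) marks watershed/ridge pixels; values > 0 are basin ids
        # assigned in flooding order.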
return labels
if __name__ == "__main__":
import numpy as np
np.set_printoptions(threshold=np.inf)
##from Watershed import Watershed
from PIL import Image
import matplotlib.pyplot as plt
import cv2
w = Watershed()
image = np.array(cv2.imread('Ex1.PNG', 0))
print(image)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
## Here, (2,2) means the two diameters of the ellipse.
binary = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
print(binary)
plt.imshow(binary)
plt.show()
##gray = cv2.cvtColor('Ex1.PNG', cv2.COLOR_RGB2GRAY)
##ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
##kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
##binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)
##print("xxxxxxxx")
##/labels = w.apply(binary)
#label = w.apply(image)
#print(labels)
##plt.imshow(labels, cmap='Paired', interpolation='nearest')
##/plt.imshow(labels)
##/plt.show()
|
[
"celestine_jinyi@163.com"
] |
celestine_jinyi@163.com
|
c74987a27b063d67e8ce049ee6e834b52db5fe03
|
c09b899e42e867e20993488e5f4e3d79c4eb779f
|
/movies/views.py
|
1aa3c1ecd81aa00297a088047aaa610c2979330b
|
[] |
no_license
|
JaycobDuffel/Vidly
|
33913b50096ac8b1cd54bcc62c503287d1b11c47
|
b0aa76e6e9634a7ab64b50652db941577567932a
|
refs/heads/master
| 2022-12-23T14:26:34.360432
| 2020-10-07T02:37:18
| 2020-10-07T02:37:18
| 300,044,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
from django.http import HttpResponse, Http404
from django.shortcuts import render, get_object_or_404
from .models import Movie
# Create your views here.
def index(request):
movies = Movie.objects.all()
return render(request, "movies/index.html", {"movies": movies})
def detail(request, movie_id):
movie = get_object_or_404(Movie, pk=movie_id)
return render(request, "movies/detail.html", {"movie": movie})
|
[
"jbdcoding@gmail.com"
] |
jbdcoding@gmail.com
|
3fd8971af0057cfe6f9120d8654640df8c971099
|
99e76e9e4c8031418c4c50217b48adf1d880cf2f
|
/setup.py
|
6974fdc5b21fd1b544eac798d4363569ad4198d7
|
[
"MIT"
] |
permissive
|
grow/grow-ext-responsive-styles
|
d75a5abb070613641e3da9f3f4cf7dc07e88c51f
|
bb3d8f68edc1f3e1bdf508bb5df8d5b296574e9b
|
refs/heads/master
| 2021-01-03T14:04:15.882718
| 2020-05-20T20:38:09
| 2020-05-20T20:38:09
| 240,096,948
| 0
| 0
|
MIT
| 2020-05-20T20:34:58
| 2020-02-12T19:27:42
|
HTML
|
UTF-8
|
Python
| false
| false
| 349
|
py
|
from setuptools import setup
setup(
name='grow-ext-responsive-styles',
version='1.0.0',
zip_safe=False,
license='MIT',
author='Grow Authors',
author_email='hello@grow.io',
include_package_data=True,
packages=[
'responsive_styles',
],
package_data={
'responsive_styles': ['*.html'],
},
)
|
[
"jeremydw@gmail.com"
] |
jeremydw@gmail.com
|
5a46605486c336baa1b97ab5f88da4b51e4a3852
|
4425cd9025e430735ad81cc09d126d0ce9929e07
|
/order/api/serializers.py
|
135e26fec2699d1787648ba38587774330d16a14
|
[] |
no_license
|
omkarrane/ecommerce-api
|
e7c611776977be0c753157fe9d2819f08bf86f78
|
f1d11277571df9cadbba7c8f1d2892cf8895b43c
|
refs/heads/master
| 2020-03-19T21:11:11.065801
| 2018-06-11T13:44:09
| 2018-06-11T13:44:09
| 136,931,627
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from django.core.mail import EmailMessage
from accounts.api.utils import generateToken, decodeToken
import random
from rest_framework.serializers import (
CharField,
EmailField,
HyperlinkedIdentityField,
ModelSerializer,
SerializerMethodField,
ValidationError,
PrimaryKeyRelatedField
)
from retail.models import (
Retail_Info,
Retail_Warehouse,
Retail_Product
)
from order.models import (
Order,
)
User = get_user_model()
class OrderSerializer(ModelSerializer):
class Meta:
model = Order
fields = [
'item_list',
'total_cost'
]
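# Hedged usage sketch (assumes an existing Order instance named `order`):
#
#   OrderSerializer(order).data  ->  {'item_list': ..., 'total_cost': ...}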
|
[
"omkarrane10@gmail.com"
] |
omkarrane10@gmail.com
|
17ebad59e8fb8cac9e54303768189e0f854b5502
|
e8fa6b783794bbd636d4ba815fd90390aabb4d73
|
/integration/combination/test_state_machine_with_api.py
|
20deaad43f516f96a4e5f315b5d52d5a729aa9ee
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
Skarlso/serverless-application-model
|
eb74d7dee11bf0a911e1e0dbb70bd03d4cbbbad7
|
172c832c053b3b5405dd4e85853386cc5a98841e
|
refs/heads/develop
| 2023-08-21T09:24:32.577637
| 2022-10-28T23:14:59
| 2022-10-28T23:14:59
| 325,041,387
| 1
| 1
|
Apache-2.0
| 2022-10-31T17:41:47
| 2020-12-28T15:01:05
|
Python
|
UTF-8
|
Python
| false
| false
| 4,250
|
py
|
from unittest.case import skipIf
from integration.helpers.base_test import BaseTest
from integration.helpers.common_api import get_policy_statements
from integration.helpers.resource import current_region_does_not_support
from integration.config.service_names import STATE_MACHINE_WITH_APIS
@skipIf(
current_region_does_not_support([STATE_MACHINE_WITH_APIS]),
"StateMachine with APIs is not supported in this testing region",
)
class TestStateMachineWithApi(BaseTest):
def test_state_machine_with_api(self):
self.create_and_verify_stack("combination/state_machine_with_api")
outputs = self.get_stack_outputs()
region = outputs["Region"]
partition = outputs["Partition"]
state_name_machine_arn = outputs["MyStateMachineArn"]
implicit_api_role_name = outputs["MyImplicitApiRoleName"]
implicit_api_role_arn = outputs["MyImplicitApiRoleArn"]
explicit_api_role_name = outputs["MyExplicitApiRoleName"]
explicit_api_role_arn = outputs["MyExplicitApiRoleArn"]
rest_apis = self.get_stack_resources("AWS::ApiGateway::RestApi")
implicit_rest_api_id = next(
(x["PhysicalResourceId"] for x in rest_apis if x["LogicalResourceId"] == "ServerlessRestApi"), None
)
explicit_rest_api_id = next(
(x["PhysicalResourceId"] for x in rest_apis if x["LogicalResourceId"] == "ExistingRestApi"), None
)
self._test_api_integration_with_state_machine(
implicit_rest_api_id,
"POST",
"/pathpost",
implicit_api_role_name,
implicit_api_role_arn,
"MyStateMachinePostApiRoleStartExecutionPolicy",
state_name_machine_arn,
partition,
region,
)
self._test_api_integration_with_state_machine(
explicit_rest_api_id,
"GET",
"/pathget",
explicit_api_role_name,
explicit_api_role_arn,
"MyStateMachineGetApiRoleStartExecutionPolicy",
state_name_machine_arn,
partition,
region,
)
def _test_api_integration_with_state_machine(
self, api_id, method, path, role_name, role_arn, policy_name, state_machine_arn, partition, region
):
apigw_client = self.client_provider.api_client
resources = apigw_client.get_resources(restApiId=api_id)["items"]
resource = get_resource_by_path(resources, path)
post_method = apigw_client.get_method(restApiId=api_id, resourceId=resource["id"], httpMethod=method)
method_integration = post_method["methodIntegration"]
self.assertEqual(method_integration["credentials"], role_arn)
# checking if the uri in the API integration is set for Step Functions State Machine execution
expected_integration_uri = "arn:" + partition + ":apigateway:" + region + ":states:action/StartExecution"
self.assertEqual(method_integration["uri"], expected_integration_uri)
# checking if the role used by the event rule to trigger the state machine execution is correct
start_execution_policy = get_policy_statements(role_name, policy_name, self.client_provider.iam_client)
self.assertEqual(len(start_execution_policy), 1, "Only one statement must be in Start Execution policy")
start_execution_policy_statement = start_execution_policy[0]
self.assertTrue(type(start_execution_policy_statement["Action"]) != list)
policy_action = start_execution_policy_statement["Action"]
self.assertEqual(
policy_action,
"states:StartExecution",
"Action referenced in event role policy must be 'states:StartExecution'",
)
self.assertTrue(type(start_execution_policy_statement["Resource"]) != list)
referenced_state_machine_arn = start_execution_policy_statement["Resource"]
self.assertEqual(
referenced_state_machine_arn,
state_machine_arn,
"State machine referenced in event role policy is incorrect",
)
def get_resource_by_path(resources, path):
return next((resource for resource in resources if resource["path"] == path), None)
|
[
"noreply@github.com"
] |
Skarlso.noreply@github.com
|
9202483cde896d82ddfe2dbd4a5a205224bd657e
|
8c8eaf2a82d74d9652fbe23e23b0f5856a947e7e
|
/tokens/models.py
|
6fc0c00181648dcf65eb4b1f0060c3ce5fc4f7a9
|
[] |
no_license
|
CallistoNetwork/Galileo-Backend
|
2cb0c1dbe20f43a56cab566ee77338cc68f8fda8
|
beec34cc9e480b49b6efbe0bd1cd19ddcfcb8340
|
refs/heads/master
| 2020-04-03T02:29:00.910607
| 2019-01-27T21:12:07
| 2019-01-27T21:12:07
| 151,890,661
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,584
|
py
|
from django.db import models
from explorer.models import TimeStampModel
class Token(TimeStampModel):
"""
* name -> Name of token | String | optional
* symbol -> Trading symbol | String | optional
* total_supply -> The total supply if the token | Decimal | optional
* decimals -> Number of decimal place the token can be subdivided to | Int
| optional
* type -> type of Token | String | required
* cataloged -> if token information has been cataloged | Boolean | optional
* contract_address_hash -> Foreign key to address | ForeignKey | required
"""
name = models.CharField(
max_length=255,
blank=True
)
symbol = models.CharField(
max_length=50,
blank=True
)
total_supply = models.DecimalField(
null=True,
max_digits=120,
decimal_places=0
)
decimals = models.PositiveIntegerField(
null=True
)
token_type = models.CharField(
max_length=100,
)
cataloged = models.BooleanField(
null=True
)
contract_address_hash = models.ForeignKey(
'address.Address',
on_delete=models.PROTECT
)
class TokenTransfer(TimeStampModel):
"""
* amount -> token transfer amount | Decimal | Optional
* from_address_hash -> Address send token | Foreign Key | Required
* to_address_hash -> Address received token | Foreign Key | Required
* token_contract_address_hash -> Address of the token contract
| Forreign Key | Required
* token_id -> Id of the token, only ERC-721 tokens | Optional
* transaction_hash -> Transaction token | Foreign Key | Required
* log_index -> Index of the corresponding Log in the transaction
| Positive Int | Required
"""
amount = models.DecimalField(
null=True,
max_digits=120,
decimal_places=0
)
from_address_hash = models.ForeignKey(
'address.Address',
on_delete=models.PROTECT,
related_name='token_transfer_from_address'
)
to_address_hash = models.ForeignKey(
'address.Address',
on_delete=models.PROTECT,
related_name='token_transfer_to_address'
)
token_contract_address_hash = models.ForeignKey(
'address.Address',
on_delete=models.PROTECT,
related_name='token_contract_address'
)
token_id = models.PositiveIntegerField(
null=True
)
transaction_hash = models.ForeignKey(
'transactions.Transaction',
on_delete=models.PROTECT
)
log_index = models.PositiveIntegerField()
|
[
"gollum23@gmail.com"
] |
gollum23@gmail.com
|
af935ba661ffbdb6c3921e41c3c65c2ba9235ccd
|
843d9f17acea5cfdcc5882cf8b46da82160c251c
|
/adafruit_stepper.py
|
8e9319c17ea13b32312acbe50d018791ab2ea40a
|
[] |
no_license
|
gunny26/raspberry
|
7c1da63785c86412af9fa467ea231b19a97f4384
|
e4eb0d2f537b319d41b6c50b59e69fb297c62d25
|
refs/heads/master
| 2016-09-06T14:02:30.122102
| 2014-01-29T16:31:08
| 2014-01-29T16:31:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
import RPi.GPIO as GPIO
import time
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
enable_pin = 18
coil_A_1_pin = 4
coil_A_2_pin = 17
coil_B_1_pin = 23
coil_B_2_pin = 24
GPIO.setup(enable_pin, GPIO.OUT)
GPIO.setup(coil_A_1_pin, GPIO.OUT)
GPIO.setup(coil_A_2_pin, GPIO.OUT)
GPIO.setup(coil_B_1_pin, GPIO.OUT)
GPIO.setup(coil_B_2_pin, GPIO.OUT)
GPIO.output(enable_pin, 1)
def forward(delay, steps):
for i in range(0, steps):
setStep(1, 0, 1, 0)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(1, 0, 0, 1)
time.sleep(delay)
def backwards(delay, steps):
for i in range(0, steps):
setStep(1, 0, 0, 1)
time.sleep(delay)
setStep(0, 1, 0, 1)
time.sleep(delay)
setStep(0, 1, 1, 0)
time.sleep(delay)
setStep(1, 0, 1, 0)
time.sleep(delay)
def setStep(w1, w2, w3, w4):
GPIO.output(coil_A_1_pin, w1)
GPIO.output(coil_A_2_pin, w2)
GPIO.output(coil_B_1_pin, w3)
GPIO.output(coil_B_2_pin, w4)
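# Full-step drive pattern used by forward()/backwards(): two coils are
# energized at every step, and each setStep() call flips one coil polarity:
#   forward:   (1,0,1,0) -> (0,1,1,0) -> (0,1,0,1) -> (1,0,0,1)
#   backwards: the same four states traversed in reverse order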
while True:
try:
delay = raw_input("Delay between steps (milliseconds)?")
steps = raw_input("How many steps forward? ")
forward(int(delay) / 1000.0, int(steps))
steps = raw_input("How many steps backwards? ")
backwards(int(delay) / 1000.0, int(steps))
except KeyboardInterrupt:
GPIO.cleanup()
|
[
"arthur.messner@gmail.com"
] |
arthur.messner@gmail.com
|
f7675475bf4180ae4b05a6af1aebe4521077a136
|
e131e752d826ae698e12e7bc0583362741f9d942
|
/AWS.py
|
c886890f56cf208b48066e6c151d54611fc0b574
|
[] |
no_license
|
abalberchak/TouchFace
|
ba30565be91b848126524aa47377789253370e04
|
d093ece8890b68c72e0855a024d908105df99b94
|
refs/heads/master
| 2021-01-11T01:43:35.067808
| 2016-09-29T03:41:13
| 2016-09-29T03:41:13
| 69,530,129
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
#----------------------------------------- Intent Schema Below:------------------------------
{
"intents": [
{
"intent": "AMAZON.ResumeIntent"
},
{
"intent": "AMAZON.PauseIntent"
},
{
"intent": "DojoInfoIntent"
},
{
"intent": "AMAZON.HelpIntent"
},
{
"intent": "AMAZON.StopIntent"
},
{
"intent": "TextBrendenIntent"
},
{
"intent": "GetTouchFaceIntent"
},
{
"intent": "DojoBrendenIntent"
},
{
"intent": "AskBrendan"
},
{
"intent": "twilioIntent"
},
{
"intent": "GroupTextIntent",
"slots": [
{
"name": "Name",
"type": "MEMBERS"
}
]
}
]
}
#----------------------------------------- Utterances Below:------------------------------
DojoInfoIntent what is the coding dojo
DojoInfoIntent tell me about the coding dojo
TextBrendenIntent Text Brendan
GetTouchFaceIntent Tell what does Brenden say
DojoBrendenIntent who is brenden
AskBrendan what is touchface
twilioIntent hi annet
GroupTextIntent text {Name}
|
[
"minhpn.org.ec@gmail.com"
] |
minhpn.org.ec@gmail.com
|
4f21bdabf36e65773d6c9289dad471ce6aa16e31
|
178ae62be7de20a50f96361e80bdcff5a5493ae2
|
/koica/templatetags/koica.py
|
36b3a706fcb6f684e4f9896f13b5cc8b25353d75
|
[
"MIT"
] |
permissive
|
synw/django-koica
|
a043800c15fad69f2024557e62fcf0ac4808ffae
|
d8b1c9fa70c428f0aa0db0c523524e9d2ef27377
|
refs/heads/master
| 2021-01-10T03:15:24.570691
| 2015-12-09T14:55:29
| 2015-12-09T14:55:29
| 46,188,691
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
from django import template
from koica.utils import sanitize_html
register = template.Library()
@register.filter(is_safe=True)
def remove_pre(value):
return sanitize_html(value, remove_pre=True)
|
[
"synwe@yahoo.fr"
] |
synwe@yahoo.fr
|
0389e54314c5db68c26748f6b8dc17cb73d58775
|
fc2eb6f42183d7ca0142a039400548194130ff47
|
/items.py
|
23c16a8d8a825dcbafff96eb5e47f06777634b2e
|
[] |
no_license
|
vinaygulani1/RealEstatePricePrediction
|
5e3cf7ab5659f1f28a3cf81853fca2a42e4044ac
|
8cb30eea4c10147b4dba69058620e092b06617a1
|
refs/heads/master
| 2021-01-10T01:27:21.865451
| 2015-11-20T08:15:18
| 2015-11-20T08:15:18
| 46,197,917
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 615
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class Property(scrapy.Item):
data_code = scrapy.Field()
latitude = scrapy.Field()
longtitude = scrapy.Field()
property_type = scrapy.Field()
address = scrapy.Field()
city = scrapy.Field()
askprice = scrapy.Field()
sellingprice = scrapy.Field()
year_built = scrapy.Field()
living_area = scrapy.Field()
num_parking = scrapy.Field()
num_bath = scrapy.Field()
num_bed = scrapy.Field()
num_room = scrapy.Field()
sold_date = scrapy.Field()
|
[
"vinay.gulani@relishly.com"
] |
vinay.gulani@relishly.com
|
630cda4283ce85c051de8920c72c86b1b92d2ca7
|
a305456a1b6509437883bb3de8d0c3f2968694a1
|
/apis/connect/auth.py
|
55e8b22c76ffb214dbba1f656da60e331a09384f
|
[] |
no_license
|
yoshow/tornado-quick-start
|
6ebeeb87e09b5a9c357cdc2332c7d80fdaa96d06
|
6e00dd3a873e624c1a39cc5f94d47ddc1a366a00
|
refs/heads/master
| 2021-01-18T15:27:31.410263
| 2018-06-19T16:21:59
| 2018-06-19T16:21:59
| 86,654,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,223
|
py
|
# -*- coding: utf-8 -*-
"""
OAuth 2.0
"""
import uuid
import json
from sqlalchemy import select, text
import x.data.orm
from x.web.apis import WebApiResponse
from models.membership import AccountInfo, MemberInfo
from models.connect import ConnectAuthorizationCodeInfo, ConnectAccessTokenInfo
class Auth(object):
    """ Authentication management """
def authorize(self, req, res=WebApiResponse()):
"""
        Authorization check
        :param clientId: client application id
        :param redirectUri: redirect URI
        :param responseType: response type
        :param scope: authorization scope
        :param style: custom display style
        :param loginName: login name
        :param password: password
:returns: this is a description of what is returned
:raises keyError: raises an exception
"""
clientId = req.get("clientId")
redirectUri = req.get("redirectUri")
responseType = req.get("responseType")
scope = req.get("scope")
style = req.get("style")
loginName = req.get("loginName")
password = req.get("password")
session = x.data.orm.createSession()
        # Fetch the current account
account = session.query(AccountInfo).filter(
text("loginName='" + loginName + "' and password='" + password + "'")).first()
if account is None:
if responseType is None:
res.message.returnCode = 1
res.message.value = u"帐号或者密码错误。"
return res
else:
                # a response type was supplied
                # TODO: render the login page
pass
else:
            # Check whether an authorization code already exists
            # for this clientId / account pair
authorizationCode = session.query(ConnectAuthorizationCodeInfo).filter(
text("appKey='" + clientId + "' and accountId='" + account.id + "'")).first()
            # Create the authorization code record if it does not exist
if authorizationCode is None:
                # Populate the new record
authorizationCode = ConnectAuthorizationCodeInfo()
authorizationCode.id = str(uuid.uuid4())
authorizationCode.appKey = clientId
authorizationCode.accountId = account.id
authorizationCode.authorizationScope = scope == '' and "public" or scope
session.add(authorizationCode)
                # write to the database without committing yet
session.flush()
session.commit()
            # Set up the access token for this session
accessToken = session.query(ConnectAccessTokenInfo).filter(
text("appKey='" + clientId + "' and accountId='" + account.id + "'")).first()
            # Create the access token record if it does not exist
if accessToken is None:
accessToken = ConnectAccessTokenInfo(id=str(uuid.uuid4()))
                # Populate the new record
accessToken.id = str(uuid.uuid4())
accessToken.appKey = clientId
accessToken.accountId = account.id
accessToken.authorizationScope = scope == '' and "public" or scope
session.add(accessToken)
                # write to the database without committing yet
# session.flush()
session.commit()
            # Record the result
res.data = accessToken
res.message.returnCode = 0
res.message.value = u"验证成功。"
return res
print "authorize loginName:" + loginName + " password:" + password
res.message.returnCode = 0
res.message.value = u"执行成功。"
return res
def token(self, req, res=WebApiResponse()):
"""
        Fetch the token information
        :param code: authorization code
:returns: this is a description of what is returned
:raises keyError: raises an exception
"""
code = req.get("code")
session = x.data.orm.createSession()
authorizationCode = session.query(ConnectAuthorizationCodeInfo).filter_by(
id=code).first()
        # Return an error if the authorization code cannot be found
if authorizationCode is None:
res.message.returnCode = 1
res.message.value = "authorization code not find"
return res
accessToken = session.query(ConnectAccessTokenInfo).filter(
text("appKey='" + authorizationCode.appKey + "' and accountId='" + authorizationCode.accountId + "'")).first()
if accessToken is None:
res.message.returnCode = 1
res.message.value = "access code not find"
return res
return res
    def refresh(self, req, res=WebApiResponse()):
        """ Refresh the token information """
print "token"
return "connect.auth.refresh"
    def me(self, req, res=WebApiResponse()):
        """ Current user information """
token = req.get("token")
session = x.data.orm.createSession()
accessToken = session.query(
ConnectAccessTokenInfo).filter_by(id=token).first()
if accessToken is None:
res.message.returnCode = 1
res.message.value = "people not find"
return res
else:
# 根据访问令牌返回当前湖用户
# IMemberInfo member =
# MembershipManagement.Instance.MemberService[accessTokenInfo.AccountId]
member = session.query(MemberInfo).filter_by(
id=accessToken.accountId).first()
if member is None:
res.message.returnCode = 1
res.message.value = "people not find"
return res
            # Return the member's profile
res.data = member
res.message.returnCode = 0
res.message.value = "success"
return res
    def ToPeopleJson(self, account):
        """ Format account information into a fixed structure """
return {
"id": account.id,
"name": account.name,
"loginName": account.loginName,
# "certifiedAvatar": account.certifiedAvatar,
"status": account.status
}
|
[
"ruanyu@live.com"
] |
ruanyu@live.com
|
ed949aaf72486dd9f746cc86f1ee975b0529cb89
|
3bc7c8a96be2cf2b60f8c1757e71a980140bd43b
|
/bus_plotter.py
|
91c2983b5b0cd903810a8fe00e57eba7302aea54
|
[] |
no_license
|
chrisjkuch/busdata
|
98bff373efaacdd4cb6d1e618165d3de63296d69
|
f68ba523191dbcb50d6ffaeb20c73e7fc6225c0e
|
refs/heads/master
| 2020-12-31T06:56:27.346245
| 2017-05-17T03:15:27
| 2017-05-17T03:15:27
| 80,571,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,629
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 9 17:28:01 2016
@author: chris
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
import datetime as dt
def dayTimeToDateTime(fltime):
hours = int(fltime * 24)
minutes = int(fltime * 24 * 60 - hours * 60)
seconds = 0 #int(fltime * 24 * 3600 - hours * 60 * 60 - minutes * 60)
return(dt.datetime(2016,8,1,hours,minutes,seconds))
def setSameDay(dt_obj):
return(dt.datetime(2016,8,1,dt_obj.hour,dt_obj.minute,dt_obj.second))
# Temporary: Pick the stop we are interested in
stop = u'Sherman/Foster'
etoc = pd.read_csv('evanstontochicago.csv')
ctoe = pd.read_csv('chicagotoevanston.csv')
# Get the x-coordinates of vertical lines marking scheduled times
# Read the log data from the spreadsheet
busdata = pd.read_csv('test.csv')
bd = busdata[[u'Stop Name', u'Time']]
bd[u'Time'] = bd[u'Time'].astype('datetime64').apply(lambda x: md.date2num(setSameDay(x)))
groupedstops = bd.groupby(u'Stop Name')
curStop = bd.groupby(u'Stop Name').groups[stop]
times = bd.iloc[curStop, 1]
#times = times.apply(lambda x: md.date2num(setSameDay(x)))
# Set up the histogram
fig = plt.figure()
my_bins = md.date2num(dt.datetime(2016,8,1,0,0,0)) + np.linspace(6,24,(18*60)+1)/24.
hfmt = md.DateFormatter('%H:%M')
thisroute = etoc
thesestops = list(thisroute.columns.values)
nplots = len(thesestops)
i = 1
all_axes = []
for stop in thesestops:
if(i > 1):
fig.add_subplot(nplots, 1, i, sharex=all_axes[0], sharey=all_axes[0])
else:
fig.add_subplot(nplots, 1, 1)
i += 1
curStop = groupedstops.groups[stop]
curTimes = bd.iloc[curStop, 1]
ax = curTimes.plot.hist(bins=my_bins)
ax2 = curTimes[-1:].plot.hist(bins=my_bins)
all_axes.append(ax)
#nboundsched = etoc[stop].apply(lambda x: md.date2num(dayTimeToDateTime(x)))
cursched = thisroute[stop].apply(lambda x: md.date2num(dayTimeToDateTime(x)))
top = 6
#y1n = [0] * len(nboundsched)
#y2n = [top] * len(nboundsched)
y1 = [0] * len(cursched)
y2 = [top] * len(cursched)
plt.vlines(cursched, y1, y2)
ax.xaxis.set_major_locator(md.HourLocator())
ax.xaxis.set_major_formatter(hfmt)
ax.yaxis.set_ticks([])
ax.yaxis.set_label_position('right')
plt.ylabel(stop, rotation=0)
#stopdata = scheduledata[stop].apply(lambda x: md.date2num(dayTimeToDateTime(x)))
#y1 = [0] * len(stopdata)
#y2 = [2] * len(stopdata)
plt.xticks(rotation=45)
plt.xlim([md.date2num(dt.datetime(2016,8,1,6,0,0)), md.date2num(dt.datetime(2016,8,1,23,59,0))])
plt.gcf().subplots_adjust(hspace=0)
plt.show()
|
[
"noreply@github.com"
] |
chrisjkuch.noreply@github.com
|
3b9a743d044c3344202f2e666d176447cdb0156d
|
d8fa64b4176c8c434e8812faed51bc246e6794d7
|
/pythoncode/homework_api/test_tag_manage.py
|
a36ea37af271617e587d652c840657a68702532f
|
[] |
no_license
|
tanya931117/TestDev
|
0103ff216af48d80811e81ac91756fdc878ef43d
|
3eba391c1405238e6a7ccc267b1f3722364a0a5c
|
refs/heads/master
| 2022-12-09T18:42:29.940915
| 2020-09-10T11:38:07
| 2020-09-10T11:38:07
| 270,223,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,658
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/8 13:27
# @Author : tanya
# @File : test_tag_manage.py
# @Software: PyCharm
import pytest
import yaml
from pythoncode.PO.contact_tag_api import ContactTagApi
from pythoncode.PO.we_work_api import WeWorkApi
@pytest.mark.usefixtures("start_case")
class TestTagManage():
def setup_class(self):
self.contact_tag = ContactTagApi()
        wework = WeWorkApi()
self.token = wework.get_token(self.contact_tag._contact_secret)
    # Load parameters with the pytest_generate_tests hook
func_params = {"test_all": ["tagname", "add_tag_api.yml"],
"test_tag_mem":["tagid,userlist","add_tag_mem_api.yml"]}
def get_params(path):
with open(path, "r",encoding="utf-8") as f:
params = yaml.safe_load(f)
return params
@pytest.mark.skip
def test_all(self,tagname):
response = self.contact_tag.get_tag_list(self.token)
result = response.json()
#{'errcode': 0, 'errmsg': 'ok', 'taglist': [{'tagid': 1, 'tagname': '标签一'}, {'tagid': 2, 'tagname': '标签二'}]}
if len(result["taglist"]) >0:
for tag in result["taglist"]:
if tag["tagname"] == tagname:
self.contact_tag.del_tag({"tagid":tag["tagid"]},self.token)
break
tag = {
"tagname": tagname
}
res = self.contact_tag.add_tag(tag,self.token)
assert res.json()["errmsg"].startswith("created")
def test_tag_mem(self,tagid,userlist):
response = self.contact_tag.get_tag_mems({"tagid":tagid},self.token)
result = response.json()
#{'errcode': 0, 'errmsg': 'ok', 'userlist': [{'userid': 'LiTan', 'name': '李土云'}, {'userid': 'Miao', 'name': '张世锋'}, {'userid': 'eunhyuk.lee', 'name': 'LeeEunHyuk'}, {'userid': 'donghae.lee', 'name': 'LeeDongHae'}], 'partylist': [], 'tagname': '标签一'}
if len(result["userlist"]) > 0:
userlist_del = []
for user in result["userlist"]:
if user["userid"] in userlist:
userlist_del.append(user["userid"])
if len(userlist_del) > 0 :
params = {
"tagid": tagid,
"userlist": userlist_del
}
self.contact_tag.del_tag_mem(params,self.token)
params = {
"tagid": tagid,
"userlist": userlist
}
res = self.contact_tag.add_tag_mem(params,self.token).json()
#{'errcode': 0, 'errmsg': 'ok', 'invalidparty': []}
assert res["errmsg"] == "ok"
|
[
"tanya_li_931117@163.com"
] |
tanya_li_931117@163.com
|
bee4f557d4eaf73b50b39fe6da8aff6cd3065efd
|
fc8ef4d310eaf76384cd1f29a45e65ffd1849a6b
|
/back-end/api/migrations/0004_article_source_name.py
|
371ef4c62f6d991ee5521f8b6c4f6db3a7a673c5
|
[] |
no_license
|
mykola829/webl_lab_3_4
|
70c6c52e23962741971efc65a5a15aed5822059f
|
c47f09a03157e71687294b7a61863d775c6e95e2
|
refs/heads/master
| 2022-09-13T02:27:17.381772
| 2020-06-04T11:57:55
| 2020-06-04T11:57:55
| 268,151,334
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 3.0.6 on 2020-06-03 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20200603_1059'),
]
operations = [
migrations.AddField(
model_name='article',
name='source_name',
field=models.CharField(default=0, max_length=512),
preserve_default=False,
),
]
|
[
"mykola829@gmail.com"
] |
mykola829@gmail.com
|
394178ecf0b8ba5aa5f8ffac26bfc54459935fb5
|
812f7c3982f2525bc7c3013938f70ffdda4abe1e
|
/vehicles/admin.py
|
875e37a21f750efb0d84da238affc5a7c39c522c
|
[] |
no_license
|
roditashakya/ebooking
|
7393aa651d2ddc979016000d62d6f44a3caddf57
|
3cdfe3a9b911835b4fcc32ae63e1e28983a4d6d9
|
refs/heads/master
| 2020-05-01T09:42:29.961770
| 2019-03-24T19:54:56
| 2019-03-24T19:54:56
| 177,406,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
from django.contrib import admin
# Register your models here.
from .models import SeatTemplate, TravelDetail, Vehicle, Booking, BookingUser, Seat, Schedule, Search
admin.site.register(SeatTemplate)
admin.site.register(TravelDetail)
admin.site.register(Vehicle)
admin.site.register(Booking)
admin.site.register(BookingUser)
admin.site.register(Seat)
admin.site.register(Schedule)
admin.site.register(Search)
|
[
"rodishakya@gmail.com"
] |
rodishakya@gmail.com
|
70b411ba66521bde662ff464e6ab782442fa0581
|
1508f7da93705839660e4fdfb87df7a9664bf087
|
/a10API/a10API/flask/bin/migrate
|
bff34539b04e8d820b8b866d8ef3ee3bbc9995fb
|
[] |
no_license
|
Younglu125/A10_Networks
|
1a1ecebb28dd225f6a1f901a7c28350300df356d
|
78a177ae4c8638d58dc873e4b1c589a1d5aaa717
|
refs/heads/master
| 2020-06-17T00:35:30.325740
| 2016-03-21T18:17:30
| 2016-03-21T18:17:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
#!/home/echou/a10API/flask/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'sqlalchemy-migrate==0.7.2','console_scripts','migrate'
__requires__ = 'sqlalchemy-migrate==0.7.2'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('sqlalchemy-migrate==0.7.2', 'console_scripts', 'migrate')()
)
|
[
"info@pythonicneteng.com"
] |
info@pythonicneteng.com
|
|
5c578a84b20bd789b433432dfab0e9c7bdd67379
|
b08a6adc56016a706d84752bcfb6d5bdf014f9fd
|
/easyocr/DBNet/assets/ops/dcn/functions/deform_pool.py
|
b4c9943cbc10212f1db23910dcafbd2a2d1b7435
|
[
"Apache-2.0"
] |
permissive
|
JaidedAI/EasyOCR
|
c83903d2f0ac2adfda89b35274e71a410f7d12e8
|
f947eaa36a55adb306feac58966378e01cc67f85
|
refs/heads/master
| 2023-08-08T08:34:28.434530
| 2023-07-04T12:44:09
| 2023-07-04T12:44:09
| 247,266,215
| 20,057
| 2,937
|
Apache-2.0
| 2023-09-12T22:16:00
| 2020-03-14T11:46:39
|
Python
|
UTF-8
|
Python
| false
| false
| 5,953
|
py
|
'''
Modified by Jaided AI
Released Date: 31/08/2022
Description:
- Add support for Deformable convolution operator on CPU for forward propagation.
- Change to Just-in-Time loading approach
'''
import os
import warnings
import torch
from torch.autograd import Function
from torch.utils import cpp_extension
# TODO - Jaided AI:
# 1. Find a better way to handle and support both Ahead-of-Time (AoT) and Just-in-Time (JiT) compilation.
# 2. Find a better way to report error to help pinpointing issues if there is any.
# Note on JiT and AoT compilation:
# This module supports both AoT and JiT compilation approaches. JiT is hardcoded as the default. If AoT compiled objects are present, it will supercede JiT compilation.
def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + '\n'
warnings.formatwarning = custom_formatwarning
dcn_dir = os.path.dirname(os.path.dirname(__file__))
try:
from .. import deform_pool_cpu
warnings.warn("Using precompiled deform_pool_cpu from {}".format(deform_pool_cpu.__file__))
dcn_cpu_ready = True
except:
try:
warnings.warn("Compiling deform_pool_cpu ...")
warnings.warn("(This may take a while if this module is loaded for the first time.)")
deform_pool_cpu = cpp_extension.load(
name="deform_pool_cpu",
sources=[os.path.join(dcn_dir, 'src', "deform_pool_cpu.cpp"),
os.path.join(dcn_dir, 'src', "deform_pool_cpu_kernel.cpp")])
warnings.warn("Done.")
dcn_cpu_ready = True
except Exception as error:
        warnings.warn(' '.join([
            "Failed to import or compile 'deform_pool_cpu' with the following error",
            "{}".format(error),
            "Deformable convolution and DBNet will not be able to run on CPU."
]))
dcn_cpu_ready = False
if torch.cuda.is_available():
try:
from .. import deform_pool_cuda
warnings.warn("Using precompiled deform_pool_cuda from {}".format(deform_pool_cuda.__file__))
dcn_cuda_ready = True
except:
try:
warnings.warn("Compiling deform_pool_cuda ...")
warnings.warn("(This may take a while if this module is loaded for the first time.)")
deform_pool_cuda = cpp_extension.load(
name="deform_pool_cuda",
sources=[os.path.join(dcn_dir, 'src', "deform_pool_cuda.cpp"),
os.path.join(dcn_dir, 'src', "deform_pool_cuda_kernel.cu")])
warnings.warn("Done.")
dcn_cuda_ready = True
except Exception as error:
            warnings.warn(' '.join([
                "Failed to import or compile 'deform_pool_cuda' with the following error",
                "{}".format(error),
                "Deformable convolution and DBNet will not be able to run on GPU."
]))
dcn_cuda_ready = False
class DeformRoIPoolingFunction(Function):
@staticmethod
def forward(ctx,
data,
rois,
offset,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
ctx.spatial_scale = spatial_scale
ctx.out_size = out_size
ctx.out_channels = out_channels
ctx.no_trans = no_trans
ctx.group_size = group_size
ctx.part_size = out_size if part_size is None else part_size
ctx.sample_per_part = sample_per_part
ctx.trans_std = trans_std
assert 0.0 <= ctx.trans_std <= 1.0
n = rois.shape[0]
output = data.new_empty(n, out_channels, out_size, out_size)
output_count = data.new_empty(n, out_channels, out_size, out_size)
if not data.is_cuda and dcn_cpu_ready:
deform_pool_cpu.deform_psroi_pooling_cpu_forward(
data, rois, offset, output, output_count, ctx.no_trans,
ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
ctx.part_size, ctx.sample_per_part, ctx.trans_std)
elif data.is_cuda and dcn_cuda_ready:
deform_pool_cuda.deform_psroi_pooling_cuda_forward(
data, rois, offset, output, output_count, ctx.no_trans,
ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
ctx.part_size, ctx.sample_per_part, ctx.trans_std)
else:
            device_ = data.device.type
            raise RuntimeError(
                "Input type is {}, but 'deform_pool_{}.*.so' is not imported successfully.".format(device_, device_),
)
if data.requires_grad or rois.requires_grad or offset.requires_grad:
ctx.save_for_backward(data, rois, offset)
ctx.output_count = output_count
return output
@staticmethod
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError("DCN operator for cpu for backward propagation is not implemented.")
data, rois, offset = ctx.saved_tensors
output_count = ctx.output_count
grad_input = torch.zeros_like(data)
grad_rois = None
grad_offset = torch.zeros_like(offset)
deform_pool_cuda.deform_psroi_pooling_cuda_backward(
grad_output, data, rois, offset, output_count, grad_input,
grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
ctx.trans_std)
return (grad_input, grad_rois, grad_offset, None, None, None, None,
None, None, None, None)
deform_roi_pooling = DeformRoIPoolingFunction.apply
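# Hedged usage sketch (added; the shapes below are inferred from forward()
# above, not from a documented API -- treat them as assumptions):
#
# out = deform_roi_pooling(data, rois, offset,
#                          1.0 / 16,  # spatial_scale
#                          7,         # out_size
#                          256,       # out_channels
#                          True)      # no_trans
# # data: (N, C, H, W) feature map; rois: 2-D tensor, one output row per roi.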
|
[
"rkcosmos@gmail.com"
] |
rkcosmos@gmail.com
|
9f3846001c1c354cfeae9bb360ec909db95dbc28
|
3105edcc2326ed9d49c408833268453a942ba474
|
/web/modules/api/__init__.py
|
a164dbb5f0124fc34c059789ae5b31e2ac14489d
|
[] |
no_license
|
cgle/sumopromo
|
641e56a14654fbd9368f1653a0d5282726d3d533
|
1e395eaeeb44acaa23f2ffb63ad68b7ded0799cf
|
refs/heads/master
| 2019-08-01T02:54:44.736669
| 2017-03-31T00:32:26
| 2017-03-31T00:32:26
| 75,031,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 98
|
py
|
from flask import Blueprint
bp = Blueprint('api', __name__)
from web.modules.api.views import *
|
[
"cglehh@gmail.com"
] |
cglehh@gmail.com
|
6e8d64feec1287a4b57e616953041d7efd2c6bc7
|
245a9680c18bb08b338b024e8cb61da899097cec
|
/gamestore/store/urls.py
|
391ff5e89cf9119bd07a47ae7fdfcfa0e21e395f
|
[] |
no_license
|
arnold1000/onlinestore
|
c5f2c003076d248cc18a3e2698c0e09cb4c0a18c
|
fe8a393a270dfb6cd32c0628385a0777d815f8e9
|
refs/heads/master
| 2021-09-22T23:19:15.736468
| 2020-02-20T10:04:50
| 2020-02-20T10:04:50
| 240,704,666
| 0
| 0
| null | 2020-02-15T12:58:46
| 2020-02-15T12:17:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 955
|
py
|
from django.urls import include, path, re_path
from . import views
urlpatterns = [
path('', views.games, name='store-home'),
    re_path(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.activate, name='activate'),
path('games/', views.games, name='games'),
path('games/<int:game_id>/', views.game, name='game'),
path('games/<int:game_id>/score', views.save_score, name='save_score'),
path('games/<int:game_id>/save', views.save_game, name='save_game'),
path('games/<int:game_id>/load', views.load_game, name='load_game'),
path('games/new/', views.add_new, name='add_game'),
path('games/<int:game_id>/modify', views.modify, name='modify'),
path('games/<int:game_id>/delete', views.delete, name='delete'),
path('shop/', views.shop, name='shop'),
path('shop/<int:game_id>/', views.buy, name="buy"),
path('shop/payment/', views.buy_response, name="buy_response")
]
|
[
"arttu.e.koponen@aalto.fi"
] |
arttu.e.koponen@aalto.fi
|
f6f2eef9a2b17f09fa5e8751ab81ae99d47ae64e
|
e85a47a94e59f4c2feaec8aa635aa9d42e0edc00
|
/django_project/urls.py
|
f03e6a43c2f71e2fc1f25d65ab2be1a55421f207
|
[] |
no_license
|
afAditya/Django_Blog
|
1bf0e2949db03a6813db380c49cdca324cd7173b
|
7662d4765a03b25814481edc5a189b97d6899b41
|
refs/heads/master
| 2023-02-17T03:58:49.091498
| 2021-01-20T18:18:51
| 2021-01-20T18:18:51
| 330,613,546
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
"""django_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from users import views as users_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', users_views.register, name='register'),
path('profile/', users_views.profile, name='profile'),
path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
path('', include('blog.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"adityasinghrajput971@gmail.com"
] |
adityasinghrajput971@gmail.com
|
a7072cf5db1b5527272336c6191bab4e1770b928
|
c840f190b3540bf212de2c70563e57da278fa9cb
|
/hyacinth.py
|
055e735da50162825883a5c29dfd69fcd0f7242d
|
[] |
no_license
|
edelooff/hyacinth
|
b768a871d476dd120f7d2d1acb039a6a9ebf2e19
|
0a6dd15fa1b1357afa566f924ad27b744582464b
|
refs/heads/master
| 2022-04-16T13:24:18.986246
| 2020-04-01T08:15:36
| 2020-04-01T08:15:36
| 251,756,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,538
|
py
|
from collections import (
Counter,
defaultdict)
import random
import re
import sys
DESIGN = re.compile(r'''
(?P<design>[A-Z])
(?P<size>[SL])
    (?P<flowers>(?:\d+[a-z])*) # The specification is fuzzy on 1+ or 0+
(?P<total>\d+)''', re.VERBOSE)
DESIGN_FLOWER = re.compile(r'''
(?P<count>\d+)
(?P<species>[a-z])''', re.VERBOSE)
class Pool:
def __init__(self):
self.common_species = set()
self.designers = []
self.flowers = Counter()
def add_designer(self, designer):
"""Adds a BouquetDesigner for the pool size.
It also updates the set of known required species, allowing better
picking of 'filler' flowers for requested bouquets.
"""
self.designers.append(designer)
self.common_species |= designer.required_flowers.keys()
def add_flower(self, species):
"""Adds a flower of given species to the pool of available flowers."""
self.flowers[species] += 1
for designer in self.designers:
if designer.add(species):
print(self.create_bouquet(designer))
def create_bouquet(self, designer):
"""Creates a bouquet according to the given designers design.
After creating the bouquet, other designers are informed of the
removal of flower species from the shared pool.
"""
bouquet = designer.create(self.flowers, self.common_species)
bouquet_string = designer.stringify_bouquet(bouquet)
for bundle in bouquet.items():
for designer in self.designers:
designer.remove(*bundle)
return bouquet_string
class BouquetDesigner:
def __init__(self, design, flower_size, required_flowers, bouquet_size):
self.design = design
self.flower_size = flower_size
self.bouquet_size = bouquet_size
self.required_flowers = required_flowers
self.filler_quantity = bouquet_size - sum(required_flowers.values())
self.available_filler = 0
self.available_flowers = Counter()
def add(self, species):
"""Adds a species of flower to the local availability cache.
In addition. this will check whether a bouquet can be created based on
the recently seen flowers. If one can be created, this returns True.
"""
if species in self.required_flowers:
self.available_flowers[species] += 1
else:
self.available_filler += 1
return self.can_create()
def can_create(self):
"""Checks whether there are enough flowers to create a bouquet.
This will check if there is enough quantity of the required flowers and
if so, will check if there is enough filler to create a full bouquet.
"""
for flower, quantity in self.required_flowers.items():
if self.available_flowers[flower] < quantity:
return False
available = sum(self.available_flowers.values(), self.available_filler)
if available >= self.bouquet_size:
return True
return False
def create(self, pool, common_species):
"""Returns a bouquet (species listing) assembled from the given pool.
After picking the required flowers, if additional flowers are needed
as filler, this method selects a sample of flowers from the rest of
the pool in two steps:
1. Species of flowers used by other BouquetDesigners are avoided so
that selection for this bouquet causes the least conflict.
2. A random sample of flowers is picked, to avoid consistently stealing
from the same other designers. Randomly selecting also hopefully
generates nice and pleasing outcomes for the recipient, though this
hypothesis has not been tested in the least ;-)
In all cases we bias to picking filler flowers that we have a surplus
of. In an ideal world we would have a function that determines the
correct bias to introduce here.
"""
bouquet = Counter()
for species, quantity in self.required_flowers.items():
pool[species] -= quantity
bouquet[species] += quantity
# Pick the remaining flowers
if self.filler_quantity:
remaining = self.filler_quantity
for do_not_pick in (common_species, set()):
population = []
for species in pool.keys() ^ do_not_pick:
population.extend([species] * pool[species])
sample_size = min(len(population), remaining)
for species in random.sample(population, sample_size):
pool[species] -= 1
bouquet[species] += 1
remaining -= sample_size
if not remaining:
break
return bouquet
    def remove(self, species, quantity):
        """Processes removal of flowers from the flower pool.
This will update either the cache for available required flowers, or
if it's a species not -required- for this design, the filler count.
"""
if species in self.required_flowers:
self.available_flowers[species] -= quantity
else:
self.available_filler -= quantity
def stringify_bouquet(self, bouquet):
"""Returns the formatted bouquet string for this designer."""
flowers = sorted(bouquet.items())
flowerstring = (f'{count}{species}' for species, count in flowers)
return f'{self.design}{self.flower_size}{"".join(flowerstring)}'
@classmethod
def from_specification(cls, design):
"""Creates a BouquetDesigner instance from a string specification."""
spec = DESIGN.match(design).groupdict()
spec_flowers = DESIGN_FLOWER.findall(spec['flowers'])
flowers = {species: int(count) for count, species in spec_flowers}
return cls(spec['design'], spec['size'], flowers, int(spec['total']))
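# Worked example of the specification format matched by DESIGN above
# (illustrative values): BouquetDesigner.from_specification('AL2a3b10')
# yields design='A', flower_size='L', required_flowers={'a': 2, 'b': 3},
# bouquet_size=10 and filler_quantity=5 (10 - 2 - 3).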
def read_until_empty(fp):
"""Yields lines from the given filepointer until an empty line is hit."""
while (line := fp.readline().strip()):
yield line
def main():
pools = defaultdict(Pool)
for design in read_until_empty(sys.stdin):
designer = BouquetDesigner.from_specification(design)
pools[designer.flower_size].add_designer(designer)
for species, size in read_until_empty(sys.stdin):
pools[size].add_flower(species)
if __name__ == '__main__':
main()
|
[
"elmer.delooff@gmail.com"
] |
elmer.delooff@gmail.com
|
3ad99e3d7e9841da8f65b2003210f661dc96df4a
|
0296bc69a0d9608ed826ad7a719395f019df098f
|
/Tools/Compare_images.py
|
f4ba586d2dfa3fcae52e277676f2b4a82ffdf59a
|
[] |
no_license
|
jcn16/Blender_HDRmap_render
|
c0486a77e04c5b41a6f75f123dbdb3d10c682367
|
50e6cdb79fef83081de9830e7105dd425a235a9e
|
refs/heads/main
| 2023-07-19T22:22:53.622052
| 2021-08-20T06:29:10
| 2021-08-20T06:29:10
| 377,757,283
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
import cv2
import numpy as np
def tianchong(img):
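    # tianchong ("填充", i.e. padding): pad the shorter dimension with black
    # borders until the image is square, then resize to 512x512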
m = img.shape[0]
n = img.shape[1]
append = int(np.ceil(abs(m - n) / 2))
if m > n:
constant = cv2.copyMakeBorder(img, 0, 0, append, append, cv2.BORDER_CONSTANT, value=(0, 0, 0))
else:
constant = cv2.copyMakeBorder(img, append, append, 0, 0, cv2.BORDER_CONSTANT, value=(0, 0, 0))
constant = cv2.resize(constant, (512, 512))
return constant
def compare():
image_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/raytracing.png')
mask_1=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/alpha.png')
image_1=tianchong(image_1)
mask_1=tianchong(mask_1)
image_2=cv2.imread('/media/jcn/新加卷/JCN/JCN_test_datset/RayTracing/Train_HDR_512/126111539900259-h/0_-32_1_dikhololo_sunset_8k_324/shading.png')
image_1=image_1/255.0*mask_1/255.0
image_2=image_2/255.0*mask_1/255.0
cv2.imshow('image_1',np.asarray(image_1*255,dtype=np.uint8))
cv2.imshow('image_2',np.asarray(image_2*255,dtype=np.uint8))
res=np.asarray(np.clip((image_1-image_2)*255,0,255),dtype=np.uint8)
cv2.imshow('res',res)
cv2.waitKey(0)
def composite():
shading=cv2.imread('/media/jcn/新加卷/JCN/RelightHDR/TEST/images_high_res/10/raytracing.png')
albedo=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/p_albedo.png')
mask=cv2.imread('/home/jcn/桌面/Oppo/Results_albedo/10/gt_mask.png')
relight=albedo/255.0*shading/255.0*mask/255.0
relight=np.asarray(relight*255,dtype=np.uint8)
cv2.imshow('relight',relight)
cv2.waitKey(0)
if __name__=='__main__':
compare()
|
[
"591599635@qq.com"
] |
591599635@qq.com
|
70c1632fe562644b920d482abf735a3bc08211cc
|
673d7a7b8c43523c459c661c2e360d4398a53b28
|
/pip_upgrade-runner.py
|
c2d4d9070ba92561c8da342417040194fe860ecc
|
[
"MIT"
] |
permissive
|
addisonElliott/pip_upgrade_outdated
|
19e3c16c5f8910cb20c142842dc2a992fd8801b7
|
2cf809ff9be2ab4070b75f5959ebcf21d9d34d82
|
refs/heads/master
| 2020-04-09T19:03:26.602029
| 2019-01-15T16:29:57
| 2019-01-15T16:29:57
| 160,532,458
| 0
| 0
|
MIT
| 2018-12-05T14:43:44
| 2018-12-05T14:43:43
| null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenience wrapper for running pip_upgrade_outdated directly from source tree."""
from pip_upgrade_outdated.upgrade_pip_packages import main
if __name__ == '__main__':
main()
|
[
"a.h.jaffe@gmail.com"
] |
a.h.jaffe@gmail.com
|
f0ebbd9f94e46663f17baf0ce5d22f3445f7b76f
|
b1a584df32c2d11a0648dec27e2f9cacd540a7f2
|
/realtors/migrations/0001_initial.py
|
22c019188655b19dcb1f7eae14c3d94742f64f7b
|
[] |
no_license
|
sree61/Django_project_realestate
|
b822f65dff1ea03e3739208c66b8a5f559bce1c0
|
8da46343a0275f8cd13bd71ed74eee6e4efb003a
|
refs/heads/master
| 2020-04-26T04:02:40.594223
| 2019-03-12T18:01:51
| 2019-03-12T18:01:51
| 173,288,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
# Generated by Django 2.1.7 on 2019-02-27 03:20
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Realtor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('photo', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('description', models.TextField(blank=True)),
('phone', models.CharField(max_length=20)),
('email', models.CharField(max_length=50)),
('is_mvp', models.BooleanField(default=False)),
('hire_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
],
),
]
|
[
"sreeamaravila@gmmail.com"
] |
sreeamaravila@gmmail.com
|
94d24bbab5fe5d3c8c83cd1748d41d53ea82f907
|
1f40d003bdba15086e0c2e7828398e3e8e6041e3
|
/robotics/fileread.py
|
a9bd74f6e7dc443010797d5ad2675de000633796
|
[] |
no_license
|
qwertpas/pythontests
|
0bb4982479f998625c0fd9d852df1ef66e3ada71
|
37fc2b253bf24d210364bacaf53f27347e9d29c1
|
refs/heads/master
| 2023-08-09T03:20:35.853236
| 2023-08-05T16:43:44
| 2023-08-05T16:43:44
| 177,456,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import re
def extract_nums(text):
# text.replace('[', ' ')
# text.replace(']', ' ')
p = re.compile(r'\d+\.\d+') # Compile a pattern to capture float values
floats = [float(i) for i in p.findall(text)] # Convert strings to float
return np.array(floats)
with open('ece470/data.txt') as f:
lines = f.readlines()
trials = []
for line in lines:
if line == '\n':
continue
if 'start kill' in line:
trials.append({})
continue
if 'actual pos' in line:
nums = list(extract_nums(line))
trials[-1]['actual'] = nums
if 'detect pos' in line:
nums = list(extract_nums(line))
trials[-1]['detect'] = nums
if 'camera err' in line:
nums = list(extract_nums(line))
trials[-1]['camera'] = nums
actuals = []
detects = []
cameras = []
lines = []
for trial in trials:
actuals.append(trial['actual'])
detects.append(trial['detect'])
cameras.append(trial['camera'])
lines.append((trial['actual'], trial['detect']))
actuals = np.array(actuals)
detects = np.array(detects)
cameras = np.array(cameras)
fig, ax = plt.subplots()
ax.scatter(x=actuals[:,0], y=actuals[:,1], label='Actual position')
ax.scatter(x=detects[:,0], y=detects[:,1], label='Detected position')
ax.scatter(x=0, y=0, label='Robot base')
# lines = [[(0, 1), (1, 1)], [(2, 3), (3, 3)], [(1, 2), (1, 3)]]
lc = mc.LineCollection(lines, color='black', linewidths=1)
ax.add_collection(lc)
print(len(actuals))
ax.autoscale()
accuracy = np.mean(cameras)
ax.set_title(f"Cockroach detection accuracy with average error: {np.round(accuracy, 3)} m")
ax.set_xlabel("Global X axis (m)")
ax.set_ylabel("Global Y axis (m)")
ax.legend()
plt.show()
|
[
"cyx3@illinois.edu"
] |
cyx3@illinois.edu
|
db365ccaef28c337a5d9c69e8c10f082020063ee
|
c940bcb25e1ed315263b25cbdac49cc4bf92cac1
|
/env/vkviewer/python/georef/georeferenceutils.py
|
92de981594a95d6365cfb3fdb3f7e7f015ad83b1
|
[] |
no_license
|
kwaltr/vkviewer
|
281a3f1b5b08a18a89f232ecd096cea44faca58b
|
01d64df0a9266c65e0c3fb223e073ef384281bdc
|
refs/heads/master
| 2021-01-16T22:09:41.821531
| 2014-02-07T17:19:04
| 2014-02-07T17:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,844
|
py
|
'''
Created on Oct 15, 2013
@author: mendt
'''
import subprocess
""" function: parseYSize
@param - imageFile {String} - path to a image file
@return - {Integer} - value which represents the y size of the file
This function parse the x,y size of a given image file """
def parseXYSize(imageFile):
# run gdalinfo command on imageFile and catch the response via Popen
response = subprocess.Popen("gdalinfo %s"%imageFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# read the console output line by line
for line in response.stdout:
if 'Size is ' in line:
x,y = line[8:].split(', ')
#print "X: %s, Y: %s"%(x,y)
return float(x),float(y)
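# For illustration: gdalinfo prints a line such as "Size is 4000, 3000",
# from which parseXYSize returns (4000.0, 3000.0).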
""" Functions for getting the gcps. """
def getGCPsAsString(unorderedPixels, verzeichnispfad, georefCoords):
pure_gcps = getGCPs(unorderedPixels, verzeichnispfad, georefCoords)
str_gcps = []
for tuple in pure_gcps:
string = " ".join(str(i) for i in tuple[0])+", "+" ".join(str(i) for i in tuple[1])
str_gcps.append(string)
return str_gcps
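# Example of the resulting string format (hypothetical values): a pixel point
# (100.0, 200.0) mapped to (13.73, 51.05) becomes "100.0 200.0, 13.73 51.05"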
def getGCPs(unorderedPixels, verzeichnispfad, georefCoords):
    # transform the pixel coordinates by recalculating the y values, because
    # the image and the georeferenced system use different coordinate origins
transformedUnorderedPixels = []
xSize, ySize = parseXYSize(verzeichnispfad)
for tuple in unorderedPixels:
transformedUnorderedPixels.append((tuple[0],ySize-tuple[1]))
    # now order the pixel coords so that their sorting represents the order llc, ulc, urc, lrc
transformedOrderedPixels = orderPixels(transformedUnorderedPixels)
# now create the gcp list
try:
gcpPoints = []
for i in range(0,len(transformedOrderedPixels)):
pixelPoints = (transformedOrderedPixels[i][0],transformedOrderedPixels[i][1])
georefPoints = (georefCoords[i][0],georefCoords[i][1])
gcpPoints.append((pixelPoints,georefPoints))
return gcpPoints
except:
raise
def orderPixels(unorderdPixels):
"""
Function brings a list of tuples which are representing the clipping parameter from the client
in the order llc ulc urc lrc and gives them back at a list. Only valide for pixel coords
@param clippingParameterList: list whichcomprises 4 tuples of x,y coordinates
"""
xList = []
yList = []
for tuple in unorderdPixels:
xList.append(tuple[0])
yList.append(tuple[1])
orderedList = [0, 0, 0, 0]
xList.sort()
yList.sort()
for tuple in unorderdPixels:
if (tuple[0] == xList[0] or tuple[0] == xList[1]) and \
(tuple[1] == yList[2] or tuple[1] == yList[3]):
orderedList[0] = tuple
elif (tuple[0] == xList[0] or tuple[0] == xList[1]) and \
(tuple[1] == yList[0] or tuple[1] == yList[1]):
orderedList[1] = tuple
elif (tuple[0] == xList[2] or tuple[0] == xList[3]) and \
(tuple[1] == yList[0] or tuple[1] == yList[1]):
orderedList[2] = tuple
elif (tuple[0] == xList[2] or tuple[0] == xList[3]) and \
(tuple[1] == yList[2] or tuple[1] == yList[3]):
orderedList[3] = tuple
return orderedList
""" Functions for creating the commands for command line """
""" function: addGCPToTiff
@param - gcPoints {list of gcp} - list of ground control points
@param - srs {Integer} - epsg code of coordinate system
@param - srcPath {String}
@param - destPath {String}
@return - command {String}
Add the ground control points via gdal_translate to the src tiff file """
def addGCPToTiff(gcPoints,srs,srcPath,destPath):
def addGCPToCommandStr(command,gcPoints):
for string in gcPoints:
command = command+"-gcp "+str(string)+" "
return command
command = "gdal_translate --config GDAL_CACHEMAX 500 -a_srs epsg:%s "%srs
command = addGCPToCommandStr(command,gcPoints)
command = command+str(srcPath)+" "+str(destPath)
return command
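# Illustrative output (hypothetical values):
#   gdal_translate --config GDAL_CACHEMAX 500 -a_srs epsg:4326 \
#     -gcp 100.0 200.0, 13.73 51.05 ... src.tif dest.tif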
""" function: georeferenceTiff
@param - shapefilePath {String}
@param - srid {Integer} - epsg code of coordinate system
@param - srcPath {String}
@param - destPath {String}
@param - type {String} - if 'fast' there is less compression
@return - command {String}
Georeferencing via gdalwarp """
def georeferenceTiff(shapefilePath, srid, srcPath, destPath, type=None):
if type == 'fast':
command = "gdalwarp --config GDAL_CACHEMAX 500 -wm 500 -overwrite -co TILED=YES -cutline %s \
-crop_to_cutline -t_srs epsg:%s %s %s"%(shapefilePath,srid,srcPath,destPath)
return command
|
[
"jacobmendt@googlemail.com"
] |
jacobmendt@googlemail.com
|
0a8927cc6f6bdd664bb45f44bde260086ecb6f86
|
14dd1fd527bb7e30abd2e4ee64ffb34fe84f1e06
|
/jzc/postgresInsert.py
|
ff33ab9255a1b1887bfe3ed360a277bce6e8efcf
|
[] |
no_license
|
wdwoodee/luozt
|
7a78e7e66f9bc51159f6f03ca4702f4db17dc580
|
6a4bc86969f6f2db24d9d7d1446e4a4f97846165
|
refs/heads/master
| 2021-05-04T04:46:30.812814
| 2016-10-14T08:30:19
| 2016-10-14T08:30:19
| 70,889,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,093
|
py
|
#_DEBUG = True
import psycopg2
import pdb
try:
conn = psycopg2.connect(database="workspace2", user="postgres", password="postgres", host="127.0.0.1", port="54321")
except Exception:
    print('Got an exception: connect failed')
    raise
print("connected successfully")
cur = conn.cursor()
try:
errLine = 0
with open("test1ip.csv") as f:
i = 0
while True:
ln = f.readline()
if not ln:
break
items = ln.split(',')
if(items[0]=='LAN Segment'):
continue
if len(items) != 13:
errLine += 1
continue
#pdb.set_trace()
if items[10] == "Device Interface":
items[10]=9
elif items[10] == "ARP Table":
items[10]=7
elif items[10] == "CDP/LLDP Table":
items[10]=8
elif items[10] == "MAC Table":
items[10]=6
elif items[10] == "Manual":
items[10]=1
else :
items[10]=0
#if ( not items[8].isnumeric()) or ( not items[10].isnumeric()) or ( not items[11].isnumeric()) :
# pdb.set_trace()
# errLine += 1
# continue
try:
sqlinsert = "select saveoneiprecode_ip2mac_x64 ( false, '%s', '%s', '', '%s', '%s', '%s', '%s', '%s', '%s', %s, %s, %s)" % (items[9], items[0], items[1], items[2], items[4], items[5], items[6], items[7], items[8], items[10], items[11])
cur.execute(sqlinsert)
except Exception:
print("Insert Err:%s",sqlinsert)
errLine += 1
continue
i += 1
if ( (i%5)==0 ):
#pdb.set_trace()
conn.commit()
conn.commit()
print ("Complete: insert %d , err %d" %( i, errLine) );
conn.close()
except Exception:
    print('Got an exception')
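# Editor's note (sketch, not part of the original flow): psycopg2 can bind
# parameters itself, which avoids the quoting pitfalls of %-formatted SQL:
#   cur.execute("select saveoneiprecode_ip2mac_x64(false, %s, %s, %s)", (a, b, c))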
|
[
"18511246771@163.com"
] |
18511246771@163.com
|
1ea1be419f42ba190fe16b9772f1ad7bd9ddae47
|
afcb1837c0c50fd823964594780111d530a73f8e
|
/qtjsonschema/__main__.py
|
d267aee40ac8028b7f4edc49b45fb37004ea312c
|
[] |
no_license
|
ArtemioGiovanni/pyqtschema
|
332a60462db5ac2e113256c940b557d77e16c0cf
|
7d84d8754d039504a5905289a33574abe5318e89
|
refs/heads/master
| 2020-05-04T20:38:22.850664
| 2014-08-15T22:07:40
| 2014-08-15T22:07:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,848
|
py
|
#!/usr/bin/env python
"""
pyqtschema - Python Qt JSON Schema Tool
Generate a dynamic Qt form representing a JSON Schema.
Filling the form will generate JSON.
"""
from PyQt4 import QtCore, QtGui
from qtjsonschema.widgets import create_widget
class MainWindow(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle("PyQtSchema")
# Menu bar
# File
# Open
# Save
# --
# Close
self.menu = QtGui.QMenuBar(self)
self.file_menu = self.menu.addMenu("&File")
_action_open = QtGui.QAction("&Open Schema", self)
_action_open.triggered.connect(self._handle_open)
_action_save = QtGui.QAction("&Save", self)
_action_save.triggered.connect(self._handle_save)
_action_quit = QtGui.QAction("&Close", self)
_action_quit.triggered.connect(self._handle_quit)
self.file_menu.addAction(_action_open)
self.file_menu.addAction(_action_save)
self.file_menu.addSeparator()
self.file_menu.addAction(_action_quit)
# Scrollable region for schema form
self.content_region = QtGui.QScrollArea(self)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.menu)
vbox.addWidget(self.content_region)
vbox.setContentsMargins(0,0,0,0)
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(0,0,0,0)
hbox.addLayout(vbox)
self.setLayout(hbox)
def process_schema(self, schema):
"""
Load a schema and create the root element.
"""
import json
import collections
with open(schema) as f:
_schema = json.loads(f.read(), object_pairs_hook=collections.OrderedDict)
if "title" in _schema:
self.setWindowTitle("%s - PyQtSchema" % _schema["title"])
self.content_region.setWidget(create_widget(_schema.get("title", "(root)"), _schema))
self.content_region.setWidgetResizable(True)
def _handle_open(self):
# Open JSON Schema
schema = QtGui.QFileDialog.getOpenFileName(self, 'Open Schema', filter="JSON Schema (*.schema *.json)")
if schema:
self.process_schema(schema)
def _handle_save(self):
# Save JSON output
import json
obj = self.content_region.widget().to_json_object()
outfile = QtGui.QFileDialog.getSaveFileName(self, 'Save JSON', filter="JSON (*.json)")
if outfile:
with open(outfile, 'w') as f:
f.write(json.dumps(obj))
def _handle_quit(self):
# TODO: Check if saved?
self.close()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
main_window = MainWindow()
main_window.show()
sys.exit(app.exec_())
|
[
"klange@yelp.com"
] |
klange@yelp.com
|
9b577e0397ab602bffa52d7290910ae2afb30a2d
|
19872c0f261100d3a7a3c770aa16ef719b7f397b
|
/PythonProjects/crawl_work/crawl_190605_51jop/nationwide_make_urls.py
|
b5e328efff01c1510da69677c7ba2fb1cb81e680
|
[] |
no_license
|
enjoqy/PythonProjects
|
b7951bd13c32ec40842e8c7f7a4b2a32929d3d8b
|
ae1a4b1a55a7906bb4dd78e8bd43d19decec48ba
|
refs/heads/master
| 2020-06-20T03:50:32.814009
| 2019-07-15T09:17:50
| 2019-07-15T09:17:50
| 184,964,749
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time: 2019/5/16 001612:11
# @Author: junhi
# java
# url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,java,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
# Graphic design
# url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E5%25B9%25B3%25E9%259D%25A2,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
# Big data
# url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
# Cloud computing
url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,%25E4%25BA%2591%25E8%25AE%25A1%25E7%25AE%2597,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
provinces = [
'北京', '上海', '广东省', '深圳', '天津', '重庆', '江苏省', '浙江省',
'四川省', '海南省', '福建省', '山东省', '江西省', '广西', '安徽省', '河北',
'河南省', '湖北省', '湖南省', '陕西省', '山西省', '黑龙江省', '辽宁省', '吉林省',
'云南省', '贵州省', '甘肃省', '内蒙古', '宁夏', '西藏', '新疆', '青海省',
]
def get_nationwide_urls():
i = 1
nationwide_java_urls = {}
for province in provinces:
if i <= 9:
province_url = url[0:31] + str(i) + url[32:]
print(province_url)
else:
province_url = url[0:30] + str(i) + url[32:]
print(province_url)
nationwide_java_urls[province] = province_url
        if i == 32:  # stop after the last province (32 codes in total)
            break
i += 1
return nationwide_java_urls
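# Illustrative usage (editor's note): each value is the base URL with the
# leading area code swapped in, e.g.
#   urls = get_nationwide_urls()
#   urls['北京']  # i == 1 -> area code '010000' in the URL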
|
[
"gdlzhh321@163.com"
] |
gdlzhh321@163.com
|
969e32c9641599d2ac6f429333a9a104f34dff93
|
6a33819de8b7aae1388f94dd0142819693da5cdf
|
/src/kitchen/auth_backend.py
|
73d1663bd8fa0f64f9a2fe2bf8ad9e972e0a97eb
|
[] |
no_license
|
goudete/clustr
|
9ffa7deb9cec4492a7f65c888287976bdbd267f9
|
0a6904c7f29a3341fef55933cf7d73c8326fdb33
|
refs/heads/master
| 2023-04-06T06:39:52.048484
| 2020-09-19T14:26:27
| 2020-09-19T14:26:27
| 289,341,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
from django.contrib.auth.backends import ModelBackend
from .models import Kitchen
from django.contrib.auth.models import User
from restaurant_admin.models import Restaurant
class PasswordlessAuthBackend(ModelBackend):
"""Log in to Django without providing a password, just a cashier code/login number
"""
def authenticate(self, request, login_number=None):
try:
rest = Restaurant.objects.filter(kitchen_login_no=login_number).first()
return rest
        except Restaurant.DoesNotExist:
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
        except User.DoesNotExist:
return None
|
[
"rfitch@oxy.edu"
] |
rfitch@oxy.edu
|
6fa3e92bc057c995f58b43c06e0f64afa615f900
|
9a5c5ead8471c7bb71fe90429651b9d863ee2474
|
/bots/dayandtime.py
|
4d69b928ec7f213382d296b3b691500461877e14
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mwaiton/TwitterBots
|
12af71e19895a53791d12b7a788a508752b0f1d9
|
3c641690c5047a1bbbbf3c47bb90256b7cf4f271
|
refs/heads/master
| 2023-02-08T09:12:44.300477
| 2020-12-30T02:56:04
| 2020-12-30T02:56:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 474
|
py
|
import calendar
from datetime import datetime
"""Python program to find day of the week for a given date.
Also finds current date, which can be used to find the day.
"""
# To find a day of the week.
def find_day(date):
day = datetime.strptime(date, '%Y-%m-%d').weekday()
return calendar.day_name[day]
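# Doctest-style example: find_day('2020-12-30') -> 'Wednesday'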
# To find today's date.
def today():
return str(datetime.date(datetime.now()))
if __name__ == '__main__':
d = today()
print("Today is", find_day(d))
|
[
"bexxmodd@seas.upenn.edu"
] |
bexxmodd@seas.upenn.edu
|
97c5e4d3a1702140c12048536f8ec60595290914
|
b2be86ae60e9698a3daf78fdedacac631dd60cab
|
/django/user/tests/test_models.py
|
b0d94ce44fa3f235d6c64af4738666ba937c104d
|
[] |
no_license
|
TigenTigen/do_you_know
|
351326388bb1ae9caffed246b8fcdcb8ba7af594
|
0c6a5b001243fafa1b8382f89ec8bf7aef640f2e
|
refs/heads/master
| 2020-06-13T13:26:29.402100
| 2019-08-07T06:27:37
| 2019-08-07T06:27:37
| 194,670,788
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,716
|
py
|
from django.test import TestCase
from user.factories import AdvUserFactory
from user.models import AdvUser, signer, dt_engine, mail
from django.urls import reverse
class TestAdvUserModel(TestCase):
def test_str_for_user_created_by_factory(self):
user = AdvUserFactory(username='some_user')
self.assertEqual(str(user), 'some_user')
def test_save_for_user_created_by_social_auth(self):
user = AdvUser(username='id123456789', first_name='fn', last_name='ln', password='test_password')
user.save()
self.assertEqual(user.username, 'fn ln')
def test_confirm_for_not_active_user(self):
user = AdvUserFactory(is_active=False)
self.assertFalse(user.is_active)
user.confirm()
self.assertTrue(user.is_active)
def test_get_email_context(self):
user = AdvUserFactory()
link = reverse('user:registration_confirmed', kwargs={'sign': signer.sign(user.username)})
context = user.get_email_context()
self.assertIsNotNone(context)
self.assertIn('confirmation_link', str(context))
self.assertIn(link, str(context))
def test_send_confirmation_email(self):
user = AdvUserFactory()
link = reverse('user:registration_confirmed', kwargs={'sign': signer.sign(user.username)})
connection = mail.get_connection(backend='django.core.mail.backends.locmem.EmailBackend')
outbox = user.send_confirmation_email(connection)
self.assertEqual(len(outbox), 1)
self.assertEqual(outbox[0].subject, 'Подтверждение регистрации')
self.assertIn(link, outbox[0].body)
def create_users_for_test(self, number):
for i in range(number):
user = AdvUserFactory()
return AdvUser.objects.all()
def test_social_count(self):
users = self.create_users_for_test(10)
for user in users:
if user.social_auth.exists():
self.assertNotEqual(user.social_count(), 0)
else:
self.assertEqual(user.social_count(), 0)
def test_total_points_count(self):
users = self.create_users_for_test(10)
for user in users:
if user.replies.exists():
self.assertNotEqual(user.total_points_count(), 0)
else:
self.assertEqual(user.total_points_count(), 0)
def test_get_points_rating_queryset_manager_with_users_and_no_replies(self):
users = self.create_users_for_test(10)
full_qs = AdvUser.objects.get_queryset()
test_qs = AdvUser.objects.get_points_rating_queryset()
self.assertEqual(full_qs.count(), 10)
self.assertEqual(test_qs.count(), 0)
|
[
"Eleriya-25@yandex.ru"
] |
Eleriya-25@yandex.ru
|
cc97266146a0e0a5a82b27d70bd9882600547a30
|
606afd1394624127e37bf82328e1d21f00f8a2ca
|
/Layer.py
|
0c21cbb41afc36e4c72c6f5a25825d5ff2ed8f09
|
[] |
no_license
|
JoelGooch/Tensorflow-Final-Year-Project
|
b4d437a778f19e5cf7adc16afd82b5a0c7a93788
|
b72aeeed5ac9df13257c56fff5e71855709cb73c
|
refs/heads/master
| 2021-01-20T03:16:47.980594
| 2017-05-21T20:13:55
| 2017-05-21T20:13:55
| 83,829,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
# base Layer class that others will inherit from
class Layer:
def __init__(self, layer_name):
self.layer_name = layer_name
# contains all the parameters that a convolution layer will have
class ConvLayer(Layer):
def __init__(self, layer_name, kernel_size, stride, act_function, num_output_filters, weight_init, weight_val, bias_init, bias_val, padding, normalize, dropout , keep_rate):
Layer.__init__(self, layer_name)
self.layer_type = 'Convolution'
self.kernel_size = int(kernel_size)
self.stride = int(stride)
self.act_function = act_function
self.num_output_filters = int(num_output_filters)
self.weight_init = weight_init
self.weight_val = float(weight_val)
self.bias_init = bias_init
self.bias_val = float(bias_val)
self.padding = padding
self.normalize = normalize
self.dropout = dropout
self.keep_rate = float(keep_rate)
# contains all the parameters that a max pooling layer will have
class MaxPoolingLayer(Layer):
def __init__(self, layer_name, kernel_size, stride, padding, normalize, dropout, keep_rate):
Layer.__init__(self, layer_name)
self.layer_type = 'Max Pool'
self.kernel_size = int(kernel_size)
self.stride = int(stride)
self.padding = padding
self.normalize = normalize
self.dropout = dropout
self.keep_rate = float(keep_rate)
# contains all the parameters that a fully connected layer will have
class FullyConnectedLayer(Layer):
def __init__(self, layer_name, act_function, num_output_nodes, weight_init, weight_val, bias_init, bias_val, dropout, keep_rate):
Layer.__init__(self, layer_name)
self.layer_type = 'Fully Connected'
self.act_function = act_function
self.num_output_nodes = int(num_output_nodes)
self.weight_init = weight_init
self.weight_val = float(weight_val)
self.bias_init = bias_init
self.bias_val = float(bias_val)
self.dropout = dropout
self.keep_rate = float(keep_rate)
# contains all the parameters that an output layer will have
class OutputLayer(Layer):
def __init__(self, layer_name, act_function, weight_init, weight_val, bias_init, bias_val):
Layer.__init__(self, layer_name)
self.layer_type = 'Output'
self.act_function = act_function
self.weight_init = weight_init
self.weight_val = float(weight_val)
self.bias_init = bias_init
self.bias_val = float(bias_val)
|
[
"j_gooch808@hotmail.com"
] |
j_gooch808@hotmail.com
|
f1b834746b4997f601868f58ed815391ad9e6bf7
|
730b85b3c23337fddca9f4a80d82f8ed2f2eb2a4
|
/BotBlocker.py
|
6f1b566e739ae14e718fb48d898640ff6245218d
|
[
"MIT"
] |
permissive
|
sigmaister/Anonymous-Telegram-Bot
|
29db924b6a3b33fdae3aba045b68e3c0fb60992b
|
dee234dd9f931b438a9939310e6d54c0fea4999f
|
refs/heads/master
| 2023-05-25T01:08:30.300715
| 2020-04-12T10:55:33
| 2020-04-12T11:30:03
| 269,410,655
| 0
| 0
|
MIT
| 2023-05-22T23:29:49
| 2020-06-04T16:32:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
"""
This software has been developed by github user fndh (http://github.com/fndh)
You are free to use, modify and redistribute this software as you please, as
long as you follow the conditions listed in the LICENSE file of the github
repository indicated. I want to thank you for reading this small paragraph,
and please consider sending me a message if you are using this software! It
will surely make my day.
"""
class Blocker:
def __init__(self, sql_wrapper):
self.sql = sql_wrapper
self.sql.execute_and_commit(
"CREATE TABLE IF NOT EXISTS blocked_user_ids (user_id);")
def block_user(self, user_id):
"""
Block a user.
        Updates the blocked table by adding the user ID if it is not already
there."""
if not self.is_user_blocked(user_id):
self.sql.execute_and_commit(
"INSERT INTO blocked_user_ids (user_id) VALUES (?);",
(user_id,))
def unblock_user(self, user_id):
"""
Unblock a user.
Remove the blocked user ID from the block table if the ID exists."""
self.sql.execute_and_commit(
"DELETE FROM blocked_user_ids WHERE user_id=?;",
(user_id,))
def get_blocked_users(self):
"""Retrieve a list of the currently blocked user IDs."""
rows = self.sql.select_and_fetch(
"SELECT user_id FROM blocked_user_ids;")
user_ids = [str(user_id[0]) for user_id in rows]
return user_ids
def is_user_blocked(self, user_id):
"""Verify if a user ID is stored in the block table."""
matched_ids = self.sql.select_and_fetch(
"SELECT COUNT(*) FROM blocked_user_ids WHERE user_id=?",
(user_id,))
# Return format from query is [(count,)]
return matched_ids[0][0]
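# Illustrative usage (editor's sketch, assuming a compatible sql_wrapper that
# provides execute_and_commit / select_and_fetch):
#   blocker = Blocker(sql_wrapper)
#   blocker.block_user(12345)
#   blocker.is_user_blocked(12345)  # -> 1 (truthy row count)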
|
[
"xavi_cat36@hotmail.com"
] |
xavi_cat36@hotmail.com
|
7535a94b63e52647dad6aafa93a3c78cf10f5ec2
|
4833d5ebc9c84acd95059eb83524923dd9aebc83
|
/asyncio_helpers.py
|
d1cf4ebc364da783fd56b5ca077a0af4b7695b57
|
[] |
no_license
|
bdutro/chromium-proxy
|
24738f5f7b088c8bae55ba398c71ac82b928b09f
|
334fb3bd006a3f26b553d354d2830ba3b0328b0b
|
refs/heads/main
| 2023-02-20T03:07:22.911243
| 2021-01-08T18:45:50
| 2021-01-08T18:45:50
| 327,177,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
import asyncio
import sys
if sys.version_info < (3, 7):
def asyncio_run(p):
loop = asyncio.get_event_loop()
return loop.run_until_complete(p)
else:
def asyncio_run(p):
return asyncio.run(p)
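# Minimal demonstration (editor's addition): both branches expose the same
# asyncio_run(coroutine) interface.
if __name__ == '__main__':
    async def _demo():
        return 42

    print(asyncio_run(_demo()))  # prints 42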
|
[
"brett.dutro@gmail.com"
] |
brett.dutro@gmail.com
|
f8bc14a1c8b118e1a3d390327c9c1d7f9a1cbbd5
|
7a68632e1788079f40894501e8394d89ebf784df
|
/mysite/settings.py
|
d540fa83124150c965b10568958646da1b003d75
|
[] |
no_license
|
Audywb/pythonanywhere
|
4f790dad374758f5419b59f4c59d9b22b4099881
|
4ec7494ad0e3fe478b9e6e6e56ed3ef8b1f29201
|
refs/heads/master
| 2023-01-02T14:56:31.593485
| 2020-10-25T13:38:30
| 2020-10-25T13:38:30
| 285,757,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,637
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l#d)=k3^2f9^39&__hb26y@cf+p95jv#g=p67-5yf9a1gi-$1+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [u'audyyy.pythonanywhere.com',u'localhost',u'127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myweb',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
'/var/www/static/',
]
STATIC_URL = '/static/'
# default static files settings for PythonAnywhere.
# see https://help.pythonanywhere.com/pages/DjangoStaticFiles for more info
MEDIA_ROOT = u'/home/wichit2s/mysite/media'
MEDIA_URL = '/media/'
STATIC_ROOT = u'/home/wichit2s/mysite/static'
|
[
"thesombats@gmail.com"
] |
thesombats@gmail.com
|
49831033a0db7eb9b44e22f82a18daf733b0ede5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03698/s076680456.py
|
f88b228e0ad2e567dcb9e176f989690214f846c7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
S = input()
for c in S:
    if S.count(c) > 1:  # some character repeats
        print("no")
        break
else:  # for-else: runs only when the loop ends without a break
    print("yes")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
f0b7898e2cc53710b09420d379c41c3e2ac4a97a
|
cbf70750d6c265e4043fd9d1d3bd835662cd680f
|
/customer/apps.py
|
845451d50116021235e04c440ee3b6c448bca321
|
[
"Apache-2.0"
] |
permissive
|
xxcfun/DJANGO_CRM
|
c54e249a9a3da9edaeb5d9b49e852d351c7e359a
|
1f8d2d7a025f9dc54b5bf498e7a577469f74c612
|
refs/heads/master
| 2023-01-14T05:21:54.995601
| 2020-11-27T03:23:40
| 2020-11-27T03:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
from django.apps import AppConfig
class CustomerConfig(AppConfig):
name = 'customer'
verbose_name = '客户管理'
|
[
"55070348+hhdMrLion@users.noreply.github.com"
] |
55070348+hhdMrLion@users.noreply.github.com
|
3b42efb7734e3bf5050c3c3470912a7e738e57a2
|
456e964284c5e25bff5dd7df7361dd6e20b3ea96
|
/house_prices_competition.py
|
d3ec623731ca86a9398cbe1a73111ac24984088c
|
[] |
no_license
|
huangshizhi/kaggle
|
069d310909f1e45bd420791ab00405fe2e49a621
|
d754fd0c152461bf96e8553a8e1fd58b65b82cd6
|
refs/heads/master
| 2021-01-16T17:16:36.212132
| 2017-08-11T07:39:17
| 2017-08-11T07:39:17
| 100,005,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,569
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 9 09:54:45 2017
@author: huangshizhi
https://www.dataquest.io/blog/kaggle-getting-started/
"""
import pandas as pd
import numpy as np
# 1. Load the data
train = pd.read_csv(r'D:\kaggle\house_prices\data\train.csv')
test = pd.read_csv(r'D:\kaggle\house_prices\data\test.csv')
import matplotlib.pyplot as plt
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = (10, 6)
# 2. Explore the data: look at the summary statistics; skew measures the asymmetry of the distribution
train.SalePrice.describe()
print ("Skew is:", train.SalePrice.skew())
plt.hist(train.SalePrice, color='blue')
plt.show()
target = np.log(train.SalePrice)
print ("Skew is:", target.skew())
plt.hist(target, color='blue')
plt.show()
# Extract numeric features (Working with Numeric Features)
numeric_features = train.select_dtypes(include=[np.number])
numeric_features.dtypes
# Compute the correlation matrix (corr(), not covariance)
corr = numeric_features.corr()
print (corr['SalePrice'].sort_values(ascending=False)[:5], '\n')
print (corr['SalePrice'].sort_values(ascending=False)[-5:])
salePrice_top5 = corr['SalePrice'].sort_values(ascending=False)[:5]
salePrice_bottom5 = corr['SalePrice'].sort_values(ascending=False)[-5:]
# OverallQual rates the overall material and finish quality of the house
train.OverallQual.unique()
quality_pivot = train.pivot_table(index='OverallQual',
values='SalePrice', aggfunc=np.median)
quality_pivot.plot(kind='bar', color='blue')
plt.xlabel('Overall Quality')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
# Living area
plt.scatter(x=train['GrLivArea'], y=target)
plt.ylabel('Sale Price')
plt.xlabel('Above grade (ground) living area square feet')
plt.show()
# Garage size
plt.scatter(x=train['GarageArea'], y=target)
plt.ylabel('Sale Price')
plt.xlabel('Garage Area')
plt.show()
# After removing outliers
train = train[train['GarageArea'] < 1200]
plt.scatter(x=train['GarageArea'], y=np.log(train.SalePrice))
plt.xlim(-200,1600) # This forces the same scale as before
plt.ylabel('Sale Price')
plt.xlabel('Garage Area')
plt.show()
#Handling Null Values
nulls = pd.DataFrame(train.isnull().sum().sort_values(ascending=False))
nulls.columns = ['Null Count']
nulls.index.name = 'Feature'
# MiscFeature: miscellaneous features not covered by the other categories
print ("Unique values are:", train.MiscFeature.unique())
# Extract categorical (non-numeric) features
categoricals = train.select_dtypes(exclude=[np.number])
cate_desc = categoricals.describe()
print ("Original: \n")
print (train.Street.value_counts(), "\n")
#One-Hot Encoding
train['enc_street'] = pd.get_dummies(train.Street, drop_first=True)
test['enc_street'] = pd.get_dummies(test.Street, drop_first=True)  # fixed: encode the test column, not train
print ('Encoded: \n')
print (train.enc_street.value_counts())
condition_pivot = train.pivot_table(index='SaleCondition',
values='SalePrice', aggfunc=np.median)
condition_pivot.plot(kind='bar', color='blue')
plt.xlabel('Sale Condition')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
def encode(x): return 1 if x == 'Partial' else 0
train['enc_condition'] = train.SaleCondition.apply(encode)
test['enc_condition'] = test.SaleCondition.apply(encode)
condition_pivot = train.pivot_table(index='enc_condition', values='SalePrice', aggfunc=np.median)
condition_pivot.plot(kind='bar', color='blue')
plt.xlabel('Encoded Sale Condition')
plt.ylabel('Median Sale Price')
plt.xticks(rotation=0)
plt.show()
# Fill missing values by interpolation
data = train.select_dtypes(include=[np.number]).interpolate().dropna()
# Check that no nulls remain
sum(data.isnull().sum() != 0)
# 3. Build the regression model and train it
y = np.log(train.SalePrice)
X = data.drop(['SalePrice', 'Id'], axis=1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=42, test_size=.33)
# Fit the model
from sklearn import linear_model
lr = linear_model.LinearRegression()
model = lr.fit(X_train, y_train)
print ("R^2 is: \n", model.score(X_test, y_test))
predictions = model.predict(X_test)
from sklearn.metrics import mean_squared_error
print ('RMSE is: \n', np.sqrt(mean_squared_error(y_test, predictions)))
actual_values = y_test
plt.scatter(predictions, actual_values, alpha=.75,color='b')
#plt.plot(X, y_rbf, color='black', lw=lw, label='RBF model')
#alpha helps to show overlapping data
plt.xlabel('Predicted Price')
plt.ylabel('Actual Price')
plt.title('Linear Regression Model')
plt.show()
for i in range (-2, 3):
alpha = 10**i
rm = linear_model.Ridge(alpha=alpha)
ridge_model = rm.fit(X_train, y_train)
preds_ridge = ridge_model.predict(X_test)
plt.scatter(preds_ridge, actual_values, alpha=.75, color='b')
plt.xlabel('Predicted Price')
plt.ylabel('Actual Price')
plt.title('Ridge Regularization with alpha = {}'.format(alpha))
    overlay = 'R^2 is: {}\nRMSE is: {}'.format(
                ridge_model.score(X_test, y_test),
                np.sqrt(mean_squared_error(y_test, preds_ridge)))
plt.annotate(s=overlay,xy=(12.1,10.6),size='x-large')
plt.show()
# 4. Build and save the submission
submission = pd.DataFrame()
submission['Id'] = test.Id
feats = test.select_dtypes(
include=[np.number]).drop(['Id'], axis=1).interpolate()
predictions = model.predict(feats)
final_predictions = np.exp(predictions)
submission['SalePrice'] = final_predictions
submission.head()
submission.to_csv(r'D:\kaggle\house_prices\submission1.csv', index=False)
|
[
"noreply@github.com"
] |
huangshizhi.noreply@github.com
|
bebb9c6ed06be5117b813c8c9ee9f2303de321f2
|
6ba09665a90059f326e594f4d1edb74fd55e2a1c
|
/utils/seg_metrics.py
|
7a0b9bac7d24bfdee7381f2d2d3880960abf9bf1
|
[
"MIT"
] |
permissive
|
NguyenNhan999/Joint-Motion-Estimation-and-Segmentation
|
f9ef73da51eaf37418ff2906c469b6f0b42ac0c5
|
1c36d97ef41bee48d377c2cf98ad1d7b86ee37b4
|
refs/heads/master
| 2022-12-03T10:02:36.489257
| 2020-08-21T12:32:14
| 2020-08-21T12:32:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,224
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 17:53:58 2018
@author: cq615
"""
import numpy as np, cv2
def np_categorical_dice(pred, truth, k):
# Dice overlap metric for label value k
A = (pred == k).astype(np.float32)
B = (truth == k).astype(np.float32)
return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B))
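# Illustrative check: identical masks give a Dice score of 1.0, e.g.
#   a = np.array([0, 1, 1]); np_categorical_dice(a, a, 1) -> 1.0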
def distance_metric(seg_A, seg_B, dx, k):
# Measure the distance errors between the contours of two segmentations
# The manual contours are drawn on 2D slices.
# We calculate contour to contour distance for each slice.
table_md = []
table_hd = []
K, X, Y, Z = seg_A.shape
for z in range(Z):
# Binary mask at this slice
slice_A = seg_A[k, :, :, z].astype(np.uint8)
slice_B = seg_B[k, :, :, z].astype(np.uint8)
# The distance is defined only when both contours exist on this slice
if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:
# Find contours and retrieve all the points
contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pts_A = contours[0]
for i in range(1, len(contours)):
pts_A = np.vstack((pts_A, contours[i]))
contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pts_B = contours[0]
for i in range(1, len(contours)):
pts_B = np.vstack((pts_B, contours[i]))
# Distance matrix between point sets
M = np.zeros((len(pts_A), len(pts_B)))
for i in range(len(pts_A)):
for j in range(len(pts_B)):
M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])
# Mean distance and hausdorff distance
md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx
hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx
table_md += [md]
table_hd += [hd]
# Return the mean distance and Hausdorff distance across 2D slices
mean_md = np.mean(table_md) if table_md else None
mean_hd = np.mean(table_hd) if table_hd else None
return mean_md, mean_hd
|
[
"c.qin15@imperial.ac.uk"
] |
c.qin15@imperial.ac.uk
|
e6797d47ad8c18d5fc26593670c8cb8c8f0cdbd6
|
21ba6d42f7f26779322a149d0af25f3acbb07682
|
/Repository/Labelling/Automatically Labelled News/generate_auto_label.py
|
dbaca55e363df0a7a2b3423c3038b8663244f622
|
[] |
no_license
|
Bolanle/G54MIP
|
6f1fa37b1a9da0477c3b22c9f957cbaf2249b764
|
02913adc86088bbbdab23c6d508a7c91bcb0d110
|
refs/heads/master
| 2021-01-18T16:41:17.783175
| 2015-05-07T10:31:19
| 2015-05-07T10:31:19
| 24,644,928
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,177
|
py
|
from collections import defaultdict
from segmentation import SegmentCreator
from segmentation import DataPoint
import os, datetime, calendar
from xml.etree import ElementTree
from scipy.stats import pearsonr
import pandas
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
import numpy as np
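# Linear interpolation between two DataPoints:
#   y = y0 + (y1 - y0) * (x - x0) / (x1 - x0)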
def _get_y_from_eqn(start_index_point:DataPoint, end_index_point:DataPoint, x_value):
y = (((end_index_point.get_y() - start_index_point.get_y()) * (x_value - start_index_point.get_x())) / (
end_index_point.get_x() - start_index_point.get_x())) + start_index_point.get_y()
return y
def get_trend(start_point, end_point):
y_difference = end_point.get_y() - start_point.get_y()
if y_difference > 0:
return "up"
elif y_difference < 0:
return "down"
else:
return "neutral"
def get_news():
rel_path = "./"
company_news_data = dict()
for filename in os.listdir(rel_path):
if ".xml" in filename and 'unsure' not in filename:
company_news_data[filename.replace(".xml", '')] = ElementTree.parse(rel_path + filename).getroot()
return company_news_data
def get_data(path, draw=False):
creator = SegmentCreator(draw=draw)
return creator.create_segments(path, 2)
def map_segments_to_company():
rel_path = "../../Stock Data/"
company_data = dict()
for filename in os.listdir(rel_path):
if '.csv' in filename:
company = filename.replace(".csv", '')
company_data[company] = get_data(rel_path + filename)
return company_data
def get_segment_for_news_release(date, company_trends):
for i in range(len(company_trends) - 1):
start = company_trends[i]
end = company_trends[i + 1]
if datetime.datetime.strptime(start.get_date(), "%d/%m/%Y") <= datetime.datetime.strptime(date,
"%d/%m/%Y") \
<= datetime.datetime.strptime(end.get_date(), "%d/%m/%Y"):
return start, end
    else:
        # for-else: no segment contained this date, signal with (0, 0)
        return 0, 0
def auto_generate(news, company_trends, company_name, dates):
progress_sentiment_from_number = dict(up='u', down='d', neutral='n')
progress_sentiment_to_num = dict(up=1, down=-1, neutral=0)
progress_sentiment_trend = defaultdict(int)
for news_article in news:
date_of_news_release = news_article.get('datetime')
date_of_news_release = date_of_news_release.replace(' ET', '')
date_of_news_release = datetime.datetime.strptime(date_of_news_release, '%b %d, %Y %I:%M %p').strftime(
'%d/%m/%Y')
if date_of_news_release in dates:
start, end = get_segment_for_news_release(date_of_news_release, company_trends)
trend = get_trend(start, end)
news_article.attrib['progress_sentiment'] = progress_sentiment_from_number[trend]
try:
news_article.attrib.pop('feeling_sentiment')
except:
pass
progress_sentiment_trend[date_of_news_release] += progress_sentiment_to_num[trend]
# write to file
ElementTree.ElementTree(news).write("{}.xml".format(company_name), xml_declaration=True)
for day in dates:
if not progress_sentiment_trend[str(day)] and not company_name == "ibm":
progress_sentiment_trend[str(day)] = 1
elif not progress_sentiment_trend[str(day)] and company_name == "ibm":
progress_sentiment_trend[str(day)] = -1
sorted_progress = sorted(progress_sentiment_trend)
dateless_progress_trend = []
for day in sorted_progress:
dateless_progress_trend.append(progress_sentiment_trend[day])
return dateless_progress_trend
def get_projected_prices(segmented_price:list):
prices = []
for i in range(len(segmented_price) - 1):
start = segmented_price[i]
end = segmented_price[i + 1]
for x_value in range(start.get_x(), end.get_x()):
prices.append(DataPoint(x_value, _get_y_from_eqn(start, end, x_value)))
prices.append(segmented_price[-1])
prices.sort(key=lambda x: (x.get_x(), x.get_y()))
return prices
def aggregate_sentiment(trend):
aggregate = []
for i in range(len(trend)):
aggregate.append(sum(trend[:i + 1]))
return aggregate
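# e.g. aggregate_sentiment([1, -1, 1]) -> [1, 0, 1] (running totals of the trend)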
if __name__ == '__main__':
news = get_news()
stock_price = map_segments_to_company()
for company in stock_price.keys():
print("*******************{0}****************".format(company))
projected_prices = get_projected_prices(stock_price[company])
file_csv = pandas.read_csv("../../Stock Data/{0}.csv".format(company))
data = file_csv[file_csv.columns[2]].tolist()
progress_trend = auto_generate(news[company], stock_price[company], company_name=company,
dates=file_csv[file_csv.columns[0]].tolist())
correlation, pvalue = pearsonr([price.get_y() for price in projected_prices],
aggregate_sentiment(progress_trend))
print("Projected correlation coefficient (progress)", correlation)
correlation, pvalue = pearsonr(data, aggregate_sentiment(progress_trend))
print("Actual correlation coefficient (progress)", correlation)
if company in []:
dates = file_csv[file_csv.columns[0]].tolist()
# next we'll write a custom formatter
N = len(dates)
ind = np.arange(N) # the evenly spaced plot indices
def format_date(x, pos=None):
thisind = np.clip(int(x + 0.5), 0, N - 1)
return datetime.datetime.strptime(dates[thisind], '%d/%m/%Y').strftime(
'%Y-%m-%d')
fig, ax = plot.subplots()
#ax.plot(ind, data, 'o-')
ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
fig.autofmt_xdate()
plot.plot(ind, [price.get_y() for price in projected_prices], linewidth=4)
#plot.plot(ind, aggregate_sentiment(progress_trend))
plot.title(company)
plot.show()
|
[
"onifade.esther@gmail.com"
] |
onifade.esther@gmail.com
|
397bb1db215c047a38a6dd15583af7806156363f
|
202e70bbfee2c70049ea8ac43711ec008baa47a3
|
/main.py
|
6e72104a97d2f709d97279c8e5c1bb314b74519f
|
[] |
no_license
|
wqq1136883696/UbuntuPython
|
714b74c3559f2bf9e57d00fbe95f001646532611
|
c3d965ee749a15efdbded4169c576cd38dc44db3
|
refs/heads/master
| 2022-12-18T22:48:33.258697
| 2020-09-11T06:41:35
| 2020-09-11T06:41:35
| 294,617,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
print("{}, nice to meet you!".format(name))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
[
"1136883696@qq.com"
] |
1136883696@qq.com
|
f6bdba59b03588b2fe0c583e830fa12b83a347cb
|
0c8594ca33e334589e9c28c525ca86f75536fc74
|
/Python/evenodd.py
|
af98dd5f07bbfdf80a8e33e0aa0bcba0fe03c2c4
|
[] |
no_license
|
shubham1592/force
|
84112c74cdca0d7dd930eaf89c7d9aa62dbb007d
|
254b4729bb5332ecceb6b395cd7082023710b381
|
refs/heads/master
| 2021-07-17T10:44:14.096408
| 2019-10-23T19:04:12
| 2019-10-23T19:04:12
| 216,235,074
| 0
| 3
| null | 2020-10-02T07:33:39
| 2019-10-19T16:19:43
|
C++
|
UTF-8
|
Python
| false
| false
| 42
|
py
|
for x in range(1,51):
print(2*x)
|
[
"pi.shubham1592@gmail.com"
] |
pi.shubham1592@gmail.com
|
034f01e6a2963900bc368fa59500b9e91af70e91
|
5504066c264a31a301b347858f0e6dd8db0fdccc
|
/docs/jliu118/reg_s3617_50um.py
|
b5eb98dec3120d2d09d8b29a13a3604dff2502da
|
[] |
no_license
|
neurodata-cobalt/cobalt
|
30fb656c851b56144b1d131e2028b5537bac8da0
|
f966200d09d03a75ff9f56ab5c08b03b7bc3aadb
|
refs/heads/master
| 2021-03-22T04:37:04.196672
| 2018-06-15T14:10:42
| 2018-06-15T14:10:42
| 102,608,162
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,956
|
py
|
from ndreg import *
import matplotlib
#import ndio.remote.neurodata as neurodata
from intern.remote.boss import BossRemote
from intern.resource.boss.resource import *
from NeuroDataResource import NeuroDataResource
import pickle
import numpy as np
from requests import HTTPError
import time
import configparser
startTime = time.time()
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
# Assume a valid configuration file exists at .keys/intern.cfg.
cfg_file = '.keys/intern.cfg'
if cfg_file.startswith('~'):
cfg_file = os.path.expanduser('~') + cfg_file[1:]
config = configparser.ConfigParser()
config.read_file(open(cfg_file))
TOKEN = config['Default']['token']
rmt = BossRemote(cfg_file_or_dict=cfg_file)
REFERENCE_COLLECTION = 'ara_2016'
REFERENCE_EXPERIMENT = 'sagittal_50um'
REFERENCE_COORDINATE_FRAME = 'ara_2016'
REFERENCE_CHANNEL = 'average_50um'
# Set/Modify these parameters
REFERENCE_RESOLUTION = 0
REFERENCE_ISOTROPIC = True
# copied code from ndreg because for some reason it wasn't working
def setup_experiment_boss(remote, collection, experiment):
exp_setup = ExperimentResource(experiment, collection)
try:
exp_actual = remote.get_project(exp_setup)
coord_setup = CoordinateFrameResource(exp_actual.coord_frame)
coord_actual= remote.get_project(coord_setup)
return (exp_setup, coord_actual)
except HTTPError as e:
        print(e)
def setup_channel_boss(remote, collection, experiment, channel, channel_type='image', datatype='uint16'):
(exp_setup, coord_actual) = setup_experiment_boss(remote, collection, experiment)
chan_setup = ChannelResource(channel, collection, experiment, channel_type, datatype=datatype)
try:
chan_actual = remote.get_project(chan_setup)
return (exp_setup, coord_actual, chan_actual)
except HTTPError as e:
        print(e)
def imgDownload_boss(remote, channel_resource, coordinate_frame_resource, resolution=0, size=[], start=[], isotropic=False):
"""
Download image with given token from given server at given resolution.
If channel isn't specified the first channel is downloaded.
"""
# TODO: Fix size and start parameters
voxel_unit = coordinate_frame_resource.voxel_unit
voxel_units = ('nanometers', 'micrometers', 'millimeters', 'centimeters')
factor_divide = (1e-6, 1e-3, 1, 10)
fact_div = factor_divide[voxel_units.index(voxel_unit)]
spacingBoss = [coordinate_frame_resource.x_voxel_size, coordinate_frame_resource.y_voxel_size, coordinate_frame_resource.z_voxel_size]
spacing = [x * fact_div for x in spacingBoss] # Convert spacing to mm
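    # e.g. 50 micrometer voxels: fact_div = 1e-3, so spacing becomes 0.05 mm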
if isotropic:
spacing = [x * 2**resolution for x in spacing]
else:
spacing[0] = spacing[0] * 2**resolution
spacing[1] = spacing[1] * 2**resolution
# z spacing unchanged since not isotropic
if size == []: size = get_image_size_boss(coordinate_frame_resource, resolution, isotropic)
if start == []: start = get_offset_boss(coordinate_frame_resource, resolution, isotropic)
#size[2] = 200
#dataType = metadata['channels'][channel]['datatype']
dataType = channel_resource.datatype
# Download all image data from specified channel
array = remote.get_cutout(channel_resource, resolution, [start[0], size[0]], [start[1], size[1]], [start[2], size[2]])
# Cast downloaded image to server's data type
# img = sitk.Cast(sitk.GetImageFromArray(array),ndToSitkDataTypes[dataType]) # convert numpy array to sitk image
img = sitk.Cast(sitk.GetImageFromArray(array),sitk.sitkUInt16) # convert numpy array to sitk image
# Reverse axes order
#img = sitk.PermuteAxesImageFilter().Execute(img,range(dimension-1,-1,-1))
img.SetDirection(identityDirection)
img.SetSpacing(spacing)
# Convert to 2D if only one slice
img = imgCollaspeDimension(img)
return img
(ref_exp_resource, ref_coord_resource, ref_channel_resource) = setup_channel_boss(rmt, REFERENCE_COLLECTION, REFERENCE_EXPERIMENT, REFERENCE_CHANNEL)
refImg = imgDownload_boss(rmt, ref_channel_resource, ref_coord_resource, resolution=REFERENCE_RESOLUTION, isotropic=REFERENCE_ISOTROPIC)
refThreshold = imgPercentile(refImg, 0.99)
REFERENCE_ANNOTATION_COLLECTION = 'ara_2016'
REFERENCE_ANNOTATION_EXPERIMENT = 'sagittal_50um'
REFERENCE_ANNOTATION_COORDINATE_FRAME = 'ara_2016'
REFERENCE_ANNOTATION_CHANNEL = 'annotation_50um'
REFERENCE_ANNOTATION_RESOLUTION = REFERENCE_RESOLUTION
REFERENCE_ANNOTATION_ISOTROPIC = True
(refAnnotation_exp_resource, refAnnotation_coord_resource, refAnnotation_channel_resource) = setup_channel_boss(rmt, REFERENCE_ANNOTATION_COLLECTION, REFERENCE_ANNOTATION_EXPERIMENT, REFERENCE_ANNOTATION_CHANNEL)
refAnnotationImg = imgDownload_boss(rmt, refAnnotation_channel_resource, refAnnotation_coord_resource, resolution=REFERENCE_ANNOTATION_RESOLUTION, isotropic=REFERENCE_ANNOTATION_ISOTROPIC)
randValues = np.random.rand(1000,3)
randValues = np.concatenate(([[0,0,0]],randValues))
randCmap = matplotlib.colors.ListedColormap(randValues)
# Remove missing parts of the brain
remove_regions = [507, 212, 220, 228, 236, 244, 151, 188, 196, 204]
refAnnoImg = sitk.GetArrayFromImage(refAnnotationImg)
remove_indices = np.isin(refAnnoImg, remove_regions)
refAnnoImg[remove_indices] = 0
# adjust annotations
refAnnoImg_adj = sitk.GetImageFromArray(refAnnoImg)
refAnnoImg_adj.SetSpacing(refAnnotationImg.GetSpacing())
refAnnotationImg = refAnnoImg_adj
# adjust atlas with corresponding indices
# refImg_adj = sitk.GetArrayFromImage(refImg)
# refImg_adj[remove_indices] = 0
# refImg_adj = sitk.GetImageFromArray(refImg_adj)
# refImg_adj.SetSpacing(refImg.GetSpacing())
# refImg = refImg_adj
# Downloading input image
# Modify these parameters for your specific experiment
SAMPLE_COLLECTION = 'ailey-dev'
SAMPLE_EXPERIMENT = 's3617'
SAMPLE_COORDINATE_FRAME = 'aileydev_s3617'
SAMPLE_CHANNEL = 'channel1'
SAMPLE_RESOLUTION = 4
SAMPLE_ISOTROPIC = False
sample_exp_resource, sample_coord_resource, sample_channel_resource = setup_channel_boss(rmt, SAMPLE_COLLECTION, SAMPLE_EXPERIMENT, SAMPLE_CHANNEL)
sampleImg = imgDownload_boss(rmt, sample_channel_resource, sample_coord_resource, resolution=SAMPLE_RESOLUTION, isotropic=SAMPLE_ISOTROPIC)
sampleThreshold = imgPercentile(sampleImg, .999)
#Reorienting input image
# modify sampleOrient based on your image orientation
sampleOrient = "RPI"
refOrient = "ASR"
sampleImgReoriented = imgReorient(sampleImg, sampleOrient, refOrient)
# Downsample images
DOWNSAMPLE_SPACING = 0.010 # millimeters
spacing = [DOWNSAMPLE_SPACING,DOWNSAMPLE_SPACING,DOWNSAMPLE_SPACING]
refImg_ds = sitk.Clamp(imgResample(refImg, spacing), upperBound=refThreshold)
sampleImg_ds = sitk.Clamp(imgResample(sampleImgReoriented, spacing), upperBound=sampleThreshold)
sampleImgSize_reorient = sampleImgReoriented.GetSize()
sampleImgSpacing_reorient= sampleImgReoriented.GetSpacing()
# Affine Registration
affine = imgAffineComposite(sampleImg_ds, refImg_ds, iterations=200, useMI=True, verbose=True)
sampleImg_affine = imgApplyAffine(sampleImgReoriented, affine, size=refImg.GetSize(), spacing=refImg.GetSpacing())
sampleImg_affine_bounded = sitk.Clamp(sampleImg_affine,upperBound=sampleThreshold)
refImg_bounded = sitk.Clamp(refImg, upperBound=refThreshold)
# LDDMM Registration
(field, invField) = imgMetamorphosisComposite(sampleImg_ds, refImg_ds, alphaList=[0.2, 0.1, 0.05],
scaleList = 1.0, useMI=True, iterations=100, verbose=True)
affineField = affineToField(affine, field.GetSize(), field.GetSpacing())
fieldComposite = fieldApplyField(field, affineField)
invAffineField = affineToField(affineInverse(affine), invField.GetSize(), invField.GetSpacing())
invFieldComposite = fieldApplyField(invAffineField, invField)
sampleImg_lddmm = imgApplyField(sampleImgReoriented, fieldComposite, size=refImg.GetSize(), spacing=refImg.GetSpacing())
|
[
"jonathan.jy.liu@gmail.com"
] |
jonathan.jy.liu@gmail.com
|
d35f840aebca9e72f9b224612463cc501f5f7dda
|
827dcdf40e7a4998b03b8d8f07f28ede09e47fd9
|
/corpus_scripts/surp_dict.py
|
720aaa496b2bf53b36180ebc441d6667f7830695
|
[] |
no_license
|
LauraGwilliams/arab_pred
|
9d1dfb1285c769888905f09d30d0a14b5468fad0
|
f006a42a9979e7ca6c19634c5b90e51b8f8ba3f0
|
refs/heads/master
| 2016-09-08T05:04:18.583702
| 2015-11-02T16:30:41
| 2015-11-02T16:30:41
| 18,057,133
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,548
|
py
|
import sys
import math
from collections import Counter
from collections import defaultdict
#NOTE -- 'CV' -- corresponds to the element of interest, and
# -- 'C' -- corresponds to the element of interest -1
#sets up the script arguments#
phoneme_of_interest = sys.argv[1]
#create empty dict that we'll fill after#
MyDict = defaultdict(int)
#figures out if it's a linear or mophemic analysis, reads the correct files and sets up the output file and folder#
if '_' in phoneme_of_interest:
C_in = open(phoneme_of_interest[:-2] + '_counts_final.txt','r').read().split('\n')
folder_path = 'log2_surp_dicts/surp-dict-'
print 'conducting morphemic analysis..'
else:
C_in = open(phoneme_of_interest[:-1] + '_counts_final.txt','r').read().split('\n')
print 'conducting linear analysis..'
folder_path = 'log2_surp_dicts/surp-dict-'
#open and read the CV counts, then if the first element of CV is in C, print the frequency of both C and CV#
with open(phoneme_of_interest + '_counts_final.txt','r') as CV_in:
for CV in CV_in:
for C in C_in:
length = len(phoneme_of_interest)
if '_' in phoneme_of_interest:
if len(phoneme_of_interest) >=4:
cv = ''.join(CV[0:int(length)-3])
if cv in C and ',' not in CV[0]:
CV = CV.split(',')
C = C.split(',')
print CV
print "CV"
print float(CV[1])
print C
print "C"
print float(C[1])
print "cond_prob"
cond_prob = float(CV[1])/float(C[1])
surp = math.log(float(cond_prob),2)
print cond_prob
MyDict[CV[0]] = surp
print "---"
else:
cv = ''.join(CV[0:int(length)-2])
if cv in C and ',' not in CV[0]:
CV = CV.split(',')
C = C.split(',')
print CV
print "CV"
print float(CV[1])
print C
print "C"
print float(C[1])
print "cond_prob"
cond_prob = float(CV[1])/float(C[1])
surp = math.log(float(cond_prob),2)
print cond_prob
MyDict[CV[0]] = surp
print "---"
else:
cv = ''.join(CV[0:int(length)-1])
#cv = ''.join(CV[0])
#cv = cv[:-1]
#cv = ''.join(CV[0:2])
if cv in C and ',' not in CV[0]:
#now, get the conditional probability of the second vowel, from that compute surprisal by making it log, then put it in the dict
#(the key is the CV, and the number is the surprisal #
#first, split the dict into its first and second elements i.e., 'ka' '34'
CV = CV.split(',')
C = C.split(',')
#next, get the conditional probability from the second elements of the list (the number) and work out the surprisal
print CV
print "CV"
print float(CV[1])
print C
print "C"
print float(C[1])
print "cond_prob"
cond_prob = float(CV[1])/float(C[1])
surp = math.log(float(cond_prob),2)
print cond_prob
MyDict[CV[0]] = surp
print "---"
#now we just save the dict to file#
file = open(folder_path + phoneme_of_interest + '.txt', 'w')
for key in MyDict:
file.write(key + ',' + str(MyDict[key]) + '\n')
file.close()
#finally print out all words with a surprisal greater than 5.
#Values stored in MyDict are log2 conditional probabilities, so more
#negative values correspond to higher surprisal.
highest_surp = dict((k, v) for k, v in MyDict.items() if v <= -5)
print "items with surprisal greater than 5:"
print highest_surp
#and less than 2:
highest_surp = dict((k, v) for k, v in MyDict.items() if v >= -2)
print "items with surprisal less than 2:"
print highest_surp
|
[
"root@Lauras-MacBook-Pro.local"
] |
root@Lauras-MacBook-Pro.local
|
ba33ce6940599e88acb5a239c6d0893a19068b6e
|
1b652b030a2742d8579474e155dcfdb65b34ac28
|
/print/name3.py
|
f3aa9cf3fac9d2fcf3ccdc2070123a4ca8d011b7
|
[] |
no_license
|
RobinHou0516/Homework
|
065dc076a0d5b508433bf97c73a0f1f603447e34
|
dcb687da489f7c3a87476c07e5902f124edc5856
|
refs/heads/master
| 2020-04-09T23:57:08.793511
| 2018-12-06T12:28:51
| 2018-12-06T12:28:51
| 160,671,520
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
name='Robin Kawensanna Smartest Unrivalled'
age='14'
grade='9th'
school='JXFLS'
print('My name is '+name+'. I am a '+age+' year old '+grade+' grader at '+school+'.')
|
[
"Robinhou0516@gamil.com"
] |
Robinhou0516@gamil.com
|
417047a76d6ad25de43fc2acde8e4c37efc3ab2e
|
62187abac35eec54f56d956ced4aae18be5c667d
|
/pilta.py
|
e9eff95395e2224f94d442902c52ef9d71ff2a61
|
[] |
no_license
|
Gary345/Ivan_Arratia_1559
|
5aea66e102bcc49549f680413d5da00275079c72
|
fafcaa3c3c8e11f4264672dccee13a477a138bc0
|
refs/heads/main
| 2023-01-06T14:34:30.906982
| 2020-11-11T06:31:28
| 2020-11-11T06:31:28
| 300,461,243
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
pila = [3,4,5] # initial stack backed by a list
print(pila) # print the stack
#345
pila.append(7)
pila.append(8)
print(pila)
#34578
print("popped this element:", pila.pop())
#8
print("the stack now looks like:", pila)
#3457
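# Peek at the top element without removing it: pila[-1]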
|
[
"ivanarratia314@aragon.unam.mx"
] |
ivanarratia314@aragon.unam.mx
|
5da3583612010525fc95b1ad9c404287e473130d
|
00a3db4204f764b2d4e49867b198c0203b3e7732
|
/House_Price_Prediction.py
|
a7b87157d709fa31b37651b1176256246e5c8ce2
|
[] |
no_license
|
Madhavraob/tensorflow
|
555397b7146753234299c26654156e2e99c7f47e
|
6b37014ceeb7e1bd025f2fa6c162b4dd9597e859
|
refs/heads/master
| 2020-03-20T05:55:16.610336
| 2018-06-24T04:15:36
| 2018-06-24T04:15:36
| 137,231,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,822
|
py
|
#
# House_Price_Prediction.py
#
# This is a very simple prediction of house prices based on house size, implemented
# in TensorFlow. This code is part of Pluralsight's course "TensorFlow: Getting Started"
#
import tensorflow as tf
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.animation as animation # import animation support
# generate some house sizes between 1000 and 3500 (typical sq ft of house)
num_house = 160
np.random.seed(42)
house_size = np.random.randint(low=1000, high=3500, size=num_house)
# Generate house prices from house size with a random noise added.
np.random.seed(42)
house_price = house_size * 100.0 + np.random.randint(low=20000, high=70000, size=num_house)
# Plot generated house and size
# plt.plot(house_size, house_price, "bx") # bx = blue x
# plt.ylabel("Price")
# plt.xlabel("Size")
# plt.show()
# you need to normalize values to prevent under/overflows.
def normalize(array):
return (array - array.mean()) / array.std()
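# Hedged check (added): normalize() yields zero-mean, unit-variance data,
# e.g. normalize(np.array([1., 2., 3.])) -> array([-1.2247, 0., 1.2247]).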
# define number of training samples, 0.7 = 70%. We can take the first 70% since the values are randomized
num_train_samples = math.floor(num_house * 0.7)
# define training data
train_house_size = np.asarray(house_size[:num_train_samples])
train_price = np.asarray(house_price[:num_train_samples])
train_house_size_norm = normalize(train_house_size)
train_price_norm = normalize(train_price)
# define test data
test_house_size = np.array(house_size[num_train_samples:])
test_house_price = np.array(house_price[num_train_samples:])
test_house_size_norm = normalize(test_house_size)
test_house_price_norm = normalize(test_house_price)
# Set up the TensorFlow placeholders that the training data is fed into on each step
tf_house_size = tf.placeholder("float", name="house_size")
tf_price = tf.placeholder("float", name="price")
# Define the variables holding the size_factor and price_offset we fit during training.
# We initialize them to some random values based on the normal distribution.
tf_size_factor = tf.Variable(np.random.randn(), name="size_factor")
tf_price_offset = tf.Variable(np.random.randn(), name="price_offset")
# 2. Define the operations for the predicting values - predicted price = (size_factor * house_size ) + price_offset
# Notice, the use of the tensorflow add and multiply functions. These add the operations to the computation graph,
# AND the tensorflow methods understand how to deal with Tensors. Therefore do not try to use numpy or other library
# methods.
tf_price_pred = tf.add(tf.multiply(tf_size_factor, tf_house_size), tf_price_offset)
# 3. Define the Loss Function (how much error) - Mean squared error
tf_cost = tf.reduce_sum(tf.pow(tf_price_pred-tf_price, 2))/(2*num_train_samples)
# Optimizer learning rate. The size of the steps down the gradient
learning_rate = 0.1
# 4. define a Gradient descent optimizer that will minimize the loss defined in the operation "cost".
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(tf_cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph in the session
with tf.Session() as sess:
sess.run(init)
# set how often to display training progress and number of training iterations
display_every = 2
num_training_iter = 50
    # calculate the number of fit lines to animate
fit_num_plots = math.floor(num_training_iter/display_every)
# add storage of factor and offset values from each epoch
fit_size_factor = np.zeros(fit_num_plots)
fit_price_offsets = np.zeros(fit_num_plots)
fit_plot_idx = 0
# keep iterating the training data
for iteration in range(num_training_iter):
# Fit all training data
for (x, y) in zip(train_house_size_norm, train_price_norm):
sess.run(optimizer, feed_dict={tf_house_size: x, tf_price: y})
# Display current status
if (iteration + 1) % display_every == 0:
c = sess.run(tf_cost, feed_dict={tf_house_size: train_house_size_norm, tf_price:train_price_norm})
print("iteration #:", '%04d' % (iteration + 1), "cost=", "{:.9f}".format(c), \
"size_factor=", sess.run(tf_size_factor), "price_offset=", sess.run(tf_price_offset))
# Save the fit size_factor and price_offset to allow animation of learning process
fit_size_factor[fit_plot_idx] = sess.run(tf_size_factor)
fit_price_offsets[fit_plot_idx] = sess.run(tf_price_offset)
fit_plot_idx = fit_plot_idx + 1
print("Optimization Finished!")
training_cost = sess.run(tf_cost, feed_dict={tf_house_size: train_house_size_norm, tf_price: train_price_norm})
print("Trained cost=", training_cost, "size_factor=", sess.run(tf_size_factor), "price_offset=", sess.run(tf_price_offset), '\n')
|
[
"madhavrao039@gmail.com"
] |
madhavrao039@gmail.com
|
eb268714e1d62798cbe61cec0f6af724ee53d4f6
|
a163c2cec4d942212bd5bcd25a8759a7da570b7f
|
/ChatBot/main.py
|
f54e258b6b94e4253d6b6b77a9eb96e43cdb92e0
|
[] |
no_license
|
cainanalves/computational_intelligence
|
1daa4c3f153563e11b0d8410d6429648b48b57f1
|
503dfeb9db3dc18725d30587f968ed86ece53d7d
|
refs/heads/master
| 2021-08-23T10:47:22.650121
| 2017-12-04T15:42:48
| 2017-12-04T15:42:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
#!/usr/bin/python3.5
# encoding: utf-8
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
import os
# read_only=True --> tells the bot it has already been trained and does not need to train again.
bot = ChatBot("Teste")#read_only=True)
bot.set_trainer(ListTrainer)
for arq in os.listdir("arqs"):
chats = open("arqs/"+arq,"rb").readlines()
bot.train(chats)
conversa = open("arqs/conversa", "a")
while True:
resq = input("Você: ")
conversa.write(str(resq)+"\n")
resp = bot.get_response(resq)
conversa.write(str(resp)+"\n")
print("Bot: "+ str(resp))
if (("chega de papo" in str(resq)) or ("chega de papo" in str(resp))):
break
conversa.close()
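# Hedged note (added): as the comment at the top suggests, once the "arqs"
# corpus has been trained the bot can be built with read_only=True, which in
# chatterbot prevents it from learning new statements from the live
# conversation (so the training loop above could then be skipped).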
|
[
"cainan.teixeira10@hotmail.com"
] |
cainan.teixeira10@hotmail.com
|
3d62fe89850d8886db18d58cd2b87b3b04745a1b
|
60ffc2a1264a7ac6e743b0c1da380d0daf9c849b
|
/src/core.py
|
d274ce41dd9a3d9ff816e8f776bf1ece3b235894
|
[] |
no_license
|
pedromxavier/760D
|
d3622fb4487ece13c633a527e68526b1677d9da7
|
458ebbeb1aa8975628bd2caebdd919933ecf264a
|
refs/heads/master
| 2021-02-28T16:40:30.769008
| 2020-06-05T18:46:01
| 2020-06-05T18:46:01
| 245,713,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,725
|
py
|
import telebot as tb
import re
import html
import urllib.request as req
def STATUS_PONTE():
try:
URL_PONTE = "https://www.ecoponte.com.br/condicoes-da-via"
PATTERN = r'<tr class=.*?-row><td>.*?<td>(.*?)<td><span class=.*?>\s*(\S*?)\s*</span>'
answer = req.urlopen(URL_PONTE)
answer_bytes = answer.read()
answer_html = answer_bytes.decode('utf8')
matches = re.findall(PATTERN, answer_html, re.DOTALL)
TEXT = "O tráfego na ponte está *{}* no sentido *{}* e *{}* no sentido *{}*."
args = []
for name, status in matches:
args.append(html.unescape(status))
args.append(html.unescape(name))
if not args: raise ValueError
return TEXT.format(*args)
except:
return "Não tenho informações sobre a ponte agora, lamento."
def NEXT_FRESCAO():
pass
def FRESCAO_IMG():
return open(r'static/HORARIO_FRESCAO.jpg', 'rb')
def START_TEXT(json):
if tb.Tempo.morning:
return "Bom dia, Niterói!"
elif tb.Tempo.evening:
return "Boa tarde, Niterói!"
else:
return "Boa Noite, Niterói!"
def UNKNOWN_TEXT(json):
return "Comando desconhecido `{text}`".format(json)
bot = tb.Bot.load(debug=True, fname='LV760DBOT')
bot.START_TEXT = START_TEXT
bot.UNKNOWN_TEXT = UNKNOWN_TEXT
with bot:
@bot.cmd_handler('ponte')
@bot.lock_start
def ponte(self, update, context):
self.debug[0]('[cmd :: ponte]')
json = self.parse(update, context)
self.debug[1]('[obj :: json]', json)
kw = {
'chat_id' : json['chat_id'],
'text' : STATUS_PONTE(),
'parse_mode' : tb.telegram.ParseMode.MARKDOWN,
}
self.debug[1]('[obj :: kw]', kw)
json['bot'].send_message(**kw)
@bot.cmd_handler('frescao')
@bot.lock_start
def frescao(self, update, context):
self.debug[0]('[cmd :: frescao]')
json = self.parse(update, context)
self.debug[1]('[obj :: json]', json)
kw = {
'chat_id' : json['chat_id'],
'photo' : FRESCAO_IMG(),
}
self.debug[1]('[obj :: kw]', kw)
json['bot'].send_photo(**kw)
@bot.cmd_handler('lv')
@bot.lock_start
def lv(self, update, context):
self.debug[0]('[cmd :: lv]')
json = self.parse(update, context)
self.debug[1]('[obj :: json]', json)
kw = {
'chat_id' : json['chat_id'],
'text' : 'Não sei fazer isso ainda.'
}
self.debug[1]('[obj :: kw]', kw)
json['bot'].send_message(**kw)
if __name__ == '__main__':
bot.run()
|
[
"pedromxavier@poli.ufrj.br"
] |
pedromxavier@poli.ufrj.br
|
0330ab227a01ca6a6a99e83cd4578ca5f78b5586
|
b662dc0ac66c51a4ac8c707cb5222ede5f28bdea
|
/run.py
|
94883b1d621772024640541a86dda9af3048d101
|
[] |
no_license
|
lukew3/ReCrypto
|
43bd033b6ea0050536f9f6d3e14d1e403470908a
|
6d6fe3e6e035d9dd74d9ab101b7c3ab4cfb8faf7
|
refs/heads/master
| 2023-02-21T11:32:54.709442
| 2021-09-08T15:43:03
| 2021-09-08T15:43:03
| 231,695,238
| 0
| 0
| null | 2023-02-15T23:02:21
| 2020-01-04T02:04:08
|
HTML
|
UTF-8
|
Python
| false
| false
| 45
|
py
|
from recrypto import app
app.run(debug=True)
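# Hedged note (added): this assumes recrypto/__init__.py defines a Flask-style
# `app` object (e.g. `app = Flask(__name__)`); debug=True turns on the
# auto-reloader and interactive debugger, so it is for development only.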
|
[
"lukew25073@gmail.com"
] |
lukew25073@gmail.com
|
da49a505c50819ea836f94a3f31c5cbacb368bcf
|
e6fb5885b04786ead91c5f98d5ce6134509e6482
|
/CompetitiveProgramming/week2/day4/prob2.py
|
66cbc1b25c74eef91f388156f2cded07d15ce8a9
|
[] |
no_license
|
ShruthikaP/CompetitiveProgramming
|
82ff2cd05a8e96278a7787b8ebcd3dd103829c0a
|
fb3cda698a1d0a859c66b1f36a2edf85d8ca0804
|
refs/heads/master
| 2020-03-21T15:41:54.665575
| 2018-07-21T09:11:22
| 2018-07-21T09:11:22
| 138,727,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
import unittest
def get(s):
if len(s) <= 1:
return set([s])
mid = s[:-1]
end = s[-1]
mid_perm = get(mid)
p = set()
for i in mid_perm:
for j in range(len(mid) + 1):
ans = (i[:j] + end + i[j:])
p.add(ans)
return p
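# Hedged check (added): exercises the otherwise-unused unittest import. get()
# builds permutations recursively by inserting the last character into every
# slot of each permutation of the shorter prefix.
class TestGet(unittest.TestCase):
    def test_three_chars(self):
        self.assertEqual(get("abc"),
                         {"abc", "acb", "bac", "bca", "cab", "cba"})

if __name__ == "__main__":
    unittest.main()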
|
[
"noreply@github.com"
] |
ShruthikaP.noreply@github.com
|
5ff698b54c085c2863d04a125320ecbaa0c0c1b0
|
470c42904eec5d46be4a12219d4dfa29e251d332
|
/spotcheck.py
|
76f8cb05049ea427ff66137e6312ef088aca5a0a
|
[] |
no_license
|
warrendlee1/Python-Programs
|
c3f21f1875c7f5a27a8add5fe2caa2a5955dd5f8
|
97c84ba8a1fa8f5c5de77a69a534115838608c2a
|
refs/heads/master
| 2021-04-13T19:49:22.991286
| 2018-04-28T05:16:54
| 2018-04-28T05:16:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 122
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 11:26:34 2017
@author: warrenl0134
"""
listA = [1,4,3,0]
listA
|
[
"warrenlee@Williams-MBP.attlocal.net"
] |
warrenlee@Williams-MBP.attlocal.net
|
4ea07f0c63929fc01a0774b85c0d959f540d5aa4
|
10259763befc3aab2cb6f172a5f49a0fe6689b8a
|
/readingdb/test_lineroute.py
|
359d8377bee14d9b0b372b6df71c7e1243f4a485
|
[] |
no_license
|
Lewington-pitsos/readingdb
|
4a8c473ffdef0f40978149f900d74051d1e8da85
|
a57b9dc1ecc8dd5c2aa1b35691fe0aa1bcaff733
|
refs/heads/master
| 2023-08-14T20:55:25.862149
| 2021-09-19T02:43:32
| 2021-09-19T02:43:32
| 349,652,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,263
|
py
|
import unittest
from readingdb.lineroute import LinePoint, LineRoute, linear_interp
class TestLineRoute(unittest.TestCase):
    def test_performs_linear_interpolation_on_real_points(self):
start_point = LinePoint.from_point({
"Timestamp": 1568113911576,
"Latitude": -37.8714232,
"Longitude": 145.2450816
})
end_point = LinePoint.from_point({
"Timestamp": 1618113911576,
"Latitude": -37.8744232,
"Longitude": 145.7450816
})
pnts = [
start_point,
end_point
]
r = LineRoute(pnts, interp_alg=linear_interp)
interp = r.point_at(1568113911576)
self.assertEqual(interp, start_point)
r = LineRoute(pnts, interp_alg=linear_interp)
interp = r.point_at(1617779685467)
self.assertEqual(interp, LinePoint.from_point({
"Timestamp": 1617779685467,
"Latitude": -37.874403146433465,
"Longitude": 145.74173933891
}))
    def test_performs_linear_interpolation_on_dummy_points(self):
start_point = LinePoint.from_point({
"Timestamp": 10,
"Latitude": 10,
"Longitude": 20
})
middle_point = LinePoint.from_point({
"Timestamp": 20,
"Latitude": 20,
"Longitude": 40
})
end_point = LinePoint.from_point({
"Timestamp": 120,
"Latitude": 21,
"Longitude": 50
})
pnts = [
start_point,
middle_point,
end_point
]
r = LineRoute(pnts, interp_alg=linear_interp)
interp = r.point_at(10)
self.assertEqual(interp, start_point)
interp = r.point_at(20)
self.assertEqual(interp, middle_point)
interp = r.point_at(120)
self.assertEqual(interp, end_point)
interp = r.point_at(19)
self.assertEqual(interp, LinePoint.from_point({
"Timestamp": 19,
"Latitude": 19,
"Longitude": 38
}))
interp = r.point_at(11)
self.assertEqual(interp, LinePoint.from_point({
"Timestamp": 11,
"Latitude": 11,
"Longitude": 22
}))
interp = r.point_at(119)
self.assertEqual(interp, LinePoint.from_point({
"Timestamp": 119,
"Latitude": 20.99,
"Longitude": 49.9
}))
class TestInterpolation(unittest.TestCase):
def test_linear_interpolation_returns_bookends(self):
start_ts = 1568113911576
end_ts = 1618113911576
start_point = LinePoint.from_point({
"Timestamp": start_ts,
"Latitude": -37.8714232,
"Longitude": 145.2450816
})
end_point = LinePoint.from_point({
"Timestamp": end_ts,
"Latitude": -37.8744232,
"Longitude": 145.7450816
})
interp = linear_interp(start_point, end_point, start_ts)
self.assertEqual(interp, start_point)
interp = linear_interp(start_point, end_point, end_ts)
self.assertEqual(interp, end_point)
def test_linear_interpolation(self):
start_point = LinePoint.from_point({
"Timestamp": 200,
"Latitude": -10.0,
"Longitude": 150.0
})
end_point = LinePoint.from_point({
"Timestamp": 300,
"Latitude": -20.0,
"Longitude": 100.0,
})
interp = linear_interp(start_point, end_point, 250)
self.assertEqual(interp, LinePoint.from_point({
"Timestamp": 250,
"Latitude": -15.0,
"Longitude": 125.0,
}))
start_point = LinePoint.from_point({
"Timestamp": 200,
"Latitude": 20.0,
"Longitude": 10.0
})
end_point = LinePoint.from_point({
"Timestamp": 300,
"Latitude": -20.0,
"Longitude": 110.0,
})
interp = linear_interp(start_point, end_point, 210)
self.assertEqual(interp, LinePoint.from_point({
"Timestamp": 210,
"Latitude": 16.0,
"Longitude": 20.0,
}))
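# Hedged arithmetic check (added): at ts=210 the fraction travelled is
# (210 - 200) / (300 - 200) = 0.1, so Latitude = 20 + 0.1 * (-20 - 20) = 16.0
# and Longitude = 10 + 0.1 * (110 - 10) = 20.0, matching the expectation above.

if __name__ == '__main__':
    unittest.main()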
|
[
"lewingtonpitsos@gmail.com"
] |
lewingtonpitsos@gmail.com
|
783fdd25c10199746426d1f7b3e87e009964b1e1
|
d4a5462b2cd2eff99da6ad5147b5423c819ae731
|
/1072.py
|
7cde2b4cbdcb7fcb593e7f072f7089b72b0530d7
|
[] |
no_license
|
Rafesz/URI_solutions_py
|
3a61e6b0b571a03857f1c4efb54546edb2a0fb6a
|
62a9f8227523e409afa9d506df66516ef9b48079
|
refs/heads/main
| 2023-08-11T20:55:04.267913
| 2021-09-21T22:25:50
| 2021-09-21T22:25:50
| 402,085,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
# Read an integer N. This value is the number of integers X that will be read next.
# Print how many of these X values fall inside the interval [10,20] and how many fall outside it.
# Input
# The first line of the input contains an integer N (N < 10000), which gives the number of test cases.
# Each of the following test cases is an integer X (-10^7 < X < 10^7).
# Output
# For each case, print how many numbers are inside (in) and how many are outside (out) the interval.
i = 0
contadorIn = 0
contadorOut = 0
n = int(input())
while(i < n):
valor = int(input())
if(10 <= valor <= 20): contadorIn += 1
    else: contadorOut += 1
i += 1
print("{} in\n{} out".format(contadorIn, contaodorOut))
|
[
"noreply@github.com"
] |
Rafesz.noreply@github.com
|
af55f4e50ab0299aca371628784bd73425438b22
|
b5dcd0bad00891a07412123de90097d9459dffbe
|
/Characters/_data/noemye.py
|
0bd6ef0250c946bf262a6afe827db7e1ad7c3c0f
|
[] |
no_license
|
Vgr255/Vgr255.github.io
|
c90f741f3c03449d884dda1d4672ee841307c304
|
4fc752f86678b7885ff84f392b23a1df2d39f582
|
refs/heads/master
| 2021-01-18T21:58:22.093346
| 2016-05-15T17:11:27
| 2016-05-15T17:11:27
| 42,474,140
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
FILE = "Noemye"
NAME = "Noémye"
NATIONALITY = "Canadian"
CLASS = "Dragoon"
WEAPON = "Her fists"
BIRTH = "April 16th, 1990"
BIRTH_LOCATION = "Québec, Québec, Canada, Earth"
LETTER = "N"
RECRUITMENT_ORDER = 15
SUMMARY = ""
ABILITIES = ""
BACKSTORY = ""
HIGHLIGHTS = ""
SUMMONS = ("Bismark", "Moogle", "Carbuncle")
|
[
"vgr255@live.ca"
] |
vgr255@live.ca
|
94a992f736f385b74839c456c1539fa5deb7f28c
|
cdfa17ab8b6524a4611dbadd69fabe6a38c8fe0b
|
/pysot/models/sa/__init__.py
|
7799e4ee9021564c4e5d4e09e4fe9800056bf345
|
[
"Apache-2.0"
] |
permissive
|
bourahla-omar/pysot
|
7e61e24fe0d6375770569a47dc1051b89199bd56
|
c0fd8a0b3a307da0d50bc07208417d69244dc00f
|
refs/heads/master
| 2020-07-24T13:59:33.410511
| 2019-10-22T05:55:46
| 2019-10-22T05:55:46
| 207,949,969
| 0
| 0
|
Apache-2.0
| 2019-09-12T02:43:31
| 2019-09-12T02:43:30
| null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pysot.models.sa.sa import sablock
SABLOCKS = {
'sablock': sablock,
}
def get_sa(name, **kwargs):
return SABLOCKS[name](**kwargs)
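# Hedged usage sketch (added): kwargs are forwarded straight to the block's
# constructor, e.g. get_sa('sablock', channels=256) -- the parameter name is
# illustrative only; see pysot/models/sa/sa.py for the real signature.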
|
[
"csxuandongzhao@gmail.com"
] |
csxuandongzhao@gmail.com
|
815317cb5e13abba4f028211c668ab00d1dd1f21
|
446695e13ded8aabaf9ed1748b9382a8ff3bfbf3
|
/node_modules/fsevents/build/config.gypi
|
460e748f09befc6081ecbf3f9ab258b60964f40f
|
[
"MIT"
] |
permissive
|
linvic/idss
|
07a99eb784e76131091002e69f64bf83173f3e2c
|
3717371d28f918771e87886e7fb26cb62eaefd0a
|
refs/heads/master
| 2022-12-16T17:57:03.216078
| 2020-02-27T00:41:00
| 2020-02-27T00:41:00
| 230,076,406
| 0
| 0
| null | 2022-12-11T18:22:28
| 2019-12-25T09:15:25
|
Vue
|
UTF-8
|
Python
| false
| false
| 5,686
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "5",
"node_byteorder": "little",
"node_code_cache": "yes",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_report": "true",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_large_pages_script_lld": "false",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "72.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"xcode_version": "8.0",
"nodedir": "/Users/chenyanlin/Library/Caches/node-gyp/12.14.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npm.taobao.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"registry": "https://registry.npm.taobao.org/",
"fetch_retries": "2",
"noproxy": "",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/chenyanlin/.npm-init.js",
"userconfig": "/Users/chenyanlin/.npmrc",
"cidr": "",
"node_version": "12.14.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"disturl": "https://npm.taobao.org/dist",
"cache_min": "10",
"otp": "",
"cache": "/Users/chenyanlin/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.13.4 node/v12.14.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/3h/7l8mkpk50qnc1dmvf3871gfm0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
|
[
"674206994@qq.com"
] |
674206994@qq.com
|
9ed4cd0dc3795cc7629b973fd8603b0b0e896b3a
|
e4343fb98e9d50f67bc47374aa8f83ae0bf15fd8
|
/Lab9/main.py
|
7df71b77ec5f5f17bffb2720f3d8ba111b857b53
|
[] |
no_license
|
VVladislaVLL/geometry-labs
|
7caaf1cb4466957330416660caf78ee4bbc44557
|
df07f774f120bde2c8c7405e9eb6a3f870758778
|
refs/heads/master
| 2023-05-08T23:05:46.542715
| 2021-06-04T07:24:46
| 2021-06-04T07:24:46
| 343,880,645
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
#!/usr/bin/python
import random
from time import sleep
from matplotlib import pyplot as plt
from classes.Point import Point
from classes.Vector2d import Vector2d, pi
from divideAndRule2 import divide_and_rule
from utils.binary import binary_test
from utils.graph import draw_polygon
def reflect(p, vector_coords):
# Previous direction
v = p.direction
# Polygon side
q = Vector2d(vector_coords[0], vector_coords[1])
scal = 2 * (Vector2d.scalar_product(v, q) / Vector2d.scalar_product(q, q))
prod = Vector2d.s_mult(q, scal)
new_direction = Vector2d.s_minus(prod, v)
return new_direction
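# Hedged note (added): assuming Vector2d.s_minus(a, b) computes a - b, reflect()
# implements the standard reflection of v about the line spanned by q,
#     r = 2 * (v . q / q . q) * q - v,
# which mirrors the previous direction across the polygon side so a point
# "bounces" off the wall it is about to cross.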
def plot_task(rectangle, points, points_y):
    MIN_DISTANCE = 0.5
    plt.ion()
    s = 1
    while s:
        plt.clf()
        draw_polygon(rectangle)
        clash_flag = divide_and_rule(points, points_y)
        if clash_flag[1] <= MIN_DISTANCE:
            clash_flag[0][0].reflect_direction()
            clash_flag[0][1].reflect_direction()
        for i in points:
            # run the wall test once per point and reuse its result for both
            # the collision flag and the side to reflect against
            test = binary_test(rectangle, i.get_next_state_circle(MIN_DISTANCE / 2))
            if not test['flag']:
                i.direction = reflect(i, test['points'])
        for i in points:
            i.move()
            plt.scatter(i.x, i.y, s=MIN_DISTANCE / 2 * 750, marker='o', c='g')
plt.draw()
plt.gcf().canvas.flush_events()
sleep(0.002)
plt.show()
plt.ioff()
plt.show()
if __name__ == '__main__':
# Rectangle
rectangle = [Point(1, 1), Point(1, 10), Point(15, 10), Point(15, 1)]
# Our points
points_set = [Point(2, 2), Point(2, 3), Point(4, 2),
Point(4, 5), Point(4, 8), Point(6, 2),
                  Point(6, 9), Point(8, 5), Point(10, 3),
                  Point(11, 9), Point(12, 5), Point(12, 8)]
points_set.sort(key=lambda point: (point.x, point.y))
points_set_y = points_set.copy()
points_set_y.sort(key=lambda point: (point.y, point.x))
# min_dist = divide_and_rule(points_set)
# print(min_dist)
# Set points direction
for point in points_set:
point.set_direction(Vector2d.get_vector(random.uniform(0, 2 * pi), 0.09))
plot_task(rectangle, points_set, points_set_y)
|
[
"vlad2002kochurko@gmail.com"
] |
vlad2002kochurko@gmail.com
|